Java class com.amazonaws.services.s3.transfer.TransferManagerBuilder code examples

Project: grassroot-platform    File: StorageBrokerImpl.java
private String uploadToS3(String bucket, String key, MultipartFile file) {
    final AmazonS3 s3 = s3ClientFactory.createClient();
    final TransferManager transferManager = TransferManagerBuilder.standard().withS3Client(s3).build();
    try {
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(file.getSize());
        metadata.setContentType(file.getContentType());

        byte[] resultByte = DigestUtils.md5(file.getBytes());
        String streamMD5 = new String(Base64.encodeBase64(resultByte));
        metadata.setContentMD5(streamMD5);

        Upload upload = transferManager.upload(bucket, key, file.getInputStream(), metadata);
        upload.waitForCompletion();
        return streamMD5;
    } catch (AmazonServiceException | InterruptedException | IOException e) {
        logger.error("Error uploading file: {}", e.toString());
        return null;
    } finally {
        transferManager.shutdownNow();
    }
}
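A hedged variation on the snippet above: when the payload is already on disk, handing TransferManager a File (rather than an InputStream plus hand-built ObjectMetadata) lets the SDK determine the content length itself and split large objects into parallel multipart uploads. Bucket, key, and path below are placeholders:

final TransferManager tm = TransferManagerBuilder.standard().withS3Client(s3).build();
try {
    // With a File source the SDK knows the length up front and can
    // upload parts in parallel; no explicit ObjectMetadata is required.
    Upload upload = tm.upload("my-bucket", "my/key", new File("/tmp/payload.bin"));
    upload.waitForCompletion();
} catch (AmazonServiceException | InterruptedException e) {
    logger.error("Error uploading file: {}", e.toString());
} finally {
    tm.shutdownNow(); // the no-arg form also shuts down the wrapped S3 client
}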
Project: xm-ms-entity    File: AmazonS3Template.java
/**
 * Gets an Amazon transfer manager.
 *
 * @return a transfer manager
 */
public TransferManager getTransferManager() {
    if (transferManager == null) {
        transferManager = TransferManagerBuilder.standard().withS3Client(getAmazonS3Client()).build();
    }
    return transferManager;
}
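If getTransferManager() can be reached from several threads, the null check above may build more than one manager. A minimal thread-safe sketch, assuming the transferManager field can be declared volatile:

private volatile TransferManager transferManager;

public TransferManager getTransferManager() {
    TransferManager tm = transferManager;
    if (tm == null) {
        synchronized (this) {
            tm = transferManager;
            if (tm == null) {
                // Double-checked locking: build at most one manager.
                tm = TransferManagerBuilder.standard().withS3Client(getAmazonS3Client()).build();
                transferManager = tm;
            }
        }
    }
    return tm;
}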
Project: circus-train    File: CopyMapper.java
/**
 * Implementation of the Mapper::setup() method. This extracts the S3MapReduceCp options specified in the Job's
 * configuration in order to set up the Job.
 *
 * @param context Mapper's context.
 * @throws IOException On IO failure.
 * @throws InterruptedException If the job is interrupted.
 */
@Override
public void setup(Context context) throws IOException, InterruptedException {
  conf = new S3MapReduceCpConfiguration(context.getConfiguration());

  ignoreFailures = conf.getBoolean(ConfigurationVariable.IGNORE_FAILURES);

  targetFinalPath = new Path(conf.get(S3MapReduceCpConstants.CONF_LABEL_TARGET_FINAL_PATH));

  AwsS3ClientFactory awsS3ClientFactory = new AwsS3ClientFactory();
  transferManager = TransferManagerBuilder
      .standard()
      .withMinimumUploadPartSize(conf.getLong(ConfigurationVariable.MINIMUM_UPLOAD_PART_SIZE))
      .withMultipartUploadThreshold(conf.getLong(ConfigurationVariable.MULTIPART_UPLOAD_THRESHOLD))
      .withS3Client(awsS3ClientFactory.newInstance(conf))
      .withShutDownThreadPools(true)
      .withExecutorFactory(new ExecutorFactory() {
        @Override
        public ExecutorService newExecutor() {
          return Executors.newFixedThreadPool(conf.getInt(ConfigurationVariable.NUMBER_OF_UPLOAD_WORKERS));
        }
      })
      .build();
}
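Because the builder passes withShutDownThreadPools(true), shutting the manager down also stops the fixed thread pool created by the ExecutorFactory. A sketch of a matching cleanup() (not shown in the original) might be:

@Override
public void cleanup(Context context) throws IOException, InterruptedException {
    if (transferManager != null) {
        // Also stops the upload worker pool, per withShutDownThreadPools(true).
        transferManager.shutdownNow();
    }
    super.cleanup(context);
}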
Project: S3Mock    File: AmazonClientUploadIT.java
private TransferManager createTransferManager(final long multipartUploadThreshold,
    final long multipartUploadPartSize,
    final long multipartCopyThreshold,
    final long multipartCopyPartSize) {
  final ThreadFactory threadFactory = new ThreadFactory() {
    private int threadCount = 1;

    @Override
    public Thread newThread(final Runnable r) {
      final Thread thread = new Thread(r);
      thread.setName("s3-transfer-" + threadCount++);

      return thread;
    }
  };

  return TransferManagerBuilder.standard()
      .withS3Client(s3Client)
      .withExecutorFactory(() -> Executors.newFixedThreadPool(THREAD_COUNT, threadFactory))
      .withMultipartUploadThreshold(multipartUploadThreshold)
      .withMinimumUploadPartSize(multipartUploadPartSize)
      .withMultipartCopyPartSize(multipartCopyPartSize)
      .withMultipartCopyThreshold(multipartCopyThreshold)
      .build();
}
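A test might call this helper with small, explicit thresholds to force the multipart code paths; the values below are illustrative (real S3 enforces a 5 MiB minimum part size):

final long fiveMiB = 5L * 1024 * 1024;
final TransferManager tm = createTransferManager(fiveMiB, fiveMiB, fiveMiB, fiveMiB);
try {
    // Anything larger than 5 MiB now uploads as multiple parallel parts.
    tm.upload("test-bucket", "big-object", new File("big-file.bin")).waitForCompletion();
} finally {
    tm.shutdownNow();
}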
Project: nexus-public    File: S3BlobStore.java
@Override
@Guarded(by = STARTED)
public Blob create(final InputStream blobData, final Map<String, String> headers) {
  checkNotNull(blobData);

  return create(headers, destination -> {
      try (InputStream data = blobData) {
        MetricsInputStream input = new MetricsInputStream(data);
        TransferManager transferManager = TransferManagerBuilder.standard().withS3Client(s3).build();
        transferManager.upload(getConfiguredBucket(), destination, input, new ObjectMetadata())
            .waitForCompletion();
        return input.getMetrics();
      } catch (InterruptedException e) {
        throw new BlobStoreException("error uploading blob", e, null);
      }
    });
}
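Note that this builds a fresh TransferManager (and thread pool) on every create() call and never shuts it down. A common alternative, sketched below under the assumption that the s3 field is initialized first, is to reuse one manager per blob store, along the lines of the lazy getter in the AmazonS3Template example above:

// Sketch (assumed field): one manager per blob store, reused across uploads.
private TransferManager transferManager;

private TransferManager transferManager() {
    if (transferManager == null) {
        transferManager = TransferManagerBuilder.standard().withS3Client(s3).build();
    }
    return transferManager;
}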
Project: circus-train    File: TransferManagerFactory.java
public TransferManager newInstance(AmazonS3 targetS3Client, S3S3CopierOptions s3s3CopierOptions) {
  return TransferManagerBuilder
      .standard()
      .withMultipartCopyThreshold(s3s3CopierOptions.getMultipartCopyThreshold())
      .withMultipartCopyPartSize(s3s3CopierOptions.getMultipartCopyPartSize())
      .withS3Client(targetS3Client)
      .build();
}
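Callers can then drive a server-side copy through the returned manager; a usage sketch with placeholder names:

TransferManager tm = factory.newInstance(targetS3Client, s3s3CopierOptions);
try {
    Copy copy = tm.copy("source-bucket", "source/key", "target-bucket", "target/key");
    copy.waitForCompletion(); // objects above the copy threshold are copied in parallel parts
} finally {
    tm.shutdownNow(false); // leave the caller-owned S3 client running
}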
Project: photon-model    File: AWSUtils.java
public static TransferManager getS3TransferManager(AuthCredentialsServiceState credentials,
        String region, ExecutorService executorService) {

    AWSStaticCredentialsProvider awsStaticCredentialsProvider = new AWSStaticCredentialsProvider(
            new BasicAWSCredentials(credentials.privateKeyId,
                    EncryptionUtils.decrypt(credentials.privateKey)));

    AmazonS3ClientBuilder amazonS3ClientBuilder = AmazonS3ClientBuilder.standard()
            .withCredentials(awsStaticCredentialsProvider)
            .withForceGlobalBucketAccessEnabled(true);

    if (region == null) {
        region = Regions.DEFAULT_REGION.getName();
    }

    if (isAwsS3Proxy()) {
        AwsClientBuilder.EndpointConfiguration endpointConfiguration = new AwsClientBuilder.EndpointConfiguration(
                getAwsS3ProxyHost(), region);
        amazonS3ClientBuilder.setEndpointConfiguration(endpointConfiguration);
    } else {
        amazonS3ClientBuilder.setRegion(region);
    }

    TransferManagerBuilder transferManagerBuilder = TransferManagerBuilder.standard()
            .withS3Client(amazonS3ClientBuilder.build())
            .withExecutorFactory(() -> executorService)
            .withShutDownThreadPools(false);

    return transferManagerBuilder.build();
}
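Because the builder uses withShutDownThreadPools(false), the caller keeps ownership of executorService and must close it separately; a usage sketch (credentials and names are placeholders):

ExecutorService executor = Executors.newFixedThreadPool(4);
TransferManager tm = AWSUtils.getS3TransferManager(credentials, "us-east-1", executor);
try {
    tm.download("some-bucket", "some/key", new File("/tmp/out.bin")).waitForCompletion();
} finally {
    tm.shutdownNow();    // shuts down the manager and its client...
    executor.shutdown(); // ...but not the pool, which remains the caller's to close
}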
Project: Sqawsh    File: AngularjsAppCustomResourceLambda.java
void deleteAngularjsApp(String websiteBucket, LambdaLogger logger) {
  logger.log("Removing AngularjsApp content from website versioned S3 bucket");

  // We need to delete every version of every key
  ListVersionsRequest listVersionsRequest = new ListVersionsRequest()
      .withBucketName(websiteBucket);
  VersionListing versionListing;

  AmazonS3 client = TransferManagerBuilder.defaultTransferManager().getAmazonS3Client();
  do {
    versionListing = client.listVersions(listVersionsRequest);
    versionListing
        .getVersionSummaries()
        .stream()
        .filter(k -> (k.getKey().startsWith("app")))
        .forEach(
            k -> {
              logger.log("About to delete version: " + k.getVersionId()
                  + " of AngularjsApp page: " + k.getKey());
              DeleteVersionRequest deleteVersionRequest = new DeleteVersionRequest(websiteBucket,
                  k.getKey(), k.getVersionId());
              client.deleteVersion(deleteVersionRequest);
              logger.log("Successfully deleted version: " + k.getVersionId()
                  + " of AngularjsApp page: " + k.getKey());
            });

    listVersionsRequest.setKeyMarker(versionListing.getNextKeyMarker());
  } while (versionListing.isTruncated());
  logger.log("Finished removing AngularjsApp content from website S3 bucket");
}
Project: Sqawsh    File: TransferUtils.java
/**
 * Sets public read permissions on content within an S3 bucket.
 *
 * <p>Web content served from an S3 bucket must have public read permissions.
 *
 * @param bucketName the bucket to apply the permissions to.
 * @param prefix prefix within the bucket, beneath which to apply the permissions.
 * @param logger a CloudwatchLogs logger.
 */
public static void setPublicReadPermissionsOnBucket(String bucketName, Optional<String> prefix,
    LambdaLogger logger) {
  // Ensure newly uploaded content has public read permission
  ListObjectsRequest listObjectsRequest;
  if (prefix.isPresent()) {
    logger.log("Setting public read permission on bucket: " + bucketName + " and prefix: "
        + prefix.get());
    listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName).withPrefix(
        prefix.get());
  } else {
    logger.log("Setting public read permission on bucket: " + bucketName);
    listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName);
  }

  ObjectListing objectListing;
  AmazonS3 client = TransferManagerBuilder.defaultTransferManager().getAmazonS3Client();
  do {
    objectListing = client.listObjects(listObjectsRequest);
    for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
      logger.log("Setting permissions for S3 object: " + objectSummary.getKey());
      client.setObjectAcl(bucketName, objectSummary.getKey(), CannedAccessControlList.PublicRead);
    }
    listObjectsRequest.setMarker(objectListing.getNextMarker());
  } while (objectListing.isTruncated());
  logger.log("Finished setting public read permissions");
}
Project: Sqawsh    File: TransferUtils.java
/**
 * Adds gzip content-encoding metadata to S3 objects.
 *
 * <p>All objects beneath the specified prefix (i.e. folder) will have the
 * metadata added. When the bucket serves objects it will then add a suitable
 * Content-Encoding header.
 *
 * @param bucketName the bucket to apply the metadata to.
 * @param prefix prefix within the bucket, beneath which to apply the metadata.
 * @param logger a CloudwatchLogs logger.
 */
public static void addGzipContentEncodingMetadata(String bucketName, Optional<String> prefix,
    LambdaLogger logger) {

  // To add new metadata, we must copy each object to itself.
  ListObjectsRequest listObjectsRequest;
  if (prefix.isPresent()) {
    logger.log("Setting gzip content encoding metadata on bucket: " + bucketName
        + " and prefix: " + prefix.get());
    listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName).withPrefix(
        prefix.get());
  } else {
    logger.log("Setting gzip content encoding metadata on bucket: " + bucketName);
    listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName);
  }

  ObjectListing objectListing;
  AmazonS3 client = TransferManagerBuilder.defaultTransferManager().getAmazonS3Client();
  do {
    objectListing = client.listObjects(listObjectsRequest);
    for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
      String key = objectSummary.getKey();
      logger.log("Setting metadata for S3 object: " + key);
      // We must specify ALL metadata - not just the one we're adding.
      ObjectMetadata objectMetadata = client.getObjectMetadata(bucketName, key);
      objectMetadata.setContentEncoding("gzip");
      CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucketName, key, bucketName,
          key).withNewObjectMetadata(objectMetadata).withCannedAccessControlList(
          CannedAccessControlList.PublicRead);
      client.copyObject(copyObjectRequest);
      logger.log("Set metadata for S3 object: " + key);
    }
    listObjectsRequest.setMarker(objectListing.getNextMarker());
  } while (objectListing.isTruncated());
  logger.log("Set gzip content encoding metadata on bucket");
}
Project: Sqawsh    File: TransferUtils.java
/**
 * Adds a cache-control header to S3 objects.
 *
 * <p>All objects beneath the specified prefix (i.e. folder), and with the
 * specified extension, will have the header added. When the bucket serves
 * objects it will then add a suitable Cache-Control header.
 *
 * @param headerValue value of the cache-control header.
 * @param bucketName the bucket to apply the header to.
 * @param prefix prefix within the bucket, beneath which to apply the header.
 * @param extension file extension to apply the header to.
 * @param logger a CloudwatchLogs logger.
 */
public static void addCacheControlHeader(String headerValue, String bucketName,
    Optional<String> prefix, String extension, LambdaLogger logger) {

  // To add new metadata, we must copy each object to itself.
  ListObjectsRequest listObjectsRequest;
  if (prefix.isPresent()) {
    logger.log("Setting cache-control metadata: " + headerValue + ", on bucket: " + bucketName
        + " and prefix: " + prefix.get() + " and extension: " + extension);
    listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName).withPrefix(
        prefix.get());
  } else {
    logger.log("Setting cache-control metadata: " + headerValue + ", on bucket: " + bucketName
        + " and extension: " + extension);
    listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName);
  }

  ObjectListing objectListing;
  AmazonS3 client = TransferManagerBuilder.defaultTransferManager().getAmazonS3Client();
  do {
    objectListing = client.listObjects(listObjectsRequest);
    for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
      String key = objectSummary.getKey();
      if (!key.endsWith(extension)) {
        continue;
      }
      logger.log("Setting metadata for S3 object: " + key);
      // We must specify ALL metadata - not just the one we're adding.
      ObjectMetadata objectMetadata = client.getObjectMetadata(bucketName, key);
      objectMetadata.setCacheControl(headerValue);
      CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucketName, key, bucketName,
          key).withNewObjectMetadata(objectMetadata).withCannedAccessControlList(
          CannedAccessControlList.PublicRead);
      client.copyObject(copyObjectRequest);
      logger.log("Set metadata for S3 object: " + key);
    }
    listObjectsRequest.setMarker(objectListing.getNextMarker());
  } while (objectListing.isTruncated());
  logger.log("Set cache-control metadata on bucket");
}
Project: uploader    File: UploaderApplication.java
@Override
public void run(@Nonnull final UploaderConfiguration configuration,
        @Nonnull final Environment environment) throws Exception {

    // set up S3 client
    final AwsConfiguration awsConfig = configuration.getAws();
    final AmazonS3 s3 = awsConfig.buildS3(environment);
    final Uploader uploader = new Uploader(awsConfig);

    // Configure the Netty TCP server
    final ChannelFuture future = configuration.getNetty().build(environment,
            uploader, awsConfig.getMaxUploadSize());

    // Configure the transfer manager to use the Netty event loop
    final TransferManager transfer = TransferManagerBuilder.standard()
            .withS3Client(s3)
            .withMinimumUploadPartSize(
                    awsConfig.getMinimumUploadPartSize().toBytes())
            .withShutDownThreadPools(false)
            .withExecutorFactory(new NettyExecutorFactory(future)).build();

    uploader.setTransferManager(transfer);

    environment.healthChecks().register("s3", new AmazonS3HealthCheck(s3));

    // Resources
    environment.jersey().register(new BatchResource(uploader));
    environment.jersey().register(new PingResource());
    environment.jersey().register(new VersionResource());
}
Project: ecs-samples    File: _10_ReadLargeObjectTM.java
public static void main(String[] args) throws Exception {
    // create the AWS S3 Client
    AmazonS3 s3 = AWSS3Factory.getS3Client();

    // retrieve the key value from user
    System.out.println( "Enter the object key:" );
    String key = new BufferedReader( new InputStreamReader( System.in ) ).readLine();

    // print start time
    Date start_date = new Date();
    System.out.println(start_date.toString());

    // file will be placed in temp dir with .tmp extension
    File file = File.createTempFile("read-large-object-tm", null);

    TransferManager tm = TransferManagerBuilder.standard()
            .withS3Client(s3)
            .build();

    // download the object to file
    Download download = tm.download(AWSS3Factory.S3_BUCKET, key, file);

    // block until download finished
    download.waitForCompletion();

    tm.shutdownNow();

    // print end time
    Date end_date = new Date();
    System.out.println(end_date.toString());
}
Project: emr-workload-profiler    File: S3Persister.java
public S3Persister(String s3Bucket) {
    this.s3Bucket = s3Bucket;
    this.transferManager = Optional.of(TransferManagerBuilder.defaultTransferManager());
}
Project: emr-workload-profiler    File: S3Persister.java
private void prepareTransferManager() {
    if (!transferManager.isPresent()) {
        this.transferManager = Optional.of(TransferManagerBuilder.defaultTransferManager());
    }
}
Project: Sqawsh    File: S3TransferManager.java
public S3TransferManager() {
  transferManager = TransferManagerBuilder.defaultTransferManager();
}
Project: aws-sam-gradle    File: S3UploadTask.java
private TransferManager createTransferManager() {
    return TransferManagerBuilder.standard().withS3Client(getS3Client()).build();
}
Project: datacollector    File: AmazonS3Target.java
@Override
public List<ConfigIssue> init() {
  List<ConfigIssue> issues = s3TargetConfigBean.init(getContext(), super.init());

  elVars = getContext().createELVars();
  bucketEval = getContext().createELEval(BUCKET_TEMPLATE);
  partitionEval = getContext().createELEval(PARTITION_TEMPLATE);
  timeDriverEval = getContext().createELEval(TIME_DRIVER_TEMPLATE);
  calendar = Calendar.getInstance(TimeZone.getTimeZone(ZoneId.of(s3TargetConfigBean.timeZoneID)));

  transferManager = TransferManagerBuilder
      .standard()
      .withS3Client(s3TargetConfigBean.s3Config.getS3Client())
      .withExecutorFactory(() -> Executors.newFixedThreadPool(s3TargetConfigBean.tmConfig.threadPoolSize))
      .withMinimumUploadPartSize(s3TargetConfigBean.tmConfig.minimumUploadPartSize)
      .withMultipartUploadThreshold(s3TargetConfigBean.tmConfig.multipartUploadThreshold)
      .build();

  TimeEL.setCalendarInContext(elVars, calendar);
  TimeNowEL.setTimeNowInContext(elVars, new Date());

  if (partitionTemplate.contains(EL_PREFIX)) {
    ELUtils.validateExpression(
        partitionEval,
        elVars,
        partitionTemplate,
        getContext(),
        Groups.S3.getLabel(),
        S3TargetConfigBean.S3_TARGET_CONFIG_BEAN_PREFIX + PARTITION_TEMPLATE,
        Errors.S3_03,
        String.class,
        issues
    );
  }

  if (timeDriverTemplate.contains(EL_PREFIX)) {
    ELUtils.validateExpression(
        timeDriverEval,
        elVars,
        timeDriverTemplate,
        getContext(),
        Groups.S3.getLabel(),
        S3TargetConfigBean.S3_TARGET_CONFIG_BEAN_PREFIX + TIME_DRIVER_TEMPLATE,
        Errors.S3_04,
        Date.class,
        issues
    );
  }
  if (s3TargetConfigBean.dataFormat == DataFormat.WHOLE_FILE) {
    fileHelper = new WholeFileHelper(getContext(), s3TargetConfigBean, transferManager, issues);
  } else {
    fileHelper = new DefaultFileHelper(getContext(), s3TargetConfigBean, transferManager);
  }

  this.errorRecordHandler = new DefaultErrorRecordHandler(getContext());

  return issues;
}
Project: qpp-conversion-tool    File: S3Config.java
/**
 * Creates a standard Transfer Manager {@link Bean} based on an available {@link AmazonS3} {@link Bean}.
 *
 * @param s3Client The {@link AmazonS3} {@link Bean}.
 * @return The S3 Transfer Manager.
 */
@Bean
public TransferManager s3TransferManager(AmazonS3 s3Client) {
    return TransferManagerBuilder.standard().withS3Client(s3Client).build();
}
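Since the manager owns worker threads, a Spring configuration often also arranges shutdown; one hedged option, not present in the original, is a destroy method (note that the no-arg shutdownNow() also closes the injected AmazonS3 client):

@Bean(destroyMethod = "shutdownNow")
public TransferManager s3TransferManager(AmazonS3 s3Client) {
    return TransferManagerBuilder.standard().withS3Client(s3Client).build();
}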