Java Class com.amazonaws.services.s3.transfer.Upload Example Source Code
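The examples below are collected from open-source projects and show real uses of the Upload handle returned by TransferManager. As orientation, here is a minimal sketch of the pattern most of them share (a sketch assuming the AWS SDK for Java 1.x; the bucket, key, and file names are illustrative):

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import com.amazonaws.services.s3.transfer.Upload;

import java.io.File;

public class MinimalUploadSketch {
    public static void main(String[] args) throws InterruptedException {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        TransferManager tm = TransferManagerBuilder.standard().withS3Client(s3).build();
        try {
            // upload() returns immediately; the transfer runs on background threads
            Upload upload = tm.upload("my-bucket", "my-key", new File("local.txt"));
            upload.waitForCompletion(); // block until done; throws on failure
        } finally {
            tm.shutdownNow(); // releases the transfer threads (and, by default, the client)
        }
    }
}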

Project: grassroot-platform    File: StorageBrokerImpl.java
private String uploadToS3(String bucket, String key, MultipartFile file) {
    final AmazonS3 s3 = s3ClientFactory.createClient();
    final TransferManager transferManager = TransferManagerBuilder.standard().withS3Client(s3).build();
    try {
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(file.getSize());
        metadata.setContentType(file.getContentType());

        byte[] resultByte = DigestUtils.md5(file.getBytes());
        String streamMD5 = new String(Base64.encodeBase64(resultByte));
        metadata.setContentMD5(streamMD5);

        Upload upload = transferManager.upload(bucket, key, file.getInputStream(), metadata);
        upload.waitForCompletion();
        return streamMD5;
    } catch (AmazonServiceException | InterruptedException | IOException e) {
        logger.error("Error uploading file: {}", e.toString());
        return null;
    } finally {
        transferManager.shutdownNow();
    }
}
Project: qpp-conversion-tool    File: StorageServiceImpl.java
/**
 * Uses the {@link TransferManager} to upload a file.
 *
 * @param objectToActOn The put request.
 * @return The object key in the bucket.
 */
@Override
protected String asynchronousAction(PutObjectRequest objectToActOn) {
    String returnValue;

    try {
        Upload upload = s3TransferManager.upload(objectToActOn);
        returnValue = upload.waitForUploadResult().getKey();
    } catch (InterruptedException exception) {
        Thread.currentThread().interrupt();
        throw new UncheckedInterruptedException(exception);
    }

    API_LOG.info("Successfully wrote object {} to S3 bucket {}", returnValue, objectToActOn.getBucketName());

    return returnValue;
}
Project: emr-workload-profiler    File: S3PersisterTest.java
@Test
public void testS3Persister() throws Exception {
    Upload upload = mock(Upload.class);
    when(upload.waitForUploadResult()).thenReturn(new UploadResult());
    TransferManager transferManager = mock(TransferManager.class);
    when(transferManager.upload(anyString(), anyString(), any())).thenReturn(upload);

    S3Persister s3Persister = new S3Persister(transferManager, "foo");
    s3Persister.saveMetrics("foo", "bar");

    verify(transferManager, times(1)).upload(anyString(), anyString(), any());
    verify(transferManager, times(1)).shutdownNow();
    verifyNoMoreInteractions(transferManager);
    verify(upload, times(1)).waitForCompletion();
    verifyNoMoreInteractions(upload);
    assertFalse(new File("foo").exists());
}
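The S3Persister class under test is not excerpted in this listing; below is a hypothetical shape of saveMetrics that the two S3PersisterTest methods in this listing would accept (inferred from their stubs and verifications, not the project's actual source):

// Inferred sketch only -- assumes fields 'transferManager' and 'bucket' set by
// the constructor shown in the test. Writes the metrics to a local file named
// after the metric, uploads it, and always cleans up.
public void saveMetrics(String name, String content) {
    if (content == null) {
        throw new NullPointerException("content must not be null"); // a RuntimeException
    }
    File file = new File(name);
    try {
        Files.write(file.toPath(), content.getBytes(StandardCharsets.UTF_8));
        transferManager.upload(bucket, name, file).waitForCompletion();
    } catch (IOException | InterruptedException e) {
        throw new RuntimeException(e);
    } finally {
        file.delete();
        transferManager.shutdownNow();
    }
}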
Project: hollow-reference-implementation    File: S3Publisher.java
/**
 * Write a list of all of the state versions to S3.
 * @param newVersion the new state version to insert into the index
 */
private synchronized void updateSnapshotIndex(Long newVersion) {
    /// insert the new version into the list
    int idx = Collections.binarySearch(snapshotIndex, newVersion);
    int insertionPoint = Math.abs(idx) - 1;
    snapshotIndex.add(insertionPoint, newVersion);

    /// build a binary representation of the list -- gap encoded variable-length integers
    byte[] idxBytes = buildGapEncodedVarIntSnapshotIndex();

    /// indicate the Content-Length
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader("Content-Length", (long)idxBytes.length);

    /// upload the new file content.
    try(InputStream is = new ByteArrayInputStream(idxBytes)) {
        Upload upload = s3TransferManager.upload(bucketName, getSnapshotIndexObjectName(blobNamespace), is, metadata);

        upload.waitForCompletion();
    } catch(Exception e) {
        throw new RuntimeException(e);
    }
}
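The serializer called above is not shown in this listing; here is a hypothetical sketch of a gap-encoded var-int serializer for the sorted version list (the encoding details are an assumption, not the actual Hollow code):

/// Hypothetical sketch (uses java.io.ByteArrayOutputStream). Encodes the sorted
/// snapshotIndex as deltas ("gaps"), each written as a base-128 var-int:
/// 7 payload bits per byte, high bit set when more bytes follow.
private byte[] buildGapEncodedVarIntSnapshotIndex() {
    ByteArrayOutputStream os = new ByteArrayOutputStream();
    long previous = 0;
    for (long version : snapshotIndex) {
        long gap = version - previous; // deltas stay small, so they encode short
        previous = version;
        while ((gap & ~0x7FL) != 0) {
            os.write((int) ((gap & 0x7F) | 0x80));
            gap >>>= 7;
        }
        os.write((int) gap);
    }
    return os.toByteArray();
}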
Project: S3Mock    File: AmazonClientUploadIT.java
/**
 * Tests if an object can be uploaded asynchronously
 *
 * @throws Exception not expected
 */
@Test
public void shouldUploadInParallel() throws Exception {
  final File uploadFile = new File(UPLOAD_FILE_NAME);

  s3Client.createBucket(BUCKET_NAME);

  final TransferManager transferManager = createDefaultTransferManager();
  final Upload upload =
      transferManager.upload(new PutObjectRequest(BUCKET_NAME, UPLOAD_FILE_NAME, uploadFile));
  final UploadResult uploadResult = upload.waitForUploadResult();

  assertThat(uploadResult.getKey(), equalTo(UPLOAD_FILE_NAME));

  final S3Object getResult = s3Client.getObject(BUCKET_NAME, UPLOAD_FILE_NAME);
  assertThat(getResult.getKey(), equalTo(UPLOAD_FILE_NAME));
}
Project: S3Mock    File: AmazonClientUploadIT.java
/**
 * Verify that range-downloads work.
 *
 * @throws Exception not expected
 */
@Test
public void checkRangeDownloads() throws Exception {
  final File uploadFile = new File(UPLOAD_FILE_NAME);

  s3Client.createBucket(BUCKET_NAME);

  final TransferManager transferManager = createDefaultTransferManager();
  final Upload upload =
      transferManager.upload(new PutObjectRequest(BUCKET_NAME, UPLOAD_FILE_NAME, uploadFile));
  upload.waitForUploadResult();

  final File downloadFile = File.createTempFile(UUID.randomUUID().toString(), null);
  transferManager
      .download(new GetObjectRequest(BUCKET_NAME, UPLOAD_FILE_NAME).withRange(1, 2),
          downloadFile)
      .waitForCompletion();
  assertThat("Invalid file length", downloadFile.length(), is(2L));

  transferManager
      .download(new GetObjectRequest(BUCKET_NAME, UPLOAD_FILE_NAME).withRange(0, 1000),
          downloadFile)
      .waitForCompletion();
  assertThat("Invalid file length", downloadFile.length(), is(uploadFile.length()));
}
Project: kafka-connect-s3    File: S3Writer.java
public long putChunk(String localDataFile, String localIndexFile, TopicPartition tp) throws IOException {
  // Put data file then index, then finally update/create the last_index_file marker
  String dataFileKey = this.getChunkFileKey(localDataFile);
  String idxFileKey = this.getChunkFileKey(localIndexFile);
  // Read offset first since we'll delete the file after upload
  long nextOffset = getNextOffsetFromIndexFileContents(new FileReader(localIndexFile));

  try {
    Upload upload = tm.upload(this.bucket, dataFileKey, new File(localDataFile));
    upload.waitForCompletion();
    upload = tm.upload(this.bucket, idxFileKey, new File(localIndexFile));
    upload.waitForCompletion();
  } catch (Exception e) {
    throw new IOException("Failed to upload to S3", e);
  }

  this.updateCursorFile(idxFileKey, tp);

  // Sanity check - return what the new nextOffset will be based on the index we just uploaded
  return nextOffset;
}
Project: kafka-connect-s3    File: S3WriterTest.java
public void testUpload() throws Exception {
  AmazonS3 s3Mock = mock(AmazonS3.class);
  TransferManager tmMock = mock(TransferManager.class);
  BlockGZIPFileWriter fileWriter = createDummmyFiles(0, 1000);
  S3Writer s3Writer = new S3Writer(testBucket, "pfx", s3Mock, tmMock);
  TopicPartition tp = new TopicPartition("bar", 0);

  Upload mockUpload = mock(Upload.class);

  when(tmMock.upload(eq(testBucket), eq(getKeyForFilename("pfx", "bar-00000-000000000000.gz")), isA(File.class)))
    .thenReturn(mockUpload);
  when(tmMock.upload(eq(testBucket), eq(getKeyForFilename("pfx", "bar-00000-000000000000.index.json")), isA(File.class)))
    .thenReturn(mockUpload);

  s3Writer.putChunk(fileWriter.getDataFilePath(), fileWriter.getIndexFilePath(), tp);

  verifyTMUpload(tmMock, new ExpectedRequestParams[]{
    new ExpectedRequestParams(getKeyForFilename("pfx", "bar-00000-000000000000.gz"), testBucket),
    new ExpectedRequestParams(getKeyForFilename("pfx", "bar-00000-000000000000.index.json"), testBucket)
  });

  // Verify it also wrote the index file key
  verifyStringPut(s3Mock, "pfx/last_chunk_index.bar-00000.txt",
    getKeyForFilename("pfx", "bar-00000-000000000000.index.json"));
}
Project: halvade    File: AWSUploader.java
public void Upload(String key, InputStream input, long size) throws InterruptedException {
    ObjectMetadata meta = new ObjectMetadata();
    if(SSE)
        meta.setServerSideEncryption(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);   
    meta.setContentLength(size);
    Upload upload = tm.upload(existingBucketName, key, input, meta);

    try {
        // Block and wait for the upload to finish
        upload.waitForCompletion();
        Logger.DEBUG("Upload complete.");
    } catch (AmazonClientException amazonClientException) {
        Logger.DEBUG("Unable to upload file, upload was aborted.");
        Logger.EXCEPTION(amazonClientException);
    }
}
Project: hangar    File: S3Storage.java
private void uploadArtifactStream(IndexArtifact ia, StorageRequest sr) throws LocalStorageException
{
    try
    {
        TransferManager tx = new TransferManager(client);
        ObjectMetadata om = new ObjectMetadata();
        om.setContentLength(sr.getLength());

        String key = getPath() + ia.getLocation() + "/" + sr.getFilename();

        Upload myUpload = tx.upload(bucketName, key, sr.getNewStream(), om);
        myUpload.waitForCompletion();
    }
    catch (Exception exc)
    {
        logger.error(exc.getLocalizedMessage());
        throw new LocalStorageException(exc);
    }
}
Project: datacollector    File: FileHelper.java
Upload doUpload(String bucket, String fileName, InputStream is, ObjectMetadata metadata) {
  final PutObjectRequest putObjectRequest = new PutObjectRequest(
      bucket,
      fileName,
      is,
      metadata
  );
  final String object = bucket + s3TargetConfigBean.s3Config.delimiter + fileName;
  Upload upload = transferManager.upload(putObjectRequest);
  upload.addProgressListener((ProgressListener) progressEvent -> {
    switch (progressEvent.getEventType()) {
      case TRANSFER_STARTED_EVENT:
        LOG.debug("Started uploading object {} into Amazon S3", object);
        break;
      case TRANSFER_COMPLETED_EVENT:
        LOG.debug("Completed uploading object {} into Amazon S3", object);
        break;
      case TRANSFER_FAILED_EVENT:
        LOG.debug("Failed uploading object {} into Amazon S3", object);
        break;
      default:
        break;
    }
  });
  return upload;
}
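Because doUpload returns the still-running transfer, the caller must await or abort it; a minimal sketch of such a call site follows (a sketch only, names not taken from datacollector):

Upload upload = doUpload(bucket, fileName, is, metadata);
try {
    UploadResult result = upload.waitForUploadResult(); // blocks until the transfer finishes
    LOG.debug("Object {} stored with ETag {}", result.getKey(), result.getETag());
} catch (InterruptedException e) {
    Thread.currentThread().interrupt();
    upload.abort(); // cancel the in-flight transfer
}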
Project: snap2cloud    File: S3Backup.java
private Map<String, String> verifyMultiPartUpload(MultipleFileUpload uploadDirectory) throws AmazonClientException {
    Collection<? extends Upload> uploadResults = uploadDirectory.getSubTransfers();
    Iterator<? extends Upload> iterator = uploadResults.iterator();

    Map<String, String> fileModifyMap = new HashMap<String, String>();
    while (iterator.hasNext()) {
        UploadResult uploadResult = null;

        try {
            uploadResult = iterator.next().waitForUploadResult();
        } catch (Exception e) {
            LOGGER.error(e.getMessage());
            throw new AmazonClientException(e.getMessage());
        }

        if (uploadResult != null) {
            LOGGER.info(String.format("Multipart upload success for file %s to Amazon S3 bucket %s", uploadResult.getKey(), uploadResult.getBucketName()));
        }
    }

    return fileModifyMap;
}
Project: transcoder    File: S3MovieRepository.java
@Override
public void store( final Movie movie ) throws MovieNotStoredException
{
    final String key = movie.getMovieId().getMovieId();
    logger.info( "Uploading {} to S3 key {}", movie, key );
    final File movieFile = movie.getPath().toFile();
    final PutObjectRequest putObjectRequest = new PutObjectRequest( S3_BUCKET_HOOD_ETS_SOURCE, key, movieFile );
    final ProgressListener progressListener = new S3ProgressListener( key, movieFile.length() );
    try
    {
        final Upload upload = this.transferManager.upload( putObjectRequest );
        upload.addProgressListener( progressListener );
        upload.waitForCompletion();
    }
    catch ( AmazonClientException | InterruptedException e )
    {
        this.transferManager.abortMultipartUploads( S3_BUCKET_HOOD_ETS_SOURCE, new Date() );
        throw new MovieNotStoredException( movie, e );
    }
    logger.info( "Upload complete." );
}
Project: esthree    File: Put.java
@Override
public Integer call() throws Exception {
    TransferManager t = new TransferManager(amazonS3Client);

    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setUserMetadata(metadata);
    if(sse) {
        objectMetadata.setSSEAlgorithm(SSEAlgorithm.AES256.getAlgorithm());
    }

    Upload u = t.upload(new PutObjectRequest(bucket, key, inputFile).withMetadata(objectMetadata));

    // TODO this listener spews out garbage >100% on a retry, add a test to verify
    if (progressListener != null) {
        progressListener.withTransferProgress(new TransferProgressWrapper(u.getProgress()));
        u.addProgressListener(progressListener);
    }
    try {
        u.waitForCompletion();
    } finally {
        t.shutdownNow();
    }
    return 0;
}
Project: elasticsearch-lambda    File: S3SnapshotTransport.java
protected void transferFile(boolean deleteSource, String bucket, String filename, String localDirectory) {
    File source = new File(localDirectory + BaseESReducer.DIR_SEPARATOR + filename);
    Preconditions.checkArgument(source.exists(), "Could not find source file: " + source.getAbsolutePath());
    logger.info("Transfering + " + source + " to " + bucket + " with key " + filename);
    FileInputStream fis;
    try {
        fis = new FileInputStream(source);
        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.setSSEAlgorithm("AES256");
        objectMetadata.setContentLength(source.length());
        Upload upload = tx.upload(bucket, filename, fis, objectMetadata);

        while(!upload.isDone());
        Preconditions.checkState(upload.getState().equals(TransferState.Completed), "File " + filename + " failed to upload with state: " + upload.getState());
        if(deleteSource) {
            source.delete();
        }
    } catch (FileNotFoundException e) {
        // Exception should never be thrown because the precondition above has already validated existence of file
        logger.error("Filename could not be found " + filename, e);
    }
}
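The while(!upload.isDone()); loop above spins a CPU core until the transfer finishes; a sketch of an equivalent blocking wait follows (a suggested alternative, not the project's code):

try {
    upload.waitForCompletion(); // parks the thread instead of spinning; throws if the transfer fails
} catch (InterruptedException e) {
    Thread.currentThread().interrupt();
    throw new IllegalStateException("Interrupted while uploading " + filename, e);
}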
Project: emr-workload-profiler    File: S3PersisterTest.java
@Test(expectedExceptions = RuntimeException.class)
public void testPersistApplicationNullException() throws Exception {
    Upload upload = mock(Upload.class);
    when(upload.waitForUploadResult()).thenReturn(new UploadResult());
    TransferManager transferManager = mock(TransferManager.class);
    when(transferManager.upload(anyString(), anyString(), any())).thenReturn(upload);

    S3Persister s3Persister = new S3Persister(transferManager, "foo");

    try {
        s3Persister.saveMetrics("foo", null);
    } finally {
        assertFalse(new File("foo").exists());
    }
}
Project: hollow-reference-implementation    File: S3Publisher.java
private void uploadFile(File file, String s3ObjectName, ObjectMetadata metadata) {
    try (InputStream is = new BufferedInputStream(new FileInputStream(file))) {
           Upload upload = s3TransferManager.upload(bucketName, s3ObjectName, is, metadata);

           upload.waitForCompletion();
       } catch (Exception e) {
           throw new RuntimeException(e);
       }
}
Project: hadoop    File: S3AOutputStream.java
public ProgressableProgressListener(Upload upload, Progressable progress, 
  FileSystem.Statistics statistics) {
  this.upload = upload;
  this.progress = progress;
  this.statistics = statistics;
  this.lastBytesTransferred = 0;
}
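Only the constructor is excerpted here; below is a sketch of the callback such a listener would plausibly implement (an assumed shape, since the method body is not part of this listing):

@Override
public void progressChanged(ProgressEvent progressEvent) {
    if (progress != null) {
        progress.progress(); // keep the Hadoop task from being killed as idle
    }
    // Report only the delta since the last callback to the filesystem statistics.
    long transferred = upload.getProgress().getBytesTransferred();
    long delta = transferred - lastBytesTransferred;
    if (statistics != null && delta != 0) {
        statistics.incrementBytesWritten(delta);
    }
    lastBytesTransferred = transferred;
}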
Project: S3Mock    File: AmazonClientUploadIT.java
/**
 * Stores a file in a previously created bucket. Downloads the file again and compares checksums
 *
 * @throws Exception if FileStreams can not be read
 */
@Test
public void shouldUploadAndDownloadStream() throws Exception {
  s3Client.createBucket(BUCKET_NAME);
  final String resourceId = UUID.randomUUID().toString();

  final byte[] resource = new byte[] {1, 2, 3, 4, 5};
  final ByteArrayInputStream bais = new ByteArrayInputStream(resource);

  final ObjectMetadata objectMetadata = new ObjectMetadata();
  objectMetadata.setContentLength(resource.length);
  final PutObjectRequest putObjectRequest =
      new PutObjectRequest(BUCKET_NAME, resourceId, bais, objectMetadata);

  final TransferManager tm = createDefaultTransferManager();
  final Upload upload = tm.upload(putObjectRequest);

  upload.waitForUploadResult();

  final S3Object s3Object = s3Client.getObject(BUCKET_NAME, resourceId);

  final String uploadHash = HashUtil.getDigest(new ByteArrayInputStream(resource));
  final String downloadedHash = HashUtil.getDigest(s3Object.getObjectContent());
  s3Object.close();

  assertThat("Up- and downloaded Files should have equal Hashes", uploadHash,
      is(equalTo(downloadedHash)));
}
Project: S3Mock    File: AmazonClientUploadIT.java
/**
 * Verifies multipart copy.
 *
 * @throws InterruptedException if waiting for the upload or copy result is interrupted
 */
@Test
public void multipartCopy() throws InterruptedException, IOException, NoSuchAlgorithmException {
  final int contentLen = 3 * _1MB;

  final ObjectMetadata objectMetadata = new ObjectMetadata();
  objectMetadata.setContentLength(contentLen);

  final String assumedSourceKey = UUID.randomUUID().toString();

  final Bucket sourceBucket = s3Client.createBucket(UUID.randomUUID().toString());
  final Bucket targetBucket = s3Client.createBucket(UUID.randomUUID().toString());

  final TransferManager transferManager = createTransferManager(_2MB, _1MB, _2MB, _1MB);

  final InputStream sourceInputStream = randomInputStream(contentLen);
  final Upload upload = transferManager
      .upload(sourceBucket.getName(), assumedSourceKey,
          sourceInputStream, objectMetadata);

  final UploadResult uploadResult = upload.waitForUploadResult();

  assertThat(uploadResult.getKey(), is(assumedSourceKey));

  final String assumedDestinationKey = UUID.randomUUID().toString();
  final Copy copy =
      transferManager.copy(sourceBucket.getName(), assumedSourceKey, targetBucket.getName(),
          assumedDestinationKey);
  final CopyResult copyResult = copy.waitForCopyResult();
  assertThat(copyResult.getDestinationKey(), is(assumedDestinationKey));

  final S3Object copiedObject = s3Client.getObject(targetBucket.getName(), assumedDestinationKey);

  assertThat("Hashes for source and target S3Object do not match.",
      HashUtil.getDigest(copiedObject.getObjectContent()) + "-1",
      is(uploadResult.getETag()));
}
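The trailing "-1" in the expected ETag follows S3's multipart convention: objects created by multipart upload carry an ETag with a "-&lt;partCount&gt;" suffix, so a single-part multipart upload ends in "-1". A small illustrative helper built on that convention (the name and shape are hypothetical):

// Illustrative only: detects the "-<partCount>" suffix convention, e.g.
// "9b2cf535f27731c974343645a3985328-2" came from a 2-part upload.
static boolean isMultipartEtag(String etag) {
    int dash = etag.lastIndexOf('-');
    if (dash <= 0 || dash == etag.length() - 1) {
        return false;
    }
    return etag.substring(dash + 1).chars().allMatch(Character::isDigit);
}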
Project: aliyun-oss-hadoop-fs    File: S3AOutputStream.java
public ProgressableProgressListener(Upload upload, Progressable progress, 
  FileSystem.Statistics statistics) {
  this.upload = upload;
  this.progress = progress;
  this.statistics = statistics;
  this.lastBytesTransferred = 0;
}
Project: MCSFS    File: S3Store.java
@Override
public void store(File file) throws Exception {

    LogUtils.debug(LOG_TAG, "Uploading new file. Name: " + file.getName());
    TransferManager tm = new TransferManager(new DefaultAWSCredentialsProviderChain());
    // TransferManager processes all transfers asynchronously,
    // so this call will return immediately.
    Upload upload = tm.upload(bucketName, file.getName(), file);
    upload.waitForCompletion();
    LogUtils.debug(LOG_TAG, "Successfully uploaded file to bucket.\nName: " + file.getName() + "\nBucket name: " +
            bucketName);
    tm.shutdownNow();
}
Project: big-c    File: S3AOutputStream.java
public ProgressableProgressListener(Upload upload, Progressable progress, 
  FileSystem.Statistics statistics) {
  this.upload = upload;
  this.progress = progress;
  this.statistics = statistics;
  this.lastBytesTransferred = 0;
}
Project: aws-sam-gradle    File: S3UploadTask.java
private void transferFileToS3(final String key) {
    final long fileSizeMb = file.length() / (1024 * 1024);
    getLogger().info("Uploading {} MB from file {} to {}", fileSizeMb, file, getS3Url());
    final TransferManager transferManager = createTransferManager();
    final Instant start = Instant.now();
    final Upload upload = transferManager.upload(config.getDeploymentBucket(), key, file);
    try {
        upload.waitForCompletion();
        getLogger().info("Uploaded {} to {} in {}", file, getS3Url(), Duration.between(start, Instant.now()));
    } catch (final InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new AssertionError("Upload interrupted", e);
    }
}
Project: aws-doc-sdk-examples    File: XferMgrUpload.java
public static void uploadFile(String file_path, String bucket_name,
        String key_prefix, boolean pause)
{
    System.out.println("file: " + file_path +
            (pause ? " (pause)" : ""));

    String key_name = null;
    if (key_prefix != null) {
        key_name = key_prefix + '/' + file_path;
    } else {
        key_name = file_path;
    }

    File f = new File(file_path);
    TransferManager xfer_mgr = new TransferManager();
    try {
        Upload xfer = xfer_mgr.upload(bucket_name, key_name, f);
        // loop with Transfer.isDone()
        XferMgrProgress.showTransferProgress(xfer);
        //  or block with Transfer.waitForCompletion()
        XferMgrProgress.waitForCompletion(xfer);
    } catch (AmazonServiceException e) {
        System.err.println(e.getErrorMessage());
        System.exit(1);
    }
    xfer_mgr.shutdownNow();
}
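XferMgrProgress is not part of this listing; here is a plausible sketch of the blocking helper invoked above (an assumption based on the call site, not the repository's code):

// Assumed shape of XferMgrProgress.waitForCompletion: block on the transfer
// and surface failures without rethrowing.
public static void waitForCompletion(Transfer xfer) {
    try {
        xfer.waitForCompletion();
    } catch (AmazonClientException e) {
        System.err.println("Transfer failed: " + e.getMessage());
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        System.err.println("Transfer interrupted: " + e.getMessage());
    }
}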
Project: cerberus-lifecycle-cli    File: PublishLambdaOperation.java
@Override
public void run(final PublishLambdaCommand command) {
    final URL artifactUrl = command.getArtifactUrl();

    final BaseOutputs outputParameters = configStore.getBaseStackOutputs();
    final String configBucketName = outputParameters.getConfigBucketName();

    if (StringUtils.isBlank(configBucketName)) {
        final String errorMessage = "The specified environment isn't configured properly!";
        logger.error(errorMessage);
        throw new IllegalStateException(errorMessage);
    }

    initClient(configBucketName);
    final File filePath = downloadArtifact(artifactUrl);

    try {
        final Upload upload =
                transferManager.upload(configBucketName, command.getLambdaName().getBucketKey(), filePath);
        logger.info("Uploading lambda artifact.");
        upload.waitForCompletion();
        logger.info("Uploading complete.");
    } catch (InterruptedException e) {
        logger.error("Interrupted while waiting for upload to complete!", e);
    } finally {
        transferManager.shutdownNow(false);
    }
}
Project: hadoop-2.6.0-cdh5.4.3    File: S3AOutputStream.java
public ProgressableProgressListener(Upload upload, Progressable progress, 
  FileSystem.Statistics statistics) {
  this.upload = upload;
  this.progress = progress;
  this.statistics = statistics;
  this.lastBytesTransferred = 0;
}
Project: micro-server    File: S3Resource.java
@GET
@Path("/put")
public String put() {
    Try<Upload, Throwable> operation = writer.put("hello", "world");
    if(operation.isSuccess())
        return "added";
    return operation.failureGet().orElse(null).getMessage();
}
Project: aws-s3-utils    File: AwsS3IamServiceImpl.java
@Override
public Upload uploadFileAsync(final String bucketName, final String fileName, final File fileObj,
        final CannedAccessControlList cannedAcl) throws AmazonClientException, AmazonServiceException, IOException {
    LOGGER.info("uploadObjectAsync invoked, bucketName: {} , fileName: {} and cannedAccessControlList: {}", bucketName, fileName, cannedAcl);
    final PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, fileName, fileObj).withCannedAcl(cannedAcl);
    final TransferManager transferMgr = new TransferManager(s3client);
    return transferMgr.upload(putObjectRequest);
}
Project: aws-s3-utils    File: AwsS3IamServiceImpl.java
@Override
public Upload uploadFileAsync(final String bucketName, final String fileName, final File fileObj,
        final boolean isPublicAccessible) throws AmazonClientException, AmazonServiceException, IOException {
    LOGGER.info("uploadObjectAsync invoked, bucketName: {} , fileName: {} and isPublicAccessible: {}", bucketName, fileName, isPublicAccessible);
    final PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, fileName, fileObj);
    if(isPublicAccessible){
      putObjectRequest.setCannedAcl(CannedAccessControlList.PublicRead);
    }
    final TransferManager transferMgr = new TransferManager(s3client);
    return transferMgr.upload(putObjectRequest);
}
Project: aws-s3-utils    File: AwsS3IamServiceTest.java
/**
 * Test method for {@link com.github.abhinavmishra14.aws.s3.service.AwsS3IamService#uploadFileAsync(java.lang.String, java.lang.String, java.io.File)}.
 *
 * @throws Exception the exception
 */
@Test
public void testUploadFileAsync() throws Exception{
    awsS3IamService.createBucket(AWS_S3_BUCKET);//create bucket for test
    InputStream inStream = AwsS3IamServiceTest.class
            .getResourceAsStream("/sample-file/TestPutObject.txt");
    File tempFile = AWSUtil.createTempFileFromStream(inStream);
    Upload upload = awsS3IamService.uploadFileAsync(AWS_S3_BUCKET, AWSUtilConstants.SAMPLE_FILE_NAME, tempFile);
    upload.waitForUploadResult();
    assertEquals(true,upload.isDone());
}
Project: aws-s3-utils    File: AwsS3IamServiceTest.java
/**
 * Test method for {@link com.github.abhinavmishra14.aws.s3.service.AwsS3IamService#uploadFileAsync(java.lang.String, java.lang.String, java.io.File, boolean)}.
 *
 * @throws Exception the exception
 */
@Test
public void testUploadFileWithPublicAccessAsync() throws Exception{
    awsS3IamService.createBucket(AWS_S3_BUCKET);//create bucket for test
    InputStream inStream = AwsS3IamServiceTest.class
            .getResourceAsStream("/sample-file/TestPutObject.txt");
    File tempFile = AWSUtil.createTempFileFromStream(inStream);
    Upload upload = awsS3IamService.uploadFileAsync(AWS_S3_BUCKET, AWSUtilConstants.SAMPLE_FILE_NAME, tempFile,true);
    upload.waitForUploadResult();
    assertEquals(true,upload.isDone());
}
Project: aws-s3-utils    File: AwsS3IamServiceTest.java
/**
 * Test method for {@link com.github.abhinavmishra14.aws.s3.service.AwsS3IamService#uploadFileAsync(java.lang.String, java.lang.String, java.io.File, com.amazonaws.services.s3.model.CannedAccessControlList)}.
 *
 * @throws Exception the exception
 */
@Test
public void testUploadFileWithCannedACLAsync() throws Exception{
    awsS3IamService.createBucket(AWS_S3_BUCKET);//create bucket for test
    InputStream inStream = AwsS3IamServiceTest.class
            .getResourceAsStream("/sample-file/TestPutObject.txt");
    File tempFile = AWSUtil.createTempFileFromStream(inStream);
    Upload upload = awsS3IamService.uploadFileAsync(AWS_S3_BUCKET, AWSUtilConstants.SAMPLE_FILE_NAME, tempFile,CannedAccessControlList.PublicRead);
    upload.waitForUploadResult();
    assertEquals(true,upload.isDone());
}
Project: gatling-aws-maven-plugin    File: AwsGatlingRunner.java
public void uploadToS3(String s3bucket, String targetDirectory, File sourceDirectory) {
    // Recursively upload sourceDirectory to targetDirectory.
    FOR: for (final File file : sourceDirectory.listFiles()) {
        if (file.isDirectory()) {
            uploadToS3(s3bucket, targetDirectory + "/" + file.getName(), file);
        } else if (file.isFile()) {
            final String path = file.getAbsolutePath();
            final long uploadStartTimeMs = System.currentTimeMillis();
            final PutObjectRequest putRequest = new PutObjectRequest(s3bucket, targetDirectory + "/" + file.getName(), file)
                    .withCannedAcl(CannedAccessControlList.PublicRead);

            final Upload upload = transferManager.upload(putRequest);
            int statusChecks = 0;

            System.out.println("Uploading " + path);

            while (!upload.isDone()) {
                if (uploadTimedOut(uploadStartTimeMs)) {
                    System.err.format("Timed out uploading file to S3 (%s). Will skip file. Report might be incomplete.%n", path);
                    continue FOR;
                }

                sleep(100);
                if (++statusChecks % 100 == 0) {
                    System.out.format("Still uploading %s%n", file.getAbsolutePath());
                }
            }
            try {
                upload.waitForCompletion();
            } catch (Exception e) {
                System.out.format("Failed to upload to S3 %s/%s/%s%n", s3bucket, targetDirectory, file.getName());
                e.printStackTrace();
            }
        }
    }
}
Project: datacollector    File: UploadMetadata.java
public UploadMetadata(
  Upload upload,
  String bucket,
  List<Record> records,
  List<EventRecord> events
) {
  this.upload = upload;
  this.bucket = bucket;
  this.records = records;
  this.events = events;
}
Project: thunderbit    File: AmazonS3Storage.java
@Override
public F.Promise<Void> store(Path path, String key, String name) {
    Promise<Void> promise = Futures.promise();

    TransferManager transferManager = new TransferManager(credentials);
    try {
        Upload upload = transferManager.upload(bucketName, key, path.toFile());
        upload.addProgressListener((ProgressListener) progressEvent -> {
            if (progressEvent.getEventType().isTransferEvent()) {
                if (progressEvent.getEventType().equals(ProgressEventType.TRANSFER_COMPLETED_EVENT)) {
                    transferManager.shutdownNow();
                    promise.success(null);
                } else if (progressEvent.getEventType().equals(ProgressEventType.TRANSFER_FAILED_EVENT)) {
                    transferManager.shutdownNow();
                    logger.error(progressEvent.toString());
                    promise.failure(new Exception(progressEvent.toString()));
                }
            }
        });
    } catch (AmazonServiceException ase) {
        logAmazonServiceException (ase);
    } catch (AmazonClientException ace) {
        logAmazonClientException(ace);
    }

    return F.Promise.wrap(promise.future());
}
Project: storm-s3    File: BlockingTransferManagerUploader.java
@Override
public void upload(String bucketName, String name, InputStream input, ObjectMetadata meta) throws IOException {
    final Upload myUpload = tx.upload(bucketName, name, input, meta);
    try {
        UploadResult uploadResult = myUpload.waitForUploadResult();
        LOG.info("Upload completed, bucket={}, key={}", uploadResult.getBucketName(), uploadResult.getKey());
    } catch (InterruptedException e) {
        throw new IOException(e);
    }
}
Project: snap2cloud    File: S3Util.java
public void uploadFile(File file, ObjectMetadata metadata, String bucketName, String fileUploadPath) throws AmazonClientException {
    LOGGER.info("Uploading file " + file.getAbsolutePath() + " to Amazon S3 bucket " + bucketName);

    try {
        metadata.setContentLength(file.length());
        Upload upload = tm.upload(bucketName, fileUploadPath, new FileInputStream(file), metadata);
        upload.waitForCompletion();
    } catch (Exception e) {
        LOGGER.info("File upload for " + file.getAbsolutePath() + " to Amazon S3 bucket " + bucketName + " failed");
        throw new AmazonClientException(e.getMessage(), e);
    }

    LOGGER.info("File upload for " + file.getAbsolutePath() + " to Amazon S3 bucket " + bucketName + " completed successfully");
}
Project: mybus    File: AWSClient.java
/**
 * Uploads a file to S3 and returns the S3 file key. The bucket that is used is configured in the properties file via
 * s3.bucket.
 *
 * @param s3Bucket the s3 bucket name
 * @param localFile the local file to be uploaded
 * @param s3FileKey the s3 file key that should be used
 * @return a 2-element array, where element 0 is the s3 bucket and element 1 is the s3 file key
 */
public String[] uploadFileToS3(String s3Bucket, final Path localFile, final String s3FileKey)
        throws IOException, InterruptedException {
    if (localFile == null) {
        throw new NullPointerException("localFile was null.");
    }
    if (isEmpty(s3FileKey)) {
        throw new NullPointerException("objectFileKey cannot be null");
    }
    if (logger.isTraceEnabled()) {
        logger.trace(format("uploadFileToS3(%s)", localFile.getFileName().toString()));
    }
    AWSCredentials awsCredentials = AmazonAWSHelper.getCredentials();
    TransferManager tx = new TransferManager(awsCredentials);

    ObjectMetadata metadata = new ObjectMetadata();
    final String contentType = detectContentTypeFromFilename(s3FileKey);
    if (logger.isDebugEnabled()) {
        logger.debug(format("Setting contentType to '%s' in metadata for S3 object '%s'", contentType, s3FileKey));
    }
    metadata.setContentType(contentType);
    Upload myUpload = tx.upload(s3Bucket, s3FileKey, Files.newInputStream(localFile), metadata);

    myUpload.waitForCompletion();

    String[] retval = {s3Bucket, s3FileKey};
    if (logger.isDebugEnabled()) {
        logger.debug(format("Upload to S3 was successful.  bucket: '%s', file key: '%s'", s3Bucket, s3FileKey));
    }
    return retval;
}
Project: snomed-release-service    File: StreamFactory.java
public BufferedWriter createStreamWriter(String correlationID, String streamUri) throws IOException {
    String[] split = streamUri.split("://", 2);
    String protocol = split[0];
    String path = split[1];
    if (Constants.FILE.equals(protocol)) {
        return new BufferedWriter(new FileWriter(path));
    } else if (Constants.s3.equals(protocol)) {

        String[] split1 = path.split("/", 2);
        final String bucketName = split1[0];
        final String objectKey = split1[1];

        String tempFilePath = tempDirectoryPath + "/" + correlationID;

        final File tempFile = new File(tempFilePath);
        return new BufferedWriterTaskOnClose(new FileWriter(tempFile), new Task() {
            @Override
            public void run() throws InterruptedException {
                if (!offlineMode) {
                    Upload upload = transferManager.upload(bucketName, objectKey, tempFile);
                    upload.waitForUploadResult();
                    tempFile.delete();
                }
            }
        });
    } else {
        throw new NotImplementedException("Unrecognised stream URI protocol: " + protocol);
    }
}