private String uploadToS3(String bucket, String key, MultipartFile file) {
    final AmazonS3 s3 = s3ClientFactory.createClient();
    final TransferManager transferManager = TransferManagerBuilder.standard().withS3Client(s3).build();
    try {
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(file.getSize());
        metadata.setContentType(file.getContentType());
        byte[] resultByte = DigestUtils.md5(file.getBytes());
        String streamMD5 = new String(Base64.encodeBase64(resultByte));
        metadata.setContentMD5(streamMD5);
        Upload upload = transferManager.upload(bucket, key, file.getInputStream(), metadata);
        upload.waitForCompletion();
        return streamMD5;
    } catch (AmazonServiceException | IOException e) {
        logger.error("Error uploading file: {}", e.toString());
        return null;
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // preserve the interrupt status
        logger.error("Error uploading file: {}", e.toString());
        return null;
    } finally {
        transferManager.shutdownNow();
    }
}
/**
 * Uses the {@link TransferManager} to upload a file.
 *
 * @param objectToActOn The put request.
 * @return The object key in the bucket.
 */
@Override
protected String asynchronousAction(PutObjectRequest objectToActOn) {
    String returnValue;
    try {
        Upload upload = s3TransferManager.upload(objectToActOn);
        returnValue = upload.waitForUploadResult().getKey();
    } catch (InterruptedException exception) {
        Thread.currentThread().interrupt();
        throw new UncheckedInterruptedException(exception);
    }
    API_LOG.info("Successfully wrote object {} to S3 bucket {}", returnValue, objectToActOn.getBucketName());
    return returnValue;
}
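// UncheckedInterruptedException above appears to be a project-specific wrapper; a minimal
// shape consistent with how it is used (an assumed sketch, not the original class):
public class UncheckedInterruptedException extends RuntimeException {
    public UncheckedInterruptedException(final InterruptedException cause) {
        super(cause);
    }
}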
@Test
public void testS3Persister() throws Exception {
    Upload upload = mock(Upload.class);
    when(upload.waitForUploadResult()).thenReturn(new UploadResult());
    TransferManager transferManager = mock(TransferManager.class);
    when(transferManager.upload(anyString(), anyString(), any())).thenReturn(upload);
    S3Persister s3Persister = new S3Persister(transferManager, "foo");

    s3Persister.saveMetrics("foo", "bar");

    verify(transferManager, times(1)).upload(anyString(), anyString(), any());
    verify(transferManager, times(1)).shutdownNow();
    verifyNoMoreInteractions(transferManager);
    verify(upload, times(1)).waitForCompletion();
    verifyNoMoreInteractions(upload);
    assertFalse(new File("foo").exists());
}
/**
 * Write a list of all of the state versions to S3.
 *
 * @param newVersion the version to add to the snapshot index
 */
private synchronized void updateSnapshotIndex(Long newVersion) {
    // insert the new version into the (sorted) list; binarySearch returns
    // (-(insertionPoint) - 1) when the version is not already present
    int idx = Collections.binarySearch(snapshotIndex, newVersion);
    int insertionPoint = Math.abs(idx) - 1;
    snapshotIndex.add(insertionPoint, newVersion);

    // build a binary representation of the list -- gap-encoded variable-length integers
    byte[] idxBytes = buidGapEncodedVarIntSnapshotIndex();

    // indicate the Content-Length
    ObjectMetadata metadata = new ObjectMetadata();
    metadata.setHeader("Content-Length", (long) idxBytes.length);

    // upload the new file content
    try (InputStream is = new ByteArrayInputStream(idxBytes)) {
        Upload upload = s3TransferManager.upload(bucketName, getSnapshotIndexObjectName(blobNamespace), is, metadata);
        upload.waitForCompletion();
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
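// buidGapEncodedVarIntSnapshotIndex is referenced above but not shown. A minimal sketch of
// what a gap-encoded varint encoder could look like, assuming snapshotIndex is sorted
// ascending (hypothetical helper, not the original implementation):
private byte[] gapEncodedVarIntSnapshotIndexSketch() {
    java.io.ByteArrayOutputStream os = new java.io.ByteArrayOutputStream();
    long previous = 0;
    for (long version : snapshotIndex) {
        long gap = version - previous; // store deltas between consecutive sorted versions
        previous = version;
        // LEB128-style varint: 7 payload bits per byte, high bit marks continuation
        while ((gap & ~0x7FL) != 0) {
            os.write((int) ((gap & 0x7F) | 0x80));
            gap >>>= 7;
        }
        os.write((int) gap);
    }
    return os.toByteArray();
}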
/**
 * Tests if an object can be uploaded asynchronously.
 *
 * @throws Exception not expected
 */
@Test
public void shouldUploadInParallel() throws Exception {
    final File uploadFile = new File(UPLOAD_FILE_NAME);

    s3Client.createBucket(BUCKET_NAME);

    final TransferManager transferManager = createDefaultTransferManager();
    final Upload upload = transferManager.upload(new PutObjectRequest(BUCKET_NAME, UPLOAD_FILE_NAME, uploadFile));
    final UploadResult uploadResult = upload.waitForUploadResult();

    assertThat(uploadResult.getKey(), equalTo(UPLOAD_FILE_NAME));

    final S3Object getResult = s3Client.getObject(BUCKET_NAME, UPLOAD_FILE_NAME);
    assertThat(getResult.getKey(), equalTo(UPLOAD_FILE_NAME));
}
/**
 * Verify that range-downloads work.
 *
 * @throws Exception not expected
 */
@Test
public void checkRangeDownloads() throws Exception {
    final File uploadFile = new File(UPLOAD_FILE_NAME);

    s3Client.createBucket(BUCKET_NAME);

    final TransferManager transferManager = createDefaultTransferManager();
    final Upload upload = transferManager.upload(new PutObjectRequest(BUCKET_NAME, UPLOAD_FILE_NAME, uploadFile));
    upload.waitForUploadResult();

    final File downloadFile = File.createTempFile(UUID.randomUUID().toString(), null);
    transferManager
            .download(new GetObjectRequest(BUCKET_NAME, UPLOAD_FILE_NAME).withRange(1, 2), downloadFile)
            .waitForCompletion();
    assertThat("Invalid file length", downloadFile.length(), is(2L));

    transferManager
            .download(new GetObjectRequest(BUCKET_NAME, UPLOAD_FILE_NAME).withRange(0, 1000), downloadFile)
            .waitForCompletion();
    assertThat("Invalid file length", downloadFile.length(), is(uploadFile.length()));
}
public long putChunk(String localDataFile, String localIndexFile, TopicPartition tp) throws IOException {
    // Put data file then index, then finally update/create the last_index_file marker
    String dataFileKey = this.getChunkFileKey(localDataFile);
    String idxFileKey = this.getChunkFileKey(localIndexFile);

    // Read offset first since we'll delete the file after upload;
    // use try-with-resources so the reader is always closed
    long nextOffset;
    try (FileReader reader = new FileReader(localIndexFile)) {
        nextOffset = getNextOffsetFromIndexFileContents(reader);
    }

    try {
        Upload upload = tm.upload(this.bucket, dataFileKey, new File(localDataFile));
        upload.waitForCompletion();
        upload = tm.upload(this.bucket, idxFileKey, new File(localIndexFile));
        upload.waitForCompletion();
    } catch (Exception e) {
        throw new IOException("Failed to upload to S3", e);
    }

    this.updateCursorFile(idxFileKey, tp);

    // Sanity check - return what the new nextOffset will be based on the index we just uploaded
    return nextOffset;
}
public void testUpload() throws Exception {
    AmazonS3 s3Mock = mock(AmazonS3.class);
    TransferManager tmMock = mock(TransferManager.class);
    BlockGZIPFileWriter fileWriter = createDummmyFiles(0, 1000);
    S3Writer s3Writer = new S3Writer(testBucket, "pfx", s3Mock, tmMock);
    TopicPartition tp = new TopicPartition("bar", 0);

    Upload mockUpload = mock(Upload.class);

    when(tmMock.upload(eq(testBucket), eq(getKeyForFilename("pfx", "bar-00000-000000000000.gz")), isA(File.class)))
            .thenReturn(mockUpload);
    when(tmMock.upload(eq(testBucket), eq(getKeyForFilename("pfx", "bar-00000-000000000000.index.json")), isA(File.class)))
            .thenReturn(mockUpload);

    s3Writer.putChunk(fileWriter.getDataFilePath(), fileWriter.getIndexFilePath(), tp);

    verifyTMUpload(tmMock, new ExpectedRequestParams[] {
            new ExpectedRequestParams(getKeyForFilename("pfx", "bar-00000-000000000000.gz"), testBucket),
            new ExpectedRequestParams(getKeyForFilename("pfx", "bar-00000-000000000000.index.json"), testBucket)
    });

    // Verify it also wrote the index file key
    verifyStringPut(s3Mock, "pfx/last_chunk_index.bar-00000.txt",
            getKeyForFilename("pfx", "bar-00000-000000000000.index.json"));
}
public void Upload(String key, InputStream input, long size) throws InterruptedException {
    ObjectMetadata meta = new ObjectMetadata();
    if (SSE) {
        meta.setServerSideEncryption(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
    }
    meta.setContentLength(size);

    Upload upload = tm.upload(existingBucketName, key, input, meta);
    try {
        // Or you can block and wait for the upload to finish
        upload.waitForCompletion();
        Logger.DEBUG("Upload complete.");
    } catch (AmazonClientException amazonClientException) {
        Logger.DEBUG("Unable to upload file, upload was aborted.");
        Logger.EXCEPTION(amazonClientException);
    }
}
private void uploadArtifactStream(IndexArtifact ia, StorageRequest sr) throws LocalStorageException {
    TransferManager tx = new TransferManager(client);
    try {
        ObjectMetadata om = new ObjectMetadata();
        om.setContentLength(sr.getLength());
        String key = getPath() + ia.getLocation() + "/" + sr.getFilename();
        Upload myUpload = tx.upload(bucketName, key, sr.getNewStream(), om);
        myUpload.waitForCompletion();
    } catch (Exception exc) {
        logger.error(exc.getLocalizedMessage());
        throw new LocalStorageException(exc);
    } finally {
        // release the TransferManager's worker threads without closing the shared client
        tx.shutdownNow(false);
    }
}
Upload doUpload(String bucket, String fileName, InputStream is, ObjectMetadata metadata) {
    final PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, fileName, is, metadata);
    final String object = bucket + s3TargetConfigBean.s3Config.delimiter + fileName;
    Upload upload = transferManager.upload(putObjectRequest);
    upload.addProgressListener((ProgressListener) progressEvent -> {
        switch (progressEvent.getEventType()) {
            case TRANSFER_STARTED_EVENT:
                LOG.debug("Started uploading object {} into Amazon S3", object);
                break;
            case TRANSFER_COMPLETED_EVENT:
                LOG.debug("Completed uploading object {} into Amazon S3", object);
                break;
            case TRANSFER_FAILED_EVENT:
                LOG.debug("Failed uploading object {} into Amazon S3", object);
                break;
            default:
                break;
        }
    });
    return upload;
}
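// doUpload returns without blocking, so a caller is expected to wait on the returned
// handle. A minimal caller sketch (illustrative name, not part of the original source):
void uploadAndWait(String bucket, String fileName, InputStream is, ObjectMetadata metadata)
        throws InterruptedException {
    Upload upload = doUpload(bucket, fileName, is, metadata);
    upload.waitForCompletion(); // blocks until S3 acknowledges the object, rethrows failures
}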
private Map<String, String> verifyMultiPartUpload(MultipleFileUpload uploadDirectory) throws AmazonClientException {
    Collection<? extends Upload> uploadResults = uploadDirectory.getSubTransfers();
    Iterator<? extends Upload> iterator = uploadResults.iterator();
    Map<String, String> fileModifyMap = new HashMap<String, String>();
    while (iterator.hasNext()) {
        UploadResult uploadResult = null;
        try {
            uploadResult = iterator.next().waitForUploadResult();
        } catch (Exception e) {
            LOGGER.error(e.getMessage());
            throw new AmazonClientException(e.getMessage(), e);
        }
        if (uploadResult != null) {
            LOGGER.info(String.format("Multipart upload success for file %s to Amazon S3 bucket %s",
                    uploadResult.getKey(), uploadResult.getBucketName()));
        }
    }
    return fileModifyMap;
}
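// verifyMultiPartUpload expects a MultipleFileUpload handle; one way to obtain it is the
// standard TransferManager directory upload. A sketch assuming a transferManager field,
// with illustrative bucket and paths:
MultipleFileUpload uploadDirectory = transferManager.uploadDirectory(
        "my-bucket",          // target bucket (illustrative)
        "backups/2024",       // key prefix under which the files land
        new File("/tmp/out"), // local directory to upload
        true);                // include subdirectories
Map<String, String> fileModifyMap = verifyMultiPartUpload(uploadDirectory);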
@Override
public void store( final Movie movie ) throws MovieNotStoredException {
    final String key = movie.getMovieId().getMovieId();
    logger.info( "Uploading {} to S3 key {}", movie, key );
    final File movieFile = movie.getPath().toFile();
    final PutObjectRequest putObjectRequest = new PutObjectRequest( S3_BUCKET_HOOD_ETS_SOURCE, key, movieFile );
    final ProgressListener progressListener = new S3ProgressListener( key, movieFile.length() );
    try {
        final Upload upload = this.transferManager.upload( putObjectRequest );
        upload.addProgressListener( progressListener );
        upload.waitForCompletion();
    } catch ( AmazonClientException | InterruptedException e ) {
        this.transferManager.abortMultipartUploads( S3_BUCKET_HOOD_ETS_SOURCE, new Date() );
        throw new MovieNotStoredException( movie, e );
    }
    logger.info( "Upload complete." );
}
@Override
public Integer call() throws Exception {
    TransferManager t = new TransferManager(amazonS3Client);
    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setUserMetadata(metadata);
    if (sse) {
        objectMetadata.setSSEAlgorithm(SSEAlgorithm.AES256.getAlgorithm());
    }
    Upload u = t.upload(new PutObjectRequest(bucket, key, inputFile).withMetadata(objectMetadata));
    // TODO this listener spews out garbage >100% on a retry, add a test to verify
    if (progressListener != null) {
        progressListener.withTransferProgress(new TransferProgressWrapper(u.getProgress()));
        u.addProgressListener(progressListener);
    }
    try {
        u.waitForCompletion();
    } finally {
        t.shutdownNow();
    }
    return 0;
}
protected void transferFile(boolean deleteSource, String bucket, String filename, String localDirectory) {
    File source = new File(localDirectory + BaseESReducer.DIR_SEPARATOR + filename);
    Preconditions.checkArgument(source.exists(), "Could not find source file: " + source.getAbsolutePath());
    logger.info("Transferring " + source + " to " + bucket + " with key " + filename);

    try (FileInputStream fis = new FileInputStream(source)) {
        ObjectMetadata objectMetadata = new ObjectMetadata();
        objectMetadata.setSSEAlgorithm("AES256");
        objectMetadata.setContentLength(source.length());
        Upload upload = tx.upload(bucket, filename, fis, objectMetadata);
        // block until the transfer finishes instead of spinning on isDone()
        upload.waitForCompletion();
        Preconditions.checkState(upload.getState().equals(TransferState.Completed),
                "File " + filename + " failed to upload with state: " + upload.getState());
        if (deleteSource) {
            source.delete();
        }
    } catch (FileNotFoundException e) {
        // Should never be thrown because the precondition above has already validated existence of the file
        logger.error("Filename could not be found " + filename, e);
    } catch (IOException e) {
        logger.error("Failed to read " + filename, e);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new IllegalStateException("Interrupted while uploading " + filename, e);
    }
}
@Test(expectedExceptions = RuntimeException.class)
public void testPersistApplicationNullException() throws Exception {
    Upload upload = mock(Upload.class);
    when(upload.waitForUploadResult()).thenReturn(new UploadResult());
    TransferManager transferManager = mock(TransferManager.class);
    when(transferManager.upload(anyString(), anyString(), any())).thenReturn(upload);
    S3Persister s3Persister = new S3Persister(transferManager, "foo");
    try {
        s3Persister.saveMetrics("foo", null);
    } finally {
        assertFalse(new File("foo").exists());
    }
}
private void uploadFile(File file, String s3ObjectName, ObjectMetadata metadata) {
    try (InputStream is = new BufferedInputStream(new FileInputStream(file))) {
        Upload upload = s3TransferManager.upload(bucketName, s3ObjectName, is, metadata);
        upload.waitForCompletion();
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
/**
 * Listener that reports transfer progress back to the Hadoop {@link Progressable}
 * and records transferred bytes in {@link FileSystem.Statistics} while the
 * {@link Upload} is in flight.
 */
public ProgressableProgressListener(Upload upload, Progressable progress, FileSystem.Statistics statistics) {
    this.upload = upload;
    this.progress = progress;
    this.statistics = statistics;
    this.lastBytesTransferred = 0;
}
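// The constructor above only stores collaborators; the callback body is not shown. A
// plausible progressChanged implementation consistent with those fields (a sketch, not
// necessarily the original source):
@Override
public void progressChanged(ProgressEvent progressEvent) {
    if (progress != null) {
        progress.progress(); // keep the Hadoop task alive during long transfers
    }
    // report only the bytes transferred since the previous callback
    long transferred = upload.getProgress().getBytesTransferred();
    long delta = transferred - lastBytesTransferred;
    if (statistics != null && delta != 0) {
        statistics.incrementBytesWritten(delta);
    }
    lastBytesTransferred = transferred;
}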
/**
 * Stores a file in a previously created bucket. Downloads the file again and compares checksums.
 *
 * @throws Exception if FileStreams can not be read
 */
@Test
public void shouldUploadAndDownloadStream() throws Exception {
    s3Client.createBucket(BUCKET_NAME);
    final String resourceId = UUID.randomUUID().toString();

    final byte[] resource = new byte[] {1, 2, 3, 4, 5};
    final ByteArrayInputStream bais = new ByteArrayInputStream(resource);

    final ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setContentLength(resource.length);

    final PutObjectRequest putObjectRequest = new PutObjectRequest(BUCKET_NAME, resourceId, bais, objectMetadata);

    final TransferManager tm = createDefaultTransferManager();
    final Upload upload = tm.upload(putObjectRequest);

    upload.waitForUploadResult();

    final S3Object s3Object = s3Client.getObject(BUCKET_NAME, resourceId);

    final String uploadHash = HashUtil.getDigest(new ByteArrayInputStream(resource));
    final String downloadedHash = HashUtil.getDigest(s3Object.getObjectContent());
    s3Object.close();

    assertThat("Up- and downloaded Files should have equal Hashes", uploadHash, is(equalTo(downloadedHash)));
}
/**
 * Verifies multipart copy.
 *
 * @throws InterruptedException not expected
 */
@Test
public void multipartCopy() throws InterruptedException, IOException, NoSuchAlgorithmException {
    final int contentLen = 3 * _1MB;

    final ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setContentLength(contentLen);

    final String assumedSourceKey = UUID.randomUUID().toString();

    final Bucket sourceBucket = s3Client.createBucket(UUID.randomUUID().toString());
    final Bucket targetBucket = s3Client.createBucket(UUID.randomUUID().toString());
    final TransferManager transferManager = createTransferManager(_2MB, _1MB, _2MB, _1MB);

    final InputStream sourceInputStream = randomInputStream(contentLen);
    final Upload upload = transferManager
            .upload(sourceBucket.getName(), assumedSourceKey, sourceInputStream, objectMetadata);

    final UploadResult uploadResult = upload.waitForUploadResult();

    assertThat(uploadResult.getKey(), is(assumedSourceKey));

    final String assumedDestinationKey = UUID.randomUUID().toString();
    final Copy copy = transferManager.copy(sourceBucket.getName(), assumedSourceKey,
            targetBucket.getName(), assumedDestinationKey);
    final CopyResult copyResult = copy.waitForCopyResult();
    assertThat(copyResult.getDestinationKey(), is(assumedDestinationKey));

    final S3Object copiedObject = s3Client.getObject(targetBucket.getName(), assumedDestinationKey);

    assertThat("Hashes for source and target S3Object do not match.",
            HashUtil.getDigest(copiedObject.getObjectContent()) + "-1",
            is(uploadResult.getETag()));
}
@Override
public void store(File file) throws Exception {
    LogUtils.debug(LOG_TAG, "Uploading new file. Name: " + file.getName());
    TransferManager tm = new TransferManager(new DefaultAWSCredentialsProviderChain());
    try {
        // TransferManager processes all transfers asynchronously,
        // so this call will return immediately.
        Upload upload = tm.upload(bucketName, file.getName(), file);
        upload.waitForCompletion();
        LogUtils.debug(LOG_TAG, "Successfully uploaded file to bucket.\nName: " + file.getName()
                + "\nBucket name: " + bucketName);
    } finally {
        // shut down even if the upload fails, so worker threads are not leaked
        tm.shutdownNow();
    }
}
private void transferFileToS3(final String key) {
    final long fileSizeMb = file.length() / (1024 * 1024);
    getLogger().info("Uploading {} MB from file {} to {}", fileSizeMb, file, getS3Url());
    final TransferManager transferManager = createTransferManager();
    final Instant start = Instant.now();
    final Upload upload = transferManager.upload(config.getDeploymentBucket(), key, file);
    try {
        upload.waitForCompletion();
        getLogger().info("Uploaded {} to {} in {}", file, getS3Url(), Duration.between(start, Instant.now()));
    } catch (final InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new AssertionError("Upload interrupted", e);
    }
}
public static void uploadFile(String file_path, String bucket_name, String key_prefix, boolean pause) {
    System.out.println("file: " + file_path + (pause ? " (pause)" : ""));
    String key_name = null;
    if (key_prefix != null) {
        key_name = key_prefix + '/' + file_path;
    } else {
        key_name = file_path;
    }

    File f = new File(file_path);
    TransferManager xfer_mgr = new TransferManager();
    try {
        Upload xfer = xfer_mgr.upload(bucket_name, key_name, f);
        // loop with Transfer.isDone()
        XferMgrProgress.showTransferProgress(xfer);
        // or block with Transfer.waitForCompletion()
        XferMgrProgress.waitForCompletion(xfer);
    } catch (AmazonServiceException e) {
        System.err.println(e.getErrorMessage());
        System.exit(1);
    }
    xfer_mgr.shutdownNow();
}
@Override
public void run(final PublishLambdaCommand command) {
    final URL artifactUrl = command.getArtifactUrl();

    final BaseOutputs outputParameters = configStore.getBaseStackOutputs();
    final String configBucketName = outputParameters.getConfigBucketName();

    if (StringUtils.isBlank(configBucketName)) {
        final String errorMessage = "The specified environment isn't configured properly!";
        logger.error(errorMessage);
        throw new IllegalStateException(errorMessage);
    }

    initClient(configBucketName);

    final File filePath = downloadArtifact(artifactUrl);

    try {
        final Upload upload = transferManager.upload(configBucketName, command.getLambdaName().getBucketKey(), filePath);
        logger.info("Uploading lambda artifact.");
        upload.waitForCompletion();
        logger.info("Uploading complete.");
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // restore the interrupt status before returning
        logger.error("Interrupted while waiting for upload to complete!", e);
    } finally {
        transferManager.shutdownNow(false);
    }
}
@GET @Path("/put") public String put() { Try<Upload, Throwable> operation = writer.put("hello", "world"); if(operation.isSuccess()) return "added"; return operation.failureGet().orElse(null).getMessage(); }
@Override
public Upload uploadFileAsync(final String bucketName, final String fileName, final File fileObj,
        final CannedAccessControlList cannedAcl)
        throws AmazonClientException, AmazonServiceException, IOException {
    LOGGER.info("uploadFileAsync invoked, bucketName: {} , fileName: {} and cannedAccessControlList: {}",
            bucketName, fileName, cannedAcl);
    final PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, fileName, fileObj)
            .withCannedAcl(cannedAcl);
    final TransferManager transferMgr = new TransferManager(s3client);
    return transferMgr.upload(putObjectRequest);
}
@Override
public Upload uploadFileAsync(final String bucketName, final String fileName, final File fileObj,
        final boolean isPublicAccessible)
        throws AmazonClientException, AmazonServiceException, IOException {
    LOGGER.info("uploadFileAsync invoked, bucketName: {} , fileName: {} and isPublicAccessible: {}",
            bucketName, fileName, isPublicAccessible);
    final PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, fileName, fileObj);
    if (isPublicAccessible) {
        putObjectRequest.setCannedAcl(CannedAccessControlList.PublicRead);
    }
    final TransferManager transferMgr = new TransferManager(s3client);
    return transferMgr.upload(putObjectRequest);
}
/**
 * Test method for {@link com.github.abhinavmishra14.aws.s3.service.AwsS3IamService#uploadFileAsync(java.lang.String, java.lang.String, java.io.File)}.
 *
 * @throws Exception the exception
 */
@Test
public void testUploadFileAsync() throws Exception {
    awsS3IamService.createBucket(AWS_S3_BUCKET); // create bucket for test
    InputStream inStream = AwsS3IamServiceTest.class
            .getResourceAsStream("/sample-file/TestPutObject.txt");
    File tempFile = AWSUtil.createTempFileFromStream(inStream);
    Upload upload = awsS3IamService.uploadFileAsync(AWS_S3_BUCKET, AWSUtilConstants.SAMPLE_FILE_NAME, tempFile);
    upload.waitForUploadResult();
    assertTrue(upload.isDone());
}
/**
 * Test method for {@link com.github.abhinavmishra14.aws.s3.service.AwsS3IamService#uploadFileAsync(java.lang.String, java.lang.String, java.io.File, boolean)}.
 *
 * @throws Exception the exception
 */
@Test
public void testUploadFileWithPublicAccessAsync() throws Exception {
    awsS3IamService.createBucket(AWS_S3_BUCKET); // create bucket for test
    InputStream inStream = AwsS3IamServiceTest.class
            .getResourceAsStream("/sample-file/TestPutObject.txt");
    File tempFile = AWSUtil.createTempFileFromStream(inStream);
    Upload upload = awsS3IamService.uploadFileAsync(AWS_S3_BUCKET, AWSUtilConstants.SAMPLE_FILE_NAME, tempFile, true);
    upload.waitForUploadResult();
    assertTrue(upload.isDone());
}
/**
 * Test method for {@link com.github.abhinavmishra14.aws.s3.service.AwsS3IamService#uploadFileAsync(java.lang.String, java.lang.String, java.io.File, com.amazonaws.services.s3.model.CannedAccessControlList)}.
 *
 * @throws Exception the exception
 */
@Test
public void testUploadFileWithCannedACLAsync() throws Exception {
    awsS3IamService.createBucket(AWS_S3_BUCKET); // create bucket for test
    InputStream inStream = AwsS3IamServiceTest.class
            .getResourceAsStream("/sample-file/TestPutObject.txt");
    File tempFile = AWSUtil.createTempFileFromStream(inStream);
    Upload upload = awsS3IamService.uploadFileAsync(AWS_S3_BUCKET, AWSUtilConstants.SAMPLE_FILE_NAME,
            tempFile, CannedAccessControlList.PublicRead);
    upload.waitForUploadResult();
    assertTrue(upload.isDone());
}
public void uploadToS3(String s3bucket, String targetDirectory, File sourceDirectory) {
    // Recursively upload sourceDirectory to targetDirectory.
    final File[] children = sourceDirectory.listFiles();
    if (children == null) {
        return; // not a directory, or an I/O error occurred while listing it
    }
    FOR: for (final File file : children) {
        if (file.isDirectory()) {
            uploadToS3(s3bucket, targetDirectory + "/" + file.getName(), file);
        } else if (file.isFile()) {
            final String path = file.getAbsolutePath();
            final long uploadStartTimeMs = System.currentTimeMillis();
            final PutObjectRequest putRequest = new PutObjectRequest(s3bucket,
                    targetDirectory + "/" + file.getName(), file)
                    .withCannedAcl(CannedAccessControlList.PublicRead);
            final Upload upload = transferManager.upload(putRequest);
            int statusChecks = 0;

            System.out.println("Uploading " + path);
            while (!upload.isDone()) {
                if (uploadTimedOut(uploadStartTimeMs)) {
                    System.err.format("Timed out uploading file to S3 (%s). Will skip file. Report might be incomplete.%n", path);
                    continue FOR;
                }
                sleep(100);
                if (++statusChecks % 100 == 0) {
                    System.out.format("Still uploading %s%n", file.getAbsolutePath());
                }
            }
            try {
                upload.waitForCompletion();
            } catch (Exception e) {
                System.out.format("Failed to upload to S3 %s/%s/%s%n", s3bucket, targetDirectory, file.getName());
                e.printStackTrace();
            }
        }
    }
}
public UploadMetadata(
        Upload upload,
        String bucket,
        List<Record> records,
        List<EventRecord> events
) {
    this.upload = upload;
    this.bucket = bucket;
    this.records = records;
    this.events = events;
}
@Override
public F.Promise<Void> store(Path path, String key, String name) {
    Promise<Void> promise = Futures.promise();
    TransferManager transferManager = new TransferManager(credentials);
    try {
        Upload upload = transferManager.upload(bucketName, key, path.toFile());
        upload.addProgressListener((ProgressListener) progressEvent -> {
            if (progressEvent.getEventType().isTransferEvent()) {
                if (progressEvent.getEventType().equals(ProgressEventType.TRANSFER_COMPLETED_EVENT)) {
                    transferManager.shutdownNow();
                    promise.success(null);
                } else if (progressEvent.getEventType().equals(ProgressEventType.TRANSFER_FAILED_EVENT)) {
                    transferManager.shutdownNow();
                    logger.error(progressEvent.toString());
                    promise.failure(new Exception(progressEvent.toString()));
                }
            }
        });
    } catch (AmazonServiceException ase) {
        logAmazonServiceException(ase);
        promise.failure(ase); // complete the promise so callers do not hang
    } catch (AmazonClientException ace) {
        logAmazonClientException(ace);
        promise.failure(ace);
    }
    return F.Promise.wrap(promise.future());
}
@Override
public void upload(String bucketName, String name, InputStream input, ObjectMetadata meta) throws IOException {
    final Upload myUpload = tx.upload(bucketName, name, input, meta);
    try {
        UploadResult uploadResult = myUpload.waitForUploadResult();
        LOG.info("Upload completed, bucket={}, key={}", uploadResult.getBucketName(), uploadResult.getKey());
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // preserve interrupt status before wrapping
        throw new IOException(e);
    }
}
public void uploadFile(File file, ObjectMetadata metadata, String bucketName, String fileUploadPath)
        throws AmazonClientException {
    LOGGER.info("Uploading file " + file.getAbsolutePath() + " to Amazon S3 bucket " + bucketName);
    try (FileInputStream fis = new FileInputStream(file)) {
        metadata.setContentLength(file.length());
        Upload upload = tm.upload(bucketName, fileUploadPath, fis, metadata);
        upload.waitForCompletion();
    } catch (Exception e) {
        LOGGER.info("File upload for " + file.getAbsolutePath() + " to Amazon S3 bucket " + bucketName + " failed");
        throw new AmazonClientException(e.getMessage(), e);
    }
    LOGGER.info("File upload for " + file.getAbsolutePath() + " to Amazon S3 bucket " + bucketName
            + " completed successfully");
}
/**
 * Uploads a file to S3 and returns the s3 file key. The bucket that is used is configured in the properties file
 * via s3.bucket.
 *
 * @param s3Bucket the s3 bucket name
 * @param localFile the local file to be uploaded
 * @param s3FileKey the s3 file key that should be used
 * @return a 2-element array, where element 0 is the s3 bucket and element 1 is the s3 file key
 */
public String[] uploadFileToS3(String s3Bucket, final Path localFile, final String s3FileKey)
        throws IOException, InterruptedException {
    if (localFile == null) {
        throw new NullPointerException("localFile was null.");
    }
    if (isEmpty(s3FileKey)) {
        throw new NullPointerException("s3FileKey cannot be null");
    }
    if (logger.isTraceEnabled()) {
        logger.trace(format("uploadFileToS3(%s)", localFile.getFileName().toString()));
    }
    AWSCredentials awsCredentials = AmazonAWSHelper.getCredentials();
    TransferManager tx = new TransferManager(awsCredentials);

    ObjectMetadata metadata = new ObjectMetadata();
    final String contentType = detectContentTypeFromFilename(s3FileKey);
    if (logger.isDebugEnabled()) {
        logger.debug(format("Setting contentType to '%s' in metadata for S3 object '%s'", contentType, s3FileKey));
    }
    metadata.setContentType(contentType);

    // close the source stream and shut down the TransferManager even if the upload fails
    try (InputStream is = Files.newInputStream(localFile)) {
        Upload myUpload = tx.upload(s3Bucket, s3FileKey, is, metadata);
        myUpload.waitForCompletion();
    } finally {
        tx.shutdownNow();
    }

    String[] retval = {s3Bucket, s3FileKey};
    if (logger.isDebugEnabled()) {
        logger.debug(format("Upload to S3 was successful. bucket: '%s', file key: '%s'", s3Bucket, s3FileKey));
    }
    return retval;
}
public BufferedWriter createStreamWriter(String correlationID, String streamUri) throws IOException {
    String[] split = streamUri.split("://", 2);
    String protocol = split[0];
    String path = split[1];
    if (Constants.FILE.equals(protocol)) {
        return new BufferedWriter(new FileWriter(path));
    } else if (Constants.s3.equals(protocol)) {
        String[] split1 = path.split("/", 2);
        final String bucketName = split1[0];
        final String objectKey = split1[1];
        String tempFilePath = tempDirectoryPath + "/" + correlationID;
        final File tempFile = new File(tempFilePath);
        return new BufferedWriterTaskOnClose(new FileWriter(tempFile), new Task() {
            @Override
            public void run() throws InterruptedException {
                if (!offlineMode) {
                    Upload upload = transferManager.upload(bucketName, objectKey, tempFile);
                    upload.waitForUploadResult();
                    tempFile.delete();
                }
            }
        });
    } else {
        throw new NotImplementedException("Unrecognised stream URI protocol: " + protocol);
    }
}
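// BufferedWriterTaskOnClose is not shown above; a minimal shape consistent with its use,
// assuming Task is a single-method callback invoked after the writer closes (a sketch,
// not the original class):
class BufferedWriterTaskOnClose extends BufferedWriter {
    private final Task task;

    BufferedWriterTaskOnClose(Writer writer, Task task) {
        super(writer);
        this.task = task;
    }

    @Override
    public void close() throws IOException {
        super.close(); // flush the temp file first so the upload sees complete data
        try {
            task.run(); // e.g. upload the temp file to S3, then delete it
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new IOException(e);
        }
    }
}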