public static void uploadDir(String dir_path, String bucket_name,
        String key_prefix, boolean recursive, boolean pause) {
    System.out.println("directory: " + dir_path + (recursive ? " (recursive)" : "")
            + (pause ? " (pause)" : ""));

    TransferManager xfer_mgr = new TransferManager();
    try {
        MultipleFileUpload xfer = xfer_mgr.uploadDirectory(bucket_name,
                key_prefix, new File(dir_path), recursive);
        // loop with Transfer.isDone()
        XferMgrProgress.showTransferProgress(xfer);
        // or block with Transfer.waitForCompletion()
        XferMgrProgress.waitForCompletion(xfer);
    } catch (AmazonServiceException e) {
        System.err.println(e.getErrorMessage());
        System.exit(1);
    }
    xfer_mgr.shutdownNow();
}
public static void uploadDirWithSubprogress(String dir_path, String bucket_name,
        String key_prefix, boolean recursive, boolean pause) {
    System.out.println("directory: " + dir_path + (recursive ? " (recursive)" : "")
            + (pause ? " (pause)" : ""));

    TransferManager xfer_mgr = new TransferManager();
    try {
        MultipleFileUpload multi_upload = xfer_mgr.uploadDirectory(
                bucket_name, key_prefix, new File(dir_path), recursive);
        // loop with Transfer.isDone()
        XferMgrProgress.showMultiUploadProgress(multi_upload);
        // or block with Transfer.waitForCompletion()
        XferMgrProgress.waitForCompletion(multi_upload);
    } catch (AmazonServiceException e) {
        System.err.println(e.getErrorMessage());
        System.exit(1);
    }
    xfer_mgr.shutdownNow();
}
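The XferMgrProgress.waitForCompletion helper used throughout these samples is not shown here; a minimal sketch, assuming it simply blocks on the transfer and reports SDK errors, might look like this:

public static void waitForCompletion(Transfer xfer) {
    try {
        // Block until the transfer finishes (or fails).
        xfer.waitForCompletion();
    } catch (AmazonServiceException e) {
        System.err.println("Amazon service error: " + e.getErrorMessage());
        System.exit(1);
    } catch (AmazonClientException e) {
        System.err.println("Amazon client error: " + e.getMessage());
        System.exit(1);
    } catch (InterruptedException e) {
        System.err.println("Transfer interrupted: " + e.getMessage());
        System.exit(1);
    }
}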
private String uploadToS3(String bucket, String key, MultipartFile file) {
    final AmazonS3 s3 = s3ClientFactory.createClient();
    final TransferManager transferManager = TransferManagerBuilder.standard().withS3Client(s3).build();
    try {
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(file.getSize());
        metadata.setContentType(file.getContentType());

        byte[] resultByte = DigestUtils.md5(file.getBytes());
        String streamMD5 = new String(Base64.encodeBase64(resultByte));
        metadata.setContentMD5(streamMD5);

        Upload upload = transferManager.upload(bucket, key, file.getInputStream(), metadata);
        upload.waitForCompletion();
        return streamMD5;
    } catch (AmazonServiceException | InterruptedException | IOException e) {
        logger.error("Error uploading file: {}", e.toString());
        return null;
    } finally {
        transferManager.shutdownNow();
    }
}
/**
 * Gets an Amazon transfer manager.
 *
 * @return a transfer manager
 */
public TransferManager getTransferManager() {
    if (transferManager == null) {
        transferManager = TransferManagerBuilder.standard().withS3Client(getAmazonS3Client()).build();
    }
    return transferManager;
}
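A possible companion method (an assumption, not shown in the original class) that releases the lazily created manager's threads once the owner is done with it:

public void shutdownTransferManager() {
    if (transferManager != null) {
        // Pass false so the underlying AmazonS3 client stays usable by other callers.
        transferManager.shutdownNow(false);
        transferManager = null;
    }
}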
@Test
public void copyCheckTransferManagerIsShutdown() throws Exception {
    client.putObject("source", "data", inputData);
    Path sourceBaseLocation = new Path("s3://source/");
    Path replicaLocation = new Path("s3://target/");
    List<Path> sourceSubLocations = new ArrayList<>();

    TransferManagerFactory mockedTransferManagerFactory = Mockito.mock(TransferManagerFactory.class);
    TransferManager mockedTransferManager = Mockito.mock(TransferManager.class);
    when(mockedTransferManagerFactory.newInstance(any(AmazonS3.class), eq(s3S3CopierOptions)))
        .thenReturn(mockedTransferManager);
    Copy copy = Mockito.mock(Copy.class);
    when(mockedTransferManager.copy(any(CopyObjectRequest.class), any(AmazonS3.class),
        any(TransferStateChangeListener.class))).thenReturn(copy);
    TransferProgress transferProgress = new TransferProgress();
    when(copy.getProgress()).thenReturn(transferProgress);

    S3S3Copier s3s3Copier = new S3S3Copier(sourceBaseLocation, sourceSubLocations, replicaLocation,
        s3ClientFactory, mockedTransferManagerFactory, listObjectsRequestFactory, registry, s3S3CopierOptions);
    s3s3Copier.copy();

    verify(mockedTransferManager).shutdownNow();
}
@Test
public void copyCheckTransferManagerIsShutdownWhenSubmittingJobExceptionsAreThrown() throws Exception {
    client.putObject("source", "data", inputData);
    Path sourceBaseLocation = new Path("s3://source/");
    Path replicaLocation = new Path("s3://target/");
    List<Path> sourceSubLocations = new ArrayList<>();

    TransferManagerFactory mockedTransferManagerFactory = Mockito.mock(TransferManagerFactory.class);
    TransferManager mockedTransferManager = Mockito.mock(TransferManager.class);
    when(mockedTransferManagerFactory.newInstance(any(AmazonS3.class), eq(s3S3CopierOptions)))
        .thenReturn(mockedTransferManager);
    when(mockedTransferManager.copy(any(CopyObjectRequest.class), any(AmazonS3.class),
        any(TransferStateChangeListener.class))).thenThrow(new AmazonServiceException("MyCause"));

    S3S3Copier s3s3Copier = new S3S3Copier(sourceBaseLocation, sourceSubLocations, replicaLocation,
        s3ClientFactory, mockedTransferManagerFactory, listObjectsRequestFactory, registry, s3S3CopierOptions);
    try {
        s3s3Copier.copy();
        fail("exception should have been thrown");
    } catch (CircusTrainException e) {
        verify(mockedTransferManager).shutdownNow();
        assertThat(e.getCause().getMessage(), startsWith("MyCause"));
    }
}
@Test
public void copyCheckTransferManagerIsShutdownWhenCopyExceptionsAreThrown() throws Exception {
    client.putObject("source", "data", inputData);
    Path sourceBaseLocation = new Path("s3://source/");
    Path replicaLocation = new Path("s3://target/");
    List<Path> sourceSubLocations = new ArrayList<>();

    TransferManagerFactory mockedTransferManagerFactory = Mockito.mock(TransferManagerFactory.class);
    TransferManager mockedTransferManager = Mockito.mock(TransferManager.class);
    when(mockedTransferManagerFactory.newInstance(any(AmazonS3.class), eq(s3S3CopierOptions)))
        .thenReturn(mockedTransferManager);
    Copy copy = Mockito.mock(Copy.class);
    when(copy.getProgress()).thenReturn(new TransferProgress());
    when(mockedTransferManager.copy(any(CopyObjectRequest.class), any(AmazonS3.class),
        any(TransferStateChangeListener.class))).thenReturn(copy);
    doThrow(new AmazonClientException("cause")).when(copy).waitForCompletion();

    S3S3Copier s3s3Copier = new S3S3Copier(sourceBaseLocation, sourceSubLocations, replicaLocation,
        s3ClientFactory, mockedTransferManagerFactory, listObjectsRequestFactory, registry, s3S3CopierOptions);
    try {
        s3s3Copier.copy();
        fail("exception should have been thrown");
    } catch (CircusTrainException e) {
        verify(mockedTransferManager).shutdownNow();
        assertThat(e.getCause().getMessage(), is("cause"));
    }
}
@Test
public void testS3Persister() throws Exception {
    Upload upload = mock(Upload.class);
    when(upload.waitForUploadResult()).thenReturn(new UploadResult());
    TransferManager transferManager = mock(TransferManager.class);
    when(transferManager.upload(anyString(), anyString(), any())).thenReturn(upload);

    S3Persister s3Persister = new S3Persister(transferManager, "foo");
    s3Persister.saveMetrics("foo", "bar");

    verify(transferManager, times(1)).upload(anyString(), anyString(), any());
    verify(transferManager, times(1)).shutdownNow();
    verifyNoMoreInteractions(transferManager);
    verify(upload, times(1)).waitForCompletion();
    verifyNoMoreInteractions(upload);
    assertFalse(new File("foo").exists());
}
@Override
@Guarded(by = STARTED)
public Blob create(final InputStream blobData, final Map<String, String> headers) {
    checkNotNull(blobData);

    return create(headers, destination -> {
        try (InputStream data = blobData) {
            MetricsInputStream input = new MetricsInputStream(data);
            TransferManager transferManager = new TransferManager(s3);
            transferManager.upload(getConfiguredBucket(), destination, input, new ObjectMetadata())
                .waitForCompletion();
            return input.getMetrics();
        } catch (InterruptedException e) {
            throw new BlobStoreException("error uploading blob", e, null);
        }
    });
}
/**
 * Constructs a new watcher for a copy operation and immediately submits it
 * to the thread pool.
 *
 * @param manager
 *            The {@link TransferManager} that owns this copy request.
 * @param transfer
 *            The transfer being processed.
 * @param threadPool
 *            The {@link ExecutorService} to which we should submit new
 *            tasks.
 * @param multipartCopyCallable
 *            The callable responsible for processing the copy
 *            asynchronously.
 * @param copyObjectRequest
 *            The original CopyObject request.
 * @param progressListenerChain
 *            A chain of listeners that wish to be notified of copy
 *            progress.
 */
public static CopyMonitor create(
        TransferManager manager,
        CopyImpl transfer,
        ExecutorService threadPool,
        CopyCallable multipartCopyCallable,
        CopyObjectRequest copyObjectRequest,
        ProgressListenerChain progressListenerChain) {

    CopyMonitor copyMonitor = new CopyMonitor(manager, transfer, threadPool,
            multipartCopyCallable, copyObjectRequest, progressListenerChain);
    Future<CopyResult> thisFuture = threadPool.submit(copyMonitor);
    // Use an atomic compareAndSet to prevent a possible race between the
    // setting of the CopyMonitor's futureReference, and setting the
    // CompleteMultipartCopy's futureReference within the call() method.
    // We only want to set the futureReference to CopyMonitor's futureReference if the
    // current value is null, otherwise the futureReference that's set is
    // CompleteMultipartCopy's which is ultimately what we want.
    copyMonitor.futureReference.compareAndSet(null, thisFuture);
    return copyMonitor;
}
/**
 * Constructs a new upload watcher and then immediately submits it to
 * the thread pool.
 *
 * @param manager
 *            The {@link TransferManager} that owns this upload.
 * @param transfer
 *            The transfer being processed.
 * @param threadPool
 *            The {@link ExecutorService} to which we should submit new
 *            tasks.
 * @param multipartUploadCallable
 *            The callable responsible for processing the upload
 *            asynchronously.
 * @param putObjectRequest
 *            The original putObject request.
 * @param progressListenerChain
 *            A chain of listeners that wish to be notified of upload
 *            progress.
 */
public static UploadMonitor create(
        TransferManager manager,
        UploadImpl transfer,
        ExecutorService threadPool,
        UploadCallable multipartUploadCallable,
        PutObjectRequest putObjectRequest,
        ProgressListenerChain progressListenerChain) {

    UploadMonitor uploadMonitor = new UploadMonitor(manager, transfer, threadPool,
            multipartUploadCallable, putObjectRequest, progressListenerChain);
    Future<UploadResult> thisFuture = threadPool.submit(uploadMonitor);
    // Use an atomic compareAndSet to prevent a possible race between the
    // setting of the UploadMonitor's futureReference, and setting the
    // CompleteMultipartUpload's futureReference within the call() method.
    // We only want to set the futureReference to UploadMonitor's futureReference if the
    // current value is null, otherwise the futureReference that's set is
    // CompleteMultipartUpload's which is ultimately what we want.
    uploadMonitor.futureReference.compareAndSet(null, thisFuture);
    return uploadMonitor;
}
/**
 * Tests if an object can be uploaded asynchronously.
 *
 * @throws Exception not expected
 */
@Test
public void shouldUploadInParallel() throws Exception {
    final File uploadFile = new File(UPLOAD_FILE_NAME);
    s3Client.createBucket(BUCKET_NAME);

    final TransferManager transferManager = createDefaultTransferManager();
    final Upload upload =
        transferManager.upload(new PutObjectRequest(BUCKET_NAME, UPLOAD_FILE_NAME, uploadFile));
    final UploadResult uploadResult = upload.waitForUploadResult();
    assertThat(uploadResult.getKey(), equalTo(UPLOAD_FILE_NAME));

    final S3Object getResult = s3Client.getObject(BUCKET_NAME, UPLOAD_FILE_NAME);
    assertThat(getResult.getKey(), equalTo(UPLOAD_FILE_NAME));
}
/**
 * Verify that range-downloads work.
 *
 * @throws Exception not expected
 */
@Test
public void checkRangeDownloads() throws Exception {
    final File uploadFile = new File(UPLOAD_FILE_NAME);
    s3Client.createBucket(BUCKET_NAME);

    final TransferManager transferManager = createDefaultTransferManager();
    final Upload upload =
        transferManager.upload(new PutObjectRequest(BUCKET_NAME, UPLOAD_FILE_NAME, uploadFile));
    upload.waitForUploadResult();

    final File downloadFile = File.createTempFile(UUID.randomUUID().toString(), null);
    transferManager
        .download(new GetObjectRequest(BUCKET_NAME, UPLOAD_FILE_NAME).withRange(1, 2), downloadFile)
        .waitForCompletion();
    assertThat("Invalid file length", downloadFile.length(), is(2L));

    transferManager
        .download(new GetObjectRequest(BUCKET_NAME, UPLOAD_FILE_NAME).withRange(0, 1000), downloadFile)
        .waitForCompletion();
    assertThat("Invalid file length", downloadFile.length(), is(uploadFile.length()));
}
private TransferManager createTransferManager(final long multipartUploadThreshold,
        final long multipartUploadPartSize, final long multipartCopyThreshold,
        final long multipartCopyPartSize) {
    final ThreadFactory threadFactory = new ThreadFactory() {
        private int threadCount = 1;

        @Override
        public Thread newThread(final Runnable r) {
            final Thread thread = new Thread(r);
            thread.setName("s3-transfer-" + threadCount++);
            return thread;
        }
    };

    return TransferManagerBuilder.standard()
        .withS3Client(s3Client)
        .withExecutorFactory(() -> Executors.newFixedThreadPool(THREAD_COUNT, threadFactory))
        .withMultipartUploadThreshold(multipartUploadThreshold)
        .withMinimumUploadPartSize(multipartUploadPartSize)
        .withMultipartCopyPartSize(multipartCopyPartSize)
        .withMultipartCopyThreshold(multipartCopyThreshold)
        .build();
}
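A hedged sketch of how the createDefaultTransferManager() used in the earlier tests might delegate to this factory; the 8 MiB thresholds and 5 MiB part sizes are illustrative assumptions, not the project's real defaults:

private TransferManager createDefaultTransferManager() {
    return createTransferManager(
            8L * 1024 * 1024,   // multipartUploadThreshold
            5L * 1024 * 1024,   // multipartUploadPartSize
            8L * 1024 * 1024,   // multipartCopyThreshold
            5L * 1024 * 1024);  // multipartCopyPartSize
}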
@Override
public String retrieve(String file) throws Exception {
    LogUtils.debug(LOG_TAG, "Downloading file: " + file);
    TransferManager tm = new TransferManager(new DefaultAWSCredentialsProviderChain());

    // TransferManager processes all transfers asynchronously,
    // so this call will return immediately.
    File downloadedFile = new File(Constants.MCSFS_WORKING_DIR + Constants.S3_WORKING_DIR
            + file + System.currentTimeMillis());
    downloadedFile.getParentFile().mkdirs();
    downloadedFile.createNewFile();
    Download download = tm.download(bucketName, file, downloadedFile);
    download.waitForCompletion();

    LogUtils.debug(LOG_TAG, "Successfully downloaded file from bucket.\nName: " + file
            + "\nBucket name: " + bucketName);
    tm.shutdownNow();
    return downloadedFile.getAbsolutePath();
}
/**
 * Constructor for an output stream of an object in COS.
 *
 * @param bucketName the bucket the object resides in
 * @param key the key of the object to write
 * @param client the COS client to use for operations
 * @param contentType the content type written to the output stream
 * @param metadata the object's metadata
 * @param transfersT TransferManager
 * @param fsT COSAPIClient
 *
 * @throws IOException if error
 */
public COSOutputStream(String bucketName, String key, AmazonS3 client, String contentType,
        Map<String, String> metadata, TransferManager transfersT, COSAPIClient fsT) throws IOException {
    mBucketName = bucketName;
    transfers = transfersT;
    fs = fsT;
    // Remove the bucket name prefix from the key path
    if (key.startsWith(bucketName + "/")) {
        mKey = key.substring(bucketName.length() + 1);
    } else {
        mKey = key;
    }
    mContentType = contentType;
    mMetadata = metadata;
    try {
        String tmpPrefix = (key.replaceAll("/", "-")).replaceAll(":", "-");
        mBackupFile = fs.createTmpFileForWrite("output-" + tmpPrefix);
        LOG.trace("OutputStream for key '{}' writing to tempfile: {}", key, mBackupFile);
        mBackupOutputStream = new BufferedOutputStream(new FileOutputStream(mBackupFile), 32768);
    } catch (IOException e) {
        LOG.error(e.getMessage());
        throw e;
    }
}
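A hedged sketch, not part of the original class, of how a close() method might hand the buffered temp file to the injected TransferManager once writes finish; the real COSOutputStream may differ:

@Override
public void close() throws IOException {
    mBackupOutputStream.close();
    try {
        ObjectMetadata om = new ObjectMetadata();
        om.setContentType(mContentType);
        om.setUserMetadata(mMetadata);
        // Hand the completed temp file to the shared TransferManager and block until done.
        Upload upload = transfers.upload(
                new PutObjectRequest(mBucketName, mKey, mBackupFile).withMetadata(om));
        upload.waitForCompletion();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        throw new IOException("Upload of " + mKey + " interrupted", e);
    } finally {
        if (!mBackupFile.delete()) {
            LOG.warn("Could not delete temporary file {}", mBackupFile);
        }
    }
}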
public TransferManager getOrCreateS3TransferManager(
        AuthCredentialsServiceState credentials, String regionId,
        StatelessService service, Consumer<Throwable> failConsumer) {
    if (this.awsClientType != AwsClientType.S3_TRANSFER_MANAGER) {
        throw new UnsupportedOperationException(
                "This client manager supports only AWS " + this.awsClientType + " clients.");
    }

    String cacheKey = createCredentialRegionCacheKey(credentials, regionId);
    try {
        return this.s3TransferManagerCache.computeIfAbsent(cacheKey, key -> AWSUtils
                .getS3TransferManager(credentials, regionId, getExecutor()));
    } catch (Throwable t) {
        service.logSevere(t);
        failConsumer.accept(t);
        return null;
    }
}
private void downloadFile(TransferManager tx, String bucketName, String sourcePrefixKey,
        String destinationFile) throws Exception {
    try {
        final File snapshotFile = new File(destinationFile);
        // Only create parent directory once, if it doesn't exist.
        final File parentDir = new File(snapshotFile.getParent());
        if (!parentDir.isDirectory()) {
            final boolean parentDirCreated = parentDir.mkdirs();
            if (!parentDirCreated) {
                LOGGER.error("Error creating parent directory for file: {}. Skipping to next",
                        destinationFile);
                return;
            }
        }
        snapshotFile.createNewFile();

        final Download download = tx.download(bucketName, sourcePrefixKey, snapshotFile);
        download.waitForCompletion();
    } catch (Exception e) {
        LOGGER.error("Error downloading the file {} : {}", destinationFile, e);
        throw new Exception(e);
    }
}
public static void downloadDir(String bucket_name, String key_prefix,
        String dir_path, boolean pause) {
    System.out.println("downloading to directory: " + dir_path
            + (pause ? " (pause)" : ""));

    TransferManager xfer_mgr = new TransferManager();
    try {
        MultipleFileDownload xfer = xfer_mgr.downloadDirectory(
                bucket_name, key_prefix, new File(dir_path));
        // loop with Transfer.isDone()
        XferMgrProgress.showTransferProgress(xfer);
        // or block with Transfer.waitForCompletion()
        XferMgrProgress.waitForCompletion(xfer);
    } catch (AmazonServiceException e) {
        System.err.println(e.getErrorMessage());
        System.exit(1);
    }
    xfer_mgr.shutdownNow();
}
public static void downloadFile(String bucket_name, String key_name,
        String file_path, boolean pause) {
    System.out.println("Downloading to file: " + file_path
            + (pause ? " (pause)" : ""));

    File f = new File(file_path);
    TransferManager xfer_mgr = new TransferManager();
    try {
        Download xfer = xfer_mgr.download(bucket_name, key_name, f);
        // loop with Transfer.isDone()
        XferMgrProgress.showTransferProgress(xfer);
        // or block with Transfer.waitForCompletion()
        XferMgrProgress.waitForCompletion(xfer);
    } catch (AmazonServiceException e) {
        System.err.println(e.getErrorMessage());
        System.exit(1);
    }
    xfer_mgr.shutdownNow();
}
public static void copyObjectSimple(String from_bucket, String from_key,
        String to_bucket, String to_key) {
    System.out.println("Copying s3 object: " + from_key);
    System.out.println("      from bucket: " + from_bucket);
    System.out.println("     to s3 object: " + to_key);
    System.out.println("        in bucket: " + to_bucket);

    TransferManager xfer_mgr = new TransferManager();
    try {
        Copy xfer = xfer_mgr.copy(from_bucket, from_key, to_bucket, to_key);
        // loop with Transfer.isDone()
        XferMgrProgress.showTransferProgress(xfer);
        // or block with Transfer.waitForCompletion()
        XferMgrProgress.waitForCompletion(xfer);
    } catch (AmazonServiceException e) {
        System.err.println(e.getErrorMessage());
        System.exit(1);
    }
    xfer_mgr.shutdownNow();
}
public static void uploadFileList(String[] file_paths, String bucket_name,
        String key_prefix, boolean pause) {
    System.out.println("file list: " + Arrays.toString(file_paths)
            + (pause ? " (pause)" : ""));
    // convert the file paths to a list of File objects (required by the
    // uploadFileList method)
    ArrayList<File> files = new ArrayList<File>();
    for (String path : file_paths) {
        files.add(new File(path));
    }

    TransferManager xfer_mgr = new TransferManager();
    try {
        MultipleFileUpload xfer = xfer_mgr.uploadFileList(bucket_name,
                key_prefix, new File("."), files);
        // loop with Transfer.isDone()
        XferMgrProgress.showTransferProgress(xfer);
        // or block with Transfer.waitForCompletion()
        XferMgrProgress.waitForCompletion(xfer);
    } catch (AmazonServiceException e) {
        System.err.println(e.getErrorMessage());
        System.exit(1);
    }
    xfer_mgr.shutdownNow();
}
public void testUpload() throws Exception {
    AmazonS3 s3Mock = mock(AmazonS3.class);
    TransferManager tmMock = mock(TransferManager.class);
    BlockGZIPFileWriter fileWriter = createDummmyFiles(0, 1000);
    S3Writer s3Writer = new S3Writer(testBucket, "pfx", s3Mock, tmMock);
    TopicPartition tp = new TopicPartition("bar", 0);

    Upload mockUpload = mock(Upload.class);

    when(tmMock.upload(eq(testBucket), eq(getKeyForFilename("pfx", "bar-00000-000000000000.gz")),
            isA(File.class)))
        .thenReturn(mockUpload);
    when(tmMock.upload(eq(testBucket), eq(getKeyForFilename("pfx", "bar-00000-000000000000.index.json")),
            isA(File.class)))
        .thenReturn(mockUpload);

    s3Writer.putChunk(fileWriter.getDataFilePath(), fileWriter.getIndexFilePath(), tp);

    verifyTMUpload(tmMock, new ExpectedRequestParams[] {
        new ExpectedRequestParams(getKeyForFilename("pfx", "bar-00000-000000000000.gz"), testBucket),
        new ExpectedRequestParams(getKeyForFilename("pfx", "bar-00000-000000000000.index.json"), testBucket)
    });

    // Verify it also wrote the index file key
    verifyStringPut(s3Mock, "pfx/last_chunk_index.bar-00000.txt",
            getKeyForFilename("pfx", "bar-00000-000000000000.index.json"));
}
private void uploadArtifactStream(IndexArtifact ia, StorageRequest sr) throws LocalStorageException {
    try {
        TransferManager tx = new TransferManager(client);
        ObjectMetadata om = new ObjectMetadata();
        om.setContentLength(sr.getLength());

        String key = getPath() + ia.getLocation() + "/" + sr.getFilename();

        Upload myUpload = tx.upload(bucketName, key, sr.getNewStream(), om);
        myUpload.waitForCompletion();
    } catch (Exception exc) {
        logger.error(exc.getLocalizedMessage());
        throw new LocalStorageException(exc);
    }
}
@Override
@Guarded(by = STARTED)
public Blob create(final InputStream blobData, final Map<String, String> headers) {
    checkNotNull(blobData);

    return create(headers, destination -> {
        try (InputStream data = blobData) {
            MetricsInputStream input = new MetricsInputStream(data);
            TransferManager transferManager = TransferManagerBuilder.standard().withS3Client(s3).build();
            transferManager.upload(getConfiguredBucket(), destination, input, new ObjectMetadata())
                .waitForCompletion();
            return input.getMetrics();
        } catch (InterruptedException e) {
            throw new BlobStoreException("error uploading blob", e, null);
        }
    });
}
@Override
public S3FileTransferResultsDto downloadFile(final S3FileTransferRequestParamsDto params) throws InterruptedException {
    LOGGER.info("Downloading S3 file... s3Key=\"{}\" s3BucketName=\"{}\" localPath=\"{}\"",
        params.getS3KeyPrefix(), params.getS3BucketName(), params.getLocalPath());

    // Perform the transfer.
    S3FileTransferResultsDto results = performTransfer(params, new Transferer() {
        @Override
        public Transfer performTransfer(TransferManager transferManager) {
            return s3Operations.download(params.getS3BucketName(), params.getS3KeyPrefix(),
                new File(params.getLocalPath()), transferManager);
        }
    });

    LOGGER.info(
        "Downloaded S3 file to the local system. s3Key=\"{}\" s3BucketName=\"{}\" localPath=\"{}\" totalBytesTransferred={} transferDuration=\"{}\"",
        params.getS3KeyPrefix(), params.getS3BucketName(), params.getLocalPath(),
        results.getTotalBytesTransferred(), HerdDateUtils.formatDuration(results.getDurationMillis()));

    logOverallTransferRate(results);

    return results;
}
/**
 * Gets a transfer manager with the specified parameters including proxy host, proxy port, S3 access key, S3 secret key, and max threads.
 *
 * @param params the parameters.
 *
 * @return a newly created transfer manager.
 */
private TransferManager getTransferManager(final S3FileTransferRequestParamsDto params) {
    // We are returning a new transfer manager each time it is called. Although the Javadocs of TransferManager say to share a single instance
    // if possible, this could potentially be a problem if TransferManager.shutdown(true) is called and underlying resources are not present when
    // needed for subsequent transfers.
    if (params.getMaxThreads() == null) {
        // Create a transfer manager that will internally use an appropriate number of threads.
        return new TransferManager(getAmazonS3(params));
    } else {
        // Create a transfer manager with our own executor configured with the specified total threads.
        LOGGER.info("Creating a transfer manager. fixedThreadPoolSize={}", params.getMaxThreads());
        return new TransferManager(getAmazonS3(params), Executors.newFixedThreadPool(params.getMaxThreads()));
    }
}
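A hypothetical caller of the method above, illustrating the lifecycle the comment implies: because a fresh manager is created per call, the caller shuts it down once its transfer completes. The method name below is illustrative, not from the original class:

private void uploadWithManagedTransferManager(S3FileTransferRequestParamsDto params) throws InterruptedException {
    TransferManager transferManager = getTransferManager(params);
    try {
        transferManager.upload(params.getS3BucketName(), params.getS3KeyPrefix(),
            new File(params.getLocalPath())).waitForCompletion();
    } finally {
        // Pass false so the underlying AmazonS3 client stays usable for later transfers.
        transferManager.shutdownNow(false);
    }
}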
private static TransferManager createManager() {
    AWSCredentials credentials = new AWSCredentials() {
        @Override
        public String getAWSAccessKeyId() {
            return System.getProperty("s3.accessKey");
        }

        @Override
        public String getAWSSecretKey() {
            return System.getProperty("s3.secretKey");
        }
    };
    return new TransferManager(credentials);
}
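Since the TransferManager(AWSCredentials) constructor is deprecated in recent 1.x SDKs, a builder-based sketch of the same factory (reusing the system properties from the original) might look like this:

private static TransferManager createManagerWithBuilder() {
    AWSCredentials credentials = new BasicAWSCredentials(
            System.getProperty("s3.accessKey"), System.getProperty("s3.secretKey"));
    AmazonS3 s3 = AmazonS3ClientBuilder.standard()
            .withCredentials(new AWSStaticCredentialsProvider(credentials))
            .build();
    return TransferManagerBuilder.standard().withS3Client(s3).build();
}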
@Test
public void runAppAndBasicTest() {
    AmazonS3Client s3client = server.getSpringContext().getBean(AmazonS3Client.class);
    assertThat(s3client != null, is(true));

    S3Configuration s3Configuration = server.getSpringContext().getBean(S3Configuration.class);
    assertThat(s3Configuration.getAccessKey(), is(""));
    assertThat(s3Configuration.getSecretKey(), is(""));
    assertThat(s3Configuration.getSessionToken() == null, is(true));
    assertThat(s3Configuration.getRegion() == null, is(true));
    assertThat(s3Configuration.getUploadThreads(), is(5));
    assertThat(s3Configuration.getUploadThreadNamePrefix(), is("s3-transfer-manager-worker-"));

    S3Utils s3Utils = server.getSpringContext().getBean(S3Utils.class);
    assertThat(s3Utils != null, is(true));

    TransferManager tm = server.getSpringContext().getBean(TransferManager.class);
    assertThat(tm != null, is(true));
}
@Test
public void getInputStreamSupplier() throws AmazonServiceException, AmazonClientException, InterruptedException, IOException {
    TransferManager transferManager = mock(TransferManager.class);
    Download download = mock(Download.class);
    when(transferManager.download(anyString(), anyString(), any())).thenReturn(download);

    File file = Files.createTempFile("micro-s3", "test").toFile();
    Assert.assertTrue(file.exists());

    ReadUtils utils = new ReadUtils(transferManager, "test");
    utils.getInputStream("", "", () -> file);
    Assert.assertFalse(file.exists());
}
@Override
public Transfer uploadDirectoryOrFile(final String bucketName, final File source,
        final String virtualDirectoryKeyPrefix) throws AmazonClientException, AmazonServiceException, IOException {
    LOGGER.info("uploadDirectoryOrFile invoked, bucketName: {} , Source: {}", bucketName, source.getAbsolutePath());
    Transfer transfer = null;
    final TransferManager trMgr = new TransferManager(s3client);
    if (source.isFile()) {
        transfer = trMgr.upload(bucketName, source.getPath(), source);
    } else if (source.isDirectory()) {
        // Upload recursively.
        // virtualDirectoryKeyPrefix could be a virtual directory name inside the bucket.
        transfer = trMgr.uploadDirectory(bucketName, virtualDirectoryKeyPrefix, source, true);
    } else {
        throw new FileNotFoundException("Source is neither a regular file nor a directory " + source);
    }
    return transfer;
}
private static void createBucket(TransferManager tm, String bucketName) throws AmazonClientException {
    boolean existsBucket = false;
    for (Bucket bucket : tm.getAmazonS3Client().listBuckets()) {
        if (bucket.getName().equals(bucketName)) {
            existsBucket = true;
        }
    }
    if (!existsBucket) {
        LOGGER.info("Creating Amazon S3 bucket " + bucketName);
        try {
            tm.getAmazonS3Client().createBucket(bucketName);
        } catch (AmazonClientException e) {
            LOGGER.error("Amazon S3 bucket creation for bucket " + bucketName + " failed");
            throw new AmazonClientException(e.getMessage());
        }
        LOGGER.info("Amazon S3 bucket creation for bucket " + bucketName + " completed successfully");
    }
}
@Override
public Integer call() throws Exception {
    TransferManager t = new TransferManager(amazonS3Client);

    ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setUserMetadata(metadata);
    if (sse) {
        objectMetadata.setSSEAlgorithm(SSEAlgorithm.AES256.getAlgorithm());
    }

    Upload u = t.upload(new PutObjectRequest(bucket, key, inputFile).withMetadata(objectMetadata));

    // TODO this listener spews out garbage >100% on a retry, add a test to verify
    if (progressListener != null) {
        progressListener.withTransferProgress(new TransferProgressWrapper(u.getProgress()));
        u.addProgressListener(progressListener);
    }

    try {
        u.waitForCompletion();
    } finally {
        t.shutdownNow();
    }
    return 0;
}
/**
 * Method to download a file from S3 to the local file system.
 *
 * @param bucketName AWS S3 bucket name
 * @param key (example: android/apkFolder/ApkName.apk)
 * @param file (local file name)
 * @param pollingInterval (polling interval in sec for S3 download status determination)
 */
public void download(final String bucketName, final String key, final File file, long pollingInterval) {
    LOGGER.info("App will be downloaded from s3.");
    LOGGER.info(String.format("[Bucket name: %s] [Key: %s] [File: %s]", bucketName, key, file.getAbsolutePath()));

    DefaultAWSCredentialsProviderChain credentialProviderChain = new DefaultAWSCredentialsProviderChain();
    TransferManager tx = new TransferManager(credentialProviderChain.getCredentials());

    Download appDownload = tx.download(bucketName, key, file);
    try {
        LOGGER.info("Transfer: " + appDownload.getDescription());
        LOGGER.info("  State: " + appDownload.getState());
        LOGGER.info("  Progress: ");
        // You can poll your transfer's status to check its progress
        while (!appDownload.isDone()) {
            LOGGER.info("  transferred: " + (int) (appDownload.getProgress().getPercentTransferred() + 0.5) + "%");
            CommonUtils.pause(pollingInterval);
        }
        LOGGER.info("  State: " + appDownload.getState());
        // appDownload.waitForCompletion();
    } catch (AmazonClientException e) {
        throw new RuntimeException("File wasn't downloaded from s3. See log: ".concat(e.getMessage()));
    }
    // tx.shutdownNow();
}
public TransferManager newInstance(AmazonS3 targetS3Client, S3S3CopierOptions s3s3CopierOptions) {
    return TransferManagerBuilder
        .standard()
        .withMultipartCopyThreshold(s3s3CopierOptions.getMultipartCopyThreshold())
        .withMultipartCopyPartSize(s3s3CopierOptions.getMultipartCopyPartSize())
        .withS3Client(targetS3Client)
        .build();
}
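A hypothetical caller of this factory, mirroring what the tests above verify: the manager created for a copy run is shut down when the run finishes. Method and variable names below are illustrative, not from the original project:

void copyWithManagedLifecycle(AmazonS3 targetS3Client, S3S3CopierOptions s3s3CopierOptions,
        CopyObjectRequest copyRequest) throws InterruptedException {
    TransferManager transferManager = newInstance(targetS3Client, s3s3CopierOptions);
    try {
        transferManager.copy(copyRequest).waitForCompletion();
    } finally {
        transferManager.shutdownNow();
    }
}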
@Test(expectedExceptions = RuntimeException.class)
public void testPersistApplicationDataException() throws Exception {
    TransferManager transferManager = mock(TransferManager.class);
    when(transferManager.upload(anyString(), anyString(), any()))
        .thenThrow(new AmazonServiceException(""));

    S3Persister s3Persister = new S3Persister(transferManager, "foo");
    try {
        s3Persister.saveMetrics("foo", "bar");
    } finally {
        assertFalse(new File("foo").exists());
    }
}
@Test(expectedExceptions = RuntimeException.class)
public void testPersistApplicationNullException() throws Exception {
    Upload upload = mock(Upload.class);
    when(upload.waitForUploadResult()).thenReturn(new UploadResult());
    TransferManager transferManager = mock(TransferManager.class);
    when(transferManager.upload(anyString(), anyString(), any())).thenReturn(upload);

    S3Persister s3Persister = new S3Persister(transferManager, "foo");
    try {
        s3Persister.saveMetrics("foo", null);
    } finally {
        assertFalse(new File("foo").exists());
    }
}
public S3Publisher(AWSCredentials credentials, String bucketName, String blobNamespace) {
    this.s3 = new AmazonS3Client(credentials);
    this.s3TransferManager = new TransferManager(s3);
    this.bucketName = bucketName;
    this.blobNamespace = blobNamespace;
    this.snapshotIndex = initializeSnapshotIndex();
}
public CopyCallable(TransferManager transferManager, ExecutorService threadPool,
        CopyImpl copy, CopyObjectRequest copyObjectRequest, ObjectMetadata metadata,
        ProgressListenerChain progressListenerChain) {
    this.s3 = transferManager.getAmazonS3Client();
    this.configuration = transferManager.getConfiguration();
    this.threadPool = threadPool;
    this.copyObjectRequest = copyObjectRequest;
    this.metadata = metadata;
    this.listenerChain = progressListenerChain;
    this.copy = copy;
}