@Test
public void testS3Persister() throws Exception {
    Upload upload = mock(Upload.class);
    when(upload.waitForUploadResult()).thenReturn(new UploadResult());
    TransferManager transferManager = mock(TransferManager.class);
    when(transferManager.upload(anyString(), anyString(), any())).thenReturn(upload);

    S3Persister s3Persister = new S3Persister(transferManager, "foo");
    s3Persister.saveMetrics("foo", "bar");

    verify(transferManager, times(1)).upload(anyString(), anyString(), any());
    verify(transferManager, times(1)).shutdownNow();
    verifyNoMoreInteractions(transferManager);
    verify(upload, times(1)).waitForCompletion();
    verifyNoMoreInteractions(upload);
    assertFalse(new File("foo").exists());
}
/**
 * Constructs a new upload watcher and then immediately submits it to
 * the thread pool.
 *
 * @param manager
 *            The {@link TransferManager} that owns this upload.
 * @param transfer
 *            The transfer being processed.
 * @param threadPool
 *            The {@link ExecutorService} to which we should submit new
 *            tasks.
 * @param multipartUploadCallable
 *            The callable responsible for processing the upload
 *            asynchronously.
 * @param putObjectRequest
 *            The original putObject request.
 * @param progressListenerChain
 *            A chain of listeners that wish to be notified of upload
 *            progress.
 */
public static UploadMonitor create(
        TransferManager manager,
        UploadImpl transfer,
        ExecutorService threadPool,
        UploadCallable multipartUploadCallable,
        PutObjectRequest putObjectRequest,
        ProgressListenerChain progressListenerChain) {

    UploadMonitor uploadMonitor = new UploadMonitor(manager, transfer,
            threadPool, multipartUploadCallable, putObjectRequest,
            progressListenerChain);
    Future<UploadResult> thisFuture = threadPool.submit(uploadMonitor);
    // Use an atomic compareAndSet to prevent a possible race between the
    // setting of the UploadMonitor's futureReference and the setting of the
    // CompleteMultipartUpload's futureReference within the call() method.
    // We only want to set the futureReference to UploadMonitor's future if
    // the current value is null; otherwise the futureReference that is set
    // is CompleteMultipartUpload's, which is ultimately what we want.
    uploadMonitor.futureReference.compareAndSet(null, thisFuture);
    return uploadMonitor;
}
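// A minimal self-contained sketch (hypothetical names, not the SDK's own
// classes) of the compareAndSet pattern used above: two threads race to
// publish a Future into one AtomicReference, and "set only if still null"
// lets the more specific future win whenever it arrives first.
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicReference;

class FuturePublishSketch {
    final AtomicReference<Future<String>> futureReference = new AtomicReference<>();

    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        FuturePublishSketch sketch = new FuturePublishSketch();

        // The worker may publish its own, preferred future first...
        Future<String> workerFuture = pool.submit(() -> {
            Future<String> preferred = CompletableFuture.completedFuture("preferred");
            sketch.futureReference.set(preferred); // unconditional: always wins
            return "submitted";
        });

        // ...so the submitter only fills the slot if it is still empty.
        sketch.futureReference.compareAndSet(null, workerFuture);

        workerFuture.get(); // wait for the worker's unconditional set
        System.out.println(sketch.futureReference.get().get()); // "preferred"
        pool.shutdown();
    }
}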
/**
 * Tests if an object can be uploaded asynchronously.
 *
 * @throws Exception not expected
 */
@Test
public void shouldUploadInParallel() throws Exception {
    final File uploadFile = new File(UPLOAD_FILE_NAME);

    s3Client.createBucket(BUCKET_NAME);

    final TransferManager transferManager = createDefaultTransferManager();
    final Upload upload = transferManager.upload(
            new PutObjectRequest(BUCKET_NAME, UPLOAD_FILE_NAME, uploadFile));
    final UploadResult uploadResult = upload.waitForUploadResult();
    assertThat(uploadResult.getKey(), equalTo(UPLOAD_FILE_NAME));

    final S3Object getResult = s3Client.getObject(BUCKET_NAME, UPLOAD_FILE_NAME);
    assertThat(getResult.getKey(), equalTo(UPLOAD_FILE_NAME));
}
@Test
@Ignore
@Repeat(times = 1, threads = 4)
public void upload() {
    S3ObjectWriter writerWithoutEncryption = buildWriterWithEncryption(false);
    long startNE = System.currentTimeMillis();
    Try<UploadResult, Throwable> uploadWithoutEncryption =
            writerWithoutEncryption.putSync("uploadWithoutEncryption" + r.nextLong(), nullableFile.get());
    long endNE = System.currentTimeMillis();
    assertTrue(uploadWithoutEncryption.isSuccess());
    unencryptedHist.update(endNE - startNE);

    S3ObjectWriter writerWithEncryption = buildWriterWithEncryption(true);
    long startWE = System.currentTimeMillis();
    Try<UploadResult, Throwable> uploadWithEncryption =
            writerWithEncryption.putSync("uploadWithEncryption" + r.nextLong(), nullableFile.get());
    // Capture the end time before asserting, so both histograms measure
    // only the upload itself.
    long endWE = System.currentTimeMillis();
    assertTrue(uploadWithEncryption.isSuccess());
    aes256Hist.update(endWE - startWE);
}
private Map<String, String> verifyMultiPartUpload(MultipleFileUpload uploadDirectory)
        throws AmazonClientException {
    Collection<? extends Upload> uploadResults = uploadDirectory.getSubTransfers();
    Iterator<? extends Upload> iterator = uploadResults.iterator();
    Map<String, String> fileModifyMap = new HashMap<String, String>();
    while (iterator.hasNext()) {
        UploadResult uploadResult = null;
        try {
            uploadResult = iterator.next().waitForUploadResult();
        } catch (Exception e) {
            LOGGER.error(e.getMessage());
            throw new AmazonClientException(e.getMessage(), e);
        }
        if (uploadResult != null) {
            LOGGER.info(String.format("Multipart upload success for file %s to Amazon S3 bucket %s",
                    uploadResult.getKey(), uploadResult.getBucketName()));
        }
    }
    return fileModifyMap;
}
@Override
public void put(String uri, File f) throws AmazonClientException {
    LOG.trace("Uploading " + uri);
    String[] parts = pieces(uri);

    ObjectMetadata om = new ObjectMetadata();
    om.setContentLength(f.length());
    if (f.getName().endsWith("gzip")) {
        om.setContentEncoding("gzip");
    }

    uploadsInProgress.incrementAndGet();
    try {
        PutObjectRequest req = new PutObjectRequest(parts[0], parts[1], f);
        req.setMetadata(om);
        UploadResult resp = svc.upload(req).waitForUploadResult();
        LOG.trace("Uploaded " + uri + " with ETag " + resp.getETag());
    } catch (InterruptedException ie) {
        LOG.error("Interrupted while uploading {} to {}.", f.getPath(), uri);
        Thread.currentThread().interrupt(); // restore the interrupt flag before propagating
        throw Throwables.propagate(ie);
    } finally {
        uploadsInProgress.decrementAndGet();
    }
}
@BeforeEach
void before() {
    // Make the mocked executor actually run submitted tasks on a real
    // async thread instead of swallowing them.
    doAnswer(invocationOnMock -> {
        Runnable method = invocationOnMock.getArgument(0);
        CompletableFuture.runAsync(method);
        return null;
    }).when(taskExecutor).execute(any(Runnable.class));

    result = new UploadResult();
    result.setKey("meep");
    when(transferManager.upload(any(PutObjectRequest.class))).thenReturn(upload);
}
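// A minimal self-contained sketch (hypothetical class name, assuming Mockito
// is on the classpath) of the stubbing pattern above: the mocked Executor
// hands every submitted Runnable to a real async thread, and a latch proves
// the task actually ran.
import static org.mockito.Mockito.*;

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executor;

public class AsyncExecutorStubSketch {
    public static void main(String[] args) throws InterruptedException {
        Executor executor = mock(Executor.class);
        doAnswer(invocation -> {
            Runnable task = invocation.getArgument(0);
            CompletableFuture.runAsync(task); // run on the common ForkJoinPool
            return null;
        }).when(executor).execute(any(Runnable.class));

        CountDownLatch latch = new CountDownLatch(1);
        executor.execute(latch::countDown); // executes asynchronously via the stub
        latch.await();                      // returns only if the task really ran
        System.out.println("task executed");
    }
}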
@Test(expectedExceptions = RuntimeException.class)
public void testPersistApplicationNullException() throws Exception {
    Upload upload = mock(Upload.class);
    when(upload.waitForUploadResult()).thenReturn(new UploadResult());
    TransferManager transferManager = mock(TransferManager.class);
    when(transferManager.upload(anyString(), anyString(), any())).thenReturn(upload);

    S3Persister s3Persister = new S3Persister(transferManager, "foo");
    try {
        s3Persister.saveMetrics("foo", null);
    } finally {
        assertFalse(new File("foo").exists());
    }
}
@Override
public UploadResult call() throws Exception {
    CompleteMultipartUploadResult res;

    try {
        CompleteMultipartUploadRequest req = new CompleteMultipartUploadRequest(
                origReq.getBucketName(), origReq.getKey(), uploadId,
                collectPartETags())
                .withRequesterPays(origReq.isRequesterPays())
                .withGeneralProgressListener(origReq.getGeneralProgressListener())
                .withRequestMetricCollector(origReq.getRequestMetricCollector());
        res = s3.completeMultipartUpload(req);
    } catch (Exception e) {
        publishProgress(listener, ProgressEventType.TRANSFER_FAILED_EVENT);
        throw e;
    }

    UploadResult uploadResult = new UploadResult();
    uploadResult.setBucketName(origReq.getBucketName());
    uploadResult.setKey(origReq.getKey());
    uploadResult.setETag(res.getETag());
    uploadResult.setVersionId(res.getVersionId());
    monitor.uploadComplete();
    return uploadResult;
}
public UploadResult call() throws Exception {
    upload.setState(TransferState.InProgress);
    if (isMultipartUpload()) {
        publishProgress(listener, ProgressEventType.TRANSFER_STARTED_EVENT);
        return uploadInParts();
    } else {
        return uploadInOneChunk();
    }
}
/**
 * Uploads the given request in a single chunk and returns the result.
 */
private UploadResult uploadInOneChunk() {
    PutObjectResult putObjectResult = s3.putObject(origReq);

    UploadResult uploadResult = new UploadResult();
    uploadResult.setBucketName(origReq.getBucketName());
    uploadResult.setKey(origReq.getKey());
    uploadResult.setETag(putObjectResult.getETag());
    uploadResult.setVersionId(putObjectResult.getVersionId());
    return uploadResult;
}
/**
 * Uploads all parts in the request in serial in this thread, then completes
 * the upload and returns the result.
 */
private UploadResult uploadPartsInSeries(UploadPartRequestFactory requestFactory) {
    final List<PartETag> partETags = new ArrayList<PartETag>();

    while (requestFactory.hasMoreRequests()) {
        if (threadPool.isShutdown())
            throw new CancellationException("TransferManager has been shutdown");
        UploadPartRequest uploadPartRequest = requestFactory.getNextUploadPartRequest();
        // Mark the stream in case we need to reset it
        InputStream inputStream = uploadPartRequest.getInputStream();
        if (inputStream != null && inputStream.markSupported()) {
            if (uploadPartRequest.getPartSize() >= Integer.MAX_VALUE) {
                inputStream.mark(Integer.MAX_VALUE);
            } else {
                inputStream.mark((int) uploadPartRequest.getPartSize());
            }
        }
        partETags.add(s3.uploadPart(uploadPartRequest).getPartETag());
    }

    CompleteMultipartUploadRequest req = new CompleteMultipartUploadRequest(
            origReq.getBucketName(), origReq.getKey(), multipartUploadId,
            partETags)
            .withRequesterPays(origReq.isRequesterPays())
            .withGeneralProgressListener(origReq.getGeneralProgressListener())
            .withRequestMetricCollector(origReq.getRequestMetricCollector());
    CompleteMultipartUploadResult res = s3.completeMultipartUpload(req);

    UploadResult uploadResult = new UploadResult();
    uploadResult.setBucketName(res.getBucketName());
    uploadResult.setKey(res.getKey());
    uploadResult.setETag(res.getETag());
    uploadResult.setVersionId(res.getVersionId());
    return uploadResult;
}
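// A hedged, self-contained sketch of the mark/readlimit clamp used above:
// InputStream.mark(int) takes an int, so a long part size must be capped at
// Integer.MAX_VALUE before marking. Names here are illustrative only.
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

public class MarkClampSketch {
    // Mark the stream so a failed part could be retried from the same offset.
    static void markForPart(InputStream in, long partSize) {
        if (in.markSupported()) {
            int readLimit = (partSize >= Integer.MAX_VALUE)
                    ? Integer.MAX_VALUE
                    : (int) partSize;
            in.mark(readLimit);
        }
    }

    public static void main(String[] args) throws IOException {
        InputStream in = new ByteArrayInputStream(new byte[]{1, 2, 3, 4});
        markForPart(in, 5L * 1024 * 1024 * 1024); // 5 GiB part: clamped to Integer.MAX_VALUE
        in.read();
        in.reset();                    // back to the marked position
        System.out.println(in.read()); // prints 1 again
    }
}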
/**
 * Verifies multipart copy.
 */
@Test
public void multipartCopy() throws InterruptedException, IOException, NoSuchAlgorithmException {
    final int contentLen = 3 * _1MB;

    final ObjectMetadata objectMetadata = new ObjectMetadata();
    objectMetadata.setContentLength(contentLen);

    final String assumedSourceKey = UUID.randomUUID().toString();

    final Bucket sourceBucket = s3Client.createBucket(UUID.randomUUID().toString());
    final Bucket targetBucket = s3Client.createBucket(UUID.randomUUID().toString());

    final TransferManager transferManager = createTransferManager(_2MB, _1MB, _2MB, _1MB);

    final InputStream sourceInputStream = randomInputStream(contentLen);
    final Upload upload = transferManager.upload(
            sourceBucket.getName(), assumedSourceKey, sourceInputStream, objectMetadata);

    final UploadResult uploadResult = upload.waitForUploadResult();
    assertThat(uploadResult.getKey(), is(assumedSourceKey));

    final String assumedDestinationKey = UUID.randomUUID().toString();
    final Copy copy = transferManager.copy(sourceBucket.getName(), assumedSourceKey,
            targetBucket.getName(), assumedDestinationKey);
    final CopyResult copyResult = copy.waitForCopyResult();
    assertThat(copyResult.getDestinationKey(), is(assumedDestinationKey));

    final S3Object copiedObject = s3Client.getObject(targetBucket.getName(), assumedDestinationKey);

    assertThat("Hashes for source and target S3Object do not match.",
            HashUtil.getDigest(copiedObject.getObjectContent()) + "-1",
            is(uploadResult.getETag()));
}
@Override
public void upload(String bucketName, String name, InputStream input, ObjectMetadata meta)
        throws IOException {
    final Upload myUpload = tx.upload(bucketName, name, input, meta);
    try {
        UploadResult uploadResult = myUpload.waitForUploadResult();
        LOG.info("Upload completed, bucket={}, key={}",
                uploadResult.getBucketName(), uploadResult.getKey());
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // restore the interrupt flag before wrapping
        throw new IOException(e);
    }
}
@Test
@Ignore
public void testAggregateEventsToS3() throws IOException, InterruptedException {
    // Set up mock expectations
    final Capture<File> fileCapture = new Capture<>();
    final BooleanHolder fileAssertionsRan = new BooleanHolder();
    EasyMock.expect(mockTransferManager.upload(EasyMock.eq("local.build.bucket"),
            EasyMock.eq(streamFileName), EasyMock.capture(fileCapture))).andReturn(mockUpload);
    EasyMock.expect(mockUpload.waitForUploadResult()).andAnswer(new IAnswer<UploadResult>() {
        @Override
        public UploadResult answer() throws Throwable {
            // Run temp file assertions before it's deleted
            File capturedFile = fileCapture.getValue();
            Assert.assertNotNull(capturedFile);
            String capturedEventStream = replaceDates(fileToString(capturedFile));
            Assert.assertNotNull(capturedEventStream);
            Assert.assertEquals("Line count", 3, capturedEventStream.split("\n").length);
            Assert.assertEquals(
                    "DATE INFO org.ihtsdo.telemetry.server.TestProcessor.doProcessing - Start of event stream\n"
                    + "DATE INFO org.ihtsdo.telemetry.server.TestProcessor.doProcessing - Processing...\n"
                    + "DATE INFO org.ihtsdo.telemetry.server.TestProcessor.doProcessing - End of event stream\n",
                    capturedEventStream);
            fileAssertionsRan.b = true;
            return null;
        }
    });
    mocksControl.replay();

    // Perform test scenario
    TestProcessor.doProcessing(TelemetryProcessorTest.streamS3Destination);

    // Wait for the aggregator to finish.
    Thread.sleep(1000);

    // Assert mock expectations
    mocksControl.verify();
    Assert.assertTrue(fileAssertionsRan.b);
}
public static void dump(UploadResult ur, Log log) {
    log.info("  Bucket    : " + ur.getBucketName());
    log.info("  Key       : " + ur.getKey());
    log.info("  ETag      : " + ur.getETag());
    log.info("  VersionId : " + ur.getVersionId());
}
@Override
protected UploadResult doInBackground(String... mediaPaths) {
    UploadResult result = null;

    if (null == mediaPaths[0]) {
        jobFailed(null, 7000000, "S3 media path is null");
        return result;
    }

    File mediaFile = new File(mediaPaths[0]);
    if (!mediaFile.exists()) {
        jobFailed(null, 7000001, "S3 media path invalid");
        return result;
    }

    try {
        final AWSCredentials credentials = new BasicAWSCredentials(
                mContext.getString(R.string.s3_key), mContext.getString(R.string.s3_secret));

        Log.i(TAG, "upload file: " + mediaFile.getName());

        AmazonS3Client s3Client = new AmazonS3Client(credentials, s3Config);
        TransferManager transferManager = new TransferManager(s3Client);
        Upload upload = transferManager.upload(bucket, pathPrefix + mediaFile.getName(), mediaFile);
        result = upload.waitForUploadResult();
    } catch (Exception e) {
        Timber.e("upload error: " + e.getMessage());
        jobFailed(null, 7000002, "S3 upload failed: " + e.getMessage());
    }

    return result;
}
@Override
protected void onPostExecute(UploadResult result) {
    if (null != result) {
        Log.i(TAG, "upload result: " + result.getKey());
        String url = "https://s3-us-west-1.amazonaws.com/" + bucket + "/" + result.getKey();
        jobSucceeded(url);
    } else {
        jobFailed(null, 7000002, "S3 upload failed: PostExecute");
    }
}
public Future<UploadResult> getFuture() {
    return futureReference.get();
}
public UploadResult get() throws Exception {
    return mUpload.waitForUploadResult();
}
/**
 * Non-blocking call that will throw any Exceptions in the traditional
 * manner on access.
 *
 * @param key
 *            with which to store data
 * @param value
 *            Data value
 * @return Lazily evaluated result of the put operation
 */
public Eval<UploadResult> putAsync(String key, Object value) {
    return Eval.later(() -> put(key, value))
               .map(t -> t.orElse(null))
               .map(FluentFunctions.ofChecked(up -> up.waitForUploadResult()));
}
/**
 * Waits for this upload to complete and returns the result of this upload.
 * This is a blocking call. Be prepared to handle errors when calling this
 * method. Any errors that occurred during the asynchronous transfer will be
 * re-thrown through this method.
 *
 * @return The result of this transfer.
 *
 * @throws AmazonClientException
 *             If any errors were encountered in the client while making the
 *             request or handling the response.
 * @throws AmazonServiceException
 *             If any errors occurred in Amazon S3 while processing the
 *             request.
 * @throws InterruptedException
 *             If this thread is interrupted while waiting for the upload to
 *             complete.
 */
public UploadResult waitForUploadResult()
        throws AmazonClientException, AmazonServiceException, InterruptedException;
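// A hedged caller sketch for the contract documented above: block for the
// result and handle each declared failure mode. The bucket, key, and file
// names are illustrative only.
import com.amazonaws.AmazonClientException;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;
import com.amazonaws.services.s3.transfer.Upload;
import com.amazonaws.services.s3.transfer.model.UploadResult;

import java.io.File;

public class WaitForUploadResultSketch {
    public static void main(String[] args) {
        TransferManager tm = TransferManagerBuilder.defaultTransferManager();
        Upload upload = tm.upload("example-bucket", "example-key", new File("example.dat"));
        try {
            UploadResult result = upload.waitForUploadResult(); // blocks; re-throws transfer errors
            System.out.println("Uploaded, ETag=" + result.getETag());
        } catch (AmazonServiceException e) {        // catch the subclass first
            System.err.println("S3 rejected the request: " + e.getErrorMessage());
        } catch (AmazonClientException e) {
            System.err.println("Client-side failure: " + e.getMessage());
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();     // preserve interrupt status
        } finally {
            tm.shutdownNow(false);                  // keep the underlying client alive if shared
        }
    }
}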
/**
 * Blocking call.
 *
 * @param key
 *            with which to store data
 * @param value
 *            Data value
 * @return Try with completed result of operation
 */
public Try<UploadResult, Throwable> putSync(String key, Object value) {
    return put(key, value).map(FluentFunctions.ofChecked(i -> i.waitForUploadResult()));
}
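// A hedged usage sketch for the putSync/putAsync pair above, assuming
// cyclops-react's Try and Eval types and the hypothetical writer factory
// (buildWriterWithEncryption) from the benchmark test earlier; "report-key"
// and reportData are illustrative only.
S3ObjectWriter writer = buildWriterWithEncryption(false);

// Blocking: the Try carries either the UploadResult or the failure.
Try<UploadResult, Throwable> sync = writer.putSync("report-key", reportData);
if (sync.isSuccess()) {
    System.out.println("ETag: " + sync.orElse(null).getETag());
}

// Non-blocking: the Eval is lazy, so nothing uploads until it is evaluated,
// and any exception from the transfer surfaces at that point.
Eval<UploadResult> async = writer.putAsync("report-key", reportData);
UploadResult result = async.get();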