@Override
public void move(String sourceBlobName, String targetBlobName) throws IOException {
    try {
        CopyObjectRequest request = new CopyObjectRequest(blobStore.bucket(), buildKey(sourceBlobName),
                blobStore.bucket(), buildKey(targetBlobName));
        if (blobStore.serverSideEncryption()) {
            ObjectMetadata objectMetadata = new ObjectMetadata();
            objectMetadata.setSSEAlgorithm(ObjectMetadata.AES_256_SERVER_SIDE_ENCRYPTION);
            request.setNewObjectMetadata(objectMetadata);
        }
        SocketAccess.doPrivilegedVoid(() -> {
            blobStore.client().copyObject(request);
            blobStore.client().deleteObject(blobStore.bucket(), buildKey(sourceBlobName));
        });
    } catch (AmazonS3Exception e) {
        throw new IOException(e);
    }
}
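A minimal sketch (not from the original source) of the same copy-then-delete "rename" idiom against a plain client, since S3 offers no native move; the bucket and key names below are hypothetical placeholders.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.CopyObjectRequest;

public class S3RenameSketch {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        String bucket = "example-bucket";   // hypothetical
        String sourceKey = "path/old-name"; // hypothetical
        String targetKey = "path/new-name"; // hypothetical
        // S3 has no native rename: copy to the new key, then delete the old one.
        // Note this is not atomic; a failure between the two calls leaves both objects.
        s3.copyObject(new CopyObjectRequest(bucket, sourceKey, bucket, targetKey));
        s3.deleteObject(bucket, sourceKey);
    }
}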
@Override
public CopyObjectResult copyObject(CopyObjectRequest copyObjectRequest)
        throws AmazonClientException, AmazonServiceException {
    String sourceBlobName = copyObjectRequest.getSourceKey();
    String targetBlobName = copyObjectRequest.getDestinationKey();
    if (!blobs.containsKey(sourceBlobName)) {
        throw new AmazonS3Exception("Source blob [" + sourceBlobName + "] does not exist.");
    }
    if (blobs.containsKey(targetBlobName)) {
        throw new AmazonS3Exception("Target blob [" + targetBlobName + "] already exists.");
    }
    blobs.put(targetBlobName, blobs.get(sourceBlobName));
    return new CopyObjectResult(); // nothing is done with it
}
@Test
public void copyCheckTransferManagerIsShutdown() throws Exception {
    client.putObject("source", "data", inputData);
    Path sourceBaseLocation = new Path("s3://source/");
    Path replicaLocation = new Path("s3://target/");
    List<Path> sourceSubLocations = new ArrayList<>();
    TransferManagerFactory mockedTransferManagerFactory = Mockito.mock(TransferManagerFactory.class);
    TransferManager mockedTransferManager = Mockito.mock(TransferManager.class);
    when(mockedTransferManagerFactory.newInstance(any(AmazonS3.class), eq(s3S3CopierOptions)))
        .thenReturn(mockedTransferManager);
    Copy copy = Mockito.mock(Copy.class);
    when(mockedTransferManager.copy(any(CopyObjectRequest.class), any(AmazonS3.class),
        any(TransferStateChangeListener.class))).thenReturn(copy);
    TransferProgress transferProgress = new TransferProgress();
    when(copy.getProgress()).thenReturn(transferProgress);
    S3S3Copier s3s3Copier = new S3S3Copier(sourceBaseLocation, sourceSubLocations, replicaLocation,
        s3ClientFactory, mockedTransferManagerFactory, listObjectsRequestFactory, registry,
        s3S3CopierOptions);
    s3s3Copier.copy();
    verify(mockedTransferManager).shutdownNow();
}
@Test
public void copyCheckTransferManagerIsShutdownWhenSubmittingJobExceptionsAreThrown() throws Exception {
    client.putObject("source", "data", inputData);
    Path sourceBaseLocation = new Path("s3://source/");
    Path replicaLocation = new Path("s3://target/");
    List<Path> sourceSubLocations = new ArrayList<>();
    TransferManagerFactory mockedTransferManagerFactory = Mockito.mock(TransferManagerFactory.class);
    TransferManager mockedTransferManager = Mockito.mock(TransferManager.class);
    when(mockedTransferManagerFactory.newInstance(any(AmazonS3.class), eq(s3S3CopierOptions)))
        .thenReturn(mockedTransferManager);
    when(mockedTransferManager.copy(any(CopyObjectRequest.class), any(AmazonS3.class),
        any(TransferStateChangeListener.class))).thenThrow(new AmazonServiceException("MyCause"));
    S3S3Copier s3s3Copier = new S3S3Copier(sourceBaseLocation, sourceSubLocations, replicaLocation,
        s3ClientFactory, mockedTransferManagerFactory, listObjectsRequestFactory, registry,
        s3S3CopierOptions);
    try {
        s3s3Copier.copy();
        fail("exception should have been thrown");
    } catch (CircusTrainException e) {
        verify(mockedTransferManager).shutdownNow();
        assertThat(e.getCause().getMessage(), startsWith("MyCause"));
    }
}
@Test
public void copyCheckTransferManagerIsShutdownWhenCopyExceptionsAreThrown() throws Exception {
    client.putObject("source", "data", inputData);
    Path sourceBaseLocation = new Path("s3://source/");
    Path replicaLocation = new Path("s3://target/");
    List<Path> sourceSubLocations = new ArrayList<>();
    TransferManagerFactory mockedTransferManagerFactory = Mockito.mock(TransferManagerFactory.class);
    TransferManager mockedTransferManager = Mockito.mock(TransferManager.class);
    when(mockedTransferManagerFactory.newInstance(any(AmazonS3.class), eq(s3S3CopierOptions)))
        .thenReturn(mockedTransferManager);
    Copy copy = Mockito.mock(Copy.class);
    when(copy.getProgress()).thenReturn(new TransferProgress());
    when(mockedTransferManager.copy(any(CopyObjectRequest.class), any(AmazonS3.class),
        any(TransferStateChangeListener.class))).thenReturn(copy);
    doThrow(new AmazonClientException("cause")).when(copy).waitForCompletion();
    S3S3Copier s3s3Copier = new S3S3Copier(sourceBaseLocation, sourceSubLocations, replicaLocation,
        s3ClientFactory, mockedTransferManagerFactory, listObjectsRequestFactory, registry,
        s3S3CopierOptions);
    try {
        s3s3Copier.copy();
        fail("exception should have been thrown");
    } catch (CircusTrainException e) {
        verify(mockedTransferManager).shutdownNow();
        assertThat(e.getCause().getMessage(), is("cause"));
    }
}
/**
 * Constructs a new watcher for a copy operation, and then immediately submits
 * it to the thread pool.
 *
 * @param manager
 *            The {@link TransferManager} that owns this copy request.
 * @param threadPool
 *            The {@link ExecutorService} to which we should submit new
 *            tasks.
 * @param multipartCopyCallable
 *            The callable responsible for processing the copy
 *            asynchronously.
 * @param copyObjectRequest
 *            The original CopyObject request.
 */
public static CopyMonitor create(TransferManager manager, CopyImpl transfer,
        ExecutorService threadPool, CopyCallable multipartCopyCallable,
        CopyObjectRequest copyObjectRequest, ProgressListenerChain progressListenerChain) {
    CopyMonitor copyMonitor = new CopyMonitor(manager, transfer, threadPool,
            multipartCopyCallable, copyObjectRequest, progressListenerChain);
    Future<CopyResult> thisFuture = threadPool.submit(copyMonitor);
    // Use an atomic compareAndSet to prevent a possible race between the
    // setting of the CopyMonitor's futureReference and the setting of the
    // CompleteMultipartCopy's futureReference within the call() method.
    // We only want to set futureReference to CopyMonitor's future if the
    // current value is null; otherwise the futureReference that is set is
    // CompleteMultipartCopy's, which is ultimately what we want.
    copyMonitor.futureReference.compareAndSet(null, thisFuture);
    return copyMonitor;
}
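The compareAndSet guard above is easier to see in isolation. Below is a runnable plain-JDK sketch (not SDK code; all names are illustrative) showing that compareAndSet(null, ...) refuses to overwrite a Future another thread has already installed:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicReference;

public class FutureReferenceSketch {
    static final AtomicReference<Future<String>> futureReference = new AtomicReference<>();

    public static void main(String[] args) throws Exception {
        ExecutorService pool = Executors.newFixedThreadPool(2);
        Future<String> inner = pool.submit(() -> "inner result");
        // Simulates call() installing the "real" (inner) future first...
        futureReference.set(inner);
        Future<String> outer = pool.submit(() -> "outer result");
        // ...so this compareAndSet is a no-op and the inner future survives,
        // exactly the behavior the CopyMonitor comment describes.
        futureReference.compareAndSet(null, outer);
        System.out.println(futureReference.get() == inner); // prints: true
        pool.shutdown();
    }
}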
/**
 * Puts an object; copies that object to a new bucket; downloads the object from the new
 * bucket; compares checksums of the original and the copied object.
 *
 * @throws Exception if an Exception occurs
 */
@Test
public void shouldCopyObject() throws Exception {
    final File uploadFile = new File(UPLOAD_FILE_NAME);
    final String sourceKey = UPLOAD_FILE_NAME;
    final String destinationBucketName = "destinationBucket";
    final String destinationKey = "copyOf/" + sourceKey;
    final PutObjectResult putObjectResult =
        s3Client.putObject(new PutObjectRequest(BUCKET_NAME, sourceKey, uploadFile));
    final CopyObjectRequest copyObjectRequest =
        new CopyObjectRequest(BUCKET_NAME, sourceKey, destinationBucketName, destinationKey);
    s3Client.copyObject(copyObjectRequest);
    final com.amazonaws.services.s3.model.S3Object copiedObject =
        s3Client.getObject(destinationBucketName, destinationKey);
    final String copiedHash = HashUtil.getDigest(copiedObject.getObjectContent());
    copiedObject.close();
    assertThat("Source file and copied file should have the same hash",
        copiedHash, is(equalTo(putObjectResult.getETag())));
}
/**
 * Puts an object; copies that object to a new bucket with KMS encryption; then verifies
 * the copy's ETag and content length against the original upload.
 *
 * @throws Exception if an Exception occurs
 */
@Test
public void shouldCopyObjectEncrypted() throws Exception {
    final File uploadFile = new File(UPLOAD_FILE_NAME);
    final String sourceKey = UPLOAD_FILE_NAME;
    final String destinationBucketName = "destinationBucket";
    final String destinationKey = "copyOf/" + sourceKey;

    s3Client.putObject(new PutObjectRequest(BUCKET_NAME, sourceKey, uploadFile));
    final CopyObjectRequest copyObjectRequest =
        new CopyObjectRequest(BUCKET_NAME, sourceKey, destinationBucketName, destinationKey);
    copyObjectRequest.setSSEAwsKeyManagementParams(new SSEAwsKeyManagementParams(TEST_ENC_KEYREF));

    final CopyObjectResult copyObjectResult = s3Client.copyObject(copyObjectRequest);
    final ObjectMetadata metadata =
        s3Client.getObjectMetadata(destinationBucketName, destinationKey);

    final InputStream uploadFileIS = new FileInputStream(uploadFile);
    final String uploadHash = HashUtil.getDigest(TEST_ENC_KEYREF, uploadFileIS);
    assertThat("ETag should match", copyObjectResult.getETag(), is(uploadHash));
    assertThat("Files should have the same length", metadata.getContentLength(),
        is(uploadFile.length()));
}
/**
 * Tests that an object won't be copied with a wrong encryption key.
 */
@Test
public void shouldNotObjectCopyWithWrongEncryptionKey() {
    final File uploadFile = new File(UPLOAD_FILE_NAME);
    final String sourceKey = UPLOAD_FILE_NAME;
    final String destinationBucketName = "destinationBucket";
    final String destinationKey = "copyOf" + sourceKey;
    s3Client.putObject(new PutObjectRequest(BUCKET_NAME, sourceKey, uploadFile));
    final CopyObjectRequest copyObjectRequest =
        new CopyObjectRequest(BUCKET_NAME, sourceKey, destinationBucketName, destinationKey);
    copyObjectRequest.setSSEAwsKeyManagementParams(new SSEAwsKeyManagementParams(TEST_WRONG_KEYREF));

    thrown.expect(AmazonS3Exception.class);
    thrown.expectMessage(containsString("Status Code: 400; Error Code: KMS.NotFoundException"));
    s3Client.copyObject(copyObjectRequest);
}
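For application code, the KMS re-encryption pattern these two tests exercise looks roughly as follows. This is a hedged sketch, not from the original sources; the bucket names, keys, and KMS key id are hypothetical placeholders.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.CopyObjectRequest;
import com.amazonaws.services.s3.model.SSEAwsKeyManagementParams;

public class KmsCopySketch {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        CopyObjectRequest request = new CopyObjectRequest(
            "source-bucket", "report.pdf", "destination-bucket", "copyOf/report.pdf");
        // Re-encrypt the copy under a specific KMS key. An invalid key id fails
        // with 400 KMS.NotFoundException, as the negative test above expects.
        request.setSSEAwsKeyManagementParams(
            new SSEAwsKeyManagementParams("hypothetical-kms-key-id"));
        s3.copyObject(request);
    }
}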
@Override
protected void storeMimeType(BinaryValue binaryValue, String mimeType) throws BinaryStoreException {
    try {
        String key = binaryValue.getKey().toString();
        ObjectMetadata metadata = s3Client.getObjectMetadata(bucketName, key);
        metadata.setContentType(mimeType);
        // Update the object in place
        CopyObjectRequest copyRequest = new CopyObjectRequest(bucketName, key, bucketName, key);
        copyRequest.setNewObjectMetadata(metadata);
        s3Client.copyObject(copyRequest);
    } catch (AmazonClientException e) {
        throw new BinaryStoreException(e);
    }
}
private void setS3ObjectUserProperty(BinaryKey binaryKey, String metadataKey, String metadataValue)
        throws BinaryStoreException {
    try {
        String key = binaryKey.toString();
        ObjectMetadata metadata = s3Client.getObjectMetadata(bucketName, key);
        Map<String, String> userMetadata = metadata.getUserMetadata();

        if (null != metadataValue && metadataValue.equals(userMetadata.get(metadataKey))) {
            return; // The key/value pair already exists in user metadata; skip the update
        }

        userMetadata.put(metadataKey, metadataValue);
        metadata.setUserMetadata(userMetadata);

        // Update the object in place
        CopyObjectRequest copyRequest = new CopyObjectRequest(bucketName, key, bucketName, key);
        copyRequest.setNewObjectMetadata(metadata);
        s3Client.copyObject(copyRequest);
    } catch (AmazonClientException e) {
        throw new BinaryStoreException(e);
    }
}
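Both methods above rely on the same S3 idiom: object metadata cannot be edited in place, so the object is copied onto itself with replacement metadata. A self-contained sketch of that idiom (not from the original source; bucket, key, and the metadata entry are hypothetical):

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.CopyObjectRequest;
import com.amazonaws.services.s3.model.ObjectMetadata;

public class MetadataUpdateSketch {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        String bucket = "example-bucket"; // hypothetical
        String key = "example-key";       // hypothetical
        // Fetch ALL current metadata first; the self-copy replaces it wholesale,
        // so anything omitted here would be lost.
        ObjectMetadata metadata = s3.getObjectMetadata(bucket, key);
        metadata.addUserMetadata("reviewed", "true"); // hypothetical user entry
        s3.copyObject(new CopyObjectRequest(bucket, key, bucket, key)
            .withNewObjectMetadata(metadata));
    }
}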
@Test
public void testStoreMimeType() throws BinaryStoreException {
    expect(s3Client.getObjectMetadata(BUCKET, TEST_KEY)).andReturn(new ObjectMetadata());
    Capture<CopyObjectRequest> copyRequestCapture = Capture.newInstance();
    expect(s3Client.copyObject(capture(copyRequestCapture))).andReturn(null);
    replayAll();

    BinaryValue binaryValue = createBinaryValue(TEST_KEY, TEST_CONTENT);
    s3BinaryStore.storeMimeType(binaryValue, TEST_MIME);

    CopyObjectRequest copyRequest = copyRequestCapture.getValue();
    assertEquals(BUCKET, copyRequest.getSourceBucketName());
    assertEquals(BUCKET, copyRequest.getDestinationBucketName());
    assertEquals(TEST_KEY, copyRequest.getSourceKey());
    assertEquals(TEST_KEY, copyRequest.getDestinationKey());
    assertEquals(TEST_MIME, copyRequest.getNewObjectMetadata().getContentType());
}
@Test
public void testStoreExtractedText() throws BinaryStoreException {
    String extractedText = "text-that-has-been-extracted";
    expect(s3Client.getObjectMetadata(BUCKET, TEST_KEY)).andReturn(new ObjectMetadata());
    Capture<CopyObjectRequest> copyRequestCapture = Capture.newInstance();
    expect(s3Client.copyObject(capture(copyRequestCapture))).andReturn(null);
    replayAll();

    BinaryValue binaryValue = createBinaryValue(TEST_KEY, TEST_CONTENT);
    s3BinaryStore.storeExtractedText(binaryValue, extractedText);

    CopyObjectRequest copyRequest = copyRequestCapture.getValue();
    assertEquals(BUCKET, copyRequest.getSourceBucketName());
    assertEquals(BUCKET, copyRequest.getDestinationBucketName());
    assertEquals(TEST_KEY, copyRequest.getSourceKey());
    assertEquals(TEST_KEY, copyRequest.getDestinationKey());
    assertEquals(extractedText,
        copyRequest.getNewObjectMetadata().getUserMetadata().get(s3BinaryStore.EXTRACTED_TEXT_KEY));
}
@Test
public void testStoreValueExisting() throws BinaryStoreException, IOException {
    String valueToStore = "value-to-store";
    expect(s3Client.doesObjectExist(eq(BUCKET), isA(String.class))).andReturn(true);
    expect(s3Client.getObjectMetadata(eq(BUCKET), isA(String.class)))
        .andReturn(new ObjectMetadata());
    ObjectMetadata objMeta = new ObjectMetadata();
    Map<String, String> userMeta = new HashMap<>();
    userMeta.put(s3BinaryStore.UNUSED_KEY, String.valueOf(true));
    objMeta.setUserMetadata(userMeta);
    Capture<CopyObjectRequest> copyRequestCapture = Capture.newInstance();
    expect(s3Client.copyObject(capture(copyRequestCapture))).andReturn(null);
    replayAll();

    s3BinaryStore.storeValue(new StringInputStream(valueToStore), true);

    ObjectMetadata newObjMeta = copyRequestCapture.getValue().getNewObjectMetadata();
    assertEquals(String.valueOf(true), newObjMeta.getUserMetadata().get(s3BinaryStore.UNUSED_KEY));
}
@Test
public void testMarkAsUsed() throws BinaryStoreException {
    ObjectMetadata objMeta = new ObjectMetadata();
    Map<String, String> userMeta = new HashMap<>();
    // Existing value of the unused property is true (so the file is considered not used)
    userMeta.put(s3BinaryStore.UNUSED_KEY, String.valueOf(true));
    objMeta.setUserMetadata(userMeta);
    expect(s3Client.getObjectMetadata(eq(BUCKET), isA(String.class))).andReturn(objMeta);
    Capture<CopyObjectRequest> copyRequestCapture = Capture.newInstance();
    expect(s3Client.copyObject(capture(copyRequestCapture))).andReturn(null);
    replayAll();

    s3BinaryStore.markAsUsed(Collections.singleton(new BinaryKey(TEST_KEY)));

    ObjectMetadata newObjMeta = copyRequestCapture.getValue().getNewObjectMetadata();
    assertEquals(String.valueOf(false), newObjMeta.getUserMetadata().get(s3BinaryStore.UNUSED_KEY));
}
@Test
public void testMarkAsUnused() throws BinaryStoreException {
    ObjectMetadata objMeta = new ObjectMetadata();
    Map<String, String> userMeta = new HashMap<>();
    // Existing value of the unused property is false (so the file is considered used)
    userMeta.put(s3BinaryStore.UNUSED_KEY, String.valueOf(false));
    objMeta.setUserMetadata(userMeta);
    expect(s3Client.getObjectMetadata(eq(BUCKET), isA(String.class))).andReturn(objMeta);
    Capture<CopyObjectRequest> copyRequestCapture = Capture.newInstance();
    expect(s3Client.copyObject(capture(copyRequestCapture))).andReturn(null);
    replayAll();

    s3BinaryStore.markAsUnused(Collections.singleton(new BinaryKey(TEST_KEY)));

    ObjectMetadata newObjMeta = copyRequestCapture.getValue().getNewObjectMetadata();
    assertEquals(String.valueOf(true), newObjMeta.getUserMetadata().get(s3BinaryStore.UNUSED_KEY));
}
@Override
public void add(final String name) throws IOException {
    final Collection<String> friends = this.list();
    friends.add(name);
    final Ocket ocket = this.bucket.ocket(this.key());
    final ObjectMetadata meta = ocket.meta();
    meta.setUserMetadata(
        new ArrayMap<>(meta.getUserMetadata()).with(
            AwsFriends.HEADER,
            Joiner.on(';').join(friends)
        )
    );
    ocket.bucket().region().aws().copyObject(
        new CopyObjectRequest(
            ocket.bucket().name(), ocket.key(),
            ocket.bucket().name(), ocket.key()
        ).withNewObjectMetadata(meta)
    );
    final ObjectMetadata fmeta = new ObjectMetadata();
    fmeta.addUserMetadata(AwsDoc.HEADER, "true");
    this.bucket.ocket(String.format("%s/%s", name, this.label)).write(
        IOUtils.toInputStream(""),
        fmeta
    );
}
@Override
public void eject(final String name) throws IOException {
    final Collection<String> friends = this.list();
    friends.remove(name);
    final Ocket ocket = this.bucket.ocket(this.key());
    final ObjectMetadata meta = ocket.meta();
    meta.setUserMetadata(
        new ArrayMap<>(meta.getUserMetadata()).with(
            AwsFriends.HEADER,
            Joiner.on(';').join(friends)
        )
    );
    ocket.bucket().region().aws().copyObject(
        new CopyObjectRequest(
            ocket.bucket().name(), ocket.key(),
            ocket.bucket().name(), ocket.key()
        ).withNewObjectMetadata(meta)
    );
    this.bucket.remove(String.format("%s/%s", name, this.label));
}
@Override
public CopyObjectResult copyObject(CopyObjectRequest copyObjectRequest)
        throws AmazonClientException, AmazonServiceException {
    CopyObjectResult copyObjectResult = new CopyObjectResult();
    copyObjectResult.setETag("3a5c8b1ad448bca04584ecb55b836264");
    copyObjectResult.setVersionId("11192828ahsh2723");
    return copyObjectResult;
}
private void submitCopyJobsFromListing(
    AmazonS3URI sourceS3Uri,
    final AmazonS3URI targetS3Uri,
    ListObjectsRequest request,
    ObjectListing listing) {
  LOG.debug("Found objects to copy {}, for request {}/{}", listing.getObjectSummaries(),
      request.getBucketName(), request.getPrefix());
  List<S3ObjectSummary> objectSummaries = listing.getObjectSummaries();
  for (final S3ObjectSummary s3ObjectSummary : objectSummaries) {
    String fileName = StringUtils.removeStart(s3ObjectSummary.getKey(), sourceS3Uri.getKey());
    final String targetKey = Strings.nullToEmpty(targetS3Uri.getKey()) + fileName;
    LOG.info("copying object from '{}/{}' to '{}/{}'", s3ObjectSummary.getBucketName(),
        s3ObjectSummary.getKey(), targetS3Uri.getBucket(), targetKey);
    CopyObjectRequest copyObjectRequest = new CopyObjectRequest(s3ObjectSummary.getBucketName(),
        s3ObjectSummary.getKey(), targetS3Uri.getBucket(), targetKey);
    TransferStateChangeListener stateChangeListener = new TransferStateChangeListener() {
      @Override
      public void transferStateChanged(Transfer transfer, TransferState state) {
        if (state == TransferState.Completed) {
          // NOTE: running progress does not seem to be reported correctly:
          // transfer.getProgress().getBytesTransferred() is always 0. The cause is
          // unclear, so we just log the total bytes once the transfer completes.
          LOG.debug("copied object from '{}/{}' to '{}/{}': {} bytes transferred",
              s3ObjectSummary.getBucketName(), s3ObjectSummary.getKey(),
              targetS3Uri.getBucket(), targetKey,
              transfer.getProgress().getTotalBytesToTransfer());
        }
      }
    };
    Copy copy = transferManager.copy(copyObjectRequest, srcClient, stateChangeListener);
    totalBytesToReplicate += copy.getProgress().getTotalBytesToTransfer();
    copyJobs.add(copy);
  }
}
@Override
public Parameters handleRequest(Parameters parameters, Context context) {
    context.getLogger().log("Input Function [" + context.getFunctionName() + "], Parameters ["
        + parameters + "]");

    // The archive location of the snapshot is decided by the alert flag
    String newFilename;
    if (parameters.getSendAlert()) {
        newFilename = parameters.getS3Key().replace("upload/", "archive/alerts/");
    } else {
        newFilename = parameters.getS3Key().replace("upload/", "archive/falsepositives/");
    }

    // Turn the first two hyphens into sub-directory separators in the file path
    // (a worked example of this rewrite follows below)
    newFilename = newFilename.replaceFirst("-", "/");
    newFilename = newFilename.replaceFirst("-", "/");

    // Using the S3 client, first copy the file to the archive, then delete the original
    AmazonS3 client = AmazonS3ClientBuilder.defaultClient();
    CopyObjectRequest copyObjectRequest = new CopyObjectRequest(parameters.getS3Bucket(),
        parameters.getS3Key(), parameters.getS3Bucket(), newFilename);
    client.copyObject(copyObjectRequest);
    DeleteObjectRequest deleteObjectRequest = new DeleteObjectRequest(parameters.getS3Bucket(),
        parameters.getS3Key());
    client.deleteObject(deleteObjectRequest);

    // Place the new location in the parameters
    parameters.setS3ArchivedKey(newFilename);
    context.getLogger().log("Output Function [" + context.getFunctionName() + "], Parameters ["
        + parameters + "]");
    return parameters;
}
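To make the key rewriting above concrete, here is a small worked example (the input key is hypothetical, not from the original source) tracing the prefix swap and the two replaceFirst calls:

public class KeyRewriteSketch {
    public static void main(String[] args) {
        String s3Key = "upload/2019-06-30-snapshot.jpg"; // hypothetical input key
        // Swap the prefix, as the alert branch above does:
        String newFilename = s3Key.replace("upload/", "archive/alerts/");
        // -> archive/alerts/2019-06-30-snapshot.jpg
        newFilename = newFilename.replaceFirst("-", "/");
        // -> archive/alerts/2019/06-30-snapshot.jpg
        newFilename = newFilename.replaceFirst("-", "/");
        // -> archive/alerts/2019/06/30-snapshot.jpg
        System.out.println(newFilename);
    }
}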
private void copyFile(String srcKey, String dstKey) throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("copyFile " + srcKey + " -> " + dstKey);
    }
    ObjectMetadata srcom = s3.getObjectMetadata(bucket, srcKey);
    final ObjectMetadata dstom = srcom.clone();
    if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
        dstom.setServerSideEncryption(serverSideEncryptionAlgorithm);
    }
    CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucket, srcKey, bucket, dstKey);
    copyObjectRequest.setCannedAccessControlList(cannedACL);
    copyObjectRequest.setNewObjectMetadata(dstom);

    ProgressListener progressListener = new ProgressListener() {
        public void progressChanged(ProgressEvent progressEvent) {
            switch (progressEvent.getEventCode()) {
            case ProgressEvent.PART_COMPLETED_EVENT_CODE:
                statistics.incrementWriteOps(1);
                break;
            default:
                break;
            }
        }
    };

    Copy copy = transfers.copy(copyObjectRequest);
    copy.addProgressListener(progressListener);
    try {
        copy.waitForCopyResult();
        statistics.incrementWriteOps(1);
    } catch (InterruptedException e) {
        throw new IOException("Got interrupted, cancelling");
    }
}
@Override
public void changeObjectStorageClass(String bucketName, String key, StorageClass newStorageClass)
        throws SdkClientException, AmazonServiceException {
    rejectNull(bucketName,
        "The bucketName parameter must be specified when changing an object's storage class");
    rejectNull(key,
        "The key parameter must be specified when changing an object's storage class");
    rejectNull(newStorageClass,
        "The newStorageClass parameter must be specified when changing an object's storage class");
    copyObject(new CopyObjectRequest(bucketName, key, bucketName, key)
        .withStorageClass(newStorageClass.toString()));
}
@Override
public void setObjectRedirectLocation(String bucketName, String key, String newRedirectLocation)
        throws SdkClientException, AmazonServiceException {
    rejectNull(bucketName,
        "The bucketName parameter must be specified when setting an object's redirect location");
    rejectNull(key,
        "The key parameter must be specified when setting an object's redirect location");
    rejectNull(newRedirectLocation,
        "The newRedirectLocation parameter must be specified when setting an object's redirect location");
    copyObject(new CopyObjectRequest(bucketName, key, bucketName, key)
        .withRedirectLocation(newRedirectLocation));
}
@Override
public CopyObjectResult copyObject(String sourceBucketName, String sourceKey,
        String destinationBucketName, String destinationKey)
        throws SdkClientException, AmazonServiceException {
    return copyObject(new CopyObjectRequest(sourceBucketName, sourceKey,
        destinationBucketName, destinationKey));
}
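A hedged usage sketch of the three wrapper methods above from a caller's perspective; the bucket and key names are hypothetical, and each call performs a copy (or self-copy) under the hood, as the implementations show.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.StorageClass;

public class ConvenienceWrapperSketch {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        // Self-copy that only changes the storage class of an existing object.
        s3.changeObjectStorageClass("example-bucket", "example-key",
            StorageClass.StandardInfrequentAccess);
        // Self-copy that only sets a website redirect location.
        s3.setObjectRedirectLocation("example-bucket", "example-key", "/other-key");
        // Plain copy between buckets using the four-argument overload.
        s3.copyObject("example-bucket", "example-key", "other-bucket", "example-key");
    }
}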
public CopyCallable(TransferManager transferManager, ExecutorService threadPool, CopyImpl copy,
        CopyObjectRequest copyObjectRequest, ObjectMetadata metadata,
        ProgressListenerChain progressListenerChain) {
    this.s3 = transferManager.getAmazonS3Client();
    this.configuration = transferManager.getConfiguration();
    this.threadPool = threadPool;
    this.copyObjectRequest = copyObjectRequest;
    this.metadata = metadata;
    this.listenerChain = progressListenerChain;
    this.copy = copy;
}
private CopyMonitor(TransferManager manager, CopyImpl transfer, ExecutorService threadPool,
        CopyCallable multipartCopyCallable, CopyObjectRequest copyObjectRequest,
        ProgressListenerChain progressListenerChain) {
    this.s3 = manager.getAmazonS3Client();
    this.multipartCopyCallable = multipartCopyCallable;
    this.origReq = copyObjectRequest;
    this.listener = progressListenerChain;
    this.transfer = transfer;
    this.threadPool = threadPool;
}
public CopyPartRequestFactory(CopyObjectRequest origReq, String uploadId,
        long optimalPartSize, long contentLength) {
    this.origReq = origReq;
    this.uploadId = uploadId;
    this.optimalPartSize = optimalPartSize;
    this.remainingBytes = contentLength;
}
public CompleteMultipartCopy(String uploadId, AmazonS3 s3, CopyObjectRequest copyObjectRequest,
        List<Future<PartETag>> futures, ProgressListenerChain progressListenerChain,
        CopyMonitor monitor) {
    this.uploadId = uploadId;
    this.s3 = s3;
    this.origReq = copyObjectRequest;
    this.futures = futures;
    this.listener = progressListenerChain;
    this.monitor = monitor;
}
private void copyFile(String srcKey, String dstKey) throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("copyFile " + srcKey + " -> " + dstKey);
    }
    ObjectMetadata srcom = s3.getObjectMetadata(bucket, srcKey);
    final ObjectMetadata dstom = srcom.clone();
    if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
        dstom.setSSEAlgorithm(serverSideEncryptionAlgorithm);
    }
    CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucket, srcKey, bucket, dstKey);
    copyObjectRequest.setCannedAccessControlList(cannedACL);
    copyObjectRequest.setNewObjectMetadata(dstom);

    ProgressListener progressListener = new ProgressListener() {
        public void progressChanged(ProgressEvent progressEvent) {
            switch (progressEvent.getEventType()) {
            case TRANSFER_PART_COMPLETED_EVENT:
                statistics.incrementWriteOps(1);
                break;
            default:
                break;
            }
        }
    };

    Copy copy = transfers.copy(copyObjectRequest);
    copy.addProgressListener(progressListener);
    try {
        copy.waitForCopyResult();
        statistics.incrementWriteOps(1);
    } catch (InterruptedException e) {
        throw new IOException("Got interrupted, cancelling");
    }
}
@Override
public CompletableFuture<Void> moveObject(String fromBucketName, String fromKey,
        String toBucketName, String toKey) {
    CopyObjectRequest copyObjectRequest =
        new CopyObjectRequest(fromBucketName, fromKey, toBucketName, toKey);
    DeleteObjectRequest deleteObjectRequest = new DeleteObjectRequest(fromBucketName, fromKey);
    return CompletableFuture.runAsync(
        () -> {
            s3Client.copyObject(copyObjectRequest);
            s3Client.deleteObject(deleteObjectRequest);
        },
        executorService);
}
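A caller-side sketch of consuming the returned CompletableFuture; the S3AsyncStore host interface and the archive method are hypothetical glue, not part of the original source.

import java.util.concurrent.CompletableFuture;

interface S3AsyncStore { // hypothetical host interface for the method above
    CompletableFuture<Void> moveObject(String fromBucketName, String fromKey,
                                       String toBucketName, String toKey);
}

class MoveCaller {
    void archive(S3AsyncStore store) {
        CompletableFuture<Void> move =
            store.moveObject("source-bucket", "old/key", "target-bucket", "new/key");
        move.whenComplete((ignored, error) -> {
            if (error != null) {
                // Copy-then-delete is not atomic: on failure the source object
                // may still exist, and the target may or may not have been written.
                error.printStackTrace();
            }
        });
    }
}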
/**
 * Adds gzip content-encoding metadata to S3 objects.
 *
 * <p>All objects beneath the specified prefix (i.e. folder) will have the
 * metadata added. When the bucket serves objects it will then add a
 * suitable Content-Encoding header.
 *
 * @param bucketName the bucket to apply the metadata to.
 * @param prefix prefix within the bucket, beneath which to apply the metadata.
 * @param logger a CloudwatchLogs logger.
 */
public static void addGzipContentEncodingMetadata(String bucketName,
    Optional<String> prefix, LambdaLogger logger) {
  // To add new metadata, we must copy each object to itself.
  ListObjectsRequest listObjectsRequest;
  if (prefix.isPresent()) {
    logger.log("Setting gzip content encoding metadata on bucket: " + bucketName
        + " and prefix: " + prefix.get());
    listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName)
        .withPrefix(prefix.get());
  } else {
    logger.log("Setting gzip content encoding metadata on bucket: " + bucketName);
    listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName);
  }

  ObjectListing objectListing;
  AmazonS3 client = TransferManagerBuilder.defaultTransferManager().getAmazonS3Client();
  do {
    objectListing = client.listObjects(listObjectsRequest);
    for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
      String key = objectSummary.getKey();
      logger.log("Setting metadata for S3 object: " + key);
      // We must specify ALL metadata - not just the one we're adding.
      ObjectMetadata objectMetadata = client.getObjectMetadata(bucketName, key);
      objectMetadata.setContentEncoding("gzip");
      CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucketName, key,
          bucketName, key).withNewObjectMetadata(objectMetadata)
          .withCannedAccessControlList(CannedAccessControlList.PublicRead);
      client.copyObject(copyObjectRequest);
      logger.log("Set metadata for S3 object: " + key);
    }
    listObjectsRequest.setMarker(objectListing.getNextMarker());
  } while (objectListing.isTruncated());
  logger.log("Set gzip content encoding metadata on bucket");
}
/**
 * Adds a Cache-Control header to S3 objects.
 *
 * <p>All objects beneath the specified prefix (i.e. folder), and with the
 * specified extension, will have the header added. When the bucket serves
 * objects it will then add a suitable Cache-Control header.
 *
 * @param headerValue value of the cache-control header
 * @param bucketName the bucket to apply the header to.
 * @param prefix prefix within the bucket, beneath which to apply the header.
 * @param extension file extension to apply the header to
 * @param logger a CloudwatchLogs logger.
 */
public static void addCacheControlHeader(String headerValue, String bucketName,
    Optional<String> prefix, String extension, LambdaLogger logger) {
  // To add new metadata, we must copy each object to itself.
  ListObjectsRequest listObjectsRequest;
  if (prefix.isPresent()) {
    logger.log("Setting cache-control metadata: " + headerValue + ", on bucket: " + bucketName
        + " and prefix: " + prefix.get() + " and extension: " + extension);
    listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName)
        .withPrefix(prefix.get());
  } else {
    logger.log("Setting cache-control metadata: " + headerValue + ", on bucket: " + bucketName
        + " and extension: " + extension);
    listObjectsRequest = new ListObjectsRequest().withBucketName(bucketName);
  }

  ObjectListing objectListing;
  AmazonS3 client = TransferManagerBuilder.defaultTransferManager().getAmazonS3Client();
  do {
    objectListing = client.listObjects(listObjectsRequest);
    for (S3ObjectSummary objectSummary : objectListing.getObjectSummaries()) {
      String key = objectSummary.getKey();
      if (!key.endsWith(extension)) {
        continue;
      }
      logger.log("Setting metadata for S3 object: " + key);
      // We must specify ALL metadata - not just the one we're adding.
      ObjectMetadata objectMetadata = client.getObjectMetadata(bucketName, key);
      objectMetadata.setCacheControl(headerValue);
      CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucketName, key,
          bucketName, key).withNewObjectMetadata(objectMetadata)
          .withCannedAccessControlList(CannedAccessControlList.PublicRead);
      client.copyObject(copyObjectRequest);
      logger.log("Set metadata for S3 object: " + key);
    }
    listObjectsRequest.setMarker(objectListing.getNextMarker());
  } while (objectListing.isTruncated());
  logger.log("Set cache-control metadata on bucket");
}
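A hedged sketch of invoking the two helpers above from a Lambda handler; the FileUtils host class name and the handler class are assumptions, since the original host class is not shown here.

import java.util.Optional;
import com.amazonaws.services.lambda.runtime.LambdaLogger;

public class BucketMetadataHandler {
    public void handle(LambdaLogger logger) {
        // Gzip-encode everything under a hypothetical site prefix, then cache
        // HTML under that prefix for a week (604800 seconds).
        FileUtils.addGzipContentEncodingMetadata("example-bucket", Optional.of("site/"), logger);
        FileUtils.addCacheControlHeader("max-age=604800", "example-bucket",
            Optional.of("site/"), ".html", logger);
    }
}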
@Test
public void testRefreshPageThrowsWhenS3Throws() throws Exception {
    // ARRANGE
    thrown.expect(Exception.class);
    thrown.expectMessage("Exception caught while copying booking page to S3");

    initialisePageManager();

    // Make S3 throw:
    // The Transfer interface is implemented by Uploads, Downloads, and Copies
    Transfer mockTransfer = mockery.mock(Transfer.class);
    mockery.checking(new Expectations() {
        {
            allowing(mockTransfer).isDone();
            will(returnValue(true));
            allowing(mockTransfer).waitForCompletion();
        }
    });
    mockTransferManager = mockery.mock(IS3TransferManager.class);
    mockery.checking(new Expectations() {
        {
            oneOf(mockTransferManager).upload(with(any(PutObjectRequest.class)));
            will(throwException(new AmazonServiceException("Grrr...")));
            // Should throw before copy is called
            never(mockTransferManager).copy(with(any(CopyObjectRequest.class)));
        }
    });
    pageManager.setS3TransferManager(mockTransferManager);

    // ACT - this should throw
    pageManager.refreshPage(fakeCurrentDateString, validDates, apiGatewayBaseUrl, false,
        bookings, revvingSuffix);
}