Java Class com.amazonaws.services.s3.transfer.Copy Code Examples
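
The examples below show how the com.amazonaws.services.s3.transfer.Copy class (AWS SDK for Java v1) is used in several open-source projects. For orientation, here is a minimal, hypothetical sketch of the usual pattern: create a TransferManager, start an asynchronous copy, wait for it, then shut the manager down. The bucket and key names are placeholders, not taken from the projects below.

import com.amazonaws.services.s3.transfer.Copy;
import com.amazonaws.services.s3.transfer.TransferManager;
import com.amazonaws.services.s3.transfer.TransferManagerBuilder;

public class CopyQuickStart {
    public static void main(String[] args) throws InterruptedException {
        // Hypothetical bucket and key names, for illustration only.
        TransferManager transferManager = TransferManagerBuilder.standard().build();
        try {
            // copy() returns immediately; the Copy handle tracks the asynchronous transfer.
            Copy copy = transferManager.copy("source-bucket", "source/key", "target-bucket", "target/key");
            copy.waitForCompletion(); // blocks until the copy finishes, or throws on failure
        } finally {
            transferManager.shutdownNow();
        }
    }
}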

Project: circus-train    File: S3S3Copier.java
private Metrics gatherAllCopyResults() {
  AtomicLong bytesReplicated = new AtomicLong(0);
  registerRunningMetrics(bytesReplicated);
  for (Copy copyJob : copyJobs) {
    try {
      copyJob.waitForCompletion();
      long alreadyReplicated = bytesReplicated.addAndGet(copyJob.getProgress().getTotalBytesToTransfer());
      if (totalBytesToReplicate > 0) {
        LOG.info("Replicating...': {}% complete",
            String.format("%.0f", (alreadyReplicated / (double) totalBytesToReplicate) * 100.0));
      }
    } catch (InterruptedException e) {
      throw new CircusTrainException(e);
    }
  }
  ImmutableMap<String, Long> metrics = ImmutableMap.of(S3S3CopierMetrics.Metrics.TOTAL_BYTES_TO_REPLICATE.name(),
      totalBytesToReplicate);
  return new S3S3CopierMetrics(metrics, bytesReplicated.get());
}
Project: circus-train    File: S3S3CopierTest.java
@Test
public void copyCheckTransferManagerIsShutdown() throws Exception {
  client.putObject("source", "data", inputData);
  Path sourceBaseLocation = new Path("s3://source/");
  Path replicaLocation = new Path("s3://target/");
  List<Path> sourceSubLocations = new ArrayList<>();

  TransferManagerFactory mockedTransferManagerFactory = Mockito.mock(TransferManagerFactory.class);
  TransferManager mockedTransferManager = Mockito.mock(TransferManager.class);
  when(mockedTransferManagerFactory.newInstance(any(AmazonS3.class), eq(s3S3CopierOptions)))
      .thenReturn(mockedTransferManager);
  Copy copy = Mockito.mock(Copy.class);
  when(mockedTransferManager.copy(any(CopyObjectRequest.class), any(AmazonS3.class),
      any(TransferStateChangeListener.class))).thenReturn(copy);
  TransferProgress transferProgress = new TransferProgress();
  when(copy.getProgress()).thenReturn(transferProgress);
  S3S3Copier s3s3Copier = new S3S3Copier(sourceBaseLocation, sourceSubLocations, replicaLocation, s3ClientFactory,
      mockedTransferManagerFactory, listObjectsRequestFactory, registry, s3S3CopierOptions);
  s3s3Copier.copy();
  verify(mockedTransferManager).shutdownNow();
}
Project: circus-train    File: S3S3CopierTest.java
@Test
public void copyCheckTransferManagerIsShutdownWhenCopyExceptionsAreThrown() throws Exception {
  client.putObject("source", "data", inputData);
  Path sourceBaseLocation = new Path("s3://source/");
  Path replicaLocation = new Path("s3://target/");
  List<Path> sourceSubLocations = new ArrayList<>();

  TransferManagerFactory mockedTransferManagerFactory = Mockito.mock(TransferManagerFactory.class);
  TransferManager mockedTransferManager = Mockito.mock(TransferManager.class);
  when(mockedTransferManagerFactory.newInstance(any(AmazonS3.class), eq(s3S3CopierOptions)))
      .thenReturn(mockedTransferManager);
  Copy copy = Mockito.mock(Copy.class);
  when(copy.getProgress()).thenReturn(new TransferProgress());
  when(mockedTransferManager.copy(any(CopyObjectRequest.class), any(AmazonS3.class),
      any(TransferStateChangeListener.class))).thenReturn(copy);
  doThrow(new AmazonClientException("cause")).when(copy).waitForCompletion();
  S3S3Copier s3s3Copier = new S3S3Copier(sourceBaseLocation, sourceSubLocations, replicaLocation, s3ClientFactory,
      mockedTransferManagerFactory, listObjectsRequestFactory, registry, s3S3CopierOptions);
  try {
    s3s3Copier.copy();
    fail("exception should have been thrown");
  } catch (CircusTrainException e) {
    verify(mockedTransferManager).shutdownNow();
    assertThat(e.getCause().getMessage(), is("cause"));
  }
}
Project: aws-doc-sdk-examples    File: XferMgrCopy.java
public static void copyObjectSimple(String from_bucket, String from_key,
        String to_bucket, String to_key) {
    System.out.println("Copying s3 object: " + from_key);
    System.out.println("      from bucket: " + from_bucket);
    System.out.println("     to s3 object: " + to_bucket);
    System.out.println("        in bucket: " + to_key);

    TransferManager xfer_mgr = new TransferManager();
    try {
        Copy xfer = xfer_mgr.copy(from_bucket, from_key, to_bucket, to_key);
        // loop with Transfer.isDone()
        XferMgrProgress.showTransferProgress(xfer);
        // or block with Transfer.waitForCompletion()
        XferMgrProgress.waitForCompletion(xfer);
    } catch (AmazonServiceException e) {
        System.err.println(e.getErrorMessage());
        System.exit(1);
    }
    xfer_mgr.shutdownNow();
}
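
The XferMgrProgress helper referenced above is not shown in this listing. As a rough, hypothetical sketch (imports from com.amazonaws.services.s3.transfer assumed), a polling loop over Transfer.isDone(), along the lines of the first comment, might look like this:

// Hypothetical polling helper; this is not the actual XferMgrProgress implementation.
static void pollUntilDone(Copy xfer) throws InterruptedException {
    while (!xfer.isDone()) {
        TransferProgress progress = xfer.getProgress();
        System.out.printf("transferred %d of %d bytes (%.0f%%)%n",
                progress.getBytesTransferred(), progress.getTotalBytesToTransfer(),
                progress.getPercentTransferred());
        Thread.sleep(1000); // arbitrary poll interval
    }
}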
Project: circus-train    File: S3S3Copier.java
private void submitCopyJobsFromListing(
    AmazonS3URI sourceS3Uri,
    final AmazonS3URI targetS3Uri,
    ListObjectsRequest request,
    ObjectListing listing) {
  LOG.debug("Found objects to copy {}, for request {}/{}", listing.getObjectSummaries(), request.getBucketName(),
      request.getPrefix());
  List<S3ObjectSummary> objectSummaries = listing.getObjectSummaries();
  for (final S3ObjectSummary s3ObjectSummary : objectSummaries) {
    String fileName = StringUtils.removeStart(s3ObjectSummary.getKey(), sourceS3Uri.getKey());
    final String targetKey = Strings.nullToEmpty(targetS3Uri.getKey()) + fileName;
    LOG.info("copying object from '{}/{}' to '{}/{}'", s3ObjectSummary.getBucketName(), s3ObjectSummary.getKey(),
        targetS3Uri.getBucket(), targetKey);

    CopyObjectRequest copyObjectRequest = new CopyObjectRequest(s3ObjectSummary.getBucketName(),
        s3ObjectSummary.getKey(), targetS3Uri.getBucket(), targetKey);

    TransferStateChangeListener stateChangeListener = new TransferStateChangeListener() {

      @Override
      public void transferStateChanged(Transfer transfer, TransferState state) {
        if (state == TransferState.Completed) {
          // NOTE: running progress doesn't seem to be reported correctly:
          // transfer.getProgress().getBytesTransferred() is always 0. The cause is unclear at the
          // moment, so we just log the total bytes once the transfer has completed.
          LOG.debug("copied object from '{}/{}' to '{}/{}': {} bytes transferred", s3ObjectSummary.getBucketName(),
              s3ObjectSummary.getKey(), targetS3Uri.getBucket(), targetKey,
              transfer.getProgress().getTotalBytesToTransfer());
        }
      }
    };
    Copy copy = transferManager.copy(copyObjectRequest, srcClient, stateChangeListener);
    totalBytesToReplicate += copy.getProgress().getTotalBytesToTransfer();
    copyJobs.add(copy);
  }
}
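
Regarding the caveat in the inline comment about running progress: one hypothetical workaround (not part of circus-train) would be to attach a ProgressListener to the returned Copy and accumulate bytes from the progress events, similar to what the S3AFileSystem examples below do. Whether this avoids the zero-byte issue has not been verified here; bytesReplicated is an assumed AtomicLong in scope.

    // Hypothetical fragment: accumulate transferred bytes via com.amazonaws.event.ProgressListener.
    copy.addProgressListener(new ProgressListener() {
      @Override
      public void progressChanged(ProgressEvent progressEvent) {
        bytesReplicated.addAndGet(progressEvent.getBytesTransferred());
      }
    });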
Project: hadoop    File: S3AFileSystem.java
private void copyFile(String srcKey, String dstKey) throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("copyFile " + srcKey + " -> " + dstKey);
  }

  ObjectMetadata srcom = s3.getObjectMetadata(bucket, srcKey);
  final ObjectMetadata dstom = srcom.clone();
  if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
    dstom.setServerSideEncryption(serverSideEncryptionAlgorithm);
  }
  CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucket, srcKey, bucket, dstKey);
  copyObjectRequest.setCannedAccessControlList(cannedACL);
  copyObjectRequest.setNewObjectMetadata(dstom);

  ProgressListener progressListener = new ProgressListener() {
    public void progressChanged(ProgressEvent progressEvent) {
      switch (progressEvent.getEventCode()) {
        case ProgressEvent.PART_COMPLETED_EVENT_CODE:
          statistics.incrementWriteOps(1);
          break;
        default:
          break;
      }
    }
  };

  Copy copy = transfers.copy(copyObjectRequest);
  copy.addProgressListener(progressListener);
  try {
    copy.waitForCopyResult();
    statistics.incrementWriteOps(1);
  } catch (InterruptedException e) {
    throw new IOException("Got interrupted, cancelling");
  }
}
Project: S3Mock    File: AmazonClientUploadIT.java
/**
 * Verifies multipart copy.
 *
 * @throws InterruptedException if waiting for a transfer result is interrupted
 * @throws IOException if the source input stream cannot be read
 * @throws NoSuchAlgorithmException if the digest algorithm used for verification is unavailable
 */
@Test
public void multipartCopy() throws InterruptedException, IOException, NoSuchAlgorithmException {
  final int contentLen = 3 * _1MB;

  final ObjectMetadata objectMetadata = new ObjectMetadata();
  objectMetadata.setContentLength(contentLen);

  final String assumedSourceKey = UUID.randomUUID().toString();

  final Bucket sourceBucket = s3Client.createBucket(UUID.randomUUID().toString());
  final Bucket targetBucket = s3Client.createBucket(UUID.randomUUID().toString());

  final TransferManager transferManager = createTransferManager(_2MB, _1MB, _2MB, _1MB);

  final InputStream sourceInputStream = randomInputStream(contentLen);
  final Upload upload = transferManager
      .upload(sourceBucket.getName(), assumedSourceKey,
          sourceInputStream, objectMetadata);

  final UploadResult uploadResult = upload.waitForUploadResult();

  assertThat(uploadResult.getKey(), is(assumedSourceKey));

  final String assumedDestinationKey = UUID.randomUUID().toString();
  final Copy copy =
      transferManager.copy(sourceBucket.getName(), assumedSourceKey, targetBucket.getName(),
          assumedDestinationKey);
  final CopyResult copyResult = copy.waitForCopyResult();
  assertThat(copyResult.getDestinationKey(), is(assumedDestinationKey));

  final S3Object copiedObject = s3Client.getObject(targetBucket.getName(), assumedDestinationKey);

  assertThat("Hashes for source and target S3Object do not match.",
      HashUtil.getDigest(copiedObject.getObjectContent()) + "-1",
      is(uploadResult.getETag()));
}
Project: aliyun-oss-hadoop-fs    File: S3AFileSystem.java
private void copyFile(String srcKey, String dstKey) throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("copyFile " + srcKey + " -> " + dstKey);
  }

  ObjectMetadata srcom = s3.getObjectMetadata(bucket, srcKey);
  final ObjectMetadata dstom = srcom.clone();
  if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
    dstom.setSSEAlgorithm(serverSideEncryptionAlgorithm);
  }
  CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucket, srcKey, bucket, dstKey);
  copyObjectRequest.setCannedAccessControlList(cannedACL);
  copyObjectRequest.setNewObjectMetadata(dstom);

  ProgressListener progressListener = new ProgressListener() {
    public void progressChanged(ProgressEvent progressEvent) {
      switch (progressEvent.getEventType()) {
        case TRANSFER_PART_COMPLETED_EVENT:
          statistics.incrementWriteOps(1);
          break;
        default:
          break;
      }
    }
  };

  Copy copy = transfers.copy(copyObjectRequest);
  copy.addProgressListener(progressListener);
  try {
    copy.waitForCopyResult();
    statistics.incrementWriteOps(1);
  } catch (InterruptedException e) {
    throw new IOException("Got interrupted, cancelling");
  }
}
Project: big-c    File: S3AFileSystem.java
private void copyFile(String srcKey, String dstKey) throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("copyFile " + srcKey + " -> " + dstKey);
  }

  ObjectMetadata srcom = s3.getObjectMetadata(bucket, srcKey);
  final ObjectMetadata dstom = srcom.clone();
  if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
    dstom.setServerSideEncryption(serverSideEncryptionAlgorithm);
  }
  CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucket, srcKey, bucket, dstKey);
  copyObjectRequest.setCannedAccessControlList(cannedACL);
  copyObjectRequest.setNewObjectMetadata(dstom);

  ProgressListener progressListener = new ProgressListener() {
    public void progressChanged(ProgressEvent progressEvent) {
      switch (progressEvent.getEventCode()) {
        case ProgressEvent.PART_COMPLETED_EVENT_CODE:
          statistics.incrementWriteOps(1);
          break;
        default:
          break;
      }
    }
  };

  Copy copy = transfers.copy(copyObjectRequest);
  copy.addProgressListener(progressListener);
  try {
    copy.waitForCopyResult();
    statistics.incrementWriteOps(1);
  } catch (InterruptedException e) {
    throw new IOException("Got interrupted, cancelling");
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: S3AFileSystem.java
private void copyFile(String srcKey, String dstKey) throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("copyFile " + srcKey + " -> " + dstKey);
  }

  ObjectMetadata srcom = s3.getObjectMetadata(bucket, srcKey);
  final ObjectMetadata dstom = srcom.clone();
  if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
    dstom.setServerSideEncryption(serverSideEncryptionAlgorithm);
  }
  CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucket, srcKey, bucket, dstKey);
  copyObjectRequest.setCannedAccessControlList(cannedACL);
  copyObjectRequest.setNewObjectMetadata(dstom);

  ProgressListener progressListener = new ProgressListener() {
    public void progressChanged(ProgressEvent progressEvent) {
      switch (progressEvent.getEventCode()) {
        case ProgressEvent.PART_COMPLETED_EVENT_CODE:
          statistics.incrementWriteOps(1);
          break;
      }
    }
  };

  Copy copy = transfers.copy(copyObjectRequest);
  copy.addProgressListener(progressListener);
  try {
    copy.waitForCopyResult();
    statistics.incrementWriteOps(1);
  } catch (InterruptedException e) {
    throw new IOException("Got interrupted, cancelling");
  }
}
Project: herd    File: S3OperationsImpl.java
@Override
public Copy copyFile(CopyObjectRequest copyObjectRequest, TransferManager transferManager)
{
    return transferManager.copy(copyObjectRequest);
}
Project: herd    File: S3DaoTest.java
@Test
public void testPerformTransferAssertHandleFailedWithAmazonClientException() throws Exception
{
    S3Operations originalS3Operations = (S3Operations) ReflectionTestUtils.getField(s3Dao, "s3Operations");
    S3Operations mockS3Operations = mock(S3Operations.class);
    ReflectionTestUtils.setField(s3Dao, "s3Operations", mockS3Operations);

    // Shorten the sleep interval for faster tests
    long originalSleepIntervalsMillis = (long) ReflectionTestUtils.getField(s3Dao, "sleepIntervalsMillis");
    ReflectionTestUtils.setField(s3Dao, "sleepIntervalsMillis", 1l);

    try
    {
        S3FileCopyRequestParamsDto s3FileCopyRequestParamsDto = new S3FileCopyRequestParamsDto();
        s3FileCopyRequestParamsDto.setSourceBucketName("sourceBucketName");
        s3FileCopyRequestParamsDto.setSourceObjectKey("sourceObjectKey");
        s3FileCopyRequestParamsDto.setTargetBucketName("targetBucketName");
        s3FileCopyRequestParamsDto.setTargetObjectKey("targetObjectKey");
        s3FileCopyRequestParamsDto.setKmsKeyId("kmsKeyId");

        when(mockS3Operations.copyFile(any(), any())).then(new Answer<Copy>()
        {
            @Override
            public Copy answer(InvocationOnMock invocation) throws Throwable
            {
                Copy mockTransfer = mock(Copy.class);

                when(mockTransfer.getProgress()).thenReturn(new TransferProgress());
                when(mockTransfer.getState()).thenReturn(TransferState.Failed);
                when(mockTransfer.isDone()).thenReturn(true);
                when(mockTransfer.waitForException()).thenReturn(new AmazonClientException("message"));
                return mockTransfer;
            }
        });

        try
        {
            s3Dao.copyFile(s3FileCopyRequestParamsDto);
            fail();
        }
        catch (Exception e)
        {
            assertEquals(AmazonClientException.class, e.getClass());
            assertEquals("message", e.getMessage());
        }
    }
    finally
    {
        ReflectionTestUtils.setField(s3Dao, "s3Operations", originalS3Operations);
        ReflectionTestUtils.setField(s3Dao, "sleepIntervalsMillis", originalSleepIntervalsMillis);
    }
}
Project: herd    File: S3Operations.java
/**
 * Schedules a new transfer to copy data from one Amazon S3 location to another Amazon S3 location.
 *
 * @param copyObjectRequest the request containing all the parameters for the copy
 * @param transferManager the transfer manager implementation to use
 *
 * @return the {@link Copy} transfer handle that can be used to track the asynchronous copy
 */
public Copy copyFile(CopyObjectRequest copyObjectRequest, TransferManager transferManager);
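
A hedged sketch of how a caller might use this interface; the s3Operations and transferManager instances as well as the bucket and key names are hypothetical placeholders, not taken from herd:

// Hypothetical caller of the interface above; names are placeholders.
CopyObjectRequest request =
    new CopyObjectRequest("source-bucket", "source-key", "target-bucket", "target-key");
Copy copy = s3Operations.copyFile(request, transferManager);
copy.waitForCompletion(); // or poll copy.isDone() / copy.getState(), as the S3DaoTest example above does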