Java 类com.amazonaws.event.ProgressListener 实例源码

项目:ibm-cos-sdk-java    文件:AmazonHttpClient.java   
/**
 * Publishes the "request content length" event and prepares the request
 * content stream for progress tracking.
 *
 * @return the request content wrapped for progress monitoring, made
 *         mark-and-resettable if possible; {@code null} when the request
 *         carries no content
 */
private InputStream beforeRequest() {
    final ProgressListener listener = requestConfig.getProgressListener();
    reportContentLength(listener);
    final InputStream rawContent = request.getContent();
    if (rawContent == null) {
        return null;
    }
    final InputStream resettable = makeResettable(rawContent);
    final InputStream monitored = monitorStreamProgress(listener, buffer(resettable));
    if (AmazonHttpClient.unreliableTestConfig != null) {
        // Test hook: inject simulated stream failures when configured.
        return wrapWithUnreliableStream(monitored);
    }
    return monitored;
}
项目:ibm-cos-sdk-java    文件:AmazonWebServiceRequestTest.java   
/**
 * Asserts that the base state copied from {@code from} arrived intact on
 * {@code to}: custom headers, custom query parameters, progress listener,
 * credentials, metric collector, read limit and client markers.
 */
private static void verifyBaseAfterCopy(final ProgressListener listener,
        final AWSCredentials credentials,
        final RequestMetricCollector collector,
        final AmazonWebServiceRequest from, final AmazonWebServiceRequest to) {
    final Map<String, String> headers = to.getCustomRequestHeaders();
    assertTrue(headers.size() == 2);
    assertEquals("v1", headers.get("k1"));
    assertEquals("v2", headers.get("k2"));

    final Map<String, List<String>> params = to.getCustomQueryParameters();
    assertTrue(params.size() == 2);
    assertEquals(Arrays.asList("v1"), params.get("k1"));
    assertEquals(Arrays.asList("v2a", "v2b"), params.get("k2"));

    assertSame(listener, to.getGeneralProgressListener());
    assertSame(credentials, to.getRequestCredentials());
    assertSame(collector, to.getRequestMetricCollector());
    assertTrue(to.getReadLimit() == 1234);

    final RequestClientOptions toOptions = to.getRequestClientOptions();
    assertEquals(
            from.getRequestClientOptions().getClientMarker(Marker.USER_AGENT),
            toOptions.getClientMarker(Marker.USER_AGENT));
    assertTrue(toOptions.getReadLimit() == 1234);
}
项目:presto    文件:PrestoS3FileSystem.java   
private ProgressListener createProgressListener(Transfer transfer)
{
    return new ProgressListener()
    {
        // Last event type observed, so each distinct event is logged once.
        private ProgressEventType lastType;
        // Percentage recorded the last time progress was logged.
        private double lastLoggedPercent;

        @Override
        public synchronized void progressChanged(ProgressEvent progressEvent)
        {
            ProgressEventType currentType = progressEvent.getEventType();
            if (currentType != lastType) {
                log.debug("Upload progress event (%s/%s): %s", host, key, currentType);
                lastType = currentType;
            }

            // Log the percentage at most once per 10 points of progress.
            double percent = transfer.getProgress().getPercentTransferred();
            if (percent >= lastLoggedPercent + 10.0) {
                log.debug("Upload percentage (%s/%s): %.0f%%", host, key, percent);
                lastLoggedPercent = percent;
            }
        }
    };
}
项目:datacollector    文件:FileHelper.java   
/**
 * Submits the given stream for asynchronous upload to Amazon S3 and attaches
 * a listener that logs transfer lifecycle events for the target object.
 */
Upload doUpload(String bucket, String fileName, InputStream is, ObjectMetadata metadata) {
  final String object = bucket + s3TargetConfigBean.s3Config.delimiter + fileName;
  final PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, fileName, is, metadata);
  final Upload upload = transferManager.upload(putObjectRequest);
  // Log start/completion/failure of the transfer; other events are ignored.
  upload.addProgressListener((ProgressListener) progressEvent -> {
    switch (progressEvent.getEventType()) {
      case TRANSFER_STARTED_EVENT:
        LOG.debug("Started uploading object {} into Amazon S3", object);
        break;
      case TRANSFER_COMPLETED_EVENT:
        LOG.debug("Completed uploading object {} into Amazon S3", object);
        break;
      case TRANSFER_FAILED_EVENT:
        LOG.debug("Failed uploading object {} into Amazon S3", object);
        break;
      default:
        break;
    }
  });
  return upload;
}
项目:transcoder    文件:S3MovieRepository.java   
/**
 * Uploads the movie file to the S3 source bucket under the movie's id,
 * blocking until the transfer finishes.
 *
 * @param movie the movie whose file should be stored
 * @throws MovieNotStoredException if the upload fails or is interrupted;
 *         any in-flight multipart uploads are aborted first
 */
@Override
public void store( final Movie movie ) throws MovieNotStoredException
{
    final String key = movie.getMovieId().getMovieId();
    logger.info( "Uploading {} to S3 key {}", movie, key );
    final File movieFile = movie.getPath().toFile();
    final PutObjectRequest putObjectRequest = new PutObjectRequest( S3_BUCKET_HOOD_ETS_SOURCE, key, movieFile );
    final ProgressListener progressListener = new S3ProgressListener( key, movieFile.length() );
    try
    {
        final Upload upload = this.transferManager.upload( putObjectRequest );
        upload.addProgressListener( progressListener );
        upload.waitForCompletion();
    }
    catch ( AmazonClientException | InterruptedException e )
    {
        if ( e instanceof InterruptedException )
        {
            // Restore the interrupt flag so callers can observe the interruption.
            Thread.currentThread().interrupt();
        }
        // Clean up partially-uploaded multipart data before surfacing the failure.
        this.transferManager.abortMultipartUploads( S3_BUCKET_HOOD_ETS_SOURCE, new Date() );
        throw new MovieNotStoredException( movie, e );
    }
    logger.info( "Upload complete." );
}
项目:hadoop    文件:S3AFileSystem.java   
/**
 * Server-side copy of one object to another key in the same bucket,
 * preserving source metadata (re-applying server-side encryption when
 * configured). Blocks until the copy completes.
 *
 * @param srcKey source object key
 * @param dstKey destination object key
 * @throws IOException if the copy is interrupted
 */
private void copyFile(String srcKey, String dstKey) throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("copyFile " + srcKey + " -> " + dstKey);
  }

  ObjectMetadata srcom = s3.getObjectMetadata(bucket, srcKey);
  final ObjectMetadata dstom = srcom.clone();
  if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
    dstom.setServerSideEncryption(serverSideEncryptionAlgorithm);
  }
  CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucket, srcKey, bucket, dstKey);
  copyObjectRequest.setCannedAccessControlList(cannedACL);
  copyObjectRequest.setNewObjectMetadata(dstom);

  // Count each completed multipart chunk as one write operation.
  ProgressListener progressListener = new ProgressListener() {
    public void progressChanged(ProgressEvent progressEvent) {
      switch (progressEvent.getEventCode()) {
        case ProgressEvent.PART_COMPLETED_EVENT_CODE:
          statistics.incrementWriteOps(1);
          break;
        default:
          break;
      }
    }
  };

  Copy copy = transfers.copy(copyObjectRequest);
  copy.addProgressListener(progressListener);
  try {
    copy.waitForCopyResult();
    statistics.incrementWriteOps(1);
  } catch (InterruptedException e) {
    // Restore the interrupt flag and preserve the cause for diagnostics.
    Thread.currentThread().interrupt();
    throw new IOException("Got interrupted, cancelling", e);
  }
}
项目:ibm-cos-sdk-java    文件:AmazonHttpClient.java   
/**
 * Executes the request end-to-end with progress events published around the
 * attempt: runs the before-request handlers, merges configured headers and
 * query parameters into the request, wraps the content stream for progress
 * tracking and retry-safe resets, then delegates to {@code executeHelper()}.
 * The original content stream is always restored on the request in the
 * finally block, and the tracking stream is always closed so listeners
 * receive the final events.
 *
 * @return the service response
 * @throws InterruptedException if the execution is interrupted
 */
private Response<Output> doExecute() throws InterruptedException {
    runBeforeRequestHandlers();
    setSdkTransactionId(request);
    setUserAgent(request);

    ProgressListener listener = requestConfig.getProgressListener();
    // add custom headers
    request.getHeaders().putAll(config.getHeaders());
    request.getHeaders().putAll(requestConfig.getCustomRequestHeaders());
    // add custom query parameters
    mergeQueryParameters(requestConfig.getCustomQueryParameters());
    Response<Output> response = null;
    final InputStream origContent = request.getContent();
    final InputStream toBeClosed = beforeRequest(); // for progress tracking
    // make "notCloseable", so reset would work with retries
    final InputStream notCloseable = (toBeClosed == null) ? null
            : ReleasableInputStream.wrap(toBeClosed).disableClose();
    request.setContent(notCloseable);
    try {
        publishProgress(listener, ProgressEventType.CLIENT_REQUEST_STARTED_EVENT);
        response = executeHelper();
        publishProgress(listener, ProgressEventType.CLIENT_REQUEST_SUCCESS_EVENT);
        awsRequestMetrics.getTimingInfo().endTiming();
        afterResponse(response);
        return response;
    } catch (AmazonClientException e) {
        // Publish failure before the error handlers run, then rethrow as-is.
        publishProgress(listener, ProgressEventType.CLIENT_REQUEST_FAILED_EVENT);
        afterError(response, e);
        throw e;
    } finally {
        // Always close so any progress tracking would get the final events propagated.
        closeQuietly(toBeClosed, log);
        request.setContent(origContent); // restore the original content
    }
}
项目:ibm-cos-sdk-java    文件:AmazonHttpClient.java   
/**
 * If content length is present on the request, report it to the progress listener.
 *
 * @param listener Listener to notify.
 */
private void reportContentLength(ProgressListener listener) {
    final String contentLengthStr = request.getHeaders().get("Content-Length");
    if (contentLengthStr == null) {
        return;
    }
    try {
        // A malformed header is logged and ignored rather than failing the request.
        publishRequestContentLength(listener, Long.parseLong(contentLengthStr));
    } catch (NumberFormatException e) {
        log.warn("Cannot parse the Content-Length header of the request.");
    }
}
项目:ibm-cos-sdk-java    文件:AmazonHttpClient.java   
/**
 * Pause before the next retry and record metrics around retry behavior.
 *
 * @param execOneParams state for the current request attempt
 * @param listener progress listener to notify of the retry
 * @throws InterruptedException if the pause is interrupted
 */
private void pauseBeforeRetry(ExecOneRequestParams execOneParams,
                              final ProgressListener listener) throws InterruptedException {
    // Notify the progress listener of the retry before pausing.
    publishProgress(listener, ProgressEventType.CLIENT_REQUEST_RETRY_EVENT);
    // Time the pause so retry latency shows up in the request metrics.
    awsRequestMetrics.startEvent(Field.RetryPauseTime);
    try {
        doPauseBeforeRetry(execOneParams);
    } finally {
        awsRequestMetrics.endEvent(Field.RetryPauseTime);
    }
}
项目:ibm-cos-sdk-java    文件:AmazonWebServiceRequestTest.java   
/**
 * Verifies that {@code copyBaseTo} transfers the progress listener,
 * credentials, metric collector, custom headers/query parameters and read
 * limit from one request to another.
 */
@Test
public void copyBaseTo() {
    final ProgressListener listener = new SyncProgressListener() {
        @Override
        public void progressChanged(ProgressEvent progressEvent) {
        }
    };
    final AWSCredentials credentials = new BasicAWSCredentials("accesskey",
            "accessid");
    final RequestMetricCollector collector = new RequestMetricCollector() {
        @Override
        public void collectMetrics(Request<?> request, Response<?> response) {
        }
    };

    final AmazonWebServiceRequest from = new AmazonWebServiceRequest() {
    };
    from.setGeneralProgressListener(listener);
    from.setRequestCredentials(credentials);
    from.setRequestMetricCollector(collector);
    from.putCustomRequestHeader("k1", "v1");
    from.putCustomRequestHeader("k2", "v2");
    from.putCustomQueryParameter("k1", "v1");
    from.putCustomQueryParameter("k2", "v2a");
    from.putCustomQueryParameter("k2", "v2b");
    from.getRequestClientOptions().setReadLimit(1234);

    final AmazonWebServiceRequest to = new AmazonWebServiceRequest() {
    };

    // Before copy: "to" carries only defaults.
    verifyBaseBeforeCopy(to);

    // After copy: "to" mirrors everything configured on "from".
    from.copyBaseTo(to);
    verifyBaseAfterCopy(listener, credentials, collector, from, to);
}
项目:ibm-cos-sdk-java    文件:AmazonWebServiceRequestTest.java   
/**
 * Asserts the default (pre-copy) state of a fresh request: no custom headers
 * or query parameters, the NOOP progress listener, no credentials or metric
 * collector, and the default read limits.
 */
public static void verifyBaseBeforeCopy(final AmazonWebServiceRequest to) {
    assertNull(to.getCustomRequestHeaders());
    assertNull(to.getCustomQueryParameters());
    assertSame(ProgressListener.NOOP, to.getGeneralProgressListener());
    assertNull(to.getRequestCredentials());
    assertNull(to.getRequestMetricCollector());

    assertTrue(to.getReadLimit() == RequestClientOptions.DEFAULT_STREAM_BUFFER_SIZE);
    final RequestClientOptions options = to.getRequestClientOptions();
    assertNull(options.getClientMarker(Marker.USER_AGENT));
    assertTrue(options.getReadLimit() == RequestClientOptions.DEFAULT_STREAM_BUFFER_SIZE);
}
项目:ibm-cos-sdk-java    文件:AmazonWebServiceRequestAdapterTest.java   
@Test
public void customProgressListenerSetInBaseRequest_IsSetOnAdapter() {
    // A listener installed on the base request must surface through the adapter.
    final ProgressListener listener = mock(ProgressListener.class);
    final EmptyAmazonWebServiceRequest request = new EmptyAmazonWebServiceRequest();
    request.setGeneralProgressListener(listener);

    final AmazonWebServiceRequestAdapter adapter = new AmazonWebServiceRequestAdapter(request);
    assertEquals(listener, adapter.getProgressListener());
}
项目:ibm-cos-sdk-java    文件:S3ProgressListenerChain.java   
/**
 * Forwards the persistable-transfer notification to every listener in the
 * chain that is itself an {@code S3ProgressListener}; other listeners are skipped.
 */
@Override
public void onPersistableTransfer(PersistableTransfer persistableTransfer) {
    for (ProgressListener candidate : getListeners()) {
        if (!(candidate instanceof S3ProgressListener)) {
            continue;
        }
        ((S3ProgressListener) candidate).onPersistableTransfer(persistableTransfer);
    }
}
项目:ibm-cos-sdk-java    文件:AbstractPutObjectRequest.java   
/**
 * Returns the optional progress listener for receiving updates about object
 * upload status.
 *
 * @return the legacy progress listener, or {@code null} when the general
 *         listener is not a legacy wrapper
 *
 * @deprecated use {@link #getGeneralProgressListener()} instead.
 */
@Deprecated
public com.amazonaws.services.s3.model.ProgressListener getProgressListener() {
    final ProgressListener current = getGeneralProgressListener();
    return (current instanceof LegacyS3ProgressListener)
            ? ((LegacyS3ProgressListener) current).unwrap()
            : null;
}
项目:ibm-cos-sdk-java    文件:GetObjectRequest.java   
/**
 * Returns the optional progress listener for receiving updates about object
 * download status.
 *
 * @return the legacy progress listener, or {@code null} when the general
 *         listener is not a legacy wrapper
 *
 * @deprecated use {@link #getGeneralProgressListener()} instead.
 */
@Deprecated
public com.amazonaws.services.s3.model.ProgressListener getProgressListener() {
    final ProgressListener current = getGeneralProgressListener();
    return (current instanceof LegacyS3ProgressListener)
            ? ((LegacyS3ProgressListener) current).unwrap()
            : null;
}
项目:ibm-cos-sdk-java    文件:UploadPartRequest.java   
/**
 * Returns the optional progress listener for receiving updates about object
 * upload status.
 *
 * @return the legacy progress listener, or {@code null} when the general
 *         listener is not a legacy wrapper
 *
 * @deprecated use {@link #getGeneralProgressListener()} instead.
 */
@Deprecated
public com.amazonaws.services.s3.model.ProgressListener getProgressListener() {
    final ProgressListener current = getGeneralProgressListener();
    return (current instanceof LegacyS3ProgressListener)
            ? ((LegacyS3ProgressListener) current).unwrap()
            : null;
}
项目:aliyun-oss-hadoop-fs    文件:S3AFileSystem.java   
/**
 * Server-side copy of one object to another key in the same bucket,
 * preserving source metadata (re-applying the SSE algorithm when
 * configured). Blocks until the copy completes.
 *
 * @param srcKey source object key
 * @param dstKey destination object key
 * @throws IOException if the copy is interrupted
 */
private void copyFile(String srcKey, String dstKey) throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("copyFile " + srcKey + " -> " + dstKey);
  }

  ObjectMetadata srcom = s3.getObjectMetadata(bucket, srcKey);
  final ObjectMetadata dstom = srcom.clone();
  if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
    dstom.setSSEAlgorithm(serverSideEncryptionAlgorithm);
  }
  CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucket, srcKey, bucket, dstKey);
  copyObjectRequest.setCannedAccessControlList(cannedACL);
  copyObjectRequest.setNewObjectMetadata(dstom);

  // Count each completed multipart chunk as one write operation.
  ProgressListener progressListener = new ProgressListener() {
    public void progressChanged(ProgressEvent progressEvent) {
      switch (progressEvent.getEventType()) {
        case TRANSFER_PART_COMPLETED_EVENT:
          statistics.incrementWriteOps(1);
          break;
        default:
          break;
      }
    }
  };

  Copy copy = transfers.copy(copyObjectRequest);
  copy.addProgressListener(progressListener);
  try {
    copy.waitForCopyResult();
    statistics.incrementWriteOps(1);
  } catch (InterruptedException e) {
    // Restore the interrupt flag and preserve the cause for diagnostics.
    Thread.currentThread().interrupt();
    throw new IOException("Got interrupted, cancelling", e);
  }
}
项目:big-c    文件:S3AFileSystem.java   
/**
 * Server-side copy of one object to another key in the same bucket,
 * preserving source metadata (re-applying server-side encryption when
 * configured). Blocks until the copy completes.
 *
 * @param srcKey source object key
 * @param dstKey destination object key
 * @throws IOException if the copy is interrupted
 */
private void copyFile(String srcKey, String dstKey) throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("copyFile " + srcKey + " -> " + dstKey);
  }

  ObjectMetadata srcom = s3.getObjectMetadata(bucket, srcKey);
  final ObjectMetadata dstom = srcom.clone();
  if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
    dstom.setServerSideEncryption(serverSideEncryptionAlgorithm);
  }
  CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucket, srcKey, bucket, dstKey);
  copyObjectRequest.setCannedAccessControlList(cannedACL);
  copyObjectRequest.setNewObjectMetadata(dstom);

  // Count each completed multipart chunk as one write operation.
  ProgressListener progressListener = new ProgressListener() {
    public void progressChanged(ProgressEvent progressEvent) {
      switch (progressEvent.getEventCode()) {
        case ProgressEvent.PART_COMPLETED_EVENT_CODE:
          statistics.incrementWriteOps(1);
          break;
        default:
          break;
      }
    }
  };

  Copy copy = transfers.copy(copyObjectRequest);
  copy.addProgressListener(progressListener);
  try {
    copy.waitForCopyResult();
    statistics.incrementWriteOps(1);
  } catch (InterruptedException e) {
    // Restore the interrupt flag and preserve the cause for diagnostics.
    Thread.currentThread().interrupt();
    throw new IOException("Got interrupted, cancelling", e);
  }
}
项目:emodb    文件:S3ScanWriter.java   
/**
 * Starts an asynchronous upload and returns a ListenableFuture for handling the result.
 *
 * The returned future is completed with the uploaded object's ETag on
 * success, or with the thrown exception on failure. It is deliberately
 * distinct from the submit future: see the inline comment below.
 */
synchronized ListenableFuture<String> upload() {
    // Reset values from possible prior attempt
    _attempts += 1;
    _bytesTransferred = 0;

    // Separate the future returned to the caller from the future generated by submitting the
    // putObject request.  If the writer is closed then uploadFuture may be canceled before it executes,
    // in which case it may not trigger any callbacks.  To ensure there is always a callback resultFuture is
    // tracked independently and, in the event that the upload is aborted, gets set on abort().
    _resultFuture = SettableFuture.create();

    _uploadFuture = _uploadService.submit(new Runnable() {
        @Override
        public void run() {
            try {
                ProgressListener progressListener = new ProgressListener() {
                    @Override
                    public void progressChanged(ProgressEvent progressEvent) {
                        // getBytesTransferred() returns zero for all events not pertaining to the file transfer
                        // NOTE(review): the += below is not atomic; this assumes progress
                        // callbacks for a single putObject arrive serially — confirm.
                        _bytesTransferred += progressEvent.getBytesTransferred();
                    }
                };

                PutObjectRequest putObjectRequest = new PutObjectRequest(_bucket, _key, _file);
                putObjectRequest.setGeneralProgressListener(progressListener);
                PutObjectResult result = _amazonS3.putObject(putObjectRequest);
                // Success: complete the caller-visible future with the object's ETag.
                _resultFuture.set(result.getETag());
            } catch (Throwable t) {
                // Any failure (including Errors) is propagated through the result future.
                _resultFuture.setException(t);
            }
        }
    });

    return _resultFuture;
}
项目:hadoop-2.6.0-cdh5.4.3    文件:S3AFileSystem.java   
/**
 * Server-side copy of one object to another key in the same bucket,
 * preserving source metadata (re-applying server-side encryption when
 * configured). Blocks until the copy completes.
 *
 * @param srcKey source object key
 * @param dstKey destination object key
 * @throws IOException if the copy is interrupted
 */
private void copyFile(String srcKey, String dstKey) throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("copyFile " + srcKey + " -> " + dstKey);
  }

  ObjectMetadata srcom = s3.getObjectMetadata(bucket, srcKey);
  final ObjectMetadata dstom = srcom.clone();
  if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
    dstom.setServerSideEncryption(serverSideEncryptionAlgorithm);
  }
  CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucket, srcKey, bucket, dstKey);
  copyObjectRequest.setCannedAccessControlList(cannedACL);
  copyObjectRequest.setNewObjectMetadata(dstom);

  // Count each completed multipart chunk as one write operation.
  ProgressListener progressListener = new ProgressListener() {
    public void progressChanged(ProgressEvent progressEvent) {
      switch (progressEvent.getEventCode()) {
        case ProgressEvent.PART_COMPLETED_EVENT_CODE:
          statistics.incrementWriteOps(1);
          break;
        default:
          break;
      }
    }
  };

  Copy copy = transfers.copy(copyObjectRequest);
  copy.addProgressListener(progressListener);
  try {
    copy.waitForCopyResult();
    statistics.incrementWriteOps(1);
  } catch (InterruptedException e) {
    // Restore the interrupt flag and preserve the cause for diagnostics.
    Thread.currentThread().interrupt();
    throw new IOException("Got interrupted, cancelling", e);
  }
}
项目:thunderbit    文件:AmazonS3Storage.java   
/**
 * Asynchronously uploads the file at {@code path} to the configured bucket
 * under {@code key}, completing the returned promise when the transfer
 * finishes (or fails).
 *
 * Fix: previously an AmazonServiceException/AmazonClientException from
 * {@code upload(...)} was only logged, leaving the promise forever
 * incomplete (callers hang) and leaking the TransferManager's threads.
 * Both catch blocks now shut the manager down and fail the promise.
 *
 * @param path local file to upload
 * @param key destination object key
 * @param name display name (unused here)
 * @return a promise completed on transfer success or failure
 */
@Override
public F.Promise<Void> store(Path path, String key, String name) {
    Promise<Void> promise = Futures.promise();

    TransferManager transferManager = new TransferManager(credentials);
    try {
        Upload upload = transferManager.upload(bucketName, key, path.toFile());
        upload.addProgressListener((ProgressListener) progressEvent -> {
            if (progressEvent.getEventType().isTransferEvent()) {
                if (progressEvent.getEventType().equals(ProgressEventType.TRANSFER_COMPLETED_EVENT)) {
                    transferManager.shutdownNow();
                    promise.success(null);
                } else if (progressEvent.getEventType().equals(ProgressEventType.TRANSFER_FAILED_EVENT)) {
                    transferManager.shutdownNow();
                    logger.error(progressEvent.toString());
                    promise.failure(new Exception(progressEvent.toString()));
                }
            }
        });
    } catch (AmazonServiceException ase) {
        logAmazonServiceException (ase);
        // Release the manager's threads and fail the promise so callers don't hang.
        transferManager.shutdownNow();
        promise.failure(ase);
    } catch (AmazonClientException ace) {
        logAmazonClientException(ace);
        transferManager.shutdownNow();
        promise.failure(ace);
    }

    return F.Promise.wrap(promise.future());
}
项目:kickflip-android-sdk    文件:S3BroadcastManager.java   
/**
 * Builds a PutObjectRequest for the given file with a progress listener that
 * reports upload timing/throughput, applies any registered request
 * interceptors, and enqueues the request for upload.
 *
 * @param bucket destination S3 bucket
 * @param key destination object key
 * @param file local file to upload
 * @param lastUpload whether this is the final upload of the broadcast
 */
public void queueUpload(final String bucket, final String key, final File file, boolean lastUpload) {
    if (VERBOSE) Log.i(TAG, "Queueing upload " + key);

    final PutObjectRequest por = new PutObjectRequest(bucket, key, file);
    // NOTE(review): this listener uses the deprecated event-code API
    // (getEventCode/STARTED_EVENT_CODE etc.) rather than ProgressEventType.
    por.setGeneralProgressListener(new ProgressListener() {
        // Public URL the object will be reachable at after upload.
        final String url = "https://" + bucket + ".s3.amazonaws.com/" + key;
        private long uploadStartTime;

        @Override
        public void progressChanged(com.amazonaws.event.ProgressEvent progressEvent) {
            try {
                if (progressEvent.getEventCode() == ProgressEvent.STARTED_EVENT_CODE) {
                    uploadStartTime = System.currentTimeMillis();
                } else if (progressEvent.getEventCode() == com.amazonaws.event.ProgressEvent.COMPLETED_EVENT_CODE) {
                    // Average throughput over the whole transfer, in bytes/second.
                    long uploadDurationMillis = System.currentTimeMillis() - uploadStartTime;
                    int bytesPerSecond = (int) (file.length() / (uploadDurationMillis / 1000.0));
                    if (VERBOSE)
                        Log.i(TAG, "Uploaded " + file.length() / 1000.0 + " KB in " + (uploadDurationMillis) + "ms (" + bytesPerSecond / 1000.0 + " KBps)");
                    mBroadcaster.onS3UploadComplete(new S3UploadEvent(file, url, bytesPerSecond));
                } else if (progressEvent.getEventCode() == ProgressEvent.FAILED_EVENT_CODE) {
                    Log.w(TAG, "Upload failed for " + url);
                }
            } catch (Exception excp) {
                // Deliberately swallow listener errors so a reporting failure
                // cannot abort the upload itself.
                Log.e(TAG, "ProgressListener error");
                excp.printStackTrace();
            }
        }
    });
    por.setCannedAcl(CannedAccessControlList.PublicRead);
    // Give registered interceptors a chance to mutate the request before queuing;
    // weak references may have been collected, hence the null check.
    for (WeakReference<S3RequestInterceptor> ref : mInterceptors) {
        S3RequestInterceptor interceptor = ref.get();
        if (interceptor != null) {
            interceptor.interceptRequest(por);
        }
    }
    mQueue.add(new Pair<>(por, lastUpload));
}
项目:hadoop    文件:S3AFileSystem.java   
/**
 * The src file is on the local disk.  Add it to FS at
 * the given dst name.
 *
 * This version doesn't need to create a temporary file to calculate the md5.
 * Sadly this doesn't seem to be used by the shell cp :(
 *
 * delSrc indicates if the source should be removed
 * @param delSrc whether to delete the src
 * @param overwrite whether to overwrite an existing file
 * @param src path
 * @param dst path
 * @throws IOException if the destination exists and overwrite is false,
 *         or if the upload is interrupted
 */
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, 
  Path dst) throws IOException {
  String key = pathToKey(dst);

  if (!overwrite && exists(dst)) {
    throw new IOException(dst + " already exists");
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Copying local file from " + src + " to " + dst);
  }

  // Since we have a local file, we don't need to stream into a temporary file
  LocalFileSystem local = getLocal(getConf());
  File srcfile = local.pathToFile(src);

  final ObjectMetadata om = new ObjectMetadata();
  if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
    om.setServerSideEncryption(serverSideEncryptionAlgorithm);
  }
  PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, srcfile);
  putObjectRequest.setCannedAcl(cannedACL);
  putObjectRequest.setMetadata(om);

  // Count each completed multipart chunk as one write operation.
  ProgressListener progressListener = new ProgressListener() {
    public void progressChanged(ProgressEvent progressEvent) {
      switch (progressEvent.getEventCode()) {
        case ProgressEvent.PART_COMPLETED_EVENT_CODE:
          statistics.incrementWriteOps(1);
          break;
        default:
          break;
      }
    }
  };

  Upload up = transfers.upload(putObjectRequest);
  up.addProgressListener(progressListener);
  try {
    up.waitForUploadResult();
    statistics.incrementWriteOps(1);
  } catch (InterruptedException e) {
    // Restore the interrupt flag and preserve the cause for diagnostics.
    Thread.currentThread().interrupt();
    throw new IOException("Got interrupted, cancelling", e);
  }

  // This will delete unnecessary fake parent directories
  finishedWrite(key);

  if (delSrc) {
    local.delete(src, false);
  }
}
项目:ibm-cos-sdk-java    文件:AmazonWebServiceRequestAdapter.java   
/**
 * Returns the progress listener configured on the adapted request.
 */
@Override
public ProgressListener getProgressListener() {
    final ProgressListener configured = request.getGeneralProgressListener();
    return configured;
}
项目:ibm-cos-sdk-java    文件:AmazonS3Client.java   
/**
 * Uploads a single part of a multipart upload and validates the response.
 *
 * Sends the part content, then — unless client-side MD5 validation is
 * skipped for this response — compares the locally computed MD5 digest
 * against the ETag returned by Amazon S3 and fails the part on mismatch.
 * Publishes a part-completed progress event on success and a part-failed
 * event (followed by a legacy part-completed event, see comment below) on
 * failure.
 *
 * @param md5DigestStream digest-computing wrapper around the part content,
 *        or null when client-side validation is disabled
 * @param listener progress listener to notify of part completion/failure
 * @return the upload-part result populated from the response metadata
 */
private UploadPartResult doUploadPart(final String bucketName,
        final String key, final String uploadId, final int partNumber,
        final long partSize, Request<UploadPartRequest> request,
        InputStream inputStream,
        MD5DigestCalculatingInputStream md5DigestStream,
        final ProgressListener listener) {
    try {
        request.setContent(inputStream);
        ObjectMetadata metadata = invoke(request, new S3MetadataResponseHandler(), bucketName, key);
        final String etag = metadata.getETag();

        if (md5DigestStream != null
                && !skipMd5CheckStrategy.skipClientSideValidationPerUploadPartResponse(metadata)) {
            // For single-part ETags S3 returns the hex MD5 of the content,
            // so it can be compared directly against the client-side digest.
            byte[] clientSideHash = md5DigestStream.getMd5Digest();
            byte[] serverSideHash = BinaryUtils.fromHex(etag);

            if (!Arrays.equals(clientSideHash, serverSideHash)) {
                final String info = "bucketName: " + bucketName + ", key: "
                        + key + ", uploadId: " + uploadId
                        + ", partNumber: " + partNumber + ", partSize: "
                        + partSize;
                throw new SdkClientException(
                     "Unable to verify integrity of data upload.  "
                    + "Client calculated content hash (contentMD5: "
                    + Base16.encodeAsString(clientSideHash)
                    + " in hex) didn't match hash (etag: "
                    + etag
                    + " in hex) calculated by Amazon S3.  "
                    + "You may need to delete the data stored in Amazon S3. "
                    + "(" + info + ")");
            }
        }
        publishProgress(listener, ProgressEventType.TRANSFER_PART_COMPLETED_EVENT);
        UploadPartResult result = new UploadPartResult();
        result.setETag(etag);
        result.setPartNumber(partNumber);
        result.setSSEAlgorithm(metadata.getSSEAlgorithm());
        result.setSSECustomerAlgorithm(metadata.getSSECustomerAlgorithm());
        result.setSSECustomerKeyMd5(metadata.getSSECustomerKeyMd5());
        result.setRequesterCharged(metadata.isRequesterCharged());
        return result;
    } catch (Throwable t) {
        publishProgress(listener, ProgressEventType.TRANSFER_PART_FAILED_EVENT);
        // Leaving this here in case anyone is depending on it, but it's
        // inconsistent with other methods which only generate one of
        // COMPLETED_EVENT_CODE or FAILED_EVENT_CODE.
        publishProgress(listener, ProgressEventType.TRANSFER_PART_COMPLETED_EVENT);
        throw failure(t);
    }
}
项目:ibm-cos-sdk-java    文件:AbstractTransfer.java   
/**
 * Adds the given legacy progress listener, wrapped for the current event API.
 *
 * @deprecated Replaced by {@link #addProgressListener(ProgressListener)}
 */
@Deprecated
public synchronized void addProgressListener(com.amazonaws.services.s3.model.ProgressListener listener) {
    final LegacyS3ProgressListener adapted = new LegacyS3ProgressListener(listener);
    listenerChain.addProgressListener(adapted);
}
项目:ibm-cos-sdk-java    文件:AbstractTransfer.java   
/**
 * Removes the given legacy progress listener, wrapped for the current event API.
 *
 * NOTE(review): removal wraps the listener in a NEW LegacyS3ProgressListener,
 * so it relies on that wrapper's equals() matching the one added earlier — confirm.
 *
 * @deprecated Replaced by {@link #removeProgressListener(ProgressListener)}
 */
@Deprecated
public synchronized void removeProgressListener(com.amazonaws.services.s3.model.ProgressListener listener) {
    final LegacyS3ProgressListener adapted = new LegacyS3ProgressListener(listener);
    listenerChain.removeProgressListener(adapted);
}
项目:aliyun-oss-hadoop-fs    文件:S3AFileSystem.java   
/**
 * The src file is on the local disk.  Add it to FS at
 * the given dst name.
 *
 * This version doesn't need to create a temporary file to calculate the md5.
 * Sadly this doesn't seem to be used by the shell cp :(
 *
 * delSrc indicates if the source should be removed
 * @param delSrc whether to delete the src
 * @param overwrite whether to overwrite an existing file
 * @param src path
 * @param dst path
 * @throws IOException if the destination exists and overwrite is false,
 *         or if the upload is interrupted
 */
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, 
  Path dst) throws IOException {
  String key = pathToKey(dst);

  if (!overwrite && exists(dst)) {
    throw new IOException(dst + " already exists");
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Copying local file from " + src + " to " + dst);
  }

  // Since we have a local file, we don't need to stream into a temporary file
  LocalFileSystem local = getLocal(getConf());
  File srcfile = local.pathToFile(src);

  final ObjectMetadata om = new ObjectMetadata();
  if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
    om.setSSEAlgorithm(serverSideEncryptionAlgorithm);
  }
  PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, srcfile);
  putObjectRequest.setCannedAcl(cannedACL);
  putObjectRequest.setMetadata(om);

  // Count each completed multipart chunk as one write operation.
  ProgressListener progressListener = new ProgressListener() {
    public void progressChanged(ProgressEvent progressEvent) {
      switch (progressEvent.getEventType()) {
        case TRANSFER_PART_COMPLETED_EVENT:
          statistics.incrementWriteOps(1);
          break;
        default:
          break;
      }
    }
  };

  Upload up = transfers.upload(putObjectRequest);
  up.addProgressListener(progressListener);
  try {
    up.waitForUploadResult();
    statistics.incrementWriteOps(1);
  } catch (InterruptedException e) {
    // Restore the interrupt flag and preserve the cause for diagnostics.
    Thread.currentThread().interrupt();
    throw new IOException("Got interrupted, cancelling", e);
  }

  // This will delete unnecessary fake parent directories
  finishedWrite(key);

  if (delSrc) {
    local.delete(src, false);
  }
}
项目:big-c    文件:S3AFileSystem.java   
/**
 * The src file is on the local disk.  Add it to FS at
 * the given dst name.
 *
 * This version doesn't need to create a temporary file to calculate the md5.
 * Sadly this doesn't seem to be used by the shell cp :(
 *
 * delSrc indicates if the source should be removed
 * @param delSrc whether to delete the src
 * @param overwrite whether to overwrite an existing file
 * @param src path
 * @param dst path
 * @throws IOException if the destination exists and overwrite is false,
 *         or if the upload is interrupted
 */
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, 
  Path dst) throws IOException {
  String key = pathToKey(dst);

  if (!overwrite && exists(dst)) {
    throw new IOException(dst + " already exists");
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Copying local file from " + src + " to " + dst);
  }

  // Since we have a local file, we don't need to stream into a temporary file
  LocalFileSystem local = getLocal(getConf());
  File srcfile = local.pathToFile(src);

  final ObjectMetadata om = new ObjectMetadata();
  if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
    om.setServerSideEncryption(serverSideEncryptionAlgorithm);
  }
  PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, srcfile);
  putObjectRequest.setCannedAcl(cannedACL);
  putObjectRequest.setMetadata(om);

  // Count each completed multipart chunk as one write operation.
  ProgressListener progressListener = new ProgressListener() {
    public void progressChanged(ProgressEvent progressEvent) {
      switch (progressEvent.getEventCode()) {
        case ProgressEvent.PART_COMPLETED_EVENT_CODE:
          statistics.incrementWriteOps(1);
          break;
        default:
          break;
      }
    }
  };

  Upload up = transfers.upload(putObjectRequest);
  up.addProgressListener(progressListener);
  try {
    up.waitForUploadResult();
    statistics.incrementWriteOps(1);
  } catch (InterruptedException e) {
    // Restore the interrupt flag and preserve the cause for diagnostics.
    Thread.currentThread().interrupt();
    throw new IOException("Got interrupted, cancelling", e);
  }

  // This will delete unnecessary fake parent directories
  finishedWrite(key);

  if (delSrc) {
    local.delete(src, false);
  }
}
项目:photon-model    文件:AWSCostStatsService.java   
/**
 * Downloads the AWS CSV bill for {@code statsData.billMonthToDownload} from
 * the given S3 bucket into a fresh temp directory, and parses it once the
 * (asynchronous) download completes.  Parsing, stats creation, temp-file
 * cleanup and advancing to the next month all happen inside the progress
 * listener, driven by the TRANSFER_COMPLETED/TRANSFER_FAILED events.
 */
private void downloadParseAndCreateStats(AWSCostStatsCreationContext statsData, String awsBucketName) throws IOException {
    try {
        // Creating a working directory for downloading and processing the bill
        final Path workingDirPath = Paths.get(System.getProperty(TEMP_DIR_LOCATION),
                UUID.randomUUID().toString());
        Files.createDirectories(workingDirPath);

        AWSCsvBillParser parser = new AWSCsvBillParser();
        final String csvBillZipFileName = parser
                .getCsvBillFileName(statsData.billMonthToDownload, statsData.accountId, true);
        Path csvBillZipFilePath = Paths.get(workingDirPath.toString(), csvBillZipFileName);
        // Listener runs on the transfer manager's callback thread, hence the
        // explicit OperationContext restore before doing any further work.
        ProgressListener listener = new ProgressListener() {
            @Override
            public void progressChanged(ProgressEvent progressEvent) {
                try {
                    ProgressEventType eventType = progressEvent.getEventType();
                    if (ProgressEventType.TRANSFER_COMPLETED_EVENT.equals(eventType)) {
                        OperationContext.restoreOperationContext(statsData.opContext);
                        // First day of the month being processed.
                        LocalDate billMonth = new LocalDate(
                                statsData.billMonthToDownload.getYear(),
                                statsData.billMonthToDownload.getMonthOfYear(), 1);

                        logWithContext(statsData, Level.INFO, () -> String.format("Processing" +
                                " bill for the month: %s.", billMonth));

                        parser.parseDetailedCsvBill(statsData.ignorableInvoiceCharge, csvBillZipFilePath,
                                statsData.awsAccountIdToComputeStates.keySet(),
                                getHourlyStatsConsumer(billMonth, statsData),
                                getMonthlyStatsConsumer(billMonth, statsData));
                        deleteTempFiles();
                        // Continue downloading and processing the bills for past and current months' bills
                        statsData.billMonthToDownload = statsData.billMonthToDownload.plusMonths(1);
                        handleCostStatsCreationRequest(statsData);
                    } else if (ProgressEventType.TRANSFER_FAILED_EVENT.equals(eventType)) {
                        deleteTempFiles();
                        billDownloadFailureHandler(statsData, awsBucketName, new IOException(
                                "Download of AWS CSV Bill '" + csvBillZipFileName + "' failed."));
                    }
                } catch (Exception exception) {
                    // Any parse/processing failure is routed through the same
                    // failure handler as a download failure.
                    deleteTempFiles();
                    billDownloadFailureHandler(statsData, awsBucketName, exception);
                }
            }

            // Best-effort cleanup of the downloaded zip and its directory.
            private void deleteTempFiles() {
                try {
                    Files.deleteIfExists(csvBillZipFilePath);
                    Files.deleteIfExists(workingDirPath);
                } catch (IOException e) {
                    // Ignore IO exception while cleaning files.
                }
            }
        };
        GetObjectRequest getObjectRequest = new GetObjectRequest(awsBucketName,
                csvBillZipFileName).withGeneralProgressListener(listener);
        // Asynchronous download; completion is signalled via the listener above.
        statsData.s3Client.download(getObjectRequest, csvBillZipFilePath.toFile());
    } catch (AmazonS3Exception s3Exception) {
        billDownloadFailureHandler(statsData, awsBucketName, s3Exception);
    }
}
项目:spring-cloud-stream-app-starters    文件:AmazonS3SinkMockTests.java   
@Test
@Override
public void test() throws Exception {
    // Grab the mocked S3 client buried inside the handler's TransferManager.
    AmazonS3 s3 = TestUtils.getPropertyValue(this.s3MessageHandler, "transferManager.s3",
            AmazonS3.class);

    File payload = this.temporaryFolder.newFile("foo.mp3");
    Message<?> message = MessageBuilder.withPayload(payload).build();

    this.channels.input().send(message);

    // The sink must have issued a putObject with the expected request shape.
    ArgumentCaptor<PutObjectRequest> putCaptor =
            ArgumentCaptor.forClass(PutObjectRequest.class);
    verify(s3, atLeastOnce()).putObject(putCaptor.capture());

    PutObjectRequest putRequest = putCaptor.getValue();
    assertThat(putRequest.getBucketName(), equalTo(S3_BUCKET));
    assertThat(putRequest.getKey(), equalTo("foo.mp3"));
    assertNotNull(putRequest.getFile());
    assertNull(putRequest.getInputStream());

    ObjectMetadata metadata = putRequest.getMetadata();
    assertThat(metadata.getContentMD5(), equalTo(Md5Utils.md5AsBase64(payload)));
    assertThat(metadata.getContentLength(), equalTo(0L));
    assertThat(metadata.getContentType(), equalTo("audio/mpeg"));

    // Simulate transfer completion so the handler runs its completion callbacks.
    ProgressListener progressListener = putRequest.getGeneralProgressListener();
    S3ProgressPublisher.publishProgress(progressListener, ProgressEventType.TRANSFER_COMPLETED_EVENT);

    assertTrue(this.transferCompletedLatch.await(10, TimeUnit.SECONDS));
    assertTrue(this.aclLatch.await(10, TimeUnit.SECONDS));

    // Completion should have triggered a canned-ACL update on the object.
    ArgumentCaptor<SetObjectAclRequest> aclCaptor =
            ArgumentCaptor.forClass(SetObjectAclRequest.class);
    verify(s3).setObjectAcl(aclCaptor.capture());

    SetObjectAclRequest aclRequest = aclCaptor.getValue();

    assertThat(aclRequest.getBucketName(), equalTo(S3_BUCKET));
    assertThat(aclRequest.getKey(), equalTo("foo.mp3"));
    assertNull(aclRequest.getAcl());
    assertThat(aclRequest.getCannedAcl(), equalTo(CannedAccessControlList.PublicReadWrite));
}
项目:hadoop-2.6.0-cdh5.4.3    文件:S3AFileSystem.java   
/**
 * The src file is on the local disk.  Add it to FS at
 * the given dst name.
 *
 * This version doesn't need to create a temporary file to calculate the md5.
 * Sadly this doesn't seem to be used by the shell cp :(
 *
 * delSrc indicates if the source should be removed
 * @param delSrc whether to delete the src
 * @param overwrite whether to overwrite an existing file
 * @param src path
 * @param dst path
 * @throws IOException if the destination exists and overwrite is false, or
 *         if the upload is interrupted
 */
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, 
  Path dst) throws IOException {
  String key = pathToKey(dst);

  if (!overwrite && exists(dst)) {
    throw new IOException(dst + " already exists");
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Copying local file from " + src + " to " + dst);
  }

  // Since we have a local file, we don't need to stream into a temporary file
  LocalFileSystem local = getLocal(getConf());
  File srcfile = local.pathToFile(src);

  final ObjectMetadata om = new ObjectMetadata();
  if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
    om.setServerSideEncryption(serverSideEncryptionAlgorithm);
  }
  PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, srcfile);
  putObjectRequest.setCannedAcl(cannedACL);
  putObjectRequest.setMetadata(om);

  // Count each completed multipart part as one write operation.
  ProgressListener progressListener = new ProgressListener() {
    public void progressChanged(ProgressEvent progressEvent) {
      switch (progressEvent.getEventCode()) {
        case ProgressEvent.PART_COMPLETED_EVENT_CODE:
          statistics.incrementWriteOps(1);
          break;
        default:
          break;
      }
    }
  };

  Upload up = transfers.upload(putObjectRequest);
  up.addProgressListener(progressListener);
  try {
    up.waitForUploadResult();
    statistics.incrementWriteOps(1);
  } catch (InterruptedException e) {
    // Restore the interrupt status so callers can observe it, and keep
    // the original exception as the cause for diagnostics.
    Thread.currentThread().interrupt();
    throw new IOException("Got interrupted, cancelling", e);
  }

  // This will delete unnecessary fake parent directories
  finishedWrite(key);

  if (delSrc) {
    local.delete(src, false);
  }
}
项目:s3_video    文件:AWSAdapter.java   
/**
 * Uploads the given text as an HTML object to S3, logging progress every 10%.
 *
 * @param bucketName target bucket
 * @param key object key
 * @param content text to upload (stored as "text/html; charset=utf-8")
 * @param storageClass storage class name; "ReducedRedundancy" enables RRS
 * @throws TranscodeException on upload failure or interruption
 */
public void uploadTextToS3Bucket(String bucketName, String key, String content, String storageClass) throws TranscodeException{
    ObjectMetadata objectMetaData = new ObjectMetadata();
    // Encode explicitly as UTF-8 so the bytes match the declared
    // "charset=utf-8" content type; the no-arg getBytes() uses the platform
    // default charset and could corrupt non-ASCII content on some hosts.
    byte[] bytes = content.getBytes(java.nio.charset.StandardCharsets.UTF_8);
    objectMetaData.setContentLength(bytes.length);
    objectMetaData.setContentType("text/html; charset=utf-8");
    try {
        ByteArrayInputStream inputStream = new ByteArrayInputStream(bytes);
        PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, key, inputStream, objectMetaData);
        if (storageClass.equalsIgnoreCase(StorageClass.ReducedRedundancy.name())) {
            putObjectRequest.setStorageClass(StorageClass.ReducedRedundancy);
        }

        final long fileSizeInBytes = bytes.length;

        putObjectRequest.setGeneralProgressListener(new ProgressListener() {

            private long bytesTransferred = 0;

            private int currentPercentage = 0;

            @Override
            public void progressChanged(ProgressEvent progressEvent) {
                bytesTransferred += progressEvent.getBytesTransferred();
                if (fileSizeInBytes == 0) {
                    return; // avoid division by zero for empty content
                }
                int percentTransferred = (int) (bytesTransferred * 100 / fileSizeInBytes);
                // Log only on each new 10% boundary to keep the log quiet.
                if (percentTransferred%10 == 0 && percentTransferred != currentPercentage) {
                    logger.info("Transferred {}% of {} bytes.", percentTransferred, fileSizeInBytes);
                    currentPercentage = percentTransferred;
                }
            }
        });

        Upload upload = tm.upload(putObjectRequest);
        if (upload!=null) {
            upload.waitForCompletion();
        } else {
            logger.error("Did not get upload detail from S3 for asset with key " + key);
            throw new TranscodeException("Did not get upload detail from S3 for asset with key " + key);
        }
    } catch (InterruptedException e) {
        // Restore the interrupt status before translating the exception.
        Thread.currentThread().interrupt();
        // NOTE(review): the cause's stack trace is lost here; if
        // TranscodeException offers a (String, Throwable) constructor,
        // prefer it so diagnostics are preserved.
        throw new TranscodeException(e.getMessage());
    } catch (AmazonClientException e) {
        throw new TranscodeException(e.getMessage());
    }
}
项目:spring-boot-aws-cloudsearch    文件:CloudSearchClient.java   
/**
 * Sets the progress listener used to receive updates about S3 transfers
 * issued by this client.
 *
 * @param progressListener the listener to notify; may be null to clear it
 */
public void setProgressListener(ProgressListener progressListener) {
    this.progressListener = progressListener;
}
项目:ibm-cos-sdk-java    文件:AmazonWebServiceRequest.java   
/**
 * Sets the optional progress listener for receiving updates about the progress of the request,
 * and returns a reference to this object so that method calls can be chained together.
 *
 * @param progressListener
 *            The new progress listener.
 * @return A reference to this updated object so that method calls can be chained together.
 */
public <T extends AmazonWebServiceRequest> T withGeneralProgressListener(ProgressListener progressListener) {
    setGeneralProgressListener(progressListener);
    // The cast is safe as long as the caller's T is the concrete request
    // type of `this`, which is the only sensible way to call this method.
    @SuppressWarnings("unchecked")
    T t = (T) this;
    return t;
}
项目:ibm-cos-sdk-java    文件:S3ProgressPublisher.java   
/**
 * Delivers a persistable transfer to the supplied listener, if it is an
 * {@link S3ProgressListener}; all other listener types are ignored.
 *
 * @param listener only listener of type {@link S3ProgressListener} will be
 * notified.
 *
 * @return the future of a submitted task; or null if the delivery is
 * synchronous with no future task involved.  Note a listener should never
 * block, and therefore returning null is the typical case.
 */
public static Future<?> publishTransferPersistable(
        final ProgressListener listener,
        final PersistableTransfer persistableTransfer) {
    // Only S3-aware listeners can consume a persistable transfer, and
    // there must actually be something to deliver.
    if (listener instanceof S3ProgressListener && persistableTransfer != null) {
        return deliverEvent((S3ProgressListener) listener, persistableTransfer);
    }
    return null;
}
项目:ibm-cos-sdk-java    文件:AbstractPutObjectRequest.java   
/**
 * Sets the legacy progress listener (used exclusively by the Amazon S3
 * client) and returns this request for call chaining.
 *
 * @param progressListener
 *            The legacy progress listener that is used exclusively for Amazon S3 client.
 *
 * @return This updated PutObjectRequest object.
 *
 * @deprecated use {@link #withGeneralProgressListener(ProgressListener)} instead.
 */
@Deprecated
public <T extends AbstractPutObjectRequest> T withProgressListener(
        com.amazonaws.services.s3.model.ProgressListener progressListener) {
    setProgressListener(progressListener);
    // Safe when the caller's T is the concrete type of this request.
    @SuppressWarnings("unchecked")
    final T self = (T) this;
    return self;
}
项目:ibm-cos-sdk-java    文件:AmazonWebServiceRequest.java   
/**
 * Sets the optional progress listener for receiving updates about the progress of the request.
 *
 * @param progressListener
 *            The new progress listener.
 */
public void setGeneralProgressListener(ProgressListener progressListener) {
    // Substitute the no-op listener on null so the field is never null.
    if (progressListener == null) {
        this.progressListener = ProgressListener.NOOP;
    } else {
        this.progressListener = progressListener;
    }
}
项目:ibm-cos-sdk-java    文件:AmazonWebServiceRequest.java   
/**
 * Returns the optional progress listener for receiving updates about the progress of the
 * request.
 *
 * @return the optional progress listener for receiving updates about the progress of the
 *         request.  Never null when set via
 *         {@link #setGeneralProgressListener(ProgressListener)}, which
 *         substitutes {@code ProgressListener.NOOP} for null.
 */
public ProgressListener getGeneralProgressListener() {
    return progressListener;
}
项目:ibm-cos-sdk-java    文件:AmazonHttpClient.java   
/**
 * Wrap with a {@link ProgressInputStream} to report request progress to listener.
 *
 * @param listener Listener to report to
 * @param content  Input stream to monitor progress for
 * @return Wrapped input stream with progress monitoring capabilities.
 */
private InputStream monitorStreamProgress(ProgressListener listener,
                                          InputStream content) {
    // Delegates entirely to the SDK's progress-tracking stream factory.
    return ProgressInputStream.inputStreamForRequest(content, listener);
}