Java Code Examples for com.amazonaws.event.ProgressEventType
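
The snippets below show how a range of open-source projects consume com.amazonaws.event.ProgressEventType from the AWS SDK for Java v1. As orientation, here is a minimal sketch of a listener (the class name is hypothetical; the listener interface, getEventType(), isByteCountEvent(), and getBytesTransferred() are the same APIs used throughout the snippets below) that accumulates bytes from byte-count events and logs transfer lifecycle events:

import com.amazonaws.event.ProgressEvent;
import com.amazonaws.event.ProgressEventType;
import com.amazonaws.event.ProgressListener;

// Minimal sketch: count bytes from byte-count events, log transfer lifecycle events.
public class LoggingProgressListener implements ProgressListener {
    private long bytesSoFar;

    @Override
    public void progressChanged(ProgressEvent progressEvent) {
        ProgressEventType type = progressEvent.getEventType();
        if (type.isByteCountEvent()) {
            bytesSoFar += progressEvent.getBytesTransferred();
            return;
        }
        switch (type) {
            case TRANSFER_STARTED_EVENT:
                System.out.println("Transfer started");
                break;
            case TRANSFER_COMPLETED_EVENT:
                System.out.println("Transfer completed: " + bytesSoFar + " bytes");
                break;
            case TRANSFER_FAILED_EVENT:
                System.err.println("Transfer failed");
                break;
            default:
                break;
        }
    }
}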

Project: ibm-cos-sdk-java    File: CopyCallable.java
/**
 * Performs the copy of an Amazon S3 object from the source bucket to the
 * destination bucket as multiple copy part requests. The information about
 * the part to be copied is specified in the request as a byte range
 * (first-last)
 *
 * @throws Exception
 *             Any Exception that occurs while carrying out the request.
 */
private void copyInParts() throws Exception {
    multipartUploadId = initiateMultipartUpload(copyObjectRequest);

    long optimalPartSize = getOptimalPartSize(metadata.getContentLength());

    try {
        CopyPartRequestFactory requestFactory = new CopyPartRequestFactory(
                copyObjectRequest, multipartUploadId, optimalPartSize,
                metadata.getContentLength());
        copyPartsInParallel(requestFactory);
    } catch (Exception e) {
        publishProgress(listenerChain, ProgressEventType.TRANSFER_FAILED_EVENT);
        abortMultipartCopy();
        throw new RuntimeException("Unable to perform multipart copy", e);
    }
}
Project: ibm-cos-sdk-java    File: DownloadImpl.java
/**
 * This method is also responsible for firing the COMPLETED, CANCELED, or
 * FAILED signal to the listeners.
 */
@Override
public void setState(TransferState state) {
    super.setState(state);

    switch (state) {
        case Completed:
            fireProgressEvent(ProgressEventType.TRANSFER_COMPLETED_EVENT);
            break;
        case Canceled:
            fireProgressEvent(ProgressEventType.TRANSFER_CANCELED_EVENT);
            break;
        case Failed:
            fireProgressEvent(ProgressEventType.TRANSFER_FAILED_EVENT);
            break;
        default:
            break;
    }
}
Project: aliyun-oss-hadoop-fs    File: S3AOutputStream.java
public void progressChanged(ProgressEvent progressEvent) {
  if (progress != null) {
    progress.progress();
  }

  // There are 3 http ops here, but this should be close enough for now
  ProgressEventType pet = progressEvent.getEventType();
  if (statistics != null &&
      (pet == TRANSFER_PART_STARTED_EVENT ||
       pet == TRANSFER_COMPLETED_EVENT)) {
    statistics.incrementWriteOps(1);
  }

  long transferred = upload.getProgress().getBytesTransferred();
  long delta = transferred - lastBytesTransferred;
  if (statistics != null && delta != 0) {
    statistics.incrementBytesWritten(delta);
  }

  lastBytesTransferred = transferred;
}
Project: ServerlessJavaMaven    File: ServerlessDeployMojo.java
@Override
public void progressChanged(ProgressEvent event)
{
    if ( event.getEventType() == ProgressEventType.REQUEST_CONTENT_LENGTH_EVENT )
    {
        contentLength = event.getBytes();
        getLog().info("Content size: " + contentLength + " bytes");
    }
    else if ( event.getEventType() == ProgressEventType.REQUEST_BYTE_TRANSFER_EVENT )
    {
        contentSent += event.getBytesTransferred();
        // integer bucket of tens-of-percent completed (0..10)
        int tenPct = (int) (contentSent * 100.0 / contentLength) / 10;
        if ( tenPct > lastTenPct )
        {
            lastTenPct = tenPct;
            getLog().info("Uploaded " + (tenPct * 10) + "% of " + (contentLength / (1024 * 1024)) + " MB");
        }
    }
}
Project: spring-cloud-stream-app-starters    File: AmazonS3SinkMockTests.java
@Bean
public S3ProgressListener s3ProgressListener() {
    return new S3ProgressListener() {

        @Override
        public void onPersistableTransfer(PersistableTransfer persistableTransfer) {

        }

        @Override
        public void progressChanged(ProgressEvent progressEvent) {
            if (ProgressEventType.TRANSFER_COMPLETED_EVENT.equals(progressEvent.getEventType())) {
                transferCompletedLatch().countDown();
            }
        }

    };
}
Project: presto    File: PrestoS3FileSystem.java
private ProgressListener createProgressListener(Transfer transfer)
{
    return new ProgressListener()
    {
        private ProgressEventType previousType;
        private double previousTransferred;

        @Override
        public synchronized void progressChanged(ProgressEvent progressEvent)
        {
            ProgressEventType eventType = progressEvent.getEventType();
            if (previousType != eventType) {
                log.debug("Upload progress event (%s/%s): %s", host, key, eventType);
                previousType = eventType;
            }

            double transferred = transfer.getProgress().getPercentTransferred();
            if (transferred >= (previousTransferred + 10.0)) {
                log.debug("Upload percentage (%s/%s): %.0f%%", host, key, transferred);
                previousTransferred = transferred;
            }
        }
    };
}
Project: esthree    File: PrintingProgressListener.java
@Override
public void progressChanged(ProgressEvent progressEvent) {
    ProgressEventType type = progressEvent.getEventType();
    if (type.equals(TRANSFER_COMPLETED_EVENT) || type.equals(TRANSFER_STARTED_EVENT)) {
        out.println();
    }

    if (type.isByteCountEvent()) {
        long timeLeft = getTimeLeft();
        if (lastTimeLeft < 1 && timeLeft > 0) {
            // prime this value with a sane starting point
            lastTimeLeft = timeLeft;
        }

        // use an exponential moving average to smooth the estimate
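        // i.e. new = old + 0.90 * (sample - old) = 0.10 * old + 0.90 * sample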
        lastTimeLeft += 0.90 * (timeLeft - lastTimeLeft);

        out.print(String.format("\r%1$s  %2$s / %3$s  %4$s      ",
                generate(saturatedCast(round(completed + (progress.getPercentTransferred() * multiplier)))),
                humanReadableByteCount(progress.getBytesTransferred(), true),
                humanReadableByteCount(progress.getTotalBytesToTransfer(), true), fromSeconds(lastTimeLeft)));
        out.flush();
    }
}
Project: esthree    File: Get.java
public MessageDigest copyAndHash(InputStream input, long totalBytes, Progress progress)
        throws IOException, CloneNotSupportedException {

    // clone the current digest, such that it remains unchanged in this method
    MessageDigest computedDigest = (MessageDigest) currentDigest.clone();
    byte[] buffer = new byte[DEFAULT_BUF_SIZE];

    long count = 0;
    int n;
    while (-1 != (n = input.read(buffer))) {
        output.write(buffer, 0, n);
        if (progressListener != null) {
            progress.updateProgress(n);
            progressListener.progressChanged(new ProgressEvent(ProgressEventType.RESPONSE_BYTE_TRANSFER_EVENT, n));
        }
        computedDigest.update(buffer, 0, n);
        count += n;
    }

    // verify that exactly the expected number of bytes was read
    if (totalBytes != count) {
        throw new IOException(String.format("%d bytes downloaded instead of expected %d bytes", count, totalBytes));
    }
    return computedDigest;
}
Project: esthree    File: GetMultipart.java
public MessageDigest copyAndHash(InputStream input, long totalBytes, Progress progress)
        throws IOException, CloneNotSupportedException {

    // clone the current digest, such that it remains unchanged in this method
    MessageDigest computedDigest = (MessageDigest) currentDigest.clone();
    byte[] buffer = new byte[DEFAULT_BUF_SIZE];

    long count = 0;
    int n;
    while (-1 != (n = input.read(buffer))) {
        output.write(buffer, 0, n);
        if (progressListener != null) {
            progress.updateProgress(n);
            progressListener.progressChanged(new ProgressEvent(ProgressEventType.REQUEST_BYTE_TRANSFER_EVENT, n));
        }
        computedDigest.update(buffer, 0, n);
        count += n;
    }

    // verify that exactly the expected number of bytes was read
    if (totalBytes != count) {
        throw new IOException(String.format("%d bytes downloaded instead of expected %d bytes", count, totalBytes));
    }
    return computedDigest;
}
Project: simple-glacier-client    File: ArchiveUploadHighLevel.java
@Override
public void progressChanged(ProgressEvent progressEvent) {

    if (progressEvent.getEventType() == ProgressEventType.REQUEST_CONTENT_LENGTH_EVENT) {
        partSize = progressEvent.getBytes();
        ArchiveUploadHighLevel.this.log.info("Part size: " + partSize);
    }

    if (progressEvent.getEventType() == ProgressEventType.CLIENT_REQUEST_SUCCESS_EVENT) {
        counter += partSize;
        int percentage = (int)(counter * 100.0 / total);
        ArchiveUploadHighLevel.this.log.info("Successfully transferred: " + counter + " / " + total + " (" + percentage + "%)");
    }
}
Project: ibm-cos-sdk-java    File: AmazonHttpClient.java
private Response<Output> doExecute() throws InterruptedException {
    runBeforeRequestHandlers();
    setSdkTransactionId(request);
    setUserAgent(request);

    ProgressListener listener = requestConfig.getProgressListener();
    // add custom headers
    request.getHeaders().putAll(config.getHeaders());
    request.getHeaders().putAll(requestConfig.getCustomRequestHeaders());
    // add custom query parameters
    mergeQueryParameters(requestConfig.getCustomQueryParameters());
    Response<Output> response = null;
    final InputStream origContent = request.getContent();
    final InputStream toBeClosed = beforeRequest(); // for progress tracking
    // make "notCloseable", so reset would work with retries
    final InputStream notCloseable = (toBeClosed == null) ? null
            : ReleasableInputStream.wrap(toBeClosed).disableClose();
    request.setContent(notCloseable);
    try {
        publishProgress(listener, ProgressEventType.CLIENT_REQUEST_STARTED_EVENT);
        response = executeHelper();
        publishProgress(listener, ProgressEventType.CLIENT_REQUEST_SUCCESS_EVENT);
        awsRequestMetrics.getTimingInfo().endTiming();
        afterResponse(response);
        return response;
    } catch (AmazonClientException e) {
        publishProgress(listener, ProgressEventType.CLIENT_REQUEST_FAILED_EVENT);
        afterError(response, e);
        throw e;
    } finally {
        // Always close so any progress tracking would get the final events propagated.
        closeQuietly(toBeClosed, log);
        request.setContent(origContent); // restore the original content
    }
}
Project: ibm-cos-sdk-java    File: AmazonHttpClient.java
/**
 * Pause before the next retry and record metrics around retry behavior.
 */
private void pauseBeforeRetry(ExecOneRequestParams execOneParams,
                              final ProgressListener listener) throws InterruptedException {
    // Notify the progress listener of the retry
    publishProgress(listener, ProgressEventType.CLIENT_REQUEST_RETRY_EVENT);
    awsRequestMetrics.startEvent(Field.RetryPauseTime);
    try {
        doPauseBeforeRetry(execOneParams);
    } finally {
        awsRequestMetrics.endEvent(Field.RetryPauseTime);
    }
}
Project: ibm-cos-sdk-java    File: ProgressListenerWithEventCodeVerification.java
@Override
public void progressChanged(ProgressEvent progressEvent) {
    ProgressEventType type = progressEvent.getEventType();
    if (type.isByteCountEvent())
        return;
    if (type != types[count]) {
        throw new AssertionError("Expect event type "
                + types[count] + " but got "
                + progressEvent.getEventType());
    }
    count++;
}
Project: ibm-cos-sdk-java    File: TransferCompletionFilter.java
@Override
public ProgressEvent filter(ProgressEvent progressEvent) {
    // Block COMPLETED events from the low-level GetObject operation,
    // but keep the bytes-transferred events flowing through
    return progressEvent.getEventType() == ProgressEventType.TRANSFER_COMPLETED_EVENT
         ? null // discard this event
         : progressEvent
         ;
}
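
For context, a sketch of a custom filter with the same shape, assuming the ProgressListenerChain.ProgressEventFilter interface that TransferCompletionFilter overrides: returning null discards an event before it reaches the chained listeners, so the same pattern can suppress noisy byte-count events.

import com.amazonaws.event.ProgressEvent;
import com.amazonaws.event.ProgressListenerChain;

// Hedged sketch: drop byte-count events, pass everything else through unchanged.
public class ByteCountSuppressingFilter implements ProgressListenerChain.ProgressEventFilter {
    @Override
    public ProgressEvent filter(ProgressEvent progressEvent) {
        return progressEvent.getEventType().isByteCountEvent()
             ? null // discard this event
             : progressEvent;
    }
}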
Project: ibm-cos-sdk-java    File: CopyCallable.java
public CopyResult call() throws Exception {
    copy.setState(TransferState.InProgress);
    if (isMultipartCopy()) {
        publishProgress(listenerChain, ProgressEventType.TRANSFER_STARTED_EVENT);
        copyInParts();
        return null;
    } else {
        return copyInOneChunk();
    }
}
Project: ibm-cos-sdk-java    File: MultipleFileTransfer.java
/**
 * Override this method so that TransferState updates are also sent out to the
 * progress listener chain in the form of ProgressEvents.
 */
@Override
public void setState(TransferState state) {
    super.setState(state);

    switch (state) {
    case Waiting:
        fireProgressEvent(ProgressEventType.TRANSFER_PREPARING_EVENT);
        break;
    case InProgress:
        if ( subTransferStarted.compareAndSet(false, true) ) {
            /* The first InProgress signal */
            fireProgressEvent(ProgressEventType.TRANSFER_STARTED_EVENT);
        }
        /* Don't need any event code update for subsequent InProgress signals */
        break;
    case Completed:
        fireProgressEvent(ProgressEventType.TRANSFER_COMPLETED_EVENT);
        break;
    case Canceled:
        fireProgressEvent(ProgressEventType.TRANSFER_CANCELED_EVENT);
        break;
    case Failed:
        fireProgressEvent(ProgressEventType.TRANSFER_FAILED_EVENT);
        break;
    default:
        break;
    }
}
Project: ibm-cos-sdk-java    File: CompleteMultipartUpload.java
@Override
public UploadResult call() throws Exception {
    CompleteMultipartUploadResult res;

    try {
        CompleteMultipartUploadRequest req = new CompleteMultipartUploadRequest(
                origReq.getBucketName(), origReq.getKey(), uploadId,
                collectPartETags())
                .withRequesterPays(origReq.isRequesterPays())
            .withGeneralProgressListener(origReq.getGeneralProgressListener())
            .withRequestMetricCollector(origReq.getRequestMetricCollector())
            ;
        res = s3.completeMultipartUpload(req);
    } catch (Exception e) {
        publishProgress(listener, ProgressEventType.TRANSFER_FAILED_EVENT);
        throw e;
    }

    UploadResult uploadResult = new UploadResult();
    uploadResult.setBucketName(origReq.getBucketName());
    uploadResult.setKey(origReq.getKey());
    uploadResult.setETag(res.getETag());
    uploadResult.setVersionId(res.getVersionId());

    monitor.uploadComplete();

    return uploadResult;
}
Project: ibm-cos-sdk-java    File: CopyMonitor.java
void copyComplete() {
    markAllDone();
    transfer.setState(TransferState.Completed);
    // AmazonS3Client takes care of all the events for single part uploads,
    // so we only need to send a completed event for multipart uploads.
    if (multipartCopyCallable.isMultipartCopy()) {
        publishProgress(listener, ProgressEventType.TRANSFER_COMPLETED_EVENT);
    }
}
Project: ibm-cos-sdk-java    File: UploadMonitor.java
void uploadComplete() {
    markAllDone();
    transfer.setState(TransferState.Completed);

    // AmazonS3Client takes care of all the events for single part uploads,
    // so we only need to send a completed event for multipart uploads.
    if (multipartUploadCallable.isMultipartUpload()) {
        publishProgress(listener, ProgressEventType.TRANSFER_COMPLETED_EVENT);
    }
}
Project: ibm-cos-sdk-java    File: UploadCallable.java
public UploadResult call() throws Exception {
    upload.setState(TransferState.InProgress);
    if ( isMultipartUpload() ) {
        publishProgress(listener, ProgressEventType.TRANSFER_STARTED_EVENT);
        return uploadInParts();
    } else {
        return uploadInOneChunk();
    }
}
Project: ibm-cos-sdk-java    File: CompleteMultipartCopy.java
@Override
public CopyResult call() throws Exception {
    CompleteMultipartUploadResult res;

    try {
        CompleteMultipartUploadRequest req = new CompleteMultipartUploadRequest(
                origReq.getDestinationBucketName(), origReq.getDestinationKey(), uploadId,
                collectPartETags())
                .withRequesterPays(origReq.isRequesterPays())
                .withGeneralProgressListener(origReq.getGeneralProgressListener())
                .withRequestMetricCollector(origReq.getRequestMetricCollector())
                ;
        res = s3.completeMultipartUpload(req);
    } catch (Exception e) {
        publishProgress(listener, ProgressEventType.TRANSFER_FAILED_EVENT);
        throw e;
    }

    CopyResult copyResult = new CopyResult();
    copyResult.setSourceBucketName(origReq.getSourceBucketName());
    copyResult.setSourceKey(origReq.getSourceKey());
    copyResult.setDestinationBucketName(res.getBucketName());
    copyResult.setDestinationKey(res.getKey());
    copyResult.setETag(res.getETag());
    copyResult.setVersionId(res.getVersionId());

    monitor.copyComplete();

    return copyResult;
}
Project: thunderbit    File: AmazonS3Storage.java
@Override
public F.Promise<Void> store(Path path, String key, String name) {
    Promise<Void> promise = Futures.promise();

    TransferManager transferManager = new TransferManager(credentials);
    try {
        Upload upload = transferManager.upload(bucketName, key, path.toFile());
        upload.addProgressListener((ProgressListener) progressEvent -> {
            if (progressEvent.getEventType().isTransferEvent()) {
                if (progressEvent.getEventType().equals(ProgressEventType.TRANSFER_COMPLETED_EVENT)) {
                    transferManager.shutdownNow();
                    promise.success(null);
                } else if (progressEvent.getEventType().equals(ProgressEventType.TRANSFER_FAILED_EVENT)) {
                    transferManager.shutdownNow();
                    logger.error(progressEvent.toString());
                    promise.failure(new Exception(progressEvent.toString()));
                }
            }
        });
    } catch (AmazonServiceException ase) {
        logAmazonServiceException(ase);
    } catch (AmazonClientException ace) {
        logAmazonClientException(ace);
    }

    return F.Promise.wrap(promise.future());
}
Project: thunderbit    File: AmazonS3Storage.java
@Override
public F.Promise<Void> delete(String key, String name) {
    Promise<Void> promise = Futures.promise();

    AmazonS3 amazonS3 = new AmazonS3Client(credentials);
    DeleteObjectRequest request = new DeleteObjectRequest(bucketName, key);
    request.withGeneralProgressListener(progressEvent -> {
        if (progressEvent.getEventType().isTransferEvent()) {
            if (progressEvent.getEventType().equals(ProgressEventType.TRANSFER_COMPLETED_EVENT)) {
                promise.success(null);
            } else if (progressEvent.getEventType().equals(ProgressEventType.TRANSFER_FAILED_EVENT)) {
                logger.error(progressEvent.toString());
                promise.failure(new Exception(progressEvent.toString()));
            }
        }
    });

    try {
        amazonS3.deleteObject(request);
    } catch (AmazonServiceException ase) {
        logAmazonServiceException(ase);
    } catch (AmazonClientException ace) {
        logAmazonClientException(ace);
    }

    return F.Promise.wrap(promise.future());
}
Project: esthree    File: Get.java
@Override
public Integer call() throws Exception {

    // holds the most up-to-date digest: initialized here, later replaced by the most recent valid digest
    currentDigest = MessageDigest.getInstance("MD5");
    currentDigest = retryingGet();

    if(progressListener != null) {
        progressListener.progressChanged(new ProgressEvent(ProgressEventType.TRANSFER_STARTED_EVENT));
    }

    if (!fullETag.contains("-")) {
        byte[] expected = BinaryUtils.fromHex(fullETag);
        byte[] current = currentDigest.digest();
        if (!Arrays.equals(expected, current)) {
            throw new AmazonClientException("Unable to verify integrity of data download.  "
                    + "Client calculated content hash didn't match hash calculated by Amazon S3.  "
                    + "The data may be corrupt.");
        }
    } else {
        // TODO log warning that we can't validate the MD5
        // TODO implement this algorithm for several common chunk sizes http://stackoverflow.com/questions/6591047/etag-definition-changed-in-amazon-s3
        if(verbose) {
            System.err.println("\nMD5 does not exist on AWS for file, calculated value: " + BinaryUtils.toHex(currentDigest.digest()));
        }
    }
    // TODO add ability to resume from previously downloaded chunks
    // TODO add rate limiter

    return 0;

}
Project: cfnassist    File: CloudClient.java
@Override
public void progressChanged(ProgressEvent progressEvent) {
    if (progressEvent.getEventType() == ProgressEventType.CLIENT_REQUEST_FAILED_EVENT) {
        logger.warn(progressEvent.toString());
    }
    logger.info(progressEvent.toString());
}
Project: ibm-cos-sdk-java    File: ProgressListenerWithEventCodeVerification.java
public ProgressListenerWithEventCodeVerification(ProgressEventType... types) {
    this.types = types.clone();
}
Project: ibm-cos-sdk-java    File: AmazonS3Client.java
private UploadPartResult doUploadPart(final String bucketName,
        final String key, final String uploadId, final int partNumber,
        final long partSize, Request<UploadPartRequest> request,
        InputStream inputStream,
        MD5DigestCalculatingInputStream md5DigestStream,
        final ProgressListener listener) {
    try {
        request.setContent(inputStream);
        ObjectMetadata metadata = invoke(request, new S3MetadataResponseHandler(), bucketName, key);
        final String etag = metadata.getETag();

        if (md5DigestStream != null
                && !skipMd5CheckStrategy.skipClientSideValidationPerUploadPartResponse(metadata)) {
            byte[] clientSideHash = md5DigestStream.getMd5Digest();
            byte[] serverSideHash = BinaryUtils.fromHex(etag);

            if (!Arrays.equals(clientSideHash, serverSideHash)) {
                final String info = "bucketName: " + bucketName + ", key: "
                        + key + ", uploadId: " + uploadId
                        + ", partNumber: " + partNumber + ", partSize: "
                        + partSize;
                throw new SdkClientException(
                     "Unable to verify integrity of data upload.  "
                    + "Client calculated content hash (contentMD5: "
                    + Base16.encodeAsString(clientSideHash)
                    + " in hex) didn't match hash (etag: "
                    + etag
                    + " in hex) calculated by Amazon S3.  "
                    + "You may need to delete the data stored in Amazon S3. "
                    + "(" + info + ")");
            }
        }
        publishProgress(listener, ProgressEventType.TRANSFER_PART_COMPLETED_EVENT);
        UploadPartResult result = new UploadPartResult();
        result.setETag(etag);
        result.setPartNumber(partNumber);
        result.setSSEAlgorithm(metadata.getSSEAlgorithm());
        result.setSSECustomerAlgorithm(metadata.getSSECustomerAlgorithm());
        result.setSSECustomerKeyMd5(metadata.getSSECustomerKeyMd5());
        result.setRequesterCharged(metadata.isRequesterCharged());
        return result;
    } catch (Throwable t) {
        publishProgress(listener, ProgressEventType.TRANSFER_PART_FAILED_EVENT);
        // Leaving this here in case anyone is depending on it, but it's
        // inconsistent with other methods which only generate one of
        // COMPLETED_EVENT_CODE or FAILED_EVENT_CODE.
        publishProgress(listener, ProgressEventType.TRANSFER_PART_COMPLETED_EVENT);
        throw failure(t);
    }
}
Project: ibm-cos-sdk-java    File: AbstractTransfer.java
protected void fireProgressEvent(final ProgressEventType eventType) {
    publishProgress(listenerChain, eventType);
}
Project: ibm-cos-sdk-java    File: UploadMonitor.java
/**
 * Cancels all the futures associated with this upload operation. Also
 * cleans up the parts on Amazon S3 if the upload is performed as a
 * multi-part upload operation.
 */
void performAbort() {
    cancelFutures();
    multipartUploadCallable.performAbortMultipartUpload();
    publishProgress(listener, ProgressEventType.TRANSFER_CANCELED_EVENT);
}
Project: ibm-cos-sdk-java    File: ProgressEvent.java
public ProgressEvent(long bytesTransferred) {
    super(ProgressEventType.BYTE_TRANSFER_EVENT, bytesTransferred);
}
Project: ibm-cos-sdk-java    File: ProgressEvent.java
public ProgressEvent(ProgressEventType eventType) {
    super(eventType);
}
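
The esthree snippets above also use a two-argument constructor that carries both an event type and a byte count. A minimal sketch of publishing such events to a listener (the helper class and method names here are hypothetical):

import com.amazonaws.event.ProgressEvent;
import com.amazonaws.event.ProgressEventType;
import com.amazonaws.event.ProgressListener;

// Hypothetical helper: publish a byte-transfer event for each chunk read from a response stream.
final class ChunkProgressReporter {
    private final ProgressListener listener;

    ChunkProgressReporter(ProgressListener listener) {
        this.listener = listener;
    }

    void chunkRead(int bytesRead) {
        // Two-argument form: the event type plus the byte count it carries.
        listener.progressChanged(
                new ProgressEvent(ProgressEventType.RESPONSE_BYTE_TRANSFER_EVENT, bytesRead));
    }
}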
Project: photon-model    File: AWSCostStatsService.java
private void downloadParseAndCreateStats(AWSCostStatsCreationContext statsData, String awsBucketName) throws IOException {
    try {
        // Creating a working directory for downloading and processing the bill
        final Path workingDirPath = Paths.get(System.getProperty(TEMP_DIR_LOCATION),
                UUID.randomUUID().toString());
        Files.createDirectories(workingDirPath);

        AWSCsvBillParser parser = new AWSCsvBillParser();
        final String csvBillZipFileName = parser
                .getCsvBillFileName(statsData.billMonthToDownload, statsData.accountId, true);
        Path csvBillZipFilePath = Paths.get(workingDirPath.toString(), csvBillZipFileName);
        ProgressListener listener = new ProgressListener() {
            @Override
            public void progressChanged(ProgressEvent progressEvent) {
                try {
                    ProgressEventType eventType = progressEvent.getEventType();
                    if (ProgressEventType.TRANSFER_COMPLETED_EVENT.equals(eventType)) {
                        OperationContext.restoreOperationContext(statsData.opContext);
                        LocalDate billMonth = new LocalDate(
                                statsData.billMonthToDownload.getYear(),
                                statsData.billMonthToDownload.getMonthOfYear(), 1);

                        logWithContext(statsData, Level.INFO, () -> String.format("Processing" +
                                " bill for the month: %s.", billMonth));

                        parser.parseDetailedCsvBill(statsData.ignorableInvoiceCharge, csvBillZipFilePath,
                                statsData.awsAccountIdToComputeStates.keySet(),
                                getHourlyStatsConsumer(billMonth, statsData),
                                getMonthlyStatsConsumer(billMonth, statsData));
                        deleteTempFiles();
                        // Continue downloading and processing bills for the subsequent months
                        statsData.billMonthToDownload = statsData.billMonthToDownload.plusMonths(1);
                        handleCostStatsCreationRequest(statsData);
                    } else if (ProgressEventType.TRANSFER_FAILED_EVENT.equals(eventType)) {
                        deleteTempFiles();
                        billDownloadFailureHandler(statsData, awsBucketName, new IOException(
                                "Download of AWS CSV Bill '" + csvBillZipFileName + "' failed."));
                    }
                } catch (Exception exception) {
                    deleteTempFiles();
                    billDownloadFailureHandler(statsData, awsBucketName, exception);
                }
            }

            private void deleteTempFiles() {
                try {
                    Files.deleteIfExists(csvBillZipFilePath);
                    Files.deleteIfExists(workingDirPath);
                } catch (IOException e) {
                    // Ignore IO exception while cleaning files.
                }
            }
        };
        GetObjectRequest getObjectRequest = new GetObjectRequest(awsBucketName,
                csvBillZipFileName).withGeneralProgressListener(listener);
        statsData.s3Client.download(getObjectRequest, csvBillZipFilePath.toFile());
    } catch (AmazonS3Exception s3Exception) {
        billDownloadFailureHandler(statsData, awsBucketName, s3Exception);
    }
}
Project: spring-cloud-stream-app-starters    File: AmazonS3SinkMockTests.java
@Test
@Override
public void test() throws Exception {
    AmazonS3 amazonS3Client = TestUtils.getPropertyValue(this.s3MessageHandler, "transferManager.s3",
            AmazonS3.class);

    File file = this.temporaryFolder.newFile("foo.mp3");
    Message<?> message = MessageBuilder.withPayload(file)
            .build();

    this.channels.input().send(message);

    ArgumentCaptor<PutObjectRequest> putObjectRequestArgumentCaptor =
            ArgumentCaptor.forClass(PutObjectRequest.class);
    verify(amazonS3Client, atLeastOnce()).putObject(putObjectRequestArgumentCaptor.capture());

    PutObjectRequest putObjectRequest = putObjectRequestArgumentCaptor.getValue();
    assertThat(putObjectRequest.getBucketName(), equalTo(S3_BUCKET));
    assertThat(putObjectRequest.getKey(), equalTo("foo.mp3"));
    assertNotNull(putObjectRequest.getFile());
    assertNull(putObjectRequest.getInputStream());

    ObjectMetadata metadata = putObjectRequest.getMetadata();
    assertThat(metadata.getContentMD5(), equalTo(Md5Utils.md5AsBase64(file)));
    assertThat(metadata.getContentLength(), equalTo(0L));
    assertThat(metadata.getContentType(), equalTo("audio/mpeg"));

    ProgressListener listener = putObjectRequest.getGeneralProgressListener();
    S3ProgressPublisher.publishProgress(listener, ProgressEventType.TRANSFER_COMPLETED_EVENT);

    assertTrue(this.transferCompletedLatch.await(10, TimeUnit.SECONDS));
    assertTrue(this.aclLatch.await(10, TimeUnit.SECONDS));

    ArgumentCaptor<SetObjectAclRequest> setObjectAclRequestArgumentCaptor =
            ArgumentCaptor.forClass(SetObjectAclRequest.class);
    verify(amazonS3Client).setObjectAcl(setObjectAclRequestArgumentCaptor.capture());

    SetObjectAclRequest setObjectAclRequest = setObjectAclRequestArgumentCaptor.getValue();

    assertThat(setObjectAclRequest.getBucketName(), equalTo(S3_BUCKET));
    assertThat(setObjectAclRequest.getKey(), equalTo("foo.mp3"));
    assertNull(setObjectAclRequest.getAcl());
    assertThat(setObjectAclRequest.getCannedAcl(), equalTo(CannedAccessControlList.PublicReadWrite));
}
Project: esthree    File: GetMultipart.java
@Override
public Integer call() throws Exception {
    ObjectMetadata om = amazonS3Client.getObjectMetadata(bucket, key);
    contentLength = om.getContentLength();

    // holds the most up-to-date digest: initialized here, later replaced by the most recent valid digest
    currentDigest = MessageDigest.getInstance("MD5");
    chunkSize = chunkSize == null ? DEFAULT_CHUNK_SIZE : chunkSize;
    fileParts = Parts.among(contentLength, chunkSize);
    for (Part fp : fileParts) {

        /*
         * We'll need to compute the digest on the full incoming stream for
         * each valid chunk that comes in. Invalid chunks will need to be
         * recomputed and fed through a copy of the MD5 that was valid up
         * until the latest chunk.
         */
        currentDigest = retryingGetWithRange(fp.start, fp.end);
    }

    // TODO fix total content length progress bar
    if(progressListener != null) {
        progressListener.progressChanged(new ProgressEvent(ProgressEventType.TRANSFER_STARTED_EVENT));
    }

    String fullETag = om.getETag();
    if (!fullETag.contains("-")) {
        byte[] expected = BinaryUtils.fromHex(fullETag);
        byte[] current = currentDigest.digest();
        if (!Arrays.equals(expected, current)) {
            throw new AmazonClientException("Unable to verify integrity of data download.  "
                    + "Client calculated content hash didn't match hash calculated by Amazon S3.  "
                    + "The data may be corrupt.");
        }
    } else {
        // TODO log warning that we can't validate the MD5
        if(verbose) {
            System.err.println("\nMD5 does not exist on AWS for file, calculated value: " + BinaryUtils.toHex(currentDigest.digest()));
        }
    }
    // TODO add ability to resume from previously downloaded chunks
    // TODO add rate limiter

    return 0;
}
Project: cfnassist    File: UploadProgressListener.java
private void processReceivedEvent(ProgressEvent progressEvent) {
    ProgressEventType eventType = progressEvent.getEventType();

    long bytesTransferred = progressEvent.getBytesTransferred();
    switch (eventType) {
    case REQUEST_BYTE_TRANSFER_EVENT:
        done += bytesTransferred;
        chunk -= bytesTransferred;
        if (chunk <= 0) {
            logger.info(String.format("Sent %s of %s bytes", done, total));
            chunk = total / CHUNK;
        }
        break;
    case TRANSFER_COMPLETED_EVENT:
        logger.info("Transfer finished");
        break;
    case TRANSFER_FAILED_EVENT:
        logger.error("Transfer failed");
        break;
    case TRANSFER_STARTED_EVENT:
        done = 0;
        logger.info("Transfer started");
        break;
    case REQUEST_CONTENT_LENGTH_EVENT:
        total = progressEvent.getBytes();
        chunk = total / CHUNK;
        logger.info("Length is " + progressEvent.getBytes());
        break;
    case CLIENT_REQUEST_STARTED_EVENT:
    case HTTP_REQUEST_STARTED_EVENT:
    case HTTP_REQUEST_COMPLETED_EVENT:
    case HTTP_RESPONSE_STARTED_EVENT:
    case HTTP_RESPONSE_COMPLETED_EVENT:
    case CLIENT_REQUEST_SUCCESS_EVENT:
        // no-op
        break;
    default:
        logger.debug("Transfer event " + progressEvent.getEventType() + "; transferred bytes: " + bytesTransferred);
        break;
    }
}
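
Finally, a hedged wiring sketch assuming the same withGeneralProgressListener mutator used in the thunderbit, photon-model, and ibm-cos-sdk-java snippets above; the bucket, key, and file names are hypothetical, and LoggingProgressListener is the sketch class from the top of this page.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3Client;
import com.amazonaws.services.s3.model.PutObjectRequest;
import java.io.File;

public class UploadWithProgress {
    public static void main(String[] args) {
        AmazonS3 s3 = new AmazonS3Client(); // default credential chain; kept brief for the sketch
        PutObjectRequest put = new PutObjectRequest("my-bucket", "my-key", new File("data.bin"))
                .withGeneralProgressListener(new LoggingProgressListener());
        s3.putObject(put);
    }
}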