Java 类 com.amazonaws.event.ProgressEvent 实例源码 (Java usage examples of class com.amazonaws.event.ProgressEvent)

项目:circus-train    文件:RetriableFileCopyCommand.java   
@Override
public void progressChanged(ProgressEvent progressEvent) {
  // Surface transfer lifecycle transitions as the task status so progress is
  // visible in the job UI; byte-count and other events are ignored.
  StringBuilder message = new StringBuilder();
  switch (progressEvent.getEventType()) {
  case TRANSFER_STARTED_EVENT:
    message.append("Starting: ").append(description);
    break;
  case TRANSFER_COMPLETED_EVENT:
    message.append("Completed: ").append(description);
    break;
  case TRANSFER_FAILED_EVENT:
    // Fixed typo: was "Falied".
    message.append("Failed: ").append(description);
    break;
  default:
    break;
  }

  // Only update the status for lifecycle events; previously every other
  // event type overwrote the status with an empty string.
  if (message.length() > 0) {
    context.setStatus(message.toString());
  }
}
项目:hadoop    文件:S3AOutputStream.java   
public void progressChanged(ProgressEvent progressEvent) {
  // Relay the event to the Hadoop progress reporter so long uploads do not
  // make the task appear hung.
  if (progress != null) {
    progress.progress();
  }

  // There are 3 http ops here, but this should be close enough for now.
  // Guard against a null statistics object: the byte-delta update below
  // already null-checks statistics, so the write-op counter must do the
  // same or this line can throw an NPE.
  if (statistics != null
      && (progressEvent.getEventCode() == ProgressEvent.PART_STARTED_EVENT_CODE ||
          progressEvent.getEventCode() == ProgressEvent.COMPLETED_EVENT_CODE)) {
    statistics.incrementWriteOps(1);
  }

  // Record only the bytes transferred since the last callback.
  long transferred = upload.getProgress().getBytesTransferred();
  long delta = transferred - lastBytesTransferred;
  if (statistics != null && delta != 0) {
    statistics.incrementBytesWritten(delta);
  }

  lastBytesTransferred = transferred;
}
项目:aliyun-oss-hadoop-fs    文件:S3AOutputStream.java   
public void progressChanged(ProgressEvent progressEvent) {
  // Relay the event to the Hadoop progress reporter so long uploads do not
  // make the task appear hung.
  if (progress != null) {
    progress.progress();
  }

  // There are 3 http ops here, but this should be close enough for now.
  // Guard against a null statistics object: the byte-delta update below
  // already null-checks statistics, so the write-op counter must do the
  // same or this line can throw an NPE.
  ProgressEventType pet = progressEvent.getEventType();
  if (statistics != null
      && (pet == TRANSFER_PART_STARTED_EVENT ||
          pet == TRANSFER_COMPLETED_EVENT)) {
    statistics.incrementWriteOps(1);
  }

  // Record only the bytes transferred since the last callback.
  long transferred = upload.getProgress().getBytesTransferred();
  long delta = transferred - lastBytesTransferred;
  if (statistics != null && delta != 0) {
    statistics.incrementBytesWritten(delta);
  }

  lastBytesTransferred = transferred;
}
项目:big-c    文件:S3AOutputStream.java   
public void progressChanged(ProgressEvent progressEvent) {
  // Relay the event to the Hadoop progress reporter so long uploads do not
  // make the task appear hung.
  if (progress != null) {
    progress.progress();
  }

  // There are 3 http ops here, but this should be close enough for now.
  // Guard against a null statistics object: the byte-delta update below
  // already null-checks statistics, so the write-op counter must do the
  // same or this line can throw an NPE.
  if (statistics != null
      && (progressEvent.getEventCode() == ProgressEvent.PART_STARTED_EVENT_CODE ||
          progressEvent.getEventCode() == ProgressEvent.COMPLETED_EVENT_CODE)) {
    statistics.incrementWriteOps(1);
  }

  // Record only the bytes transferred since the last callback.
  long transferred = upload.getProgress().getBytesTransferred();
  long delta = transferred - lastBytesTransferred;
  if (statistics != null && delta != 0) {
    statistics.incrementBytesWritten(delta);
  }

  lastBytesTransferred = transferred;
}
项目:ServerlessJavaMaven    文件:ServerlessDeployMojo.java   
@Override
public void progressChanged(ProgressEvent event)
{
    // Records the total request size when it becomes known, then logs upload
    // progress in 10% increments as bytes are transferred.
    if ( event.getEventType() == ProgressEventType.REQUEST_CONTENT_LENGTH_EVENT )
    {
        contentLength = event.getBytes();
        getLog().info("Content size: " + contentLength + " bytes");
    }
    else if ( event.getEventType() == ProgressEventType.REQUEST_BYTE_TRANSFER_EVENT )
    {
        contentSent += event.getBytesTransferred();
        // Guard: the content-length event may not have arrived yet (or the
        // payload may be empty); without this check the division below would
        // produce NaN/Infinity and a bogus percentage.
        if ( contentLength <= 0 )
        {
            return;
        }
        // Tens digit of the completed percentage (0..10).
        int tenPct = (int) (contentSent * 100.0 / contentLength) / 10;
        if ( tenPct > lastTenPct )
        {
            lastTenPct = tenPct;
            getLog().info("Uploaded " + (tenPct * 10) + "% of " + (contentLength / (1024 * 1024)) + " MB");
        }
    }
}
项目:spring-cloud-stream-app-starters    文件:AmazonS3SinkMockTests.java   
@Bean
public S3ProgressListener s3ProgressListener() {
    // Test listener: releases the latch once the S3 transfer reports completion.
    return new S3ProgressListener() {

        @Override
        public void onPersistableTransfer(PersistableTransfer persistableTransfer) {
            // Pause/resume is not exercised by these tests.
        }

        @Override
        public void progressChanged(ProgressEvent progressEvent) {
            ProgressEventType eventType = progressEvent.getEventType();
            // Enum identity comparison is equivalent to equals() here.
            if (eventType == ProgressEventType.TRANSFER_COMPLETED_EVENT) {
                transferCompletedLatch().countDown();
            }
        }

    };
}
项目:hadoop-2.6.0-cdh5.4.3    文件:S3AOutputStream.java   
public void progressChanged(ProgressEvent progressEvent) {
  // Relay the event to the Hadoop progress reporter so long uploads do not
  // make the task appear hung.
  if (progress != null) {
    progress.progress();
  }

  // There are 3 http ops here, but this should be close enough for now.
  // Guard against a null statistics object: the byte-delta update below
  // already null-checks statistics, so the write-op counter must do the
  // same or this line can throw an NPE.
  if (statistics != null
      && (progressEvent.getEventCode() == ProgressEvent.PART_STARTED_EVENT_CODE ||
          progressEvent.getEventCode() == ProgressEvent.COMPLETED_EVENT_CODE)) {
    statistics.incrementWriteOps(1);
  }

  // Record only the bytes transferred since the last callback.
  long transferred = upload.getProgress().getBytesTransferred();
  long delta = transferred - lastBytesTransferred;
  if (statistics != null && delta != 0) {
    statistics.incrementBytesWritten(delta);
  }

  lastBytesTransferred = transferred;
}
项目:presto    文件:PrestoS3FileSystem.java   
private ProgressListener createProgressListener(Transfer transfer)
{
    // Emits a debug line on every event-type transition and on each
    // additional 10% of upload progress.
    return new ProgressListener()
    {
        private ProgressEventType previousType;
        private double previousTransferred;

        @Override
        public synchronized void progressChanged(ProgressEvent progressEvent)
        {
            ProgressEventType currentType = progressEvent.getEventType();
            if (currentType != previousType) {
                log.debug("Upload progress event (%s/%s): %s", host, key, currentType);
                previousType = currentType;
            }

            double percent = transfer.getProgress().getPercentTransferred();
            if (percent >= (previousTransferred + 10.0)) {
                log.debug("Upload percentage (%s/%s): %.0f%%", host, key, percent);
                previousTransferred = percent;
            }
        }
    };
}
项目:esthree    文件:PrintingProgressListener.java   
@Override
public void progressChanged(ProgressEvent progressEvent) {
    // Renders a single-line console progress bar with byte counts and an
    // estimated time remaining smoothed by an exponential moving average.
    ProgressEventType type = progressEvent.getEventType();
    // Start a fresh line at transfer start/end so the carriage-return-based
    // bar below does not overwrite unrelated console output.
    if (type.equals(TRANSFER_COMPLETED_EVENT) || type.equals(TRANSFER_STARTED_EVENT)) {
        out.println();
    }

    if (type.isByteCountEvent()) {
        long timeLeft = getTimeLeft();
        if (lastTimeLeft < 1 && timeLeft > 0) {
            // prime this value with a sane starting point
            lastTimeLeft = timeLeft;
        }

        // use an exponential moving average to smooth the estimate
        lastTimeLeft += 0.90 * (timeLeft - lastTimeLeft);

        // '\r' returns to the start of the line so the bar updates in place;
        // trailing spaces erase leftovers from a previously longer line.
        out.print(String.format("\r%1$s  %2$s / %3$s  %4$s      ",
                generate(saturatedCast(round(completed + (progress.getPercentTransferred() * multiplier)))),
                humanReadableByteCount(progress.getBytesTransferred(), true),
                humanReadableByteCount(progress.getTotalBytesToTransfer(), true), fromSeconds(lastTimeLeft)));
        out.flush();
    }
}
项目:esthree    文件:Get.java   
public MessageDigest copyAndHash(InputStream input, long totalBytes, Progress progress)
        throws IOException, CloneNotSupportedException {

    // Work on a clone so the caller's running digest is left untouched.
    MessageDigest computedDigest = (MessageDigest) currentDigest.clone();
    byte[] buffer = new byte[DEFAULT_BUF_SIZE];

    long bytesCopied = 0;
    int read;
    // Copy, hash and report progress one buffer at a time.
    while ((read = input.read(buffer)) != -1) {
        output.write(buffer, 0, read);
        if (progressListener != null) {
            progress.updateProgress(read);
            progressListener.progressChanged(new ProgressEvent(ProgressEventType.RESPONSE_BYTE_TRANSFER_EVENT, read));
        }
        computedDigest.update(buffer, 0, read);
        bytesCopied += read;
    }

    // Fail if the stream delivered fewer or more bytes than expected.
    if (bytesCopied != totalBytes) {
        throw new IOException(String.format("%d bytes downloaded instead of expected %d bytes", bytesCopied, totalBytes));
    }
    return computedDigest;
}
项目:esthree    文件:GetMultipart.java   
public MessageDigest copyAndHash(InputStream input, long totalBytes, Progress progress)
        throws IOException, CloneNotSupportedException {

    // Work on a clone so the caller's running digest is left untouched.
    MessageDigest computedDigest = (MessageDigest) currentDigest.clone();
    byte[] buffer = new byte[DEFAULT_BUF_SIZE];

    long bytesCopied = 0;
    int read;
    // Copy, hash and report progress one buffer at a time.
    while ((read = input.read(buffer)) != -1) {
        output.write(buffer, 0, read);
        if (progressListener != null) {
            progress.updateProgress(read);
            progressListener.progressChanged(new ProgressEvent(ProgressEventType.REQUEST_BYTE_TRANSFER_EVENT, read));
        }
        computedDigest.update(buffer, 0, read);
        bytesCopied += read;
    }

    // Fail if the stream delivered fewer or more bytes than expected.
    if (bytesCopied != totalBytes) {
        throw new IOException(String.format("%d bytes downloaded instead of expected %d bytes", bytesCopied, totalBytes));
    }
    return computedDigest;
}
项目:hadoop    文件:S3AFileSystem.java   
/**
 * Server-side copy of an object within the bucket, cloning the source
 * metadata and re-applying server-side encryption when configured.
 *
 * @param srcKey source object key
 * @param dstKey destination object key
 * @throws IOException if the copy is interrupted
 */
private void copyFile(String srcKey, String dstKey) throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("copyFile " + srcKey + " -> " + dstKey);
  }

  ObjectMetadata srcom = s3.getObjectMetadata(bucket, srcKey);
  final ObjectMetadata dstom = srcom.clone();
  if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
    dstom.setServerSideEncryption(serverSideEncryptionAlgorithm);
  }
  CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucket, srcKey, bucket, dstKey);
  copyObjectRequest.setCannedAccessControlList(cannedACL);
  copyObjectRequest.setNewObjectMetadata(dstom);

  // Count each completed part as a write op so FS statistics reflect
  // multipart copies.
  ProgressListener progressListener = new ProgressListener() {
    public void progressChanged(ProgressEvent progressEvent) {
      switch (progressEvent.getEventCode()) {
        case ProgressEvent.PART_COMPLETED_EVENT_CODE:
          statistics.incrementWriteOps(1);
          break;
        default:
          break;
      }
    }
  };

  Copy copy = transfers.copy(copyObjectRequest);
  copy.addProgressListener(progressListener);
  try {
    copy.waitForCopyResult();
    statistics.incrementWriteOps(1);
  } catch (InterruptedException e) {
    // Restore the interrupt status and preserve the original exception as
    // the cause rather than discarding it.
    Thread.currentThread().interrupt();
    throw new IOException("Got interrupted, cancelling", e);
  }
}
项目:simple-glacier-client    文件:ArchiveUploadHighLevel.java   
@Override
public void progressChanged(ProgressEvent progressEvent) {

    // Remember the declared part size, then advance the transferred counter
    // every time a client request succeeds.
    ProgressEventType eventType = progressEvent.getEventType();

    if (eventType == ProgressEventType.REQUEST_CONTENT_LENGTH_EVENT) {
        partSize = progressEvent.getBytes();
        ArchiveUploadHighLevel.this.log.info("Part size: " + partSize);
    }

    if (eventType == ProgressEventType.CLIENT_REQUEST_SUCCESS_EVENT) {
        counter += partSize;
        int percentage = (int) (counter * 100.0 / total);
        ArchiveUploadHighLevel.this.log.info("Successfully transferred: " + counter + " / " + total + " (" + percentage + "%)");
    }
}
项目:ibm-cos-sdk-java    文件:AmazonWebServiceRequestTest.java   
@Test
public void copyBaseTo() {
    // Verifies that copyBaseTo() transfers the progress listener, credentials,
    // metric collector, custom headers/query parameters and client options.
    final ProgressListener listener = new SyncProgressListener() {
        @Override
        public void progressChanged(ProgressEvent progressEvent) {
        }
    };
    final AWSCredentials credentials = new BasicAWSCredentials("accesskey",
            "accessid");
    final RequestMetricCollector collector = new RequestMetricCollector() {
        @Override
        public void collectMetrics(Request<?> request, Response<?> response) {
        }
    };

    final AmazonWebServiceRequest from = new AmazonWebServiceRequest() {
    };
    from.setGeneralProgressListener(listener);
    from.setRequestCredentials(credentials);
    from.setRequestMetricCollector(collector);
    from.putCustomRequestHeader("k1", "v1");
    from.putCustomRequestHeader("k2", "v2");
    from.putCustomQueryParameter("k1", "v1");
    from.putCustomQueryParameter("k2", "v2a");
    from.putCustomQueryParameter("k2", "v2b");
    from.getRequestClientOptions().setReadLimit(1234);

    final AmazonWebServiceRequest to = new AmazonWebServiceRequest() {
    };

    // Before copy: 'to' must still be in its default state.
    // (Removed the unused local 'RequestClientOptions toOptions'.)
    verifyBaseBeforeCopy(to);

    // After copy
    from.copyBaseTo(to);
    verifyBaseAfterCopy(listener, credentials, collector, from, to);
}
项目:ibm-cos-sdk-java    文件:ProgressListenerWithEventCodeVerification.java   
@Override
public void progressChanged(ProgressEvent progressEvent) {
    // Byte-count events arrive interleaved with state events; skip them so
    // only the ordered state transitions are verified against 'types'.
    ProgressEventType actual = progressEvent.getEventType();
    if (actual.isByteCountEvent()) {
        return;
    }
    if (actual != types[count]) {
        throw new AssertionError("Expect event type "
                + types[count] + " but got "
                + actual);
    }
    count++;
}
项目:ibm-cos-sdk-java    文件:TransferCompletionFilter.java   
@Override
public ProgressEvent filter(ProgressEvent progressEvent) {
    // Block COMPLETE events from the low-level GetObject operation,
    // but we still want to keep the BytesTransferred
    if (progressEvent.getEventType() == ProgressEventType.TRANSFER_COMPLETED_EVENT) {
        return null; // discard this event
    }
    return progressEvent;
}
项目:aliyun-oss-hadoop-fs    文件:S3AFileSystem.java   
/**
 * Server-side copy of an object within the bucket, cloning the source
 * metadata and re-applying the SSE algorithm when configured.
 *
 * @param srcKey source object key
 * @param dstKey destination object key
 * @throws IOException if the copy is interrupted
 */
private void copyFile(String srcKey, String dstKey) throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("copyFile " + srcKey + " -> " + dstKey);
  }

  ObjectMetadata srcom = s3.getObjectMetadata(bucket, srcKey);
  final ObjectMetadata dstom = srcom.clone();
  if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
    dstom.setSSEAlgorithm(serverSideEncryptionAlgorithm);
  }
  CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucket, srcKey, bucket, dstKey);
  copyObjectRequest.setCannedAccessControlList(cannedACL);
  copyObjectRequest.setNewObjectMetadata(dstom);

  // Count each completed part as a write op so FS statistics reflect
  // multipart copies.
  ProgressListener progressListener = new ProgressListener() {
    public void progressChanged(ProgressEvent progressEvent) {
      switch (progressEvent.getEventType()) {
        case TRANSFER_PART_COMPLETED_EVENT:
          statistics.incrementWriteOps(1);
          break;
        default:
          break;
      }
    }
  };

  Copy copy = transfers.copy(copyObjectRequest);
  copy.addProgressListener(progressListener);
  try {
    copy.waitForCopyResult();
    statistics.incrementWriteOps(1);
  } catch (InterruptedException e) {
    // Restore the interrupt status and preserve the original exception as
    // the cause rather than discarding it.
    Thread.currentThread().interrupt();
    throw new IOException("Got interrupted, cancelling", e);
  }
}
项目:big-c    文件:S3AFileSystem.java   
/**
 * Server-side copy of an object within the bucket, cloning the source
 * metadata and re-applying server-side encryption when configured.
 *
 * @param srcKey source object key
 * @param dstKey destination object key
 * @throws IOException if the copy is interrupted
 */
private void copyFile(String srcKey, String dstKey) throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("copyFile " + srcKey + " -> " + dstKey);
  }

  ObjectMetadata srcom = s3.getObjectMetadata(bucket, srcKey);
  final ObjectMetadata dstom = srcom.clone();
  if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
    dstom.setServerSideEncryption(serverSideEncryptionAlgorithm);
  }
  CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucket, srcKey, bucket, dstKey);
  copyObjectRequest.setCannedAccessControlList(cannedACL);
  copyObjectRequest.setNewObjectMetadata(dstom);

  // Count each completed part as a write op so FS statistics reflect
  // multipart copies.
  ProgressListener progressListener = new ProgressListener() {
    public void progressChanged(ProgressEvent progressEvent) {
      switch (progressEvent.getEventCode()) {
        case ProgressEvent.PART_COMPLETED_EVENT_CODE:
          statistics.incrementWriteOps(1);
          break;
        default:
          break;
      }
    }
  };

  Copy copy = transfers.copy(copyObjectRequest);
  copy.addProgressListener(progressListener);
  try {
    copy.waitForCopyResult();
    statistics.incrementWriteOps(1);
  } catch (InterruptedException e) {
    // Restore the interrupt status and preserve the original exception as
    // the cause rather than discarding it.
    Thread.currentThread().interrupt();
    throw new IOException("Got interrupted, cancelling", e);
  }
}
项目:emodb    文件:S3ScanWriter.java   
/**
 * Starts an asynchronous upload and returns a ListenableFuture for handling the result.
 */
synchronized ListenableFuture<String> upload() {
    // Reset values from possible prior attempt
    _attempts += 1;
    _bytesTransferred = 0;

    // Separate the future returned to the caller from the future generated by submitting the
    // putObject request.  If the writer is closed then uploadFuture may be canceled before it executes,
    // in which case it may not trigger any callbacks.  To ensure there is always a callback resultFuture is
    // tracked independently and, in the event that the upload is aborted, gets set on abort().
    _resultFuture = SettableFuture.create();

    _uploadFuture = _uploadService.submit(new Runnable() {
        @Override
        public void run() {
            try {
                ProgressListener progressListener = new ProgressListener() {
                    @Override
                    public void progressChanged(ProgressEvent progressEvent) {
                        // getBytesTransferred() returns zero for all events not pertaining to the file transfer
                        _bytesTransferred += progressEvent.getBytesTransferred();
                    }
                };

                PutObjectRequest putObjectRequest = new PutObjectRequest(_bucket, _key, _file);
                putObjectRequest.setGeneralProgressListener(progressListener);
                PutObjectResult result = _amazonS3.putObject(putObjectRequest);
                // Resolve the caller's future with the ETag of the stored object.
                _resultFuture.set(result.getETag());
            } catch (Throwable t) {
                // Propagate any failure (including Errors) to the caller via the future.
                _resultFuture.setException(t);
            }
        }
    });

    return _resultFuture;
}
项目:hadoop-2.6.0-cdh5.4.3    文件:S3AFileSystem.java   
/**
 * Server-side copy of an object within the bucket, cloning the source
 * metadata and re-applying server-side encryption when configured.
 *
 * @param srcKey source object key
 * @param dstKey destination object key
 * @throws IOException if the copy is interrupted
 */
private void copyFile(String srcKey, String dstKey) throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("copyFile " + srcKey + " -> " + dstKey);
  }

  ObjectMetadata srcom = s3.getObjectMetadata(bucket, srcKey);
  final ObjectMetadata dstom = srcom.clone();
  if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
    dstom.setServerSideEncryption(serverSideEncryptionAlgorithm);
  }
  CopyObjectRequest copyObjectRequest = new CopyObjectRequest(bucket, srcKey, bucket, dstKey);
  copyObjectRequest.setCannedAccessControlList(cannedACL);
  copyObjectRequest.setNewObjectMetadata(dstom);

  // Count each completed part as a write op so FS statistics reflect
  // multipart copies.
  ProgressListener progressListener = new ProgressListener() {
    public void progressChanged(ProgressEvent progressEvent) {
      switch (progressEvent.getEventCode()) {
        case ProgressEvent.PART_COMPLETED_EVENT_CODE:
          statistics.incrementWriteOps(1);
          break;
        default:
          break;
      }
    }
  };

  Copy copy = transfers.copy(copyObjectRequest);
  copy.addProgressListener(progressListener);
  try {
    copy.waitForCopyResult();
    statistics.incrementWriteOps(1);
  } catch (InterruptedException e) {
    // Restore the interrupt status and preserve the original exception as
    // the cause rather than discarding it.
    Thread.currentThread().interrupt();
    throw new IOException("Got interrupted, cancelling", e);
  }
}
项目:snake-game-aws    文件:S3UploadTask.java   
@Override
public void progressChanged(ProgressEvent pe) {
    // Accumulate the bytes reported by this event and push the running
    // total to the UI thread.
    long transferredNow = pe.getBytesTransferred();
    total += transferredNow;
    publishProgress(total);
    Log.i("bytestranferred:", total + "bytes");
}
项目:snake-game-aws    文件:S3DownloadTask.java   
@Override
public void progressChanged(ProgressEvent pe) {
    // Accumulate the bytes reported by this event and push the running
    // total to the UI thread.
    long transferredNow = pe.getBytesTransferred();
    total += transferredNow;
    publishProgress(total);
    Log.i("bytestranferred:", total + "bytes");
}
项目:esthree    文件:Get.java   
@Override
public Integer call() throws Exception {
    // Downloads the object (with retries) while hashing it, then verifies the
    // MD5 against the S3 ETag when the ETag is a plain (non-multipart) hash.
    // Returns 0 on success.

    // this is the most up to date digest, it's initialized here but later holds the most up to date valid digest
    currentDigest = MessageDigest.getInstance("MD5");
    currentDigest = retryingGet();

    // NOTE(review): this fires TRANSFER_STARTED_EVENT after retryingGet() has
    // already completed — presumably to reset the listener's display state;
    // confirm the intent.
    if(progressListener != null) {
        progressListener.progressChanged(new ProgressEvent(ProgressEventType.TRANSFER_STARTED_EVENT));
    }

    // A non-multipart S3 ETag is the hex MD5 of the object; multipart ETags
    // contain '-' and cannot be verified this way.
    if (!fullETag.contains("-")) {
        byte[] expected = BinaryUtils.fromHex(fullETag);
        byte[] current = currentDigest.digest();
        if (!Arrays.equals(expected, current)) {
            throw new AmazonClientException("Unable to verify integrity of data download.  "
                    + "Client calculated content hash didn't match hash calculated by Amazon S3.  "
                    + "The data may be corrupt.");
        }
    } else {
        // TODO log warning that we can't validate the MD5
        // TODO implement this algorithm for several common chunk sizes http://stackoverflow.com/questions/6591047/etag-definition-changed-in-amazon-s3
        if(verbose) {
            System.err.println("\nMD5 does not exist on AWS for file, calculated value: " + BinaryUtils.toHex(currentDigest.digest()));
        }
    }
    // TODO add ability to resume from previously downloaded chunks
    // TODO add rate limiter

    return 0;

}
项目:DeployMan    文件:SharpProgressListener.java   
@Override
public void progressChanged(ProgressEvent progressEvent) {
  // Track cumulative bytes; emit one '#' each time another fraction of the
  // transfer completes.
  long transferred = progressEvent.getBytesTransferred();
  this.total += transferred;

  if (this.total >= this.progress) {
    System.out.print("#"); //$NON-NLS-1$
    this.progress += this.fraction;
  }
}
项目:kickflip-android-sdk    文件:S3BroadcastManager.java   
public void queueUpload(final String bucket, final String key, final File file, boolean lastUpload) {
    // Builds a public-read PutObjectRequest for the file, attaches a listener
    // that measures average upload throughput, lets registered interceptors
    // adjust the request, then enqueues it (flagging whether it is the final
    // upload of the broadcast).
    if (VERBOSE) Log.i(TAG, "Queueing upload " + key);

    final PutObjectRequest por = new PutObjectRequest(bucket, key, file);
    por.setGeneralProgressListener(new ProgressListener() {
        final String url = "https://" + bucket + ".s3.amazonaws.com/" + key;
        private long uploadStartTime;

        @Override
        public void progressChanged(com.amazonaws.event.ProgressEvent progressEvent) {
            try {
                if (progressEvent.getEventCode() == ProgressEvent.STARTED_EVENT_CODE) {
                    uploadStartTime = System.currentTimeMillis();
                } else if (progressEvent.getEventCode() == com.amazonaws.event.ProgressEvent.COMPLETED_EVENT_CODE) {
                    // Average throughput over the whole upload.
                    long uploadDurationMillis = System.currentTimeMillis() - uploadStartTime;
                    int bytesPerSecond = (int) (file.length() / (uploadDurationMillis / 1000.0));
                    if (VERBOSE)
                        Log.i(TAG, "Uploaded " + file.length() / 1000.0 + " KB in " + (uploadDurationMillis) + "ms (" + bytesPerSecond / 1000.0 + " KBps)");
                    mBroadcaster.onS3UploadComplete(new S3UploadEvent(file, url, bytesPerSecond));
                } else if (progressEvent.getEventCode() == ProgressEvent.FAILED_EVENT_CODE) {
                    Log.w(TAG, "Upload failed for " + url);
                }
            } catch (Exception excp) {
                // Never let a listener exception escape into the AWS SDK.
                Log.e(TAG, "ProgressListener error");
                excp.printStackTrace();
            }
        }
    });
    por.setCannedAcl(CannedAccessControlList.PublicRead);
    // Interceptors are held weakly; skip any that have been collected.
    for (WeakReference<S3RequestInterceptor> ref : mInterceptors) {
        S3RequestInterceptor interceptor = ref.get();
        if (interceptor != null) {
            interceptor.interceptRequest(por);
        }
    }
    mQueue.add(new Pair<>(por, lastUpload));
}
项目:aws-mobile-self-paced-labs-samples    文件:S3UploadTask.java   
@Override
public void progressChanged(ProgressEvent pe) {
    // Accumulate the bytes reported by this event and push the running
    // total to the UI thread.
    long transferredNow = pe.getBytesTransferred();
    total += transferredNow;
    publishProgress(total);
    Log.i("bytestranferred:", total + "bytes");
}
项目:aws-mobile-self-paced-labs-samples    文件:S3DownloadTask.java   
@Override
public void progressChanged(ProgressEvent pe) {
    // Accumulate the bytes reported by this event and push the running
    // total to the UI thread.
    long transferredNow = pe.getBytesTransferred();
    total += transferredNow;
    publishProgress(total);
    Log.i("bytestranferred:", total + "bytes");
}
项目:cfnassist    文件:UploadProgressListener.java   
@Override
public void progressChanged(ProgressEvent progressEvent) {
    // Delegate to the handler, but never let a listener exception propagate
    // back into the AWS transfer machinery.
    try {
        processReceivedEvent(progressEvent);
    } catch (Exception exception) {
        logger.error("Error handling S3 progress event ", exception);
    }
}
项目:cfnassist    文件:CloudClient.java   
@Override
public void progressChanged(ProgressEvent progressEvent) {
    // Log client-request failures at WARN; every event (including failures,
    // which therefore appear twice) is also logged at INFO.
    // NOTE(review): the double logging of failures looks unintentional —
    // confirm whether the INFO line should be in an else branch.
    if (progressEvent.getEventType() == ProgressEventType.CLIENT_REQUEST_FAILED_EVENT) {
        logger.warn(progressEvent.toString());
    }
    logger.info(progressEvent.toString());
}
项目:hadoop    文件:S3AFastOutputStream.java   
public void progressChanged(ProgressEvent progressEvent) {
  // Sole purpose: report liveness to Hadoop so the task is not marked hung.
  if (progress == null) {
    return;
  }
  progress.progress();
}
项目:hadoop    文件:S3AFileSystem.java   
/**
 * The src file is on the local disk.  Add it to FS at
 * the given dst name.
 *
 * This version doesn't need to create a temporary file to calculate the md5.
 * Sadly this doesn't seem to be used by the shell cp :(
 *
 * delSrc indicates if the source should be removed
 * @param delSrc whether to delete the src
 * @param overwrite whether to overwrite an existing file
 * @param src path
 * @param dst path
 * @throws IOException if the destination exists and overwrite is false,
 *         or if the upload is interrupted
 */
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, 
  Path dst) throws IOException {
  String key = pathToKey(dst);

  if (!overwrite && exists(dst)) {
    throw new IOException(dst + " already exists");
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Copying local file from " + src + " to " + dst);
  }

  // Since we have a local file, we don't need to stream into a temporary file
  LocalFileSystem local = getLocal(getConf());
  File srcfile = local.pathToFile(src);

  final ObjectMetadata om = new ObjectMetadata();
  if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
    om.setServerSideEncryption(serverSideEncryptionAlgorithm);
  }
  PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, srcfile);
  putObjectRequest.setCannedAcl(cannedACL);
  putObjectRequest.setMetadata(om);

  // Count each completed multipart part as a write op in the FS statistics.
  ProgressListener progressListener = new ProgressListener() {
    public void progressChanged(ProgressEvent progressEvent) {
      switch (progressEvent.getEventCode()) {
        case ProgressEvent.PART_COMPLETED_EVENT_CODE:
          statistics.incrementWriteOps(1);
          break;
        default:
          break;
      }
    }
  };

  Upload up = transfers.upload(putObjectRequest);
  up.addProgressListener(progressListener);
  try {
    up.waitForUploadResult();
    statistics.incrementWriteOps(1);
  } catch (InterruptedException e) {
    // Restore the interrupt status and preserve the original exception as
    // the cause rather than discarding it.
    Thread.currentThread().interrupt();
    throw new IOException("Got interrupted, cancelling", e);
  }

  // This will delete unnecessary fake parent directories
  finishedWrite(key);

  if (delSrc) {
    local.delete(src, false);
  }
}
项目:ibm-cos-sdk-java    文件:TransferProgressUpdatingListener.java   
public void progressChanged(ProgressEvent progressEvent) {
    // Forward only non-zero byte counts to the aggregate transfer progress.
    long bytes = progressEvent.getBytesTransferred();
    if (bytes != 0) {
        transferProgress.updateProgress(bytes);
    }
}
项目:ibm-cos-sdk-java    文件:MultipleFileTransferProgressUpdatingListener.java   
@Override
public void progressChanged(ProgressEvent progressEvent) {
    // Update the aggregate transfer progress first (superclass behavior),
    // then fan the event out to the chained listeners.
    super.progressChanged(progressEvent);
    progressListenerChain.progressChanged(progressEvent);
}
项目:nifi-minifi    文件:S3OutputStream.java   
@Override
public void progressChanged(ProgressEvent progressEvent) {
    // Debug-level trace of each upload progress event; no state is kept.
    log.debug("Progress event: " + progressEvent);
}
项目:aliyun-oss-hadoop-fs    文件:S3AFastOutputStream.java   
public void progressChanged(ProgressEvent progressEvent) {
  // Sole purpose: report liveness to Hadoop so the task is not marked hung.
  if (progress == null) {
    return;
  }
  progress.progress();
}
项目:aliyun-oss-hadoop-fs    文件:S3AFileSystem.java   
/**
 * The src file is on the local disk.  Add it to FS at
 * the given dst name.
 *
 * This version doesn't need to create a temporary file to calculate the md5.
 * Sadly this doesn't seem to be used by the shell cp :(
 *
 * delSrc indicates if the source should be removed
 * @param delSrc whether to delete the src
 * @param overwrite whether to overwrite an existing file
 * @param src path
 * @param dst path
 * @throws IOException if the destination exists and overwrite is false,
 *         or if the upload is interrupted
 */
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, 
  Path dst) throws IOException {
  String key = pathToKey(dst);

  if (!overwrite && exists(dst)) {
    throw new IOException(dst + " already exists");
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Copying local file from " + src + " to " + dst);
  }

  // Since we have a local file, we don't need to stream into a temporary file
  LocalFileSystem local = getLocal(getConf());
  File srcfile = local.pathToFile(src);

  final ObjectMetadata om = new ObjectMetadata();
  if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
    om.setSSEAlgorithm(serverSideEncryptionAlgorithm);
  }
  PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, srcfile);
  putObjectRequest.setCannedAcl(cannedACL);
  putObjectRequest.setMetadata(om);

  // Count each completed multipart part as a write op in the FS statistics.
  ProgressListener progressListener = new ProgressListener() {
    public void progressChanged(ProgressEvent progressEvent) {
      switch (progressEvent.getEventType()) {
        case TRANSFER_PART_COMPLETED_EVENT:
          statistics.incrementWriteOps(1);
          break;
        default:
          break;
      }
    }
  };

  Upload up = transfers.upload(putObjectRequest);
  up.addProgressListener(progressListener);
  try {
    up.waitForUploadResult();
    statistics.incrementWriteOps(1);
  } catch (InterruptedException e) {
    // Restore the interrupt status and preserve the original exception as
    // the cause rather than discarding it.
    Thread.currentThread().interrupt();
    throw new IOException("Got interrupted, cancelling", e);
  }

  // This will delete unnecessary fake parent directories
  finishedWrite(key);

  if (delSrc) {
    local.delete(src, false);
  }
}
项目:big-c    文件:S3AFastOutputStream.java   
public void progressChanged(ProgressEvent progressEvent) {
  // Sole purpose: report liveness to Hadoop so the task is not marked hung.
  if (progress == null) {
    return;
  }
  progress.progress();
}
项目:big-c    文件:S3AFileSystem.java   
/**
 * The src file is on the local disk.  Add it to FS at
 * the given dst name.
 *
 * This version doesn't need to create a temporary file to calculate the md5.
 * Sadly this doesn't seem to be used by the shell cp :(
 *
 * delSrc indicates if the source should be removed
 * @param delSrc whether to delete the src
 * @param overwrite whether to overwrite an existing file
 * @param src path
 * @param dst path
 * @throws IOException if the destination exists and overwrite is false,
 *         or if the upload is interrupted
 */
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, 
  Path dst) throws IOException {
  String key = pathToKey(dst);

  if (!overwrite && exists(dst)) {
    throw new IOException(dst + " already exists");
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Copying local file from " + src + " to " + dst);
  }

  // Since we have a local file, we don't need to stream into a temporary file
  LocalFileSystem local = getLocal(getConf());
  File srcfile = local.pathToFile(src);

  final ObjectMetadata om = new ObjectMetadata();
  if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
    om.setServerSideEncryption(serverSideEncryptionAlgorithm);
  }
  PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, srcfile);
  putObjectRequest.setCannedAcl(cannedACL);
  putObjectRequest.setMetadata(om);

  // Count each completed multipart part as a write op in the FS statistics.
  ProgressListener progressListener = new ProgressListener() {
    public void progressChanged(ProgressEvent progressEvent) {
      switch (progressEvent.getEventCode()) {
        case ProgressEvent.PART_COMPLETED_EVENT_CODE:
          statistics.incrementWriteOps(1);
          break;
        default:
          break;
      }
    }
  };

  Upload up = transfers.upload(putObjectRequest);
  up.addProgressListener(progressListener);
  try {
    up.waitForUploadResult();
    statistics.incrementWriteOps(1);
  } catch (InterruptedException e) {
    // Restore the interrupt status and preserve the original exception as
    // the cause rather than discarding it.
    Thread.currentThread().interrupt();
    throw new IOException("Got interrupted, cancelling", e);
  }

  // This will delete unnecessary fake parent directories
  finishedWrite(key);

  if (delSrc) {
    local.delete(src, false);
  }
}
项目:photon-model    文件:AWSCostStatsService.java   
private void downloadParseAndCreateStats(AWSCostStatsCreationContext statsData, String awsBucketName) throws IOException {
    // Downloads the zipped CSV cost bill for statsData.billMonthToDownload
    // from S3 into a fresh temp directory; the attached progress listener
    // parses the bill once the transfer completes, then advances to the next
    // month's bill. Failures are routed to billDownloadFailureHandler.
    try {
        // Creating a working directory for downloading and processing the bill
        final Path workingDirPath = Paths.get(System.getProperty(TEMP_DIR_LOCATION),
                UUID.randomUUID().toString());
        Files.createDirectories(workingDirPath);

        AWSCsvBillParser parser = new AWSCsvBillParser();
        final String csvBillZipFileName = parser
                .getCsvBillFileName(statsData.billMonthToDownload, statsData.accountId, true);
        Path csvBillZipFilePath = Paths.get(workingDirPath.toString(), csvBillZipFileName);
        // The listener drives the rest of the workflow: parsing runs in the
        // TRANSFER_COMPLETED_EVENT branch, failure handling in TRANSFER_FAILED_EVENT.
        ProgressListener listener = new ProgressListener() {
            @Override
            public void progressChanged(ProgressEvent progressEvent) {
                try {
                    ProgressEventType eventType = progressEvent.getEventType();
                    if (ProgressEventType.TRANSFER_COMPLETED_EVENT.equals(eventType)) {
                        // Restore the original operation context, since this
                        // callback runs on an SDK transfer thread.
                        OperationContext.restoreOperationContext(statsData.opContext);
                        LocalDate billMonth = new LocalDate(
                                statsData.billMonthToDownload.getYear(),
                                statsData.billMonthToDownload.getMonthOfYear(), 1);

                        logWithContext(statsData, Level.INFO, () -> String.format("Processing" +
                                " bill for the month: %s.", billMonth));

                        parser.parseDetailedCsvBill(statsData.ignorableInvoiceCharge, csvBillZipFilePath,
                                statsData.awsAccountIdToComputeStates.keySet(),
                                getHourlyStatsConsumer(billMonth, statsData),
                                getMonthlyStatsConsumer(billMonth, statsData));
                        deleteTempFiles();
                        // Continue downloading and processing the bills for past and current months' bills
                        statsData.billMonthToDownload = statsData.billMonthToDownload.plusMonths(1);
                        handleCostStatsCreationRequest(statsData);
                    } else if (ProgressEventType.TRANSFER_FAILED_EVENT.equals(eventType)) {
                        deleteTempFiles();
                        billDownloadFailureHandler(statsData, awsBucketName, new IOException(
                                "Download of AWS CSV Bill '" + csvBillZipFileName + "' failed."));
                    }
                } catch (Exception exception) {
                    deleteTempFiles();
                    billDownloadFailureHandler(statsData, awsBucketName, exception);
                }
            }

            // Best-effort cleanup of the downloaded zip and its working directory.
            private void deleteTempFiles() {
                try {
                    Files.deleteIfExists(csvBillZipFilePath);
                    Files.deleteIfExists(workingDirPath);
                } catch (IOException e) {
                    // Ignore IO exception while cleaning files.
                }
            }
        };
        GetObjectRequest getObjectRequest = new GetObjectRequest(awsBucketName,
                csvBillZipFileName).withGeneralProgressListener(listener);
        statsData.s3Client.download(getObjectRequest, csvBillZipFilePath.toFile());
    } catch (AmazonS3Exception s3Exception) {
        billDownloadFailureHandler(statsData, awsBucketName, s3Exception);
    }
}
项目:kafka-connect-storage-cloud    文件:S3OutputStream.java   
public void progressChanged(ProgressEvent progressEvent) {
  // Debug-level trace of each upload progress event; no state is kept.
  log.debug("Progress event: " + progressEvent);
}