Java 类com.amazonaws.services.s3.transfer.MultipleFileUpload 实例源码

项目:aws-doc-sdk-examples    文件:XferMgrUpload.java   
/**
 * Uploads the contents of a local directory to the given S3 bucket.
 *
 * @param dir_path    local directory to upload
 * @param bucket_name destination S3 bucket
 * @param key_prefix  virtual-directory key prefix for the uploaded objects
 * @param recursive   whether to include subdirectories
 * @param pause       only echoed in the banner; pausing is not implemented here
 */
public static void uploadDir(String dir_path, String bucket_name,
        String key_prefix, boolean recursive, boolean pause)
{
    System.out.println("directory: " + dir_path + (recursive ?
                " (recursive)" : "") + (pause ? " (pause)" : ""));

    TransferManager xfer_mgr = new TransferManager();
    try {
        MultipleFileUpload xfer = xfer_mgr.uploadDirectory(bucket_name,
                key_prefix, new File(dir_path), recursive);
        // loop with Transfer.isDone()
        XferMgrProgress.showTransferProgress(xfer);
        // or block with Transfer.waitForCompletion()
        XferMgrProgress.waitForCompletion(xfer);
    } catch (AmazonServiceException e) {
        System.err.println(e.getErrorMessage());
        System.exit(1);
    } finally {
        // Ensure the TransferManager's thread pool is released even if an
        // unchecked exception (e.g. AmazonClientException) escapes the try.
        xfer_mgr.shutdownNow();
    }
}
项目:aws-doc-sdk-examples    文件:XferMgrProgress.java   
/**
 * Uploads a local directory to S3, reporting per-file (sub-transfer) progress.
 *
 * @param dir_path    local directory to upload
 * @param bucket_name destination S3 bucket
 * @param key_prefix  virtual-directory key prefix for the uploaded objects
 * @param recursive   whether to include subdirectories
 * @param pause       only echoed in the banner; pausing is not implemented here
 */
public static void uploadDirWithSubprogress(String dir_path,
        String bucket_name, String key_prefix, boolean recursive,
        boolean pause)
{
    System.out.println("directory: " + dir_path + (recursive ?
                " (recursive)" : "") + (pause ? " (pause)" : ""));

    TransferManager xfer_mgr = new TransferManager();
    try {
        MultipleFileUpload multi_upload = xfer_mgr.uploadDirectory(
                bucket_name, key_prefix, new File(dir_path), recursive);
        // loop with Transfer.isDone()
        XferMgrProgress.showMultiUploadProgress(multi_upload);
        // or block with Transfer.waitForCompletion()
        XferMgrProgress.waitForCompletion(multi_upload);
    } catch (AmazonServiceException e) {
        System.err.println(e.getErrorMessage());
        System.exit(1);
    } finally {
        // Ensure the TransferManager's thread pool is released even if an
        // unchecked exception (e.g. AmazonClientException) escapes the try.
        xfer_mgr.shutdownNow();
    }
}
项目:aws-doc-sdk-examples    文件:XferMgrUpload.java   
/**
 * Uploads an explicit list of files to the given S3 bucket. Keys are built
 * relative to the current working directory (".") plus the key prefix.
 *
 * @param file_paths  paths of the files to upload
 * @param bucket_name destination S3 bucket
 * @param key_prefix  virtual-directory key prefix for the uploaded objects
 * @param pause       only echoed in the banner; pausing is not implemented here
 */
public static void uploadFileList(String[] file_paths, String bucket_name,
        String key_prefix, boolean pause)
{
    System.out.println("file list: " + Arrays.toString(file_paths) +
            (pause ? " (pause)" : ""));
    // convert the file paths to a list of File objects (required by the
    // uploadFileList method)
    ArrayList<File> files = new ArrayList<File>(file_paths.length);
    for (String path : file_paths) {
        files.add(new File(path));
    }

    TransferManager xfer_mgr = new TransferManager();
    try {
        MultipleFileUpload xfer = xfer_mgr.uploadFileList(bucket_name,
                key_prefix, new File("."), files);
        // loop with Transfer.isDone()
        XferMgrProgress.showTransferProgress(xfer);
        // or block with Transfer.waitForCompletion()
        XferMgrProgress.waitForCompletion(xfer);
    } catch (AmazonServiceException e) {
        System.err.println(e.getErrorMessage());
        System.exit(1);
    } finally {
        // Ensure the TransferManager's thread pool is released even if an
        // unchecked exception (e.g. AmazonClientException) escapes the try.
        xfer_mgr.shutdownNow();
    }
}
项目:snap2cloud    文件:S3Backup.java   
/**
 * Blocks until every sub-transfer of the given multi-file upload completes,
 * logging a success line per uploaded object.
 *
 * @param uploadDirectory the in-flight directory upload to verify
 * @return an empty map — TODO(review): fileModifyMap is never populated;
 *         confirm whether callers expect entries here
 * @throws AmazonClientException if any sub-transfer fails or the wait is interrupted
 */
private Map<String, String> verifyMultiPartUpload(MultipleFileUpload uploadDirectory) throws AmazonClientException {
    Map<String, String> fileModifyMap = new HashMap<String, String>();

    for (Upload upload : uploadDirectory.getSubTransfers()) {
        UploadResult uploadResult;
        try {
            uploadResult = upload.waitForUploadResult();
        } catch (InterruptedException e) {
            // Restore the interrupt status so callers up the stack can see it.
            Thread.currentThread().interrupt();
            LOGGER.error(e.getMessage());
            throw new AmazonClientException(e.getMessage(), e);
        } catch (Exception e) {
            LOGGER.error(e.getMessage());
            // Chain the cause instead of discarding the original stack trace.
            throw new AmazonClientException(e.getMessage(), e);
        }

        if (uploadResult != null) {
            // String.format was a no-op here (no format specifiers); plain concatenation.
            LOGGER.info("Multipart upload success for file " + uploadResult.getKey() + " to Amazon S3 bucket " + uploadResult.getBucketName());
        }
    }

    return fileModifyMap;
}
项目:elasticsearch-lambda    文件:S3SnapshotTransport.java   
/**
 * Uploads a local shard directory to S3 (bucket name is destination bucket +
 * shard id) and polls once a second until the transfer finishes.
 *
 * @param shardDestinationBucket bucket-name prefix for the shard
 * @param localShardPath         local directory holding the shard files
 * @param shard                  shard identifier appended to the bucket name
 */
protected void transferDir(String shardDestinationBucket, String localShardPath, String shard) {
    MultipleFileUpload mfu = tx.uploadDirectory(shardDestinationBucket + shard, null, new File(localShardPath), true, objectMetadataProvider);

    /**
     * TODO: Hadoop has a configurable timeout for how long a reducer can be non-responsive (usually 600s). If 
     * this takes >600s hadoop will kill the task. We need to ping the reporter to let it know it's alive
     * in the case where the file transfer is taking a while.
     */
    while(!mfu.isDone()) {
        logger.info("Transfering to S3 completed %" + mfu.getProgress().getPercentTransferred());
        try {
            Thread.sleep(1000);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            // Stop polling: once interrupted, every subsequent sleep() would
            // throw immediately and the loop would busy-spin until the upload
            // finished on its own.
            break;
        }
    }
}
项目:dcos-cassandra-service    文件:S3StorageDriver.java   
/**
 * Uploads a Cassandra snapshot directory to S3 under
 * {@code key/keyspaceName/cfName/} and blocks until the transfer completes.
 *
 * @param tx                the transfer manager to use
 * @param bucketName        destination S3 bucket
 * @param key               base key prefix
 * @param keyspaceName      keyspace component of the key
 * @param cfName            column-family component of the key
 * @param snapshotDirectory local snapshot directory to upload
 * @throws Exception if the upload fails or is interrupted
 */
private void uploadDirectory(TransferManager tx,
                             String bucketName,
                             String key,
                             String keyspaceName,
                             String cfName,
                             File snapshotDirectory) throws Exception {
    try {
        final String fileKey = key + "/" + keyspaceName + "/" + cfName + "/";
        final MultipleFileUpload myUpload = tx.uploadDirectory(bucketName, fileKey, snapshotDirectory, true);
        myUpload.waitForCompletion();
    } catch (InterruptedException e) {
        // Restore the interrupt status before propagating the failure.
        Thread.currentThread().interrupt();
        LOGGER.error("Error occurred on uploading directory {} : {}", snapshotDirectory.getName(), e);
        throw new Exception(e);
    } catch (Exception e) {
        LOGGER.error("Error occurred on uploading directory {} : {}", snapshotDirectory.getName(), e);
        throw new Exception(e);
    }
}
项目:cerberus-lifecycle-cli    文件:PublishDashboardOperation.java   
/**
 * Publishes the dashboard artifact: resolves the dashboard bucket from the
 * environment's stack outputs, extracts the artifact, and uploads the
 * extracted files to the bucket.
 *
 * @param command publish command carrying the artifact URLs
 * @throws IllegalStateException if the environment has no dashboard bucket configured
 */
@Override
public void run(final PublishDashboardCommand command) {
    final URL artifactUrl = command.getArtifactUrl();
    final URL overrideArtifactUrl = command.getOverrideArtifactUrl();

    final BaseOutputs outputParameters = configStore.getBaseStackOutputs();
    final String dashboardBucketName = outputParameters.getDashboardBucketName();

    if (StringUtils.isBlank(dashboardBucketName)) {
        final String errorMessage = "The specified environment isn't configured properly!";
        logger.error(errorMessage);
        throw new IllegalStateException(errorMessage);
    }

    initClient(dashboardBucketName);

    final File extractedDirectory = extractArtifact(artifactUrl, overrideArtifactUrl);

    try {
        final MultipleFileUpload multipleFileUpload =
                transferManager.uploadDirectory(dashboardBucketName, "", extractedDirectory, true, new DashboardMetaDataProvider());
        logger.info("Uploading dashboard files.");
        multipleFileUpload.waitForCompletion();
        logger.info("Uploading complete.");
    } catch (InterruptedException e) {
        // Restore the interrupt status so the thread's owner can observe it.
        Thread.currentThread().interrupt();
        logger.error("Interrupted while waiting for upload to complete!", e);
    } finally {
        transferManager.shutdownNow(false);
    }
}
项目:herd    文件:MockS3OperationsImpl.java   
/**
 * Mock implementation of directory upload: collects the files in the given
 * directory (optionally recursing) and delegates to the mock
 * {@code uploadFileList}.
 */
@Override
public MultipleFileUpload uploadDirectory(String bucketName, String virtualDirectoryKeyPrefix, File directory, boolean includeSubdirectories,
    ObjectMetadataProvider metadataProvider, TransferManager transferManager)
{
    // Gather every file under the directory first, then log the call.
    List<File> collectedFiles = new ArrayList<>();
    listFiles(directory, collectedFiles, includeSubdirectories);

    LOGGER.debug(
        "uploadDirectory(): bucketName = " + bucketName + ", virtualDirectoryKeyPrefix = " + virtualDirectoryKeyPrefix + ", directory = " + directory +
            ", includeSubdirectories = " + includeSubdirectories);

    // Reuse the file-list mock so both entry points share one code path.
    return uploadFileList(bucketName, virtualDirectoryKeyPrefix, directory, collectedFiles, metadataProvider, transferManager);
}
项目:herd    文件:S3OperationsImpl.java   
/**
 * Delegates directly to the AWS SDK {@link TransferManager#uploadDirectory}.
 */
@Override
public MultipleFileUpload uploadDirectory(String s3BucketName, String virtualDirectoryKeyPrefix, File directory, boolean includeSubdirectories,
    ObjectMetadataProvider metadataProvider, TransferManager transferManager)
{
    // Thin pass-through; the SDK handles key construction and transfer threads.
    MultipleFileUpload upload =
        transferManager.uploadDirectory(s3BucketName, virtualDirectoryKeyPrefix, directory, includeSubdirectories, metadataProvider);
    return upload;
}
项目:herd    文件:S3OperationsImpl.java   
/**
 * Delegates directly to the AWS SDK {@link TransferManager#uploadFileList}.
 */
@Override
public MultipleFileUpload uploadFileList(String s3BucketName, String virtualDirectoryKeyPrefix, File directory, List<File> files,
    ObjectMetadataProvider metadataProvider, TransferManager transferManager)
{
    // Thin pass-through; keys are derived from each file's path relative to 'directory'.
    MultipleFileUpload upload =
        transferManager.uploadFileList(s3BucketName, virtualDirectoryKeyPrefix, directory, files, metadataProvider);
    return upload;
}
项目:herd    文件:MockS3OperationsImpl.java   
/**
 * Mock implementation of a multi-file upload: synchronously "uploads" each
 * file via {@code putObject} and returns a completed
 * {@link MultipleFileUploadImpl} whose progress reflects the total bytes.
 *
 * @param bucketName                destination bucket
 * @param virtualDirectoryKeyPrefix key prefix; trailing slashes are stripped
 * @param directory                 common parent used to build relative keys
 * @param files                     files to upload
 * @param metadataProvider          callback supplying per-object metadata
 * @param transferManager           source of the mock S3 client
 * @return a completed multiple-file upload
 */
@Override
public MultipleFileUpload uploadFileList(String bucketName, String virtualDirectoryKeyPrefix, File directory, List<File> files,
    ObjectMetadataProvider metadataProvider, TransferManager transferManager)
{
    LOGGER.debug(
        "uploadFileList(): bucketName = " + bucketName + ", virtualDirectoryKeyPrefix = " + virtualDirectoryKeyPrefix + ", directory = " + directory +
            ", files = " + files);

    String directoryPath = directory.getAbsolutePath();

    // Strip trailing slashes once up front: this was previously done inside
    // the loop (loop-invariant) by reassigning the parameter.
    String keyPrefix = virtualDirectoryKeyPrefix.replaceAll("/+$", "");

    long totalFileLength = 0;
    List<Upload> subTransfers = new ArrayList<>();
    for (File file : files)
    {
        // Get path to file relative to the specified directory
        String relativeFilePath = file.getAbsolutePath().substring(directoryPath.length());

        // Replace any backslashes (i.e. Windows separator) with a forward slash.
        relativeFilePath = relativeFilePath.replace("\\", "/");

        // Remove any leading slashes
        relativeFilePath = relativeFilePath.replaceAll("^/+", "");

        String s3ObjectKey = keyPrefix + "/" + relativeFilePath;
        totalFileLength += file.length();

        PutObjectRequest putObjectRequest = new PutObjectRequest(bucketName, s3ObjectKey, file);

        // Let the caller's metadata provider populate per-object metadata.
        ObjectMetadata objectMetadata = new ObjectMetadata();
        metadataProvider.provideObjectMetadata(null, objectMetadata);
        putObjectRequest.setMetadata(objectMetadata);

        putObject(putObjectRequest, transferManager.getAmazonS3Client());

        subTransfers.add(new UploadImpl(null, null, null, null));
    }

    // Report the transfer as fully complete.
    TransferProgress progress = new TransferProgress();
    progress.setTotalBytesToTransfer(totalFileLength);
    progress.updateProgress(totalFileLength);

    MultipleFileUploadImpl multipleFileUpload = new MultipleFileUploadImpl(null, progress, null, keyPrefix, bucketName, subTransfers);
    multipleFileUpload.setState(TransferState.Completed);
    return multipleFileUpload;
}
项目:DeployMan    文件:RemoteRepository.java   
/**
 * Recursively uploads a local folder to the configured repository bucket
 * under the given key, blocking until the transfer completes.
 *
 * @param folder local folder to upload
 * @param key    destination key prefix inside the repository bucket
 */
public void uploadFolder(File folder, String key) throws AmazonServiceException,
    AmazonClientException, InterruptedException {
  final TransferManager transferManager = new TransferManager(new Aws().getAwsCredentials());
  final String bucket = getUserProperty(REPO_BUCKET);
  final MultipleFileUpload upload = transferManager.uploadDirectory(bucket, key, folder, true);
  waitForUpload(upload, transferManager);
}
项目:deeplearning4j    文件:S3Uploader.java   
/**
 * Starts an asynchronous upload of a local folder to S3.
 *
 * NOTE(review): the TransferManager created here is never shut down; it must
 * stay alive while the returned transfer is in flight — confirm who owns its
 * lifecycle.
 *
 * @param bucketName    destination bucket
 * @param keyPrefix     virtual-directory key prefix
 * @param folderPath    local folder to upload
 * @param includeSubDir whether to include subdirectories
 * @return the in-flight multiple-file upload
 */
public MultipleFileUpload uploadFolder(String bucketName, String keyPrefix, File folderPath,
                boolean includeSubDir) {
    TransferManager transferManager = new TransferManager(getClient());
    MultipleFileUpload upload = transferManager.uploadDirectory(bucketName, keyPrefix, folderPath, includeSubDir);
    return upload;
}
项目:deeplearning4j    文件:S3Uploader.java   
/**
 * Starts an asynchronous upload of an explicit list of files to S3; keys are
 * built relative to {@code folderPath} plus {@code keyPrefix}.
 *
 * NOTE(review): the TransferManager created here is never shut down; it must
 * stay alive while the returned transfer is in flight — confirm who owns its
 * lifecycle.
 *
 * @param bucketName destination bucket
 * @param folderPath common parent directory of the files
 * @param fileList   files to upload
 * @param keyPrefix  virtual-directory key prefix
 * @return the in-flight multiple-file upload
 */
public MultipleFileUpload uploadFileList(String bucketName, File folderPath, List<File> fileList,
                String keyPrefix) {
    TransferManager transferManager = new TransferManager(getClient());
    MultipleFileUpload upload = transferManager.uploadFileList(bucketName, keyPrefix, folderPath, fileList);
    return upload;
}
项目:herd    文件:S3Operations.java   
/**
 * Uploads all files in the directory given to the bucket named, optionally recursing for all subdirectories.
 *
 * @param s3BucketName the S3 bucket name
 * @param virtualDirectoryKeyPrefix the key prefix of the virtual directory to upload to
 * @param directory the directory to upload
 * @param includeSubdirectories specifies whether to include subdirectories in the upload. If true, files found in subdirectories will be included with an
 * appropriate concatenation to the key prefix
 * @param metadataProvider the callback of type <code>ObjectMetadataProvider</code> which is used to provide metadata for each file being uploaded
 * @param transferManager the transfer manager implementation to use
 *
 * @return the multiple file upload information
 */
public MultipleFileUpload uploadDirectory(String s3BucketName, String virtualDirectoryKeyPrefix, File directory, boolean includeSubdirectories,
    ObjectMetadataProvider metadataProvider, TransferManager transferManager);
项目:herd    文件:S3Operations.java   
/**
 * Uploads all specified files to the bucket named, constructing relative keys depending on the common parent directory given.
 *
 * @param s3BucketName the S3 bucket name
 * @param virtualDirectoryKeyPrefix the key prefix of the virtual directory to upload to
 * @param directory the common parent directory of the files to upload. The keys of the files in the list of files are constructed relative to this directory
 * and the virtualDirectoryKeyPrefix
 * @param files the list of files to upload. The keys of the files are calculated relative to the common parent directory and the virtualDirectoryKeyPrefix
 * @param metadataProvider the callback of type <code>ObjectMetadataProvider</code> which is used to provide metadata for each file being uploaded
 * @param transferManager the transfer manager implementation to use
 *
 * @return the multiple file upload information
 */
public MultipleFileUpload uploadFileList(String s3BucketName, String virtualDirectoryKeyPrefix, File directory, List<File> files,
    ObjectMetadataProvider metadataProvider, TransferManager transferManager);