private Map<Integer, PartSummary> identifyExistingPartsForResume(String uploadId) {
    Map<Integer, PartSummary> partNumbers = new HashMap<Integer, PartSummary>();
    if (uploadId == null) {
        return partNumbers;
    }
    // Page through the parts that already exist for this upload, following the
    // part-number marker until the listing is no longer truncated.
    int partNumber = 0;
    while (true) {
        PartListing parts = s3.listParts(new ListPartsRequest(
                origReq.getBucketName(), origReq.getKey(), uploadId)
                .withPartNumberMarker(partNumber)
                .withRequesterPays(origReq.isRequesterPays()));
        for (PartSummary partSummary : parts.getParts()) {
            partNumbers.put(partSummary.getPartNumber(), partSummary);
        }
        if (!parts.isTruncated()) {
            return partNumbers;
        }
        partNumber = parts.getNextPartNumberMarker();
    }
}
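A minimal sketch of how a caller might consume the map returned by identifyExistingPartsForResume when resuming an interrupted multipart upload; the class and method names below (ResumeSketch, needsUpload, bytesAlreadyUploaded) are illustrative only and not part of the AWS SDK.

import java.util.Map;

import com.amazonaws.services.s3.model.PartSummary;

final class ResumeSketch {
    // Hypothetical helper: a part only needs to be re-uploaded if it is absent
    // from the map of parts that already exist on the service.
    static boolean needsUpload(Map<Integer, PartSummary> existingParts, int partNumber) {
        return !existingParts.containsKey(partNumber);
    }

    // Hypothetical helper: total bytes that survived the previous attempt,
    // useful for reporting resume progress.
    static long bytesAlreadyUploaded(Map<Integer, PartSummary> existingParts) {
        long total = 0;
        for (PartSummary part : existingParts.values()) {
            total += part.getSize();
        }
        return total;
    }
}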
@Override
public PartListing listParts(ListPartsRequest listPartsRequest)
        throws SdkClientException, AmazonServiceException {
    listPartsRequest = beforeClientExecution(listPartsRequest);
    rejectNull(listPartsRequest,
            "The request parameter must be specified when listing parts");
    rejectNull(listPartsRequest.getBucketName(),
            "The bucket name parameter must be specified when listing parts");
    rejectNull(listPartsRequest.getKey(),
            "The key parameter must be specified when listing parts");
    rejectNull(listPartsRequest.getUploadId(),
            "The upload ID parameter must be specified when listing parts");

    Request<ListPartsRequest> request = createRequest(listPartsRequest.getBucketName(),
            listPartsRequest.getKey(), listPartsRequest, HttpMethodName.GET);
    request.addParameter("uploadId", listPartsRequest.getUploadId());

    if (listPartsRequest.getMaxParts() != null)
        request.addParameter("max-parts", listPartsRequest.getMaxParts().toString());
    if (listPartsRequest.getPartNumberMarker() != null)
        request.addParameter("part-number-marker",
                listPartsRequest.getPartNumberMarker().toString());
    if (listPartsRequest.getEncodingType() != null)
        request.addParameter("encoding-type", listPartsRequest.getEncodingType());
    populateRequesterPaysHeader(request, listPartsRequest.isRequesterPays());

    @SuppressWarnings("unchecked")
    ResponseHeaderHandlerChain<PartListing> responseHandler = new ResponseHeaderHandlerChain<PartListing>(
            // xml payload unmarshaller
            new Unmarshallers.ListPartsResultUnmarshaller(),
            // header handler
            new S3RequesterChargedHeaderHandler<PartListing>(),
            new ListPartsHeaderHandler());

    return invoke(request, responseHandler, listPartsRequest.getBucketName(), listPartsRequest.getKey());
}
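For reference, a minimal caller-side sketch (assuming the AWS SDK for Java v1) that pages through listParts using the max-parts and part-number-marker parameters the method above forwards as query parameters; the bucket, key, and upload ID values are placeholders.

import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.AmazonS3ClientBuilder;
import com.amazonaws.services.s3.model.ListPartsRequest;
import com.amazonaws.services.s3.model.PartListing;
import com.amazonaws.services.s3.model.PartSummary;

public class ListPartsPagingSketch {
    public static void main(String[] args) {
        AmazonS3 s3 = AmazonS3ClientBuilder.defaultClient();
        // Placeholder values for an in-progress multipart upload.
        String bucket = "example-bucket";
        String key = "example-key";
        String uploadId = "example-upload-id";

        PartListing listing;
        int marker = 0;
        do {
            // Each call returns at most max-parts entries starting after the marker.
            listing = s3.listParts(new ListPartsRequest(bucket, key, uploadId)
                    .withMaxParts(100)
                    .withPartNumberMarker(marker));
            for (PartSummary part : listing.getParts()) {
                System.out.printf("part %d: etag=%s, size=%d%n",
                        part.getPartNumber(), part.getETag(), part.getSize());
            }
            marker = listing.getNextPartNumberMarker();
        } while (listing.isTruncated());
    }
}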
@Override
public PartListing listParts(ListPartsRequest request)
        throws AmazonClientException, AmazonServiceException {
    return delegate.listParts(request);
}
@Override
public PartListing listParts(ListPartsRequest listPartsRequest)
        throws AmazonClientException, AmazonServiceException {
    throw new UnsupportedOperationException();
}
@Override
public void handle(PartListing result, HttpResponse response) {
    result.setAbortDate(ServiceUtils.parseRfc822Date(response.getHeaders().get(Headers.ABORT_DATE)));
    result.setAbortRuleId(response.getHeaders().get(Headers.ABORT_RULE_ID));
}
@Override
public PartListing listParts(ListPartsRequest request)
        throws SdkClientException, AmazonServiceException {
    return call(() -> getDelegate().listParts(request));
}
@Override
public PartListing listParts(ListPartsRequest request) throws AmazonClientException {
    return null;
}
@Test
public void testMultipartUploadAbort() throws Exception {
    String blobName = "multipart-upload-abort";
    ByteSource byteSource = TestUtils.randomByteSource().slice(
            0, context.getBlobStore().getMinimumMultipartPartSize());

    InitiateMultipartUploadResult result = client.initiateMultipartUpload(
            new InitiateMultipartUploadRequest(containerName, blobName));

    // TODO: google-cloud-storage and openstack-swift cannot list multipart
    // uploads
    MultipartUploadListing multipartListing = client.listMultipartUploads(
            new ListMultipartUploadsRequest(containerName));
    if (blobStoreType.equals("azureblob")) {
        // Azure does not create a manifest during initiate multi-part
        // upload.  Instead the first part creates this.
        assertThat(multipartListing.getMultipartUploads()).isEmpty();
    } else {
        assertThat(multipartListing.getMultipartUploads()).hasSize(1);
    }

    PartListing partListing = client.listParts(new ListPartsRequest(
            containerName, blobName, result.getUploadId()));
    assertThat(partListing.getParts()).isEmpty();

    client.uploadPart(new UploadPartRequest()
            .withBucketName(containerName)
            .withKey(blobName)
            .withUploadId(result.getUploadId())
            .withPartNumber(1)
            .withPartSize(byteSource.size())
            .withInputStream(byteSource.openStream()));

    multipartListing = client.listMultipartUploads(
            new ListMultipartUploadsRequest(containerName));
    assertThat(multipartListing.getMultipartUploads()).hasSize(1);

    partListing = client.listParts(new ListPartsRequest(
            containerName, blobName, result.getUploadId()));
    assertThat(partListing.getParts()).hasSize(1);

    client.abortMultipartUpload(new AbortMultipartUploadRequest(
            containerName, blobName, result.getUploadId()));

    multipartListing = client.listMultipartUploads(
            new ListMultipartUploadsRequest(containerName));
    if (blobStoreType.equals("azureblob")) {
        // Azure does not support explicit abort.  It automatically
        // removes incomplete multi-part uploads after 7 days.
        assertThat(multipartListing.getMultipartUploads()).hasSize(1);
    } else {
        assertThat(multipartListing.getMultipartUploads()).isEmpty();
    }

    ObjectListing listing = client.listObjects(containerName);
    assertThat(listing.getObjectSummaries()).isEmpty();
}
@Override
public PartListing listParts(ListPartsRequest request)
        throws AmazonClientException, AmazonServiceException {
    // TODO Auto-generated method stub
    return null;
}