/**
 * @see com.amazonaws.transform.Unmarshaller#unmarshall(java.lang.Object)
 */
public AmazonServiceException unmarshall(Node in) throws Exception {
    XPath xpath = xpath();
    String errorCode = parseErrorCode(in, xpath);
    String errorType = asString("ErrorResponse/Error/Type", in, xpath);
    String requestId = asString("ErrorResponse/RequestId", in, xpath);
    String message = asString("ErrorResponse/Error/Message", in, xpath);

    AmazonServiceException ase = newException(message);
    ase.setErrorCode(errorCode);
    ase.setRequestId(requestId);

    if (errorType == null) {
        ase.setErrorType(ErrorType.Unknown);
    } else if (errorType.equalsIgnoreCase("Receiver")) {
        ase.setErrorType(ErrorType.Service);
    } else if (errorType.equalsIgnoreCase("Sender")) {
        ase.setErrorType(ErrorType.Client);
    }

    return ase;
}
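// Illustrative shape of the XML error document the XPath expressions above expect. This is a
// sketch inferred from those expressions, not taken from the original source; in particular the
// <Code> element is an assumption based on the parseErrorCode call.
//
//   <ErrorResponse>
//     <Error>
//       <Type>Sender</Type>
//       <Code>SomeErrorCode</Code>
//       <Message>Human-readable description of the failure</Message>
//     </Error>
//     <RequestId>request-id</RequestId>
//   </ErrorResponse>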
@Override
public ObjectMetadata getObjectMetadata(GetObjectMetadataRequest getObjectMetadataRequest)
        throws SdkClientException, AmazonServiceException {
    getObjectMetadataRequest = beforeClientExecution(getObjectMetadataRequest);
    rejectNull(getObjectMetadataRequest,
            "The GetObjectMetadataRequest parameter must be specified when requesting an object's metadata");

    String bucketName = getObjectMetadataRequest.getBucketName();
    String key = getObjectMetadataRequest.getKey();
    String versionId = getObjectMetadataRequest.getVersionId();

    rejectNull(bucketName, "The bucket name parameter must be specified when requesting an object's metadata");
    rejectNull(key, "The key parameter must be specified when requesting an object's metadata");

    Request<GetObjectMetadataRequest> request =
            createRequest(bucketName, key, getObjectMetadataRequest, HttpMethodName.HEAD);

    if (versionId != null) {
        request.addParameter("versionId", versionId);
    }

    populateRequesterPaysHeader(request, getObjectMetadataRequest.isRequesterPays());
    addPartNumberIfNotNull(request, getObjectMetadataRequest.getPartNumber());
    populateSSE_C(request, getObjectMetadataRequest.getSSECustomerKey());

    return invoke(request, new S3MetadataResponseHandler(), bucketName, key);
}
@Override
public void setBucketAcl(SetBucketAclRequest setBucketAclRequest)
        throws SdkClientException, AmazonServiceException {
    setBucketAclRequest = beforeClientExecution(setBucketAclRequest);

    String bucketName = setBucketAclRequest.getBucketName();
    rejectNull(bucketName, "The bucket name parameter must be specified when setting a bucket's ACL");

    AccessControlList acl = setBucketAclRequest.getAcl();
    CannedAccessControlList cannedAcl = setBucketAclRequest.getCannedAcl();

    if (acl == null && cannedAcl == null) {
        throw new IllegalArgumentException(
                "The ACL parameter must be specified when setting a bucket's ACL");
    }
    if (acl != null && cannedAcl != null) {
        throw new IllegalArgumentException(
                "Only one of the acl and cannedAcl parameter can be specified, not both.");
    }

    if (acl != null) {
        setAcl(bucketName, null, null, acl, false, setBucketAclRequest);
    } else {
        setAcl(bucketName, null, null, cannedAcl, false, setBucketAclRequest);
    }
}
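// A minimal usage sketch for setBucketAcl above (not from the original source); it assumes an
// AmazonS3 client is available and uses a placeholder bucket name. Exactly one ACL form is
// supplied, which is what the validation above requires.
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.CannedAccessControlList;
import com.amazonaws.services.s3.model.SetBucketAclRequest;

public class SetBucketAclExample {
    // Supplying both an AccessControlList and a canned ACL would trigger the
    // IllegalArgumentException thrown by the method above.
    static void makeBucketPublicRead(AmazonS3 s3) {
        s3.setBucketAcl(new SetBucketAclRequest("example-bucket", CannedAccessControlList.PublicRead));
    }
}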
/**
 * Verifies the request is actually retried the expected number of times.
 */
private void testActualRetries(int expectedRetryAttempts) {
    testedClient = new AmazonHttpClient(clientConfiguration);
    injectMockHttpClient(testedClient, new ReturnServiceErrorHttpClient(500, "fake 500 service error"));

    // The ExecutionContext should collect the expected RequestCount
    ExecutionContext context = new ExecutionContext(true);

    try {
        testedClient.requestExecutionBuilder()
                .request(getSampleRequestWithRepeatableContent(originalRequest))
                .errorResponseHandler(errorResponseHandler)
                .executionContext(context)
                .execute();
        Assert.fail("AmazonServiceException is expected.");
    } catch (AmazonServiceException ase) {
        // expected
    }

    RetryTestUtils.assertExpectedRetryCount(expectedRetryAttempts, context);
}
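// A minimal sketch (not from the original test class) of how the helper above might be driven;
// it assumes clientConfiguration is the field consumed when testedClient is rebuilt, and that
// setMaxErrorRetry controls the retry policy under test.
@Test
public void shouldRetryThreeTimesOnRepeated500s() {
    clientConfiguration.setMaxErrorRetry(3); // hypothetical setup; the real tests may configure this elsewhere
    testActualRetries(3);                    // the initial attempt fails, then three retries are expected
}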
private void handleErrorResponse(InputStream errorStream, int statusCode, String responseMessage) throws IOException {
    String errorCode = null;

    // Parse the error stream returned from the service.
    if (errorStream != null) {
        String errorResponse = IOUtils.toString(errorStream);
        try {
            JsonNode node = Jackson.jsonNodeOf(errorResponse);
            JsonNode code = node.get("code");
            JsonNode message = node.get("message");
            if (code != null && message != null) {
                errorCode = code.asText();
                responseMessage = message.asText();
            }
        } catch (Exception exception) {
            LOG.debug("Unable to parse error stream");
        }
    }

    AmazonServiceException ase = new AmazonServiceException(responseMessage);
    ase.setStatusCode(statusCode);
    ase.setErrorCode(errorCode);
    throw ase;
}
@Override
public ObjectMetadata getObjectMetadata(String bucketName, String key) throws AmazonServiceException {
    AmazonS3Exception exception = new AmazonS3Exception("Not Found");
    exception.setStatusCode(404);
    exception.setErrorType(ErrorType.Client);
    throw exception;
}
@Test
public void testAccessFailure() throws Exception {
    final AmazonServiceException f = new AmazonServiceException("message", null);
    f.setStatusCode(403);

    f.setErrorCode("AccessDenied");
    assertTrue(new AmazonServiceExceptionMappingService().map(f) instanceof AccessDeniedException);

    f.setErrorCode("SignatureDoesNotMatch");
    assertTrue(new AmazonServiceExceptionMappingService().map(f) instanceof LoginFailureException);
}
@Override
public void setObjectRedirectLocation(String bucketName, String key, String newRedirectLocation)
        throws SdkClientException, AmazonServiceException {
    throw new UnsupportedOperationException("Extend AbstractAmazonS3 to provide an implementation");
}
@Override
public <T> Single<T> sendRequest(SqsAction<T> request) {
    return Single.defer(() -> delegate.sendRequest(request))
            .retry((errCount, error) -> {
                if (errCount > retryCount || request.isBatchAction()) {
                    return false;
                }
                if (error instanceof AmazonSQSException) {
                    return ((AmazonSQSException) error).getErrorType() == AmazonServiceException.ErrorType.Service;
                }
                return true;
            })
            .subscribeWith(SingleSubject.create()); // convert to a hot Single
}
private void executeRequest() throws Exception {
    AmazonHttpClient httpClient = new AmazonHttpClient(new ClientConfiguration());
    try {
        httpClient.requestExecutionBuilder()
                .request(newGetRequest(RESOURCE_PATH))
                .errorResponseHandler(stubErrorHandler())
                .execute();
        fail("Expected exception");
    } catch (AmazonServiceException expected) {
    }
}
@Override
public List<Bucket> listBuckets() throws AmazonClientException, AmazonServiceException {
    ArrayList<Bucket> list = new ArrayList<Bucket>();

    Bucket bucket = new Bucket("camel-bucket");
    bucket.setOwner(new Owner("Camel", "camel"));
    bucket.setCreationDate(new Date());
    list.add(bucket);

    return list;
}
@Test
public void handle_UnmarshallerReturnsException_ClientErrorType() throws Exception {
    httpResponse.setStatusCode(400);
    expectUnmarshallerMatches();
    when(unmarshaller.unmarshall((JsonNode) anyObject())).thenReturn(new CustomException("error"));

    AmazonServiceException ase = responseHandler.handle(httpResponse);

    assertEquals(ERROR_CODE, ase.getErrorCode());
    assertEquals(400, ase.getStatusCode());
    assertEquals(SERVICE_NAME, ase.getServiceName());
    assertEquals(ErrorType.Client, ase.getErrorType());
}
@Override
public void deleteObject(DeleteObjectRequest deleteObjectRequest)
        throws SdkClientException, AmazonServiceException {
    deleteObjectRequest = beforeClientExecution(deleteObjectRequest);
    rejectNull(deleteObjectRequest, "The delete object request must be specified when deleting an object");
    rejectNull(deleteObjectRequest.getBucketName(), "The bucket name must be specified when deleting an object");
    rejectNull(deleteObjectRequest.getKey(), "The key must be specified when deleting an object");

    Request<DeleteObjectRequest> request = createRequest(deleteObjectRequest.getBucketName(),
            deleteObjectRequest.getKey(), deleteObjectRequest, HttpMethodName.DELETE);
    invoke(request, voidResponseHandler, deleteObjectRequest.getBucketName(), deleteObjectRequest.getKey());
}
@Override
public Bucket createBucket(CreateBucketRequest createBucketRequest)
        throws AmazonClientException, AmazonServiceException {
    if ("nonExistingBucket".equals(createBucketRequest.getBucketName())) {
        nonExistingBucketCreated = true;
    }

    Bucket bucket = new Bucket();
    bucket.setName(createBucketRequest.getBucketName());
    bucket.setCreationDate(new Date());
    bucket.setOwner(new Owner("c2efc7302b9011ba9a78a92ac5fd1cd47b61790499ab5ddf5a37c31f0638a8fc ", "Christian Mueller"));
    return bucket;
}
@Override
public Parameters handleRequest(S3Event event, Context context) {
    context.getLogger()
            .log("Input Function [" + context.getFunctionName() + "], S3Event [" + event.toJson().toString() + "]");

    Parameters parameters = new Parameters(
            event.getRecords().get(0).getS3().getBucket().getName(),
            event.getRecords().get(0).getS3().getObject().getKey());

    AWSStepFunctions client = AWSStepFunctionsClientBuilder.defaultClient();
    ObjectMapper jsonMapper = new ObjectMapper();

    StartExecutionRequest request = new StartExecutionRequest();
    request.setStateMachineArn(System.getenv("STEP_MACHINE_ARN"));
    try {
        request.setInput(jsonMapper.writeValueAsString(parameters));
    } catch (JsonProcessingException e) {
        throw new AmazonServiceException("Error in [" + context.getFunctionName() + "]", e);
    }

    context.getLogger()
            .log("Step Function [" + request.getStateMachineArn() + "] will be called with [" + request.getInput() + "]");

    StartExecutionResult result = client.startExecution(request);

    context.getLogger()
            .log("Output Function [" + context.getFunctionName() + "], Result [" + result.toString() + "]");

    return parameters;
}
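// A minimal sketch of the Parameters payload serialized into the Step Functions input above.
// The field and accessor names are assumptions for illustration; the original class is not shown here.
public class Parameters {
    private String bucketName;
    private String objectKey;

    public Parameters() {
        // no-arg constructor so Jackson can deserialize the payload downstream
    }

    public Parameters(String bucketName, String objectKey) {
        this.bucketName = bucketName;
        this.objectKey = objectKey;
    }

    public String getBucketName() { return bucketName; }

    public String getObjectKey() { return objectKey; }
}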
private void printAmazonServiceException(AmazonServiceException ase) {
    LOG.info("Caught an AmazonServiceException, which means your request made it "
            + "to Amazon S3, but was rejected with an error response for some reason.");
    LOG.info("Error Message: " + ase.getMessage());
    LOG.info("HTTP Status Code: " + ase.getStatusCode());
    LOG.info("AWS Error Code: " + ase.getErrorCode());
    LOG.info("Error Type: " + ase.getErrorType());
    LOG.info("Request ID: " + ase.getRequestId());
    LOG.info("Class Name: " + ase.getClass().getName());
}
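// A minimal sketch (not part of the original class) showing how the logging helper above might
// wrap an S3 call; the client, bucket, and key are placeholders.
private void headObjectWithLogging(AmazonS3 s3, String bucketName, String key) {
    try {
        ObjectMetadata metadata = s3.getObjectMetadata(bucketName, key);
        LOG.info("Content Length: " + metadata.getContentLength());
    } catch (AmazonServiceException ase) {
        // The request reached Amazon S3 but was rejected; log the service-side details.
        printAmazonServiceException(ase);
    } catch (SdkClientException sce) {
        // The request never reached Amazon S3, or the response could not be parsed.
        LOG.info("Caught an SdkClientException: " + sce.getMessage());
    }
}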
@Override
public void revokeSecurityGroupIngress() throws AmazonServiceException, AmazonClientException {
    throw new UnsupportedOperationException("Not supported in mock");
}

@Override
public DescribeSpotFleetRequestHistoryResult describeSpotFleetRequestHistory(
        DescribeSpotFleetRequestHistoryRequest describeSpotFleetRequestHistoryRequest)
        throws AmazonServiceException, AmazonClientException {
    throw new UnsupportedOperationException("Not supported in mock");
}

@Override
public void deleteNetworkAcl(DeleteNetworkAclRequest deleteNetworkAclRequest)
        throws AmazonServiceException, AmazonClientException {
    throw new UnsupportedOperationException("Not supported in mock");
}

@Override
public void abortMultipartUpload(AbortMultipartUploadRequest abortMultipartUploadRequest)
        throws AmazonClientException, AmazonServiceException {
    throw new UnsupportedOperationException();
}

@Override
public void disableVgwRoutePropagation(DisableVgwRoutePropagationRequest disableVgwRoutePropagationRequest)
        throws AmazonServiceException, AmazonClientException {
    throw new UnsupportedOperationException("Not supported in mock");
}
@Override
public GetBucketAnalyticsConfigurationResult getBucketAnalyticsConfiguration(
        GetBucketAnalyticsConfigurationRequest getBucketAnalyticsConfigurationRequest)
        throws AmazonServiceException, SdkClientException {
    return call(() -> getDelegate().getBucketAnalyticsConfiguration(getBucketAnalyticsConfigurationRequest));
}
@Override
public DescribeSpotPriceHistoryResult describeSpotPriceHistory()
        throws AmazonServiceException, AmazonClientException {
    throw new UnsupportedOperationException("Not supported in mock");
}

@Override
public S3Object getObject(GetObjectRequest getObjectRequest)
        throws AmazonClientException, AmazonServiceException {
    return getObject(getObjectRequest.getBucketName(), getObjectRequest.getKey());
}

@Override
public CompleteMultipartUploadResult completeMultipartUpload(CompleteMultipartUploadRequest completeMultipartUploadRequest)
        throws AmazonClientException, AmazonServiceException {
    throw new UnsupportedOperationException();
}

@Override
public ObjectListing listNextBatchOfObjects(ObjectListing previousObjectListing)
        throws SdkClientException, AmazonServiceException {
    return listNextBatchOfObjects(new ListNextBatchOfObjectsRequest(previousObjectListing));
}

@Override
public void setObjectAcl(String bucketName, String key, CannedAccessControlList acl)
        throws AmazonClientException, AmazonServiceException {
    throw new UnsupportedOperationException();
}

@Override
public VersionListing listVersions(String bucketName, String prefix)
        throws AmazonClientException, AmazonServiceException {
    throw new UnsupportedOperationException();
}

@Override
public UploadPartResult uploadPart(UploadPartRequest uploadPartRequest)
        throws AmazonClientException, AmazonServiceException {
    throw new UnsupportedOperationException();
}

@Override
public void deleteObject(DeleteObjectRequest deleteObjectRequest)
        throws SdkClientException, AmazonServiceException {
    run(() -> getDelegate().deleteObject(deleteObjectRequest));
}
@Override
public void deleteBucketReplicationConfiguration(DeleteBucketReplicationConfigurationRequest request)
        throws AmazonServiceException, SdkClientException {
    run(() -> getDelegate().deleteBucketReplicationConfiguration(request));
}
@Override
public void setObjectAcl(String bucketName, String key, String versionId, CannedAccessControlList acl)
        throws AmazonClientException, AmazonServiceException {
    throw new UnsupportedOperationException();
}

@Override
public PutObjectResult putObject(String bucketName, String key, File file)
        throws AmazonClientException, AmazonServiceException {
    throw new UnsupportedOperationException();
}

@Override
public InitiateMultipartUploadResult initiateMultipartUpload(InitiateMultipartUploadRequest initiateMultipartUploadRequest)
        throws AmazonClientException, AmazonServiceException {
    throw new UnsupportedOperationException();
}

@Override
public MultipartUploadListing listMultipartUploads(ListMultipartUploadsRequest listMultipartUploadsRequest)
        throws AmazonClientException, AmazonServiceException {
    throw new UnsupportedOperationException();
}

@Override
public ListObjectsV2Result listObjectsV2(String bucketName)
        throws SdkClientException, AmazonServiceException {
    return listObjectsV2(new ListObjectsV2Request().withBucketName(bucketName));
}

@Override
public ObjectMetadata getObjectMetadata(String bucketName, String key)
        throws AmazonClientException, AmazonServiceException {
    throw new UnsupportedOperationException();
}

@Override
public void cancelExportTask(CancelExportTaskRequest cancelExportTaskRequest)
        throws AmazonServiceException, AmazonClientException {
    throw new UnsupportedOperationException("Not supported in mock");
}
@Test(expected = AmazonServiceException.class)
public void testGetPrivateIp_NoInstance() {
    awsHelperService.getPrivateIp(TEST_INSTANCE_ID);
}
@Override
public InitiateMultipartUploadResult initiateMultipartUpload(InitiateMultipartUploadRequest initiateMultipartUploadRequest)
        throws SdkClientException, AmazonServiceException {
    initiateMultipartUploadRequest = beforeClientExecution(initiateMultipartUploadRequest);
    rejectNull(initiateMultipartUploadRequest,
            "The request parameter must be specified when initiating a multipart upload");
    rejectNull(initiateMultipartUploadRequest.getBucketName(),
            "The bucket name parameter must be specified when initiating a multipart upload");
    rejectNull(initiateMultipartUploadRequest.getKey(),
            "The key parameter must be specified when initiating a multipart upload");

    Request<InitiateMultipartUploadRequest> request = createRequest(initiateMultipartUploadRequest.getBucketName(),
            initiateMultipartUploadRequest.getKey(), initiateMultipartUploadRequest, HttpMethodName.POST);
    request.addParameter("uploads", null);

    if (initiateMultipartUploadRequest.getStorageClass() != null) {
        request.addHeader(Headers.STORAGE_CLASS, initiateMultipartUploadRequest.getStorageClass().toString());
    }

    if (initiateMultipartUploadRequest.getRedirectLocation() != null) {
        request.addHeader(Headers.REDIRECT_LOCATION, initiateMultipartUploadRequest.getRedirectLocation());
    }

    if (initiateMultipartUploadRequest.getAccessControlList() != null) {
        addAclHeaders(request, initiateMultipartUploadRequest.getAccessControlList());
    } else if (initiateMultipartUploadRequest.getCannedACL() != null) {
        request.addHeader(Headers.S3_CANNED_ACL, initiateMultipartUploadRequest.getCannedACL().toString());
    }

    if (initiateMultipartUploadRequest.objectMetadata != null) {
        populateRequestMetadata(request, initiateMultipartUploadRequest.objectMetadata);
    }

    populateRequesterPaysHeader(request, initiateMultipartUploadRequest.isRequesterPays());

    // Populate the SSE-C parameters to the request header
    populateSSE_C(request, initiateMultipartUploadRequest.getSSECustomerKey());

    // Populate the SSE AWS KMS parameters to the request header
    populateSSE_KMS(request, initiateMultipartUploadRequest.getSSEAwsKeyManagementParams());

    // Be careful that we don't send the object's total size as the content
    // length for the InitiateMultipartUpload request.
    setZeroContentLength(request);

    // Set the request content to be empty (but not null) to force the runtime to pass
    // any query params in the query string and not the request body, to keep S3 happy.
    request.setContent(new ByteArrayInputStream(new byte[0]));

    @SuppressWarnings("unchecked")
    ResponseHeaderHandlerChain<InitiateMultipartUploadResult> responseHandler =
            new ResponseHeaderHandlerChain<InitiateMultipartUploadResult>(
                    // xml payload unmarshaller
                    new Unmarshallers.InitiateMultipartUploadResultUnmarshaller(),
                    // header handlers
                    new ServerSideEncryptionHeaderHandler<InitiateMultipartUploadResult>(),
                    new S3RequesterChargedHeaderHandler<InitiateMultipartUploadResult>(),
                    new InitiateMultipartUploadHeaderHandler());

    return invoke(request, responseHandler, initiateMultipartUploadRequest.getBucketName(),
            initiateMultipartUploadRequest.getKey());
}
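// A minimal sketch of invoking the operation above through the public client API; the client,
// bucket, and key are placeholders, and the follow-up uploadPart/completeMultipartUpload calls
// are omitted.
private static String startUpload(AmazonS3 s3) {
    InitiateMultipartUploadRequest initRequest =
            new InitiateMultipartUploadRequest("example-bucket", "example-key")
                    .withCannedACL(CannedAccessControlList.BucketOwnerFullControl);
    InitiateMultipartUploadResult initResult = s3.initiateMultipartUpload(initRequest);
    return initResult.getUploadId(); // required by subsequent uploadPart and completeMultipartUpload calls
}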