The following 50 code examples, extracted from open-source Python projects, illustrate how to use botocore.exceptions.ClientError().
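Before the examples, here is a minimal, self-contained sketch of the pattern nearly all of them share: call a boto3 client method inside try, catch ClientError, and branch on the service error code in e.response['Error']['Code']. The bucket name is a hypothetical placeholder, not something from the examples below.

import boto3
from botocore.exceptions import ClientError

def bucket_exists(bucket_name):
    """Return True if the S3 bucket exists, False if it does not.

    head_bucket raises ClientError for both "not found" and
    "forbidden"; the parsed error code distinguishes the cases.
    """
    s3 = boto3.client('s3')
    try:
        s3.head_bucket(Bucket=bucket_name)
        return True
    except ClientError as e:
        if e.response['Error']['Code'] in ('404', 'NoSuchBucket'):
            return False
        raise  # re-raise anything else (e.g. '403' AccessDenied)

# Usage (hypothetical bucket name):
# bucket_exists('my-example-bucket')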
def get_credential_report(iam_client):
    resp1 = iam_client.generate_credential_report()
    if resp1['State'] == 'COMPLETE':
        try:
            response = iam_client.get_credential_report()
            # The report content is bytes in Python 3; decode before parsing.
            credential_report_csv = response['Content'].decode('utf-8')
            # print(credential_report_csv)
            reader = csv.DictReader(credential_report_csv.splitlines())
            # print(reader.fieldnames)
            credential_report = []
            for row in reader:
                credential_report.append(row)
            return credential_report
        except ClientError as e:
            # ClientError has no .message attribute in Python 3;
            # pull the message out of the parsed error response instead.
            print("Unknown error getting Report: " + e.response['Error']['Message'])
    else:
        sleep(2)
        return get_credential_report(iam_client)

# Query the account's password policy for the password age. Return that number of days
def remote_get_lambda(self, **kwargs):
    response = False
    try:
        response = self.awslambda.get_function(
            FunctionName=kwargs["FunctionName"],
        )
        tags = response.get("Tags", False)
        if tags:
            response["Configuration"]["Tags"] = tags
    except ClientError as e:
        if e.response['Error']['Code'] == "ResourceNotFoundException":
            return False
        else:
            # TODO: handle other errors there
            pass
    return response
def remote_update_alias(self, **kwargs):
    conf = kwargs
    # Initialized so the final return doesn't raise NameError if an
    # unhandled error code falls through the except branch below.
    response = None
    try:
        logger.info("Update alias {} for function {}"
                    " with version {}".format(conf["Name"],
                                              conf["FunctionName"],
                                              conf["FunctionVersion"]))
        response = self.awslambda.update_alias(**conf)
    except ClientError as e:
        if e.response['Error']['Code'] == "ResourceNotFoundException":
            logger.info("Alias {} does not exist for function {}. "
                        "Creating a new one with version {}".format(conf["Name"],
                                                                    conf["FunctionName"],
                                                                    conf["FunctionVersion"]))
            response = self.awslambda.create_alias(**conf)
        else:
            # TODO: handle other errors there
            pass
    return response
def _create_lambda(arn, func_name, func_desc, lambda_handler, lambda_main, runtime):
    func = dict()
    lamb = boto3.client('lambda')
    # The deployment package is a zip archive, so read it as bytes.
    with open(temp_deploy_zip, 'rb') as deploy:
        func['ZipFile'] = deploy.read()
    try:
        resp = lamb.create_function(
            FunctionName=func_name,
            Runtime=runtime,
            Publish=True,
            Description=func_desc,
            Role=arn,
            Code=func,
            Handler='{0}.{1}'.format(lambda_main, lambda_handler))
        logging.info("Create Lambda Function resp:{0}".format(
            json.dumps(resp, indent=4, sort_keys=True)))
        return resp
    except ClientError as ce:
        if ce.response['Error']['Code'] == 'ValidationException':
            logging.warning("Validation Error {0} creating function '{1}'.".format(
                ce, func_name))
        else:
            logging.error("Unexpected Error: {0}".format(ce))
def _create_function_alias(func_alias, func_name, func_version):
    lamb = boto3.client('lambda')
    try:
        resp = lamb.create_alias(
            Name=func_alias,
            FunctionName=func_name,
            FunctionVersion=func_version
        )
        logging.info("Create Lambda Alias resp:{0}".format(
            json.dumps(resp, indent=4, sort_keys=True)))
        return resp
    except ClientError as ce:
        if ce.response['Error']['Code'] == 'ValidationException':
            logging.warning("Validation Error {0} creating alias '{1}'.".format(
                ce, func_alias))
        else:
            logging.error("Unexpected Error: {0}".format(ce))
def _update_lambda_function(zip_file, func_name):
    lamb = boto3.client('lambda')
    try:
        resp = lamb.update_function_code(
            FunctionName=func_name,
            ZipFile=zip_file.read(),
            Publish=True
        )
        return resp['Version']
    except ClientError as ce:
        if ce.response['Error']['Code'] == 'ValidationException':
            logging.warning(
                "Validation Error {0} updating function '{1}'.".format(
                    ce, func_name))
        else:
            logging.error("Unexpected Error: {0}".format(ce))
def _update_lambda_alias(func_alias, func_name, func_version):
    lamb = boto3.client('lambda')
    try:
        resp = lamb.update_alias(
            Name=func_alias,
            FunctionName=func_name,
            FunctionVersion=func_version
        )
        return resp['AliasArn']
    except ClientError as ce:
        if ce.response['Error']['Code'] == 'ValidationException':
            # Log the alias name, which is what the message refers to.
            logging.warning(
                "Validation Error {0} updating alias '{1}'.".format(
                    ce, func_alias))
        else:
            logging.error("Unexpected Error: {0}".format(ce))
def get_role_arn(role_params):
    if role_params['account_id']:
        # Try to map the name to an account ID. If it isn't found, assume an ID
        # was passed in and use it as-is.
        role_params['account_id'] = app.config['AWS_ACCOUNT_MAP'].get(
            role_params['account_id'],
            role_params['account_id']
        )
    else:
        if app.config['DEFAULT_ACCOUNT_ID']:
            role_params['account_id'] = app.config['DEFAULT_ACCOUNT_ID']
        # No default account id defined. Get the ARN by looking up the role
        # name. This is a backwards-compat use-case for when we didn't require
        # the default account id.
        else:
            iam = iam_client()
            try:
                with PrintingBlockTimer('iam.get_role'):
                    role = iam.get_role(RoleName=role_params['name'])
                    return role['Role']['Arn']
            except ClientError as e:
                response = e.response['ResponseMetadata']
                # ClientError has no .message in Python 3; use the parsed error message.
                raise GetRoleError((response['HTTPStatusCode'],
                                    e.response['Error']['Message']))

    # Return a generated ARN
    return 'arn:aws:iam::{account_id}:role/{name}'.format(**role_params)
def test__init__():
    mock_client = Mock()
    with patch.object(boto3, 'client', return_value=mock_client):
        error_response = {'Error': {'Code': 'ResourceInUseException'}}
        mock_client.create_stream = Mock(side_effect=ClientError(error_response, 'create_stream'))
        StreamWriter('blah')
        assert mock_client.create_stream.called
        assert call.get_waiter('stream_exists') in mock_client.method_calls, "We handled stream existence"

        error_response = {'Error': {'Code': 'Something else'}}
        mock_client.create_stream = Mock(side_effect=ClientError(error_response, 'create_stream'))
        mock_client.reset_mock()
        with pytest.raises(ClientError):
            StreamWriter('blah')
        assert mock_client.create_stream.called
        assert call.get_waiter('stream_exists') not in mock_client.method_calls, "never reached"
def __init__(self, stream_name, back_off_limit=60, send_window=13):
    self.stream_name = stream_name
    self.back_off_limit = back_off_limit
    self.last_send = 0
    self._kinesis = boto3.client('kinesis')
    self._sequence_number_for_ordering = '0'
    self._record_agg = aws_kinesis_agg.aggregator.RecordAggregator()
    self._send_window = send_window

    try:
        self._kinesis.create_stream(StreamName=stream_name, ShardCount=1)
    except ClientError as e:
        # ResourceInUseException is raised when the stream already exists
        if e.response['Error']['Code'] != 'ResourceInUseException':
            logger.error(e)
            raise

    waiter = self._kinesis.get_waiter('stream_exists')
    # waits up to 180 seconds for stream to exist
    waiter.wait(StreamName=self.stream_name)
def get_lifecycle(bucket_name, **conn):
    try:
        result = get_bucket_lifecycle_configuration(Bucket=bucket_name, **conn)
    except ClientError as e:
        if 'NoSuchLifecycleConfiguration' not in str(e):
            raise e
        return []

    for rule in result['Rules']:
        # Save all dates as a Proper ISO 8601 String:
        for transition in rule.get('Transitions', []):
            if 'Date' in transition:
                transition['Date'] = transition["Date"].replace(tzinfo=None, microsecond=0).isoformat() + "Z"

        if rule.get("Expiration"):
            if 'Date' in rule["Expiration"]:
                rule["Expiration"]["Date"] = \
                    rule["Expiration"]["Date"].replace(tzinfo=None, microsecond=0).isoformat() + "Z"

    return result['Rules']
def get_website(bucket_name, **conn):
    try:
        result = get_bucket_website(Bucket=bucket_name, **conn)
    except ClientError as e:
        if "NoSuchWebsiteConfiguration" not in str(e):
            raise e
        return None

    website = {}
    if result.get("IndexDocument"):
        website["IndexDocument"] = result["IndexDocument"]
    if result.get("RoutingRules"):
        website["RoutingRules"] = result["RoutingRules"]
    if result.get("RedirectAllRequestsTo"):
        website["RedirectAllRequestsTo"] = result["RedirectAllRequestsTo"]
    if result.get("ErrorDocument"):
        website["ErrorDocument"] = result["ErrorDocument"]

    return website
def get_cors(bucket_name, **conn):
    try:
        result = get_bucket_cors(Bucket=bucket_name, **conn)
    except ClientError as e:
        if "NoSuchCORSConfiguration" not in str(e):
            raise e
        return []

    cors = []
    for rule in result["CORSRules"]:
        cors_rule = {}
        if rule.get("AllowedHeaders"):
            cors_rule["AllowedHeaders"] = rule["AllowedHeaders"]
        if rule.get("AllowedMethods"):
            cors_rule["AllowedMethods"] = rule["AllowedMethods"]
        if rule.get("AllowedOrigins"):
            cors_rule["AllowedOrigins"] = rule["AllowedOrigins"]
        if rule.get("ExposeHeaders"):
            cors_rule["ExposeHeaders"] = rule["ExposeHeaders"]
        if rule.get("MaxAgeSeconds"):
            cors_rule["MaxAgeSeconds"] = rule["MaxAgeSeconds"]
        cors.append(cors_rule)

    return cors
def get_bucket_region(**kwargs):
    # Some s3 buckets do not allow viewing of details. We do not want to
    # throw an error in this case because we still want to see that the
    # bucket exists
    try:
        result = get_bucket_location(**kwargs)
        location = result['LocationConstraint']
    except ClientError as e:
        if 'AccessDenied' not in str(e):
            raise e
        return None

    if not location:
        return 'us-east-1'

    return S3_REGION_MAPPING.get(location, location)
def create(self):
    """
    Create a new lambda function or update an existing function.

    :return:
    """
    try:
        PrintMsg.out(self.get())
        PrintMsg.updating('{} in region {}'.format(
            self.function_name, self.region))
        self.__update__()
    except ClientError as e:
        # Raw string so '\s' is a regex token, not an invalid string escape.
        m = re.search(r'.*\sarn:aws:lambda:(.*):(.*):function:.*', str(e))
        if m:
            account_id = m.group(2)
        else:
            account_id = None
        PrintMsg.out(e)
        PrintMsg.creating(self.function_name)
        self.__create__(account_id)
def commit(self, preserve_cache=False):
    if not preserve_cache:
        self._clear_cache()
    if not self._change_batch:
        return
    try:
        self._client.change_resource_record_sets(
            HostedZoneId=self.id,
            ChangeBatch={'Changes': self._change_batch}
        )
    except ClientError as excp:
        if excp.response['Error']['Code'] == 'InvalidInput':
            logging.exception("failed to process batch %r", self._change_batch)
        raise
    self._reset_change_batch()
def _cache_aws_records(self):
    if self._aws_records is not None:
        return
    if not self.id:
        return
    paginator = self._client.get_paginator('list_resource_record_sets')
    records = []
    try:
        for page in paginator.paginate(HostedZoneId=self.id):
            records.extend(page['ResourceRecordSets'])
    except ClientError as excp:
        if excp.response['Error']['Code'] != 'NoSuchHostedZone':
            raise
        self._clear_cache()
    else:
        self._aws_records = records
        self._exists = True
def _reconcile_zone(self):
    """
    Handles zone creation/deletion.
    """
    if self.db_zone.deleted:
        self.delete()
    elif self.db_zone.route53_id is None:
        self.create()
    elif not self.exists:
        try:
            self.create()
        except ClientError as excp:
            if excp.response['Error']['Code'] != 'HostedZoneAlreadyExists':
                raise
            # This can happen if a zone was manually deleted from AWS.
            # Create will fail because we re-use the caller_reference
            self.db_zone.caller_reference = None
            self.db_zone.save()
            self.create()
def reconcile(self):
    if self.ip.deleted:
        self.delete()
        self.ip.delete()
    elif self.exists:
        # if the desired config is not a subset of the current config
        if not self.desired_config.items() <= self.config.items():
            self.delete()
            self.create()
        else:
            logger.info("%-15s nothing to do", self.ip.ip)
    else:
        try:
            self.create()
        except ClientError as excp:
            if excp.response['Error']['Code'] != 'HealthCheckAlreadyExists':
                raise
            self.ip.healthcheck_caller_reference = None
            self.ip.save()
            self.create()
def check_bucket(bucket):
    s3 = boto3.client('s3', region_name=AWS_DEFAULT_REGION)
    print('Checking bucket: ' + bucket)
    try:
        s3.head_bucket(Bucket=bucket)
    except ClientError:
        print('Creating bucket: ' + bucket)
        args = {
            'Bucket': bucket
        }
        if AWS_DEFAULT_REGION != 'us-east-1':
            args['CreateBucketConfiguration'] = {
                'LocationConstraint': AWS_DEFAULT_REGION
            }
        s3.create_bucket(**args)
        waiter = s3.get_waiter('bucket_exists')
        waiter.wait(Bucket=bucket)
def get_timestamp_from_s3_object(bucket, key):
    s3 = boto3.client('s3', region_name=AWS_DEFAULT_REGION)
    try:
        response = s3.get_object(
            Bucket=bucket,
            Key=key
        )
        timestamp = response['LastModified']  # We assume this is UTC.
    except ClientError:
        timestamp = datetime(1970, 1, 1, tzinfo=None)
    return (timestamp.replace(tzinfo=None) - datetime(1970, 1, 1, tzinfo=None)).total_seconds()

# IAM
def run(self):
    while not self.job_queue.empty():
        try:
            key = self.job_queue.get(True, 1)
        except Empty:
            return
        try:
            self.s3.head_object(Bucket=self.source, Key=key)
            logger.info('Key: ' + key + ' is present in source bucket, nothing to do.')
        except ClientError as e:
            if int(e.response['Error']['Code']) == 404:
                # The key was not found.
                logger.info('Key: ' + key + ' is not present in source bucket. Deleting orphaned key.')
                self.s3.delete_object(Bucket=self.destination, Key=key)
            else:
                raise e

# Functions
def test_create_unable_to_create_vpc(self):
    self.mock_object(
        self.fake_driver.client, 'create_vpc',
        mock.Mock(side_effect=ClientError(
            fake_error_code,
            'operation_name'
        )))
    self.mock_object(
        self.fake_driver.client, 'create_subnet', mock.Mock())

    self.assertRaises(ClientError, self.fake_driver.create,
                      'fake_name', '10.10.10.0/24')

    self.fake_driver.client.create_vpc.\
        assert_called_once_with(
            CidrBlock='10.10.10.0/24',
            InstanceTenancy='default'
        )
    self.assertFalse(self.fake_driver.client.create_subnet.called)
def test_create_unable_to_create_subnet(self):
    self.mock_object(
        self.fake_driver.client, 'create_vpc',
        mock.Mock(return_value=fake_vpc_out))
    self.mock_object(
        self.fake_driver.client, 'create_subnet',
        mock.Mock(side_effect=ClientError(
            fake_error_code,
            'operation_name'
        )))

    self.assertRaises(ClientError, self.fake_driver.create,
                      'fake_name', '10.10.10.0/24')

    self.fake_driver.client.create_vpc.\
        assert_called_once_with(
            CidrBlock='10.10.10.0/24',
            InstanceTenancy='default'
        )
    self.fake_driver.client.create_subnet.\
        assert_called_once_with(
            VpcId='vpc-5eed72c5',
            CidrBlock='10.10.10.0/24'
        )
def test_delete_unable_to_describe_subnets(self):
    self.mock_object(
        self.fake_driver.client, 'describe_subnets',
        mock.Mock(side_effect=ClientError(
            fake_error_code,
            'operation_name'
        )))
    self.mock_object(self.fake_driver.client, 'delete_subnet', mock.Mock())
    self.mock_object(self.fake_driver.client, 'delete_vpc', mock.Mock())

    self.assertRaises(ClientError, self.fake_driver.delete, 'subnet-9dcb6b38')

    self.fake_driver.client.describe_subnets.\
        assert_called_once_with(SubnetIds=['subnet-9dcb6b38'])
    self.assertFalse(self.fake_driver.client.delete_subnet.called)
    self.assertFalse(self.fake_driver.client.delete_vpc.called)
def test_delete_unable_to_delete_vpc(self):
    self.mock_object(
        self.fake_driver.client, 'describe_subnets',
        mock.Mock(return_value=fake_describe_subnets))
    self.mock_object(
        self.fake_driver.client, 'delete_subnet',
        mock.Mock(return_value=fake_delete_subnet_out))
    self.mock_object(
        self.fake_driver.client, 'delete_vpc',
        mock.Mock(side_effect=ClientError(
            fake_error_code,
            'operation_name'
        )))

    self.assertRaises(ClientError, self.fake_driver.delete, 'subnet-9dcb6b38')

    self.fake_driver.client.describe_subnets.\
        assert_called_once_with(SubnetIds=['subnet-9dcb6b38'])
    self.fake_driver.client.delete_subnet.\
        assert_called_once_with(SubnetId='subnet-9dcb6b38')
    self.fake_driver.client.delete_vpc.\
        assert_called_once_with(VpcId='vpc-5eed72c5')
def test_upload_object_failed(self):
    self.mock_object(
        self.fake_driver.client, 'put_object',
        mock.Mock(side_effect=ClientError(
            fake_error_code_resp,
            'UploadObject'
        )))

    self.assertRaises(ClientError, self.fake_driver.upload_object,
                      'fake-container', 'fake-obj', 'fake-content',
                      metadata={'newkey': 'newvalue'},
                      content_length=None)

    self.fake_driver.client.put_object.assert_called_once_with(
        Bucket='fake-container',
        Key='fake-obj',
        Body='fake-content',
        ContentLength=None,
        Metadata={'x-amz-newkey': 'newvalue'},
    )
def test_download_object_failed(self):
    self.mock_object(
        self.fake_driver.client, 'get_object',
        mock.Mock(side_effect=ClientError(
            fake_error_code_resp,
            'DownloadObject'
        )))

    self.assertRaises(ClientError, self.fake_driver.download_object,
                      'fake-container', 'fake-obj')

    self.fake_driver.client.get_object.assert_called_once_with(
        Bucket='fake-container',
        Key='fake-obj',
    )
def test_stat_object_failed(self):
    self.mock_object(
        self.fake_driver.client, 'head_object',
        mock.Mock(side_effect=ClientError(
            fake_error_code_resp,
            'HeadObject'
        )))

    self.assertRaises(ClientError, self.fake_driver.stat_object,
                      'fake-container', 'fake-obj')

    self.fake_driver.client.head_object.assert_called_once_with(
        Bucket='fake-container',
        Key='fake-obj',
    )
def test_delete_object_failed(self):
    self.mock_object(
        self.fake_driver.client, 'delete_object',
        mock.Mock(side_effect=ClientError(
            fake_error_code_resp,
            'DeleteObject'
        )))

    self.assertRaises(ClientError, self.fake_driver.delete_object,
                      'fake-container', 'fake-obj')

    self.fake_driver.client.delete_object.assert_called_once_with(
        Bucket='fake-container',
        Key='fake-obj',
    )
def test_list_container_objects_failed(self):
    self.mock_object(
        self.fake_driver.client, 'list_objects',
        mock.Mock(side_effect=ClientError(
            fake_error_code_resp,
            'ListObjects'
        )))

    self.assertRaises(ClientError, self.fake_driver.list_container_objects,
                      'fake-container', prefix=None, delimiter=None)

    self.fake_driver.client.list_objects.assert_called_once_with(
        Bucket='fake-container',
        Prefix=None,
        Delimiter=None,
    )
def get_item(self, data):
    """Method to get an item

    Args:
        data (dict): A dictionary of attributes to access an item (hash and sort keys)

    Returns:
        (dict)
    """
    try:
        response = self.table.get_item(Key=data, ConsistentRead=True)
    except ClientError as err:
        # ClientError has no .message attribute in Python 3; format the
        # exception itself.
        raise IOError("Error getting item: {}".format(err))

    if response['ResponseMetadata']['HTTPStatusCode'] != 200:
        raise IOError("Error getting item: {}".format(response['ResponseMetadata']))

    if "Item" in response:
        return response["Item"]
    else:
        return None
def delete_item(self, data):
    """Method to delete an item

    Args:
        data (dict): A dictionary of attributes to access an item (hash and sort keys)

    Returns:
        None
    """
    try:
        response = self.table.delete_item(Key=data)
    except ClientError as err:
        # ClientError has no .message attribute in Python 3; format the
        # exception itself.
        raise IOError("Error deleting item: {}".format(err))

    if response['ResponseMetadata']['HTTPStatusCode'] != 200:
        raise IOError("Error deleting item: {}".format(response['ResponseMetadata']))
def create_trigger_from_bucket(self, bucket_name, function_arn):
    notification = {
        "LambdaFunctionConfigurations": [
            {
                "LambdaFunctionArn": function_arn,
                "Events": ["s3:ObjectCreated:*"],
                "Filter": {
                    "Key": {
                        "FilterRules": [
                            {
                                "Name": "prefix",
                                "Value": "input/"
                            }
                        ]
                    }
                }
            }
        ]
    }
    try:
        self.get_s3().put_bucket_notification_configuration(
            Bucket=bucket_name,
            NotificationConfiguration=notification
        )
    except ClientError as ce:
        print("Error configuring S3 bucket: %s" % ce)
def get_functions_arn_list(self):
    arn_list = []
    try:
        # Creation of a function filter by tags
        client = self.get_resource_groups_tagging_api()
        tag_filters = [
            {'Key': 'owner', 'Values': [self.get_user_name_or_id()]},
            {'Key': 'createdby', 'Values': ['scar']}
        ]
        response = client.get_resources(TagFilters=tag_filters, TagsPerPage=100)
        for function in response['ResourceTagMappingList']:
            arn_list.append(function['ResourceARN'])
        while ('PaginationToken' in response) and (response['PaginationToken']):
            response = client.get_resources(PaginationToken=response['PaginationToken'],
                                            TagFilters=tag_filters,
                                            TagsPerPage=100)
            for function in response['ResourceTagMappingList']:
                arn_list.append(function['ResourceARN'])
    except ClientError as ce:
        print("Error getting function arn by tag: %s" % ce)
    return arn_list
def test_init_retention_policy_error(self, mock_aws_client):
    mock_aws_client.return_value.get_access_key.return_value = 'test_key'
    mock_aws_client.return_value.get_lambda.return_value.create_function.return_value = {
        'FunctionArn': 'arn123',
        'Timeout': '300',
        'MemorySize': '512',
        'FunctionName': 'f1-name',
        'Extra1': 'e1',
        'Extra2': 'e2'}
    mock_aws_client.return_value.get_log.return_value.put_retention_policy.side_effect = ClientError(
        {'Error': {'Code': '42', 'Message': 'test_message'}}, 'test2')
    args = Args()
    args.verbose = False
    Scar().init(args)
    output = TestScar.capturedOutput.getvalue()
    self.assertTrue("Error setting log retention policy:" in output)
    self.assertTrue("An error occurred (42) when calling the test2 operation: test_message" in output)
def test_init_event_source_error(self, mock_aws_client):
    mock_aws_client.return_value.get_access_key.return_value = 'test_key'
    mock_aws_client.return_value.get_lambda.return_value.create_function.return_value = {
        'FunctionArn': 'arn123',
        'Timeout': '300',
        'MemorySize': '512',
        'FunctionName': 'f1-name',
        'Extra1': 'e1',
        'Extra2': 'e2'}
    mock_aws_client.return_value.check_and_create_s3_bucket.side_effect = ClientError(
        {'Error': {'Code': '42', 'Message': 'test_message'}}, 'test2')
    args = Args()
    args.verbose = False
    args.event_source = True
    Scar().init(args)
    output = TestScar.capturedOutput.getvalue()
    self.assertTrue("Error creating the event source:" in output)
    self.assertTrue("An error occurred (42) when calling the test2 operation: test_message" in output)
def test_init_log_group_error(self, mock_aws_client):
    mock_aws_client.return_value.get_access_key.return_value = 'test_key'
    mock_aws_client.return_value.get_lambda.return_value.create_function.return_value = {
        'FunctionArn': 'arn123',
        'Timeout': '300',
        'MemorySize': '512',
        'FunctionName': 'f1-name',
        'Extra1': 'e1',
        'Extra2': 'e2'}
    mock_aws_client.return_value.get_log.return_value.create_log_group.side_effect = ClientError(
        {'Error': {'Code': '42', 'Message': 'test_message'}}, 'test2')
    args = Args()
    args.verbose = False
    Scar().init(args)
    output = TestScar.capturedOutput.getvalue()
    self.assertTrue("Error creating log groups:" in output)
    self.assertTrue("An error occurred (42) when calling the test2 operation: test_message" in output)
def test_init_existing_log_group(self, mock_aws_client):
    mock_aws_client.return_value.get_access_key.return_value = 'test_key'
    mock_aws_client.return_value.get_lambda.return_value.create_function.return_value = {
        'FunctionArn': 'arn123',
        'Timeout': '300',
        'MemorySize': '512',
        'FunctionName': 'f1-name',
        'Extra1': 'e1',
        'Extra2': 'e2'}
    mock_aws_client.return_value.get_log.return_value.create_log_group.side_effect = ClientError(
        {'Error': {'Code': 'ResourceAlreadyExistsException', 'Message': 'test_message'}}, 'test2')
    args = Args()
    args.verbose = False
    Scar().init(args)
    output = TestScar.capturedOutput.getvalue()
    self.assertTrue("Function 'test-name' successfully created.\n" in output)
    self.assertTrue("Warning: Using existent log group '/aws/lambda/test-name'\n\n" in output)
def test_has_private_bubble_other_clienterrors(botomock):
    def mock_api_call(self, operation_name, api_params):
        parsed_response = {'Error': {'Code': '403', 'Message': 'Not found'}}
        raise ClientError(parsed_response, operation_name)

    urls = (
        'https://s3.example.com/private/prefix/',
    )
    downloader = SymbolDownloader(urls)

    # Expect this to raise a ClientError because the bucket ('private')
    # doesn't exist. So boto3 would normally trigger a ClientError
    # with a code 'Forbidden'.
    with botomock(mock_api_call):
        with pytest.raises(ClientError):
            downloader.has_symbol(
                'xul.pdb',
                '44E4EC8C2F41492B9369D6B9A059577C2',
                'xul.sym'
            )
def test_get_stream_private_other_clienterrors(botomock):
    def mock_api_call(self, operation_name, api_params):
        assert operation_name == 'GetObject'
        parsed_response = {
            'Error': {'Code': '403', 'Message': 'Forbidden'},
        }
        raise ClientError(parsed_response, operation_name)

    urls = (
        'https://s3.example.com/private/prefix/',
    )
    downloader = SymbolDownloader(urls)
    with botomock(mock_api_call):
        stream = downloader.get_symbol_stream(
            'xul.pdb',
            '44E4EC8C2F41492B9369D6B9A059577C2',
            'xul.sym'
        )
        with pytest.raises(ClientError):
            next(stream)
def key_existing(client, bucket, key):
    """return a tuple of (
        key's size if it exists or 0,
        S3 key metadata
    )
    If the file doesn't exist, return None for the metadata.
    """
    # Return 0 if the key can't be found so the memoize cache can cope
    try:
        response = client.head_object(
            Bucket=bucket,
            Key=key,
        )
        return response['ContentLength'], response.get('Metadata')
    except ClientError as exception:
        if exception.response['Error']['Code'] == '404':
            return 0, None
        raise
    except (ReadTimeout, socket.timeout) as exception:
        logger.info(
            f'ReadTimeout trying to list_objects_v2 for {bucket}:'
            f'{key} ({exception})'
        )
        return 0, None
def is_timeout(ex, op_name=None):
    """Check the exception to determine if it is a Boto3 ClientError
    thrown because the task timed out.

    Args:
        ex (Exception) : Exception to check
        op_name (string|None) : (Optional) name of the operation that was attempted

    Returns:
        bool : If the exception was caused by the task timing out
    """
    try:
        rst = ex.response['Error']['Code'] == 'TaskTimedOut'
        if op_name:
            rst &= ex.operation_name == op_name
        return rst
    except (AttributeError, KeyError, TypeError):
        # Not a ClientError (no .response / .operation_name), so not a timeout.
        return False
def success(self, output):
    """Marks the task successfully complete and returns the processed data

    Note: This method will silently fail if the task has timed out

    Args:
        output (string|dict): Json response to return to the state machine
    """
    if self.token is None:
        raise Exception("Not currently working on a task")

    output = json.dumps(output)

    try:
        resp = self.client.send_task_success(taskToken=self.token, output=output)
    except ClientError as e:
        # eat the timeout
        if not self.is_timeout(e):
            self.log.exception("Error sending task success")
            raise
    finally:
        self.token = None  # finished with task
def failure(self, error, cause):
    """Marks the task as a failure with a given reason

    Note: This method will silently fail if the task has timed out

    Args:
        error (string): Failure error
        cause (string): Failure error cause
    """
    if self.token is None:
        raise Exception("Not currently working on a task")

    try:
        resp = self.client.send_task_failure(taskToken=self.token, error=error, cause=cause)
    except ClientError as e:
        # eat the timeout
        if not self.is_timeout(e):
            self.log.exception("Error sending task failure")
            raise
    finally:
        self.token = None  # finished with task
def heartbeat(self, token):
    """Called from the heartbeat thread every X seconds"""
    if token is not None and token != self._heartbeat_fail_token:
        try:
            self.logger.debug('Sending heartbeat for task')
            self.heartbeat_sf_client.send_task_heartbeat(taskToken=token)
            self._heartbeat_fail_token = None
        except ClientError as e:
            ecode = e.response['Error'].get('Code', 'Unknown')
            if ecode in ['TaskDoesNotExist', 'InvalidToken', 'TaskTimedOut']:
                # We set the heartbeat_fail_token so we don't retry a heartbeat for this token.
                self._heartbeat_fail_token = token
                # We only use debug level logging since the task either deleted or ended.
                self.logger.debug('Error sending heartbeat for task: %s', ecode)
            else:
                self.logger.exception('Error sending heartbeat for task')
        except Exception:
            self.logger.exception('Error sending heartbeat for task')
def process_DetachVolume(event, volume_id, username):
    #
    # Capture who did the detach and when
    #
    logger.info("user: " + username + " detached Volume " + volume_id)
    try:
        response = client.create_tags(
            Resources=[volume_id],
            Tags=[
                {'Key': TAG_PREFIX + 'detached_by', 'Value': username},
                {'Key': TAG_PREFIX + 'detached_date', 'Value': event['detail']['eventTime']}
            ]
        )
    except ClientError as e:
        # ClientError has no .message attribute in Python 3; format the
        # exception itself.
        logger.error("unable to tag volume " + volume_id + " with username " +
                     username + ": " + str(e))
# end process_DetachVolume()
def email_user(email, message, account_name):
    global ACTION_SUMMARY  # This is what we send to the admins

    if SEND_EMAIL != "true":
        return  # Abort if we're not supposed to send email
    if message == "":
        return  # Don't send an empty message

    client = boto3.client('ses')
    body = EXPLANATION_HEADER + "\n" + message + "\n\n" + EXPLANATION_FOOTER
    try:
        response = client.send_email(
            Source=FROM_ADDRESS,
            Destination={'ToAddresses': [email]},
            Message={
                'Subject': {'Data': email_subject.format(account_name)},
                'Body': {'Text': {'Data': body}}
            }
        )
        ACTION_SUMMARY = ACTION_SUMMARY + "\nEmail Sent to {}".format(email)
        return
    except ClientError as e:
        # ClientError has no .message attribute in Python 3; format the
        # exception itself.
        print("Failed to send message to {}: {}".format(email, e))
        ACTION_SUMMARY = ACTION_SUMMARY + "\nERROR: Message to {} was rejected: {}".format(email, e)
def modify_bucket_tags(session_factory, buckets, add_tags=(), remove_tags=()):
    for bucket in buckets:
        client = bucket_client(local_session(session_factory), bucket)
        # all the tag marshalling back and forth is a bit gross :-(
        new_tags = {t['Key']: t['Value'] for t in add_tags}
        for t in bucket.get('Tags', ()):
            if (t['Key'] not in new_tags and
                    not t['Key'].startswith('aws') and
                    t['Key'] not in remove_tags):
                new_tags[t['Key']] = t['Value']
        tag_set = [{'Key': k, 'Value': v} for k, v in new_tags.items()]
        try:
            client.put_bucket_tagging(
                Bucket=bucket['Name'], Tagging={'TagSet': tag_set})
        except ClientError as e:
            log.exception(
                'Exception tagging bucket %s: %s', bucket['Name'], e)
            continue