The following 48 code examples, extracted from open-source Python projects, illustrate how to use boto3.resource().
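Before the project-specific examples, here is a minimal sketch of the basic boto3.resource() pattern: create a high-level service resource, then work with collections and individual resources through it. The bucket name and key used here are placeholders, not taken from any of the projects below.

import boto3

# Create a high-level S3 service resource (credentials come from the default
# provider chain: environment variables, shared config files, or an instance role).
s3 = boto3.resource('s3')

# Iterate over all buckets in the account.
for bucket in s3.buckets.all():
    print(bucket.name)

# Read and write a single object (placeholder bucket and key).
obj = s3.Object('example-bucket', 'hello.txt')
obj.put(Body=b'hello world')
print(obj.get()['Body'].read())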
def spin_up(data):
    ec2 = boto3.resource('ec2')
    instances = ec2.create_instances(
        ImageId=settings.EC2_AMI_ID,
        InstanceType=settings.EC2_INSTANCE_TYPE,
        KeyName=settings.EC2_SSH_KEYPAIR_ID,
        MinCount=1,
        MaxCount=1,
        IamInstanceProfile={
            'Arn': settings.EC2_IAM_INSTANCE_PROFILE_ARN
        },
        InstanceInitiatedShutdownBehavior='terminate',
        SecurityGroupIds=[settings.EC2_SECURITY_GROUP_NAME],
        UserData=USERDATA_TEMPLATE.format(**data)
    )
    instance = instances[0]
    logger.info('Spinning up instance with id {} at {}'.format(instance.id, instance.launch_time))
    return instance.id
def bucket_load(self, *args, **kwargs):
    """
    Calls s3.Client.list_buckets() to update the attributes of the Bucket resource.
    """
    # The docstring above is phrased this way to match what the autogenerated
    # docs produce.
    # We can't actually get the bucket's attributes from a HeadBucket,
    # so we need to use a ListBuckets and search for our bucket.
    response = self.meta.client.list_buckets()
    for bucket_data in response['Buckets']:
        if bucket_data['Name'] == self.name:
            self.meta.data = bucket_data
            break
    else:
        raise ClientError({'Error': {'Code': '404', 'Message': 'NotFound'}},
                          'ListBuckets')
def _create_lambda_from_s3(self):
    zip_file = MockZipFile.create_zip("test")
    bucket_name = "test_bucket"
    s3 = boto3.resource('s3')
    bucket = s3.Bucket(bucket_name)
    bucket.create()
    bucket.put_object(
        Key=zip_file,
        Body=MockZipFile.read_file(zip_file)
    )
    self.lambda_conf["Code"] = {'S3Bucket': bucket_name, 'S3Key': zip_file}
    response = self.deploy.remote_create_lambada(**self.lambda_conf)
    self.assertEqual(response["ResponseMetadata"]["HTTPStatusCode"], 201)
    for key in ["FunctionName", "Role", "Runtime", "Handler"]:
        self.assertEqual(response[key], self.lambda_conf[key])
    return response
def close(self, *args, **kwargs):
    """
    Engine closed, copy file to DB
    """
    super(DatabaseWrapper, self).close(*args, **kwargs)
    signature_version = self.settings_dict.get("SIGNATURE_VERSION", "s3v4")
    s3 = boto3.resource(
        's3',
        config=botocore.client.Config(signature_version=signature_version)
    )
    try:
        with open(self.settings_dict['NAME'], 'rb') as f:
            fb = f.read()
            bytesIO = BytesIO()
            bytesIO.write(fb)
            bytesIO.seek(0)
            s3_object = s3.Object(self.settings_dict['BUCKET'],
                                  self.settings_dict['REMOTE_NAME'])
            result = s3_object.put('rb', Body=bytesIO)
    except Exception as e:
        print(e)
    logging.debug("Saved to remote DB!")
def __init__(self, ansible_config_path, aws_key_name=None, interval=60,
             qname='sm_annotate', debug=False):
    with open(ansible_config_path) as fp:
        self.ansible_config = yaml.load(fp)
    self.interval = min(interval, 1200)
    self.aws_key_name = aws_key_name or self.ansible_config['aws_key_name']
    self.master_hostgroup = self.ansible_config['cluster_configuration']['instances']['master']['hostgroup']
    self.slave_hostgroup = self.ansible_config['cluster_configuration']['instances']['slave']['hostgroup']
    self.stage = self.ansible_config['stage']
    self.qname = qname
    self.debug = debug
    self._setup_logger()
    self.ec2 = boto3.resource('ec2', self.ansible_config['aws_region'])
def handler(event, context):
    s3 = boto3.resource('s3')
    url = event["URL"]
    parsed = urllib2.urlparse.urlparse(url)
    response = s3.Object(parsed.netloc, parsed.path.lstrip('/')).get()
    linecount = bytes = 0
    contents = response['Body'].read(BUF_SZ)
    while contents:
        bytes += len(contents)
        linecount += contents.count('\n')
        contents = response['Body'].read(BUF_SZ)
    return {
        'url': event["URL"],
        'lines': linecount,
        'bytes': bytes
    }
def set_mapper_number(manifest_file):
    fastq_counts = 0
    if manifest_file.startswith("s3://"):
        s3 = boto3.resource("s3")
        bucket_name, key_prefix = manifest_file.strip().strip("/")[5:].split("/", 1)
        with tempfile.TemporaryDirectory() as tmpdirname:
            s3.meta.client.download_file(bucket_name, key_prefix, tmpdirname + "/manifest")
            for line in open(tmpdirname + "/manifest"):
                fastq_counts += 1
    else:
        for line in open(manifest_file):
            fastq_counts += 1
    return fastq_counts
def set_mapper_number(manifest_file):
    accession_counts = 0
    if manifest_file.startswith("s3://"):
        s3_client = boto3.resource("s3")
        bucket_name, key_prefix = manifest_file.strip().strip("/")[5:].split("/", 1)
        with tempfile.TemporaryDirectory() as tmpdirname:
            s3_client.Object(bucket_name, key_prefix).download_file(tmpdirname + "/manifest")
            for line in open(tmpdirname + "/manifest"):
                accession_counts += 1
    else:
        for line in open(manifest_file):
            accession_counts += 1
    return accession_counts
def upload_split(file_name):
    global parser_result
    base_file_name = file_name.split("/")[-1]
    if parser_result.output_dir.startswith("s3://"):
        bucket_name, folder_name = parser_result.output_dir[5:].split("/", 1)
        key_name = "{}/{}".format(folder_name.rstrip("/"), base_file_name)
        s3 = boto3.resource("s3", region_name=parser_result.s3_region)
        s3.Bucket(bucket_name).upload_file(file_name, key_name)
        os.remove(file_name)
    else:
        subprocess.call(["hdfs", "dfs", "-mkdir", "-p", parser_result.output_dir])
        subprocess.call(["hdfs", "dfs", "-put", file_name, parser_result.output_dir])
    print(base_file_name)
def upload_split(file_name, print_output_name=True):
    global parser_result
    base_file_name = file_name.split("/")[-1]
    if parser_result.output_dir.startswith("s3://"):
        bucket_name, folder_name = parser_result.output_dir[5:].split("/", 1)
        key_name = "{}/{}".format(folder_name.rstrip("/"), base_file_name)
        s3 = boto3.resource("s3", region_name=parser_result.s3_region)
        s3.Bucket(bucket_name).upload_file(file_name, key_name)
        os.remove(file_name)
    else:
        subprocess.call(["hdfs", "dfs", "-mkdir", "-p", parser_result.output_dir])
        subprocess.call(["hdfs", "dfs", "-put", file_name, parser_result.output_dir])
    if print_output_name:
        print(base_file_name)
def remove_s3_bucket_lambda_event(bucket_name, lambda_function_arn):
    s3 = boto3.resource('s3')
    s3_client = boto3.client('s3')
    try:
        new_conf = {}
        notification_conf = s3_client.get_bucket_notification_configuration(Bucket=bucket_name)
        if 'TopicConfigurations' in notification_conf:
            new_conf['TopicConfigurations'] = notification_conf['TopicConfigurations']
        if 'QueueConfigurations' in notification_conf:
            new_conf['QueueConfigurations'] = notification_conf['QueueConfigurations']
        if 'LambdaFunctionConfigurations' in notification_conf:
            new_conf['LambdaFunctionConfigurations'] = []
            for lfc in notification_conf['LambdaFunctionConfigurations']:
                if lfc['LambdaFunctionArn'] == lambda_function_arn:
                    continue  # remove all references to the Log Parser event
                else:
                    new_conf['LambdaFunctionConfigurations'].append(lfc)
        response = s3_client.put_bucket_notification_configuration(
            Bucket=bucket_name,
            NotificationConfiguration=new_conf
        )
    except Exception as e:
        print(e)
        print("[ERROR] Failed to remove S3 bucket Lambda event")
def setup_bucket_policy(cls):
    sts_client = boto3.client('sts', region_name='eu-west-1')
    account_id = sts_client.get_caller_identity()['Account']
    policy_document = {
        "Version": "2008-10-17",
        "Statement": [
            {
                "Sid": "GiveSESPermissionToWriteEmail",
                "Effect": "Allow",
                "Principal": {
                    "Service": "ses.amazonaws.com"
                },
                "Action": "s3:PutObject",
                "Resource": "arn:aws:s3:::{0}/*".format(cls.s3_bucket),
                "Condition": {
                    "StringEquals": {
                        "aws:Referer": account_id
                    }
                }
            }
        ]
    }
    s3 = boto3.resource('s3')
    policy = s3.BucketPolicy(cls.s3_bucket)
    policy.put(Policy=json.dumps(policy_document))
def create(self, image_id, flavor_id, network_id, name=None, number=1, **kargs):
    if name is None:
        name = six.text_type(datetime.now())
    profile = {
        'Arn': '',
        'Name': name
    }
    server = self.resource.create_instances(
        ImageId=image_id,
        MinCount=number,
        MaxCount=number,
        InstanceType=flavor_id,
        SubnetId=network_id,
        IamInstanceProfile=profile
    )
    return server
def delete_role(self):
    """Method to delete a role for a stack

    Returns:
        None
    """
    # Remove Role in-line policy
    try:
        iam = boto3.resource('iam')
        role_policy = iam.RolePolicy(self.role_name, self.policy_name)
        role_policy.delete()
    except botocore.exceptions.ClientError as e:
        # Assume a client error is a NoSuchEntity
        print(" No Role found. Skipping")
        return

    # Remove Role
    response = self.client.delete_role(RoleName=self.role_name)
    if response["ResponseMetadata"]["HTTPStatusCode"] != 200:
        raise Exception("Failed to delete policy.")
def get_s3_client():
    endpoint_url = os.environ.get("S3_ENDPOINT_URL")
    s3_client = boto3.client(
        's3',
        # region_name='us-east-1',
        aws_access_key_id=config['STORAGE_ACCESS_KEY_ID'],
        config=Config(signature_version='s3v4'),
        aws_secret_access_key=config['STORAGE_SECRET_ACCESS_KEY'],
        endpoint_url=endpoint_url
    )
    if endpoint_url:
        try:
            s3 = boto3.resource(
                's3',
                aws_access_key_id=config['STORAGE_ACCESS_KEY_ID'],
                config=Config(signature_version='s3v4'),
                aws_secret_access_key=config['STORAGE_SECRET_ACCESS_KEY'],
                endpoint_url=endpoint_url
            )
            s3.create_bucket(Bucket=config['STORAGE_BUCKET_NAME'])
            bucket = s3.Bucket(config['STORAGE_BUCKET_NAME'])
            bucket.Acl().put(ACL='public-read')
        except:  # noqa
            logging.exception('Failed to create the bucket')
            pass
    return s3_client
def upload_file(self, Filename, Bucket, Key, ExtraArgs=None, Callback=None,
                Config=None):
    """Upload a file to an S3 object.

    Usage::

        import boto3
        s3 = boto3.resource('s3')
        s3.meta.client.upload_file('/tmp/hello.txt', 'mybucket', 'hello.txt')

    Similar behavior as S3Transfer's upload_file() method, except that
    parameters are capitalized. Detailed examples can be found at
    :ref:`S3Transfer's Usage <ref_s3transfer_usage>`.
    """
    with S3Transfer(self, Config) as transfer:
        return transfer.upload_file(
            filename=Filename, bucket=Bucket, key=Key,
            extra_args=ExtraArgs, callback=Callback)
def download_file(self, Bucket, Key, Filename, ExtraArgs=None, Callback=None,
                  Config=None):
    """Download an S3 object to a file.

    Usage::

        import boto3
        s3 = boto3.resource('s3')
        s3.meta.client.download_file('mybucket', 'hello.txt', '/tmp/hello.txt')

    Similar behavior as S3Transfer's download_file() method, except that
    parameters are capitalized. Detailed examples can be found at
    :ref:`S3Transfer's Usage <ref_s3transfer_usage>`.
    """
    with S3Transfer(self, Config) as transfer:
        return transfer.download_file(
            bucket=Bucket, key=Key, filename=Filename,
            extra_args=ExtraArgs, callback=Callback)
def bucket_upload_file(self, Filename, Key, ExtraArgs=None, Callback=None,
                       Config=None):
    """Upload a file to an S3 object.

    Usage::

        import boto3
        s3 = boto3.resource('s3')
        s3.Bucket('mybucket').upload_file('/tmp/hello.txt', 'hello.txt')

    Similar behavior as S3Transfer's upload_file() method, except that
    parameters are capitalized. Detailed examples can be found at
    :ref:`S3Transfer's Usage <ref_s3transfer_usage>`.
    """
    return self.meta.client.upload_file(
        Filename=Filename, Bucket=self.name, Key=Key,
        ExtraArgs=ExtraArgs, Callback=Callback, Config=Config)
def object_upload_file(self, Filename, ExtraArgs=None, Callback=None,
                       Config=None):
    """Upload a file to an S3 object.

    Usage::

        import boto3
        s3 = boto3.resource('s3')
        s3.Object('mybucket', 'hello.txt').upload_file('/tmp/hello.txt')

    Similar behavior as S3Transfer's upload_file() method, except that
    parameters are capitalized. Detailed examples can be found at
    :ref:`S3Transfer's Usage <ref_s3transfer_usage>`.
    """
    return self.meta.client.upload_file(
        Filename=Filename, Bucket=self.bucket_name, Key=self.key,
        ExtraArgs=ExtraArgs, Callback=Callback, Config=Config)
def object_download_file(self, Filename, ExtraArgs=None, Callback=None,
                         Config=None):
    """Download an S3 object to a file.

    Usage::

        import boto3
        s3 = boto3.resource('s3')
        s3.Object('mybucket', 'hello.txt').download_file('/tmp/hello.txt')

    Similar behavior as S3Transfer's download_file() method, except that
    parameters are capitalized. Detailed examples can be found at
    :ref:`S3Transfer's Usage <ref_s3transfer_usage>`.
    """
    return self.meta.client.download_file(
        Bucket=self.bucket_name, Key=self.key, Filename=Filename,
        ExtraArgs=ExtraArgs, Callback=Callback, Config=Config)
def get_checkpoints_from_s3_path(path):
    import boto3
    s3_resource = boto3.resource('s3')
    bucket_name, key_name = split_s3_bucket_key(path)
    bucket = s3_resource.Bucket(bucket_name)
    all_objects = list(bucket.objects.filter(Prefix=key_name))
    all_keys = [o.key for o in all_objects]
    keys = fnmatch.filter(all_keys, S3_KEY_PATTERN)
    checkpoints = []
    for f in keys:
        try:
            file_path = os.path.join(bucket_name, f)
            checkpoints.append(parse_checkpoint_s3_path(file_path))
        except ValueError:
            continue
    return sorted(checkpoints, key=lambda cp: cp.start)
def test_generate_dictionary(spark, multi_locales_df):
    conn = boto3.resource('s3', region_name='us-west-2')
    conn.create_bucket(Bucket=taar_utils.AMO_DUMP_BUCKET)

    # Store the data in the mocked bucket.
    conn.Object(taar_utils.AMO_DUMP_BUCKET, key=taar_utils.AMO_DUMP_KEY)\
        .put(Body=json.dumps(FAKE_AMO_DUMP))

    multi_locales_df.createOrReplaceTempView("longitudinal")

    # The "en-US" locale must not be reported: we set it to a low
    # frequency on |multi_locale_df|.
    expected = {
        "it-IT": ["test-guid-0001"]
    }

    assert taar_locale.generate_dictionary(spark, 5) == expected
def test_write_csv_to_s3(generate_data):
    bucket = 'test-bucket'
    key = 'test.csv'
    conn = boto3.resource('s3', region_name='us-west-2')
    conn.create_bucket(Bucket=bucket)

    utils.write_csv_to_s3(generate_data(["foo"]), bucket, key)

    body = (
        conn
        .Object(bucket, key)
        .get()['Body']
        .read().decode('utf-8')
    )

    # header + 1x row = 2
    assert len(body.rstrip().split('\n')) == 2
def test_write_csv_to_s3_no_header(generate_data):
    bucket = 'test-bucket'
    key = 'test.csv'
    conn = boto3.resource('s3', region_name='us-west-2')
    conn.create_bucket(Bucket=bucket)

    utils.write_csv_to_s3(generate_data(), bucket, key, header=False)

    body = (
        conn
        .Object(bucket, key)
        .get()['Body']
        .read().decode('utf-8')
    )

    assert len(body.rstrip().split('\n')) == 1
def test_write_csv_to_s3_existing(generate_data):
    bucket = 'test-bucket'
    key = 'test.csv'
    conn = boto3.resource('s3', region_name='us-west-2')
    conn.create_bucket(Bucket=bucket)

    utils.write_csv_to_s3(generate_data(["foo"]), bucket, key)
    utils.write_csv_to_s3(generate_data(["foo", "bar"]), bucket, key)

    body = (
        conn
        .Object(bucket, key)
        .get()['Body']
        .read().decode('utf-8')
    )

    # header + 2x row = 3
    assert len(body.rstrip().split('\n')) == 3
def _put_file_on_s3(cls, stream, store_items_model, session, store_id):
    cls._logger.info("Started put file on S3 for '{}'".format(store_items_model.__key__))
    store = cls._run_coro(
        cls.get_model('stores').get(session, [{'id': store_id}]),
        session
    )[0]
    s3_bucket = store['configuration']['aws']['s3']['bucket']
    access_key_id = store['configuration']['aws'].get('access_key_id')
    secret_access_key = store['configuration']['aws'].get('secret_access_key')
    s3_key = '{}.zip'.format(store_items_model.__key__)
    boto3.resource(
        's3',
        aws_access_key_id=access_key_id,
        aws_secret_access_key=secret_access_key
    ).Bucket(s3_bucket).put_object(Body=stream, Key=s3_key)
    cls._logger.info("Finished put file on S3 for '{}'".format(store_items_model.__key__))
def create_or_get_lambda_role():
    """Creates the Lambda execution role for github-snooze-button.

    Args:
        None

    Returns:
        None
    """
    lambda_role_path = "/tdsmith/github-snooze-button/"
    lambda_role_name = "snooze_lambda_role"
    iam = boto3.resource("iam")
    roles = iam.roles.all()
    for role in roles:
        if role.path == lambda_role_path and role.name == lambda_role_name:
            return role
    role = iam.create_role(
        Path=lambda_role_path,
        RoleName=lambda_role_name,
        AssumeRolePolicyDocument=LAMBDA_ROLE_TRUST_POLICY)
    role.attach_policy(
        PolicyArn="arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole")
    return role
def test_poll(self, config, trivial_message):
    self._test_poll_was_polled = False

    def my_callback(event, message):
        self._test_poll_was_polled = True

    responses.add(responses.POST,
                  "https://api.github.com/repos/tdsmith/test_repo/hooks")
    repo_listener = snooze.RepositoryListener(
        events=snooze.LISTEN_EVENTS,
        callbacks=[my_callback],
        **config["tdsmith/test_repo"])
    sqs = boto3.resource("sqs", region_name="us-west-2")
    sqs_queue = list(sqs.queues.all())[0]
    sqs_queue.send_message(MessageBody=trivial_message)
    assert int(sqs_queue.attributes["ApproximateNumberOfMessages"]) > 0
    repo_listener.poll()
    sqs_queue.reload()
    assert int(sqs_queue.attributes["ApproximateNumberOfMessages"]) == 0
    assert self._test_poll_was_polled
def _create_request(self):
    """
    Handles create request from cloudformation custom resource
    :return:
    """
    try:
        self._setup()
        self.physical_resource_id = self.__class__.__name__.lower()
        if allow_send_metrics():
            self._send_create_metrics()
        return True
    except Exception as ex:
        self.response["Reason"] = str(ex)
        return False
def _delete_request(self):
    """
    Handles delete request from cloudformation custom resource
    :return:
    """
    try:
        self.delete_templates()
        self.delete_external_task_config_stacks()
        if allow_send_metrics():
            self._send_delete_metrics()
        return True
    except Exception as ex:
        self.response["Reason"] = str(ex)
        return False
def download_fxa_files():
    s3 = boto3.resource('s3',
                        aws_access_key_id=settings.FXA_ACCESS_KEY_ID,
                        aws_secret_access_key=settings.FXA_SECRET_ACCESS_KEY)
    bucket = s3.Bucket(settings.FXA_S3_BUCKET)
    for obj in bucket.objects.filter(Prefix=BUCKET_DIR):
        log('found %s in s3 bucket' % obj.key)
        tmp_path = TMP.joinpath(obj.key)
        if not tmp_path.name.endswith('.csv'):
            continue

        if file_is_done(tmp_path):
            continue

        if not tmp_path.exists():
            log('getting ' + obj.key)
            log('size is %s' % obj.size)
            tmp_path.parent.mkdir(parents=True, exist_ok=True)
            try:
                bucket.download_file(obj.key, str(tmp_path))
                log('downloaded %s' % tmp_path)
            except Exception:
                # something went wrong, delete file
                log('bad things happened. deleting %s' % tmp_path)
                tmp_path.unlink()
def touch_status(status):
    """
    Updates the status of the current server and the last time it was modified.
    """
    print("Updating status: %s" % status)
    timestamp = int(time.time())
    dbclient = boto3.resource('dynamodb')
    table = dbclient.Table(DYNAMO_NAME)
    table.put_item(
        Item={
            'ip': PUBLIC_IP,
            'time': timestamp,
            'status': status
        }
    )
def set_description(description):
    """
    Adds a description of what the server is doing.
    """
    print("Setting description: %s" % description)
    dbclient = boto3.resource('dynamodb')
    table = dbclient.Table(DYNAMO_NAME)
    table.update_item(
        Key={
            'ip': PUBLIC_IP
        },
        UpdateExpression="set description = :desc",
        ExpressionAttributeValues={
            ':desc': description
        },
        ReturnValues='NONE'
    )
def get_or_create_topic(email):
    topicname = "letsencrypt-lambda-notify"
    sns_r = boto3.resource('sns')
    sns_c = boto3.client('sns')
    # If the topic doesn't exist, this will create it, otherwise it returns
    # the existing topic.
    topic = sns_c.create_topic(Name=topicname)
    topic_arn = topic['TopicArn']
    # subscribe the email to the topic
    sns_c.subscribe(
        TopicArn=topic_arn,
        Protocol='email',
        Endpoint=email
    )
    return topic_arn
def get_s3_bucket(s3_access_key, s3_secret_key, s3_region, s3_bucket):
    # bucket.upload_file('path', 'key')
    # bucket.download_file('key', 'path')
    s3 = boto3.resource(
        's3',
        aws_access_key_id=s3_access_key,
        aws_secret_access_key=s3_secret_key,
        region_name=s3_region
    )
    bucket = s3.Bucket(s3_bucket)
    try:
        s3.meta.client.head_bucket(Bucket=s3_bucket)
    except botocore.exceptions.ClientError as e:
        error_code = int(e.response['Error']['Code'])
        if error_code == 404:
            raise Exception('Bucket does not exist')
        else:
            raise e
    return bucket
def scan_area(north, south, west, east):
    result = []
    # 1. Find all points to scan in this area
    cell_ids = break_down_area_to_cell(north, south, west, east)

    # work_queue = boto3.resource('sqs', region_name='us-west-2').get_queue_by_name(QueueName=SQS_QUEUE_NAME)
    # # 2. Scan current area
    # for cell_id in cell_ids:
    #     # result += scan_point(cell_id)
    #     work_queue.send_message(MessageBody=json.dumps({"cell_id": cell_id}))

    entries = []
    for i in range(len(cell_ids)):
        entries.append({
            'Id': str(i),
            'MessageBody': json.dumps({"cell_id": cell_ids[i]})
        })
    client.send_message_batch(
        QueueUrl='https://sqs.us-west-2.amazonaws.com/106001836577/{0}'.format(SQS_QUEUE_NAME),
        Entries=entries)

    # 4. Deliver pokemon location
    return result
def check_s3(basename):
    s3 = boto3.resource('s3')
    exists = False
    try:
        s3.Object(cfg.BUCKET_NAME, basename).load()
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == "404":
            exists = False
        else:
            raise
    else:
        exists = True
    return exists
def test(self):
    for region in self.regions:
        cloudwatch = boto3.resource('cloudwatch', region_name=region)
        self.subchecks.append(
            CloudWatchBillingAlertEnabledCheck(
                region,
                cloudwatch.alarms.all()
            )
        )
    if any(x.status == common.CheckState.PASS for x in self.subchecks):
        self.status = common.CheckState.PASS
        self.reason = 'At least one CloudWatch Billing Alert is Enabled'
    else:
        self.status = common.CheckState.FAIL
        self.reason = 'No CloudWatch Billing Alerts are Enabled'
def __init__(self, region='us-east-1'):
    self.check_categories = ['S3', 'IAM', 'VPC', 'CloudWatch', 'CloudTrail']
    self.ec2 = boto3.resource("ec2", region_name=region)
    self.ec2_client = boto3.client("ec2", region_name=region)
    self.cloudwatch = boto3.resource("cloudwatch", region_name=region)
    self.cloudwatch_client = boto3.client("cloudwatch", region_name=region)
    self.cloudtrail_client = boto3.client('cloudtrail', region_name=region)
    self.iam = boto3.resource("iam", region_name=region)
    self.iam_client = boto3.client("iam", region_name=region)
    self.s3 = boto3.resource("s3", region_name=region)
    self.results = []
    self.results_dict = {}
def check_vpcs(self):
    # collect vpc ids
    regions = get_regions()
    for region in regions:
        ec2 = boto3.resource('ec2', region_name=region)
        ec2_client = boto3.client('ec2', region_name=region)
        ids = [x.id for x in ec2.vpcs.all()]
        flowlogs = self.get_flowlogs_by_vpc_id(ec2_client)
        for vpc_id in ids:
            vpc_dict = flowlogs.get(vpc_id, None)
            self.append_collection(
                misc_checks.VPCFlowLogCheck(vpc_id, vpc_dict)
            )
def test(self):
    policy = 'arn:aws:iam::aws:policy/AdministratorAccess'
    user = boto3.resource('iam').User(self.user_dict['user'])
    # Wrap in list() so len() works on Python 3, where filter() is lazy.
    attached_admin_policies = list(filter(
        lambda x: x.arn == policy,
        user.attached_policies.all()
    ))
    if len(attached_admin_policies) > 0:
        self.reason = 'This user has the {0} policy.'.format(policy)
        self.status = common.CheckState.FAIL
    else:
        self.status = common.CheckState.PASS
        self.reason = 'This user does not have the {0} policy.'.format(policy)
def _get_resource(self, client):
    return boto3.resource(client, **self._get_aws_cretentials_from_config())