The following code examples, extracted from open-source Python projects, illustrate how to use botocore.client.Config().
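Before the individual examples, a minimal sketch of the common pattern: a Config bundles client-level settings (timeouts, signature version, S3 addressing style) and is handed to a client via the config keyword. The specific values below are illustrative, not defaults.

import boto3
from botocore.client import Config

# Any field left out of a Config keeps its library default.
config = Config(
    connect_timeout=10,        # seconds to wait for the TCP connection
    read_timeout=60,           # seconds to wait for a response
    signature_version='s3v4',  # SigV4, required e.g. for aws:kms encryption
    s3={'addressing_style': 'path'},
)

s3 = boto3.client('s3', region_name='us-east-1', config=config)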
def _build_client(self):
    # Either they provided ACCESS_KEY and SECRET_ACCESS_KEY in which case
    # we use those, or they didn't in which case boto3 pulls credentials
    # from one of a myriad of other places.
    # http://boto3.readthedocs.io/en/latest/guide/configuration.html#configuring-credentials
    session_kwargs = {}
    if self.config('access_key') and self.config('secret_access_key'):
        session_kwargs['aws_access_key_id'] = self.config('access_key')
        session_kwargs['aws_secret_access_key'] = self.config('secret_access_key')

    session = boto3.session.Session(**session_kwargs)

    kwargs = {
        'service_name': 's3',
        'region_name': self.config('region'),
        # NOTE(willkg): We use path-style because that lets us have dots in
        # our bucket names and use SSL.
        'config': Config(s3={'addressing_style': 'path'})
    }

    if self.config('endpoint_url'):
        kwargs['endpoint_url'] = self.config('endpoint_url')

    return session.client(**kwargs)
def connect(self):
    session_kwargs = {}
    if self.access_key and self.secret_access_key:
        session_kwargs['aws_access_key_id'] = self.access_key
        session_kwargs['aws_secret_access_key'] = self.secret_access_key

    session = boto3.session.Session(**session_kwargs)

    client_kwargs = {
        'service_name': 's3',
        'region_name': self.region,
        # Note: the key must be 'addressing_style' (the original had a typo).
        'config': Config(s3={'addressing_style': 'path'})
    }

    if self.endpointurl:
        client_kwargs['endpoint_url'] = self.endpointurl

    client = session.client(**client_kwargs)
    return client
def __init__(self, handle_task=lambda t, i: None, **kwargs):
    """Will not be called if used as a mixin. Provides just the expected variables.

    Args:
        handle_task (callable): Callable to process task input and send
                                success or failure
        kwargs: Arguments for heaviside.utils.create_session
    """
    session, _ = create_session(**kwargs)
    # DP NOTE: read_timeout is needed so that the long poll for tasking doesn't
    #          timeout client side before AWS returns that there is no work
    self.client = session.client('stepfunctions', config=Config(read_timeout=70))
    self.log = logging.getLogger(__name__)
    self.name = None
    self.arn = None
    self.handle_task = handle_task
    self.max_concurrent = 0
    self.poll_delay = 1
    self.polling = False
def __init__(self, name, target=None, **kwargs):
    """
    Args:
        name (string): Name of the activity to monitor
                       The activity's ARN is looked up in AWS using the
                       provided AWS credentials
        target (string|callable): Function to pass to TaskProcess as the target.
                                  If string, the class / function will be imported
        kwargs (dict): Same arguments as utils.create_session()
    """
    super(ActivityProcess, self).__init__(name=name)
    self.name = name
    self.credentials = kwargs
    self.session, self.account_id = create_session(**kwargs)
    self.client = self.session.client('stepfunctions', config=Config(read_timeout=70))
    self.log = logging.getLogger(__name__)
    self.max_concurrent = 0
    self.poll_delay = 1

    if isinstance(target, str):
        target = TaskProcess.resolve_function(target)
    self.target = target
def get_s3_client():
    endpoint_url = os.environ.get("S3_ENDPOINT_URL")
    s3_client = boto3.client(
        's3',
        # region_name='us-east-1',
        aws_access_key_id=config['STORAGE_ACCESS_KEY_ID'],
        aws_secret_access_key=config['STORAGE_SECRET_ACCESS_KEY'],
        config=Config(signature_version='s3v4'),
        endpoint_url=endpoint_url
    )
    if endpoint_url:
        try:
            s3 = boto3.resource(
                's3',
                aws_access_key_id=config['STORAGE_ACCESS_KEY_ID'],
                aws_secret_access_key=config['STORAGE_SECRET_ACCESS_KEY'],
                config=Config(signature_version='s3v4'),
                endpoint_url=endpoint_url)
            s3.create_bucket(Bucket=config['STORAGE_BUCKET_NAME'])
            bucket = s3.Bucket(config['STORAGE_BUCKET_NAME'])
            bucket.Acl().put(ACL='public-read')
        except:  # noqa
            logging.exception('Failed to create the bucket')
    return s3_client
def bucket_client(session, b, kms=False):
    location = b.get('Location')
    if location is None:
        region = 'us-east-1'
    else:
        region = location['LocationConstraint'] or 'us-east-1'

    if kms:
        # Need v4 signature for aws:kms crypto, else let the sdk decide
        # based on region support.
        config = Config(
            signature_version='s3v4',
            read_timeout=200,
            connect_timeout=120)
    else:
        config = Config(read_timeout=200, connect_timeout=120)
    return session.client('s3', region_name=region, config=config)
def client(self): if not hasattr(self._local, 'client'): _logger.info("Creating new S3 Client") if self._dev_url: _logger.warning("S3 dev mode enabled") session = botocore.session.get_session() self._local.client = session.create_client( 's3', aws_access_key_id='-', aws_secret_access_key='-', endpoint_url=self._dev_url, config=Config( s3={'addressing_style': 'path'}, signature_version='s3' ) ) else: self._local.client = boto3.client( 's3', aws_access_key_id=self._aws_access_key_id, aws_secret_access_key=self._aws_secret_access_key ) return self._local.client
def client(*args, **kwargs):
    """
    Create a low-level service client by name using the default session.

    Socket level timeouts are preconfigured according to the defaults set via
    the `fleece.boto3.set_default_timeout()` function, or they can also be set
    explicitly for a client by passing the `timeout`, `connect_timeout` or
    `read_timeout` arguments.
    """
    timeout = kwargs.pop('timeout', DEFAULT_TIMEOUT)
    connect_timeout = kwargs.pop('connect_timeout',
                                 DEFAULT_CONNECT_TIMEOUT or timeout)
    read_timeout = kwargs.pop('read_timeout', DEFAULT_READ_TIMEOUT or timeout)
    config = Config(connect_timeout=connect_timeout,
                    read_timeout=read_timeout)
    return real_boto3.client(*args, config=config, **kwargs)
def resource(*args, **kwargs):
    """
    Create a resource service client by name using the default session.

    Socket level timeouts are preconfigured according to the defaults set via
    the `fleece.boto3.set_default_timeout()` function, or they can also be set
    explicitly for a client by passing the `timeout`, `connect_timeout` or
    `read_timeout` arguments.
    """
    timeout = kwargs.pop('timeout', DEFAULT_TIMEOUT)
    connect_timeout = kwargs.pop('connect_timeout',
                                 DEFAULT_CONNECT_TIMEOUT or timeout)
    read_timeout = kwargs.pop('read_timeout', DEFAULT_READ_TIMEOUT or timeout)
    config = Config(connect_timeout=connect_timeout,
                    read_timeout=read_timeout)
    return real_boto3.resource(*args, config=config, **kwargs)
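For context, typical usage of the two fleece wrappers above might look like the following. This is a hypothetical sketch: it assumes the module is imported as fleece.boto3 (per the docstrings), and the service names are arbitrary.

from fleece import boto3

# Falls back to the module-level defaults for both socket timeouts.
dynamodb = boto3.client('dynamodb')

# Override just the read timeout; connect_timeout keeps its default.
s3 = boto3.client('s3', read_timeout=120)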
def client(self): if not hasattr(self._local, 'client'): _logger.info("Creating new S3 Client") self._local.client = boto3.client( 's3', region_name=self._aws_region, aws_access_key_id=self._aws_access_key_id, aws_secret_access_key=self._aws_secret_access_key, endpoint_url=self._endpoint_url, config=Config( s3={'addressing_style': self._addressing_style}, signature_version=self._signature_version ) ) return self._local.client
def process_trail_set(
        object_set, map_records, reduce_results=None, trail_bucket=None):
    session_factory = SessionFactory(
        options.region, options.profile, options.assume_role)
    s3 = session_factory().client(
        's3', config=Config(signature_version='s3v4'))
    previous = None
    for o in object_set:
        body = s3.get_object(Key=o['Key'], Bucket=trail_bucket)['Body']
        fh = GzipFile(fileobj=StringIO(body.read()))
        data = json.load(fh)
        s = map_records(data['Records'])
        if reduce_results:
            previous = reduce_results(s, previous)
    return previous
def setup_parser():
    parser = argparse.ArgumentParser()
    parser.add_argument("--bucket", required=True)
    parser.add_argument("--prefix", default="")
    parser.add_argument("--account", required=True)
    parser.add_argument("--user")
    parser.add_argument("--event")
    parser.add_argument("--source")
    parser.add_argument("--not-source")
    parser.add_argument("--day")
    parser.add_argument("--month")
    parser.add_argument("--tmpdir", default="/tmp/traildb")
    parser.add_argument("--region", default="us-east-1")
    parser.add_argument("--output", default="results.db")
    parser.add_argument(
        "--profile", default=os.environ.get('AWS_PROFILE'),
        help="AWS Account Config File Profile to utilize")
    parser.add_argument(
        "--assume", default=None, dest="assume_role",
        help="Role to assume")
    parser.add_argument(
        '--field', action='append',
        help='additional fields that can be added to each record',
        choices=['userIdentity', 'requestParameters', 'responseElements'])
    return parser
def DeleteImageHandlerUI(deploy_config):
    # Expected dict entries:
    # deploy_config['UIBucket']
    # deploy_config['UIPrefix']
    log.info("Deleting Serverless Image Handler UI from %s/%s",
             deploy_config['UIBucket'], deploy_config['UIPrefix'])
    try:
        s3 = boto3.client("s3", config=Config(signature_version='s3v4'))
        log.info("Listing UI objects in %s/%s",
                 deploy_config['UIBucket'], deploy_config['UIPrefix'])
        for s3object in s3.list_objects(Bucket=deploy_config['UIBucket'],
                                        Prefix=deploy_config['UIPrefix'])['Contents']:
            log.info("Deleting %s/%s", deploy_config['UIBucket'], s3object['Key'])
            s3.delete_object(Bucket=deploy_config['UIBucket'], Key=s3object['Key'])
        log.info("Deleting %s/%s", deploy_config['UIBucket'], deploy_config['UIPrefix'])
        s3.delete_object(Bucket=deploy_config['UIBucket'], Key=deploy_config['UIPrefix'])
    except Exception as e:
        log.error("Error deleting UI. Error: %s", e)
        raise
def test_sigv4_progress_callbacks_invoked_once(self):
    # Reset the client and manager to use sigv4
    self.reset_stubber_with_new_client(
        {'config': Config(signature_version='s3v4')})
    self.client.meta.events.register(
        'before-parameter-build.s3.*', self.collect_body)
    self._manager = TransferManager(self.client, self.config)

    # Add the stubbed response.
    self.add_put_object_response_with_default_expected_params()

    subscriber = RecordingSubscriber()
    future = self.manager.upload(
        self.filename, self.bucket, self.key, subscribers=[subscriber])
    future.result()
    self.assert_expected_client_calls_were_correct()

    # The amount of bytes seen should be the same as the file size
    self.assertEqual(subscriber.calculate_bytes_seen(), len(self.content))
def test_callback_called_once_with_sigv4(self):
    # Verify #98, where the callback was being invoked
    # twice when using signature version 4.
    self.amount_seen = 0
    lock = threading.Lock()

    def progress_callback(amount):
        with lock:
            self.amount_seen += amount

    client = self.session.create_client(
        's3', self.region,
        config=Config(signature_version='s3v4'))
    transfer = s3transfer.S3Transfer(client)
    filename = self.files.create_file_with_size(
        '10mb.txt', filesize=10 * 1024 * 1024)
    transfer.upload_file(filename, self.bucket_name, '10mb.txt',
                         callback=progress_callback)
    self.addCleanup(self.delete_object, '10mb.txt')

    self.assertEqual(self.amount_seen, 10 * 1024 * 1024)
def __init__(self, region_name, aws_access_key_id, aws_secret_access_key):
    """
    :param region_name: AWS region name
    :param aws_access_key_id: AWS credentials
    :param aws_secret_access_key: AWS credentials
    """
    super(LambdaClient, self).__init__(
        service='lambda',
        region_name=region_name,
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key,
        config=Config(read_timeout=300)
    )
def __init__(self, name, arn=None, worker=None, **kwargs):
    """
    Args:
        name (String): Name of the Activity to monitor
        arn (String): Full ARN of Activity to monitor
                      If not given, it is looked up
                      If given, the actual ARN and Name are compared
        worker (callable): Callable that transforms the task's input
                           into an output that is then returned
        kwargs: Arguments to heaviside.utils.create_session
    """
    self.name = name
    self.arn = arn
    self.worker = worker
    self.token = None

    self.session, self.account_id = create_session(**kwargs)
    self.client = self.session.client('stepfunctions', config=Config(read_timeout=70))
    self.log = logging.getLogger(__name__)
    self.max_concurrent = 0
    self.poll_delay = 1

    if self.arn is None:
        self.arn = self.lookup_activity_arn(name)
    else:
        try:
            resp = self.client.describe_activity(activityArn=self.arn)
            if resp['name'] != name:
                raise Exception("Name of {} is not {}".format(self.arn, self.name))
        except ClientError:
            raise Exception("ARN {} is not valid".format(self.arn))
def lambda_handler(event, context):
    # Note: s3, ec2, bucketname, bucketregion and key are module-level
    # globals in the source project.
    s3resource = boto3.resource('s3',
                                config=Config(signature_version='s3v4'),
                                region_name=bucketregion)
    s3resource.meta.client.download_file(bucketname, key, '/tmp/OpenSGs.txt')
    AllOpenSGs = []
    f = open('/tmp/OpenSGs.txt', 'r')
    AllOpenSGs = f.read()
    AllOpenSGs = AllOpenSGs.split('\n')
    #print("Existing SGS are: %s" % AllOpenSGs)
    response = ec2.describe_security_groups()
    for sg in response['SecurityGroups']:
        IPPermissions = sg['IpPermissions']
        for ingress in IPPermissions:
            IpRanges = ingress['IpRanges']
            for ip_range in IpRanges:
                cidr = ip_range['CidrIp']
                if '0.0.0.0/0' in cidr:
                    print(cidr)
                    sgname = sg['GroupId']
                    AllOpenSGs.append(sgname)
    # Creates array of unique values to remove duplicate SGs
    AllUniqueSGs = list(set(AllOpenSGs))
    # Convert the List to a String to avoid S3 errors
    StringOfSGs = '\n'.join(AllUniqueSGs)
    # Upload the txt file to S3
    response = s3.put_object(
        Body=StringOfSGs,
        Bucket=bucketname,
        Key=key
    )
    return 'File Has Been Uploaded To S3'
def handle_BucketVersioningConfiguration(self, resource, item_value):
    # Config defaults versioning to 'Off' for a null value
    if item_value['status'] not in ('Enabled', 'Suspended'):
        return

    resource['Versioning'] = {'Status': item_value['status']}
    if item_value['isMfaDeleteEnabled']:
        resource['Versioning']['MFADelete'] = item_value[
            'isMfaDeleteEnabled'].title()
def bucket_client(session, b, kms=False):
    region = get_region(b)

    if kms:
        # Need v4 signature for aws:kms crypto, else let the sdk decide
        # based on region support.
        config = Config(
            signature_version='s3v4',
            read_timeout=200,
            connect_timeout=120)
    else:
        config = Config(read_timeout=200, connect_timeout=120)
    return session.client('s3', region_name=region, config=config)
def __init__(self, identity_pool_id=None):
    # region name for the base account
    self.region_name = 'eu-west-1'
    self.identity_pool_id = identity_pool_id or const.AWS_IDENTITY_POOL_ID
    self.cognito_client = boto3.client(
        'cognito-identity',
        region_name=self.region_name,
        config=Config(signature_version=UNSIGNED))
    self.lambda_client_no_auth = self.create_aws_lambda_client()
def _run_main(self, parsed_args, parsed_globals):
    s3_client = self._session.create_client(
        "s3",
        config=Config(signature_version='s3v4'),
        region_name=parsed_globals.region,
        verify=parsed_globals.verify_ssl)

    template_path = parsed_args.template_file
    if not os.path.isfile(template_path):
        raise exceptions.InvalidTemplatePathError(
            template_path=template_path)

    bucket = parsed_args.s3_bucket
    self.s3_uploader = S3Uploader(s3_client,
                                  bucket,
                                  parsed_globals.region,
                                  parsed_args.s3_prefix,
                                  parsed_args.kms_key_id,
                                  parsed_args.force_upload)

    output_file = parsed_args.output_template_file
    use_json = parsed_args.use_json
    exported_str = self._export(template_path, use_json)

    sys.stdout.write("\n")
    self.write_output(output_file, exported_str)

    if output_file:
        msg = self.MSG_PACKAGED_TEMPLATE_WRITTEN.format(
            output_file_name=output_file,
            output_file_path=os.path.abspath(output_file))
        sys.stdout.write(msg)

    sys.stdout.flush()
    return 0
def _update_default_client_config(session, arg_name, arg_value):
    current_default_config = session.get_default_client_config()
    new_default_config = Config(**{arg_name: arg_value})
    if current_default_config is not None:
        new_default_config = current_default_config.merge(new_default_config)
    session.set_default_client_config(new_default_config)
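For context: botocore's Config.merge() returns a new Config in which settings from the Config passed as the argument take precedence over the instance it is called on, which is why the function above layers the new value on top of the session's existing defaults. A small illustrative sketch (the values are arbitrary):

from botocore.client import Config

base = Config(connect_timeout=5, read_timeout=60)
override = Config(read_timeout=120)

merged = base.merge(override)
# merged keeps connect_timeout=5 from base, but read_timeout=120 wins
# because settings from the Config passed to merge() take precedence.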
def set_clients(self):
    client_config = None
    if self.parameters.get('sse') == 'aws:kms':
        client_config = Config(signature_version='s3v4')
    self._client = get_client(
        self.session,
        region=self.parameters['region'],
        endpoint_url=self.parameters['endpoint_url'],
        verify=self.parameters['verify_ssl'],
        config=client_config
    )
    self._source_client = get_client(
        self.session,
        region=self.parameters['region'],
        endpoint_url=self.parameters['endpoint_url'],
        verify=self.parameters['verify_ssl'],
        config=client_config
    )
    if self.parameters['source_region']:
        if self.parameters['paths_type'] == 's3s3':
            self._source_client = get_client(
                self.session,
                region=self.parameters['source_region'],
                endpoint_url=None,
                verify=self.parameters['verify_ssl'],
                config=client_config
            )
def assume_role(account_role, samlAssertion):
    # AssumeRoleWithSAML is called before the caller has any AWS
    # credentials, hence the unsigned STS client.
    conn = boto3.client('sts', config=client.Config(signature_version=botocore.UNSIGNED))
    aws_session_token = conn.assume_role_with_saml(
        RoleArn=account_role.role_arn,
        PrincipalArn=account_role.principal_arn,
        SAMLAssertion=samlAssertion,
        DurationSeconds=3600,
    )
    return aws_session_token
def boto3_agent_from_sts(agent_service, agent_type, region, credentials={}):
    session = boto3.session.Session()

    # Generate our kwargs to pass
    kw_args = {
        "region_name": region,
        "config": Config(signature_version='s3v4')
    }

    if credentials:
        kw_args["aws_access_key_id"] = credentials['accessKeyId']
        kw_args["aws_secret_access_key"] = credentials['secretAccessKey']
        kw_args["aws_session_token"] = credentials['sessionToken']

    # Build our agent depending on how we're called.
    if agent_type == "client":
        return session.client(agent_service, **kw_args)
    if agent_type == "resource":
        return session.resource(agent_service, **kw_args)
def open_session(self, session_parameter):
    config = Config(connect_timeout=50, read_timeout=70)
    session = boto3.session.Session(**session_parameter)
    return session.client('swf', config=config)
def get_boto_resource(storage):
    return boto3.resource(
        's3',
        aws_access_key_id=storage.access_key,
        aws_secret_access_key=storage.secret_key,
        region_name=storage.region,
        config=Config(signature_version='s3v4')
    )
def get_boto_ressource(self):
    return boto3.resource(
        's3',
        aws_access_key_id=self.access_key,
        aws_secret_access_key=self.secret_key,
        region_name=self.region,
        config=Config(signature_version='s3v4')
    )
def get_signed_url(self, key):
    dir = ''
    if '/' in key:
        dir = key[:key.rfind('/') + 1]
    ext = key[key.rfind('.'):]
    s3_key = ''
    while not s3_key:
        temp_key = dir + random_id() + ext
        if not self.exists(temp_key):
            s3_key = temp_key

    conditions = []
    if self.max_size:
        conditions.append(["content-length-range", 0, self.max_size])

    params = {
        'Bucket': self.bucket_name,
        'Key': s3_key,
        'Conditions': conditions
    }
    client = boto3.client(
        's3',
        aws_access_key_id=self.access_key,
        aws_secret_access_key=self.secret_key,
        region_name=self.region,
        config=Config(signature_version='s3v4')
    )
    return client.generate_presigned_post(**params)
def run(self):
    """Run command."""
    self.run_command('ldist')
    ldist_cmd = self.get_finalized_command('ldist')
    dist_path = getattr(ldist_cmd, 'dist_path', None)
    dist_name = getattr(ldist_cmd, 'dist_name', None)
    if dist_path is None or dist_name is None:
        raise DistutilsArgError('\'ldist\' missing attributes')
    dist_name = getattr(self, 's3_prefix') + dist_name
    s3 = boto3.client(
        's3',
        aws_access_key_id=getattr(self, 'access_key'),
        aws_secret_access_key=getattr(self, 'secret_access_key'),
        config=Config(signature_version='s3v4')
    )
    log.info('uploading {} to {} using kms key {}'.format(
        dist_name,
        getattr(self, 's3_bucket'),
        getattr(self, 'kms_key_id')
    ))
    with open(dist_path, 'rb') as dist:
        if getattr(self, 'kms_key_id'):
            response = s3.put_object(
                Body=dist,
                Bucket=getattr(self, 's3_bucket'),
                Key=dist_name,
                ServerSideEncryption='aws:kms',
                SSEKMSKeyId=getattr(self, 'kms_key_id')
            )
        else:
            response = s3.put_object(
                Body=dist,
                Bucket=getattr(self, 's3_bucket'),
                Key=dist_name,
                ServerSideEncryption='AES256'
            )
    log.info('upload complete:\n{}'.format(
        json.dumps(response, sort_keys=True, indent=4, separators=(',', ': '))
    ))
def setup_s3_client(self):
    """Creates an authenticated s3 client.

    :return: S3 client instance.
    :rtype: botocore.client.BaseClient
    """
    session = boto3.Session(aws_access_key_id=self.access_key_id,
                            aws_secret_access_key=self.secret_access_key)
    s3_config = Config(connect_timeout=S3_CONNECT_TIMEOUT,
                       read_timeout=S3_READ_TIMEOUT)
    client = session.client('s3',
                            region_name=self.default_region,
                            config=s3_config)
    return client
def _upload_object(self, file_obj, object_key):
    """Upload objects to S3 in streaming fashion.

    :param file file_obj: A file like object to upload. At a minimum, it
        must implement the read method, and must return bytes.
    :param str object_key: The destination key where to upload the object.
    :raise S3DestinationError: if failed to upload object.
    """
    remote_name = "s3://{bucket}/{name}".format(
        bucket=self.bucket,
        name=object_key
    )

    LOG.debug("Generating S3 transfer config")
    s3_transfer_config = self.get_transfer_config()

    LOG.debug("Starting to stream to %s", remote_name)
    try:
        self.s3_client.upload_fileobj(file_obj,
                                      self.bucket,
                                      object_key,
                                      Config=s3_transfer_config)
        LOG.debug("Successfully streamed to %s", remote_name)
    except ClientError as err:
        raise S3DestinationError(err)

    return self._validate_upload(object_key)
def __init__(self, config=None):
    self.config = DEFAULT_SETTINGS
    self.config.update(config or settings.DJAMAZING)
    self.cloud_front_base_url = self.config['CLOUDFRONT_URL']
    self.bucket = boto3.resource(
        's3',
        aws_access_key_id=self.config['S3_KEY_ID'],
        aws_secret_access_key=self.config['S3_SECRET_KEY'],
        config=Config(signature_version='s3v4')
    ).Bucket(self.config['S3_BUCKET'])
    self._init_protected_mode(self.config)
def get_s3():
    url, key, secret = map(os.environ.get, [
        "S3_ENDPOINT_URL", "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"])
    s3 = False
    if url and key and secret:
        with utils.temp_loglevel():
            s3 = boto3.client(
                's3',
                endpoint_url=os.environ["S3_ENDPOINT_URL"],
                aws_access_key_id=os.environ["AWS_ACCESS_KEY_ID"],
                aws_secret_access_key=os.environ["AWS_SECRET_ACCESS_KEY"],
                config=Config(signature_version='s3v4'),
                region_name='us-east-1')
    return s3
def initialize(cls):
    """
    This will be called the first time a toolbox of this type is created.
    This is meant for installing libraries if they are needed. For example,
    if you need boto3 for an Amazon Handler, you would call
    FloatingTools.installPackage('boto3', 'boto') here. This is also meant
    for any other set up such as getting login data.

    .. note::
        This is only called once, during the first call to create a toolbox
        of this type.

    .. code-block:: python
        :linenos:

        @classmethod
        def initialize(cls):
            # install the aws api lib through pip
            FloatingTools.installPackage('boto3', 'boto')

            import boto3
            from botocore.client import Config

            # set log in data for AWS
            os.environ['AWS_ACCESS_KEY_ID'] = cls.userData()['access key']
            os.environ['AWS_SECRET_ACCESS_KEY'] = cls.userData()['secret key']

            cls.CONNECTION = boto3.resource('s3', config=Config(signature_version='s3v4'))
    """
    pass
def initiator(globalBaseUrl):
    """take a url and set up s3 auth. Then call the driver"""
    global s3
    # alternate way to authenticate in else.
    # use what you prefer
    if True:
        access_key = os.environ.get('AWS_ACCESS_KEY_ID')
        secret_key = os.environ.get('AWS_SECRET_ACCESS_KEY')
        if access_key is None or secret_key is None:
            print printWarning("""No access credentials available.
Please export your AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY.
Details: http://docs.aws.amazon.com/general/latest/gr/managing-aws-access-keys.html
            """)
            sys.exit(0)
        s3 = boto3.resource('s3', config=Config(signature_version='s3v4'))
    else:
        # If you prefer to supply the credentials here,
        # make sure you flip the if condition to False
        # and substitute the necessary data :)
        s3 = boto3.resource('s3',
                            aws_access_key_id=ACCESS_ID,
                            aws_secret_access_key=ACCESS_KEY,
                            config=Config(signature_version='s3v4')
                            )
    print printScreen("[>]Initiating...", "blue")
    print printScreen("[>]Press Ctrl+C to terminate script", "blue")
    scanner(globalBaseUrl)
    driver(globalBaseUrl)
def pushConfig(ssh, config):
    #log.info("Starting to push config")
    #ssh.send('term len 0\n')
    #prompt(ssh)
    #CISCO --ssh.send('config t\n')
    log.info("Config received for push %s", config)
    ssh.send('edit\n')
    log.debug("%s", prompt(ssh))
    stime = time.time()
    for line in config[0].split("\n"):
        if line == "WAIT":
            log.debug("Waiting 30 seconds...")
            time.sleep(30)
        else:
            ssh.send(line + '\n')
            log.info("%s", prompt(ssh))
    log.info("Saving config!")
    ssh.send('save /var/log/AWS_config.txt\n\n\n\n\n')
    log.info("Saved config!")
    time.sleep(15)
    #log.info("%s", prompt(ssh))
    log.info("Committing---")
    ssh.send('commit\n')
    time.sleep(30)
    ssh.send('exit\n')
    #log.info("%s", prompt(ssh))
    log.debug(" --- %s seconds ---", (time.time() - stime))
    ##ssh.send('copy run start\n\n\n\n\n')
    ssh.send('exit\n')
    #log.info("%s", prompt(ssh))
    log.info("Update complete!")

#Logic to determine the bucket prefix from the S3 key name that was provided
def getTransitConfig(bucket_name, bucket_prefix, s3_url, config_file):
    s3 = boto3.client('s3', endpoint_url=s3_url,
                      config=Config(s3={'addressing_style': 'virtual'},
                                    signature_version='s3v4'))
    log.info("Downloading config file: %s/%s/%s%s",
             s3_url, bucket_name, bucket_prefix, config_file)
    return ast.literal_eval(
        s3.get_object(Bucket=bucket_name,
                      Key=bucket_prefix + config_file)['Body'].read())

#Logic to upload a new/updated transit VPC configuration file to S3 (not currently used)
def putTransitConfig(bucket_name, bucket_prefix, s3_url, config_file, config):
    s3 = boto3.client('s3', endpoint_url=s3_url,
                      config=Config(s3={'addressing_style': 'virtual'},
                                    signature_version='s3v4'))
    log.info("Uploading new config file: %s/%s/%s%s",
             s3_url, bucket_name, bucket_prefix, config_file)
    s3.put_object(Bucket=bucket_name, Key=bucket_prefix + config_file,
                  Body=str(config))

#Logic to download the SSH private key from S3 to be used for SSH public key authentication
def downloadPrivateKey(bucket_name, bucket_prefix, s3_url, prikey):
    if os.path.exists('/tmp/' + prikey):
        os.remove('/tmp/' + prikey)
    s3 = boto3.client('s3', endpoint_url=s3_url,
                      config=Config(s3={'addressing_style': 'virtual'},
                                    signature_version='s3v4'))
    log.info("Downloading private key: %s/%s/%s%s",
             s3_url, bucket_name, bucket_prefix, prikey)
    s3.download_file(bucket_name, bucket_prefix + prikey, '/tmp/' + prikey)

#Logic to create the appropriate Cisco configuration
def get_build_artifact_id(build_id):
    """Get the artifact (build.json) from the build project.

    This is an additional call to fetch build.json, which already contains
    the newly built repository's ECR path. We could have consolidated this
    script and executed it in the build phase, but CodeBuild accepts input
    from only one source (and the scripts and application code live in
    different sources), so an additional call is needed to retrieve
    build.json from a different build project.

    Args:
        build_id - Build ID for codebuild (build phase)
    Returns:
        build.json
    Raises:
        Exception: Any exception thrown by handler
    """
    codebuild_client = boto3.client('codebuild')
    response = codebuild_client.batch_get_builds(
        ids=[
            str(build_id),
        ]
    )
    for build in response['builds']:
        s3_location = build['artifacts']['location']
        bucketkey = s3_location.split(":")[5]
        bucket = bucketkey.split("/")[0]
        key = bucketkey[bucketkey.find("/") + 1:]
        s3_client = boto3.client('s3', config=Config(signature_version='s3v4'))
        s3_client.download_file(bucket, key, 'downloaded_object')
        zip_ref = zipfile.ZipFile('downloaded_object', 'r')
        zip_ref.extractall('downloaded_folder')
        zip_ref.close()

    with open('downloaded_folder/build.json') as data_file:
        objbuild = json.load(data_file)
    print(objbuild['tag'])
    return objbuild['tag']
def get_aws_client(client_type, config=None):
    if not config:
        config = Config(signature_version='s3v4')
    aws_access_key = settings.CONFIGURATION.lookup('aws:access_key')
    aws_secret_key = settings.CONFIGURATION.lookup('aws:secret_key')
    if aws_access_key and aws_secret_key:
        c = client(client_type,
                   config=config,
                   aws_access_key_id=aws_access_key,
                   aws_secret_access_key=aws_secret_key)
    else:
        c = client(client_type, config=config)
    return c
def create_client(stage_info, use_accelerate_endpoint=False):
    """
    Creates a client object with a stage credential

    :param stage_info: a stage credential
    :param use_accelerate_endpoint: is accelerate endpoint?
    :return: client
    """
    logger = getLogger(__name__)
    stage_credentials = stage_info[u'creds']
    security_token = stage_credentials.get(u'AWS_TOKEN', None)
    logger.debug(u"AWS_ID: %s", stage_credentials[u'AWS_ID'])

    config = Config(
        signature_version=u's3v4',
        s3={
            'use_accelerate_endpoint': use_accelerate_endpoint,
        })
    client = boto3.resource(
        u's3',
        region_name=stage_info['region'],
        aws_access_key_id=stage_credentials[u'AWS_ID'],
        aws_secret_access_key=stage_credentials[u'AWS_KEY'],
        aws_session_token=security_token,
        config=config,
    )
    return client
def getCert(self, payload):
    payload['kmsauth_token'] = self.kmsauth_token
    payload_json = json.dumps(payload)
    lambdabotoconfig = Config(
        connect_timeout=self.config['timeoutconfig']['connect'],
        read_timeout=self.config['timeoutconfig']['read']
    )
    try:
        mfa_lambda_client = boto3.client(
            'lambda',
            region_name=self.region,
            aws_access_key_id=self.creds['AccessKeyId'],
            aws_secret_access_key=self.creds['SecretAccessKey'],
            aws_session_token=self.creds['SessionToken'],
            config=lambdabotoconfig
        )
        response = mfa_lambda_client.invoke(
            FunctionName=self.config['functionname'],
            InvocationType='RequestResponse',
            LogType='Tail',
            Payload=payload_json,
            Qualifier=self.config['functionversion']
        )
        if response['StatusCode'] != 200:
            raise LambdaInvocationException('Error creating cert.')
    except ConnectTimeout:
        raise LambdaInvocationException('Timeout connecting to Lambda')
    except ReadTimeout:
        raise LambdaInvocationException('Timeout reading cert from Lambda')
    except SSLError:
        raise LambdaInvocationException('SSL error connecting to Lambda')
    except ValueError:
        # On a 404, boto tries to decode any body as json
        raise LambdaInvocationException('Invalid message format in Lambda response')
    payload = json.loads(response['Payload'].read())
    if 'certificate' not in payload:
        raise LambdaInvocationException('No certificate in response.')
    return payload['certificate']
def __init__(self, endpoint_url, bucket_prefix):
    self.s3 = boto3.resource('s3',
                             endpoint_url=endpoint_url,
                             aws_access_key_id=os.environ['S3_KEY'],
                             aws_secret_access_key=os.environ['S3_SECRET'],
                             region_name='us-east-1',
                             config=Config(signature_version='s3v4'))
    self.bucket_prefix = bucket_prefix
def connect(self, refresh=False):
    """
    Establish S3 connection object.

    Parameters
    ----------
    refresh : bool (True)
        Whether to use cached filelists, if already read
    """
    anon, key, secret, kwargs, ckwargs, token, ssl = (
        self.anon, self.key, self.secret, self.kwargs,
        self.client_kwargs, self.token, self.use_ssl)

    # Include the current PID in the connection key so that different
    # SSL connections are made for each process.
    tok = tokenize(anon, key, secret, kwargs, ckwargs, token,
                   ssl, os.getpid())
    if refresh:
        self._conn.pop(tok, None)
    if tok not in self._conn:
        logger.debug("Open S3 connection. Anonymous: %s", self.anon)
        if self.anon:
            from botocore import UNSIGNED
            conf = Config(connect_timeout=self.connect_timeout,
                          read_timeout=self.read_timeout,
                          signature_version=UNSIGNED,
                          **self.config_kwargs)
            self.session = boto3.Session(**self.kwargs)
        else:
            conf = Config(connect_timeout=self.connect_timeout,
                          read_timeout=self.read_timeout,
                          **self.config_kwargs)
            self.session = boto3.Session(self.key, self.secret, self.token,
                                         **self.kwargs)
        s3 = self.session.client('s3', config=conf, use_ssl=ssl,
                                 **self.client_kwargs)
        self._conn[tok] = (s3, self.session)
    else:
        s3, session = self._conn[tok]
        self.session = session
    return s3
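The anonymous branch above relies on botocore's UNSIGNED sentinel, which disables request signing so the client can talk to publicly readable buckets without any credentials. A minimal standalone sketch (the bucket and key names are placeholders):

import boto3
from botocore import UNSIGNED
from botocore.client import Config

# An unsigned client sends no credentials, so it can only reach
# objects and buckets that allow anonymous (public) access.
s3 = boto3.client('s3', config=Config(signature_version=UNSIGNED))
s3.download_file('some-public-bucket', 'data.csv', '/tmp/data.csv')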
def get_s3_client():
    return boto3.client(
        's3',
        'us-east-1',
        config=Config(
            s3={'addressing_style': 'path'}
        )
    )