The following 50 code examples, extracted from open-source Python projects, illustrate how to use boto3.Session().
def rundeck_list_groups():
    """Return the IAM groups from all available profiles as JSON.

    Keys are "(profile) groupname" display strings; values are the bare
    group names. Profiles whose IAM call fails contribute no groups.
    """
    resp_obj = {}
    awsconfig = aws_config.AwsConfig()
    profiles = awsconfig.get_profiles()
    for profile in profiles:
        session = boto3.Session(profile_name=profile)
        iamclient = session.client('iam')
        try:
            groupinfo = iamclient.list_groups()
        except botocore.exceptions.ClientError:
            # Bug fix: the original assigned groupinfo['Groups'] here, but
            # groupinfo is unbound when list_groups() raises (NameError).
            groupinfo = {'Groups': []}
        for group in groupinfo['Groups']:
            grouptext = "(%s) %s" % (profile, group['GroupName'])
            resp_obj[grouptext] = group['GroupName']
    return jsonify(resp_obj)
def rundeck_list_iam_policies():
    """Return the IAM policies from all available profiles as JSON.

    Keys are "(profile) policyname" display strings; values are the bare
    policy names. Profiles whose IAM call fails contribute no policies.
    """
    resp_obj = {}
    awsconfig = aws_config.AwsConfig()
    profiles = awsconfig.get_profiles()
    for profile in profiles:
        session = boto3.Session(profile_name=profile)
        iamclient = session.client('iam')
        try:
            policyinfo = iamclient.list_policies()
        except botocore.exceptions.ClientError:
            # Bug fix: the original assigned policyinfo['Policies'] here, but
            # policyinfo is unbound when list_policies() raises (NameError).
            policyinfo = {'Policies': []}
        for policy in policyinfo['Policies']:
            policytext = "(%s) %s" % (profile, policy['PolicyName'])
            resp_obj[policytext] = policy['PolicyName']
    return jsonify(resp_obj)
def find_existing_record(env, zone_id, dns_name, check_key=None, check_value=None):
    """Check if a specific DNS record exists.

    Args:
        env (str): Deployment environment.
        zone_id (str): Route53 zone id.
        dns_name (str): FQDN of application's dns entry to add/update.
        check_key (str): Key to look for in record. Example: "Type"
        check_value (str): Value to look for with check_key. Example: "CNAME"

    Returns:
        json: Found Record. Returns None if no record found.
    """
    client = boto3.Session(profile_name=env).client('route53')
    pager = client.get_paginator('list_resource_record_sets')
    for rset in pager.paginate(HostedZoneId=zone_id):
        for record in rset['ResourceRecordSets']:
            if record['Name'].rstrip('.') != dns_name:
                continue
            # Bug fix: the original only matched when check_key was given, so
            # name-only lookups always returned None, and its inner `break`
            # did not stop the outer pagination loop.
            if check_key and record.get(check_key) != check_value:
                continue
            LOG.info("Found existing record: %s", record)
            return record
    return None
def delete_existing_cname(env, zone_id, dns_name):
    """Delete an existing CNAME record.

    This is used when updating to multi-region for deleting old records. The
    record can not just be upserted since it changes types.

    Args:
        env (str): Deployment environment.
        zone_id (str): Route53 zone id.
        dns_name (str): FQDN of application's dns entry to add/update.
    """
    route53 = boto3.Session(profile_name=env).client('route53')
    old_record = find_existing_record(env, zone_id, dns_name,
                                      check_key='Type', check_value='CNAME')
    if old_record:
        LOG.info("Deleting old record: %s", dns_name)
        _response = route53.change_resource_record_sets(
            HostedZoneId=zone_id,
            ChangeBatch={'Changes': [{
                'Action': 'DELETE',
                'ResourceRecordSet': old_record,
            }]})
        LOG.debug('Response from deleting %s: %s', dns_name, _response)
def find_elb_dns_zone_id(name='', env='dev', region='us-east-1'):
    """Get an application's AWS elb dns zone id.

    Args:
        name (str): ELB name
        env (str): Environment/account of ELB
        region (str): AWS Region

    Returns:
        str: elb DNS zone ID
    """
    LOG.info('Find %s ELB DNS Zone ID in %s [%s].', name, env, region)
    elb_client = boto3.Session(profile_name=env).client('elb', region_name=region)
    descriptions = elb_client.describe_load_balancers(LoadBalancerNames=[name])
    return descriptions['LoadBalancerDescriptions'][0]['CanonicalHostedZoneNameID']
def get_sns_subscriptions(app_name, env, region):
    """List SNS lambda subscriptions.

    Args:
        app_name (str): Lambda application name.
        env (str): Deployment environment/account.
        region (str): AWS region.

    Returns:
        list: List of Lambda subscribed SNS ARNs.
    """
    session = boto3.Session(profile_name=env, region_name=region)
    sns_client = session.client('sns')
    lambda_alias_arn = get_lambda_alias_arn(app=app_name, account=env, region=region)
    lambda_subscriptions = []
    # Bug fix: list_subscriptions() returns at most one page; paginate so
    # subscriptions beyond the first page are not silently missed.
    paginator = sns_client.get_paginator('list_subscriptions')
    for page in paginator.paginate():
        for subscription in page['Subscriptions']:
            if subscription['Protocol'] == "lambda" and subscription['Endpoint'] == lambda_alias_arn:
                lambda_subscriptions.append(subscription['SubscriptionArn'])
    if not lambda_subscriptions:
        LOG.debug('SNS subscription for function %s not found', lambda_alias_arn)
    return lambda_subscriptions
def __init__(self, app='', env='', region='', rules=None, prop_path=''):
    """Set up API Gateway and Lambda clients for an application trigger.

    Args:
        app (str): Spinnaker Application name.
        env (str): Deployment environment.
        region (str): AWS region.
        rules (dict): Trigger settings for the gateway.
        prop_path (str): Path to the rendered properties file.
    """
    self.log = logging.getLogger(__name__)
    self.generated = get_details(app=app, env=env)
    # Bug fix: `rules={}` was a mutable default argument shared across all
    # calls; accept None and create a fresh dict instead.
    self.trigger_settings = rules if rules is not None else {}
    self.app_name = self.generated.app_name()
    self.env = env
    self.account_id = get_env_credential(env=self.env)['accountId']
    self.region = region
    self.properties = get_properties(properties_file=prop_path, env=self.env)
    session = boto3.Session(profile_name=env, region_name=region)
    self.client = session.client('apigateway')
    self.lambda_client = session.client('lambda')
    self.api_version = self.lambda_client.meta.service_model.api_version
    self.api_id = self.find_api_id()
    self.resource_id, self.parent_id = self.find_resource_ids()
def destroy_cloudwatch_event(app='', env='dev', region=''):
    """Destroy Cloudwatch event subscription.

    Args:
        app (str): Spinnaker Application name.
        env (str): Deployment environment.
        region (str): AWS region.

    Returns:
        bool: True upon successful completion.
    """
    events_client = boto3.Session(profile_name=env, region_name=region).client('events')
    # Remove this application's target from every rule it is attached to.
    for rule_name in get_cloudwatch_event_rule(app_name=app, account=env, region=region):
        events_client.remove_targets(Rule=rule_name, Ids=[app])
    return True
def destroy_cloudwatch_log_event(app='', env='dev', region=''):
    """Destroy Cloudwatch log event.

    Args:
        app (str): Spinnaker Application name.
        env (str): Deployment environment.
        region (str): AWS region.

    Returns:
        bool: True upon successful completion.
    """
    logs_client = boto3.Session(profile_name=env, region_name=region).client('logs')
    # FIXME: see below
    # TODO: Log group name is required, where do we get it if it is not in
    # application-master-env.json?
    logs_client.delete_subscription_filter(
        logGroupName='/aws/lambda/awslimitchecker', filterName=app)
    return True
def __init__(self, app=None, env=None, region='us-east-1', prop_path=None):
    """AWS Data Pipeline object.

    Args:
        app (str): Application name
        env (str): Environment/Account
        region (str): AWS Region
        prop_path (str): Path of environment property file
    """
    self.app_name = app
    self.env = env
    self.region = region
    self.pipeline_id = None
    self.properties = get_properties(prop_path)
    self.datapipeline_data = self.properties[self.env]['datapipeline']
    # Project/group comes from the generated application details.
    self.group = get_details(app=self.app_name).data['project']
    aws_session = boto3.Session(profile_name=self.env, region_name=self.region)
    self.client = aws_session.client('datapipeline')
def handler(event, context):
    """CloudFormation custom-resource handler that fans SNS topics out to
    every region where SNS is available, creating on Create/Update and
    removing on Delete."""
    log.debug("Received event {}".format(json.dumps(event)))
    props = event['ResourceProperties']
    stage = props['Stage']
    topicName = props['TopicNamePrefix'] + '-' + stage
    requestType = event['RequestType']
    # Initialize a Session object in order to look up Config regions
    snsRegions = boto3.Session().get_available_regions(
        service_name='sns',
        partition_name='aws',
    )
    # Create and Update take the same action, so fold them together.
    if "Create" in requestType or "Update" in requestType:
        create_topics(snsRegions, topicName, context, event, stage)
    elif "Delete" in requestType:
        delete_topics(snsRegions, topicName, context, event)
def main(region, profile='default'):
    """Create and deploy an API Gateway from a RAML schema and Apex project.

    Args:
        region (str): AWS region for the session.
        profile (str): AWS credentials profile name.
    """
    session = boto3.Session(
        profile_name=profile,
        region_name=region
    )
    apex_json = json.load(open('project.json'))
    raml = ramlfications.parse('api_schema.raml')
    gateway = ApiGateway(raml, apex_json, session)
    # Bug fix: the original used Python-2-only `print` statements; the
    # function-call form below is valid on both Python 2 and 3.
    print('Creating Api Gateway')
    gateway.create()
    gateway.load()
    print('Creating Authorizers')
    gateway.create_authorizers()
    print('Creating Resources')
    gateway.create_resources()
    print('Deploying Stage')
    print(gateway.create_deployment())
def get_session(profile_config):
    """Build a boto3 Session for a profile configuration dict.

    Args:
        profile_config (dict): Must contain 'profile_name'; may contain
            'source_profile' (overrides the profile) and 'region'.

    Returns:
        boto3.Session: Session for the resolved profile.
    """
    session_profile = profile_config['profile_name']
    if 'source_profile' in profile_config:
        session_profile = profile_config['source_profile']
    if 'region' in profile_config:
        # Bug fix: os.putenv() does not update os.environ, so libraries that
        # read os.environ (like boto3) never saw the region; assign the
        # mapping directly, which also calls putenv under the hood.
        os.environ['AWS_DEFAULT_REGION'] = profile_config['region']
        os.environ['AWS_REGION'] = profile_config['region']
    # Create a session using profile or EC2 Instance Role
    # To use Instance Role set `source_profile` to empty string in aws profile
    # configuration file
    session = boto3.Session(profile_name=session_profile)
    return session
def __init__(self, queue_url, worker, session=None, use_short_polling=False,
             polling_timeout=10, polling_count=10):
    """SQS queue poller that feeds received messages to *worker*.

    Args:
        queue_url (str): Full SQS queue URL (region is parsed from it when
            no session is supplied).
        worker: Callable/worker object invoked by the polling thread.
        session: Optional boto3 Session; built from the queue's region
            when not given.
        use_short_polling (bool): Use short polling instead of long polling.
        polling_timeout (int): Long-poll wait time.
        polling_count (int): Max messages fetched per poll.
    """
    self.use_short_polling = use_short_polling
    self.polling_timeout = polling_timeout
    self.polling_count = polling_count
    if not session:
        session = Session(region_name=self.parse_region_name(queue_url))
    self.session = session
    self.sqs = self.session.resource('sqs')
    self.queue = self.sqs.Queue(url=queue_url)
    self.logger = get_logger(__name__)
    self.should_stop = Event()
    self.poller_thread = Thread(group=None, target=self._poll_messages)
    self.worker = worker
def test_botocore_instrumentation(mock_make_request, elasticapm_client):
    """Verify an EC2 DescribeInstances call is recorded as an APM span."""
    mock_response = mock.Mock()
    mock_response.status_code = 200
    mock_make_request.return_value = (mock_response, {})
    elasticapm_client.begin_transaction("transaction.test")
    with capture_span("test_pipeline", "test"):
        ec2 = boto3.Session(aws_access_key_id='foo',
                            aws_secret_access_key='bar',
                            region_name='us-west-2').client('ec2')
        ec2.describe_instances()
    elasticapm_client.end_transaction("MyView")
    transactions = elasticapm_client.instrumentation_store.get_all()
    span_names = [span['name'] for span in transactions[0]['spans']]
    assert 'ec2:DescribeInstances' in span_names
def validate_regions(self, regions, action_name, ):
    """Validate and expand the region list for an action.

    For regional, multi-region actions: an empty/None list resolves to the
    current session's region, "*" expands to all available regions, and any
    unknown region raises ValueError. For non-regional actions a warning is
    issued if regions were supplied and an empty list is returned.
    """
    action_properties = actions.get_action_properties(action_name)
    service_name = action_properties[actions.ACTION_SERVICE]
    multi_region = action_properties.get(actions.ACTION_MULTI_REGION, True)
    if not (self.service_is_regional(service_name) and multi_region):
        # Regions are meaningless for this service/action combination.
        if regions is not None and len(regions) != 0:
            self._warn(WARN_NOT_REGIONAL_SERVICE.format(",".join(regions), service_name, action_name))
        return []
    if regions is None or len(regions) == 0:
        # Default to the region of the ambient session.
        return [boto3.Session().region_name]
    available_regions = self.service_regions(service_name)
    if len(regions) == 1 and list(regions)[0] == "*":
        return available_regions
    for region in regions:
        if region not in available_regions:
            raise ValueError(MSG_BAD_REGION.format(region, service_name, ",".join(available_regions)))
    return list(regions)
def get_session(role_arn=None, sts_client=None):
    """
    Created a session for the specified role
    :param role_arn: Role arn
    :param sts_client: Optional sts client, if not specified a (cache) sts client instance is used
    :return: Session for the specified role
    """
    if role_arn is None:
        # No role requested: fall back to the default credential chain.
        return boto3.Session()
    sts = sts_client if sts_client is not None else boto3.client("sts")
    account = AwsService.account_from_role_arn(role_arn)
    session_name = "{}-{}".format(account, str(uuid.uuid4()))
    token = sts.assume_role(RoleArn=role_arn, RoleSessionName=session_name)
    credentials = token["Credentials"]
    return boto3.Session(aws_access_key_id=credentials["AccessKeyId"],
                         aws_secret_access_key=credentials["SecretAccessKey"],
                         aws_session_token=credentials["SessionToken"])
def describe(self, as_tuple=None, **kwargs):
    """
    This method is to retrieve a pseudo UTC time resource; other method
    parameters are only used for signature compatibility.
    :param as_tuple: Set to true to return results as immutable named
        dictionaries instead of dictionaries
    :return: Pseudo time resource
    """
    region = kwargs.get("region")
    result = {
        "Time": datetime.datetime.now(pytz.timezone("UTC")),
        "AwsAccount": self.aws_account,
        "Region": region if region else boto3.Session().region_name
    }
    # Fall back to the instance-level default when as_tuple is not given.
    wants_tuple = as_tuple if as_tuple is not None else self._as_tuple
    return [as_namedtuple("Time", result)] if wants_tuple else [result]
def get_session(config):
    """Return a cached boto3 Session for config.profile_name, additionally
    assuming config.role_arn (also cached) when one is configured."""
    if config.profile_name not in _session_cache:
        print('Creating new Boto3 Session for profile {0}'.format(config.profile_name))
        _session_cache[config.profile_name] = boto3.Session(profile_name=config.profile_name)
    session = _session_cache[config.profile_name]
    if not config.role_arn:
        return session
    if config.role_arn not in _session_cache:
        sts_client = session.client('sts')
        role_session_name = '{0}.session-{1}'.format(__name__, time.time())
        print('Assuming role {0}'.format(config.role_arn))
        assumed_role = sts_client.assume_role(RoleArn=config.role_arn,
                                              ExternalId=config.external_id,
                                              RoleSessionName=role_session_name)
        creds = assumed_role['Credentials']
        _session_cache[config.role_arn] = boto3.Session(
            aws_access_key_id=creds['AccessKeyId'],
            aws_secret_access_key=creds['SecretAccessKey'],
            aws_session_token=creds['SessionToken'])
    return _session_cache[config.role_arn]
def test_execute_plan_to_create_user_with_downloaded_yaml():
    """ Create a new user from downloaded YAML file """
    delete_test_user_and_group()
    session = Session()
    s3_client = session.client('s3')
    test_user_1 = open(os.path.join(os.path.dirname(__file__), 'test_user_1.yml')).read()
    s3_client.create_bucket(Bucket='test')
    s3_client.put_object(Bucket='test', Key='test.yml', Body=test_user_1)
    response = s3_client.get_object(Bucket='test', Key='test.yml')
    contents = response['Body'].read()
    # Bug fix: yaml.load() without an explicit Loader is unsafe (can build
    # arbitrary Python objects) and deprecated; the fixture is plain data,
    # so safe_load is sufficient.
    yaml_content = yaml.safe_load(contents)
    current_users = Users.from_passwd()
    provided_users = Users.from_dict(yaml_content)
    plan = create_plan(existing_users=current_users, proposed_users=provided_users,
                       manage_home=False,
                       protected_users=['travis', 'couchdb', 'ubuntu', 'nginx',
                                        'hadfielj', 'vagrant', CURRENT_USER])
    execute_plan(plan=plan)
    updated_users = Users.from_passwd()
    updated_user = updated_users.describe_users(users_filter=dict(name='dummy'))
    assert len(updated_user) == 1
    assert updated_user[0].name == 'dummy'
    assert updated_user[0].gecos == '\"dummy (user) test\"'
    assert not updated_user[0].public_keys
    assert updated_user[0].sudoers_entry == 'ALL=(ALL:ALL) ALL'
    delete_test_user_and_group()
def __init__(self, options=None):
    """Initialize the DynamoDB CLI wrapper from docopt-style options.

    Args:
        options (dict): Parsed command-line options; may be None.
    """
    self.options = options if options else {}
    # Bug fix: the original read from `options` (which may be None) instead
    # of the normalized `self.options`, raising AttributeError when the
    # class was constructed with no options.
    self.debug = self.options.get('--debug', False)
    self.profile = self.options.get('--profile', None)
    self.region = self.options.get('--region', None)
    self.table_cache_ttl = int(self.options.get('--table-cache-ttl', 300))
    self.last_refresh_time = defaultdict(int)
    # Probe session used only to discover the account's default region.
    probe_session = boto3.Session(profile_name=self.profile)
    # dash (-) is not allowed in database name so we use underscore (_)
    # instead in region name; throughout this module region name will
    # *always* use underscore
    if self.region:
        self.default_region = self.region.replace('-', '_')
    elif probe_session.region_name:
        self.default_region = probe_session.region_name.replace('-', '_')
    else:
        self.default_region = DEFAULT_REGION
    self.boto3_session = boto3.Session(
        profile_name=self.profile,
        region_name=self.default_region.replace('_', '-'))
    self.db = self.init_db()
    # attach the default region too
    self.attach_region(self.default_region)
def records(account_id):
    """Fetch locks data """
    table = boto3.Session().resource('dynamodb').Table('Sphere11.Dev.ResourceLocks')
    items = table.scan()['Items']
    for item in items:
        # Convert stored epoch timestamps into datetimes for display.
        for key in ('LockDate', 'RevisionDate'):
            if key in item:
                item[key] = datetime.fromtimestamp(item[key])
    print(tabulate.tabulate(items, headers="keys", tablefmt='fancy_grid'))
def config_status():
    """ Check config status in an account. """
    client = boto3.Session().client('config')
    channels = client.describe_delivery_channel_status()['DeliveryChannelsStatus']
    for channel in channels:
        # One YAML document per delivery channel, showing last delivery times.
        summary = {
            channel['name']: dict(
                snapshot=str(channel['configSnapshotDeliveryInfo'].get('lastSuccessfulTime')),
                history=str(channel['configHistoryDeliveryInfo'].get('lastSuccessfulTime')),
                stream=str(channel['configStreamDeliveryInfo'].get('lastStatusChangeTime')),
            ),
        }
        print(yaml.safe_dump(summary, default_flow_style=False))
def __init__(self, msg_file, config, msg_plain=False, json_dump_file=None):
    """Load an alert message from disk, optionally base64/zlib encoded.

    Args:
        msg_file (str): Path to the message file.
        config: Configuration object stored on the instance.
        msg_plain (bool): If True the file holds plain JSON; otherwise it is
            base64-encoded, zlib-compressed JSON.
        json_dump_file (str): Optional path to dump the decoded JSON to.

    Raises:
        RuntimeError: If *msg_file* does not exist.
    """
    if not os.path.exists(msg_file):
        raise RuntimeError("File does not exist: %s" % msg_file)
    logger.debug('Reading message from: %s', msg_file)
    with open(msg_file, 'r') as handle:
        raw = handle.read()
    logger.debug('Read %d byte message', len(raw))
    if msg_plain:
        raw = raw.strip()
    else:
        logger.debug('base64-decoding and zlib decompressing message')
        raw = zlib.decompress(base64.b64decode(raw))
    if json_dump_file is not None:
        with open(json_dump_file, 'w') as handle:
            handle.write(raw)
    self.data = json.loads(raw)
    logger.debug('Loaded message JSON')
    self.config = config
    self.session = boto3.Session()
def ls(args):
    """
    List S3 buckets. See also "aws s3 ls". Use "aws s3 ls NAME" to list bucket contents.
    """
    table = []
    for bucket in filter_collection(resources.s3.buckets, args):
        bucket.LocationConstraint = clients.s3.get_bucket_location(Bucket=bucket.name)["LocationConstraint"]
        cloudwatch = resources.cloudwatch
        bucket_region = bucket.LocationConstraint or "us-east-1"
        if bucket_region != cloudwatch.meta.client.meta.region_name:
            # Bucket metrics live in the bucket's own region; build a
            # CloudWatch resource there.
            cloudwatch = boto3.Session(region_name=bucket_region).resource("cloudwatch")
        data = get_cloudwatch_metric_stats(
            "AWS/S3", "NumberOfObjects",
            start_time=datetime.utcnow() - timedelta(days=2),
            end_time=datetime.utcnow(), period=3600, BucketName=bucket.name,
            StorageType="AllStorageTypes", resource=cloudwatch)
        bucket.NumberOfObjects = int(data["Datapoints"][-1]["Average"]) if data["Datapoints"] else None
        data = get_cloudwatch_metric_stats(
            "AWS/S3", "BucketSizeBytes",
            start_time=datetime.utcnow() - timedelta(days=2),
            end_time=datetime.utcnow(), period=3600, BucketName=bucket.name,
            StorageType="StandardStorage", resource=cloudwatch)
        bucket.BucketSizeBytes = format_number(data["Datapoints"][-1]["Average"]) if data["Datapoints"] else None
        table.append(bucket)
    page_output(tabulate(table, args))
def audit_2_3(self):
    """2.3 Ensure the S3 bucket CloudTrail logs to is not publicly accessible (Scored)"""
    raise NotImplementedError()
    # NOTE: everything below is unreachable draft scaffolding kept as-is.
    import boto3
    s3 = boto3.session.Session(region_name="us-east-1").resource("s3")
    # s3 = boto3.resource("s3")
    # for trail in self.trails:
    #     for grant in s3.Bucket(trail["S3BucketName"]).Acl().grants:
    #         print(s3.Bucket(trail["S3BucketName"]).Policy().policy)
    for bucket in s3.buckets.all():
        print(bucket)
        try:
            print(" Policy:", bucket.Policy().policy)
        except:
            pass
        for grant in bucket.Acl().grants:
            try:
                print(" Grant:", grant)
            except:
                pass
def __init__(self):
    """
    Initialize the data base

    Connects either to a local DynamoDB endpoint (when running under the
    Django test runner) or to the real service via the shared session, then
    binds the metadata table.
    """
    self.__local_dynamo = os.environ.get('USING_DJANGO_TESTRUNNER') is not None
    if self.__local_dynamo:
        # Local endpoint with throwaway credentials for test runs.
        tablename = config["aws"]["meta-db"]
        session = boto3.Session(aws_access_key_id='foo', aws_secret_access_key='foo')
        dynamodb = session.resource('dynamodb', region_name='us-east-1',
                                    endpoint_url='http://localhost:8000')
    else:
        session = get_session()
        dynamodb = session.resource('dynamodb')
        if 'test' in sys.argv:
            # TODO: This needs to be made more robust. Parameters should be
            # mocked, not assumed.
            tablename = 'intTest.' + config["aws"]["meta-db"]
        else:
            tablename = config["aws"]["meta-db"]
    self.table = dynamodb.Table(tablename)
def github_webhook_to_sqs(event, context):
    """
    When called, dumps the content of event.body into an sqs queue
    """
    if "body" not in event:
        return {"message": "Hook was called with no body"}
    try:
        sqs = boto3.Session(
            aws_access_key_id=AWS_USER,
            aws_secret_access_key=AWS_KEY,
            region_name='eu-west-1'
        ).resource('sqs')
        queue = sqs.get_queue_by_name(QueueName=QUEUE_NAME)
        queue.send_message(MessageBody=event["body"])
        return {"message": "Message posted to %s" % QUEUE_NAME}
    except Exception:
        # Bug fix: the bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; keep the deliberate best-effort response but
        # only for ordinary exceptions.
        return {"message": "Message posting failed"}
def create_boto_sts_client(profileName=None, secretAccessKey=None, accessKeyId=None, sessionToken=None):
    """
    profileName - the name of the profile to create the client with;
    secretAccessKey - secret access key that can be passed into the session;
    accessKeyId - access key id that can be passed into the session;
    sessionToken - session token that can be passed into the session;
    return a boto3 session client
    """
    log.info('Creating a Boto3 STS client')
    log.debug('profile_name=' + str(profileName))
    log.debug('aws_access_key_id=' + str(accessKeyId))
    log.debug('aws_secret_access_key=' + str(secretAccessKey))
    log.debug('aws_session_token=' + str(sessionToken))
    # establish the boto session with given credentials
    botoSession = boto3.Session(profile_name=profileName,
                                aws_access_key_id=accessKeyId,
                                aws_secret_access_key=secretAccessKey,
                                aws_session_token=sessionToken)
    # create an sts client, always defaulted to us-east-1
    return botoSession.client('sts', region_name='us-east-1')
def is_valid_awsume_session(awsumeSession):
    """
    awsumeSession - the session to validate;
    return whether or not the session is valid;
    if credentials are expired, or don't exist, they're invalid;
    else they are valid
    """
    log.info('Checking if the session is valid')
    expiration = awsumeSession.get('Expiration')
    # Sessions with no expiration are treated as invalid.
    if expiration:
        # Bug fix (cleanup): the original compared against
        # datetime.datetime.now().replace() — replace() with no arguments is
        # a no-op copy, so compare against now() directly.
        # NOTE(review): assumes Expiration is a naive datetime; a tz-aware
        # value would raise TypeError here — confirm with callers.
        if expiration > datetime.datetime.now():
            return True
        log.debug('Session is expired')
    return False
def __init__(self, session, key_prefix, table_name, aws_access_key_id=None,
             aws_secret_access_key=None, region=None, endpoint_url=None,
             use_signer=False, permanent=True):
    """DynamoDB-backed session interface.

    Builds a boto3 Session from the given credentials when none is supplied,
    then verifies the backing table exists before use.

    Raises:
        RuntimeError: If *table_name* is not present in DynamoDB.
    """
    if session is None:
        import boto3
        session = boto3.Session(aws_access_key_id=aws_access_key_id,
                                aws_secret_access_key=aws_secret_access_key,
                                region_name=region)
    self.client = session.client('dynamodb', endpoint_url=endpoint_url)
    self.key_prefix = key_prefix
    self.use_signer = use_signer
    self.permanent = permanent
    existing_tables = self.client.list_tables().get(u'TableNames')
    if table_name not in existing_tables:
        raise RuntimeError("The table {0!s} does not exist in DynamoDB for the requested region of {1!s}. Please "
                           "ensure that the table has a PrimaryKey of \"SessionID\"".format(
                               table_name, session.region_name
                           ))
    self.table_name = table_name
def main():
    """Assume an IAM role and write its temporary credentials as KEY=VALUE
    lines to stdout or to the file named by --output."""
    args = parse_args()
    set_logging_level(args.quiet, args.verbose)
    log.debug('Args: %r', args)
    session = boto3.Session(profile_name=args.profile)
    exported_keys = [
        'AccessKeyId',
        'SecretAccessKey',
        'SessionToken',
        'Expiration',
    ]
    sts = session.client('sts')
    assumed_role = sts.assume_role(
        RoleArn=args.role,
        RoleSessionName=args.session_name,
    )
    credentials = assumed_role.get('Credentials', {})
    credentials['Expiration'] = credentials['Expiration'].isoformat()
    out = sys.stdout if args.output is None else open(args.output, 'w')
    try:
        for k in exported_keys:
            out.write('{0}={1}\n'.format(k, credentials.get(k)))
    finally:
        # Bug fix: the original leaked the output file handle; close it
        # (but never close stdout).
        if out is not sys.stdout:
            out.close()
def _get_s3_files(file_paths):
    """Download each entry in *file_paths* from the configured S3 bucket
    into the PYFIDDLE_WRITE_DIR directory."""
    session_get_files = boto3.Session(
        aws_access_key_id=os.getenv("PYFIDDLE_S3_KEY"),
        aws_secret_access_key=os.getenv("PYFIDDLE_S3_SECRET"),
        region_name=os.getenv("PYFIDDLE_S3_REGION"),
    )
    s3 = session_get_files.resource('s3')
    file_dir = os.getenv("PYFIDDLE_WRITE_DIR")
    bucket = s3.Bucket(os.getenv("PYFIDDLE_S3_BUCKET"))
    for file_path in file_paths:
        # Objects are keyed by "<script id>/<file name>".
        bucket.download_file(
            str(file_path.script.id) + "/" + file_path.name,
            file_dir + file_path.name)
def check_aws_credentials():
    """Ensure boto3 can resolve credentials, injecting dummy LocalStack keys
    into the environment when none are found."""
    credentials = None
    try:
        credentials = boto3.Session().get_credentials()
    except Exception:
        pass
    if not credentials:
        # set temporary dummy credentials
        os.environ['AWS_ACCESS_KEY_ID'] = 'LocalStackDummyAccessKey'
        os.environ['AWS_SECRET_ACCESS_KEY'] = 'LocalStackDummySecretKey'
        credentials = boto3.Session().get_credentials()
    assert credentials


# -----------------------------
# INFRASTRUCTURE HEALTH CHECKS
# -----------------------------
def rundeck_list_resources():
    """Return a list of S3 and EC2 resources from all available profiles.

    Keys are "s3: (profile) bucket" / "ec2: (profile) instance-id" display
    strings; values are the bare resource identifiers.
    """
    resp_obj = {}
    awsconfig = aws_config.AwsConfig()
    profiles = awsconfig.get_profiles()
    # Populate s3 buckets.
    for profile in profiles:
        session = boto3.Session(profile_name=profile)
        s3client = session.client('s3')
        try:
            s3info = s3client.list_buckets()
        except botocore.exceptions.ClientError:
            # Bug fix: s3info is unbound when list_buckets() raises.
            s3info = {'Buckets': []}
        for bucket in s3info['Buckets']:
            bucket_text = "s3: (%s) %s" % (profile, bucket['Name'])
            resp_obj[bucket_text] = bucket['Name']
    # Populate ec2 instances.
    for profile in profiles:
        session = boto3.Session(profile_name=profile)
        ec2client = session.client('ec2', region_name="us-east-1")
        try:
            ec2info = ec2client.describe_instances()
        except botocore.exceptions.ClientError:
            # Bug fix: the original set ec2info['Instances'] on an unbound
            # name, and the loop below reads 'Reservations' anyway.
            ec2info = {'Reservations': []}
        for reservation in ec2info['Reservations']:
            for instance in reservation['Instances']:
                instance_text = "ec2: (%s) %s" % \
                    (profile, instance['InstanceId'])
                resp_obj[instance_text] = instance['InstanceId']
    return jsonify(resp_obj)
def list_groups(profile):
    """Return all the IAM groups for *profile* as JSON.

    Responds with status FAIL when the profile is not configured; otherwise
    status OK plus the list of group names.
    """
    resp_obj = {}
    resp_obj['status'] = 'OK'
    awsconfig = aws_config.AwsConfig()
    profiles = awsconfig.get_profiles()
    if profile not in profiles:
        resp_obj['status'] = 'FAIL'
        return jsonify(resp_obj)
    session = boto3.Session(profile_name=profile)
    iamclient = session.client('iam')
    try:
        groupinfo = iamclient.list_groups()
    except botocore.exceptions.ClientError:
        # Bug fix: the original assigned groupinfo['Groups'] here, but
        # groupinfo is unbound when list_groups() raises (NameError).
        groupinfo = {'Groups': []}
    resp_obj['groups'] = [group['GroupName'] for group in groupinfo['Groups']]
    return jsonify(resp_obj)
def __init__(self, profile_names=None, access_key_id=None,
             secret_access_key=None, iam_role_discover=False):
    '''
    Create a EC2 service client to one ore more environments by name.
    '''
    service = 'ec2'
    orca_config = OrcaConfig()
    self.regions = orca_config.get_regions()
    self.clients = {}
    if profile_names is not None:
        # One non-regional client per explicitly requested profile.
        for profile_name in profile_names:
            self.clients[profile_name] = \
                boto3.Session(profile_name=profile_name).client(service)
    elif access_key_id is not None and secret_access_key is not None:
        # Single client from explicit credentials.
        self.clients['default'] = boto3.client(
            service,
            aws_access_key_id=access_key_id,
            aws_secret_access_key=secret_access_key)
    elif iam_role_discover:
        # Instance-role credentials: one client per configured region.
        session = boto3.Session()
        self.clients['default'] = {
            region: session.client(service, region_name=region)
            for region in self.regions}
    else:
        # Fall back to every configured profile, one client per region.
        self.awsconfig = AwsConfig()
        for profile in self.awsconfig.get_profiles():
            session = boto3.Session(profile_name=profile)
            self.clients[profile] = {
                region: session.client(service, region_name=region)
                for region in self.regions}
def __init__(self, profile_names=None, access_key_id=None,
             secret_access_key=None, iam_role_discover=False):
    '''
    Create a ELB service client to one ore more environments by name.
    '''
    service = 'elb'
    orca_config = OrcaConfig()
    self.regions = orca_config.get_regions()
    self.clients = {}
    if profile_names is not None:
        # One non-regional client per explicitly requested profile.
        for profile_name in profile_names:
            self.clients[profile_name] = \
                boto3.Session(profile_name=profile_name).client(service)
    elif access_key_id is not None and secret_access_key is not None:
        # Single client from explicit credentials.
        self.clients['default'] = boto3.client(
            service,
            aws_access_key_id=access_key_id,
            aws_secret_access_key=secret_access_key)
    elif iam_role_discover:
        # Instance-role credentials: one client per configured region.
        session = boto3.Session()
        self.clients['default'] = {
            region: session.client(service, region_name=region)
            for region in self.regions}
    else:
        # Fall back to every configured profile, one client per region.
        self.awsconfig = AwsConfig()
        for profile in self.awsconfig.get_profiles():
            session = boto3.Session(profile_name=profile)
            self.clients[profile] = {
                region: session.client(service, region_name=region)
                for region in self.regions}
def __init__(self, profile_names=None, access_key_id=None,
             secret_access_key=None, iam_role_discover=False):
    '''
    Create a S3 service client to one ore more environments by name.
    '''
    service = 's3'
    self.clients = {}
    if profile_names is not None:
        # One client per explicitly requested profile.
        for profile_name in profile_names:
            self.clients[profile_name] = \
                boto3.Session(profile_name=profile_name).client(service)
    elif access_key_id is not None and secret_access_key is not None:
        # Single client from explicit credentials.
        self.clients['default'] = boto3.client(
            service,
            aws_access_key_id=access_key_id,
            aws_secret_access_key=secret_access_key)
    elif iam_role_discover:
        # Instance-role credentials: a single default client.
        self.clients['default'] = boto3.Session().client(service)
    else:
        # Fall back to every profile in the AWS config.
        awsconfig = AwsConfig()
        for profile in awsconfig.get_profiles():
            self.clients[profile] = \
                boto3.Session(profile_name=profile).client(service)
def __init__(self, profile_names=None, access_key_id=None,
             secret_access_key=None, iam_role_discover=False):
    """
    Create a Autoscaling service client to one ore more environments by name.
    """
    service = 'application-autoscaling'
    orca_config = OrcaConfig()
    self.regions = orca_config.get_regions()
    self.clients = {}
    if profile_names is not None:
        # One non-regional client per explicitly requested profile.
        for profile_name in profile_names:
            self.clients[profile_name] = \
                boto3.Session(profile_name=profile_name).client(service)
    elif access_key_id is not None and secret_access_key is not None:
        # Single client from explicit credentials.
        self.clients['default'] = boto3.client(
            service,
            aws_access_key_id=access_key_id,
            aws_secret_access_key=secret_access_key)
    elif iam_role_discover:
        # Instance-role credentials: one client per configured region.
        session = boto3.Session()
        self.clients['default'] = {
            region: session.client(service, region_name=region)
            for region in self.regions}
    else:
        # Fall back to every configured profile, one client per region.
        awsconfig = AwsConfig()
        for profile in awsconfig.get_profiles():
            session = boto3.Session(profile_name=profile)
            self.clients[profile] = {
                region: session.client(service, region_name=region)
                for region in self.regions}
def __init__(self, profile_names=None, access_key_id=None,
             secret_access_key=None, iam_role_discover=False):
    '''
    Create a cloudwatch service client to one ore more environments by name.
    '''
    service = 'cloudwatch'
    orca_config = OrcaConfig()
    self.regions = orca_config.get_regions()
    self.clients = {}
    if profile_names is not None:
        # One non-regional client per explicitly requested profile.
        for profile_name in profile_names:
            self.clients[profile_name] = \
                boto3.Session(profile_name=profile_name).client(service)
    elif access_key_id is not None and secret_access_key is not None:
        # Single client from explicit credentials.
        self.clients['default'] = boto3.client(
            service,
            aws_access_key_id=access_key_id,
            aws_secret_access_key=secret_access_key)
    elif iam_role_discover:
        # Instance-role credentials: one client per configured region.
        session = boto3.Session()
        self.clients['default'] = {
            region: session.client(service, region_name=region)
            for region in self.regions}
    else:
        # Fall back to every configured profile, one client per region.
        awsconfig = AwsConfig()
        for profile in awsconfig.get_profiles():
            session = boto3.Session(profile_name=profile)
            self.clients[profile] = {
                region: session.client(service, region_name=region)
                for region in self.regions}
def __init__(self, profile_names=None, access_key_id=None,
             secret_access_key=None, iam_role_discover=False):
    '''
    Create a iam service client to one ore more environments by name.
    '''
    service = 'iam'
    self.clients = {}
    if profile_names is not None:
        for profile_name in profile_names:
            session = boto3.Session(profile_name=profile_name)
            self.clients[profile_name] = session.client(service)
    elif access_key_id is not None and secret_access_key is not None:
        self.clients['default'] = boto3.client(
            service,
            aws_access_key_id=access_key_id,
            aws_secret_access_key=secret_access_key)
    else:
        if iam_role_discover:
            session = boto3.Session()
            # Bug fix: the original assigned to self.client['default'] (typo)
            # which raised AttributeError; every other branch — and the
            # sibling service classes — use self.clients.
            self.clients['default'] = session.client(service)
        else:
            awsconfig = AwsConfig()
            profiles = awsconfig.get_profiles()
            for profile in profiles:
                session = boto3.Session(profile_name=profile)
                self.clients[profile] = session.client(service)
def check_sts_token(self, profile):
    """ Verifies that STS credentials are valid """
    # Don't check for creds if profile is blank
    if not profile:
        return False
    parser = RawConfigParser()
    parser.read(self.creds_file)
    if not os.path.exists(self.creds_dir):
        if self.verbose:
            print("AWS credentials path does not exit. Not checking.")
        return False
    if not os.path.isfile(self.creds_file):
        if self.verbose:
            print("AWS credentials file does not exist. Not checking.")
        return False
    if not parser.has_section(profile):
        if self.verbose:
            print("No existing credentials found. Requesting new credentials.")
        return False
    session = boto3.Session(profile_name=profile)
    sts = session.client('sts')
    try:
        sts.get_caller_identity()
    except ClientError as ex:
        # Only an expired token invalidates the profile; other client
        # errors fall through to the success path, as before.
        if ex.response['Error']['Code'] == 'ExpiredToken':
            print("Temporary credentials have expired. Requesting new credentials.")
            return False
    if self.verbose:
        print("STS credentials are valid. Nothing to do.")
    return True
def destroy_dns(app='', env='dev', **_):
    """Delete the Route53 CNAME records belonging to *app* in *env*.

    Args:
        app (str): Spinnaker Application name.
        env (str): Deployment environment.

    Returns:
        bool: True upon successful completion.
    """
    client = boto3.Session(profile_name=env).client('route53')
    record = get_details(app=app, env=env).dns_elb()

    for zone_id in get_dns_zone_ids(env=env, facing='external'):
        record_sets = client.list_resource_record_sets(
            HostedZoneId=zone_id,
            StartRecordName=record,
            StartRecordType='CNAME',
            MaxItems='1')

        for found_record in record_sets['ResourceRecordSets']:
            assert destroy_record(client=client, found_record=found_record, record=record, zone_id=zone_id)

    return True
def destroy_s3(app='', env='dev', **_):
    """Remove the Archaius S3 objects for *app* in *env*.

    Args:
        app (str): Application name
        env (str): Deployment environment/account name

    Returns:
        boolean: True if destroyed successfully
    """
    archaius = get_details(app=app, env=env).archaius()

    s3_resource = boto3.Session(profile_name=env).resource('s3')
    bucket = s3_resource.Bucket(archaius['bucket'])

    # Delete every object under the application's Archaius prefix.
    for item in bucket.objects.filter(Prefix=archaius['path']):
        item.Object().delete()
        LOG.info('Deleted: %s/%s', item.bucket_name, item.key)

    return True
def get_lambda_arn(app, account, region):
    """Look up the ARN of a Lambda function by name.

    Args:
        account (str): AWS account name.
        region (str): Region name, e.g. us-east-1
        app (str): Lambda function name

    Returns:
        str: ARN for requested lambda function

    Raises:
        LambdaFunctionDoesNotExist: No function named *app* was found.
    """
    session = boto3.Session(profile_name=account, region_name=region)
    lambda_client = session.client('lambda')

    # Walk every page of functions and return as soon as the name matches.
    paginator = lambda_client.get_paginator('list_functions')
    for page in paginator.paginate():
        for function in page['Functions']:
            if function['FunctionName'] == app:
                arn = function['FunctionArn']
                LOG.debug("Lambda ARN for lambda function %s is %s.", app, arn)
                return arn

    LOG.fatal('Lambda function with name %s not found in %s %s', app, account, region)
    raise LambdaFunctionDoesNotExist(
        'Lambda function with name {0} not found in {1} {2}'.format(app, account, region))
def get_lambda_alias_arn(app, account, region):
    """Look up the ARN of a Lambda alias.

    Assumes that account name is equal to alias name.

    Args:
        account (str): AWS account name.
        region (str): Region name, e.g. us-east-1
        app (str): Lambda function name

    Returns:
        str: ARN for requested lambda alias

    Raises:
        LambdaAliasDoesNotExist: No alias named *account* exists on *app*.
    """
    session = boto3.Session(profile_name=account, region_name=region)
    lambda_client = session.client('lambda')

    aliases = lambda_client.list_aliases(FunctionName=app)['Aliases']
    for alias in aliases:
        if alias['Name'] == account:
            LOG.info('Found ARN for alias %s for function %s', account, app)
            return alias['AliasArn']

    fatal_message = 'Lambda alias {0} of function {1} not found'.format(account, app)
    LOG.fatal(fatal_message)
    raise LambdaAliasDoesNotExist(fatal_message)
def get_dns_zone_ids(env='dev', facing='internal'):
    """Get Route 53 Hosted Zone IDs for _env_.

    Args:
        env (str): Deployment environment.
        facing (str): Type of ELB, external or internal.

    Returns:
        list: Hosted Zone IDs for _env_. Only *PrivateZone* when _facing_
        is internal.
    """
    client = boto3.Session(profile_name=env).client('route53')
    zones = client.list_hosted_zones_by_name(DNSName='.'.join([env, DOMAIN]))

    zone_ids = []
    for zone in zones['HostedZones']:
        LOG.debug('Found Hosted Zone: %s', zone)

        # Internal deployments only take private zones; external takes all.
        if facing != 'external' and not zone['Config']['PrivateZone']:
            continue

        LOG.info('Using %(Id)s for "%(Name)s", %(Config)s', zone)
        zone_ids.append(zone['Id'])

    LOG.debug('Zone IDs: %s', zone_ids)
    return zone_ids
def update_dns_zone_record(env, zone_id, **kwargs):
    """Upsert a Route53 CNAME record in the _env_ zone.

    Args:
        env (str): Deployment environment.
        zone_id (str): Route53 zone id.

    Keyword Args:
        dns_name (str): FQDN of application's dns entry to add/update.
        dns_name_aws (str): FQDN of AWS resource
        dns_ttl (int): DNS time-to-live (ttl)
    """
    client = boto3.Session(profile_name=env).client('route53')
    response = {}

    zone_name = client.get_hosted_zone(Id=zone_id)['HostedZone']['Name'].rstrip('.')
    dns_name = kwargs.get('dns_name')

    if not (dns_name and dns_name.endswith(zone_name)):
        # Record does not belong in this zone; leave it alone.
        LOG.info('Skipping creating DNS record %s in non-matching Hosted Zone %s (%s)', dns_name, zone_id, zone_name)
    else:
        dns_name_aws = kwargs.get('dns_name_aws')

        # Rendered JSON change batch that will be sent to Route53.
        dns_json = get_template(template_file='infrastructure/dns_upsert.json.j2', **kwargs)

        LOG.info('Attempting to create DNS record %s (%s) in Hosted Zone %s (%s)', dns_name, dns_name_aws, zone_id,
                 zone_name)
        try:
            response = client.change_resource_record_sets(
                HostedZoneId=zone_id,
                ChangeBatch=json.loads(dns_json),
            )
            LOG.info('Upserted DNS record %s (%s) in Hosted Zone %s (%s)', dns_name, dns_name_aws, zone_id, zone_name)
        except botocore.exceptions.ClientError as error:
            LOG.info('Error creating DNS record %s (%s) in Hosted Zone %s (%s)', dns_name, dns_name_aws, zone_id,
                     zone_name)
            LOG.debug(error)

    LOG.debug('Route53 JSON Response: \n%s', pformat(response))
def get_sns_topic_arn(topic_name, account, region):
    """Resolve an SNS topic name to its ARN.

    Args:
        topic_name (str): Name of the topic to lookup.
        account (str): Environment, e.g. dev
        region (str): Region name, e.g. us-east-1

    Returns:
        str: ARN for requested topic name

    Raises:
        SNSTopicNotFound: No topic with the requested name exists.
    """
    # Caller already passed a full ARN ("arn:aws:sns:region:account:name"):
    # hand it back untouched.
    if topic_name.startswith('arn:aws:sns:') and topic_name.count(':') == 5:
        return topic_name

    session = boto3.Session(profile_name=account, region_name=region)
    sns_client = session.client('sns')

    for topic in sns_client.list_topics()['Topics']:
        arn = topic['TopicArn']
        if arn.split(':')[-1] == topic_name:
            return arn

    LOG.critical("No topic with name %s found.", topic_name)
    raise SNSTopicNotFound('No topic with name {0} found'.format(topic_name))