我们从 Python 开源项目中，提取了以下 50 个代码示例，用于说明如何使用 googleapiclient.discovery.build()。
def title_from_youtube(bot, url):
    """Return the title of the YouTube video at *url*, or "" on any failure.

    Failures (missing API key, unparsable URL, empty/erroring API
    response) are logged and reported as the empty string.
    """
    try:
        youtube_api_key = bot.config.get_by_path(["spotify", "youtube"])
        youtube_client = build("youtube", "v3", developerKey=youtube_api_key)
    except (KeyError, TypeError) as e:
        logger.error("<b>YouTube API key isn't configured:</b> {}".format(e))
        return ""

    # Regex by mantish from http://stackoverflow.com/a/9102270 to get the
    # video id from a YouTube URL.
    # FIX: the dot in "youtu.be" is now escaped — previously it matched any
    # character, so hosts like "youtuXbe" would also be accepted.
    match = re.match(
        r"^.*(youtu\.be\/|v\/|u\/\w\/|embed\/|watch\?v=|\&v=)([^#\&\?]*).*",
        url)
    if match and len(match.group(2)) == 11:
        video_id = match.group(2)
    else:
        logger.error("Unable to extract video id: {}".format(url))
        return ""

    # YouTube response is JSON.
    try:
        response = youtube_client.videos().list(  # pylint: disable=no-member
            part="snippet", id=video_id).execute()
        items = response.get("items", [])
        if items:
            return items[0]["snippet"]["title"]
        else:
            logger.error("<b>YouTube response was empty:</b> {}"
                         .format(response))
            return ""
    except YouTubeHTTPError as e:
        logger.error("Unable to get video entry from {}, {}".format(url, e))
        return ""
def create_service(self, host):
    """Build an authenticated Kintaro content API client for *host*.

    Returns a discovery client for the 'content' v1 API, using the
    host-specific discovery document URL.
    """
    credentials = oauth.get_or_create_credentials(
        scope=OAUTH_SCOPES, storage_key=STORAGE_KEY)
    http = httplib2.Http(ca_certs=utils.get_cacerts_path())
    http = credentials.authorize(http)
    # Kintaro's server doesn't seem to be able to refresh expired tokens
    # properly (responds with a "Stateless token expired" error). So we
    # manage state ourselves and refresh slightly more often than once
    # per hour.
    now = datetime.datetime.now()
    if self._last_run is None \
            or now - self._last_run >= datetime.timedelta(minutes=50):
        credentials.refresh(http)
        self._last_run = now
    url = DISCOVERY_URL.replace('{host}', host)
    return discovery.build('content', 'v1', http=http,
                           discoveryServiceUrl=url)
def tear_down_gce_cluster(conf):
    """Delete every GCE instance listed in conf["nodes"] and block until done.

    Deletions are issued first, then each resulting zone operation is
    polled (1 s interval) until it reports DONE.
    """
    credentials = GoogleCredentials.get_application_default()
    gce = discovery.build("compute", "v1", credentials=credentials)
    zone_operations = []
    for node in conf["nodes"]:
        print("Deleting node on virtual machine {}...".format(node["vmID"]))
        zone_operations.append(delete_instance(gce, node["vmID"]))
    # Poll each operation; DONE may still carry an "error" key (see TODO).
    for op in zone_operations:
        while True:
            result = gce.zoneOperations().get(project=GCP_PROJECT_ID,
                                              zone=GCE_ZONE_ID,
                                              operation=op["name"]).execute()
            if result["status"] == "DONE":
                # if "error" in result: raise Exception(result["error"])  # TODO handle error
                print("Deleted node on virtual machine {}".format(
                    result["targetLink"].split("/")[-1]))
                break
            sleep(1)
    print("Cluster torn down correctly. Bye!")
def get(self, destination):
    """Fetch one named GKE cluster — or all clusters in the zone — and
    register each (plus its node pools) via Cluster(...).add().

    Reads 'zone', 'project', and optionally 'name' from self._data;
    stores *destination* on the instance for later use.
    """
    service = build('container', 'v1')
    cl = service.projects().zones().clusters()
    self._zone = self._data.get('zone')
    self._project = self._data.get('project')
    self._destination = destination
    cluster_name = self._data.get('name')
    if cluster_name:
        # Single named cluster.
        cluster = cl.get(projectId=self._project, zone=self._zone,
                         clusterId=cluster_name).execute()
        Cluster(self.parse_cluster(cluster)).add(cluster_name)
        self.add_cluster_nodepools(cluster_name, cluster.get('nodePools'))
    else:
        # Every cluster in the project/zone.
        clusters = cl.list(projectId=self._project, zone=self._zone).execute()
        for cluster in clusters['clusters']:
            Cluster(self.parse_cluster(cluster)).add(cluster['name'])
            self.add_cluster_nodepools(cluster.get('name'),
                                       cluster.get('nodePools'),
                                       self._data.get('get_default_nodepools'))
def find(self, item):
    """Query Google Custom Search for the price of *item*.

    Returns (original_description, price, currency) on success; on any
    failure the exception is logged and None is returned implicitly.
    """
    try:
        # NOTE(review): __name__ is the *module* name here, not the class
        # name the log message implies — confirm intent.
        log.info('search for {item} through {class_name}'.format(item=item, class_name=__name__))
        service = build("customsearch", "v1", developerKey=google_developer_key)
        # https://developers.google.com/custom-search/json-api/v1/reference/cse/list#response
        response = service.cse().list(
            q='how much is the ' + item,
            cx=google_custom_search_engine_key,
        ).execute()
        # log.debug('RESPONSE = {response}'.format(pprint.pprint(response)))
        original_description, price, currency = parse_response(response)
        return original_description, price, currency
    # TODO identify proper Exception to expect
    except Exception as e:
        log.error(e)
def authorize(self):
    """ Connect with api and build youtube service.

    Returns False (and does nothing) when a client already exists;
    otherwise builds self.youtube and returns None.
    NOTE: uses Python 2 `print` statement syntax.
    """
    print 'Authorizing...'
    if self.youtube:
        print 'Already authorized'
        return False
    self.youtube = build(self.YOUTUBE_API_SERVICE_NAME,
                         self.YOUTUBE_API_VERSION,
                         developerKey=self.DEVELOPER_KEY)


#
#
# returns boolean
# True if skip video (not a candidate)
#
def insert_entity(projectId, product, categories, table_name, version="v1",
                  prefix="", items="items"):
    """List a GCP API resource and insert every item into a TinyDB table.

    Args:
        projectId: project whose DB file (project_dbs/<id>.json) is written.
        product: discovery API name (e.g. 'compute').
        categories: resource path, e.g. ['instances'] — walked via getattr.
        table_name: TinyDB table receiving the listed items.
        version: API version (default "v1").
        prefix: optional prefix prepended to projectId in the list call.
        items: response key holding the result list (default "items").
    """
    db = TinyDB("project_dbs/" + projectId + ".json")
    service = discovery.build(product, version, credentials=storage.get())
    # FIX: walk a *copy* of the path; the original popped from the caller's
    # list, mutating it as a side effect (and crashed when it was empty).
    api_entity = service
    for category in list(categories):
        api_entity = getattr(api_entity, category)()
    request = api_entity.list(project=prefix + projectId)
    try:
        # Page through results until list_next() is exhausted.
        while request is not None:
            response = request.execute()
            for item in response[items]:
                db.table(table_name).insert(item)
            try:
                request = api_entity.list_next(previous_request=request,
                                               previous_response=response)
            except AttributeError:
                # Resource has no list_next (single-page API).
                request = None
    except KeyError:
        # Response had no `items` key — nothing to insert.
        pass
def list_projects(project_or_org, specifier):
    """Insert every non-deleted project matching *specifier* into the
    module-level TinyDB under the 'Project' table.

    project_or_org selects the filter: "organization" filters by parent
    id, "project" filters by name; anything else raises.
    """
    service = discovery.build('cloudresourcemanager', 'v1',
                              credentials=storage.get())
    # Dispatch table instead of an if/elif chain.
    filter_templates = {
        "organization": 'parent.id:%s',
        "project": 'name:%s',
    }
    if project_or_org not in filter_templates:
        raise Exception('Organization or Project not specified.')
    request = service.projects().list(
        filter=filter_templates[project_or_org] % specifier)
    while request is not None:
        response = request.execute()
        for entry in response['projects']:
            if entry['lifecycleState'] != "DELETE_REQUESTED":
                db.table('Project').insert(entry)
        request = service.projects().list_next(
            previous_request=request, previous_response=response)
def main(): takephoto() # First take a picture """Run a label request on a single image""" credentials = GoogleCredentials.get_application_default() service = discovery.build('vision', 'v1', credentials=credentials) with open('image.jpg', 'rb') as image: image_content = base64.b64encode(image.read()) service_request = service.images().annotate(body={ 'requests': [{ 'image': { 'content': image_content.decode('UTF-8') }, 'features': [{ 'type': 'FACE_DETECTION', 'maxResults': 10 }] }] }) response = service_request.execute() print json.dumps(response, indent=4, sort_keys=True) #Print it out and make it somewhat pretty.
def main(): takephoto() # First take a picture """Run a label request on a single image""" credentials = GoogleCredentials.get_application_default() service = discovery.build('vision', 'v1', credentials=credentials) with open('image.jpg', 'rb') as image: image_content = base64.b64encode(image.read()) service_request = service.images().annotate(body={ 'requests': [{ 'image': { 'content': image_content.decode('UTF-8') }, 'features': [{ 'type': 'LABEL_DETECTION', 'maxResults': 10 }] }] }) response = service_request.execute() print json.dumps(response, indent=4, sort_keys=True) #Print it out and make it somewhat pretty.
def youtube_search(keyword, page_token, max_results=10):
    """Search YouTube for *keyword* and return the raw search response dict.

    Args:
        keyword: query string passed to search().list(q=...).
        page_token: pageToken for pagination (None/"" for the first page).
        max_results: page size (default 10).

    NOTE(review): the original docstring was non-ASCII and was garbled in
    extraction; it described threading ``pageToken`` through the view's
    GET parameters so templates can render next/previous paging links.
    """
    youtube = build(
        YOUTUBE_API_SERVICE_NAME,
        YOUTUBE_API_VERSION,
        developerKey=DEVELOPER_KEY
    )
    search_response = youtube.search().list(
        q=keyword,
        part="id,snippet",
        maxResults=max_results,
        pageToken=page_token
    ).execute()
    return search_response
def wait_for_operation(self, zone, op_response, retry_count=MAX_API_RETRY_COUNT):
    """Poll a compute zone operation until DONE (or abort), retrying API
    failures up to *retry_count* times via recursion.

    Returns the final operation status string, or None when retries are
    exhausted. NOTE: Python 2 ``except X, e`` syntax.
    """
    try:
        # "True and" is redundant; the loop exits via return once the
        # operation is DONE or abort_all is set.
        while True and op_response is not None:
            # The client is rebuilt on every poll iteration (1/s).
            compute = discovery.build(API_TYPE, API_VERSION,
                                      credentials=self.config.credentials)
            result = compute.zoneOperations().get(
                project=self.config.PROJECT_ID, zone=zone,
                operation=op_response['name']).execute()
            if result['status'] == 'DONE' or self.abort_all:
                return result['status']
            else:
                time.sleep(1)
    except Exception, exception:
        if retry_count > 0 and not self.abort_all:
            self._log(API_RETRY_MESSAGE % (sys._getframe().f_code.co_name, exception))
            return self.wait_for_operation(zone, op_response, (retry_count - 1))
        else:
            # NOTE(review): "NESSAGE" typo is in the externally-defined
            # constant's name; cannot be fixed from this block.
            self._log(API_MAX_RETRY_NESSAGE % (sys._getframe().f_code.co_name, MAX_API_RETRY_COUNT, exception))
def get_drive_service():
    """Return a Google Drive v3 client, running the interactive OAuth
    flow when no valid cached token exists."""
    oauth_flow = client.flow_from_clientsecrets(
        get_credentials_path('secret.json'),
        'https://www.googleapis.com/auth/drive')
    oauth_flow.user_agent = USER_AGENT_NAME
    token_store = Storage(get_credentials_path('storage.dat', False))
    creds = token_store.get()
    if not creds or creds.invalid:
        # No cached token (or it was revoked): run the flow and persist.
        cli_flags = tools.argparser.parse_args(args=[])
        creds = tools.run_flow(oauth_flow, token_store, cli_flags)
    authorized_http = creds.authorize(httplib2.Http())
    return discovery.build('drive', 'v3', http=authorized_http)
def __init__(self):
    """ Generate an authorized YouTube API client and S3 client """
    # Rebuild OAuth2 credentials from settings and refresh immediately so
    # the access token is valid before the first API call.
    credentials = oauth2client.client.GoogleCredentials(
        settings.YT_ACCESS_TOKEN, settings.YT_CLIENT_ID,
        settings.YT_CLIENT_SECRET, settings.YT_REFRESH_TOKEN, None,
        'https://accounts.google.com/o/oauth2/token', None)
    authorization = credentials.authorize(httplib2.Http())
    credentials.refresh(authorization)
    # self.client: YouTube Data API v3 client.
    self.client = build('youtube', 'v3', credentials=credentials)
    # self.s3: boto3 S3 client (uses the ambient AWS credentials).
    self.s3 = boto3.client('s3')
def _get_bigquery_service(self):
    """
    Connect to the BigQuery service.

    Calling ``GoogleCredentials.get_application_default`` requires that
    you either be running in the Google Cloud, or have the
    ``GOOGLE_APPLICATION_CREDENTIALS`` environment variable set to the
    path to a credentials JSON file.

    :return: authenticated BigQuery service connection object
    :rtype: `googleapiclient.discovery.Resource <http://google.github.io/\
    google-api-python-client/docs/epy/googleapiclient.discovery.\
    Resource-class.html>`_
    """
    logger.debug('Getting Google Credentials')
    app_default_creds = GoogleCredentials.get_application_default()
    logger.debug('Building BigQuery service instance')
    return build('bigquery', 'v2', credentials=app_default_creds)
def get_google_service(service_type=None, version=None):
    '''Build an authenticated Google API client.

    FIX: the previous docstring was copy-pasted from an unrelated
    ``get_url`` helper; ``== None`` comparisons replaced with idiomatic
    defaulting.

    :param service_type: the service to get (default is "storage")
    :param version: version to use (default is "v1")
    '''
    service_type = service_type or "storage"
    version = version or "v1"
    credentials = GoogleCredentials.get_application_default()
    return build(service_type, version, credentials=credentials)


##########################################################################################
# GOOGLE STORAGE API #####################################################################
##########################################################################################
def get_build_params(metadata):
    '''get_build_params uses get_build_metadata to retrieve corresponding
    meta data values for a build, returning them as a dict.

    :param metadata: a list, each item a dictionary of metadata, in format:
      metadata = [{'key': 'repo_url', 'value': repo_url },
                  {'key': 'repo_id', 'value': repo_id },
                  {'key': 'credential', 'value': credential },
                  {'key': 'response_url', 'value': response_url },
                  {'key': 'token', 'value': token},
                  {'key': 'commit', 'value': commit }]

    Items whose value is None are resolved via get_build_metadata().
    Secret-like keys ('token', 'secret', 'credential') are never logged.
    '''
    params = dict()
    for item in metadata:
        # FIX: identity test (is None) instead of equality (== None).
        if item['value'] is None:
            item['value'] = get_build_metadata(key=item['key'])
        params[item['key']] = item['value']
        if item['key'] not in ['token', 'secret', 'credential']:
            bot.info('%s is set to %s' % (item['key'], item['value']))
    return params
def wait_operation(operation):
    """Block until the given compute zone operation is DONE.

    Returns True on success, False when the finished operation reports an
    'error'. Prints a dot every 2 s while waiting.
    NOTE(review): relies on module-level `project` and `zone` globals.
    """
    # NOT thread safe
    credentials = GoogleCredentials.get_application_default()
    compute = discovery.build('compute', 'v1', credentials=credentials)
    # Wait for confirmation that the instance is created
    while True:
        result = compute.zoneOperations().get(
            project=project,
            zone=zone,
            operation=operation['name']).execute()
        if result['status'] == 'DONE':
            return False if ('error' in result) else True
        sys.stdout.write(".")
        sys.stdout.flush()
        time.sleep(2)
# [END wait_operation]


# [START list_instances]
def list_instances(project, zone, globalinstances, distro, includeterm):
    """Return the names of instances whose name contains the expected
    prefix/distro marker, or False when none match.

    Only RUNNING instances are included unless *includeterm* is truthy.
    """
    # NOT thread safe
    credentials = GoogleCredentials.get_application_default()
    compute = discovery.build('compute', 'v1', credentials=credentials)
    listing = compute.instances().list(project=project, zone=zone).execute()
    if 'items' not in listing:
        return False
    print('%s instances in zone %s:' % (project, zone))
    # Per-machine suffix unless instances are shared globally.
    wanted = prefix + '-' + distro
    if not globalinstances:
        wanted += '-' + format(str(uuid.getnode())[:8:-1])
    matched = []
    for inst in listing['items']:
        if wanted not in inst['name']:
            continue
        print(' - ' + inst['name'] + ' - ' + inst['status'])
        if inst['status'] == 'RUNNING' or includeterm:
            matched.append(inst['name'])
    return matched if matched else False
# [END list_instances]


# [START check_gceproject]
def _build_request(self, verb, verb_arguments): """Builds HttpRequest object. Args: verb (str): Request verb (ex. insert, update, delete). verb_arguments (dict): Arguments to be passed with the request. Returns: httplib2.HttpRequest: HttpRequest to be sent to the API. """ method = getattr(self._component, verb) # Python insists that keys in **kwargs be strings (not variables). # Since we initially build our kwargs as a dictionary where one of the # keys is a variable (target), we need to convert keys to strings, # even though the variable in question is of type str. method_args = {str(k): v for k, v in verb_arguments.iteritems()} return method(**method_args)
def __init__(self, db_filename=None):
    """Build a Natural Language v1 client and prepare entity storage.

    Args:
        db_filename: sqlite3 file to save to; defaults to a
            timestamped 'entities<epoch>.db'.
    """
    credentials = GoogleCredentials.get_application_default()
    scoped_credentials = credentials.create_scoped(
        ['https://www.googleapis.com/auth/cloud-platform'])
    http = httplib2.Http()
    scoped_credentials.authorize(http)
    # NOTE(review): build() receives the *unscoped* credentials while the
    # http object carries the scoped ones — presumably the authorized http
    # takes precedence; confirm before relying on `credentials=` here.
    self.service = discovery.build('language', 'v1', http=http,
                                   credentials=credentials)
    # This list will store the entity information gleaned from the
    # image files.
    self.entity_info = []
    # This is the filename of the sqlite3 database to save to
    self.db_filename = db_filename or 'entities{}.db'.format(
        int(time.time()))
def main(project_id, job_name):
    """Review the transfer operations associated with a transfer job."""
    creds = GoogleCredentials.get_application_default()
    transfer_client = discovery.build(
        'storagetransfer', 'v1', credentials=creds)

    # The list filter is a JSON string naming the project and the job.
    op_filter = (
        '{{"project_id": "{project_id}", '
        '"job_names": ["{job_name}"]}}'
    ).format(project_id=project_id, job_name=job_name)

    listing = transfer_client.transferOperations().list(
        name="transferOperations", filter=op_filter).execute()
    print('Result of transferOperations/list: {}'.format(
        json.dumps(listing, indent=4, sort_keys=True)))
# [END main]
def __init__(self, source_language: str, target_language: str, key: str,
             translator_name: str = 'Google', quality: int = 50,
             service_name: str = 'Google') -> None:
    """Set up a Google Translate v2 backend.

    Args:
        source_language: language code to translate from.
        target_language: language code to translate to.
        key: Google API developer key used to build the client.
        translator_name/quality/service_name: forwarded to the base class.
    """
    super(GoogleTranslator, self).__init__(
        source_language=source_language,
        target_language=target_language,
        service_name=service_name,
        translator_name=translator_name,
        quality=quality
    )
    self.key = key
    self.translation_service = build('translate', 'v2', developerKey=key)
    # HTML-escape outgoing queries and unescape API responses.
    self.add_query_processor(EscapeHtml())
    self.add_response_processor(UnescapeHtml())
def post(self):
    """Translate an event's name and description into the requested
    language and write them back as JSON."""
    if self.json_body:
        event_id = self.json_body.get('event_id')
        language = self.json_body.get('language') or self.json_body.get('locale')
        if not event_id:
            self.add_error('Need to pass event_id argument')
        if not language:
            self.add_error('Need to pass language/locale argument')
    else:
        self.add_error('Need to pass a post body of json params')

    # Remap our traditional/simplified chinese languages
    chinese_remap = {'zh': 'zh-TW', 'zh-Hant': 'zh-TW', 'zh-Hans': 'zh-CN'}
    language = chinese_remap.get(language, language)

    self.errors_are_fatal()

    db_event = eventdata.DBEvent.get_by_id(event_id)
    service = build('translate', 'v2',
                    developerKey=keys.get('google_server_key'))
    result = service.translations().list(
        target=language, format='text',
        q=[db_event.name or '', db_event.description or '']).execute()
    translated = [x['translatedText'] for x in result['translations']]
    self.write_json_success({'name': translated[0],
                             'description': translated[1]})
def analize(self, text):
    """Annotate *text* with the Natural Language API (syntax extraction only).

    Note: the method name's spelling is kept as-is — callers use it.
    """
    transport = httplib2.Http()
    self.scoped_credentials.authorize(transport)
    service = discovery.build('language', 'v1beta1', http=transport)
    annotate_body = {
        'document': {
            'type': 'PLAIN_TEXT',
            'content': text,
        },
        'features': {
            'extract_syntax': True,
        },
        'encodingType': 'UTF16',
    }
    return service.documents().annotateText(body=annotate_body).execute()
def main(max_results): credentials = get_credentials() http = credentials.authorize(httplib2.Http()) global service service = discovery.build('gmail', 'v1', http=http) try: user_id = "me" label_id = ["INBOX","IMPORTANT"] query = "is:unread" response = service.users().messages().list(userId=user_id,labelIds=label_id,maxResults=max_results,q=query).execute() messages = [] if 'messages' in response: messages.extend(response['messages']) while 'nextPageToken' in response: page_token = response['nextPageToken'] response = service.users().messages().list(userId=user_id,labelIds=label_id,maxResults=max_results,q=query,pageToken=page_token).execute() return messages except errors.HttpError, error: print 'An error occurred: %s' % error
def setUp(self):
    """Build a webmasters v3 client backed by canned HTTP responses
    (discovery doc, then an empty query result)."""
    self.clientquery = {'property_uri': 'https://www.example.com/',
                        'siteMode': 'en-us',
                        'clientName': 'Example',
                        'query_date': '2016-09-01'}

    self.emptyresponse = '''{"responseAggregationType": "byPage"}'''
    self.p = (os.path.dirname(os.path.abspath(__file__)))
    self.build_response_data = '%s/build_response_data.json' % (self.p)
    # FIX: read the fixture via a context manager instead of leaking the
    # open file handle.
    with open(self.build_response_data, 'rb') as fixture:
        discovery_doc = fixture.read()
    self.http_auth = HttpMockSequence([
        ({'status': '200'}, discovery_doc),
        ({'status': '200'}, self.emptyresponse.encode('UTF-8'))
    ])
    self.service = build('webmasters', 'v3', http=self.http_auth,
                         developerKey='mocked_api_key_1234')

    # Mock the service attribute within Apiclient
    self.mocked_prop = PropertyMock(return_value=self.service)
def setUp(self):
    """Build a webmasters v3 client whose mocked HTTP layer returns a 403
    permission error, for testing error handling."""
    self.clientquery = {'property_uri': 'https://www.example.com/',
                        'siteMode': 'en-us',
                        'clientName': 'Example',
                        'query_date': '2016-09-01'}

    self.response = '''{"error": {"errors": [{"domain": "global","reason": "forbidden", "message": "User does not have sufficient permission for site 'https://www.example.com/'. See also: https://support.google.com/webmasters/answer/2451999."}], "code": 403, "message": "User does not have sufficient permission for site 'https://www.example.com/'. See also: https://support.google.com/webmasters/answer/2451999."}}'''
    self.p = (os.path.dirname(os.path.abspath(__file__)))
    self.build_response_data = '%s/build_response_data.json' % (self.p)
    # FIX: read the fixture via a context manager instead of leaking the
    # open file handle.
    with open(self.build_response_data, 'rb') as fixture:
        discovery_doc = fixture.read()
    self.http_auth = HttpMockSequence([
        ({'status': '403'}, discovery_doc),
        ({'status': '403'}, self.response.encode('UTF-8'))
    ])
    self.service = build('webmasters', 'v3', http=self.http_auth,
                         developerKey='mocked_api_key_1234')
    self.mocked_prop = PropertyMock(return_value=self.service)
def flush(self):
    """Write buffered log entries to Cloud Logging, retrying up to 6 times
    with linear backoff; rebuilds the client on broken-pipe errors.

    NOTE(review): if all 6 attempts fail, the entries stay buffered and
    no error is surfaced — confirm this best-effort behavior is intended.
    """
    if not self.entries:
        return
    for _repeat in range(6):
        try:
            self.body['entries'] = self.entries
            resp = self.connection.entries().write(
                body=self.body).execute()
            # Success: clear the buffer and stop retrying.
            self.entries = []
            break
        except IOError as e:
            sleep(_repeat * 2 + 1)
            if e.errno == errno.EPIPE:
                # Broken pipe: rebuild the logging client before retrying.
                credentials = GoogleCredentials.get_application_default()
                self.connection = build('logging', 'v2beta1',
                                        credentials=credentials)
        except Exception:
            # Other transient failure: back off a bit longer and retry.
            sleep(_repeat * 2 + 5)
def __init__(self, model_name, project_id=None):
    """
    Args:
        model_name: the name of the model. It can be a model full name
            ("projects/[project_id]/models/[model_name]") or just
            [model_name].
        project_id: project_id of the models. If not provided and
            model_name is not a full name (not including project_id),
            default project_id will be used.
    """
    # BUG FIX: previously _project_id, _credentials and _api were only
    # assigned when project_id was None, leaving the object half-built
    # (AttributeError on later use) whenever a project_id was passed.
    if project_id is None:
        project_id = datalab.Context.default().project_id
    self._project_id = project_id
    self._credentials = datalab.Context.default().credentials
    self._api = discovery.build('ml', 'v1', credentials=self._credentials)
    if not model_name.startswith('projects/'):
        model_name = ('projects/%s/models/%s' % (self._project_id, model_name))
    self._full_model_name = model_name
    self._model_name = self._full_model_name.split('/')[-1]
def __init__(self, name, context=None):
    """Initializes an instance of a CloudML Job.

    Args:
        name: the name of the job. It can be an operation full name
            ("projects/[project_id]/jobs/[operation_name]") or just
            [operation_name].
        context: an optional Context object providing project_id and
            credentials.
    """
    super(Job, self).__init__(name)
    if context is None:
        context = datalab.Context.default()
    self._context = context
    self._api = discovery.build('ml', 'v1',
                                credentials=self._context.credentials)
    # Normalize short names to the fully-qualified job resource name.
    if not name.startswith('projects/'):
        name = 'projects/' + self._context.project_id + '/jobs/' + name
    self._name = name
    # Fetch the job's current state immediately.
    self._refresh_state()
def delete(config, disk_name=None, disk_zone=None):
    """Delete the named data disk in *disk_zone*, or in every configured
    zone when no zone is given, and wait for each deletion to finish.

    Raises DataDiskError when the delete request itself fails.
    """
    projectId = config.project_id
    # BUG FIX: the old comprehension produced one copy of disk_zone per
    # configured zone, issuing the same delete N times.
    if disk_zone is not None:
        zones = [disk_zone]
    else:
        zones = config.zones.split(',')

    credentials = GoogleCredentials.get_application_default()
    http = credentials.authorize(httplib2.Http())
    if credentials.access_token_expired:
        credentials.refresh(http)
    gce = discovery.build('compute', 'v1', http=http)

    for z in zones:
        try:
            resp = gce.disks().delete(project=projectId, zone=z,
                                      disk=disk_name).execute()
        except HttpError as e:
            raise DataDiskError("Couldn't delete data disk {n}: {reason}".format(n=disk_name, reason=e))
        # Poll the zone operation until it reports DONE (or disappears).
        # NOTE(review): this is a busy-wait with no sleep between polls.
        while True:
            try:
                result = gce.zoneOperations().get(
                    project=projectId, zone=z,
                    operation=resp['name']).execute()
            except HttpError:
                break
            else:
                if result['status'] == 'DONE':
                    break
def create(apiName, apiVersion):
    """Return a discovery client for (apiName, apiVersion) using
    application-default credentials, refreshing the token if expired."""
    app_creds = GoogleCredentials.get_application_default()
    authorized_http = app_creds.authorize(httplib2.Http())
    if app_creds.access_token_expired:
        app_creds.refresh(authorized_http)
    return discovery.build(apiName, apiVersion, authorized_http)
def get_gcloud_storage():
    """Return a Cloud Storage JSON API (v1) client built from
    application-default credentials."""
    app_default = GoogleCredentials.get_application_default()
    client = discovery.build('storage', 'v1', credentials=app_default)
    return client
def search(q, max_results=10):
    """Search YouTube for *q* and return the raw search.list response.

    Args:
        q: free-text query string.
        max_results: page size; defaults to 10, the previously
            hard-coded value, so existing callers are unaffected.
    """
    # SECURITY: an API key committed to source control should be revoked
    # and loaded from configuration/environment instead. Kept here only
    # to preserve the example's behavior.
    DEVELOPER_KEY = "AIzaSyACCLlnn_hlOpNk5XUBpRqs-iZWpbTm-J4"
    YOUTUBE_API_SERVICE_NAME = "youtube"
    YOUTUBE_API_VERSION = "v3"

    youtube = build(
        YOUTUBE_API_SERVICE_NAME,
        YOUTUBE_API_VERSION,
        developerKey=DEVELOPER_KEY
    )
    # Call the search.list method to retrieve results matching the
    # specified query term.
    search_response = youtube.search().list(
        q=q,
        part="id,snippet",
        maxResults=max_results,
        type='video',
    ).execute()
    return search_response
def create_service():
    """Build a Content API v1 client using App Engine app-assertion
    credentials and the configured discovery URL."""
    credentials = appengine.AppAssertionCredentials(SCOPE)
    authed_http = credentials.authorize(httplib2.Http())
    credentials.refresh(authed_http)
    return discovery.build('content', 'v1', http=authed_http,
                           discoveryServiceUrl=DISCOVERY_URL)
def get_client_from_credentials(credentials):
    """Creates Pub/Sub client from a given credentials and returns it."""
    scoped = credentials
    if scoped.create_scoped_required():
        scoped = scoped.create_scoped(PUBSUB_SCOPES)
    # memcache-backed Http lets discovery documents be cached on GAE.
    transport = httplib2.Http(memcache)
    scoped.authorize(transport)
    return discovery.build('pubsub', 'v1', http=transport)
def __init__(self, api_discovery_file='vision_api.json'):
    """Build a Vision v1 client from application-default credentials.

    NOTE(review): *api_discovery_file* is accepted but never used — the
    client is built from DISCOVERY_URL instead. Confirm whether it can
    be removed or should be wired up.
    """
    self.credentials = GoogleCredentials.get_application_default()
    self.service = discovery.build(
        'vision', 'v1', credentials=self.credentials,
        discoveryServiceUrl=DISCOVERY_URL)
def _create_client(self):
    """Build and return a Vision v1 client from application-default
    credentials, using the configured discovery URL."""
    app_creds = GoogleCredentials.get_application_default()
    return discovery.build(
        'vision', 'v1',
        credentials=app_creds,
        discoveryServiceUrl=DISCOVERY_URL)
def main(input_dir):
    """Walk through all the not-yet-processed image files in the given
    directory, extracting any text from them and adding that text to an
    inverted index.
    """
    # Create a client object for the Vision API
    vision = VisionApi()
    # Create an Index object to build query the inverted index.
    index = Index()

    # Recursively collect every file under input_dir.
    all_paths = [
        os.path.join(folder, filename)
        for folder, subs, files in os.walk(input_dir)
        for filename in files
    ]

    # Keep only the files that have not been processed yet.
    pending = [path for path in all_paths
               if not index.document_is_processed(path)]

    for chunk in batch(pending):
        get_text_from_files(vision, index, chunk)
# [END get_text]
def get_vision_service():
    """Return a Vision API client built from application-default
    credentials and the configured discovery URL."""
    app_creds = GoogleCredentials.get_application_default()
    return discovery.build('vision', 'v1',
                           credentials=app_creds,
                           discoveryServiceUrl=DISCOVERY_URL)
# [END get_vision_service]


# [START identify_landmark]
def get_speech_service():
    """Return a Speech API (v1beta1) client authorized through the
    bundled service-account key, scoped to cloud-platform."""
    scoped_creds = GoogleCredentials.from_stream(
        'api/googleapi_auth/LecRec-a4f4c7931558.json').create_scoped(
            ['https://www.googleapis.com/auth/cloud-platform'])
    transport = httplib2.Http()
    scoped_creds.authorize(transport)
    return discovery.build('speech', 'v1beta1', http=transport)
# [END authenticating]
def fetch(self, **kwargs):
    '''
    Fetches an email using the Gmail API users.messages.get() method. It
    leverages the IsThisLegit service account to impersonate the user in
    order to retrieve the email by message ID. This prevents users from
    having to manually accept the OAuth permission dialog before
    reporting phishing emails.

    Expected kwargs:
        userId - The userID who reported the email
        messageId - The Gmail message ID to fetch

    Returns the raw RFC 2822 message bytes; raises EmailFetchError when
    the API returns nothing usable.
    '''
    userId = kwargs.get('userId')
    messageId = kwargs.get('messageId')
    scopes = ['https://www.googleapis.com/auth/gmail.readonly']
    # Domain-wide delegation: the service account acts as the user.
    credentials = ServiceAccountCredentials.from_json_keyfile_name(
        config['gae']['service_account_key'], scopes=scopes)
    delegated_credentials = credentials.create_delegated(userId)
    http_auth = delegated_credentials.authorize(Http())
    service = build('gmail', 'v1', http=http_auth)
    # format='raw' returns the whole message base64url-encoded.
    response = service.users().messages().get(
        userId=userId, id=messageId, format='raw').execute()
    if not response or 'raw' not in response:
        raise EmailFetchError('Error fetching email: User {}, thread {}'.
                              format(userId, messageId))
    message = base64.urlsafe_b64decode(str(response['raw']))
    return message
def main(argv):
    """Merge labels from a JSON file onto this GCE instance and its
    persistent disks (local disks are skipped).

    argv[1] must name a JSON file mapping label keys to values.
    """
    # Load label file.
    try:
        labels_path = argv[1]
    except IndexError:
        print("%s <labels.json> required!" % __file__, file=sys.stderr)
        sys.exit(1)
    try:
        # FIX: context manager (no leaked handle) and the error message now
        # reports argv[1] — the file actually read — not sys.argv[1].
        with open(labels_path) as label_file:
            new_labels = json.load(label_file)
    except ValueError as err:
        print("%s invalid json: %s" % (labels_path, err), file=sys.stderr)
        sys.exit(1)

    # Pull defaults from metadata.
    metadata = get_metadata()
    project, zone = itemgetter(1, 3)(metadata['zone'].split("/"))
    instance_name = metadata['name']

    # Google Creds
    creds = GoogleCredentials.get_application_default()

    # Describe Instance
    conn = discovery.build('compute', 'beta', credentials=creds)
    instance = conn.instances().get(project=project, zone=zone,
                                    instance=instance_name).execute()

    # Label Instance
    label(instance['selfLink'], creds.get_access_token().access_token,
          label_merge(instance.get('labels', {}),
                      instance["labelFingerprint"], new_labels))

    # Label Disks
    for i in instance['disks']:
        # Skip local disk
        if 'source' not in i:
            continue
        disk = conn.disks().get(project=project, zone=zone,
                                disk=i['source'].split('/')[-1]).execute()
        label(disk['selfLink'], creds.get_access_token().access_token,
              label_merge(disk.get('labels', {}),
                          disk["labelFingerprint"], new_labels))
def __get__(self, instance, instance_type):
    """Construct the API client, caching one client per thread.

    Descriptor protocol: returns the descriptor itself on class access,
    otherwise a discovery client cached in a thread-local (stored in the
    webapp2 app registry when in a request context).
    """
    if instance is None:
        return self

    thread_local = None
    try:
        app = webapp2.get_app()
        # Python Google API clients aren't threadsafe as they use httplib2
        # which isn't threadsafe.
        thread_local = app.registry.get(self)
        if thread_local is None:
            thread_local = threading.local()
            app.registry[self] = thread_local
    except AssertionError:
        # When not in a request context, use class thread local.
        thread_local = ThreadsafeClientLocal._class_thread_local

    cached_client = getattr(thread_local, 'api', None)
    if cached_client is None:
        credentials = client.GoogleCredentials.get_application_default()
        if credentials.create_scoped_required():
            credentials = credentials.create_scoped(
                'https://www.googleapis.com/auth/cloud-platform')
        cached_client = discovery.build(
            self.service,
            self.version,
            http=credentials.authorize(self.http),
            cache_discovery=self.cache_discovery)
        thread_local.api = cached_client
    return cached_client
def get_google_auth(service, version='v2'):
    """Return an authenticated discovery client for *service*/*version*
    using application-default credentials."""
    adc = GoogleCredentials.get_application_default()
    return discovery.build(service, version, credentials=adc)
def get(self): is_cron = self.request.headers.get('X-Appengine-Cron', False) # logging.info("is_cron is %s", is_cron) # Comment out the following check to allow non-cron-initiated requests. if not is_cron: return 'Blocked.' # These env vars are set in app.yaml. PROJECT = os.environ['PROJECT'] BUCKET = os.environ['BUCKET'] TEMPLATE = os.environ['TEMPLATE_NAME'] # Because we're using the same job name each time, if you try to launch one # job while another is still running, the second will fail. JOBNAME = PROJECT + '-twproc-template' credentials = GoogleCredentials.get_application_default() service = build('dataflow', 'v1b3', credentials=credentials) BODY = { "jobName": "{jobname}".format(jobname=JOBNAME), "gcsPath": "gs://{bucket}/templates/{template}".format( bucket=BUCKET, template=TEMPLATE), "parameters": {"timestamp": str(datetime.datetime.utcnow())}, "environment": { "tempLocation": "gs://{bucket}/temp".format(bucket=BUCKET), "zone": "us-central1-f" } } dfrequest = service.projects().templates().create( projectId=PROJECT, body=BODY) dfresponse = dfrequest.execute() logging.info(dfresponse) self.response.write('Done')
def __init__(self, account_json):
    """Create a Cloud DNS v1 client from a service-account key file and
    read the project id out of the same file."""
    scopes = ['https://www.googleapis.com/auth/ndev.clouddns.readwrite']
    sa_creds = ServiceAccountCredentials.from_json_keyfile_name(
        account_json, scopes)
    # cache_discovery=False avoids the discovery-cache warning/IO.
    self.dns = discovery.build('dns', 'v1', credentials=sa_creds,
                               cache_discovery=False)
    with open(account_json) as account:
        self.project_id = json.load(account)['project_id']
def cloud_service(credentials, service, version='v1'):
    """Build and return a discovery client for *service* at *version*
    authorized with *credentials*."""
    api_client = discovery.build(service, version, credentials=credentials)
    return api_client