我们从Python开源项目中,提取了以下49个代码示例,用于说明如何使用httplib2.HttpLib2Error()。
def _refresh(self, http_request):
    """Refresh the access token directly via the metadata service.

    Skips all the storage hoops and refreshes straight from the API.

    Args:
        http_request: callable matching the signature of
            httplib2.Http.request, used to make the refresh request.

    Raises:
        HttpAccessTokenRefreshError: When the refresh fails.
    """
    try:
        self._retrieve_info(http_request)
        self.access_token, self.token_expiry = _metadata.get_token(
            http_request, service_account=self.service_account_email)
    except httplib2.HttpLib2Error as e:
        raise HttpAccessTokenRefreshError(str(e))
def get_instances(self, project_id):
    """Gets all CloudSQL instances for a project.

    Args:
        project_id (int): The project id for a GCP project.

    Returns:
        list: A list of database Instance resource dicts for a project_id.
        https://cloud.google.com/sql/docs/mysql/admin-api/v1beta4/instances
        [{"kind": "sql#instance", "name": "sql_instance1", ...}
         {"kind": "sql#instance", "name": "sql_instance2", ...},
         {...}]

    Raises:
        ApiExecutionError: Raised if the call to the GCP CloudSQL API fails.
    """
    try:
        paged_results = self.repository.instances.list(project_id)
        return api_helpers.flatten_list_results(paged_results, 'items')
    except (errors.HttpError, HttpLib2Error) as e:
        # Logger.warn is deprecated; warning() is the supported spelling.
        LOGGER.warning(api_errors.ApiExecutionError(project_id, e))
        raise api_errors.ApiExecutionError('instances', e)
def get_global_operation(self, project_id, operation_id):
    """Get the status of a global Compute Engine operation.

    Args:
        project_id (str): The project id.
        operation_id (str): The operation id.

    Returns:
        dict: Global Operation status and info.
        https://cloud.google.com/compute/docs/reference/latest/globalOperations/get

    Raises:
        ApiNotEnabledError: Returns if the api is not enabled.
        ApiExecutionError: Returns if the api is not executable.
    """
    try:
        return self.repository.global_operations.get(
            project_id, operation_id)
    except (errors.HttpError, HttpLib2Error) as e:
        not_enabled, details = _api_not_enabled(e)
        if not_enabled:
            raise api_errors.ApiNotEnabledError(details, e)
        raise api_errors.ApiExecutionError(project_id, e)
def get_project(self, project_id):
    """Return the Compute Project resource for the given project.

    Args:
        project_id (str): The project id.

    Returns:
        dict: A Compute Project resource dict.
        https://cloud.google.com/compute/docs/reference/latest/projects/get
    """
    try:
        return self.repository.projects.get(project_id)
    except (errors.HttpError, HttpLib2Error) as e:
        not_enabled, details = _api_not_enabled(e)
        if not_enabled:
            raise api_errors.ApiNotEnabledError(details, e)
        raise api_errors.ApiExecutionError(project_id, e)
def get_group_members(self, group_key):
    """List every member of the specified group.

    Args:
        group_key (str): The group's unique id assigned by the Admin API.

    Returns:
        list: Member objects from the API.

    Raises:
        api_errors.ApiExecutionError: If group member retrieval fails.
    """
    try:
        pages = self.repository.members.list(group_key)
        return api_helpers.flatten_list_results(pages, 'members')
    except (errors.HttpError, HttpLib2Error) as e:
        raise api_errors.ApiExecutionError(group_key, e)
def get_groups(self, customer_id='my_customer'):
    """List all groups for a given customer.

    The default customer_id='my_customer' is a magic string used in place
    of the real customer id. See:
    https://developers.google.com/admin-sdk/directory/v1/guides/manage-groups#get_all_domain_groups

    Args:
        customer_id (str): The customer id to scope the request to.

    Returns:
        list: Group objects returned from the API.

    Raises:
        api_errors.ApiExecutionError: If groups retrieval fails.
    """
    try:
        pages = self.repository.groups.list(customer=customer_id)
        return api_helpers.flatten_list_results(pages, 'groups')
    except (errors.HttpError, HttpLib2Error) as e:
        raise api_errors.ApiExecutionError('groups', e)
def get_bigquery_projectids(self):
    """Request and page through bigquery project ids.

    Returns:
        list: Project ids enabled for bigquery, e.g.
        ['project-id', 'project-id', '...']; empty if none are enabled.
    """
    try:
        results = self.repository.projects.list(
            fields='nextPageToken,projects/id')
        flattened = api_helpers.flatten_list_results(results, 'projects')
    except (errors.HttpError, HttpLib2Error) as e:
        raise api_errors.ApiExecutionError('bigquery', e)
    return [item.get('id') for item in flattened if 'id' in item]
def get_datasets_for_projectid(self, project_id):
    """Return BigQuery datasets stored in the requested project.

    Args:
        project_id (str): String representing the project id.

    Returns:
        list: datasetReference objects for the project, e.g.
        [{'datasetId': 'dataset-id', 'projectId': 'project-id'}, {...}]
    """
    try:
        results = self.repository.datasets.list(
            resource=project_id,
            fields='datasets/datasetReference,nextPageToken',
            all=True)
        flattened = api_helpers.flatten_list_results(results, 'datasets')
    except (errors.HttpError, HttpLib2Error) as e:
        raise api_errors.ApiExecutionError(project_id, e)
    return [item.get('datasetReference') for item in flattened
            if 'datasetReference' in item]
def get_service(self, project_id, service_id):
    """Look up a specific App Engine service.

    Args:
        project_id (str): The id of the project.
        service_id (str): The id of the service to query.

    Returns:
        dict: A Service resource dict, or {} if the service is not found.
    """
    try:
        return self.repository.app_services.get(
            project_id, target=service_id)
    except (errors.HttpError, HttpLib2Error) as e:
        # A 404 means the service simply does not exist.
        if isinstance(e, errors.HttpError) and e.resp.status == 404:
            return {}
        raise api_errors.ApiExecutionError(project_id, e)
def list_services(self, project_id):
    """List the App Engine services of a project.

    Args:
        project_id (str): The id of the project.

    Returns:
        list: Service resource dicts, or [] if the app is not found.
    """
    try:
        pages = self.repository.app_services.list(project_id)
        return api_helpers.flatten_list_results(pages, 'services')
    except (errors.HttpError, HttpLib2Error) as e:
        # A 404 means the project has no App Engine app.
        if isinstance(e, errors.HttpError) and e.resp.status == 404:
            return []
        raise api_errors.ApiExecutionError(project_id, e)
def get_version(self, project_id, service_id, version_id):
    """Look up a specific version of an App Engine service.

    Args:
        project_id (str): The id of the project.
        service_id (str): The id of the service to query.
        version_id (str): The id of the version to query.

    Returns:
        dict: A Version resource dict, or {} if not found.
    """
    try:
        return self.repository.service_versions.get(
            project_id, target=version_id, services_id=service_id)
    except (errors.HttpError, HttpLib2Error) as e:
        # A 404 means the version simply does not exist.
        if isinstance(e, errors.HttpError) and e.resp.status == 404:
            return {}
        raise api_errors.ApiExecutionError(project_id, e)
def list_versions(self, project_id, service_id):
    """List the versions of a given App Engine service.

    Args:
        project_id (str): The id of the project.
        service_id (str): The id of the service to query.

    Returns:
        list: Version resource dicts, or [] if the service is not found.
    """
    try:
        pages = self.repository.service_versions.list(
            project_id, services_id=service_id)
        return api_helpers.flatten_list_results(pages, 'versions')
    except (errors.HttpError, HttpLib2Error) as e:
        # A 404 means the service simply does not exist.
        if isinstance(e, errors.HttpError) and e.resp.status == 404:
            return []
        raise api_errors.ApiExecutionError(project_id, e)
def get_instance(self, project_id, service_id, version_id, instances_id):
    """Look up a specific instance of an App Engine service version.

    Args:
        project_id (str): The id of the project.
        service_id (str): The id of the service to query.
        version_id (str): The id of the version to query.
        instances_id (str): The id of the instance to query.

    Returns:
        dict: An Instance resource dict, or {} if not found.
    """
    try:
        return self.repository.version_instances.get(
            project_id, target=instances_id, services_id=service_id,
            versions_id=version_id)
    except (errors.HttpError, HttpLib2Error) as e:
        # A 404 means the instance simply does not exist.
        if isinstance(e, errors.HttpError) and e.resp.status == 404:
            return {}
        raise api_errors.ApiExecutionError(project_id, e)
def get_project(self, project_id):
    """Fetch a single project from the Resource Manager API.

    Args:
        project_id (str): The project id (not project number).

    Returns:
        dict: The project response object.

    Raises:
        ApiExecutionError: An error has occurred when executing the API.
    """
    try:
        return self.repository.projects.get(project_id)
    except (errors.HttpError, HttpLib2Error) as e:
        raise api_errors.ApiExecutionError(project_id, e)
def get_org_iam_policies(self, resource_name, org_id):
    """Fetch the IAM policy of an organization.

    Args:
        resource_name (str): The resource type.
        org_id (int): The org id number.

    Returns:
        dict: Organization IAM policy for given org_id.
        https://cloud.google.com/resource-manager/reference/rest/Shared.Types/Policy

    Raises:
        ApiExecutionError: An error has occurred when executing the API.
    """
    resource_id = 'organizations/%s' % org_id
    try:
        policy = self.repository.organizations.get_iam_policy(resource_id)
    except (errors.HttpError, HttpLib2Error) as e:
        raise api_errors.ApiExecutionError(resource_name, e)
    return {'org_id': org_id, 'iam_policy': policy}
def get_folder(self, folder_name):
    """Fetch a folder by name.

    Args:
        folder_name (str): The unique folder name, with the format
            "folders/{folderId}".

    Returns:
        dict: The folder API response.

    Raises:
        ApiExecutionError: An error has occurred when executing the API.
    """
    name = self.repository.folders.get_name(folder_name)
    try:
        return self.repository.folders.get(name)
    except (errors.HttpError, HttpLib2Error) as e:
        raise api_errors.ApiExecutionError(folder_name, e)
def get_folder_iam_policies(self, resource_name, folder_id):
    """Fetch the IAM policy of a folder.

    Args:
        resource_name (str): The resource name (type).
        folder_id (int): The folder id.

    Returns:
        dict: Folder IAM policy for given folder_id.

    Raises:
        ApiExecutionError: An error has occurred when executing the API.
    """
    resource_id = 'folders/%s' % folder_id
    try:
        policy = self.repository.folders.get_iam_policy(resource_id)
    except (errors.HttpError, HttpLib2Error) as e:
        raise api_errors.ApiExecutionError(resource_name, e)
    return {'folder_id': folder_id, 'iam_policy': policy}
def get_objects(self, bucket):
    """Gets all objects in a GCS bucket.

    Args:
        bucket (str): The bucket to list the objects in.

    Returns:
        list: A list of object resource dicts.
        https://cloud.google.com/storage/docs/json_api/v1/objects

    Raises:
        ApiExecutionError: Raised if the call to the GCP Storage API fails.
    """
    try:
        paged_results = self.repository.objects.list(bucket,
                                                     projection='full')
        return api_helpers.flatten_list_results(paged_results, 'items')
    except (errors.HttpError, HttpLib2Error) as e:
        # Logger.warn is deprecated; warning() is the supported spelling.
        LOGGER.warning(api_errors.ApiExecutionError(bucket, e))
        raise api_errors.ApiExecutionError('objects', e)
def get_service_accounts(self, project_id):
    """Get Service Accounts associated with a project.

    Args:
        project_id (str): The project ID to get Service Accounts for.

    Returns:
        list: List of service accounts associated with the project.

    Raises:
        ApiExecutionError: Raised if the API call fails.
    """
    name = self.repository.projects_serviceaccounts.get_name(project_id)
    try:
        paged_results = self.repository.projects_serviceaccounts.list(name)
        return api_helpers.flatten_list_results(paged_results, 'accounts')
    except (errors.HttpError, HttpLib2Error) as e:
        # Logger.warn is deprecated; warning() is the supported spelling.
        LOGGER.warning(api_errors.ApiExecutionError(name, e))
        raise api_errors.ApiExecutionError('serviceAccounts', e)
def __init__(self, **kwargs):
    """Initialize an object for interacting with the Google Cloud Storage API.

    By default, Application Default Credentials are used. If the gcloud
    SDK isn't installed, credential files have to be specified via the
    kwargs *json_credentials_path* and *client_id*.

    Keyword Args:
        max_retries (int): Passed with each API call to natively handle
            retryable errors.
        chunksize (int): Upload/Download chunk size in bytes.
        client_secret_path (str): File path for client secret JSON file.
            Only required if credentials are invalid or unavailable.
        json_credentials_path (str): File path for automatically generated
            credentials.
        client_id (str): Credentials are stored as a key-value pair per
            client_id so multiple clients can share one credentials file.
            Using one's email address is sufficient.
    """
    self._service = get_service('storage', **kwargs)
    self._max_retries = kwargs.get('max_retries', 3)
    # Number of bytes to send/receive in each request.
    self._chunksize = kwargs.get('chunksize', 2 * 1024 * 1024)
    # Retry transport and file IO errors.
    self._RETRYABLE_ERRORS = (HttpLib2Error, IOError)
def get_web_exchange(self, line_date):
    """Query the Bank of China exchange-rate page for this currency.

    NOTE(review): the original docstring and user-facing messages were
    mojibake ('?' runs) in the source; the runtime strings below are
    preserved byte-for-byte.

    Args:
        line_date: Date string posted as both 'erectDate' and 'nothing'.

    Returns:
        The sixth cell of the matched table row (the exchange rate).

    Raises:
        UserError: If the currency is unsupported or the request fails.
    """
    if self.name not in currency_code:
        raise UserError(u'?????????(%s)????' % self.currency_id.name)
    http = httplib2.Http()
    url = 'http://srh.bankofchina.com/search/whpj/search.jsp'
    payload = {
        'erectDate': line_date,
        'nothing': line_date,
        'pjname': currency_code[self.name],
    }
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.6 Safari/537.36',
        'Content-type': 'application/x-www-form-urlencoded',
    }
    try:
        response, content = http.request(
            url, 'POST', headers=headers, body=urllib.urlencode(payload))
        result = etree.HTML(content.decode('utf8')).xpath(
            '//table[@cellpadding="0"]/tr[4]/td/text()')
    except httplib2.HttpLib2Error:
        raise UserError(u'??????(%s)???????qq?2201864?' % url)
    return result[5]
def updatePhoto(users):
  """Update the Directory photo for each user, from a URL or local file."""
  cd = buildGAPIObject(API.DIRECTORY)
  filenamePattern = getString(Cmd.OB_PHOTO_FILENAME_PATTERN)
  checkForExtraneousArguments()
  # Patterns that look like http(s)/ftp(s) URLs are fetched over the network.
  urlPattern = re.compile(u'^(ht|f)tps?://.*$')
  i, count, users = getEntityArgument(users)
  for user in users:
    i += 1
    user, userName, _ = splitEmailAddressOrUID(user)
    filename = _substituteForUser(filenamePattern, user, userName)
    if urlPattern.match(filename):
      try:
        status, image_data = httplib2.Http(disable_ssl_certificate_validation=GC.Values[GC.NO_VERIFY_SSL]).request(filename, u'GET')
        if status[u'status'] != u'200':
          entityActionFailedWarning([Ent.USER, user, Ent.PHOTO, filename], Msg.NOT_ALLOWED, i, count)
          continue
        if status[u'content-location'] != filename:
          entityActionFailedWarning([Ent.USER, user, Ent.PHOTO, filename], Msg.NOT_FOUND, i, count)
          continue
      except (httplib2.HttpLib2Error, httplib2.ServerNotFoundError, httplib2.CertificateValidationUnsupported) as e:
        entityActionFailedWarning([Ent.USER, user, Ent.PHOTO, filename], str(e), i, count)
        continue
    else:
      image_data = readFile(filename, mode=u'rb', continueOnError=True, displayError=True)
      if image_data is None:
        entityActionFailedWarning([Ent.USER, user, Ent.PHOTO, filename], None, i, count)
        continue
    body = {u'photoData': base64.urlsafe_b64encode(image_data)}
    try:
      callGAPI(cd.users().photos(), u'update',
               throw_reasons=[GAPI.USER_NOT_FOUND, GAPI.FORBIDDEN, GAPI.INVALID_INPUT],
               userKey=user, body=body, fields=u'')
      entityActionPerformed([Ent.USER, user, Ent.PHOTO, filename], i, count)
    except GAPI.invalidInput as e:
      entityActionFailedWarning([Ent.USER, user, Ent.PHOTO, filename], str(e), i, count)
    except (GAPI.userNotFound, GAPI.forbidden):
      entityUnknownWarning(Ent.USER, user, i, count)

# gam <UserTypeEntity> delete photo
def next_chunk(self):
    """Get the next chunk of the download.

    Returns:
        (status, done): (MediaDownloadStatus, boolean)
           'done' is True once the media has been fully downloaded.

    Raises:
        apiclient.errors.HttpError if the response was not a 2xx.
        httplib2.HttpLib2Error if a transport error has occurred.
    """
    headers = {
        'range': 'bytes=%d-%d' % (
            self._progress, self._progress + self._chunksize)
    }
    http = self._request.http
    http.follow_redirects = False

    resp, content = http.request(self._uri, headers=headers)
    # Follow a single redirect manually so the range header is preserved.
    if resp.status in (301, 302, 303, 307, 308) and 'location' in resp:
        self._uri = resp['location']
        resp, content = http.request(self._uri, headers=headers)

    if resp.status not in (200, 206):
        raise HttpError(resp, content, uri=self._uri)

    self._progress += len(content)
    self._fd.write(content)

    if 'content-range' in resp:
        content_range = resp['content-range']
        self._total_size = int(content_range.rsplit('/', 1)[1])

    if self._progress == self._total_size:
        self._done = True
    return MediaDownloadProgress(self._progress, self._total_size), self._done
def get_source(url):
    """Return the body of the supplied url, or None on failure."""
    http = httplib2.Http()
    headers = {'User-Agent': ' Mozilla/5.0 (Windows NT 6.1; WOW64; rv:12.0) Gecko/20100101 Firefox/12.0'}
    try:
        resp, content = http.request(url, headers=headers)
    except httplib2.HttpLib2Error:
        return None
    # Only a 200 response counts as success.
    return content if resp.status == 200 else None
def get(http_request, path, root=METADATA_ROOT, recursive=None):
    """Fetch a resource from the metadata server.

    Args:
        http_request: A callable that matches the method signature of
            httplib2.Http.request. Used to make the request to the
            metadata server.
        path: A string indicating the resource to retrieve. For example,
            'instance/service-accounts/default'
        root: A string indicating the full path to the metadata server
            root.
        recursive: A boolean indicating whether to do a recursive query of
            metadata. See
            https://cloud.google.com/compute/docs/metadata#aggcontents

    Returns:
        A dictionary if the metadata server returns JSON, otherwise a
        string.

    Raises:
        httplib2.HttpLib2Error: if an error occurred while retrieving
            metadata.
    """
    url = urlparse.urljoin(root, path)
    url = util._add_query_parameter(url, 'recursive', recursive)

    response, content = http_request(
        url,
        headers=METADATA_HEADERS
    )
    if response.status == http_client.OK:
        decoded = _from_bytes(content)
        if response['content-type'] == 'application/json':
            return json.loads(decoded)
        return decoded
    # BUG FIX: the original message was missing the space between
    # 'Engine' and 'metadata' across the implicit string concatenation.
    raise httplib2.HttpLib2Error(
        'Failed to retrieve {0} from the Google Compute Engine '
        'metadata service. Response:\n{1}'.format(url, response))
def check(self, instance):
    """Run the Neo4j check: query JMX metrics over HTTP and emit gauges.

    Args:
        instance (dict): The check instance configuration.

    Raises:
        requests.HTTPError: If the metrics endpoint returns a non-200.
    """
    (host, port, user, password,
     connect_timeout, server_name) = self._get_config(instance)
    tags = instance.get('tags', [])
    tags = tags + ['server_name:%s' % server_name]
    service_check_tags = tags + ['url:%s' % host]
    version = self._get_version(instance, service_check_tags)

    usrPass = user + ":" + password
    b64Val = base64.b64encode(usrPass)

    # The metrics endpoint moved in Neo4j 3.x.
    if version > 2:
        checkURL = host + ":" + str(port) + "/db/data/transaction/commit"
    else:
        checkURL = host + ":" + str(port) + "/v1/service/metrics"

    # Create payload using built-in Neo4j queryJmx stored procedure.
    try:
        payload = {"statements" : [{"statement" : "CALL dbms.queryJmx('org.neo4j:*') yield attributes with keys(attributes) as k, attributes unwind k as row return row, attributes[row]['value'];"}]}
        # BUG FIX: the original headers dict repeated the 'Content-Type'
        # key, so only one copy survived anyway; keep a single entry.
        headers_sent = {'Content-Type': 'application/json',
                        'Authorization': 'Basic ' + b64Val}
        r = requests.post(checkURL, data=json.dumps(payload), headers=headers_sent)
    except (socket.timeout, socket.error, HttpLib2Error) as e:
        msg = "Unable to fetch Neo4j stats: %s" % str(e)
        self._critical_service_check(service_check_tags, msg)
        raise

    if r.status_code != 200:
        # BUG FIX: message previously read "nexpected status".
        msg = "Unexpected status of {0} when fetching Neo4j stats, response: {1}"
        msg = msg.format(r.status_code, r.text)
        self._critical_service_check(service_check_tags, msg)
        r.raise_for_status()

    stats = r.json()
    self.service_check(
        self.SERVICE_CHECK_NAME, AgentCheck.OK, tags=service_check_tags)

    for doc in stats['results'][0]['data']:
        if doc['row'][0].lower() in self.keys:
            self.gauge(self.display.get(doc['row'][0].lower(), ""),
                       doc['row'][1], tags=tags)
def _flatten_aggregated_list_results(project_id, paged_results, item_key,
                                     sort_key='name'):
    """Flatten aggregated list results and handle exceptions.

    Args:
        project_id (str): The project id the results are for.
        paged_results (list): A list of paged API response objects.
            [{page 1 results}, {page 2 results}, {page 3 results}, ...]
        item_key (str): The name of the key within the inner "items" lists
            containing the objects of interest.
        sort_key (str): The name of the key to sort the results by before
            returning.

    Returns:
        list: A sorted list of items.

    Raises:
        ApiNotEnabledError: Raised if the API is not enabled for the
            project.
        ApiExecutionError: Raised if there is another error while calling
            the API method.
    """
    try:
        flattened = api_helpers.flatten_aggregated_list_results(paged_results,
                                                                item_key)
        return sorted(flattened, key=lambda d: d.get(sort_key, ''))
    except (errors.HttpError, HttpLib2Error) as e:
        not_enabled, details = _api_not_enabled(e)
        if not_enabled:
            raise api_errors.ApiNotEnabledError(details, e)
        raise api_errors.ApiExecutionError(project_id, e)
# pylint: enable=invalid-name
def _flatten_list_results(project_id, paged_results, item_key):
    """Flatten list results and handle exceptions.

    Args:
        project_id (str): The project id the results are for.
        paged_results (list): A list of paged API response objects.
            [{page 1 results}, {page 2 results}, {page 3 results}, ...]
        item_key (str): The name of the key within the inner "items" lists
            containing the objects of interest.

    Returns:
        list: A list of items.

    Raises:
        ApiNotEnabledError: Raised if the API is not enabled for the
            project.
        ApiExecutionError: Raised if there is another error while calling
            the API method.
    """
    try:
        return api_helpers.flatten_list_results(paged_results, item_key)
    except (errors.HttpError, HttpLib2Error) as e:
        not_enabled, details = _api_not_enabled(e)
        if not_enabled:
            raise api_errors.ApiNotEnabledError(details, e)
        raise api_errors.ApiExecutionError(project_id, e)
# pylint: disable=too-many-instance-attributes
def get_projects(self, resource_name, parent_id=None, parent_type=None,
                 **filterargs):
    """Yield all projects the authenticated account has access to.

    If no parent is passed in, all projects the caller can see are
    returned; this is significantly less efficient than listing by parent.

    Args:
        resource_name (str): The resource type.
        parent_id (str): The id of the organization or folder parent
            object.
        parent_type (str): Either folder or organization.
        **filterargs (dict): Extra project filter args.

    Yields:
        dict: The projects.list() response.
        https://cloud.google.com/resource-manager/reference/rest/v1/projects/list#response-body

    Raises:
        ApiExecutionError: An error has occurred when executing the API.
    """
    filters = ['{}:{}'.format(key, value)
               for key, value in filterargs.items()]
    if parent_id:
        filters.append('parent.id:{}'.format(parent_id))
    if parent_type:
        filters.append('parent.type:{}'.format(parent_type))
    try:
        for response in self.repository.projects.list(
                filter=' '.join(filters)):
            yield response
    except (errors.HttpError, HttpLib2Error) as e:
        raise api_errors.ApiExecutionError(resource_name, e)
def get_project_ancestry(self, project_id):
    """Get the full folder ancestry for a project.

    Args:
        project_id (str): Either the project number or the project id.

    Returns:
        list: The ancestors of the project, ordered from direct parent to
        root organization id.
    """
    try:
        response = self.repository.projects.get_ancestry(project_id)
    except (errors.HttpError, HttpLib2Error) as e:
        raise api_errors.ApiExecutionError(project_id, e)
    return response.get('ancestor', [])
def get_project_iam_policies(self, resource_name, project_id):
    """Fetch the IAM policy of a project.

    Args:
        resource_name (str): The resource type.
        project_id (str): Either the project number or the project id.

    Returns:
        list: IAM policies of the project.
        https://cloud.google.com/resource-manager/reference/rest/Shared.Types/Policy
    """
    try:
        return self.repository.projects.get_iam_policy(project_id)
    except (errors.HttpError, HttpLib2Error) as e:
        raise api_errors.ApiExecutionError(resource_name, e)
def get_organization(self, org_name):
    """Fetch an organization by name.

    Args:
        org_name (str): The org name with format "organizations/$ORG_ID"

    Returns:
        dict: The org response object if found, otherwise False.
    """
    name = self.repository.organizations.get_name(org_name)
    try:
        return self.repository.organizations.get(name)
    except (errors.HttpError, HttpLib2Error) as e:
        raise api_errors.ApiExecutionError(org_name, e)
def get_folders(self, resource_name, parent=None, show_deleted=False):
    """Find all folders that the authenticated account has access to.

    If no parent is passed in, then all folders the caller has visibility
    to are returned. This is significantly less efficient than listing by
    parent.

    Args:
        resource_name (str): The resource type.
        parent (str): Optional parent resource, either
            'organizations/{org_id}' or 'folders/{folder_id}'.
        show_deleted (bool): Determines if deleted folders should be
            returned in the results.

    Returns:
        list: A list of Folder dicts as returned by the API.

    Raises:
        ApiExecutionError: An error has occurred when executing the API.
    """
    # BUG FIX: the list/search calls were previously issued outside the
    # try, so an HttpError raised there escaped unconverted; wrap them so
    # all API failures surface as ApiExecutionError, consistent with the
    # sibling methods.
    try:
        if parent:
            paged_results = self.repository.folders.list(
                parent, showDeleted=show_deleted)
        else:
            query = ''
            if not show_deleted:
                query = 'lifecycleState=ACTIVE'
            paged_results = self.repository.folders.search(query=query)
        return api_helpers.flatten_list_results(paged_results, 'folders')
    except (errors.HttpError, HttpLib2Error) as e:
        raise api_errors.ApiExecutionError(resource_name, e)
def get_billing_info(self, project_id):
    """Gets the billing information for a project.

    Args:
        project_id (int): The project id for a GCP project.

    Returns:
        dict: A ProjectBillingInfo resource.
        https://cloud.google.com/billing/reference/rest/v1/ProjectBillingInfo
        {
          "name": string,
          "projectId": string,
          "billingAccountName": string,
          "billingEnabled": boolean,
        }

    Raises:
        ApiExecutionError: Raised if the call to the GCP Billing API fails.
    """
    try:
        name = self.repository.projects.get_name(project_id)
        return self.repository.projects.get_billing_info(name)
    except (errors.HttpError, HttpLib2Error) as e:
        # Logger.warn is deprecated; warning() is the supported spelling.
        LOGGER.warning(api_errors.ApiExecutionError(project_id, e))
        raise api_errors.ApiExecutionError('billing_info', e)
def get_bucket_iam_policy(self, bucket):
    """Gets the IAM policy for a bucket.

    Args:
        bucket (str): The bucket to fetch the policy for.

    Returns:
        dict: The IAM policies for the bucket.

    Raises:
        ApiExecutionError: Raised if the API call fails.
    """
    try:
        return self.repository.buckets.get_iam_policy(bucket)
    except (errors.HttpError, HttpLib2Error) as e:
        # Logger.warn is deprecated; warning() is the supported spelling.
        LOGGER.warning(api_errors.ApiExecutionError(bucket, e))
        raise api_errors.ApiExecutionError('bucketIamPolicy', e)
def get_object_iam_policy(self, bucket, object_name):
    """Gets the IAM policy for an object.

    Args:
        bucket (str): The bucket to fetch the policy for.
        object_name (str): The object name to fetch the policy for.

    Returns:
        dict: The IAM policies for the object.

    Raises:
        ApiExecutionError: Raised if the API call fails.
    """
    try:
        return self.repository.objects.get_iam_policy(bucket, object_name)
    except (errors.HttpError, HttpLib2Error) as e:
        # Logger.warn is deprecated; warning() is the supported spelling.
        LOGGER.warning(api_errors.ApiExecutionError(bucket, e))
        raise api_errors.ApiExecutionError('objectIamPolicy', e)
def get_service_account_iam_policy(self, name):
    """Get IAM policy associated with a service account.

    Args:
        name (str): The service account name to query, must be in the
            format
            projects/{PROJECT_ID}/serviceAccounts/{SERVICE_ACCOUNT_EMAIL}

    Returns:
        dict: The IAM policies for the service account.

    Raises:
        ApiExecutionError: Raised if the API call fails.
    """
    try:
        return self.repository.projects_serviceaccounts.get_iam_policy(name)
    except (errors.HttpError, HttpLib2Error) as e:
        # Logger.warn is deprecated; warning() is the supported spelling.
        LOGGER.warning(api_errors.ApiExecutionError(name, e))
        raise api_errors.ApiExecutionError('serviceAccountIamPolicy', e)
def testHttpFailure(self, mock_run_flow, mock_storage):
    """fetch should report a communication error when run_flow raises."""
    mock_store = mock.MagicMock()
    mock_storage.return_value = mock_store
    # No cached credential, and the OAuth flow dies with a transport error.
    mock_store.get.return_value = None
    mock_run_flow.side_effect = httplib2.HttpLib2Error
    output = _GetCommandOutput('fetch', self.json_args + ['userinfo.email'])
    self.assertIn('Communication error creating credentials', output)
    self.assertEqual(1, mock_store.get.call_count)
    self.assertEqual(0, self.mock_test.call_count)
def _GetCredentialsVia3LO(client_info, credentials_filename=None):
    """Obtain credentials via the 3-legged OAuth flow, retrying on failure.

    Args:
        client_info: dict of OAuth2WebServerFlow keyword arguments; must
            include 'client_id' and 'scope'.
        credentials_filename: Optional path to the credential store file.

    Returns:
        The obtained credentials, or None if every attempt failed.

    Raises:
        ValueError: If a transport error occurs while creating credentials.
    """
    credential_store = _GetCredentialStore(credentials_filename,
                                           client_info['client_id'],
                                           client_info['scope'])
    credentials = credential_store.get()
    if credentials is None or credentials.invalid:
        for _ in range(10):
            # If authorization fails, we want to retry, rather
            # than let this cascade up and get caught elsewhere.
            # If users want out of the retry loop, they can ^C.
            try:
                flow = client.OAuth2WebServerFlow(**client_info)
                flags, _ = tools.argparser.parse_known_args(
                    ['--noauth_local_webserver'])
                credentials = tools.run_flow(
                    flow, credential_store, flags)
                break
            # Here SystemExit is "no credential at all", and the
            # FlowExchangeError is "invalid" -- usually because
            # you reused a token. (FIX: dropped the unused 'as e'
            # binding on this swallowed branch.)
            except (SystemExit, client.FlowExchangeError):
                pass
            except httplib2.HttpLib2Error as e:
                # FIX: added the missing space after the colon in the
                # user-facing message.
                raise ValueError(
                    'Communication error creating credentials: '
                    '{}'.format(e))
        else:
            credentials = None
    return credentials
def next_chunk(self, num_retries=0):
    """Get the next chunk of the download.

    Args:
        num_retries: Integer, number of times to retry 500's with
            randomized exponential backoff. If all retries fail, the
            raised HttpError represents the last request. If zero
            (default), we attempt the request only once.

    Returns:
        (status, done): (MediaDownloadStatus, boolean)
           'done' is True once the media has been fully downloaded.

    Raises:
        googleapiclient.errors.HttpError if the response was not a 2xx.
        httplib2.HttpLib2Error if a transport error has occurred.
    """
    headers = {
        'range': 'bytes=%d-%d' % (
            self._progress, self._progress + self._chunksize)
    }
    http = self._request.http

    resp, content = _retry_request(
        http, num_retries, 'media download', self._sleep, self._rand,
        self._uri, 'GET', headers=headers)

    if resp.status not in (200, 206):
        raise HttpError(resp, content, uri=self._uri)

    # Track redirects so subsequent chunk requests hit the final location.
    if 'content-location' in resp and resp['content-location'] != self._uri:
        self._uri = resp['content-location']
    self._progress += len(content)
    self._fd.write(content)

    if 'content-range' in resp:
        content_range = resp['content-range']
        self._total_size = int(content_range.rsplit('/', 1)[1])
    elif 'content-length' in resp:
        self._total_size = int(resp['content-length'])

    if self._progress == self._total_size:
        self._done = True
    return MediaDownloadProgress(self._progress, self._total_size), self._done
def execute(self, http=None, num_retries=0):
    """Execute the request.

    Args:
        http: httplib2.Http, an http object to be used in place of the
            one the HttpRequest request object was constructed with.
        num_retries: Integer, number of times to retry 500's with
            randomized exponential backoff. If all retries fail, the
            raised HttpError represents the last request. If zero
            (default), we attempt the request only once.

    Returns:
        A deserialized object model of the response body as determined
        by the postproc.

    Raises:
        googleapiclient.errors.HttpError if the response was not a 2xx.
        httplib2.HttpLib2Error if a transport error has occurred.
    """
    if http is None:
        http = self.http

    # Resumable requests are driven chunk by chunk until a body arrives.
    if self.resumable:
        body = None
        while body is None:
            _, body = self.next_chunk(http=http, num_retries=num_retries)
        return body

    # Non-resumable case.
    if 'content-length' not in self.headers:
        self.headers['content-length'] = str(self.body_size)

    # If the request URI is too long then turn it into a POST request.
    if len(self.uri) > MAX_URI_LENGTH and self.method == 'GET':
        self.method = 'POST'
        self.headers['x-http-method-override'] = 'GET'
        self.headers['content-type'] = 'application/x-www-form-urlencoded'
        parsed = urlparse(self.uri)
        self.uri = urlunparse(
            (parsed.scheme, parsed.netloc, parsed.path, parsed.params,
             None, None)
        )
        self.body = parsed.query
        self.headers['content-length'] = str(len(self.body))

    # Handle retries for server-side errors.
    resp, content = _retry_request(
        http, num_retries, 'request', self._sleep, self._rand,
        str(self.uri), method=str(self.method), body=self.body,
        headers=self.headers)

    for callback in self.response_callbacks:
        callback(resp)
    if resp.status >= 300:
        raise HttpError(resp, content, uri=self.uri)
    return self.postproc(resp, content)
def next_chunk(self, num_retries=0):
    """Get the next chunk of the download.

    Args:
        num_retries: Integer, number of times to retry 500's with
            randomized exponential backoff. If all retries fail, the
            raised HttpError represents the last request. If zero
            (default), we attempt the request only once.

    Returns:
        (status, done): (MediaDownloadStatus, boolean)
           'done' is True once the media has been fully downloaded.

    Raises:
        apiclient.errors.HttpError if the response was not a 2xx.
        httplib2.HttpLib2Error if a transport error has occurred.
    """
    headers = {
        'range': 'bytes=%d-%d' % (
            self._progress, self._progress + self._chunksize)
    }
    http = self._request.http

    # Retry server errors (>=500) with randomized exponential backoff.
    for retry_num in xrange(num_retries + 1):
        if retry_num > 0:
            self._sleep(self._rand() * 2**retry_num)
            logging.warning(
                'Retry #%d for media download: GET %s, following status: %d'
                % (retry_num, self._uri, resp.status))
        resp, content = http.request(self._uri, headers=headers)
        if resp.status < 500:
            break

    if resp.status not in (200, 206):
        raise HttpError(resp, content, uri=self._uri)

    # Track redirects so subsequent chunk requests hit the final location.
    if 'content-location' in resp and resp['content-location'] != self._uri:
        self._uri = resp['content-location']
    self._progress += len(content)
    self._fd.write(content)

    if 'content-range' in resp:
        content_range = resp['content-range']
        self._total_size = int(content_range.rsplit('/', 1)[1])

    if self._progress == self._total_size:
        self._done = True
    return MediaDownloadProgress(self._progress, self._total_size), self._done
def _execute(self, http, order, requests):
    """Serialize batch request, send to server, process response.

    Args:
        http: httplib2.Http, an http object to be used to make the request
            with.
        order: list, list of request ids in the order they were added to the
            batch.
        requests: mapping of request id to request object to send (indexed
            with the ids from 'order').

    Raises:
        httplib2.HttpLib2Error: if a transport error has occured.
        apiclient.errors.BatchError: if the response is the wrong format.
    """
    message = MIMEMultipart('mixed')
    # Message should not write out its own headers; the batch request's
    # outer headers are built by hand below.
    setattr(message, '_write_headers', lambda self: None)

    # Add all the individual requests, one MIME part per request, tagged
    # with a Content-ID so responses can be matched back up.
    for request_id in order:
        request = requests[request_id]
        msg = MIMENonMultipart('application', 'http')
        msg['Content-Transfer-Encoding'] = 'binary'
        msg['Content-ID'] = self._id_to_header(request_id)
        body = self._serialize_request(request)
        msg.set_payload(body)
        message.attach(msg)

    body = message.as_string()

    headers = {}
    headers['content-type'] = ('multipart/mixed; '
                               'boundary="%s"') % message.get_boundary()

    resp, content = http.request(self._batch_uri, method='POST', body=body,
                                 headers=headers)

    if resp.status >= 300:
        raise HttpError(resp, content, uri=self._batch_uri)

    # Now break out the individual responses and store each one.
    # NOTE(review): 'boundary' is computed here but never used afterwards.
    boundary, _ = content.split(None, 1)

    # Prepend with a content-type header so FeedParser can handle it.
    header = 'content-type: %s\r\n\r\n' % resp['content-type']
    for_parser = header + content

    parser = FeedParser()
    parser.feed(for_parser)
    mime_response = parser.close()

    if not mime_response.is_multipart():
        raise BatchError("Response not in multipart/mixed format.", resp=resp,
                         content=content)

    # Store each sub-response under its original request id.
    for part in mime_response.get_payload():
        request_id = self._header_to_id(part['Content-ID'])
        response, content = self._deserialize_response(part.get_payload())
        self._responses[request_id] = (response, content)
def doGAMCheckForUpdates(forceCheck=False):
  """Check GitHub for a newer GAM release and prompt the user to update.

  Args:
    forceCheck: bool; when True, always query GitHub (including
      pre-releases) and report errors; when False, check at most once
      per week and fail silently.

  Side effects: writes the check timestamp to LAST_UPDATE_CHECK_TXT,
  prints version info, and may sys.exit(0) if the user opts to update.
  """
  import calendar

  def _gamLatestVersionNotAvailable():
    # Only surface a network/parse failure when the user asked explicitly.
    if forceCheck:
      systemErrorExit(NETWORK_ERROR_RC, Msg.GAM_LATEST_VERSION_NOT_AVAILABLE)

  current_version = __version__
  now_time = calendar.timegm(time.gmtime())
  if forceCheck:
    check_url = GAM_ALL_RELEASES # includes pre-releases
  else:
    # Rate-limit background checks to once per week (604800 seconds).
    last_check_time_str = readFile(GM.Globals[GM.LAST_UPDATE_CHECK_TXT], continueOnError=True, displayError=False)
    last_check_time = int(last_check_time_str) if last_check_time_str and last_check_time_str.isdigit() else 0
    if last_check_time > now_time-604800:
      return
    check_url = GAM_LATEST_RELEASE # latest full release
  try:
    _, c = httplib2.Http(disable_ssl_certificate_validation=GC.Values[GC.NO_VERIFY_SSL]).request(check_url, u'GET', headers={u'Accept': u'application/vnd.github.v3.text+json'})
    try:
      release_data = json.loads(c)
    except ValueError:
      _gamLatestVersionNotAvailable()
      return
    if isinstance(release_data, list):
      release_data = release_data[0] # only care about latest release
    if not isinstance(release_data, dict) or u'tag_name' not in release_data:
      _gamLatestVersionNotAvailable()
      return
    latest_version = release_data[u'tag_name']
    # Strip a leading 'v'/'V' from tags like 'v4.50'.
    if latest_version[0].lower() == u'v':
      latest_version = latest_version[1:]
    # NOTE(review): this is a lexicographic string comparison, not a semantic
    # version compare — e.g. '4.9' > '4.10'. Confirm intended for GAM's
    # version scheme.
    if forceCheck or (latest_version > current_version):
      printKeyValueList([u'Version Check', None])
      Ind.Increment()
      printKeyValueList([u'Current', current_version])
      printKeyValueList([u' Latest', latest_version])
      Ind.Decrement()
    if latest_version <= current_version:
      # Already up to date: record the check time and stop.
      writeFile(GM.Globals[GM.LAST_UPDATE_CHECK_TXT], str(now_time), continueOnError=True, displayError=forceCheck)
      return
    announcement = release_data.get(u'body_text', u'No details about this release')
    writeStderr(u'\nGAM %s release notes:\n\n' % latest_version)
    writeStderr(announcement)
    try:
      # Give the user 15 seconds to hit Ctrl-C and jump to the release page.
      printLine(Msg.HIT_CONTROL_C_TO_UPDATE)
      time.sleep(15)
    except KeyboardInterrupt:
      import webbrowser
      webbrowser.open(release_data[u'html_url'])
      printLine(Msg.GAM_EXITING_FOR_UPDATE)
      sys.exit(0)
    writeFile(GM.Globals[GM.LAST_UPDATE_CHECK_TXT], str(now_time), continueOnError=True, displayError=forceCheck)
    return
  except (httplib2.HttpLib2Error, httplib2.ServerNotFoundError, httplib2.CertificateValidationUnsupported):
    # Network problems during a background check are ignored entirely.
    return
def next_chunk(self, num_retries=0):
    """Fetch and persist the next chunk of the media being downloaded.

    Args:
        num_retries: Integer, number of times to retry with randomized
            exponential backoff; zero (default) means a single attempt. If
            all retries fail, the raised HttpError represents the last
            request.

    Returns:
        (status, done): (MediaDownloadStatus, boolean)
            'done' is True once the media has been fully downloaded.

    Raises:
        googleapiclient.errors.HttpError: if the response was not a 2xx.
        httplib2.HttpLib2Error: if a transport error has occured.
    """
    byte_range = 'bytes=%d-%d' % (
        self._progress, self._progress + self._chunksize)
    resp, content = _retry_request(
        self._request.http, num_retries, 'media download', self._sleep,
        self._rand, self._uri, 'GET', headers={'range': byte_range})

    # Anything other than a full/partial success is surfaced as an error.
    if resp.status not in (200, 206):
        raise HttpError(resp, content, uri=self._uri)

    # Honor a server-issued relocation for the following chunks.
    if 'content-location' in resp and resp['content-location'] != self._uri:
        self._uri = resp['content-location']

    self._progress += len(content)
    self._fd.write(content)

    # Learn the total size from whichever header the server provided.
    if 'content-range' in resp:
        self._total_size = int(resp['content-range'].rsplit('/', 1)[1])
    elif 'content-length' in resp:
        self._total_size = int(resp['content-length'])

    if self._progress == self._total_size:
        self._done = True
    return MediaDownloadProgress(self._progress, self._total_size), self._done
def execute(self, http=None, num_retries=0):
    """Execute the request and return the post-processed response body.

    Args:
        http: httplib2.Http, an http object to be used in place of the one
            the HttpRequest request object was constructed with.
        num_retries: Integer, number of times to retry with randomized
            exponential backoff. If all retries fail, the raised HttpError
            represents the last request. If zero (default), we attempt the
            request only once.

    Returns:
        A deserialized object model of the response body as determined by
        the postproc.

    Raises:
        googleapiclient.errors.HttpError: if the response was not a 2xx.
        httplib2.HttpLib2Error: if a transport error has occured.
    """
    if http is None:
        http = self.http

    # Resumable requests loop over next_chunk until a body is produced.
    if self.resumable:
        body = None
        while body is None:
            _, body = self.next_chunk(http=http, num_retries=num_retries)
        return body

    # Non-resumable: declare the payload size if the caller did not.
    if 'content-length' not in self.headers:
        self.headers['content-length'] = str(self.body_size)

    # A GET whose URI exceeds the limit is rewritten as a POST carrying the
    # query string in the body (a GET is assumed to have no body of its own).
    uri_too_long = len(self.uri) > MAX_URI_LENGTH
    if uri_too_long and self.method == 'GET':
        parsed = urlparse(self.uri)
        trimmed = (parsed.scheme, parsed.netloc, parsed.path,
                   parsed.params, None, None)
        self.method = 'POST'
        self.uri = urlunparse(trimmed)
        self.body = parsed.query
        self.headers['x-http-method-override'] = 'GET'
        self.headers['content-type'] = 'application/x-www-form-urlencoded'
        self.headers['content-length'] = str(len(self.body))

    # Server-side (5xx) retries are delegated to _retry_request.
    resp, content = _retry_request(
        http, num_retries, 'request', self._sleep, self._rand, str(self.uri),
        method=str(self.method), body=self.body, headers=self.headers)

    for cb in self.response_callbacks:
        cb(resp)

    if resp.status < 300:
        return self.postproc(resp, content)
    raise HttpError(resp, content, uri=self.uri)