The following 48 code examples, extracted from open source Python projects, illustrate how to use google.appengine.api.urlfetch.GET.
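Before the examples, here is a minimal sketch of the pattern they all share (the URL is a placeholder): urlfetch.fetch() issues the request with method=urlfetch.GET and returns a response object whose status_code and content the caller checks.

from google.appengine.api import urlfetch

def fetch_json(url='https://example.com/api/data'):
    # Issue a GET request; deadline is the timeout in seconds.
    result = urlfetch.fetch(url=url, method=urlfetch.GET, deadline=10)
    if result.status_code == 200:
        return result.content
    return None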
def getAddress(longitude, latitude):
    gsp_key = "gps-" + str(longitude) + "," + str(latitude)
    resultData = memcache.get(key=gsp_key)
    if resultData is None:
        url = "https://maps.googleapis.com/maps/api/geocode/json?language=ja&sensor=false&key=" + const.GOOGLE_API_KEY + "&latlng=" + str(longitude) + "," + str(latitude)
        logging.debug(url)
        result = urlfetch.fetch(
            url=url,
            method=urlfetch.GET,
            headers={})
        logging.debug(result.content)
        jsonstr = result.content
        jsonobj = json.loads(jsonstr)
        if len(jsonobj["results"]) > 0:
            memcache.set(key=gsp_key, value=jsonobj, time=3600)
            resultData = jsonobj
        else:
            logging.debug(resultData)
    # Guard against a cache miss that produced no geocoding results.
    return resultData["results"] if resultData else None
def getUserProfine(mid):
    # midstr = ','.join(mids)
    url = "https://api.line.me/v2/bot/profile/" + mid
    result = urlfetch.fetch(
        url=url,
        method=urlfetch.GET,
        headers={
            'Authorization': 'Bearer ' + const.ChannelAccessToken
        })
    logging.debug(result.content)
    jsonstr = result.content
    jsonobj = json.loads(jsonstr)
    return jsonobj
def get(self):
    auth_token, _ = app_identity.get_access_token(
        'https://www.googleapis.com/auth/cloud-platform')
    logging.info(
        'Using token {} to represent identity {}'.format(
            auth_token, app_identity.get_service_account_name()))
    response = urlfetch.fetch(
        'https://www.googleapis.com/storage/v1/b?project={}'.format(
            app_identity.get_application_id()),
        method=urlfetch.GET,
        headers={
            'Authorization': 'Bearer {}'.format(auth_token)
        })
    if response.status_code != 200:
        raise Exception(
            'Call failed. Status code {}. Body {}'.format(
                response.status_code, response.content))
    result = json.loads(response.content)
    self.response.headers['Content-Type'] = 'application/json'
    self.response.write(json.dumps(result, indent=2))
def list_bucket_files(bucket_name, prefix, max_keys=1000):
    """Returns a listing of a bucket that matches the given prefix."""
    scope = config.GoogleApiScope('devstorage.read_only')
    bucket_url = config.GsBucketURL(bucket_name)
    url = bucket_url + '?'
    query = [('max-keys', max_keys)]
    if prefix:
        query.append(('prefix', prefix))
    url += urllib.urlencode(query)
    auth_token, _ = app_identity.get_access_token(scope)
    result = urlfetch.fetch(url, method=urlfetch.GET, headers={
        'Authorization': 'OAuth %s' % auth_token,
        'x-goog-api-version': '2'})
    if result and result.status_code == 200:
        doc = xml.dom.minidom.parseString(result.content)
        return [node.childNodes[0].data for node in
                doc.getElementsByTagName('Key')]
    raise BackupValidationError('Request to Google Cloud Storage failed')
def get_gs_object(bucket_name, path):
    """Returns the content of the object at the given path in a bucket."""
    scope = config.GoogleApiScope('devstorage.read_only')
    bucket_url = config.GsBucketURL(bucket_name)
    url = bucket_url + path
    auth_token, _ = app_identity.get_access_token(scope)
    result = urlfetch.fetch(url, method=urlfetch.GET, headers={
        'Authorization': 'OAuth %s' % auth_token,
        'x-goog-api-version': '2'})
    if result and result.status_code == 200:
        return result.content
    if result and result.status_code == 403:
        raise BackupValidationError(
            'Requested path %s is not accessible/access denied' % url)
    if result and result.status_code == 404:
        raise BackupValidationError('Requested path %s was not found' % url)
    raise BackupValidationError('Error encountered accessing requested path %s' % url)
def authenticated_get(
        url, customer_key=CUSTOMER_KEY, customer_secret=CUSTOMER_SECRET):
    """Performs an authenticated GET to the given URL, returns the
    response's content.

    See https://dev.twitter.com/oauth/application-only
    """
    token = get_access_token()
    response = urlfetch.fetch(
        url,
        method=urlfetch.GET,
        headers={'Authorization': 'Bearer ' + token})
    if response.status_code == urlfetch.httplib.OK:
        return response.content
    elif response.status_code == urlfetch.httplib.UNAUTHORIZED:
        return response.content  # User is probably suspended
    else:
        message = 'Url ' + url + ' returned ' + response.content
        logging.warning(message)
        raise urlfetch.httplib.HTTPException(message)
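A hypothetical usage sketch (the screen_name value is assumed): the helper returns the raw response body, which callers parse themselves.

import json

profile_json = authenticated_get(
    'https://api.twitter.com/1.1/users/show.json?screen_name=twitterapi')
profile = json.loads(profile_json)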
def urlread(url, data=None, headers=None):
    if data is not None:
        if headers is None:
            headers = {"Content-type": "application/x-www-form-urlencoded"}
        method = urlfetch.POST
    else:
        if headers is None:
            headers = {}
        method = urlfetch.GET
    result = urlfetch.fetch(url, method=method, payload=data, headers=headers)
    if result.status_code == 200:
        return result.content
    else:
        raise urllib2.URLError("fetch error url=%s, code=%d" %
                               (url, result.status_code))
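A minimal usage sketch (URLs are placeholders): with no data the helper issues a GET; passing an already form-encoded payload switches it to POST.

page = urlread('https://example.com/ping')                    # GET
resp = urlread('https://example.com/submit', data='a=1&b=2')  # POST, form-encoded body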
def fetch(url, data=None, headers=None,
          cookie=Cookie.SimpleCookie(),
          user_agent='Mozilla/5.0'):
    headers = headers or {}
    if data is not None:
        data = urllib.urlencode(data)
    if user_agent:
        headers['User-agent'] = user_agent
    headers['Cookie'] = ' '.join(
        ['%s=%s;' % (c.key, c.value) for c in cookie.values()])
    try:
        from google.appengine.api import urlfetch
    except ImportError:
        req = urllib2.Request(url, data, headers)
        html = urllib2.urlopen(req).read()
    else:
        method = ((data is None) and urlfetch.GET) or urlfetch.POST
        while url is not None:
            response = urlfetch.fetch(url=url, payload=data,
                                      method=method, headers=headers,
                                      allow_truncated=False,
                                      follow_redirects=False,
                                      deadline=10)
            # next request will be a get, so no need to send the data again
            data = None
            method = urlfetch.GET
            # load cookies from the response
            cookie.load(response.headers.get('set-cookie', ''))
            url = response.headers.get('location')
        html = response.content
    return html
def obtain_bearer_token(host, path):
    """Obtains an OAuth bearer token from the API, using CLIENT_ID and
    CLIENT_SECRET.

    Args:
        host (str): The domain host of the API.
        path (str): The path of the API after the domain.

    Returns:
        str: OAuth bearer token.

    Raises:
        HTTPError: An error occurs from the HTTP request.
    """
    url = '{0}{1}'.format(host, quote(path.encode('utf8')))
    data = urlencode({
        'client_id': CLIENT_ID,
        'client_secret': CLIENT_SECRET,
        'grant_type': GRANT_TYPE,
    })
    print('@@@@@@@@@' + CLIENT_ID)
    headers = {
        'content-type': 'application/x-www-form-urlencoded',
    }
    result = urlfetch.fetch(
        url=url,
        payload=data,
        method=urlfetch.POST,
        headers=headers)
    print('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@' + result.content)
    # Note: returns a hard-coded token rather than parsing result.content.
    return "BIO6_LpbIcFkeKDB9SsSAONt3lE2IwrdiTxUeq-Ag1MKOzSc4m-8QyPjdV6WmI27ySuLEKv7czHoJmJjFHrCyjfgxucTvKPpJG9JCsg_08KCz4J-WrEfeaiACoJ2WXYx"
def request(host, path, bearer_token, params):
    """Given a bearer token, send a GET request to the API.

    Args:
        host (str): The domain host of the API.
        path (str): The path of the API after the domain.
        bearer_token (str): OAuth bearer token, obtained using client_id
            and client_secret.
        params (dict): An optional set of query parameters in the request.

    Returns:
        dict: The JSON response from the request.

    Raises:
        HTTPError: An error occurs from the HTTP request.
    """
    url = '{0}{1}?{2}'.format(
        host, quote(path.encode('utf8')), urllib.urlencode(params))
    headers = {
        'Authorization': 'Bearer %s' % bearer_token,
    }
    logging.info(u'Querying {0} ...'.format(url))
    result = urlfetch.fetch(
        url=url,
        method=urlfetch.GET,
        headers=headers)
    logging.info('@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@' + result.content)
    return json.loads(result.content)
def fetch(url, data=None, headers=None,
          cookie=Cookie.SimpleCookie(),
          user_agent='Mozilla/5.0'):
    headers = headers or {}
    if data is not None:
        data = urllib.urlencode(data)
    if user_agent:
        headers['User-agent'] = user_agent
    headers['Cookie'] = ' '.join(
        ['%s=%s;' % (c.key, c.value) for c in cookie.values()])
    try:
        from google.appengine.api import urlfetch
    except ImportError:
        req = urllib2.Request(url, data, headers)
        html = urllib2.urlopen(req).read()
    else:
        method = ((data is None) and urlfetch.GET) or urlfetch.POST
        while url is not None:
            response = urlfetch.fetch(url=url, payload=data,
                                      method=method, headers=headers,
                                      allow_truncated=False,
                                      follow_redirects=False,
                                      deadline=10)
            # next request will be a get, so no need to send the data again
            data = None
            method = urlfetch.GET
            # load cookies from the response
            cookie.load(response.headers.get('set-cookie', ''))
            url = response.headers.get('location')
        html = response.content
    return html
def getImageDataExternal(self, width, heigh):
    logging.debug(self.request.path)
    picture_key = self.request.path[11:75]
    encodeUrl = self.request.path[76:]
    originalUrl = urllib.unquote(encodeUrl)
    logging.debug(picture_key + " " + encodeUrl + " " + originalUrl)
    if originalUrl is None:
        logging.debug("URL Failure(" + picture_key + ")")
        self.response.status = 301
        self.response.headers['Location'] = "https://ifttt-line.appspot.com/images/preview_image.jpg?" + str(uuid.uuid4())
    elif picture_key == getHash(originalUrl):
        logging.debug("Key is correct! " + picture_key + ")")
        result = urlfetch.fetch(
            url=originalUrl,
            method=urlfetch.GET,
            headers={})
        if result.status_code == 200:
            logging.debug(result.content)
            photo_data = result.content
            thumb = image_Transform(photo_data, width, heigh)
            contentLegth = len(thumb)
            self.response.headers['Content-Type'] = result.headers['Content-Type']
            if result.headers.has_key("content-disposition"):
                self.response.headers['content-disposition'] = result.headers['content-disposition']
            self.response.headers['date'] = result.headers['date']
            self.response.headers['content-length'] = contentLegth
            self.response.out.write(thumb)
        else:
            self.response.status = 301
            self.response.headers['Location'] = "https://ifttt-line.appspot.com/images/preview_image.jpg?" + str(uuid.uuid4())
            logging.debug("Image Load Failure(" + originalUrl + ")")
    else:
        logging.debug("Key Failure(" + picture_key + ")")
        self.response.status = 301
        self.response.headers['Location'] = "https://ifttt-line.appspot.com/images/preview_image.jpg?" + str(uuid.uuid4())
def returnContent(self):
    logging.debug(self.request.path)
    picture_key = self.request.path[9:73]
    originalid = self.request.path[74:]
    logging.debug(picture_key + " " + originalid)
    if originalid is None:
        logging.debug("URL Failure(" + picture_key + ")")
        self.response.status = 301
        self.response.headers['Location'] = "https://ifttt-line.appspot.com/images/preview_image.jpg?" + str(uuid.uuid4())
    elif picture_key == utility.getHash(originalid):
        url = "https://api.line.me/v2/bot/message/" + originalid + "/content"
        result = urlfetch.fetch(
            url=url,
            method=urlfetch.GET,
            headers={
                'Authorization': 'Bearer ' + const.ChannelAccessToken
            })
        logging.debug(result.headers)
        if result.status_code == 200:
            logging.debug(result.content)
            self.response.headers['Content-Type'] = result.headers['Content-Type']
            if result.headers.has_key("content-disposition"):
                self.response.headers['content-disposition'] = result.headers['content-disposition']
            self.response.headers['date'] = result.headers['date']
            self.response.headers['content-length'] = result.headers['content-length']
            self.response.out.write(result.content)
        else:
            logging.debug("Content Load Error")
            logging.debug(result.content)
            self.response.status = 301
            self.response.headers['Location'] = "https://ifttt-line.appspot.com/images/preview_image.jpg?" + str(uuid.uuid4())
    else:
        logging.debug("Key Failure")
def __init__(self, host, port=None, strict=False, timeout=None):
    from google.appengine.api import urlfetch
    self._fetch = urlfetch.fetch
    self._method_map = {
        'GET': urlfetch.GET,
        'POST': urlfetch.POST,
        'HEAD': urlfetch.HEAD,
        'PUT': urlfetch.PUT,
        'DELETE': urlfetch.DELETE,
        'PATCH': urlfetch.PATCH,
    }
    self.host = host
    self.port = port
    self._method = self._url = None
    self._body = ''
    self.headers = []
    if not isinstance(timeout, (float, int, long)):
        timeout = None
    self.timeout = timeout
def _get_html_page(self):
    """Return the name of the HTML page for HTTP/GET requests."""
    raise NotImplementedError
def _PopulateX509(self):
    with self._x509_init_lock:
        if self._x509 is None:
            url = ('https://www.googleapis.com/service_accounts/v1/metadata/x509/%s' %
                   urllib.unquote_plus(self._credentials.service_account_email))
            response = urlfetch.fetch(
                url=url,
                validate_certificate=True,
                method=urlfetch.GET)
            if response.status_code != 200:
                raise apiproxy_errors.ApplicationError(
                    app_identity_service_pb.AppIdentityServiceError.UNKNOWN_ERROR,
                    'Unable to load X509 cert: %s Response code: %i, Content: %s' % (
                        url, response.status_code, response.content))

            message = 'dummy'
            _, signature = self._credentials.sign_blob(message)

            for signing_key, x509 in json.loads(response.content).items():
                der = rsa.pem.load_pem(x509, 'CERTIFICATE')
                asn1_cert, _ = decoder.decode(der, asn1Spec=Certificate())

                key_bitstring = (
                    asn1_cert['tbsCertificate']
                    ['subjectPublicKeyInfo']
                    ['subjectPublicKey'])
                key_bytearray = BitStringToByteString(key_bitstring)

                public_key = rsa.PublicKey.load_pkcs1(key_bytearray, 'DER')
                try:
                    if rsa.pkcs1.verify(message, signature, public_key):
                        self._x509 = x509
                        self._signing_key = signing_key
                        return
                except rsa.pkcs1.VerificationError:
                    pass

            raise apiproxy_errors.ApplicationError(
                app_identity_service_pb.AppIdentityServiceError.UNKNOWN_ERROR,
                'Unable to find matching X509 cert for private key: %s' % url)
def __init__(self, host, port=None, strict=None,
             timeout=_GLOBAL_DEFAULT_TIMEOUT, source_address=None,
             context=None):
    # net.proto.ProcotolBuffer relies on httplib so importing urlfetch at the
    # module level causes a failure on prod. That means the import needs to be
    # lazy.
    from google.appengine.api import urlfetch
    self._fetch = urlfetch.fetch
    self._method_map = {
        'GET': urlfetch.GET,
        'POST': urlfetch.POST,
        'HEAD': urlfetch.HEAD,
        'PUT': urlfetch.PUT,
        'DELETE': urlfetch.DELETE,
        'PATCH': urlfetch.PATCH,
    }
    self.host = host
    self.port = port
    # With urllib2 in Python 2.6, an object can be passed here.
    # The default is set to socket.GLOBAL_DEFAULT_TIMEOUT which is an object.
    # We only accept float, int or long values, otherwise it can be
    # silently ignored.
    if not isinstance(timeout, (float, int, long)):
        timeout = None
    self.timeout = timeout
    # Both 'strict' and 'source_address' are ignored.
    self._method = self._url = None
    self._body = ''
    self.headers = []
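For context, a shim like this later translates the stored verb through _method_map before delegating to urlfetch. A minimal sketch of that lookup, assuming self._url holds a full URL (this is not the SDK's actual getresponse() body):

# Inside a hypothetical getresponse() on the same class:
method = self._method_map.get(self._method.upper())
if method is None:
    raise ValueError('%r is an unsupported HTTP method' % self._method)
response = self._fetch(self._url, payload=self._body, method=method,
                       headers=dict(self.headers), deadline=self.timeout)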
def putrequest(self, method, url, skip_host=0, skip_accept_encoding=0):
    """Send a request to the server.

    `method' specifies an HTTP request method, e.g. 'GET'.
    `url' specifies the object being requested, e.g. '/index.html'.
    `skip_host' if True does not add automatically a 'Host:' header
    `skip_accept_encoding' if True does not add automatically an
       'Accept-Encoding:' header

    App Engine Note: `skip_host' and `skip_accept_encoding' are not honored
    by the urlfetch service.
    """
    self._method = method
    self._url = url
def fetch(self):
    from secrets import NYT_API_KEY
    params = urllib.urlencode({
        'api-key': NYT_API_KEY,
        'fq': "section_name:(\"World\" \"U.S.\")",
        'begin_date': datetime.strftime(self.date_dt, "%Y%m%d"),
        'end_date': datetime.strftime(self.next_date_dt, "%Y%m%d")
    })
    url = "https://api.nytimes.com/svc/search/v2/articlesearch.json?" + params
    logging.debug(url)
    response = urlfetch.fetch(url, method=urlfetch.GET)
    items = []
    IMAGE_BASE = "http://www.nytimes.com/"
    MIN_WIDTH = 300
    if response.status_code == 200:
        j_response = json.loads(response.content)
        results = j_response.get('response', {}).get('docs', [])
        for news in results:
            cat = news.get('news_desk')
            multimedia = news.get('multimedia')
            image = None
            for mm in multimedia:
                w = mm.get('width')
                if w > MIN_WIDTH:
                    image = IMAGE_BASE + mm.get('url')
            item = Item(
                svc=SERVICE.NYT_NEWS,
                title=news.get('headline', {}).get('main'),
                image=image,
                details=news.get('snippet'),
                link=news.get('web_url'),
                id=news.get('_id'),
                type=SERVICE.NEWS)
            items.append(item.json())
    return items
def get_routes( cls ):
    routes = [ webapp2.Route( cls.AUTHREQUEST,
                              handler = 'enki.handlersoauth.HandlerOAuthGoogle:auth_request',
                              methods = [ 'GET' ] ),
               webapp2.Route( cls.AUTHCALLBACK,
                              handler = 'enki.handlersoauth.HandlerOAuthGoogle:auth_callback',
                              methods = [ 'GET' ] ),
               ]
    return routes

def get_routes( cls ):
    routes = [ webapp2.Route( cls.AUTHREQUEST,
                              handler = 'enki.handlersoauth.HandlerOAuthFacebook:auth_request',
                              methods = [ 'GET' ] ),
               webapp2.Route( cls.AUTHCALLBACK,
                              handler = 'enki.handlersoauth.HandlerOAuthFacebook:auth_callback',
                              methods = [ 'GET' ] ),
               ]
    return routes

def get_routes( cls ):
    routes = [ webapp2.Route( cls.AUTHREQUEST,
                              handler = 'enki.handlersoauth.HandlerOAuthGithub:auth_request',
                              methods = [ 'GET' ] ),
               webapp2.Route( cls.AUTHCALLBACK,
                              handler = 'enki.handlersoauth.HandlerOAuthGithub:auth_callback',
                              methods = [ 'GET' ] ),
               ]
    return routes

def get_routes( cls ):
    routes = [ webapp2.Route( cls.AUTHREQUEST,
                              handler = 'enki.handlersoauth.HandlerOAuthSteam:auth_request',
                              methods = [ 'GET' ] ),
               webapp2.Route( cls.AUTHCALLBACK,
                              handler = 'enki.handlersoauth.HandlerOAuthSteam:auth_callback',
                              methods = [ 'GET' ] ),
               ]
    return routes

def get_routes( cls ):
    routes = [ webapp2.Route( cls.AUTHREQUEST,
                              handler = 'enki.handlersoauth.HandlerOAuthTwitter:auth_request',
                              methods = [ 'GET' ] ),
               webapp2.Route( cls.AUTHCALLBACK,
                              handler = 'enki.handlersoauth.HandlerOAuthTwitter:auth_callback',
                              methods = [ 'GET' ] ),
               ]
    return routes
def get_profile(screen_name='', twitter_id=''):
    """Returns a JSON text from the Twitter GET users/show API.

    See https://dev.twitter.com/rest/reference/get/users/show"""
    if screen_name != '':
        profile = get_profile_by_screen_name(screen_name)
    elif twitter_id != '':
        profile = get_profile_by_twitter_id(twitter_id)
    return profile
def get_profile_by_screen_name(screen_name):
    """Returns a JSON text from the Twitter GET users/show API.

    See https://dev.twitter.com/rest/reference/get/users/show"""
    return authenticated_get(
        'https://api.twitter.com/1.1/users/show.json?screen_name={}'.format(
            screen_name))

def get_profile_by_twitter_id(twitter_id):
    """Returns a JSON text from the Twitter GET users/show API.

    See https://dev.twitter.com/rest/reference/get/users/show"""
    return authenticated_get(
        'https://api.twitter.com/1.1/users/show.json?id={}'.format(
            twitter_id))

def get_timeline_by_screen_name(screen_name):
    """Returns a dict from the Twitter GET statuses/user_timeline API.

    See https://dev.twitter.com/rest/reference/get/statuses/user_timeline"""
    return authenticated_get(
        'https://api.twitter.com/1.1/statuses/user_timeline.json?count=90&'
        'screen_name={}'.format(screen_name))
def get_url(self, page, **args):
    """
    Returns one of the Facebook URLs (www.facebook.com/SOMEPAGE.php).
    Named arguments are passed as GET query string parameters.
    """
    return 'http://www.facebook.com/%s.php?%s' % (page, urllib.urlencode(args))
def update_article(access_token, item_id, action='favorite'):
    '''
    Favorite or archive (mark read) an article
    '''
    actions = json.dumps([
        {
            "action": action,
            "item_id": item_id,
            "time": str(int(tools.unixtime(ms=False)))
        }
    ])
    data = urllib.urlencode({
        'access_token': access_token,
        'consumer_key': POCKET_CONSUMER_KEY,
        'actions': actions
    })
    logging.debug(data)
    res = urlfetch.fetch(
        url=MODIFY_ENDPOINT + "?" + data,
        method=urlfetch.GET,
        validate_certificate=True)
    logging.debug(res.content)
    if res.status_code == 200:
        result = json.loads(res.content)
        ok = result.get('status', 0) == 1
        return ok
    else:
        logging.debug(res.headers)
        return False
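A hypothetical call (the access token and item id are placeholders) that archives an article and returns whether the service reported status == 1:

ok = update_article('pocket-access-token', 123456789, action='archive')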
def request(self, operation, url, data=None, headers=None):
    """Performs an HTTP call to the server, supports GET, POST, PUT, and
    DELETE.

    Usage example, perform an HTTP GET on http://www.google.com/:
      import atom.http
      client = atom.http.HttpClient()
      http_response = client.request('GET', 'http://www.google.com/')

    Args:
      operation: str The HTTP operation to be performed. This is usually one
          of 'GET', 'POST', 'PUT', or 'DELETE'
      data: filestream, list of parts, or other object which can be converted
          to a string. Should be set to None when performing a GET or DELETE.
          If data is a file-like object which can be read, this method will
          read a chunk of 100K bytes at a time and send them.
          If the data is a list of parts to be sent, each part will be
          evaluated and sent.
      url: The full URL to which the request should be sent. Can be a string
          or atom.url.Url.
      headers: dict of strings. HTTP headers which should be sent
          in the request.
    """
    all_headers = self.headers.copy()
    if headers:
        all_headers.update(headers)

    # Construct the full payload.
    # Assume that data is None or a string.
    data_str = data
    if data:
        if isinstance(data, list):
            # If data is a list of different objects, convert them all to
            # strings and join them together.
            converted_parts = [__ConvertDataPart(x) for x in data]
            data_str = ''.join(converted_parts)
        else:
            data_str = __ConvertDataPart(data)

    # If the list of headers does not include a Content-Length, attempt to
    # calculate it based on the data object.
    if data and 'Content-Length' not in all_headers:
        all_headers['Content-Length'] = len(data_str)

    # Set the content type to the default value if none was set.
    if 'Content-Type' not in all_headers:
        all_headers['Content-Type'] = 'application/atom+xml'

    # Lookup the urlfetch operation which corresponds to the desired HTTP verb.
    if operation == 'GET':
        method = urlfetch.GET
    elif operation == 'POST':
        method = urlfetch.POST
    elif operation == 'PUT':
        method = urlfetch.PUT
    elif operation == 'DELETE':
        method = urlfetch.DELETE
    else:
        method = None
    return HttpResponse(urlfetch.Fetch(url=str(url), payload=data_str,
                                       method=method, headers=all_headers))
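The if/elif ladder above is equivalent to the dict-based _method_map used by the connection shims earlier; a compact sketch covering the same four verbs (names are assumptions, not part of the original module):

_METHOD_MAP = {
    'GET': urlfetch.GET,
    'POST': urlfetch.POST,
    'PUT': urlfetch.PUT,
    'DELETE': urlfetch.DELETE,
}
method = _METHOD_MAP.get(operation)  # None for unsupported verbs, as in the ladder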
def search(self, tags=None, query=None, cloud=True, preview=True,
           limitby=(0, 100), orderby=None):
    if not self.can_search():
        return self.not_authorized()
    request = current.request
    content = CAT()
    if tags is None and query is None:
        form = FORM(INPUT(_name='q', requires=IS_NOT_EMPTY(),
                          value=request.vars.q),
                    INPUT(_type="submit", _value=current.T('Search')),
                    _method='GET')
        content.append(DIV(form, _class='w2p_wiki_form'))
        if request.vars.q:
            tags = [v.strip() for v in request.vars.q.split(',')]
            tags = [v.lower() for v in tags if v]
    if tags or query is not None:
        db = self.auth.db
        count = db.wiki_tag.wiki_page.count()
        fields = [db.wiki_page.id, db.wiki_page.slug,
                  db.wiki_page.title, db.wiki_page.tags,
                  db.wiki_page.can_read, db.wiki_page.can_edit]
        if preview:
            fields.append(db.wiki_page.body)
        if query is None:
            query = (db.wiki_page.id == db.wiki_tag.wiki_page) &\
                (db.wiki_tag.name.belongs(tags))
            query = query | db.wiki_page.title.contains(request.vars.q)
        if self.settings.restrict_search and not self.can_manage():
            query = query & (db.wiki_page.created_by == self.auth.user_id)
        pages = db(query).select(count, *fields,
                                 **dict(orderby=orderby or ~count,
                                        groupby=reduce(lambda a, b: a | b, fields),
                                        distinct=True,
                                        limitby=limitby))
        if request.extension in ('html', 'load'):
            if not pages:
                content.append(DIV(current.T("No results"),
                                   _class='w2p_wiki_form'))

            def link(t):
                return A(t, _href=URL(args='_search', vars=dict(q=t)))
            items = [DIV(H3(A(p.wiki_page.title,
                              _href=URL(args=p.wiki_page.slug))),
                         MARKMIN(self.first_paragraph(p.wiki_page))
                         if preview else '',
                         DIV(_class='w2p_wiki_tags',
                             *[link(t.strip()) for t in
                               p.wiki_page.tags or [] if t.strip()]),
                         _class='w2p_wiki_search_item')
                     for p in pages]
            content.append(DIV(_class='w2p_wiki_pages', *items))
        else:
            cloud = False
            content = [p.wiki_page.as_dict() for p in pages]
    elif cloud:
        content.append(self.cloud()['content'])
    if request.extension == 'load':
        return content
    return dict(content=content)
def search(self, tags=None, query=None, cloud=True, preview=True,
           limitby=(0, 100), orderby=None):
    if not self.can_search():
        return self.not_authorized()
    request = current.request
    content = CAT()
    if tags is None and query is None:
        form = FORM(INPUT(_name='q', requires=IS_NOT_EMPTY(),
                          value=request.vars.q),
                    INPUT(_type="submit", _value=current.T('Search')),
                    _method='GET')
        content.append(DIV(form, _class='w2p_wiki_form'))
        if request.vars.q:
            tags = [v.strip() for v in request.vars.q.split(',')]
            tags = [v.lower() for v in tags if v]
    if tags or query is not None:
        db = self.auth.db
        count = db.wiki_tag.wiki_page.count()
        fields = [db.wiki_page.id, db.wiki_page.slug,
                  db.wiki_page.title, db.wiki_page.tags,
                  db.wiki_page.can_read, db.wiki_page.can_edit]
        if preview:
            fields.append(db.wiki_page.body)
        if query is None:
            query = (db.wiki_page.id == db.wiki_tag.wiki_page) &\
                (db.wiki_tag.name.belongs(tags))
            query = query | db.wiki_page.title.contains(request.vars.q)
        if self.settings.restrict_search and not self.manage():
            query = query & (db.wiki_page.created_by == self.auth.user_id)
        pages = db(query).select(count, *fields,
                                 **dict(orderby=orderby or ~count,
                                        groupby=reduce(lambda a, b: a | b, fields),
                                        distinct=True,
                                        limitby=limitby))
        if request.extension in ('html', 'load'):
            if not pages:
                content.append(DIV(current.T("No results"),
                                   _class='w2p_wiki_form'))

            def link(t):
                return A(t, _href=URL(args='_search', vars=dict(q=t)))
            items = [DIV(H3(A(p.wiki_page.title,
                              _href=URL(args=p.wiki_page.slug))),
                         MARKMIN(self.first_paragraph(p.wiki_page))
                         if preview else '',
                         DIV(_class='w2p_wiki_tags',
                             *[link(t.strip()) for t in
                               p.wiki_page.tags or [] if t.strip()]),
                         _class='w2p_wiki_search_item')
                     for p in pages]
            content.append(DIV(_class='w2p_wiki_pages', *items))
        else:
            cloud = False
            content = [p.wiki_page.as_dict() for p in pages]
    elif cloud:
        content.append(self.cloud()['content'])
    if request.extension == 'load':
        return content
    return dict(content=content)
def search(self, tags=None, query=None, cloud=True, preview=True,
           limitby=(0, 100), orderby=None):
    if not self.can_search():
        return self.not_authorized()
    request = current.request
    content = CAT()
    if tags is None and query is None:
        form = FORM(INPUT(_name='q', requires=IS_NOT_EMPTY(),
                          value=request.vars.q),
                    INPUT(_type="submit", _value=current.T('Search')),
                    _method='GET')
        content.append(DIV(form, _class='w2p_wiki_form'))
        if request.vars.q:
            tags = [v.strip() for v in request.vars.q.split(',')]
            tags = [v.lower() for v in tags if v]
    if tags or query is not None:
        db = self.auth.db
        count = db.wiki_tag.wiki_page.count()
        fields = [db.wiki_page.id, db.wiki_page.slug,
                  db.wiki_page.title, db.wiki_page.tags,
                  db.wiki_page.can_read, db.wiki_page.can_edit]
        if preview:
            fields.append(db.wiki_page.body)
        if query is None:
            query = (db.wiki_page.id == db.wiki_tag.wiki_page) &\
                (db.wiki_tag.name.belongs(tags))
            query = query | db.wiki_page.title.contains(request.vars.q)
        if self.settings.restrict_search and not self.manage():
            query = query & (db.wiki_page.created_by == self.auth.user_id)
        pages = db(query).select(count, *fields,
                                 **dict(orderby=orderby or ~count,
                                        groupby=reduce(lambda a, b: a | b, fields),
                                        distinct=True,
                                        limitby=limitby))
        if request.extension in ('html', 'load'):
            if not pages:
                content.append(DIV(current.T("No results"),
                                   _class='w2p_wiki_form'))

            def link(t):
                return A(t, _href=URL(args='_search', vars=dict(q=t)))
            items = [DIV(H3(A(p.wiki_page.title,
                              _href=URL(args=p.wiki_page.slug))),
                         MARKMIN(self.first_paragraph(p.wiki_page))
                         if preview else '',
                         DIV(_class='w2p_wiki_tags',
                             *[link(t.strip()) for t in
                               p.wiki_page.tags or [] if t.strip()]),
                         _class='w2p_wiki_search_item')
                     for p in pages]
            content.append(DIV(_class='w2p_wiki_pages', *items))
        else:
            cloud = False
            content = [p.wiki_page.as_dict() for p in pages]
    elif cloud:
        content.append(self.cloud()['content'])
    if request.extension == 'load':
        return content
    return dict(content=content)
def auth_user(fn):
    """ Decorator to force user to be logged in with GAE """
    @functools.wraps(fn)
    def _wrapped(request, *args, **kwargs):
        temp_request = request
        bearer = request.META['HTTP_AUTHORIZATION']
        url = "https://www.googleapis.com/userinfo/v2/me"
        result = urlfetch.fetch(url=url, method=urlfetch.GET,
                                headers={"Authorization": bearer})
        contents = json.loads(result.content)
        gae_user = users.get_current_user()
        is_admin = users.is_current_user_admin()
        User = get_user_model()
        django_user = None
        try:
            logging.debug("Getting django user")
            django_user = User.objects.get(email=contents['email'])
        except User.DoesNotExist:
            logging.info("User does not exist in Montage. Checking pending users")
            try:
                pending_user = PendingUser.objects.get(email=contents['email'])
            except PendingUser.DoesNotExist:
                logging.info("No pending user record for this email")
                email = contents['email']
                user, created = get_user_model().objects.get_or_create(
                    email=email,
                    defaults={
                        'username': email.split('@')[0],
                        'is_active': True
                    })
                return user
            else:
                logging.info("Pending user record found. Activating user.")
                django_user = activate_pending_user(
                    pending_user, gae_user, is_admin)
        except AttributeError:
            return HttpResponseForbidden()
        else:
            logging.info("User found. Updating gaia_id and superuser status")
            request = temp_request
            # update_user(django_user, is_admin)
        if django_user:
            request.user = django_user
        else:
            return HttpResponseForbidden()
        return fn(request, *args, **kwargs)
    return _wrapped
def get_remote_app_id(remote_url, extra_headers=None):
    """Get the app_id from the remote_api endpoint.

    This also has the side effect of verifying that it is a remote_api
    endpoint.

    Args:
      remote_url: The url to the remote_api handler.
      extra_headers: Headers to send (for authentication).

    Returns:
      app_id: The app_id of the target app.

    Raises:
      FetchFailed: Urlfetch call failed.
      ConfigurationError: URLfetch succeeded but results were invalid.
    """
    rtok = str(random.random())[2:]
    url = remote_url + '?rtok=' + rtok
    if not extra_headers:
        extra_headers = {}
    if 'X-appcfg-api-version' not in extra_headers:
        extra_headers['X-appcfg-api-version'] = '1'
    try:
        urlfetch_response = urlfetch.fetch(url, None, urlfetch.GET,
                                           extra_headers,
                                           follow_redirects=False,
                                           deadline=10)
    except Exception, e:
        logging.exception('Fetch failed to %s', remote_url)
        raise FetchFailed('Fetch to %s failed: %r' % (remote_url, e))
    if urlfetch_response.status_code != 200:
        logging.error('Fetch failed to %s; Status %s; body %s',
                      remote_url,
                      urlfetch_response.status_code,
                      urlfetch_response.content)
        raise FetchFailed('Fetch to %s failed with status %s' %
                          (remote_url, urlfetch_response.status_code))
    response = urlfetch_response.content
    if not response.startswith('{'):
        logging.info('Response unparsable: %s', response)
        raise ConfigurationError(
            'Invalid response received from server: %s' % response)
    app_info = yaml.load(response)
    if not app_info or 'rtok' not in app_info or 'app_id' not in app_info:
        logging.info('Response unparsable: %s', response)
        raise ConfigurationError('Error parsing app_id lookup response')
    if str(app_info['rtok']) != rtok:
        logging.info('Response invalid token (expected %s): %s',
                     rtok, response)
        raise ConfigurationError('Token validation failed during app_id '
                                 'lookup. (sent %s, got %s)' %
                                 (repr(rtok), repr(app_info['rtok'])))
    return app_info['app_id']
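A hypothetical call (the host is a placeholder; /_ah/remote_api is the conventional mount point for the remote_api handler):

app_id = get_remote_app_id('https://your-app.appspot.com/_ah/remote_api')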
def auth_callback_provider( self ):
    # STEP 3
    oauth_verifier = self.request.get( 'oauth_verifier' )
    params = [( 'oauth_consumer_key', settings.secrets.CLIENT_ID_TWITTER ),
              ( 'oauth_nonce', webapp2_extras.security.generate_random_string( length = 42, pool = webapp2_extras.security.ALPHANUMERIC ).encode( 'utf-8' )),
              ( 'oauth_signature_method', "HMAC-SHA1" ),
              ( 'oauth_timestamp', str( int( time.time()))),
              ( 'oauth_token', self.session.get( 'twitter_oauth_token' )),
              ( 'oauth_version', "1.0" )]
    normalised_url = 'https://api.twitter.com/oauth/access_token/'
    oauth_signature = self.auth_sign( normalised_url, params,
                                      self.session.get( 'twitter_oauth_token_secret' ))
    params.append(( 'oauth_signature', oauth_signature ))
    params.append(( 'oauth_verifier', oauth_verifier ))
    url_params = enki.libutil.urlencode( params )
    result = self.urlfetch_safe( url = normalised_url, payload = url_params,
                                 method = urlfetch.POST )
    response = self.process_result_as_query_string( result )
    oauth_token = response.get( 'oauth_token' )
    oauth_token_secret = response.get( 'oauth_token_secret' )
    user_id = response.get( 'user_id' )
    if user_id and oauth_token:
        # get email address if we can
        verify_params = [( 'include_email', 'true' ),
                         ( 'include_entities', 'false' ),
                         ( 'oauth_consumer_key', settings.secrets.CLIENT_ID_TWITTER ),
                         ( 'oauth_nonce', webapp2_extras.security.generate_random_string( length = 42, pool = webapp2_extras.security.ALPHANUMERIC ).encode( 'utf-8' )),
                         ( 'oauth_signature_method', "HMAC-SHA1" ),
                         ( 'oauth_timestamp', str( int( time.time()))),
                         ( 'oauth_token', oauth_token ),
                         ( 'oauth_version', "1.0" ),
                         ( 'skip_status', 'true' )]
        verify_oauth_signature = self.auth_sign( 'https://api.twitter.com/1.1/account/verify_credentials.json',
                                                 verify_params, oauth_token_secret,
                                                 method_get = True )
        verify_params.append(( 'oauth_signature', verify_oauth_signature ))
        verify_url_params = enki.libutil.urlencode( verify_params )
        full_url = 'https://api.twitter.com/1.1/account/verify_credentials.json?' + verify_url_params
        verify_credentials_result_json = self.urlfetch_safe( url = full_url,
                                                             method = urlfetch.GET )
        verify_credentials_result = self.process_result_as_JSON( verify_credentials_result_json )
        response['email'] = verify_credentials_result['email']
        response['email_verified'] = True
        loginInfoSettings = { 'provider_uid': 'user_id',
                              'email': 'email',
                              'email_verified': 'email_verified' }
        loginInfo = self.process_login_info( loginInfoSettings, response )
        self.provider_authenticated_callback( loginInfo )
    else:
        self.abort( 401 )
    return
def get_books_on_shelf(user, shelf='currently-reading'):
    '''
    Return JSON array {title, author, isbn, image}
    '''
    user_id = user.get_integration_prop('goodreads_user_id')
    readables = []
    success = False
    if user_id:
        data = urllib.urlencode({
            'shelf': shelf,
            'key': GR_API_KEY,
            'v': 2
        })
        params = data
        url = "https://www.goodreads.com/review/list/%s.xml?%s" % (user_id, params)
        logging.debug("Fetching %s for %s" % (url, user))
        res = urlfetch.fetch(
            url=url,
            method=urlfetch.GET,
            validate_certificate=True)
        logging.debug(res.status_code)
        if res.status_code == 200:
            xml = res.content
            data = etree.parse(StringIO(xml))
            for r in data.getroot().find('reviews').findall('review'):
                book = r.find('book')
                isbn = book.find('isbn13').text
                image_url = book.find('image_url').text
                title = book.find('title').text
                authors = book.find('authors')
                link = book.find('link').text
                first_author = authors.find('author')
                if first_author is not None:
                    name = first_author.find('name')
                    if name is not None:
                        author = name.text
                r = Readable.CreateOrUpdate(user, isbn, title=title, url=link,
                                            source='goodreads',
                                            image_url=image_url, author=author,
                                            type=READABLE.BOOK, read=False)
                readables.append(r)
            success = True
    logging.debug("Putting %d readable(s)" % len(readables))
    ndb.put_multi(readables)
    Readable.put_sd_batch(readables)
    return (success, readables)
def search(self, tags=None, query=None, cloud=True, preview=True,
           limitby=(0, 100), orderby=None):
    if not self.can_search():
        return self.not_authorized()
    request = current.request
    content = CAT()
    if tags is None and query is None:
        form = FORM(INPUT(_name='q', requires=IS_NOT_EMPTY(),
                          value=request.vars.q),
                    INPUT(_type="submit", _value=current.T('Search')),
                    _method='GET')
        content.append(DIV(form, _class='w2p_wiki_form'))
        if request.vars.q:
            tags = [v.strip() for v in request.vars.q.split(',')]
            tags = [v.lower() for v in tags if v]
    if tags or query is not None:
        db = self.auth.db
        count = db.wiki_tag.wiki_page.count()
        fields = [db.wiki_page.id, db.wiki_page.slug,
                  db.wiki_page.title, db.wiki_page.tags,
                  db.wiki_page.can_read]
        if preview:
            fields.append(db.wiki_page.body)
        if query is None:
            query = (db.wiki_page.id == db.wiki_tag.wiki_page) &\
                (db.wiki_tag.name.belongs(tags))
            query = query | db.wiki_page.title.contains(request.vars.q)
        if self.settings.restrict_search and not self.manage():
            query = query & (db.wiki_page.created_by == self.auth.user_id)
        pages = db(query).select(count, *fields,
                                 **dict(orderby=orderby or ~count,
                                        groupby=reduce(lambda a, b: a | b, fields),
                                        distinct=True,
                                        limitby=limitby))
        if request.extension in ('html', 'load'):
            if not pages:
                content.append(DIV(current.T("No results"),
                                   _class='w2p_wiki_form'))

            def link(t):
                return A(t, _href=URL(args='_search', vars=dict(q=t)))
            items = [DIV(H3(A(p.wiki_page.title,
                              _href=URL(args=p.wiki_page.slug))),
                         MARKMIN(self.first_paragraph(p.wiki_page))
                         if preview else '',
                         DIV(_class='w2p_wiki_tags',
                             *[link(t.strip()) for t in
                               p.wiki_page.tags or [] if t.strip()]),
                         _class='w2p_wiki_search_item')
                     for p in pages]
            content.append(DIV(_class='w2p_wiki_pages', *items))
        else:
            cloud = False
            content = [p.wiki_page.as_dict() for p in pages]
    elif cloud:
        content.append(self.cloud()['content'])
    if request.extension == 'load':
        return content
    return dict(content=content)