The following 8 code examples, extracted from open source Python projects, illustrate how to use urllib.request.unquote().
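Before the project excerpts, here is a minimal, self-contained sketch of what unquote() does. Note that the documented home of unquote in Python 3 is urllib.parse; urllib.request merely re-exports it, which is why both import paths work.

from urllib.request import unquote  # equivalently: from urllib.parse import unquote

# Percent-escapes are decoded back to text (UTF-8 by default).
print(unquote('/wiki/Guido%20van%20Rossum'))  # -> /wiki/Guido van Rossum
print(unquote('caf%C3%A9'))                   # -> café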
Example 1: decoding a pass_ticket field extracted from a login response.

def parse_new_login_page(cls, res_xml):
    """Parse new login page xml response."""
    # xml2dict is a project-local helper; the cls receiver suggests this
    # is defined as a classmethod in the original project.
    data = xml2dict(res_xml)['error']
    if 'pass_ticket' in data:
        data['pass_ticket'] = unquote(data['pass_ticket'])
    return data
Example 2: unquoting each space-separated token of a response.

def _unquote(self, response):
    # Python 2 spelling of the function; see the compatibility note
    # after Example 3.
    return ' '.join(urllib.unquote(s) for s in response.split(' '))
Example 3: splitting a response into unescaped key/value pairs.

def __pairs_from(self, response):
    """Split and unescape a response"""
    def demunge(string):
        s = urllib.unquote(string)
        return tuple(s.split(':', 1))
    demunged = map(demunge, response.split(' '))
    # Keep only tokens that actually contained a ':' separator.
    return [d for d in demunged if len(d) == 2]
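Examples 2 and 3 use the Python 2 location of the function, urllib.unquote. A common compatibility shim (a generic idiom, not taken from either project) keeps such code running on both major versions:

try:
    from urllib.parse import unquote  # Python 3
except ImportError:
    from urllib import unquote        # Python 2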
Example 4: a chat-bot command that unwraps and unquotes the target URL of the first Google result.

def google(message, keywords):
    """Search Google and post the first result.

    https://github.com/llimllib/limbo/blob/master/limbo/plugins/google.py
    """
    if keywords == 'help':
        return
    query = quote(keywords)
    url = "https://encrypted.google.com/search?q={0}".format(query)
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    answer = soup.findAll("h3", attrs={"class": "r"})
    if not answer:
        botsend(message, "No search results for `{}`".format(keywords))
        return
    try:
        # Result hrefs look like /url?q=<target>&...; unwrap and unquote.
        _, url = answer[0].a['href'].split('=', 1)
        url, _ = url.split('&', 1)
        botsend(message, unquote(url))
    except IndexError:
        # in this case there is a first answer without a link, which is a
        # google response! Let's grab it and display it to the user.
        return ' '.join(answer[0].stripped_strings)
Example 5: unquoting character data collected from an XML tree.

def xml_get_text(_node):
    """Helper function to get character data from an XML tree"""
    rc = list()
    for node in _node.childNodes:
        if node.nodeType == node.TEXT_NODE:
            rc.append(node.data)
    return unquote(''.join(rc))
Example 6: scraping Google result links and unquoting each extracted URL.

def search(self, search_term):
    # Perform the search and get the text of the page.
    params = {'q': search_term, 'btnG': 'Google Search'}
    text = self.connection.get(GoogleSearch.google_url, params)
    if not text:
        return None
    # Pull out the links of results.
    start = text.find('<div id="res">')
    end = text.find('<div id="foot">')
    if text[start:end] == '':
        self.logger.warn("No results for `{}`".format(search_term))
        return None
    links = []
    text = text[start:end]
    start = 0
    end = 0
    while start > -1 and end > -1:
        start = text.find('<a href="/url?q=')
        text = text[start + len('<a href="/url?q='):]
        end = text.find('&sa=U&ei=')
        if start > -1 and end > -1:
            link = unquote(text[0:end])
            text = text[end:len(text)]
            if link.find('http') == 0:
                links.append(link)
    # If necessary, filter the links based on content.
    if len(self.restrict_to) > 0:
        filtered_links = []
        for link in links:
            for domain in self.restrict_to:
                if domain in link:
                    filtered_links.append(link)
        links = list(set(filtered_links))
    return links
Example 7: decoding a Play framework flash cookie.

def parse_play_flash_cookie(response):
    flash_cookie = response.cookies['PLAY_FLASH']
    messageType, message = flash_cookie.split("=")
    # Format message into user friendly string. urllib2 (Python 2)
    # re-exports urllib.unquote; note that unquote() leaves '+' alone,
    # hence the explicit replace.
    message = urllib2.unquote(message).replace("+", " ")
    # Discern error disposition
    if messageType == "dominoFlashError":
        error = True
    else:
        error = False
    return dict(messageType=messageType, message=message, error=error)
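The explicit .replace("+", " ") in Example 7 is needed because unquote() does not treat '+' as an encoded space. urllib.parse.unquote_plus() performs both decodings in one call; a minimal sketch with a hypothetical cookie value:

from urllib.parse import unquote, unquote_plus

value = "Saved+%22draft%22"   # hypothetical form-encoded message
print(unquote(value))         # -> Saved+"draft"
print(unquote_plus(value))    # -> Saved "draft"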
Example 8: an OAuth 1.0a handshake with Flickr that unquotes the user's full name from the access-token response.

def fr_auth(self):
    data = self.SCLASS.SESSION
    r = requests.get(fr_generate_params(FLICKR_REQUEST_TOKEN,
                                        {"oauth_callback": CALLBACK}, False))
    log.debug(r.text)
    response = createJSON(r.text)
    if "oauth_problem" in response:
        log.error(response["oauth_problem"])
        return 1
    # only temporary tokens
    data["fr_token"] = response["oauth_token"]
    data["fr_token_secret"] = response["oauth_token_secret"]
    url = "%s?oauth_token=%s" % (FLICKR_OAUTH, data["fr_token"])
    code = self.get_authorization_code(url).split("=")[-1].split("#")[0]
    log.info(i18n_oauth_code, code)
    r = requests.get(fr_generate_params(FLICKR_ACCESS_TOKEN, {
        "oauth_verifier": code,
        "oauth_token": data["fr_token"]
    }, False, data["fr_token_secret"]))
    log.debug(r.text)
    response = createJSON(r.text)
    if "oauth_problem" in response:
        log.error(response["oauth_problem"])
        return 1
    data["fr_token"] = response["oauth_token"]
    data["fr_token_secret"] = response["oauth_token_secret"]
    data["fr_fullname"] = unquote(response["fullname"])
    return self.SCLASS.session_write(data)