The following 14 code examples, extracted from open-source Python projects, illustrate how to use urllib.parse.parse().
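The examples below mainly exercise urllib.parse.urlencode together with related helpers from the standard-library urllib.parse module. As a quick orientation, here is a minimal, self-contained sketch of those stdlib helpers; the URL used is made up for illustration.

from urllib.parse import urlencode, urlparse, parse_qs

# Build a query string from a dict (values are converted to strings and quoted).
query = urlencode({'q': 'python', 'page': 2})        # 'q=python&page=2'
url = 'https://example.com/search?' + query

# Split the URL back into components and parse the query string into a dict of lists.
parts = urlparse(url)
print(parts.netloc, parts.path, parts.query)
print(parse_qs(parts.query))                         # {'q': ['python'], 'page': ['2']}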
def __init__(self, var):
    #: The original string that comes through with the variable
    self.original = var
    #: The operator for the variable
    self.operator = ''
    #: List of safe characters when quoting the string
    self.safe = ''
    #: List of variables in this variable
    self.variables = []
    #: List of variable names
    self.variable_names = []
    #: List of defaults passed in
    self.defaults = {}
    # Parse the variable itself.
    self.parse()
    self.post_parse()
def saveFailedTest(data, expect, filename):
    """Upload failed test images to web server to allow CI test debugging.
    """
    commit = runSubprocess(['git', 'rev-parse', 'HEAD'])
    name = filename.split('/')
    name.insert(-1, commit.strip())
    filename = '/'.join(name)
    host = 'data.pyqtgraph.org'

    # concatenate data, expect, and diff into a single image
    ds = data.shape
    es = expect.shape
    shape = (max(ds[0], es[0]) + 4, ds[1] + es[1] + 8 + max(ds[1], es[1]), 4)
    img = np.empty(shape, dtype=np.ubyte)
    img[..., :3] = 100
    img[..., 3] = 255
    img[2:2+ds[0], 2:2+ds[1], :ds[2]] = data
    img[2:2+es[0], ds[1]+4:ds[1]+4+es[1], :es[2]] = expect

    diff = makeDiffImage(data, expect)
    img[2:2+diff.shape[0], -diff.shape[1]-2:-2] = diff

    png = makePng(img)
    conn = httplib.HTTPConnection(host)
    req = urllib.urlencode({'name': filename, 'data': base64.b64encode(png)})
    conn.request('POST', '/upload.py', req)
    response = conn.getresponse().read()
    conn.close()
    print("\nImage comparison failed. Test result: %s %s   Expected result: "
          "%s %s" % (data.shape, data.dtype, expect.shape, expect.dtype))
    print("Uploaded to: \nhttp://%s/data/%s" % (host, filename))
    if not response.startswith(b'OK'):
        print("WARNING: Error uploading data to %s" % host)
        print(response)
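The example above uses the Python 2 names httplib and urllib.urlencode; on Python 3 the equivalent calls live in http.client and urllib.parse. A minimal sketch of the same POST-upload pattern under that assumption, with placeholder payload values:

import base64
import http.client
import urllib.parse

png = b'...'                                          # placeholder for the PNG bytes built above
body = urllib.parse.urlencode({
    'name': 'test/images/example.png',                # hypothetical filename
    'data': base64.b64encode(png).decode('ascii'),    # decode so urlencode sees a str, not bytes
})
conn = http.client.HTTPConnection('data.pyqtgraph.org')
conn.request('POST', '/upload.py', body,
             {'Content-Type': 'application/x-www-form-urlencoded'})
print(conn.getresponse().read())
conn.close()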
def parse_args():
    # parse the arguments
    parser = argparse.ArgumentParser(epilog='\tExample: \r\npython ' + sys.argv[0] + " -d google.com")
    parser.error = parser_error
    parser._optionals.title = "OPTIONS"
    parser.add_argument('-d', '--domain', help="Domain name to enumerate it's subdomains", required=True)
    parser.add_argument('-b', '--bruteforce', help='Enable the subbrute bruteforce module', nargs='?', default=False)
    parser.add_argument('-p', '--ports', help='Scan the found subdomains against specified tcp ports')
    parser.add_argument('-v', '--verbose', help='Enable Verbosity and display results in realtime', nargs='?', default=False)
    parser.add_argument('-t', '--threads', help='Number of threads to use for subbrute bruteforce', type=int, default=30)
    parser.add_argument('-e', '--engines', help='Specify a comma-separated list of search engines')
    parser.add_argument('-o', '--output', help='Save the results to text file')
    return parser.parse_args()
def _normalize(d):
    '''
    The parse function above generates output with lists encoded as dicts,
    i.e. {'abc': {0: 'xyz', 1: 'pqr'}}. This function normalizes that into
    the proper data type, i.e. {'abc': ['xyz', 'pqr']}.

    Note: if a dict's keys start at 10, 11 etc., this function won't fill
    in the blanks; e.g. {'abc': {10: 'xyz', 12: 'pqr'}} will be converted
    to {'abc': ['xyz', 'pqr']}.
    '''
    newd = {}
    if not isinstance(d, dict):
        return d
    # if dictionary, iterate over each element and append to newd
    for k, v in six.iteritems(d):
        if isinstance(v, dict):
            first_key = next(iter(six.viewkeys(v)))
            if isinstance(first_key, int):
                temp_new = []
                for k1, v1 in v.items():
                    temp_new.append(_normalize(v1))
                newd[k] = temp_new
            elif first_key == '':
                newd[k] = list(v.values())[0]  # list() so this also works on Python 3
            else:
                newd[k] = _normalize(v)
        else:
            newd[k] = v
    return newd
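A quick usage sketch of _normalize as documented above, assuming the function is importable and that the six package it depends on is installed:

# Hypothetical input matching the docstring's example.
nested = {'abc': {0: 'xyz', 1: 'pqr'}}
print(_normalize(nested))   # expected: {'abc': ['xyz', 'pqr']}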
def query(self, query_=None, **kwargs):
    import requests
    query = query_.copy() if query_ else dict()
    query.update(kwargs)
    query['format'] = 'json'
    if 'from_' in query:
        query['from'] = query.pop('from_')
    qs = urllib.parse.urlencode(query)
    url = "%s?%s" % (self._url, qs)
    return requests.get(url).json()
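This method builds a GET URL with urllib.parse.urlencode, mapping the reserved-word-safe keyword from_ back to the API's from parameter. A minimal sketch of the same query-string construction, with made-up parameter values:

import urllib.parse

params = {'format': 'json', 'q': 'test'}
params['from'] = params.pop('from_', '2020-01-01')   # hypothetical date value
print(urllib.parse.urlencode(params))
# e.g. format=json&q=test&from=2020-01-01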
def parse(self):
    """Parse the variable.

    This finds the:
        - operator,
        - set of safe characters,
        - variables, and
        - defaults.
    """
    var_list = self.original
    if self.original[0] in URIVariable.operators:
        self.operator = self.original[0]
        var_list = self.original[1:]

    if self.operator in URIVariable.operators[:2]:
        self.safe = URIVariable.reserved

    var_list = var_list.split(',')

    for var in var_list:
        default_val = None
        name = var
        if '=' in var:
            name, default_val = tuple(var.split('=', 1))

        explode = False
        if name.endswith('*'):
            explode = True
            name = name[:-1]

        prefix = None
        if ':' in name:
            name, prefix = tuple(name.split(':', 1))
            prefix = int(prefix)

        if default_val:
            self.defaults[name] = default_val

        self.variables.append(
            (name, {'explode': explode, 'prefix': prefix})
        )

    self.variable_names = [varname for (varname, _) in self.variables]
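The __init__ and parse methods above appear to belong to a URIVariable class from the uritemplate package (an RFC 6570 URI-template implementation). Assuming that package, a typical end-to-end use looks roughly like this; the template URL is made up:

# Rough sketch assuming the `uritemplate` package is installed.
from uritemplate import URITemplate

template = URITemplate('https://example.com/search{?q,count}')
print(template.expand(q='python', count='5'))
# expected: https://example.com/search?q=python&count=5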
def parse(query_string, unquote=True, normalized=False, encoding=DEFAULT_ENCODING):
    '''
    Main parse function
    @param query_string:
    @param unquote: unquote html query string ?
    @param encoding: An optional encoding used to decode the keys and values.
                     Defaults to utf-8, which the W3C declares as a default in
                     the W3C algorithm for encoding.
                     @see http://www.w3.org/TR/html5/forms.html#application/x-www-form-urlencoded-encoding-algorithm
    @param normalized: parse number key in dict to proper list ?
    '''
    mydict = {}
    plist = []
    if query_string == "":
        return mydict

    if type(query_string) == bytes:
        query_string = query_string.decode()

    for element in query_string.split("&"):
        try:
            if unquote:
                (var, val) = element.split("=")
                if sys.version_info[0] == 2:
                    var = var.encode('ascii')
                    val = val.encode('ascii')
                var = urllib.unquote_plus(var)
                val = urllib.unquote_plus(val)
            else:
                (var, val) = element.split("=")
        except ValueError:
            raise MalformedQueryStringError
        if encoding:
            var = var.decode(encoding)
            val = val.decode(encoding)
        plist.append(parser_helper(var, val))

    for di in plist:
        (k, v) = di.popitem()
        tempdict = mydict
        while k in tempdict and type(v) is dict:
            tempdict = tempdict[k]
            (k, v) = v.popitem()
        if k in tempdict and type(tempdict[k]).__name__ == 'list':
            tempdict[k].append(v)
        elif k in tempdict:
            tempdict[k] = [tempdict[k], v]
        else:
            tempdict[k] = v

    if normalized == True:
        return _normalize(mydict)
    return mydict
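For comparison, the standard library's urllib.parse.parse_qs performs a similar (flat) query-string parse, including plus-sign unquoting, without the nested-key handling implemented by the parser_helper/_normalize machinery above. A minimal sketch:

from urllib.parse import parse_qs

print(parse_qs('a=1&a=2&b=hello+world'))
# {'a': ['1', '2'], 'b': ['hello world']}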
def __http_end(self):
    """ Close the HTTPS connection and parse the JSON response. """
    # make sure we add a final empty chunk for chunked encoding
    if self.__chunked:
        self.__http_add("")

    # get the response code and content
    http_response = self.__conn.getresponse()
    content = http_response.read()
    self.__debug("RESPONSE %s" % http_response.status)
    self.__debug("CONTENT %s" % content)

    # create a Status() object to describe the HTTP success/error
    status = Status(http_response.status)
    if status.status_code >= 300:
        status.error_message = http_response.reason
    self.__conn.close()

    # try to parse the server result as a JSON response
    try:
        import json
        content = json.loads(content.decode("utf-8"))
        # parse out errors reported back in the JSON
        error = content.get('error')
        if error:
            status.error_message = error.get('message', status.error_message)
            status.status_code = error.get('status', status.status_code)
            content = {}
    except Exception as e:
        self.__error("Failed to parse JSON response: %s" % content)
        status.error_message = content.strip()
        content = {}

    # convert the JSON response body to our Response object
    response = self.__json_to_response(content)
    response.status = status

    # save the last response to remember settings
    self.__last_response = response
    return response
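The method above reads an HTTP response and decodes its body as JSON, falling back to the raw text on failure. A standalone sketch of that pattern using only the standard library; the host and path are placeholders:

import http.client
import json

conn = http.client.HTTPSConnection('api.example.com')   # hypothetical host
conn.request('GET', '/status')                           # hypothetical path
resp = conn.getresponse()
raw = resp.read()
try:
    body = json.loads(raw.decode('utf-8'))               # parse the JSON body
except ValueError:
    body = {}
    print('Failed to parse JSON response: %r' % raw)
print(resp.status, body)
conn.close()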