The following 50 code examples, extracted from open-source Python projects, illustrate how to use urllib.unquote().
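For orientation before the examples: in Python 2, urllib.unquote() replaces %xx escapes in a string with the bytes they encode (Python 3 moved it to urllib.parse.unquote()). A minimal sketch of the typical behavior, including the common '+' pitfall:

# Python 2
import urllib

# %xx escapes are decoded back to the bytes they encode.
print urllib.unquote('/search%3Fq%3Dcaf%C3%A9')   # -> '/search?q=caf\xc3\xa9' (UTF-8 bytes)
print urllib.unquote('a%20b%3Dc')                 # -> 'a b=c'

# unquote() leaves '+' alone; form-encoded query strings need unquote_plus().
print urllib.unquote('a+b%21')                    # -> 'a+b!'
print urllib.unquote_plus('a+b%21')               # -> 'a b!'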
def can_fetch(self, useragent, url):
    """using the parsed robots.txt decide if useragent can fetch url"""
    if self.disallow_all:
        return False
    if self.allow_all:
        return True
    # search for given user agent matches
    # the first match counts
    parsed_url = urlparse.urlparse(urllib.unquote(url))
    url = urlparse.urlunparse(('', '', parsed_url.path,
        parsed_url.params, parsed_url.query, parsed_url.fragment))
    url = urllib.quote(url)
    if not url:
        url = "/"
    for entry in self.entries:
        if entry.applies_to(useragent):
            return entry.allowance(url)
    # try the default entry last
    if self.default_entry:
        return self.default_entry.allowance(url)
    # agent not found ==> access granted
    return True
def do_POST(self):
    dc = self.IFACE_CLASS
    uri = urlparse.urljoin(self.get_baseuri(dc), self.path)
    uri = urllib.unquote(uri)
    dbname, dburi = TrytonDAVInterface.get_dburi(uri)
    if dburi.startswith('Calendars'):
        # read the body
        body = None
        if 'Content-Length' in self.headers:
            l = self.headers['Content-Length']
            body = self.rfile.read(atoi(l))
        ct = None
        if 'Content-Type' in self.headers:
            ct = self.headers['Content-Type']
        try:
            DATA = '%s\n' % dc._get_caldav_post(uri, body, ct)
        except DAV_Error, exception:
            ec, _ = exception
            return self.send_status(ec)
        self.send_body(DATA, 200, 'OK', 'OK')
        return
    return _prev_do_POST(self)
def _entity_path(self, state):
    """Calculate the path to an entity to be returned.

    *state* should be the dictionary returned by
    :func:`_parse_atom_entry`. :func:`_entity_path` extracts the
    link to this entity from *state*, and strips all the namespace
    prefixes from it to leave only the relative path of the entity
    itself, sans namespace.

    :rtype: ``string``
    :return: an absolute path
    """
    # This has been factored out so that it can be easily
    # overloaded by Configurations, which has to switch its
    # entities' endpoints from its own properties/ to configs/.
    raw_path = urllib.unquote(state.links.alternate)
    if 'servicesNS/' in raw_path:
        return _trailing(raw_path, 'servicesNS/', '/', '/')
    elif 'services/' in raw_path:
        return _trailing(raw_path, 'services/')
    else:
        return raw_path
def read(self, ifile):
    """ Reads an input header from an input file.

    The input header is read as a sequence of *<name>***:***<value>* pairs
    separated by a newline. The end of the input header is signalled by an
    empty line or an end-of-file.

    :param ifile: File-like object that supports iteration over lines.
    """
    name, value = None, None

    for line in ifile:
        if line == '\n':
            break
        item = line.split(':', 1)
        if len(item) == 2:
            # start of a new item
            if name is not None:
                self[name] = value[:-1]  # value sans trailing newline
            name, value = item[0], unquote(item[1])
        elif name is not None:
            # continuation of the current item
            value += unquote(line)

    if name is not None:
        self[name] = value[:-1] if value[-1] == '\n' else value
def deletethread(project, thread_id):
    thread = Thread.get_by_id(thread_id)
    if Comment.query.filter_by(thread_id=thread_id).first():
        flash(_('Thread is not empty'), 'error')
    else:
        thread = Thread.query.filter_by(id=thread_id).first()
        if not current_user.is_authenticated:
            flash(_('You must be logged in to delete a thread'), 'error')
        else:
            if (current_user != thread.owner
                    and current_user != project.get_master().owner):
                flash(_('You are not allowed to delete this thread'), 'error')
            else:
                thread.delete()
                db.session.commit()
                flash(_('Thread successfully deleted'), 'info')
    if 'return_url' in request.args:
        return redirect(urllib.unquote(request.args['return_url']))
    else:
        return redirect(url_for('branches.view', project=project.name,
                                branch='master', filename='index'))
def editcomment(project, comment_id):
    comment = Comment.get_by_id(comment_id)
    form = CommentForm(request.form, comment=comment.content)
    if current_user != comment.owner:
        flash(_('You are not allowed to edit this comment'), 'error')
        if 'return_url' in request.args:
            return redirect(urllib.unquote(request.args['return_url']))
        else:
            return redirect(url_for('branches.view', project=project.name,
                                    branch='master', filename='index'))
    if request.method == 'POST' and form.validate():
        comment.content = form.comment.data
        db.session.commit()
        flash(_('Comment modified successfully'), 'info')
        if 'return_url' in request.args:
            return redirect(urllib.unquote(request.args['return_url']))
        else:
            return redirect(url_for('branches.view', project=project.name,
                                    branch='master', filename='index'))
    threads = (Thread.query.filter_by(id=comment.thread.id)
               .order_by(desc(Thread.posted_at)))
    return render_template('threads/newcomment.html', form=form)
def deletecomment(project, comment_id):
    comment = Comment.get_by_id(comment_id)
    if comment.has_replies():
        flash(_('This comment has replies and cannot be deleted'), 'error')
    else:
        if not current_user.is_authenticated:
            flash(_('You must be logged in to delete a comment'), 'error')
        else:
            if (current_user != comment.owner
                    and current_user != project.get_master().owner):
                flash(_('You are not allowed '
                        'to delete this thread'), 'error')
            else:
                comment.delete()
                db.session.commit()
                flash(_('Comment successfully deleted'), 'info')
    if 'return_url' in request.args:
        return redirect(urllib.unquote(request.args['return_url']))
    else:
        return redirect(url_for('branches.view', project=project.name,
                                branch='master', filename='index'))
def _header_to_id(self, header):
    """Convert a Content-ID header value to an id.

    Presumes the Content-ID header conforms to the format that
    _id_to_header() returns.

    Args:
      header: string, Content-ID header value.

    Returns:
      The extracted id value.

    Raises:
      BatchError if the header is not in the expected format.
    """
    if header[0] != '<' or header[-1] != '>':
        raise BatchError("Invalid value for Content-ID: %s" % header)
    if '+' not in header:
        raise BatchError("Invalid value for Content-ID: %s" % header)
    base, id_ = header[1:-1].rsplit('+', 1)
    return urllib.unquote(id_)
def verifySignature(self):
    key = RSA.importKey(open(self.raven_public_key).read())
    # Compile the parts to hash together
    parts = self.rav_str.split("!")
    parts.pop()  # Remove the last two items related to signing
    parts.pop()
    to_hash = "!".join(parts)
    # Now hash it and verify
    our_hash = SHA.new(to_hash)
    #print our_hash
    verifier = PKCS1_v1_5.new(key)
    # Obtain the correct form of the signature
    signature = urllib.unquote(self.raven_signature)
    signature = signature.replace("-", "+")
    signature = signature.replace(".", "/")
    signature = signature.replace("_", "=")
    signature = base64.b64decode(signature)
    if verifier.verify(our_hash, signature):
        return True
    else:
        return False
def translate_path(self, path):
    """Translate a /-separated PATH to the local filename syntax.

    Components that mean special things to the local file system
    (e.g. drive or directory names) are ignored.  (XXX They should
    probably be diagnosed.)
    """
    # abandon query parameters
    path = path.split('?', 1)[0]
    path = path.split('#', 1)[0]
    path = posixpath.normpath(urllib.unquote(path))
    words = path.split('/')
    words = filter(None, words)
    path = config.get('jsonrpc', 'data')
    for word in words:
        drive, word = os.path.splitdrive(word)
        head, word = os.path.split(word)
        if word in (os.curdir, os.pardir):
            continue
        path = os.path.join(path, word)
    return path
def parse_qs(qs, keep_blank_values=0, strict_parsing=0, unquote=unquote):
    """like cgi.parse_qs, only with custom unquote function"""
    d = {}
    items = [s2 for s1 in qs.split("&") for s2 in s1.split(";")]
    for item in items:
        try:
            k, v = item.split("=", 1)
        except ValueError:
            if strict_parsing:
                raise
            continue
        if v or keep_blank_values:
            k = unquote(k.replace("+", " "))
            v = unquote(v.replace("+", " "))
            if k in d:
                d[k].append(v)
            else:
                d[k] = [v]
    return d
def process(self):
    "Process a request."
    # get site from channel
    self.site = self.channel.site

    # set various default headers
    self.setHeader('server', version)
    self.setHeader('date', http.datetimeToString())
    self.setHeader('content-type', "text/html")

    # Resource Identification
    self.prepath = []
    self.postpath = map(unquote, string.split(self.path[1:], '/'))
    try:
        resrc = self.site.getResourceFor(self)
        self.render(resrc)
    except:
        self.processingFailed(failure.Failure())
def translate_path(self, path):
    """Translate a /-separated PATH to the local filename syntax.

    Components that mean special things to the local file system
    (e.g. drive or directory names) are ignored.  (XXX They should
    probably be diagnosed.)
    """
    # abandon query parameters
    path = path.split('?', 1)[0]
    path = path.split('#', 1)[0]
    # Don't forget explicit trailing slash when normalizing. Issue17324
    trailing_slash = path.rstrip().endswith('/')
    path = posixpath.normpath(urllib.unquote(path))
    words = path.split('/')
    words = filter(None, words)
    path = os.getcwd()
    for word in words:
        drive, word = os.path.splitdrive(word)
        head, word = os.path.split(word)
        if word in (os.curdir, os.pardir):
            continue
        path = os.path.join(path, word)
    if trailing_slash:
        path += '/'
    return path
def is_cgi(self):
    """Test whether self.path corresponds to a CGI script.

    Returns True and updates the cgi_info attribute to the tuple
    (dir, rest) if self.path requires running a CGI script.
    Returns False otherwise.

    If any exception is raised, the caller should assume that
    self.path was rejected as invalid and act accordingly.

    The default implementation tests whether the normalized url
    path begins with one of the strings in self.cgi_directories
    (and the next character is a '/' or the end of the string).
    """
    collapsed_path = _url_collapse_path(urllib.unquote(self.path))
    dir_sep = collapsed_path.find('/', 1)
    head, tail = collapsed_path[:dir_sep], collapsed_path[dir_sep+1:]
    if head in self.cgi_directories:
        self.cgi_info = head, tail
        return True
    return False
def get_wxjs_config(url, wx_tool):
    js_api_list = []
    try:
        obj = Config.objects.get(kind=Config.KIND_JS_API_LIST)
        value = json.loads(obj.value)
        js_api_list = value.split()
    except Config.DoesNotExist:
        pass
    url = unquote(url)
    param = {
        "debug": False,
        "jsApiList": js_api_list,
        "url": url
    }
    config = wx_tool.get_js_config(param)
    return config
def do_GET(self):
    if "?payload" in self.path:
        query = urllib.splitquery(self.path)
        action = query[1].split('=')[1]
        print action
        action = urllib.unquote(action)
        print action
        try:
            x = cPickle.loads(action)  # string argv
            content = "command executed"
        except Exception, e:
            print e
            content = e
    else:
        content = "hello World"
    self.send_response(200)
    self.send_header("Content-type", "text/html")
    self.end_headers()
    self.wfile.write("<html>")
    self.wfile.write(" %s " % content)
    self.wfile.write("</html>")
def update_parameter(request):
    if request.method == 'GET':
        from urllib import unquote
        update_parameter_form = StepManipulateForm(request.GET)
        if update_parameter_form.is_valid():
            cd = update_parameter_form.cleaned_data
            step = Protocol.objects.get(id=cd['id'])
            if (step.check_owner(request.user.id)
                    or request.user.is_superuser):
                step.update_parameter(unquote(cd['parameter']))
                step.save()
                return success('Your step has been updated.')
            else:
                return error('You are not the owner of the step.')
        else:
            return error(str(update_parameter_form.errors))
    else:
        return error('Method error')
def update_reference(request):
    if request.method == 'GET':
        from urllib import unquote
        update_ref_form = RefManipulateForm(request.GET)
        if update_ref_form.is_valid():
            cd = update_ref_form.cleaned_data
            ref = References.objects.get(id=cd['id'])
            if (ref.check_owner(request.user.id)
                    or request.user.is_superuser):
                ref.path = unquote(cd['path'])
                ref.save()
                return success('Your reference has been updated.')
            else:
                return error('You are not the owner of the reference.')
        else:
            return error(str(update_ref_form.errors))
    else:
        return error('Method error')
def update_step_order(request):
    if request.method == 'GET':
        from urllib import unquote
        update_order_form = StepOrderManipulateForm(request.GET)
        if update_order_form.is_valid():
            cd = update_order_form.cleaned_data
            relations = list(filter(None, cd['step_order'].split(';')))
            error_tag = 0
            for relation in relations:
                step_id, new_order = relation.split('=')
                step = Protocol.objects.get(id=int(step_id),
                                            parent=int(cd['protocol']))
                if (step.check_owner(request.user.id)
                        or request.user.is_superuser):
                    step.update_order(int(new_order))
                    step.save()
                else:
                    return error('You are not the owner of the step.')
            if not error_tag:
                return success('Your step has been updated.')
        else:
            return error(str(update_order_form.errors))
    else:
        return error('Method error')
def oauth_token_info_from_url(url):
    """Extracts an OAuth access token from the redirected page's URL.

    Returns:
      A tuple of strings containing the OAuth token and the OAuth verifier
      which need to be sent when upgrading a request token to an access
      token.
    """
    if isinstance(url, (str, unicode)):
        url = atom.http_core.Uri.parse_uri(url)
    token = None
    verifier = None
    if 'oauth_token' in url.query:
        token = urllib.unquote(url.query['oauth_token'])
    if 'oauth_verifier' in url.query:
        verifier = urllib.unquote(url.query['oauth_verifier'])
    return (token, verifier)
def make_search_form(*args, **kwargs):
    """Factory that instantiates one of the search forms below."""
    request = kwargs.pop('request', None)
    if request is not None:
        sparams_cookie = request.COOKIES.get('pootle-search')
        if sparams_cookie:
            import json
            import urllib
            try:
                initial_sparams = json.loads(urllib.unquote(sparams_cookie))
            except ValueError:
                pass
            else:
                if (isinstance(initial_sparams, dict)
                        and 'sfields' in initial_sparams):
                    kwargs.update({
                        'initial': initial_sparams,
                    })
    return SearchForm(*args, **kwargs)
def get_download_url(self):
    # fetch and return dict
    resp = requests.get('http://www.kirikiri.tv/?m=vod-play-id-4414-src-1-num-2.html').text
    data = re.findall("mac_url=unescape\('(.*)?'\)", resp)
    if not data:
        print_error('No data found, maybe the script is out-of-date.', exit_=False)
        return {}
    data = unquote(json.loads('["{}"]'.format(data[0].replace('%u', '\\u')))[0])
    ret = {}
    for i in data.split('#'):
        title, url = i.split('$')
        ret[parse_episode(title)] = url
    return ret
def index(**kwargs):
    """ Main endpoint, get all templates """
    filters = {}
    if kwargs.get('filters'):
        try:
            filters = json.loads(unquote(unquote(kwargs['filters'])))
        except (ValueError, SyntaxError, TypeError) as ex:
            raise BadRequest(str(ex.message))
    try:
        where = generate_request_filter(filters)
    except (AttributeError, KeyError, IndexError, FieldError,
            SyntaxError, TypeError, ValueError) as ex:
        raise BadRequest(str(ex.message))
    try:
        templates = MailTemplate.objects.filter(where).order_by('name')
    except (AttributeError, KeyError, IndexError, FieldError,
            SyntaxError, TypeError, ValueError) as ex:
        raise BadRequest(str(ex.message))
    return [model_to_dict(t) for t in templates]
def _cheap_response_parse(arg1, arg2):
    """Silly parser for 'name=value; attr=attrvalue' format,
    to test out response renders
    """
    def crumble(arg):
        "Break down string into pieces"
        lines = [line for line in arg if line]
        done = []
        for line in lines:
            clauses = [clause for clause in line.split(';')]
            import logging
            logging.error("clauses %r", clauses)
            name, value = re.split(" *= *", clauses[0], 1)
            value = unquote(value.strip(' "'))
            attrs = [re.split(" *= *", clause, 1) \
                     for clause in clauses[1:] if clause]
            attrs = [attr for attr in attrs \
                     if attr[0] in Cookie.attribute_names]
            attrs = [(k, v.strip(' "')) for k, v in attrs]
            done.append((name, value, tuple(attrs)))
        return done
    result1 = crumble([arg1])
    result2 = crumble(arg2)
    return result1, result2
def handle_request(self):
    try:
        data = urllib.unquote(self.get_argument('q'))
        with tempfile.NamedTemporaryFile(suffix='.latex') as raw:
            raw.write(data)
            raw.flush()
            filename = latex_to_dvi(raw.name)
            svg_data = dvi_to_svg(filename).strip()
            png_data, width, height = svg_to_png(svg_data)
            png_data = png_data.strip().encode('base64')
            return {
                'success': True,
                'svg': svg_data,
                'png': png_data,
                'meta': {'width': width, 'height': height}
            }
    except Exception as error:
        print error.message
        return {
            'success': False,
            'error': error.message
        }
def main(event, context):
    response = {
        "statusCode": 200,
        "body": 'Usage: curl -XPOST --data-urlencode "identity=$(curl -s http://169.254.169.254/latest/dynamic/instance-identity/pkcs7)" https://limn.company.com/'
    }
    logger.debug("event: {}".format(json.dumps(event)))
    if 'body' in event:
        identity = event['body']
        if identity.startswith('identity='):
            identity = identity[9:]
        try:
            identity = urllib.unquote(identity).decode('utf8')
            trusted_doc = awstrust.verify_pkcs7(identity)
            instance = Instance(
                trusted_doc['accountId'],
                trusted_doc['region'],
                trusted_doc['instanceId'])
            response['body'] = json.dumps(instance.__dict__)
        except Exception as e:
            response['statusCode'] = 401
            response['body'] = "Error: {}".format(e)
            raise
    logger.info("response: {}".format(json.dumps(response)))
    return response
def parseResponse(self, data):
    data = unquote(data)
    return dict(item.split('=', 1)
                for item in data.split('?')[-1].split('&'))
def _parse_qsl(qs):
    r = []
    for pair in qs.replace(';', '&').split('&'):
        if not pair:
            continue
        nv = pair.split('=', 1)
        if len(nv) != 2:
            nv.append('')
        key = urlunquote(nv[0].replace('+', ' '))
        value = urlunquote(nv[1].replace('+', ' '))
        r.append((key, value))
    return r
def current_user_id(self):
    if CONFIG.COOKIE_SECRET:
        user_id = self.get_secure_cookie('user_id', min_version=2)
        if user_id:
            return user_id
        # user_id = self.get_secure_cookie("user_id")
        # fixed no cookie value in User-Agent for Shockwave Flash and for lua upload
        if not user_id:
            secure_code = self.get_argument('code', '')
            # code = self.get_cookie('user_id')
            if secure_code:
                secure_user_id = unquote(secure_code)
                user_id = decode_signed_value(
                    self.application.settings["cookie_secret"],
                    'user_id', secure_user_id)
        return user_id
def create_google_session(self):
    """Summary

    Returns:
        TYPE: Description
    """
    session = requests.session()
    login_html = session.get(DataManagement.__GOOGLE_ACCOUNT_URL)
    # Check cookies returned because there is an issue with the authentication
    # GAPS, GALX, NID - These cookies are used to identify the user when using
    # Google+ functionality. GAPS is still provided
    self.logger.debug(session.cookies.get_dict().keys())
    try:
        galx = session.cookies['GALX']
    except:
        self.logger.error('No cookie GALX')
    soup_login = BeautifulSoup(login_html.content, 'html.parser').find('form').find_all('input')
    payload = {}
    for u in soup_login:
        if u.has_attr('value'):
            payload[u['name']] = u['value']
    payload['Email'] = self.__username
    payload['Passwd'] = self.__password
    auto = login_html.headers.get('X-Auto-Login')
    follow_up = unquote(unquote(auto)).split('continue=')[-1]
    # Commented as suggested in https://github.com/tracek/gee_asset_manager/issues/36
    # galx = login_html.cookies['GALX']
    payload['continue'] = follow_up
    # Commented as suggested in https://github.com/tracek/gee_asset_manager/issues/36
    # payload['GALX'] = galx
    session.post(DataManagement.__AUTHENTICATION_URL, data=payload)
    return session
def push_state(self, request, title, url=''):
    if request.is_mobile():
        # FIXME: hack -- mobile webviews do not refresh document.title
        # after an AJAX update; loading a throwaway iframe forces it
        script = '''
            (function(){
                var $body = $('body');
                var $iframe = $('<iframe src="/@@/img/favicon.ico" style="display:none;"></iframe>').on('load', function() {
                    setTimeout(function() { $iframe.off('load').remove() }, 0)
                }).appendTo($body);
            })();
        '''
        self._append_script(script, False)
    title = self._escape_value(title)
    # for non-kss (non-AJAX pushState) requests, just set the title
    if not request.headers.has_key('kss'):
        self._append_script('document.title=%s' % title, False)
        return
    form = self.request.form
    # don't push a new state when navigating back
    if form.has_key('back'):
        return
    else:
        form['back'] = True
    kss = request.getURL()
    if form:
        kss += '?%s' % urllib.urlencode(form)
    data = json.dumps({'form': form, 'url': kss})
    if not url:
        url = urllib.unquote(kss)
    script = "History.trigger=false;History.pushState(%s, %s, '%s')" % (data, title, url)
    self._append_script(script, False)
def displayContents(contents, isBase64=False):
    '''my hacky way to not display duplicate contents. for some reason
    xml sends back to back requests and i only want to show the first one'''
    global LAST_CONTENTS
    newContents = sha1(contents).hexdigest()
    if LAST_CONTENTS != newContents:
        print "[+] Received response, displaying\n"
        if not isBase64:
            print urllib.unquote(contents)
        else:
            print urllib.unquote(contents).decode('base64')
        LAST_CONTENTS = newContents
        print "------\n"
    return
def url2pathname(url):
    """OS-specific conversion from a relative URL of the 'file' scheme
    to a file system path; not recommended for general use."""
    # e.g.
    #   ///C|/foo/bar/spam.foo
    # becomes
    #   C:\foo\bar\spam.foo
    import string, urllib
    # Windows itself uses ":" even in URLs.
    url = url.replace(':', '|')
    if not '|' in url:
        # No drive specifier, just convert slashes
        if url[:4] == '////':
            # path is something like ////host/path/on/remote/host
            # convert this to \\host\path\on\remote\host
            # (notice halving of slashes at the start of the path)
            url = url[2:]
        components = url.split('/')
        # make sure not to convert quoted slashes :-)
        return urllib.unquote('\\'.join(components))
    comp = url.split('|')
    if len(comp) != 2 or comp[0][-1] not in string.ascii_letters:
        error = 'Bad URL: ' + url
        raise IOError, error
    drive = comp[0][-1].upper()
    path = drive + ':'
    components = comp[1].split('/')
    for comp in components:
        if comp:
            path = path + '\\' + urllib.unquote(comp)
    # Issue #11474: url like '/C|/' should convert into 'C:\\'
    if path.endswith(':') and url.endswith('/'):
        path += '\\'
    return path
def parseaddr(addr):
    addrs = _AddressList(addr).addresslist
    if not addrs:
        return '', ''
    return addrs[0]


# rfc822.unquote() doesn't properly de-backslash-ify in Python pre-2.3.
def unquote(str):
    """Remove quotes from a string."""
    if len(str) > 1:
        if str.startswith('"') and str.endswith('"'):
            return str[1:-1].replace('\\\\', '\\').replace('\\"', '"')
        if str.startswith('<') and str.endswith('>'):
            return str[1:-1]
    return str


# RFC2231-related functions - parameter encoding and decoding
def collapse_rfc2231_value(value, errors='replace',
                           fallback_charset='us-ascii'):
    if isinstance(value, tuple):
        rawval = unquote(value[2])
        charset = value[0] or 'us-ascii'
        try:
            return unicode(rawval, charset, errors)
        except LookupError:
            # XXX charset is unknown to Python.
            return unicode(rawval, fallback_charset, errors)
    else:
        return unquote(value)
def list_directory(self, path):
    """Helper to produce a directory listing (absent index.html).

    Return value is either a file object, or None (indicating an
    error).  In either case, the headers are sent, making the
    interface the same as for send_head().
    """
    try:
        list = os.listdir(path)
    except os.error:
        self.send_error(404, "No permission to list directory")
        return None
    list.sort(key=lambda a: a.lower())
    f = StringIO()
    displaypath = cgi.escape(urllib.unquote(self.path))
    f.write('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
    f.write("<html>\n<title>Directory listing for %s</title>\n" % displaypath)
    f.write("<body>\n<h2>Directory listing for %s</h2>\n" % displaypath)
    f.write("<hr>\n<ul>\n")
    for name in list:
        fullname = os.path.join(path, name)
        displayname = linkname = name
        # Append / for directories or @ for symbolic links
        if os.path.isdir(fullname):
            displayname = name + "/"
            linkname = name + "/"
        if os.path.islink(fullname):
            displayname = name + "@"
            # Note: a link to a directory displays with @ and links with /
        f.write('<li><a href="%s">%s</a>\n'
                % (urllib.quote(linkname), cgi.escape(displayname)))
    f.write("</ul>\n<hr>\n</body>\n</html>\n")
    length = f.tell()
    f.seek(0)
    self.send_response(200)
    encoding = sys.getfilesystemencoding()
    self.send_header("Content-type", "text/html; charset=%s" % encoding)
    self.send_header("Content-Length", str(length))
    self.end_headers()
    return f
def translate_path(self, path):
    """Translate a /-separated PATH to the local filename syntax.

    Components that mean special things to the local file system
    (e.g. drive or directory names) are ignored.  (XXX They should
    probably be diagnosed.)
    """
    # abandon query parameters
    path = path.split('?', 1)[0]
    path = path.split('#', 1)[0]
    path = posixpath.normpath(urllib.unquote(path))
    words = path.split('/')
    words = filter(None, words)
    path = os.getcwd()
    for word in words:
        drive, word = os.path.splitdrive(word)
        head, word = os.path.split(word)
        if word in (os.curdir, os.pardir):
            continue
        path = os.path.join(path, word)
    return path
def get_environ(self):
    env = self.server.base_environ.copy()
    env['SERVER_PROTOCOL'] = self.request_version
    env['REQUEST_METHOD'] = self.command
    if '?' in self.path:
        path, query = self.path.split('?', 1)
    else:
        path, query = self.path, ''

    env['PATH_INFO'] = urllib.unquote(path)
    env['QUERY_STRING'] = query

    host = self.address_string()
    if host != self.client_address[0]:
        env['REMOTE_HOST'] = host
    env['REMOTE_ADDR'] = self.client_address[0]

    if self.headers.typeheader is None:
        env['CONTENT_TYPE'] = self.headers.type
    else:
        env['CONTENT_TYPE'] = self.headers.typeheader

    length = self.headers.getheader('content-length')
    if length:
        env['CONTENT_LENGTH'] = length

    for h in self.headers.headers:
        k, v = h.split(':', 1)
        k = k.replace('-', '_').upper()
        v = v.strip()
        if k in env:
            continue                    # skip content length, type, etc.
        if 'HTTP_' + k in env:
            env['HTTP_' + k] += ',' + v  # comma-separate multiple headers
        else:
            env['HTTP_' + k] = v
    return env
def get_host_info(self, host):
    x509 = {}
    if isinstance(host, TupleType):
        host, x509 = host

    import urllib
    auth, host = urllib.splituser(host)

    if auth:
        import base64
        auth = base64.encodestring(urllib.unquote(auth))
        auth = string.join(string.split(auth), "")  # get rid of whitespace
        extra_headers = [
            ("Authorization", "Basic " + auth)
            ]
    else:
        extra_headers = None

    return host, extra_headers, x509

##
# Connect to server.
#
# @param host Target host.
# @return A connection handle.
def __handle_unescape(self, key):
    start = 0
    while True:
        start_js = self.js
        offset = self.js.find(key, start)
        if offset == -1:
            break
        offset += len(key)
        expr = ''
        extra = ''
        last_c = self.js[offset - 1]
        abort = False
        for i, c in enumerate(self.js[offset:]):
            extra += c
            if c == ')':
                break
            elif (i > 0 and c == '(') or (c == '[' and last_c != '+'):
                abort = True
                break
            elif c == '%' or c in string.hexdigits:
                expr += c
            last_c = c
        if not abort:
            self.js = self.js.replace(key + extra, urllib.unquote(expr))
        if start_js == self.js:
            break
        else:
            start = offset
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []

        if url == None: return sources

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        title = data['title']
        year = data['year']

        h = {'User-Agent': client.randomagent()}

        v = '%s_%s' % (cleantitle.geturl(title).replace('-', '_'), year)

        url = '/watch?v=%s' % v
        url = urlparse.urljoin(self.base_link, url)

        #c = client.request(url, headers=h, output='cookie')
        #c = client.request(urlparse.urljoin(self.base_link, '/av'), cookie=c, output='cookie', headers=h, referer=url)
        #c = client.request(url, cookie=c, headers=h, referer=url, output='cookie')

        post = urllib.urlencode({'v': v})
        u = urlparse.urljoin(self.base_link, '/video_info/iframe')

        #r = client.request(u, post=post, cookie=c, headers=h, XHR=True, referer=url)
        r = client.request(u, post=post, headers=h, XHR=True, referer=url)

        r = json.loads(r).values()
        r = [urllib.unquote(i.split('url=')[-1]) for i in r]

        for i in r:
            try:
                sources.append({'source': 'gvideo',
                                'quality': directstream.googletag(i)[0]['quality'],
                                'language': 'en', 'url': i,
                                'direct': True, 'debridonly': False})
            except:
                pass

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    try:
        sources = []

        if url == None: return sources

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        title = data['title']
        year = data['year']

        h = {'User-Agent': client.randomagent()}

        v = '%s_%s' % (cleantitle.geturl(title).replace('-', '_'), year)

        url = '/watch_%s.html' % v
        url = urlparse.urljoin(self.base_link, url)

        c = client.request(url, headers=h, output='cookie')
        c = client.request(urlparse.urljoin(self.base_link, '/av'), cookie=c, output='cookie', headers=h, referer=url)
        #c = client.request(url, cookie=c, headers=h, referer=url, output='cookie')

        post = urllib.urlencode({'v': v})
        u = urlparse.urljoin(self.base_link, '/video_info/frame')

        #r = client.request(u, post=post, cookie=c, headers=h, XHR=True, referer=url)
        r = client.request(u, post=post, headers=h, XHR=True, referer=url)

        r = json.loads(r).values()
        r = [urllib.unquote(i.split('url=')[-1]) for i in r]

        for i in r:
            try:
                sources.append({'source': 'gvideo',
                                'quality': directstream.googletag(i)[0]['quality'],
                                'language': 'en', 'url': i,
                                'direct': True, 'debridonly': False})
            except:
                pass

        return sources
    except:
        return sources
def sources(self, url, hostDict, hostprDict):
    sources = []
    try:
        if not url:
            return sources

        referer = urlparse.urljoin(self.base_link, url)

        c, h = self.__get_cookies(referer)

        try:
            post = urlparse.parse_qs(urlparse.urlparse(referer).query).values()[0][0]
        except:
            post = referer.strip('/').split('/')[-1].split('watch_', 1)[-1].rsplit('#')[0].rsplit('.')[0]

        post = urllib.urlencode({'v': post})

        url = urlparse.urljoin(self.base_link, '/video_info/iframe')

        r = client.request(url, post=post, headers=h, cookie=c, XHR=True, referer=referer)
        r = json.loads(r).values()
        r = [urllib.unquote(i.split('url=')[-1]) for i in r]

        for i in r:
            try:
                sources.append({'source': 'gvideo',
                                'quality': directstream.googletag(i)[0]['quality'],
                                'language': 'en', 'url': i,
                                'direct': True, 'debridonly': False})
            except:
                pass

        return sources
    except:
        return sources
def post(self):
    if pubsub_utils.SUBSCRIPTION_UNIQUE_TOKEN != self.request.get('token'):
        self.response.status = 404
        return
    # Store the message in the datastore.
    message = json.loads(urllib.unquote(self.request.body).rstrip('='))
    message_body = base64.b64decode(str(message['message']['data']))
    message = message_body.split(',')
    d = datetime.strptime(message[0][:-5], '%Y-%m-%dT%H:%M:%S')
    timestamp = time.mktime(d.timetuple())
    message = message[1:]
    entities = zip(message[::2], map(int, message[1::2]))
    data_raw = memcache.get(MC_OSCARS_TOP10)
    if data_raw:
        # reuse the value fetched above rather than hitting memcache twice
        data = json.loads(data_raw)
    else:
        data = None
    if data is None or data['timestamp'] < timestamp:
        memcache.set(MC_OSCARS_TOP10, json.dumps({
            'timestamp': timestamp,
            'entities': entities
        }))
def register(request):
    """Register the user."""
    from .models import RegisteredUser, RegisteredUserForm
    email = request.user.email
    username = request.user.username
    u = RegisteredUser.objects.get(username=username)
    next = unquote(request.GET.get('next', reverse('webtzite_dashboard')))
    if next == reverse('webtzite_register'):
        next = reverse('webtzite_dashboard')
    if request.method == "GET":
        if u.is_registered:
            return redirect(next)
        form = RegisteredUserForm()
    else:
        form = RegisteredUserForm(request.POST, instance=u)
        if form.is_valid():
            u.is_registered = True
            u.institution = form.cleaned_data['institution']
            u.first_name = form.cleaned_data['first_name']
            u.last_name = form.cleaned_data['last_name']
            if os.environ.get('JPY_USER'):
                from git.config import GitConfigParser
                cfg = os.path.normpath(os.path.expanduser("~/.gitconfig"))
                gcp = GitConfigParser(cfg, read_only=False)
                full_name = ' '.join([u.first_name, u.last_name])
                gcp.set_value('user', 'name', full_name)
                gcp.set_value('user', 'email', u.email)
            u.is_superuser = bool(RegisteredUser.objects.count() == 1)
            u.save()
            return redirect(next)
    ctx = RequestContext(request)
    return render_to_response('register.html', locals(), ctx)