The following 49 code examples, extracted from Python open-source projects, illustrate how to use os.path.startswith(). (Strictly speaking, os.path has no startswith() function; each example calls the built-in str.startswith() method on a path string.)
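Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below, and assuming POSIX-style paths) of the pattern they share: treating a path as an ordinary string and calling its startswith() method, for instance to check whether a file lives under a given directory. The helper name is_under is hypothetical.

import os.path

def is_under(path: str, prefix: str) -> bool:
    """Return True if `path` lies under the directory `prefix`.

    Hypothetical helper for illustration only. Normalizing both sides and
    re-adding the separator avoids the classic prefix pitfall where
    '/usr/local2' would match the prefix '/usr/local'.
    """
    path = os.path.normpath(path)
    prefix = os.path.normpath(prefix)
    return path == prefix or path.startswith(prefix + os.sep)

print(is_under("/usr/local/bin/python", "/usr/local"))   # True
print(is_under("/usr/local2/bin/python", "/usr/local"))  # False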
def get_browser_locale(self, default="en_US"):
    """Determines the user's locale from ``Accept-Language`` header.

    See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.4
    """
    if "Accept-Language" in self.request.headers:
        languages = self.request.headers["Accept-Language"].split(",")
        locales = []
        for language in languages:
            parts = language.strip().split(";")
            if len(parts) > 1 and parts[1].startswith("q="):
                try:
                    score = float(parts[1][2:])
                except (ValueError, TypeError):
                    score = 0.0
            else:
                score = 1.0
            locales.append((parts[0], score))
        if locales:
            locales.sort(key=lambda pair: pair[1], reverse=True)
            codes = [l[0] for l in locales]
            return locale.get(*codes)
    return locale.get(default)
def visitNode_head(self, node):
    authorNodes = domhelpers.findElementsWithAttribute(node, 'rel', 'author')
    authorNodes = [n for n in authorNodes if n.tagName == 'link']

    if authorNodes:
        self.writer('\\author{')
        authors = []
        for aNode in authorNodes:
            name = aNode.getAttribute('title', '')
            href = aNode.getAttribute('href', '')
            if href.startswith('mailto:'):
                href = href[7:]
            if href:
                if name:
                    name += ' '
                name += '$<$' + href + '$>$'
            if name:
                authors.append(name)

        self.writer(' \\and '.join(authors))
        self.writer('}')

    self.visitNodeDefault(node)
def ignore_file(info):
    path = info.filename
    if info.file_size > 256 * 1024:
        return True
    if path.endswith('.png') or path.endswith('.jpeg') or path.endswith('.ttf') \
            or path.endswith('.otf') or path.endswith('.gif'):
        return True
    if path.endswith('.mf') or path.endswith('.sf') or path.endswith('.rsa'):
        return True
    if path.startswith('resources/addon-sdk/'):
        return True
    if 'jquery' in path:
        return True
    if 'bootstrap' in path and 'css' in path:
        return True
    return False
def extract_docker_layer(img: tarfile.TarFile, layer_id: str, extract_path: str):
    with tarfile.open(fileobj=img.extractfile('%s/layer.tar' % layer_id),
                      errorlevel=0, dereference=True) as layer:

        layer.extractall(path=extract_path)

        log.debug('processing whiteouts')
        for member in layer.getmembers():
            path = member.path
            if path.startswith('.wh.') or '/.wh.' in path:
                if path.startswith('.wh.'):
                    newpath = path[4:]
                else:
                    newpath = path.replace('/.wh.', '/')

                try:
                    log.debug('removing path %s', newpath)
                    os.unlink(path)
                    os.unlink(newpath)
                except OSError as err:
                    if err.errno != errno.ENOENT:
                        raise
def _get_absolute_path(self, path):
    original_path = path

    if path.startswith('rsync://'):
        return path

    if self._task._role is not None:
        path = self._loader.path_dwim_relative(self._task._role._role_path, 'files', path)
    else:
        path = self._loader.path_dwim_relative(self._loader.get_basedir(), 'files', path)

    if original_path and original_path[-1] == '/' and path[-1] != '/':
        # make sure the dwim'd path ends in a trailing "/"
        # if the original path did
        path += '/'

    return path
def _override_module_replaced_vars(self, task_vars):
    """ Some vars are substituted into the modules.  Have to make sure
    that those are correct for localhost when synchronize creates its own
    connection to localhost."""

    # Clear the current definition of these variables as they came from the
    # connection to the remote host
    if 'ansible_syslog_facility' in task_vars:
        del task_vars['ansible_syslog_facility']
    for key in list(task_vars.keys()):
        if key.startswith("ansible_") and key.endswith("_interpreter"):
            del task_vars[key]

    # Add the definitions from localhost
    for host in C.LOCALHOST:
        if host in task_vars['hostvars']:
            localhost = task_vars['hostvars'][host]
            break
    if 'ansible_syslog_facility' in localhost:
        task_vars['ansible_syslog_facility'] = localhost['ansible_syslog_facility']
    for key in localhost:
        if key.startswith("ansible_") and key.endswith("_interpreter"):
            task_vars[key] = localhost[key]
def GypPathToNinja(self, path, env=None):
    """Translate a gyp path to a ninja path, optionally expanding environment
    variable references in |path| with |env|.

    See the above discourse on path conversions."""
    if env:
        if self.flavor == 'mac':
            path = gyp.xcode_emulation.ExpandEnvVars(path, env)
        elif self.flavor == 'win':
            path = gyp.msvs_emulation.ExpandMacros(path, env)
    if path.startswith('$!'):
        expanded = self.ExpandSpecial(path)
        if self.flavor == 'win':
            expanded = os.path.normpath(expanded)
        return expanded
    if '$|' in path:
        path = self.ExpandSpecial(path)
    assert '$' not in path, path
    return os.path.normpath(os.path.join(self.build_to_base, path))
def WriteCopies(self, copies, prebuild, mac_bundle_depends):
    outputs = []
    env = self.GetToolchainEnv()
    for copy in copies:
        for path in copy['files']:
            # Normalize the path so trailing slashes don't confuse us.
            path = os.path.normpath(path)
            basename = os.path.split(path)[1]
            src = self.GypPathToNinja(path, env)
            dst = self.GypPathToNinja(os.path.join(copy['destination'], basename),
                                      env)
            outputs += self.ninja.build(dst, 'copy', src, order_only=prebuild)
            if self.is_mac_bundle:
                # gyp has mac_bundle_resources to copy things into a bundle's
                # Resources folder, but there's no built-in way to copy files
                # to other places in the bundle. Hence, some targets use copies
                # for this. Check if this file is copied into the current bundle,
                # and if so add it to the bundle depends so that dependent
                # targets get rebuilt if the copy input changes.
                if dst.startswith(self.xcode_settings.GetBundleContentsFolderPath()):
                    mac_bundle_depends.append(dst)

    return outputs
def listdir(self, path: str, parent_span) -> List[Entry]:
    if path.endswith('/'):
        path = path[:-1]
    path_parts = path.split('/')
    entries = {}
    for p, v in self.contents.items():
        if not p.startswith(path):
            continue
        p_parts = p.split('/')
        if len(p_parts) <= len(path_parts):
            continue
        if p_parts[len(path_parts) - 1] != path_parts[-1]:
            continue
        name = p_parts[len(path_parts)]
        if name in entries:
            continue
        is_dir = len(p_parts) > len(path_parts) + 1
        size = 0 if is_dir else len(v)
        entries[name] = Entry(name, is_dir, size)
    return entries.values()

# TODO(aaron): determine whether this extra filesystem is really necessary,
# or if we could have just used a local fs. I suspect not, because the new
# workspace indexing/importing code wasn't written with a local fs in mind
def get_browser_locale(self, default="en_US"):
    """Determines the user's locale from Accept-Language header.

    See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.4
    """
    if "Accept-Language" in self.request.headers:
        languages = self.request.headers["Accept-Language"].split(",")
        locales = []
        for language in languages:
            parts = language.strip().split(";")
            if len(parts) > 1 and parts[1].startswith("q="):
                try:
                    score = float(parts[1][2:])
                except (ValueError, TypeError):
                    score = 0.0
            else:
                score = 1.0
            locales.append((parts[0], score))
        if locales:
            # sort by score, descending (tuple-unpacking lambdas are Python 2 only)
            locales.sort(key=lambda pair: pair[1], reverse=True)
            codes = [l[0] for l in locales]
            return locale.get(*codes)
    return locale.get(default)
def get_host_interfaces(self, host):
    ok, net_devs = check_output_ssh(host, self.opts, 'ls -l /sys/class/net')
    if not ok:
        logger.warning("'ls -l /sys/class/net' failed %s", net_devs)
        return

    for line in net_devs.strip().split("\n")[1:]:
        if not line.startswith('l'):
            continue
        params = line.split()
        if len(params) < 11:
            logger.warning("Strange line in 'ls -l /sys/class/net' node %s: %r",
                           host, line)
            continue
        yield ('devices/pci' in params[10]), params[8]
def put(self, bucket, object_name):
    object_name = urllib.unquote(object_name)
    bucket_dir = os.path.abspath(os.path.join(
        self.application.directory, bucket))
    if not bucket_dir.startswith(self.application.directory) or \
            not os.path.isdir(bucket_dir):
        raise web.HTTPError(404)
    path = self._object_path(bucket, object_name)
    if not path.startswith(bucket_dir) or os.path.isdir(path):
        raise web.HTTPError(403)
    directory = os.path.dirname(path)
    if not os.path.exists(directory):
        os.makedirs(directory)
    object_file = open(path, "w")
    object_file.write(self.request.body)
    object_file.close()
    self.finish()
def build_is_obsolete(self, build):
    # compare dates between git and docker
    path = build.get('path')
    Dockerfile = os.path.join(path, 'Dockerfile')

    build_templates = self.gits.get('build-templates')
    vanilla = self.gits.get('main')

    if path.startswith(build_templates.working_dir):
        git_repo = build_templates
    elif path.startswith(vanilla.working_dir):
        git_repo = vanilla
    else:
        log.exit("Unable to find git repo containing %s" % Dockerfile)

    obsolete, build_ts, last_commit = gitter.check_file_younger_than(
        gitobj=git_repo,
        filename=Dockerfile,
        timestamp=build.get('timestamp')
    )
    return obsolete, build_ts, last_commit
def _handle_rename2(self, path, node, dst, overwriteDest):
    if not dst.startswith("/"):
        dst = self._join_user_path(dst)

    # Strip the last / if there is one. Hadoop doesn't like this
    if dst.endswith("/"):
        dst = dst[:-1]

    request = client_proto.Rename2RequestProto()
    request.src = path
    request.dst = dst
    request.overwriteDest = overwriteDest
    try:
        self.service.rename2(request)
    except RequestError as ex:
        if ("FileAlreadyExistsException" in str(ex) or
                "rename destination directory is not empty" in str(ex)):
            raise FileAlreadyExistsException(ex)
        else:
            raise

    return {"path": path}
def _join_url(self, path):
    # strip a leading slash so urljoin appends to BASE_URL's path
    # instead of replacing it
    if path.startswith('/'):
        path = path[1:]
    return urljoin(ZoomClient.BASE_URL, path)
def GetPath(self, path):
    '''
    Put all the relative path calculations in one place.

    If we're given a path that has a leading slash, we treat it as
    absolute and do nothing. Otherwise, we treat it as a relative path
    based on the botPath setting in our config file.
    '''
    if not path.startswith(os.sep):
        path = os.path.join(self.botPath, path)
    return path
def check_etag_header(self):
    """Checks the ``Etag`` header against the request's ``If-None-Match``.

    Returns ``True`` if the request's Etag matches and a 304 should be
    returned. For example::

        self.set_etag_header()
        if self.check_etag_header():
            self.set_status(304)
            return

    This method is called automatically when the request is finished,
    but may be called earlier for applications that override
    `compute_etag` and want to do an early check for ``If-None-Match``
    before completing the request.  The ``Etag`` header should be set
    (perhaps with `set_etag_header`) before calling this method.
    """
    computed_etag = utf8(self._headers.get("Etag", ""))
    # Find all weak and strong etag values from If-None-Match header
    # because RFC 7232 allows multiple etag values in a single header.
    etags = re.findall(
        br'\*|(?:W/)?"[^"]*"',
        utf8(self.request.headers.get("If-None-Match", ""))
    )
    if not computed_etag or not etags:
        return False

    match = False
    if etags[0] == b'*':
        match = True
    else:
        # Use a weak comparison when comparing entity-tags.
        val = lambda x: x[2:] if x.startswith(b'W/') else x
        for etag in etags:
            if val(etag) == val(computed_etag):
                match = True
                break
    return match
def _load_ui_methods(self, methods):
    if isinstance(methods, types.ModuleType):
        self._load_ui_methods(dict((n, getattr(methods, n)) for n in dir(methods)))
    elif isinstance(methods, list):
        for m in methods:
            self._load_ui_methods(m)
    else:
        for name, fn in methods.items():
            if not name.startswith("_") and hasattr(fn, "__call__") \
                    and name[0].lower() == name[0]:
                self.ui_methods[name] = fn
def _compressible_type(self, ctype):
    return ctype.startswith('text/') or ctype in self.CONTENT_TYPES
def _decode_signed_value_v1(secret, name, value, max_age_days, clock):
    parts = utf8(value).split(b"|")
    if len(parts) != 3:
        return None
    signature = _create_signature_v1(secret, name, parts[0], parts[1])
    if not _time_independent_equals(parts[2], signature):
        gen_log.warning("Invalid cookie signature %r", value)
        return None
    timestamp = int(parts[1])
    if timestamp < clock() - max_age_days * 86400:
        gen_log.warning("Expired cookie %r", value)
        return None
    if timestamp > clock() + 31 * 86400:
        # _cookie_signature does not hash a delimiter between the
        # parts of the cookie, so an attacker could transfer trailing
        # digits from the payload to the timestamp without altering the
        # signature.  For backwards compatibility, sanity-check timestamp
        # here instead of modifying _cookie_signature.
        gen_log.warning("Cookie timestamp in future; possible tampering %r",
                        value)
        return None
    if parts[1].startswith(b"0"):
        gen_log.warning("Tampered cookie %r", value)
        return None
    try:
        return base64.b64decode(parts[0])
    except Exception:
        return None
def _find_groups(self):
    """Returns a tuple (reverse string, group count) for a url.

    For example: Given the url pattern /([0-9]{4})/([a-z-]+)/, this method
    would return ('/%s/%s/', 2).
    """
    pattern = self.regex.pattern
    if pattern.startswith('^'):
        pattern = pattern[1:]
    if pattern.endswith('$'):
        pattern = pattern[:-1]

    if self.regex.groups != pattern.count('('):
        # The pattern is too complicated for our simplistic matching,
        # so we can't support reversing it.
        return (None, None)

    pieces = []
    for fragment in pattern.split('('):
        if ')' in fragment:
            paren_loc = fragment.index(')')
            if paren_loc >= 0:
                pieces.append('%s' + fragment[paren_loc + 1:])
        else:
            pieces.append(fragment)

    return (''.join(pieces), self.regex.groups)