The following 50 code examples, extracted from open-source Python projects, illustrate how to use token.STRING.
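For orientation, here is a minimal, self-contained sketch (the source snippet and variable names are illustrative, not taken from any of the projects below) of the pattern the examples share: walk the tokens produced by tokenize and compare each token's type against token.STRING.

import io
import token
import tokenize

source = 'x = "hello"\ny = 42\n'
# generate_tokens yields TokenInfo tuples; .type is compared against
# the constants defined in the token module.
for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    if tok.type == token.STRING:
        print(tok.start, tok.string)  # -> (1, 4) "hello"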
import token
import tokenize

def find_strings(filename):
    """Return a dict of possible docstring positions.

    The dict maps line numbers to strings.  There is an entry for
    each line that contains only a string or a part of a
    triple-quoted string.
    """
    d = {}
    # If the first token is a string, then it's the module docstring.
    # Add this special case so that the test in the loop passes.
    prev_ttype = token.INDENT
    f = open(filename)
    for ttype, tstr, start, end, line in tokenize.generate_tokens(f.readline):
        if ttype == token.STRING:
            if prev_ttype == token.INDENT:
                sline, scol = start
                eline, ecol = end
                for i in range(sline, eline + 1):
                    d[i] = 1
        prev_ttype = ttype
    f.close()
    return d
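A hedged usage sketch for the function above (the module text and the temporary-file handling are illustrative assumptions, not part of the original example): write a small module to disk and map the lines covered by its docstring.

import os
import tempfile

# Illustrative two-line module docstring followed by one statement.
src = '"""Module docstring\nspanning two lines."""\nx = 1\n'
with tempfile.NamedTemporaryFile("w", suffix=".py", delete=False) as tmp:
    tmp.write(src)
    path = tmp.name
print(find_strings(path))  # -> {1: 1, 2: 1}
os.remove(path)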
import token

def evaluate(cls, nodelist):
    # Classmethod of a marker-evaluation class: reduce a parse-tree node
    # to a value.  cls.values maps known names to operator callables.
    while len(nodelist) == 2:
        nodelist = nodelist[1]
    kind = nodelist[0]
    name = nodelist[1]
    if kind == token.NAME:
        try:
            op = cls.values[name]
        except KeyError:
            raise SyntaxError("Unknown name %r" % name)
        return op()
    if kind == token.STRING:
        s = nodelist[1]
        if s[:1] not in "'\"" or s.startswith('"""') or s.startswith("'''") \
                or '\\' in s:
            raise SyntaxError(
                "Only plain strings allowed in environment markers")
        return s[1:-1]
    raise SyntaxError("Language feature not supported in environment markers")
def evaluate(cls, nodelist):
    while len(nodelist) == 2:
        nodelist = nodelist[1]
    kind = nodelist[0]
    name = nodelist[1]
    if kind == token.NAME:
        try:
            op = cls.values[name]
        except KeyError:
            raise SyntaxError("Unknown name %r" % name)
        return op()
    if kind == token.STRING:
        s = nodelist[1]
        if not cls._safe_string(s):
            raise SyntaxError(
                "Only plain strings allowed in environment markers")
        return s[1:-1]
    msg = "Language feature not supported in environment markers"
    raise SyntaxError(msg)
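This second version factors the literal check into a helper. A plausible standalone sketch of that helper, derived from the inline condition in the first evaluate example (the name _safe_string comes from the code above; the implementation here is an assumption consistent with that condition):

def _safe_string(cand):
    # A "plain" string literal: opens with a single or double quote,
    # is not triple-quoted, and contains no backslash escapes.
    return (
        cand[:1] in "'\""
        and not cand.startswith('"""')
        and not cand.startswith("'''")
        and '\\' not in cand
    )

assert _safe_string("'linux'")
assert not _safe_string('"""doc"""')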
import token
import tokenize

def _find_strings(filename, encoding=None):
    """Return a dict of possible docstring positions.

    The dict maps line numbers to strings.  There is an entry for
    each line that contains only a string or a part of a
    triple-quoted string.
    """
    d = {}
    # If the first token is a string, then it's the module docstring.
    # Add this special case so that the test in the loop passes.
    prev_ttype = token.INDENT
    with open(filename, encoding=encoding) as f:
        tok = tokenize.generate_tokens(f.readline)
        for ttype, tstr, start, end, line in tok:
            if ttype == token.STRING:
                if prev_ttype == token.INDENT:
                    sline, scol = start
                    eline, ecol = end
                    for i in range(sline, eline + 1):
                        d[i] = 1
            prev_ttype = ttype
    return d
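Compared with the first find_strings example, this variant accepts an explicit encoding and opens the file in a with block, so the file is closed even if tokenization raises; the token-scanning logic itself is unchanged.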