The following 50 code examples, extracted from open-source Python projects, illustrate how to use token.start().
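Before the project examples, here is a minimal sketch (the sample source string is made up) showing where token.start comes from: each TokenInfo produced by the standard tokenize module carries a start tuple of (1-based line, 0-based column).

import io
import tokenize

source = "x = 1\n"  # made-up snippet for illustration
for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    row, col = tok.start  # (line, column) where the token begins
    print(row, col, tokenize.tok_name[tok.type], repr(tok.string))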
def expect_token(token, tok_type, tok_str=None):
    """
    Verifies that the given token is of the expected type. If tok_str is given, the token string
    is verified too. If the token doesn't match, raises an informative ValueError.
    """
    if not match_token(token, tok_type, tok_str):
        raise ValueError("Expected token %s, got %s on line %s col %s" % (
            token_repr(tok_type, tok_str), str(token),
            token.start[0], token.start[1] + 1))
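The + 1 in the error message compensates for tokenize reporting columns 0-based while error messages conventionally count from 1. A quick check of that offset with a real token (the sample line is made up; match_token and token_repr are assumed to be defined elsewhere in the same project):

import io
import tokenize

toks = list(tokenize.generate_tokens(io.StringIO("a = b\n").readline))
name_b = toks[2]            # the NAME token for 'b'
print(name_b.start)         # (1, 4): 0-based column
print(name_b.start[1] + 1)  # 5: the 1-based column shown in the error message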
def replace(text, replacements):
    """
    Replaces multiple slices of text with new values. This is a convenience method for making
    code modifications of ranges e.g. as identified by ``ASTTokens.get_text_range(node)``.
    Replacements is an iterable of ``(start, end, new_text)`` tuples.

    For example, ``replace("this is a test", [(0, 4, "X"), (8, 9, "THE")])`` produces
    ``"X is THE test"``.
    """
    p = 0
    parts = []
    for (start, end, new_text) in sorted(replacements):
        parts.append(text[p:start])
        parts.append(new_text)
        p = end
    parts.append(text[p:])
    return ''.join(parts)
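Assuming the replace helper above is in scope, its docstring example can be verified directly:

text = "this is a test"
# Replace characters 0-4 ("this") with "X" and character 8 ("a") with "THE".
print(replace(text, [(0, 4, "X"), (8, 9, "THE")]))  # -> X is THE test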
def __repr__(self):
    annotated_type = '%d (%s)' % (self.type, tok_name[self.type])
    return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' %
            self._replace(type=annotated_type))
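This __repr__ belongs to the TokenInfo named tuple in CPython's tokenize module, so printing any token shows its start and end positions alongside the annotated type; a quick illustration (output as produced by recent CPython versions):

import io
import tokenize

tok = next(tokenize.generate_tokens(io.StringIO("pass\n").readline))
print(tok)
# TokenInfo(type=1 (NAME), string='pass', start=(1, 0), end=(1, 4), line='pass\n')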
def untokenize(self, iterable):
    for t in iterable:
        if len(t) == 2:
            self.compat(t, iterable)
            break
        tok_type, token, start, end, line = t
        if tok_type == ENCODING:
            self.encoding = token
            continue
        self.add_whitespace(start)
        self.tokens.append(token)
        self.prev_row, self.prev_col = end
        if tok_type in (NEWLINE, NL):
            self.prev_row += 1
            self.prev_col = 0
    return "".join(self.tokens)
def untokenize(self, iterable):
    it = iter(iterable)
    for t in it:
        if len(t) == 2:
            self.compat(t, it)
            break
        tok_type, token, start, end, line = t
        if tok_type == ENCODING:
            self.encoding = token
            continue
        if tok_type == ENDMARKER:
            break
        self.add_whitespace(start)
        self.tokens.append(token)
        self.prev_row, self.prev_col = end
        if tok_type in (NEWLINE, NL):
            self.prev_row += 1
            self.prev_col = 0
    return "".join(self.tokens)
def add_whitespace(self, start):
    row, col = start
    if row < self.prev_row or row == self.prev_row and col < self.prev_col:
        raise ValueError("start ({},{}) precedes previous end ({},{})"
                         .format(row, col, self.prev_row, self.prev_col))
    row_offset = row - self.prev_row
    if row_offset:
        self.tokens.append("\\\n" * row_offset)
        self.prev_col = 0
    col_offset = col - self.prev_col
    if col_offset:
        self.tokens.append(" " * col_offset)
def untokenize(self, iterable):
    it = iter(iterable)
    indents = []
    startline = False
    for t in it:
        if len(t) == 2:
            self.compat(t, it)
            break
        tok_type, token, start, end, line = t
        if tok_type == ENCODING:
            self.encoding = token
            continue
        if tok_type == ENDMARKER:
            break
        if tok_type == INDENT:
            indents.append(token)
            continue
        elif tok_type == DEDENT:
            indents.pop()
            self.prev_row, self.prev_col = end
            continue
        elif tok_type in (NEWLINE, NL):
            startline = True
        elif startline and indents:
            indent = indents[-1]
            if start[1] >= len(indent):
                self.tokens.append(indent)
                self.prev_col = len(indent)
            startline = False
        self.add_whitespace(start)
        self.tokens.append(token)
        self.prev_row, self.prev_col = end
        if tok_type in (NEWLINE, NL):
            self.prev_row += 1
            self.prev_col = 0
    return "".join(self.tokens)
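These Untokenizer methods are internal to the standard tokenize module; the public entry point is tokenize.untokenize, which in full 5-tuple mode uses each token's start position (via add_whitespace) to rebuild the original spacing. A small round-trip sketch (the sample source is made up):

import io
import tokenize

source = "x  =  1\n"  # extra spaces to show that gaps are rebuilt from start positions
toks = list(tokenize.generate_tokens(io.StringIO(source).readline))
print(repr(tokenize.untokenize(toks)))  # -> 'x  =  1\n'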
def add_whitespace(self, start):
    row, col = start
    assert row <= self.prev_row
    col_offset = col - self.prev_col
    if col_offset:
        self.tokens.append(" " * col_offset)
def parse_file(filename):
    with open(filename) as f:
        tokens = [t for t in generate_tokens(f.readline)
                  if t.type != tokenize.COMMENT and t.type != tokenize.NL]
        try:
            file_input.parse(tokens)
        except CustomParseError as err:
            token = tokens[err.orig_error.state.pos]
            lineno = token.start[0]
            print('Syntax error at line {}. Details:'.format(lineno))
            print(' ' + err.msg)
def main():
    import argparse

    # Helper error handling routines
    def perror(message):
        print(message, file=sys.stderr)

    def error(message, filename=None, location=None):
        if location:
            args = (filename,) + location + (message,)
            perror("%s:%d:%d: error: %s" % args)
        elif filename:
            perror("%s: error: %s" % (filename, message))
        else:
            perror("error: %s" % message)
        sys.exit(1)

    # Parse the arguments and options
    parser = argparse.ArgumentParser(prog='python -m tokenize')
    parser.add_argument(dest='filename', nargs='?',
                        metavar='filename.py',
                        help='the file to tokenize; defaults to stdin')
    parser.add_argument('-e', '--exact', dest='exact', action='store_true',
                        help='display token names using the exact type')
    args = parser.parse_args()

    try:
        # Tokenize the input
        if args.filename:
            filename = args.filename
            with _builtin_open(filename, 'rb') as f:
                tokens = list(tokenize(f.readline))
        else:
            filename = "<stdin>"
            tokens = _tokenize(sys.stdin.readline, None)

        # Output the tokenization
        for token in tokens:
            token_type = token.type
            if args.exact:
                token_type = token.exact_type
            token_range = "%d,%d-%d,%d:" % (token.start + token.end)
            print("%-20s%-15s%-15r" %
                  (token_range, tok_name[token_type], token.string))
    except IndentationError as err:
        line, column = err.args[1][1:3]
        error(err.args[0], filename, (line, column))
    except TokenError as err:
        line, column = err.args[1]
        error(err.args[0], filename, (line, column))
    except SyntaxError as err:
        error(err, filename)
    except OSError as err:
        error(err)
    except KeyboardInterrupt:
        print("interrupted\n")
    except Exception as err:
        perror("unexpected error: %s" % err)
        raise
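The token_range line works because start and end are plain (row, col) tuples, so token.start + token.end concatenates into exactly the four integers the format string expects; a brief illustration (the sample input is made up):

import io
import tokenize

tok = next(tokenize.generate_tokens(io.StringIO("pass\n").readline))
print("%d,%d-%d,%d:" % (tok.start + tok.end))  # -> 1,0-1,4: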