"""Tokenization help for Python programs.
generate_tokens(readline) is a generator that breaks a stream of text into Python tokens. It accepts a readline-like method which is called repeatedly to get the next line of input (or "" for EOF). It generates 5-tuples with these members:
the token type (see token.py) the token (a string) the starting (row, column) indices of the token (a 2-tuple of ints) the ending (row, column) indices of the token (a 2-tuple of ints) the original line (string)
It is designed to match the working of the Python tokenizer exactly, except that it produces COMMENT tokens for comments and gives type OP for all operators
Older entry points tokenize_loop(readline, tokeneater) tokenize(readline, tokeneater=printtoken) are the same, except instead of generating tokens, tokeneater is a callback function to which the 5 fields described above are passed as 5 arguments, each time a new token is found."""
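# Illustrative usage sketch (not part of the original module): drive
# generate_tokens() with a readline built from a plain string.  The sample
# source text below is an assumption made only for this demonstration.
from tokenize import generate_tokens
from token import tok_name

example_source = "x = 1 + 2\n"
example_readline = iter(example_source.splitlines(True)).next  # readline-like callable

for tok_type, tok_string, start, end, line in generate_tokens(example_readline):
    # Each 5-tuple carries the token type, its text, the start and end
    # (row, column) positions, and the raw line it came from.
    print("%-10s %-10r %s -> %s" % (tok_name[tok_type], tok_string, start, end))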
__credits__ = 'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro, Raymond Hettinger'
"generate_tokens", "NL", "untokenize"]
# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:\\.[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:\\.[^"\\]*)*"""'
# Single-line ' or " string.
String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
               r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
                 r"//=?",
                 r"[+\-*/%&|^=<>]=?",
                 r"~")
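# Illustrative sketch of the ordering rule described in the comment above
# (not part of the original module): with Python's regex alternation, the
# first alternative that matches wins, so a short operator listed before a
# longer one would split "==" into two "=" tokens.  The two throwaway
# patterns below are assumptions made only for this demonstration.
import re

_bad = re.compile(r"=|==")     # short alternative listed first
_good = re.compile(r"==|=")    # longest alternative listed first

print(_bad.findall("a == b"))   # ['=', '='] -- "==" seen as two tokens
print(_good.findall("a == b"))  # ['==']     -- "==" matched as one token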
# First (or only) line of ' or " string.
ContStr = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                group("'", r'\\\r?\n'),
                r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                group('"', r'\\\r?\n'))
tokenprog, pseudoprog, single3prog, double3prog = map(
    re.compile, (Token, PseudoToken, Single3, Double3))
endprogs = {"'": re.compile(Single), '"': re.compile(Double),
            "'''": single3prog, '"""': double3prog,
            "r'''": single3prog, 'r"""': double3prog,
            "u'''": single3prog, 'u"""': double3prog,
            "ur'''": single3prog, 'ur"""': double3prog,
            "R'''": single3prog, 'R"""': double3prog,
            "U'''": single3prog, 'U"""': double3prog,
            "uR'''": single3prog, 'uR"""': double3prog,
            "Ur'''": single3prog, 'Ur"""': double3prog,
            "UR'''": single3prog, 'UR"""': double3prog,
            "b'''": single3prog, 'b"""': double3prog,
            "br'''": single3prog, 'br"""': double3prog,
            "B'''": single3prog, 'B"""': double3prog,
            "bR'''": single3prog, 'bR"""': double3prog,
            "Br'''": single3prog, 'Br"""': double3prog,
            "BR'''": single3prog, 'BR"""': double3prog,
            'r': None, 'R': None, 'u': None, 'U': None,
            'b': None, 'B': None}
"r'''", 'r"""', "R'''", 'R"""', "u'''", 'u"""', "U'''", 'U"""', "ur'''", 'ur"""', "Ur'''", 'Ur"""', "uR'''", 'uR"""', "UR'''", 'UR"""', "b'''", 'b"""', "B'''", 'B"""', "br'''", 'br"""', "Br'''", 'Br"""', "bR'''", 'bR"""', "BR'''", 'BR"""'): "r'", 'r"', "R'", 'R"', "u'", 'u"', "U'", 'U"', "ur'", 'ur"', "Ur'", 'Ur"', "uR'", 'uR"', "UR'", 'UR"', "b'", 'b"', "B'", 'B"', "br'", 'br"', "Br'", 'Br"', "bR'", 'bR"', "BR'", 'BR"' ):
def printtoken(type, token, srow_scol, erow_ecol, line): # for testing
    srow, scol = srow_scol
    erow, ecol = erow_ecol
    print("%d,%d-%d,%d:\t%s\t%s" %
          (srow, scol, erow, ecol, tok_name[type], repr(token)))
""" The tokenize() function accepts two parameters: one representing the input stream, and one providing an output mechanism for tokenize().
The first parameter, readline, must be a callable object which provides the same interface as the readline() method of built-in file objects. Each call to the function should return one line of input as a string.
The second parameter, tokeneater, must also be a callable object. It is called once for each token, with five arguments, corresponding to the tuples generated by generate_tokens(). """ try: tokenize_loop(readline, tokeneater) except StopTokenizing: pass
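# Illustrative usage sketch for the callback-style entry point documented
# above (not part of the original module).  The sample source string and the
# collecting callback are assumptions made only for this demonstration.
import tokenize
from token import tok_name

collected = []

def eat(tok_type, tok_string, start, end, line):
    # Receives the same five fields that generate_tokens() would yield.
    collected.append((tok_name[tok_type], tok_string))

tokenize.tokenize(iter("x = 1\n".splitlines(True)).next, eat)
print(collected)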
# backwards compatible interface
def tokenize_loop(readline, tokeneater):
    for token_info in generate_tokens(readline):
        tokeneater(*token_info)
    def add_whitespace(self, start):
        row, col = start
        if row > self.prev_row:
            raise ValueError("row should not be greater than prev_row")
        col_offset = col - self.prev_col
        if col_offset:
            self.tokens.append(" " * col_offset)
            tok_type, token, start, end, line = t
            self.add_whitespace(start)
            self.tokens.append(token)
            self.prev_row, self.prev_col = end
            if tok_type in (NEWLINE, NL):
                self.prev_row += 1
                self.prev_col = 0
            tokval += ' '
            startline = True
            # Insert a space between two consecutive strings
            if toknum == STRING:
                if prevstring:
                    tokval = ' ' + tokval
                prevstring = True
            else:
                prevstring = False
            if toknum == INDENT:
                indents.append(tokval)
                continue
            elif toknum == DEDENT:
                indents.pop()
                continue
            elif toknum in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                toks_append(indents[-1])
                startline = False
"""Transform tokens back into Python source code.
Each element returned by the iterable must be a token sequence with at least two elements, a token number and token value. If only two tokens are passed, the resulting output is poor.
Round-trip invariant for full input: Untokenized source will match input source exactly
Round-trip invariant for limited intput::
# Output text will tokenize the back to the input t1 = [tok[:2] for tok in generate_tokens(f.readline)] newcode = untokenize(t1) readline = iter(newcode.splitlines(1)).next t2 = [tok[:2] for tok in generate_tokens(readline)] if t1 != t2: raise ValueError("t1 should be equal to t2") """
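# Illustrative round-trip sketch based on the invariant documented above
# (not part of the original module).  The sample source string is an
# assumption made only for this demonstration.
from tokenize import generate_tokens, untokenize

source = "if x:\n    y = 2 + 2\n"
t1 = [tok[:2] for tok in generate_tokens(iter(source.splitlines(True)).next)]
newcode = untokenize(t1)           # spacing is ugly, but the code is equivalent
t2 = [tok[:2] for tok in generate_tokens(iter(newcode.splitlines(True)).next)]
print(t1 == t2)                    # True: the limited (2-tuple) round trip holds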
""" The generate_tokens() generator requires one argment, readline, which must be a callable object which provides the same interface as the readline() method of built-in file objects. Each call to the function should return one line of input as a string. Alternately, readline can be a callable function terminating with StopIteration::
readline = open(myfile).next # Example of alternate readline
The generator produces 5-tuples with these members: the token type; the token string; a 2-tuple (srow, scol) of ints specifying the row and column where the token begins in the source; a 2-tuple (erow, ecol) of ints specifying the row and column where the token ends in the source; and the line on which the token was found. The line passed is the logical line; continuation lines are included. """
        except StopIteration:
            line = ''
            if not line:
                raise TokenError("EOF in multi-line string", strstart)
            endmatch = endprog.match(line)
            if endmatch:
                pos = end = endmatch.end(0)
                yield (STRING, contstr + line[:end],
                       strstart, (lnum, end), contline + line)
                contstr, needcont = '', 0
                contline = None
            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
                yield (ERRORTOKEN, contstr + line,
                       strstart, (lnum, len(line)), contline)
                contstr = ''
                contline = None
                continue
            else:
                contstr = contstr + line
                contline = contline + line
                continue
            if line[pos] == ' ':
                column = column + 1
            elif line[pos] == '\t':
                column = (column/tabsize + 1)*tabsize
            elif line[pos] == '\f':
                column = 0
            else:
                break
            pos = pos + 1
        if pos == max:
            break
            if line[pos] == '#':
                comment_token = line[pos:].rstrip('\r\n')
                nl_pos = pos + len(comment_token)
                yield (COMMENT, comment_token,
                       (lnum, pos), (lnum, pos + len(comment_token)), line)
                yield (NL, line[nl_pos:],
                       (lnum, nl_pos), (lnum, len(line)), line)
            else:
                yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
                       (lnum, pos), (lnum, len(line)), line)
            continue
            if column > indents[-1]:           # count indents or dedents
                indents.append(column)
                yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
            while column < indents[-1]:
                if column not in indents:
                    raise IndentationError(
                        "unindent does not match any outer indentation level",
                        ("<tokenize>", lnum, pos, line))
                indents = indents[:-1]
                yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
        else:                                  # continued statement
            if not line:
                raise TokenError("EOF in multi-line statement", (lnum, 0))
            continued = 0
                if initial in numchars or \
                   (initial == '.' and token != '.'):      # ordinary number
                    yield (NUMBER, token, spos, epos, line)
                elif initial in '\r\n':
                    yield (NL if parenlev > 0 else NEWLINE,
                           token, spos, epos, line)
                elif initial == '#':
                    if token.endswith("\n"):
                        raise ValueError("Token should not end with \n")
                    yield (COMMENT, token, spos, epos, line)
                elif token in triple_quoted:
                    endprog = endprogs[token]
                    endmatch = endprog.match(line, pos)
                    if endmatch:                           # all on one line
                        pos = endmatch.end(0)
                        token = line[start:pos]
                        yield (STRING, token, spos, (lnum, pos), line)
                    else:
                        strstart = (lnum, start)           # multiple lines
                        contstr = line[start:]
                        contline = line
                        break
                elif initial in single_quoted or \
                    token[:2] in single_quoted or \
                    token[:3] in single_quoted:
                    if token[-1] == '\n':                  # continued string
                        strstart = (lnum, start)
                        endprog = (endprogs[initial] or endprogs[token[1]] or
                                   endprogs[token[2]])
                        contstr, needcont = line[start:], 1
                        contline = line
                        break
                    else:                                  # ordinary string
                        yield (STRING, token, spos, epos, line)
                elif initial == '\\':                      # continued stmt
                    continued = 1
                else:
                    yield (OP, token, spos, epos, line)
            else:
                yield (ERRORTOKEN, line[pos],
                       (lnum, pos), (lnum, pos + 1), line)
                pos = pos + 1
yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
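# Illustrative usage sketch for generate_tokens(), referenced from its
# docstring above (not part of the original module).  The file name
# "example.py" is an assumption made only for this demonstration.
from tokenize import generate_tokens
from token import tok_name

with open("example.py") as f:
    for tok_type, tok_string, (srow, scol), (erow, ecol), logical_line in \
            generate_tokens(f.readline):
        print("%s %r spans %d,%d-%d,%d" %
              (tok_name[tok_type], tok_string, srow, scol, erow, ecol))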
if __name__ == '__main__':                     # testing
    import sys
    if len(sys.argv) > 1:
        tokenize(open(sys.argv[1]).readline)
    else:
        tokenize(sys.stdin.readline)