This article collects typical usage examples of the tokenize.generate_tokens method in Python. If you are wondering what exactly tokenize.generate_tokens does or how to use it in practice, the hand-picked examples below may help. You can also explore other uses of the tokenize module that provides this method.
The following presents 15 code examples of tokenize.generate_tokens, sorted by popularity by default.
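Before the examples, here is a minimal sketch of the basic call pattern (assuming Python 3): tokenize.generate_tokens takes a readline callable that returns text and yields TokenInfo tuples of (type, string, start, end, line).

import io
import tokenize

source = "x = 1\nprint(x)\n"
for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    # Each token is a TokenInfo named tuple; tok_name maps the numeric type to its name.
    print(tokenize.tok_name[tok.type], repr(tok.string))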
Example 1: file_tokens
# Required import: import tokenize
# Or: from tokenize import generate_tokens
def file_tokens(self):
    """The complete set of tokens for a file.

    Accessing this attribute *may* raise an InvalidSyntax exception.

    :raises: flake8.exceptions.InvalidSyntax
    """
    if self._file_tokens is None:
        line_iter = iter(self.lines)
        try:
            self._file_tokens = list(tokenize.generate_tokens(
                lambda: next(line_iter)
            ))
        except tokenize.TokenError as exc:
            raise exceptions.InvalidSyntax(exc.message, exception=exc)

    return self._file_tokens
Example 2: _verify_pre_check
# Required import: import tokenize
# Or: from tokenize import generate_tokens
def _verify_pre_check(filepath):
    """Check student code for certain issues."""
    # Make sure the program doesn't crash for students.
    # Could use some improvement for better logging and error reporting.
    try:
        # Check for inline "pylint:" comment, which may indicate a student
        # trying to disable a check.
        with tokenize.open(os.path.expanduser(filepath)) as f:
            for tok_type, content, _, _, _ in tokenize.generate_tokens(f.readline):
                if tok_type != tokenize.COMMENT:
                    continue
                match = pylint.constants.OPTION_RGX.search(content)
                if match is not None:
                    print('[ERROR] String "pylint:" found in comment. ' +
                          'No check run on file `{}.`\n'.format(filepath))
                    return False
    except IndentationError as e:
        print('[ERROR] python_ta could not check your code due to an ' +
              'indentation error at line {}.'.format(e.lineno))
        return False
    except tokenize.TokenError as e:
        print('[ERROR] python_ta could not check your code due to a ' +
              'syntax error in your file.')
        return False

    return True
Example 3: _parse
# Required import: import tokenize
# Or: from tokenize import generate_tokens
def _parse(self, source):
    reader = StringIO(source)
    # parse until EOF or TokenError (allows incomplete modules)
    tokens = []
    try:
        tokens.extend(tokenize.generate_tokens(reader.readline))
    except tokenize.TokenError:
        # TokenError always happens at EOF, for unclosed strings or brackets.
        # We don't care about that here, since we can still recover the whole
        # source code.
        pass

    self._tokens = tokens
    it = Iterator(self._tokens)
    self._imports_begin, self._imports_end = self._find_import_range(it)
    it = Iterator(self._tokens, start=self._imports_begin, end=self._imports_end)
    self._parse_imports(it)
Example 4: fix_e402
# Required import: import tokenize
# Or: from tokenize import generate_tokens
def fix_e402(self, result):
    (line_index, offset, target) = get_index_offset_contents(result,
                                                             self.source)
    for i in range(1, 100):
        line = "".join(self.source[line_index:line_index+i])
        try:
            generate_tokens("".join(line))
        except (SyntaxError, tokenize.TokenError):
            continue
        break
    if not (target in self.imports and self.imports[target] != line_index):
        mod_offset = get_module_imports_on_top_of_file(self.source,
                                                       line_index)
        self.source[mod_offset] = line + self.source[mod_offset]
    for offset in range(i):
        self.source[line_index+offset] = ''
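Note that the bare generate_tokens called here (and in Example 5 below) is handed a whole source string, so it is evidently a helper defined in the same module rather than tokenize.generate_tokens itself, which expects a readline callable. A minimal sketch of such a wrapper, offered purely as an assumption about what that helper does:

import io
import tokenize

def generate_tokens(source):
    # Assumed stand-in: tokenize a full source string by handing its
    # readline method to the stdlib generator and materialising the tokens.
    return list(tokenize.generate_tokens(io.StringIO(source).readline))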
Example 5: _find_logical
# Required import: import tokenize
# Or: from tokenize import generate_tokens
def _find_logical(source_lines):
    # Make a variable which is the index of all the starts of lines.
    logical_start = []
    logical_end = []
    last_newline = True
    parens = 0
    for t in generate_tokens(''.join(source_lines)):
        if t[0] in [tokenize.COMMENT, tokenize.DEDENT,
                    tokenize.INDENT, tokenize.NL,
                    tokenize.ENDMARKER]:
            continue
        if not parens and t[0] in [tokenize.NEWLINE, tokenize.SEMI]:
            last_newline = True
            logical_end.append((t[3][0] - 1, t[2][1]))
            continue
        if last_newline and not parens:
            logical_start.append((t[2][0] - 1, t[2][1]))
            last_newline = False
        if t[0] == tokenize.OP:
            if t[1] in '([{':
                parens += 1
            elif t[1] in '}])':
                parens -= 1

    return (logical_start, logical_end)
Example 6: find_strings
# Required import: import tokenize
# Or: from tokenize import generate_tokens
def find_strings(filename):
    """Return a dict of possible docstring positions.

    The dict maps line numbers to strings.  There is an entry for
    each line that contains only a string or a part of a triple-quoted
    string.
    """
    d = {}
    # If the first token is a string, then it's the module docstring.
    # Add this special case so that the test in the loop passes.
    prev_ttype = token.INDENT
    f = open(filename)
    for ttype, tstr, start, end, line in tokenize.generate_tokens(f.readline):
        if ttype == token.STRING:
            if prev_ttype == token.INDENT:
                sline, scol = start
                eline, ecol = end
                for i in range(sline, eline + 1):
                    d[i] = 1
        prev_ttype = ttype
    f.close()
    return d
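An illustrative call, assuming a readable Python source file is available (here, the running script itself); the keys of the returned dict are the line numbers that docstrings span.

docstring_lines = find_strings(__file__)
print(sorted(docstring_lines))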
Example 7: scanvars
# Required import: import tokenize
# Or: from tokenize import generate_tokens
def scanvars(reader, frame, locals):
    """Scan one logical line of Python and look up values of variables used."""
    vars, lasttoken, parent, prefix, value = [], None, None, '', __UNDEF__
    for ttype, token, start, end, line in tokenize.generate_tokens(reader):
        if ttype == tokenize.NEWLINE:
            break
        if ttype == tokenize.NAME and token not in keyword.kwlist:
            if lasttoken == '.':
                if parent is not __UNDEF__:
                    value = getattr(parent, token, __UNDEF__)
                vars.append((prefix + token, prefix, value))
            else:
                where, value = lookup(token, frame, locals)
                vars.append((token, where, value))
        elif token == '.':
            prefix += lasttoken + '.'
            parent = value
        else:
            parent, prefix = None, ''
        lasttoken = token
    return vars
Example 8: get_parse_error
# Required import: import tokenize
# Or: from tokenize import generate_tokens
def get_parse_error(code):
    """
    Checks code for ambiguous tabs or other basic parsing issues.

    :param code: a string containing a file's worth of Python code
    :returns: a string containing a description of the first parse error encountered,
              or None if the code is ok
    """
    # note that this uses non-public elements from stdlib's tabnanny, because tabnanny
    # is (very frustratingly) written only to be used as a script, but using it that way
    # in this context requires writing temporary files, running subprocesses, blah blah blah
    code_buffer = StringIO(code)
    try:
        tabnanny.process_tokens(tokenize.generate_tokens(code_buffer.readline))
    except tokenize.TokenError as err:
        return "Could not parse code: %s" % err
    except IndentationError as err:
        return "Indentation error: %s" % err
    except tabnanny.NannyNag as err:
        return "Ambiguous tab at line %d; line is '%s'." % (err.get_lineno(), err.get_line())
    return None
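Two illustrative calls against the helper above: an unclosed bracket surfaces as a tokenize.TokenError, while well-formed code returns None.

print(get_parse_error("x = (1,\n"))   # "Could not parse code: ..." (EOF in multi-line statement)
print(get_parse_error("x = 1\n"))     # None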
Example 9: _dedent
# Required import: import tokenize
# Or: from tokenize import generate_tokens
def _dedent(s):
    """Dedent python code string."""
    result = [t[:2] for t in generate_tokens(StringIO(s).readline)]
    # set initial indent to 0 if any
    if result[0][0] == INDENT:
        result[0] = (INDENT, '')
    return untokenize(result)
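An illustrative call, assuming the defining module does "from io import StringIO", "from tokenize import generate_tokens, untokenize", and "from token import INDENT":

print(_dedent("    x = 1\n    y = 2\n"))
# The leading four-space indent is removed; untokenize's compatibility mode
# may re-space tokens within each line.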
Example 10: getblock
# Required import: import tokenize
# Or: from tokenize import generate_tokens
def getblock(lines):
    """Extract the block of code at the top of the given list of lines."""
    blockfinder = BlockFinder()
    try:
        tokens = tokenize.generate_tokens(iter(lines).__next__)
        for _token in tokens:
            blockfinder.tokeneater(*_token)
    except (EndOfBlock, IndentationError):
        pass
    return lines[:blockfinder.last]
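An illustrative call, assuming BlockFinder and EndOfBlock resolve to the helpers of the same name in the stdlib inspect module, which this snippet appears to mirror:

lines = ["def f():\n", "    return 1\n", "\n", "x = 2\n"]
print(getblock(lines))   # ['def f():\n', '    return 1\n'] -- only the top-level block is kept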
Example 11: _find_docstrings
# Required import: import tokenize
# Or: from tokenize import generate_tokens
def _find_docstrings(self, filename):
    # A replacement for trace.find_strings() which was deprecated in
    # Python 3.2 and removed in 3.6.
    strs = set()
    prev = token.INDENT  # so module docstring is detected as docstring
    with open(filename) as f:
        tokens = tokenize.generate_tokens(f.readline)
        for ttype, tstr, start, end, line in tokens:
            if ttype == token.STRING and prev == token.INDENT:
                strs.update(range(start[0], end[0] + 1))
            prev = ttype
    return strs
Example 12: deindent
# Required import: import tokenize
# Or: from tokenize import generate_tokens
def deindent(lines, offset=None):
    if offset is None:
        for line in lines:
            line = line.expandtabs()
            s = line.lstrip()
            if s:
                offset = len(line) - len(s)
                break
        else:
            offset = 0
    if offset == 0:
        return list(lines)

    newlines = []

    def readline_generator(lines):
        for line in lines:
            yield line + '\n'
        while True:
            yield ''

    it = readline_generator(lines)

    try:
        for _, _, (sline, _), (eline, _), _ in tokenize.generate_tokens(lambda: next(it)):
            if sline > len(lines):
                break  # End of input reached
            if sline > len(newlines):
                line = lines[sline - 1].expandtabs()
                if line.lstrip() and line[:offset].isspace():
                    line = line[offset:]  # Deindent
                newlines.append(line)

            for i in range(sline, eline):
                # Don't deindent continuing lines of
                # multiline tokens (i.e. multiline strings)
                newlines.append(lines[i])
    except (IndentationError, tokenize.TokenError):
        pass
    # Add any lines we didn't see. E.g. if an exception was raised.
    newlines.extend(lines[len(newlines):])
    return newlines
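An illustrative call against the helper above: the common four-space offset is stripped while the relative indentation of the block is preserved.

print(deindent(["    def f():", "        return 1"]))
# ['def f():', '    return 1']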
Example 13: getstatementrange_ast
# Required import: import tokenize
# Or: from tokenize import generate_tokens
def getstatementrange_ast(lineno, source, assertion=False, astnode=None):
    if astnode is None:
        content = str(source)
        try:
            astnode = compile(content, "source", "exec", 1024)  # 1024 for AST
        except ValueError:
            start, end = getstatementrange_old(lineno, source, assertion)
            return None, start, end
    start, end = get_statement_startend2(lineno, astnode)
    # we need to correct the end:
    # - ast-parsing strips comments
    # - there might be empty lines
    # - we might have lesser indented code blocks at the end
    if end is None:
        end = len(source.lines)

    if end > start + 1:
        # make sure we don't span differently indented code blocks
        # by using the BlockFinder helper that inspect.getsource() itself uses
        block_finder = inspect.BlockFinder()
        # if we start with an indented line, put blockfinder in "started" mode
        block_finder.started = source.lines[start][0].isspace()
        it = ((x + "\n") for x in source.lines[start:end])
        try:
            for tok in tokenize.generate_tokens(lambda: next(it)):
                block_finder.tokeneater(*tok)
        except (inspect.EndOfBlock, IndentationError):
            end = block_finder.last + start
        except Exception:
            pass

    # the end might still point to a comment or empty line, correct it
    while end:
        line = source.lines[end - 1].lstrip()
        if line.startswith("#") or not line:
            end -= 1
        else:
            break
    return astnode, start, end
Example 14: generate_tokens
# Required import: import tokenize
# Or: from tokenize import generate_tokens
def generate_tokens(self):
    """Tokenize the file and yield the tokens.

    :raises flake8.exceptions.InvalidSyntax:
        If a :class:`tokenize.TokenError` is raised while generating
        tokens.
    """
    try:
        for token in tokenize.generate_tokens(self.next_line):
            if token[2][0] > self.total_lines:
                break
            self.tokens.append(token)
            yield token
    except (tokenize.TokenError, SyntaxError) as exc:
        raise exceptions.InvalidSyntax(exception=exc)
Example 15: _generate
# Required import: import tokenize
# Or: from tokenize import generate_tokens
def _generate(code):
    '''Pass the code into `tokenize.generate_tokens` and convert the result
    into a list.
    '''
    # tokenize.generate_tokens is an undocumented function accepting text
    return list(tokenize.generate_tokens(io.StringIO(code).readline))
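A brief illustrative call, assuming "import io" and "import tokenize" at the top of the defining module:

tokens = _generate("x = 1\n")
print([(tokenize.tok_name[t.type], t.string) for t in tokens])
# roughly: [('NAME', 'x'), ('OP', '='), ('NUMBER', '1'), ('NEWLINE', '\n'), ('ENDMARKER', '')]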