This article collects typical usage examples of the tokenize.open method in Python. If you have been wondering what tokenize.open does, how to use it, or where it is useful, the curated code examples below may help. You can also read further about the tokenize module, where this method is defined.
The following section presents 15 code examples of tokenize.open, sorted by popularity by default.
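To ground the examples, here is a minimal sketch of what tokenize.open does (available since Python 3.2): it opens a source file in text mode using the encoding declared by the file's PEP 263 coding cookie or BOM, defaulting to UTF-8, so the text matches what the interpreter would see. The path below is a placeholder.

import tokenize

with tokenize.open("example.py") as f:  # "example.py" is a hypothetical path
    source = f.read()
    print(f.encoding)  # the encoding tokenize detected (e.g. 'utf-8')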
Example 1: importfile
# Required import: import tokenize
# Or: from tokenize import open
def importfile(path):
    """Import a Python source file or compiled file given its path."""
    magic = imp.get_magic()
    with open(path, 'rb') as file:
        # Compiled files start with the magic number; source files do not.
        if file.read(len(magic)) == magic:
            kind = imp.PY_COMPILED
        else:
            kind = imp.PY_SOURCE
        file.seek(0)
        filename = os.path.basename(path)
        name, ext = os.path.splitext(filename)
        try:
            module = imp.load_module(name, file, path, (ext, 'r', kind))
        except:
            raise ErrorDuringImport(path, sys.exc_info())
    return module
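Note that the imp module used above has been deprecated since Python 3.4 (and was removed in 3.12). A rough modern equivalent for the source-file case, sketched with importlib and not taken from pydoc itself:

import importlib.util
import os

def importfile_modern(path):
    """Sketch: import a Python source file by path using importlib."""
    name = os.path.splitext(os.path.basename(path))[0]
    spec = importlib.util.spec_from_file_location(name, path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)  # executes the module body
    return module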
Example 2: _compile_modules
# Required import: import tokenize
# Or: from tokenize import open
def _compile_modules(self) -> None:
    """Compile all modules to catch SyntaxErrors."""
    if os.path.basename(sys.argv[0]) == 'qutebrowser':
        # Launched via launcher script
        return
    elif hasattr(sys, 'frozen'):
        return
    else:
        path = os.path.abspath(os.path.dirname(qutebrowser.__file__))
        if not os.path.isdir(path):
            # Probably running from a python egg.
            return
    for dirpath, _dirnames, filenames in os.walk(path):
        for fn in filenames:
            fullpath = os.path.join(dirpath, fn)
            if os.path.splitext(fn)[1] == '.py' and os.path.isfile(fullpath):
                # tokenize.open honours the file's declared encoding.
                with tokenize.open(fullpath) as f:
                    compile(f.read(), fn, 'exec')
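For context, a hedged sketch of the same compile-to-check-syntax idea applied to a single file; check_syntax is a hypothetical helper, not part of qutebrowser:

import tokenize

def check_syntax(path):
    """Return the SyntaxError raised by compiling path, or None if it is clean."""
    with tokenize.open(path) as f:
        try:
            compile(f.read(), path, 'exec')
        except SyntaxError as e:
            return e
    return None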
Example 3: quit_
# Required import: import tokenize
# Or: from tokenize import open
def quit_(save: bool = False,
          session: sessions.ArgType = None) -> None:
    """Quit qutebrowser.

    Args:
        save: When given, save the open windows even if auto_save.session
              is turned off.
        session: The name of the session to save.
    """
    if session is not None and not save:
        raise cmdutils.CommandError("Session name given without --save!")
    if save:
        if session is None:
            session = sessions.default
        instance.shutdown(session=session)
    else:
        instance.shutdown()
Example 4: test_warning_calls
# Required import: import tokenize
# Or: from tokenize import open
def test_warning_calls():
    # combined "ignore" and stacklevel error
    base = Path(numpy.__file__).parent
    for path in base.rglob("*.py"):
        if base / "testing" in path.parents:
            continue
        if path == base / "__init__.py":
            continue
        if path == base / "random" / "__init__.py":
            continue
        # use tokenize to auto-detect encoding on systems where no
        # default encoding is defined (e.g. LANG='C')
        with tokenize.open(str(path)) as file:
            tree = ast.parse(file.read())
            FindFuncs(path).visit(tree)
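FindFuncs is defined elsewhere in numpy's test suite and is not shown here. As an illustration only, a minimal stand-in visitor in the same spirit (flagging .warn(...) calls without an explicit stacklevel) might look like this; the real checker is considerably stricter:

import ast

class FindFuncsSketch(ast.NodeVisitor):
    """Hypothetical stand-in: record .warn(...) calls lacking stacklevel."""
    def __init__(self, filename):
        self.filename = filename
        self.bad_calls = []

    def visit_Call(self, node):
        func = node.func
        if (isinstance(func, ast.Attribute) and func.attr == 'warn'
                and not any(kw.arg == 'stacklevel' for kw in node.keywords)):
            self.bad_calls.append((str(self.filename), node.lineno))
        self.generic_visit(node)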
Example 5: _verify_pre_check
# Required import: import tokenize
# Or: from tokenize import open
def _verify_pre_check(filepath):
    """Check student code for certain issues."""
    # Make sure the program doesn't crash for students.
    # Could use some improvement for better logging and error reporting.
    try:
        # Check for inline "pylint:" comments, which may indicate a student
        # trying to disable a check.
        with tokenize.open(os.path.expanduser(filepath)) as f:
            for tok_type, content, _, _, _ in tokenize.generate_tokens(f.readline):
                if tok_type != tokenize.COMMENT:
                    continue
                match = pylint.constants.OPTION_RGX.search(content)
                if match is not None:
                    print('[ERROR] String "pylint:" found in comment. '
                          'No check run on file `{}`.\n'.format(filepath))
                    return False
    except IndentationError as e:
        print('[ERROR] python_ta could not check your code due to an '
              'indentation error at line {}.'.format(e.lineno))
        return False
    except tokenize.TokenError:
        print('[ERROR] python_ta could not check your code due to a '
              'syntax error in your file.')
        return False
    return True
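pylint.constants.OPTION_RGX is pylint's internal regex for option comments. A self-contained sketch of the same scan, using a simplified stand-in pattern rather than pylint's actual one:

import re
import tokenize

PYLINT_OPT = re.compile(r'pylint:')  # simplified stand-in for OPTION_RGX

def has_pylint_comment(filepath):
    """Return True if any comment token in the file mentions 'pylint:'."""
    with tokenize.open(filepath) as f:
        for tok_type, content, _, _, _ in tokenize.generate_tokens(f.readline):
            if tok_type == tokenize.COMMENT and PYLINT_OPT.search(content):
                return True
    return False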
Example 6: read_py_file
# Required import: import tokenize
# Or: from tokenize import open
def read_py_file(filename, skip_encoding_cookie=True):
    """Read a Python file, using the encoding declared inside the file.

    Parameters
    ----------
    filename : str
        The path to the file to read.
    skip_encoding_cookie : bool
        If True (the default), and the encoding declaration is found in the
        first two lines, that line will be excluded from the output -
        compiling a unicode string with an encoding declaration is a
        SyntaxError in Python 2.

    Returns
    -------
    A unicode string containing the contents of the file.
    """
    with open(filename) as f:  # the tokenize-aware open defined in this module
        if skip_encoding_cookie:
            return "".join(strip_encoding_cookie(f))
        else:
            return f.read()
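strip_encoding_cookie is a helper defined alongside read_py_file in IPython; roughly, it yields the file's lines while dropping a PEP 263 coding cookie found in the first two lines. A hedged sketch of the idea, not IPython's exact implementation:

import re

cookie_re = re.compile(r'^\s*#.*coding[:=]\s*([-\w.]+)')  # PEP 263 pattern

def strip_encoding_cookie_sketch(lines):
    """Yield lines, skipping a coding cookie in the first two lines."""
    for i, line in enumerate(lines):
        if i < 2 and cookie_re.match(line):
            continue
        yield line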
Example 7: execfile
# Required import: import tokenize
# Or: from tokenize import open
def execfile(file, glob=None, loc=None):
    if glob is None:
        import sys
        glob = sys._getframe().f_back.f_globals
    if loc is None:
        loc = glob

    # The cleanest approach is tokenize.open():
    # http://code.activestate.com/lists/python-dev/131251/
    # (but tokenize.open() is only available since Python 3.2)
    import tokenize
    if hasattr(tokenize, 'open'):
        # Python 3.2+
        stream = tokenize.open(file)  # @UndefinedVariable
    else:
        # Python 3.0 or 3.1: detect the encoding manually.
        detect_encoding = tokenize.detect_encoding(open(file, mode="rb").readline)
        stream = open(file, encoding=detect_encoding[0])
    try:
        contents = stream.read()
    finally:
        stream.close()

    # Execute the script (note: compiling first is important so the filename
    # is set correctly in debug mode).
    exec(compile(contents + "\n", file, 'exec'), glob, loc)
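The fallback branch above relies on tokenize.detect_encoding, which reads just enough of a binary stream to determine the source encoding. A standalone usage sketch (the path is a placeholder):

import tokenize

with open("script.py", "rb") as fb:  # hypothetical path; must be binary mode
    encoding, sample_lines = tokenize.detect_encoding(fb.readline)
print(encoding)      # e.g. 'utf-8'
print(sample_lines)  # the raw line(s) consumed during detection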
Example 8: warning_calls
# Required import: import tokenize
# Or: from tokenize import open
def warning_calls():
    # combined "ignore" and stacklevel error
    base = Path(scipy.__file__).parent
    bad_filters = []
    bad_stacklevels = []
    for path in base.rglob("*.py"):
        # use tokenize to auto-detect encoding on systems where no
        # default encoding is defined (e.g. LANG='C')
        with tokenize.open(str(path)) as file:
            tree = ast.parse(file.read(), filename=str(path))
            finder = FindFuncs(path.relative_to(base))
            finder.visit(tree)
            bad_filters.extend(finder.bad_filters)
            bad_stacklevels.extend(finder.bad_stacklevels)
    return bad_filters, bad_stacklevels
Example 9: process
# Required import: import tokenize
# Or: from tokenize import open
def process(filename, tabsize, verbose=True):
    try:
        # tokenize.open() detects the source encoding and exposes it as
        # f.encoding, so the file can be rewritten in the same encoding.
        with tokenize.open(filename) as f:
            text = f.read()
            encoding = f.encoding
    except IOError as msg:
        print("%r: I/O error: %s" % (filename, msg))
        return
    newtext = text.expandtabs(tabsize)
    if newtext == text:
        return
    backup = filename + "~"
    try:
        os.unlink(backup)
    except OSError:
        pass
    try:
        os.rename(filename, backup)
    except OSError:
        pass
    with open(filename, "w", encoding=encoding) as f:
        f.write(newtext)
    if verbose:
        print(filename)
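A hypothetical driver for the function above, mirroring how such an untabify script is typically run over a source tree ("src" is a placeholder directory):

import os

for dirpath, _dirnames, filenames in os.walk("src"):
    for fn in filenames:
        if fn.endswith(".py"):
            process(os.path.join(dirpath, fn), tabsize=8)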
Example 10: __init__
# Required import: import tokenize
# Or: from tokenize import open
def __init__(self, counts=None, calledfuncs=None, infile=None,
             callers=None, outfile=None):
    self.counts = counts
    if self.counts is None:
        self.counts = {}
    self.counter = self.counts.copy()  # map (filename, lineno) to count
    self.calledfuncs = calledfuncs
    if self.calledfuncs is None:
        self.calledfuncs = {}
    self.calledfuncs = self.calledfuncs.copy()
    self.callers = callers
    if self.callers is None:
        self.callers = {}
    self.callers = self.callers.copy()
    self.infile = infile
    self.outfile = outfile
    if self.infile:
        # Try to merge existing counts file.
        try:
            with open(self.infile, 'rb') as f:
                counts, calledfuncs, callers = pickle.load(f)
            self.update(self.__class__(counts, calledfuncs, callers))
        except (OSError, EOFError, ValueError) as err:
            print(("Skipping counts file %r: %s"
                   % (self.infile, err)), file=sys.stderr)
Example 11: _find_strings
# Required import: import tokenize
# Or: from tokenize import open
def _find_strings(filename, encoding=None):
    """Return a dict of possible docstring positions.

    The dict maps line numbers to strings. There is an entry for
    each line that contains only a string or a part of a triple-quoted
    string.
    """
    d = {}
    # If the first token is a string, then it's the module docstring.
    # Add this special case so that the test in the loop passes.
    prev_ttype = token.INDENT
    with open(filename, encoding=encoding) as f:
        tok = tokenize.generate_tokens(f.readline)
        for ttype, tstr, start, end, line in tok:
            if ttype == token.STRING:
                if prev_ttype == token.INDENT:
                    sline, scol = start
                    eline, ecol = end
                    for i in range(sline, eline + 1):
                        d[i] = 1
            prev_ttype = ttype
    return d
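A quick usage sketch of the helper above (the module path is hypothetical):

positions = _find_strings("mymodule.py")
print(sorted(positions))  # line numbers that fall inside probable docstrings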
Example 12: tabnanny
# Required import: import tokenize
# Or: from tokenize import open
def tabnanny(self, filename):
    # XXX: tabnanny should work on binary files as well
    with tokenize.open(filename) as f:
        try:
            tabnanny.process_tokens(tokenize.generate_tokens(f.readline))
        except tokenize.TokenError as msg:
            msgtxt, (lineno, start) = msg.args
            self.editwin.gotoline(lineno)
            self.errorbox("Tabnanny Tokenizing Error",
                          "Token Error: %s" % msgtxt)
            return False
        except tabnanny.NannyNag as nag:
            # The error messages from tabnanny are too confusing...
            self.editwin.gotoline(nag.get_lineno())
            self.errorbox("Tab/space error", indent_message)
            return False
    return True
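Outside IDLE, the standard-library tabnanny module also exposes a simpler public entry point, tabnanny.check, which prints diagnostics for a file or, recursively, a directory (the path below is a placeholder):

import tabnanny

tabnanny.check("some_module.py")  # prints any ambiguous tab/space indentation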
Example 13: __init__
# Required import: import tokenize
# Or: from tokenize import open
def __init__(self, *args):
    self.breakpoints = []
    EditorWindow.__init__(self, *args)
    self.text.bind("<<set-breakpoint-here>>", self.set_breakpoint_here)
    self.text.bind("<<clear-breakpoint-here>>", self.clear_breakpoint_here)
    self.text.bind("<<open-python-shell>>", self.flist.open_shell)
    self.breakpointPath = os.path.join(idleConf.GetUserCfgDir(),
                                       'breakpoints.lst')
    # whenever a file is changed, restore breakpoints
    def filename_changed_hook(old_hook=self.io.filename_change_hook,
                              self=self):
        self.restore_file_breaks()
        old_hook()
    self.io.set_filename_change_hook(filename_changed_hook)
    if self.io.filename:
        self.restore_file_breaks()
    self.color_breakpoint_text()
Example 14: restore_file_breaks
# Required import: import tokenize
# Or: from tokenize import open
def restore_file_breaks(self):
    self.text.update()  # this enables setting "BREAK" tags to be visible
    if self.io is None:
        # can happen if IDLE closes due to the .update() call
        return
    filename = self.io.filename
    if filename is None:
        return
    if os.path.isfile(self.breakpointPath):
        with open(self.breakpointPath, "r") as fp:
            lines = fp.readlines()
        for line in lines:
            if line.startswith(filename + '='):
                breakpoint_linenumbers = eval(line[len(filename)+1:])
                for breakpoint_linenumber in breakpoint_linenumbers:
                    self.set_breakpoint(breakpoint_linenumber)
Example 15: execfile
# Required import: import tokenize
# Or: from tokenize import open
def execfile(self, filename, source=None):
    "Execute an existing file"
    if source is None:
        with tokenize.open(filename) as fp:
            source = fp.read()
    try:
        code = compile(source, filename, "exec")
    except (OverflowError, SyntaxError):
        self.tkconsole.resetoutput()
        print('*** Error in script or command!\n'
              'Traceback (most recent call last):',
              file=self.tkconsole.stderr)
        InteractiveInterpreter.showsyntaxerror(self, filename)
        self.tkconsole.showprompt()
    else:
        self.runcode(code)