This article collects typical usage examples of the open function from Python's tokenize module, answering the usual questions: what does tokenize.open do, how is it called, and what does real code that uses it look like?
The following 15 code examples are drawn from open source projects and are sorted by popularity.
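
All fifteen examples rely on the same core behaviour: tokenize.open() opens a Python source file read-only in text mode, detecting its encoding from the BOM or the PEP 263 coding cookie in the first two lines and falling back to UTF-8. Here is a minimal self-contained sketch of that behaviour; the file name and contents are invented for illustration.

import tokenize

# Write a demo source file that declares a non-default encoding.
# ("demo_latin1.py" and its contents exist only for this sketch.)
with open("demo_latin1.py", "w", encoding="latin-1") as f:
    f.write("# -*- coding: latin-1 -*-\n")
    f.write("greeting = 'café'\n")

# tokenize.open() honours the coding cookie, so the file decodes
# correctly; a plain open() would try UTF-8 (or the locale default)
# and could fail on the 0xE9 byte of 'é'.
with tokenize.open("demo_latin1.py") as f:
    print(f.read())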
Example 1: convert_dataset_metadata
def convert_dataset_metadata(in_dir, out_dir):
    meta_dict = OrderedDict()
    meta_dict["BIDSVersion"] = "1.0.0"

    study_key_file = os.path.join(in_dir, "study_key.txt")
    if os.path.exists(study_key_file):
        meta_dict["Name"] = tokenize.open(study_key_file).read().strip()
    else:
        # Fall back to the directory name; with a trailing separator the
        # last path component is empty, so take the one before it.
        if in_dir.endswith(os.sep):
            meta_dict["Name"] = in_dir.split(os.sep)[-2]
        else:
            meta_dict["Name"] = in_dir.split(os.sep)[-1]

    ref_file = os.path.join(in_dir, "references.txt")
    if os.path.exists(ref_file):
        meta_dict["ReferencesAndLinks"] = tokenize.open(ref_file).read().strip()

    lic_file = os.path.join(in_dir, "license.txt")
    if os.path.exists(lic_file):
        meta_dict["License"] = tokenize.open(lic_file).read().strip()

    json.dump(meta_dict,
              open(os.path.join(out_dir, "dataset_description.json"), "w"),
              sort_keys=True, indent=4, separators=(',', ': '))

    readme = os.path.join(in_dir, "README")
    if os.path.exists(readme):
        shutil.copy(readme, os.path.join(out_dir, "README"))
    elif os.path.exists(readme + ".txt"):
        shutil.copy(readme + ".txt", os.path.join(out_dir, "README"))
Example 2: investigate_pep8_status
def investigate_pep8_status(self, filename):
    sys.stdout.write("%s: " % (filename,))
    sys.stdout.flush()
    with tokenize.open(filename) as i:
        source = i.read()
    if not is_valid_source(source):
        return
    errors = self.errors_in_source(source)
    if errors:
        print(', '.join(errors))
    else:
        print('clean')
        return
    changed = True
    while changed:
        changed = False
        for error, source in list(self.best_examples.items()):
            self.note_source(source)
            target = self.example_file_for_error(error)
            if os.path.exists(target):
                existing_length = len(tokenize.open(target).read())
                if existing_length <= len(source):
                    continue
                else:
                    print((
                        "A smaller example for %s (%d < %d). Simplifying "
                        "an example from %s"
                    ) % (
                        error,
                        len(source), existing_length,
                        self.trash_file(source)))
            else:
                print(
                    '%s is new. Simplifying an example from %s' % (
                        error, self.trash_file(source)))
            changed = True
            example = self.find_minimal_example_from_source(
                source,
                is_example=lambda source:
                    error in self.errors_in_source(source),
            )
            assert len(example) <= len(source)
            with open(target, 'w') as o:
                o.write(example)
Example 3: main
def main():
    def make_callback(text):
        return count_calls_decorator(
            lambda file_, start, stop: log(text, file_, start, stop)
        )

    nci_callback = make_callback('None-coalescing `if` block')
    nco_callback = make_callback('[Possible] None-coalescing `or`')
    nct_callback = make_callback('None-coalescing ternary')
    sna_callback = make_callback('Safe navigation `and`')
    sni_callback = make_callback('Safe navigation `if` block')
    snt_callback = make_callback('Safe navigation ternary')

    files = sys.argv[1:]
    if files:
        # Expand any glob patterns given on the command line.
        expanded_files = []
        for file_ in files:
            if '*' in file_:
                expanded_files.extend(glob.glob(file_))
            else:
                expanded_files.append(file_)
        files = expanded_files
    else:
        files = glob.glob(os.path.join(sys.prefix, 'Lib', '**', '*.py'),
                          recursive=True)

    for file_ in files:
        try:
            source = tokenize.open(file_)
        except (SyntaxError, UnicodeDecodeError):
            continue
        with source:
            try:
                tree = ast.parse(source.read(), filename=file_)
            except SyntaxError:
                continue
        NoneCoalesceIfBlockVisitor(file_, nci_callback).visit(tree)
        NoneCoalesceOrVisitor(file_, nco_callback).visit(tree)
        NoneCoalesceTernaryVisitor(file_, nct_callback).visit(tree)
        SafeNavAndVisitor(file_, sna_callback).visit(tree)
        SafeNavIfBlockVisitor(file_, sni_callback).visit(tree)
        SafeNavTernaryVisitor(file_, snt_callback).visit(tree)

    print('Total None-coalescing `if` blocks: {}'
          .format(get_call_count(nci_callback)))
    print('Total [possible] None-coalescing `or`: {}'
          .format(get_call_count(nco_callback)))
    print('Total None-coalescing ternaries: {}'
          .format(get_call_count(nct_callback)))
    print('Total Safe navigation `and`: {}'
          .format(get_call_count(sna_callback)))
    print('Total Safe navigation `if` blocks: {}'
          .format(get_call_count(sni_callback)))
    print('Total Safe navigation ternaries: {}'
          .format(get_call_count(snt_callback)))
Example 4: patch
def patch(self, filename):
    self.current_file = filename

    with tokenize.open(filename) as fp:
        content = fp.read()

    old_content = content
    for operation in self.operations:
        content = operation.patch(content)

    if content == old_content:
        # no change
        self.check(content)
        if self.options.to_stdout:
            self.write_stdout(content)
        return False

    with open(filename, "rb") as fp:
        encoding, _ = tokenize.detect_encoding(fp.readline)

    if not self.options.quiet:
        print("Patch %s" % filename)
    if not self.options.to_stdout:
        with open(filename, "w", encoding=encoding) as fp:
            fp.write(content)
    else:
        self.write_stdout(content)

    self.check(content)
    return True
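
The detail worth copying from Example 4 is the write-back step: tokenize.open() is used for reading, and tokenize.detect_encoding() is then run separately to recover the file's declared encoding so the patched content can be written back in the same encoding. A condensed sketch of that round-trip idiom, with an invented function name and transform parameter:

import tokenize

def rewrite_in_place(filename, transform):
    # Read the source, honouring its BOM / PEP 263 coding cookie.
    with tokenize.open(filename) as fp:
        content = fp.read()
    # Recover the declared encoding so the rewrite stays lossless.
    with open(filename, "rb") as fp:
        encoding, _ = tokenize.detect_encoding(fp.readline)
    # Write the transformed source back in the original encoding.
    with open(filename, "w", encoding=encoding) as fp:
        fp.write(transform(content))

Reusing the detected encoding, rather than hard-coding UTF-8, keeps Latin-1 or cp1252 sources intact across the rewrite.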
Example 5: read_py_file
def read_py_file(filepath):
    if sys.version_info < (3, ):
        return open(filepath, 'rU').read()
    else:
        # see https://docs.python.org/3/library/tokenize.html#tokenize.detect_encoding
        # first just see if the file is properly encoded
        try:
            with open(filepath, 'rb') as f:
                tokenize.detect_encoding(f.readline)
        except SyntaxError as err:
            # this warning is issued:
            # (1) in badly authored files (contains non-utf8 in a comment line)
            # (2) a coding is specified, but wrong and
            # (3) no coding is specified, and the default
            #     'utf8' fails to decode.
            # (4) the encoding specified by a pep263 declaration did not match
            #     with the encoding detected by inspecting the BOM
            raise CouldNotHandleEncoding(filepath, err)

        try:
            return tokenize.open(filepath).read()
            # this warning is issued:
            # (1) if utf-8 is specified, but latin1 is used with something like \x0e9 appearing
            #     (see http://stackoverflow.com/a/5552623)
        except UnicodeDecodeError as err:
            raise CouldNotHandleEncoding(filepath, err)
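
The failure modes listed in the comments above are easy to reproduce. A small sketch, assuming an invented file name, that makes tokenize.detect_encoding() reject a bogus coding cookie with the SyntaxError the example catches:

import tokenize

# A source file whose PEP 263 cookie names a codec that does not exist.
with open("bad_cookie.py", "w", encoding="ascii") as f:
    f.write("# -*- coding: not-a-real-codec -*-\n")
    f.write("x = 1\n")

try:
    with open("bad_cookie.py", "rb") as f:
        tokenize.detect_encoding(f.readline)
except SyntaxError as err:
    print("rejected:", err)  # unknown encoding: not-a-real-codec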
Example 6: main
def main():
    if len(sys.argv) > 1 and sys.argv[1] == "+diag":
        del sys.argv[1]
        diag = True
    else:
        diag = False

    if len(sys.argv) > 1 and sys.argv[1] == "+compile":
        del sys.argv[1]
        compile_only = True
    else:
        compile_only = False

    ddb_path = os.path.join(os.path.dirname(sys.argv[1]), "device_db.py")
    dmgr = DeviceManager(DeviceDB(ddb_path))

    with tokenize.open(sys.argv[1]) as f:
        testcase_code = compile(f.read(), f.name, "exec")
        testcase_vars = {'__name__': 'testbench', 'dmgr': dmgr}
        exec(testcase_code, testcase_vars)

    try:
        core = dmgr.get("core")
        if compile_only:
            core.compile(testcase_vars["entrypoint"], (), {})
        else:
            core.run(testcase_vars["entrypoint"], (), {})
    except CompileError as error:
        if not diag:
            exit(1)
Example 7: _verify_pre_check
def _verify_pre_check(filepath):
    """Check student code for certain issues."""
    # Make sure the program doesn't crash for students.
    # Could use some improvement for better logging and error reporting.
    try:
        # Check for inline "pylint:" comment, which may indicate a student
        # trying to disable a check.
        with tokenize.open(os.path.expanduser(filepath)) as f:
            for tok_type, content, _, _, _ in tokenize.generate_tokens(f.readline):
                if tok_type != tokenize.COMMENT:
                    continue
                match = pylint.utils.OPTION_RGX.search(content)
                if match is not None:
                    print('ERROR: string "pylint:" found in comment. ' +
                          'No check run on file `{}`\n'.format(filepath))
                    return False
    except IndentationError as e:
        print('ERROR: python_ta could not check your code due to an ' +
              'indentation error at line {}'.format(e.lineno))
        return False
    except tokenize.TokenError as e:
        print('ERROR: python_ta could not check your code due to a ' +
              'syntax error in your file')
        return False
    return True
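
Example 7 pairs tokenize.open() with tokenize.generate_tokens() so that only COMMENT tokens are inspected. The same scanning loop in isolation, fed from an in-memory string rather than a file (the sample source is invented):

import io
import tokenize

source = "x = 1  # pylint: disable=invalid-name\n"
for tok_type, text, _, _, _ in tokenize.generate_tokens(io.StringIO(source).readline):
    if tok_type == tokenize.COMMENT:
        print("comment:", text)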
Example 8: check
def check(file):
    """check(file_or_dir)

    If file_or_dir is a directory and not a symbolic link, then recursively
    descend the directory tree named by file_or_dir, checking all .py files
    along the way. If file_or_dir is an ordinary Python source file, it is
    checked for whitespace related problems. The diagnostic messages are
    written to standard output using the print statement.
    """
    if os.path.isdir(file) and not os.path.islink(file):
        if verbose:
            print("%r: listing directory" % (file,))
        names = os.listdir(file)
        for name in names:
            fullname = os.path.join(file, name)
            if (os.path.isdir(fullname) and
                not os.path.islink(fullname) or
                os.path.normcase(name[-3:]) == ".py"):
                check(fullname)
        return

    try:
        f = tokenize.open(file)
    except IOError as msg:
        errprint("%r: I/O Error: %s" % (file, msg))
        return

    if verbose > 1:
        print("checking %r ..." % file)

    try:
        process_tokens(tokenize.generate_tokens(f.readline))
    except tokenize.TokenError as msg:
        errprint("%r: Token Error: %s" % (file, msg))
        return
    except IndentationError as msg:
        errprint("%r: Indentation Error: %s" % (file, msg))
        return
    except NannyNag as nag:
        badline = nag.get_lineno()
        line = nag.get_line()
        if verbose:
            print("%r: *** Line %d: trouble in tab city! ***" % (file, badline))
            print("offending line: %r" % (line,))
            print(nag.get_msg())
        else:
            if ' ' in file: file = '"' + file + '"'
            if filename_only: print(file)
            else: print(file, badline, repr(line))
        return
    finally:
        f.close()

    if verbose:
        print("%r: Clean bill of health." % (file,))
Example 9: _open
def _open(self, filename):
    if filename.endswith('.py') and hasattr(tokenize, 'open'):
        # On Python 3.2 and newer, open Python files with tokenize.open().
        # This function uses the encoding cookie to get the encoding.
        return tokenize.open(filename)
    else:
        return open(filename)
Example 10: on_file
def on_file(self, tree: MypyFile, type_map: Dict[Expression, Type]) -> None:
    self.last_xml = None
    path = os.path.relpath(tree.path)
    if stats.is_special_module(path):
        return
    if path.startswith('..'):
        return
    if 'stubs' in path.split('/'):
        return

    visitor = stats.StatisticsVisitor(inferred=True, typemap=type_map, all_nodes=True)
    tree.accept(visitor)

    root = etree.Element('mypy-report-file', name=path, module=tree._fullname)
    doc = etree.ElementTree(root)
    file_info = FileInfo(path, tree._fullname)

    with tokenize.open(path) as input_file:
        for lineno, line_text in enumerate(input_file, 1):
            status = visitor.line_map.get(lineno, stats.TYPE_EMPTY)
            file_info.counts[status] += 1
            etree.SubElement(root, 'line',
                             number=str(lineno),
                             precision=stats.precision_names[status],
                             content=line_text[:-1])

    # Assumes a layout similar to what XmlReporter uses.
    xslt_path = os.path.relpath('mypy-html.xslt', path)
    transform_pi = etree.ProcessingInstruction(
        'xml-stylesheet',
        'type="text/xsl" href="%s"' % cgi.escape(xslt_path, True))
    root.addprevious(transform_pi)

    self.schema.assertValid(doc)

    self.last_xml = doc
    self.files.append(file_info)
Example 11: updatecache
def updatecache(filename, module_globals=None):
    """Update a cache entry and return its list of lines.
    If something's wrong, print a message, discard the cache entry,
    and return an empty list."""

    if filename in cache:
        if len(cache[filename]) != 1:
            del cache[filename]
    if not filename or (filename.startswith("<") and filename.endswith(">")):
        return []

    fullname = filename
    try:
        stat = os.stat(fullname)
    except OSError:
        basename = filename

        # Realise a lazy loader based lookup if there is one
        # otherwise try to lookup right now.
        if lazycache(filename, module_globals):
            try:
                data = cache[filename][0]()
            except (ImportError, OSError):
                pass
            else:
                if data is None:
                    # No luck, the PEP302 loader cannot find the source
                    # for this module.
                    return []
                cache[filename] = (
                    len(data),
                    None,
                    [line + "\n" for line in data.splitlines()],
                    fullname,
                )
                return cache[filename][2]

        # Try looking through the module search path, which is only useful
        # when handling a relative filename.
        if os.path.isabs(filename):
            return []

        for dirname in sys.path:
            try:
                fullname = os.path.join(dirname, basename)
            except (TypeError, AttributeError):
                # Not sufficiently string-like to do anything useful with.
                continue
            try:
                stat = os.stat(fullname)
                break
            except OSError:
                pass
        else:
            return []

    try:
        with tokenize.open(fullname) as fp:
            lines = fp.readlines()
    except OSError:
        return []
    if lines and not lines[-1].endswith("\n"):
        lines[-1] += "\n"
    size, mtime = stat.st_size, stat.st_mtime
    cache[filename] = size, mtime, lines, fullname
    return lines
Example 12: check_spelling
def check_spelling():
    """Check commonly misspelled words."""
    # Words which I often misspell
    words = {'[Bb]ehaviour', '[Qq]uitted', '[Ll]ikelyhood', '[Ss]ucessfully',
             '[Oo]ccur[^r .]', '[Ss]eperator', '[Ee]xplicitely', '[Rr]esetted',
             '[Aa]uxillary', '[Aa]ccidentaly', '[Aa]mbigious', '[Ll]oosly',
             '[Ii]nitialis', '[Cc]onvienence', '[Ss]imiliar', '[Uu]ncommited',
             '[Rr]eproducable'}
    # Words which look better when splitted, but might need some fine tuning.
    words |= {'[Kk]eystrings', '[Ww]ebelements', '[Mm]ouseevent',
              '[Kk]eysequence', '[Nn]ormalmode', '[Ee]ventloops',
              '[Ss]izehint', '[Ss]tatemachine', '[Mm]etaobject',
              '[Ll]ogrecord', '[Ff]iletype'}
    seen = collections.defaultdict(list)
    try:
        ok = True
        for fn in _py_files():
            with tokenize.open(fn) as f:
                if fn == os.path.join('.', 'scripts', 'misc_checks.py'):
                    continue
                for line in f:
                    for w in words:
                        if re.search(w, line) and fn not in seen[w]:
                            print('Found "{}" in {}!'.format(w, fn))
                            seen[w].append(fn)
                            ok = False
        print()
        return ok
    except Exception:
        traceback.print_exc()
        return None
Example 13: check_spelling
def check_spelling(target):
    """Check commonly misspelled words."""
    # Words which I often misspell
    words = {'behaviour', 'quitted', 'likelyhood', 'sucessfully',
             'occur[^r .]', 'seperator', 'explicitely', 'resetted',
             'auxillary', 'accidentaly', 'ambigious', 'loosly',
             'initialis', 'convienence', 'similiar', 'uncommited',
             'reproducable'}
    # Words which look better when splitted, but might need some fine tuning.
    words |= {'keystrings', 'webelements', 'mouseevent', 'keysequence',
              'normalmode', 'eventloops', 'sizehint', 'statemachine',
              'metaobject', 'logrecord', 'monkeypatch', 'filetype'}
    seen = collections.defaultdict(list)
    try:
        ok = True
        for fn in _py_files(target):
            with tokenize.open(fn) as f:
                if fn == os.path.join('scripts', 'misc_checks.py'):
                    continue
                for line in f:
                    for w in words:
                        if re.search(w, line) and fn not in seen[w]:
                            print("Found '{}' in {}!".format(w, fn))
                            seen[w].append(fn)
                            ok = False
        print()
        return ok
    except Exception:
        traceback.print_exc()
        return None
Example 14: on_file
def on_file(self,
            tree: MypyFile,
            type_map: Dict[Expression, Type],
            options: Options) -> None:
    path = os.path.relpath(tree.path)
    visitor = stats.StatisticsVisitor(inferred=True, filename=tree.fullname(),
                                      typemap=type_map, all_nodes=True)
    tree.accept(visitor)

    class_name = os.path.basename(path)
    file_info = FileInfo(path, tree._fullname)
    class_element = etree.Element('class',
                                  filename=path,
                                  complexity='1.0',
                                  name=class_name)
    etree.SubElement(class_element, 'methods')
    lines_element = etree.SubElement(class_element, 'lines')

    with tokenize.open(path) as input_file:
        class_lines_covered = 0
        class_total_lines = 0
        for lineno, _ in enumerate(input_file, 1):
            status = visitor.line_map.get(lineno, stats.TYPE_EMPTY)
            hits = 0
            branch = False
            if status == stats.TYPE_EMPTY:
                continue
            class_total_lines += 1
            if status != stats.TYPE_ANY:
                class_lines_covered += 1
                hits = 1
            if status == stats.TYPE_IMPRECISE:
                branch = True
            file_info.counts[status] += 1
            line_element = etree.SubElement(lines_element, 'line',
                                            number=str(lineno),
                                            precision=stats.precision_names[status],
                                            hits=str(hits),
                                            branch=str(branch).lower())
            if branch:
                line_element.attrib['condition-coverage'] = '50% (1/2)'
        class_element.attrib['branch-rate'] = '0'
        class_element.attrib['line-rate'] = get_line_rate(class_lines_covered,
                                                          class_total_lines)

    # parent_module is set to whichever module contains this file. For most files, we want
    # to simply strip the last element off of the module. But for __init__.py files,
    # the module == the parent module.
    parent_module = file_info.module.rsplit('.', 1)[0]
    if file_info.name.endswith('__init__.py'):
        parent_module = file_info.module

    if parent_module not in self.root_package.packages:
        self.root_package.packages[parent_module] = CoberturaPackage(parent_module)
    current_package = self.root_package.packages[parent_module]
    packages_to_update = [self.root_package, current_package]
    for package in packages_to_update:
        package.total_lines += class_total_lines
        package.covered_lines += class_lines_covered
    current_package.classes[class_name] = class_element
Example 15: test_getline
def test_getline(self):
    with tokenize.open(self.file_name) as fp:
        for index, line in enumerate(fp):
            if not line.endswith('\n'):
                line += '\n'
            cached_line = linecache.getline(self.file_name, index + 1)
            self.assertEqual(line, cached_line)