本文整理汇总了Python中 lib2to3.pgen2.tokenize.detect_encoding 方法的典型用法代码示例。如果您正苦于以下问题:Python tokenize.detect_encoding 方法的具体用法?Python tokenize.detect_encoding 怎么用?Python tokenize.detect_encoding 使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块 lib2to3.pgen2.tokenize 的用法示例。
在下文中一共展示了tokenize.detect_encoding方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_all_project_files
# 需要导入模块: from lib2to3.pgen2 import tokenize [as 别名]
# 或者: from lib2to3.pgen2.tokenize import detect_encoding [as 别名]
def test_all_project_files(self):
    """Parse every project file and verify pretty-printing is idempotent."""
    for path in support.all_project_files():
        # Sniff the declared source encoding from the raw bytes first.
        with open(path, "rb") as stream:
            detected = tokenize.detect_encoding(stream.readline)[0]
        self.assertIsNotNone(detected,
                             "can't detect encoding for %s" % path)
        with open(path, "r", encoding=detected) as stream:
            original = stream.read()
        try:
            tree = driver.parse_string(original)
        except ParseError:
            # Retry with the grammar that lacks the print statement.
            try:
                tree = driver_no_print_statement.parse_string(original)
            except ParseError as err:
                self.fail('ParseError on file %s (%s)' % (path, err))
        rendered = str(tree)
        if rendered != original:
            print(diff_texts(original, rendered, path))
            self.fail("Idempotency failed: %s" % path)
示例2: test_all_project_files
# 需要导入模块: from lib2to3.pgen2 import tokenize [as 别名]
# 或者: from lib2to3.pgen2.tokenize import detect_encoding [as 别名]
def test_all_project_files(self):
    """Round-trip every project file; re-serialized output must match input.

    NOTE: Python 2 era code — relies on str.decode and the unicode builtin.
    """
    if sys.platform.startswith("win"):
        # XXX something with newlines goes wrong on Windows.
        return
    for path in support.all_project_files():
        with open(path, "rb") as stream:
            detected = tokenize.detect_encoding(stream.readline)[0]
        self.assertIsNotNone(detected,
                             "can't detect encoding for %s" % path)
        with open(path, "r") as stream:
            text = stream.read()
        text = text.decode(detected)
        tree = driver.parse_string(text)
        regenerated = unicode(tree)
        if diff(path, regenerated, detected):
            self.fail("Idempotency failed: %s" % path)
示例3: test_all_project_files
# 需要导入模块: from lib2to3.pgen2 import tokenize [as 别名]
# 或者: from lib2to3.pgen2.tokenize import detect_encoding [as 别名]
def test_all_project_files(self):
    """Round-trip every project file; re-serialized output must match input.

    NOTE: Python 2 era code — relies on str.decode and the unicode builtin.
    """
    if sys.platform.startswith("win"):
        # XXX something with newlines goes wrong on Windows.
        return
    for filepath in support.all_project_files():
        with open(filepath, "rb") as fp:
            encoding = tokenize.detect_encoding(fp.readline)[0]
        # assertIsNotNone matches the sibling tests in this file and gives a
        # clearer failure message than assertTrue(... is not None).
        self.assertIsNotNone(encoding,
                             "can't detect encoding for %s" % filepath)
        with open(filepath, "r") as fp:
            source = fp.read()
        source = source.decode(encoding)
        tree = driver.parse_string(source)
        new = unicode(tree)
        if diff(filepath, new, encoding):
            self.fail("Idempotency failed: %s" % filepath)
示例4: test_all_project_files
# 需要导入模块: from lib2to3.pgen2 import tokenize [as 别名]
# 或者: from lib2to3.pgen2.tokenize import detect_encoding [as 别名]
def test_all_project_files(self):
    """Parse every project file and check serialization is idempotent.

    Files that fail to parse are warned about (when verbose) and skipped
    rather than failing the test.
    """
    for path in support.all_project_files():
        with open(path, "rb") as stream:
            detected = tokenize.detect_encoding(stream.readline)[0]
        self.assertIsNotNone(detected,
                             "can't detect encoding for %s" % path)
        with open(path, "r", encoding=detected) as stream:
            text = stream.read()
        try:
            tree = driver.parse_string(text)
        except ParseError as err:
            if verbose > 0:
                warnings.warn('ParseError on file %s (%s)' % (path, err))
            continue
        regenerated = str(tree)
        if diff(path, regenerated):
            self.fail("Idempotency failed: %s" % path)
示例5: open_with_encoding
# 需要导入模块: from lib2to3.pgen2 import tokenize [as 别名]
# 或者: from lib2to3.pgen2.tokenize import detect_encoding [as 别名]
def open_with_encoding(filename, mode='r', encoding=None, limit_byte_check=-1):
    """Open *filename* with an explicit or auto-detected encoding.

    When *encoding* is falsy, it is determined via detect_encoding().
    newline='' preserves the file's original line endings.
    """
    chosen = encoding or detect_encoding(
        filename, limit_byte_check=limit_byte_check)
    return io.open(filename, mode=mode, encoding=chosen, newline='')
示例6: detect_encoding
# 需要导入模块: from lib2to3.pgen2 import tokenize [as 别名]
# 或者: from lib2to3.pgen2.tokenize import detect_encoding [as 别名]
def detect_encoding(filename, limit_byte_check=-1):
    """Return the file's encoding, falling back to 'latin-1' on failure."""
    try:
        from lib2to3.pgen2 import tokenize as lib2to3_tokenize
        with open(filename, 'rb') as source:
            found = lib2to3_tokenize.detect_encoding(source.readline)[0]
        # Validate the detected encoding by actually decoding a prefix
        # of the file; a bad cookie raises UnicodeDecodeError/LookupError.
        with open_with_encoding(filename, encoding=found) as probe:
            probe.read(limit_byte_check)
        return found
    except (LookupError, SyntaxError, UnicodeDecodeError):
        return 'latin-1'
示例7: fix_file
# 需要导入模块: from lib2to3.pgen2 import tokenize [as 别名]
# 或者: from lib2to3.pgen2.tokenize import detect_encoding [as 别名]
def fix_file(filename, options=None, output=None, apply_config=False):
    """Fix one file: emit a diff, rewrite in place, or return the fixed text.

    Returns the diff text when options.diff is set, the fixed source when an
    in-place rewrite changed the file, otherwise the fixed source / None as
    in the original control flow.
    """
    if not options:
        options = parse_args([filename], apply_config=apply_config)

    original_source = readlines_from_file(filename)
    fixed_source = original_source

    # Encoding is only needed when we will write somewhere.
    if options.in_place or output:
        encoding = detect_encoding(filename)
    if output:
        output = LineEndingWrapper(wrap_output(output, encoding=encoding))

    fixed_source = fix_lines(fixed_source, options, filename=filename)

    if options.diff:
        fixed_lines = io.StringIO(fixed_source).readlines()
        diff = get_diff_text(original_source, fixed_lines, filename)
        if output:
            output.write(diff)
            output.flush()
        return diff

    if options.in_place:
        before = "".join(original_source).splitlines()
        after = fixed_source.splitlines()
        if before == after:
            # Nothing changed; skip the rewrite.
            return None
        with open_with_encoding(filename, 'w', encoding=encoding) as fp:
            fp.write(fixed_source)
        return fixed_source

    if output:
        output.write(fixed_source)
        output.flush()
    return fixed_source
示例8: open_with_encoding
# 需要导入模块: from lib2to3.pgen2 import tokenize [as 别名]
# 或者: from lib2to3.pgen2.tokenize import detect_encoding [as 别名]
def open_with_encoding(filename,
                       encoding=None, mode='r', limit_byte_check=-1):
    """Open *filename*, auto-detecting the encoding when none is given."""
    chosen = encoding if encoding else detect_encoding(
        filename, limit_byte_check=limit_byte_check)
    # newline='' keeps the file's original line endings intact.
    return io.open(filename, mode=mode, encoding=chosen, newline='')
示例9: detect_encoding
# 需要导入模块: from lib2to3.pgen2 import tokenize [as 别名]
# 或者: from lib2to3.pgen2.tokenize import detect_encoding [as 别名]
def detect_encoding(filename, limit_byte_check=-1):
    """Return the file's encoding, defaulting to 'latin-1' if undetectable."""
    try:
        from lib2to3.pgen2 import tokenize as lib2to3_tokenize
        with open(filename, 'rb') as source:
            found = lib2to3_tokenize.detect_encoding(source.readline)[0]
        # Sanity-check the encoding by decoding (a prefix of) the file.
        with open_with_encoding(filename, found) as probe:
            probe.read(limit_byte_check)
        return found
    except (LookupError, SyntaxError, UnicodeDecodeError):
        return 'latin-1'
示例10: fix_file
# 需要导入模块: from lib2to3.pgen2 import tokenize [as 别名]
# 或者: from lib2to3.pgen2.tokenize import detect_encoding [as 别名]
def fix_file(filename, options=None, output=None, apply_config=False):
    """Fix one file: emit a diff, rewrite in place, or return the fixed text.

    Returns the diff text (options.diff with no *output*) or the fixed
    source (no *output*, not in-place); otherwise writes to *output* or the
    file itself and returns None.
    """
    if not options:
        options = parse_args([filename], apply_config=apply_config)

    original_source = readlines_from_file(filename)
    fixed_source = original_source

    # Encoding is only needed when we will write somewhere.
    if options.in_place or output:
        encoding = detect_encoding(filename)
    if output:
        output = LineEndingWrapper(wrap_output(output, encoding=encoding))

    fixed_source = fix_lines(fixed_source, options, filename=filename)

    if options.diff:
        new = io.StringIO(fixed_source)
        new = new.readlines()
        diff = get_diff_text(original_source, new, filename)
        if output:
            output.write(diff)
            output.flush()
        else:
            return diff
    elif options.in_place:
        # Use a context manager so the handle is closed even if the write
        # raises (the original opened and closed the file manually, leaking
        # the handle on error).
        with open_with_encoding(filename, encoding=encoding, mode='w') as fp:
            fp.write(fixed_source)
    else:
        if output:
            output.write(fixed_source)
            output.flush()
        else:
            return fixed_source
示例11: openpy
# 需要导入模块: from lib2to3.pgen2 import tokenize [as 别名]
# 或者: from lib2to3.pgen2.tokenize import detect_encoding [as 别名]
def openpy(filename):
    """Open a Python source file read-only using its declared encoding.

    Returns a TextIOWrapper over the raw binary stream, decoded with the
    encoding reported by detect_encoding().
    """
    from lib2to3.pgen2.tokenize import detect_encoding
    import io
    # The following is adapted from tokenize.py in Python 3.2,
    # Copyright (c) 2001-2014 Python Software Foundation; All Rights Reserved
    buffer = io.open(filename, 'rb')
    try:
        encoding, lines = detect_encoding(buffer.readline)
        buffer.seek(0)
        text = io.TextIOWrapper(buffer, encoding, line_buffering=True)
        text.mode = 'r'
        return text
    except Exception:
        # Don't leak the raw file handle if encoding detection fails
        # (the original left the file open on error).
        buffer.close()
        raise
示例12: IsPythonFile
# 需要导入模块: from lib2to3.pgen2 import tokenize [as 别名]
# 或者: from lib2to3.pgen2.tokenize import detect_encoding [as 别名]
def IsPythonFile(filename):
    """Return True if filename is a Python file."""
    # A .py extension is conclusive on its own.
    if os.path.splitext(filename)[1] == '.py':
        return True

    # Otherwise sniff the encoding and confirm the file actually decodes.
    try:
        with open(filename, 'rb') as fd:
            detected = tokenize.detect_encoding(fd.readline)[0]
        with py3compat.open_with_encoding(
            filename, mode='r', encoding=detected) as fd:
            fd.read()
    except UnicodeDecodeError:
        detected = 'latin-1'
    except (IOError, SyntaxError):
        # If we fail to detect encoding (or the encoding cookie is incorrect -
        # which makes detect_encoding raise SyntaxError), assume it's not a
        # Python file.
        return False

    # Finally, look for a python shebang on the first line.
    try:
        with py3compat.open_with_encoding(
            filename, mode='r', encoding=detected) as fd:
            first_line = fd.readline(256)
    except IOError:
        return False
    return re.match(r'^#!.*\bpython[23]?\b', first_line)
示例13: FileEncoding
# 需要导入模块: from lib2to3.pgen2 import tokenize [as 别名]
# 或者: from lib2to3.pgen2.tokenize import detect_encoding [as 别名]
def FileEncoding(filename):
    """Return the file's encoding as reported by tokenize.detect_encoding."""
    with open(filename, 'rb') as source:
        encoding, _ = tokenize.detect_encoding(source.readline)
    return encoding
示例14: decode_source
# 需要导入模块: from lib2to3.pgen2 import tokenize [as 别名]
# 或者: from lib2to3.pgen2.tokenize import detect_encoding [as 别名]
def decode_source(source):
    """Decode *source* bytes to str via its detected encoding.

    Non-bytes input is returned unchanged.
    """
    if not isinstance(source, bytes):
        return source
    encoding, _ = detect_encoding(io.BytesIO(source).readline)
    return source.decode(encoding)
示例15: open_with_encoding_check
# 需要导入模块: from lib2to3.pgen2 import tokenize [as 别名]
# 或者: from lib2to3.pgen2.tokenize import detect_encoding [as 别名]
def open_with_encoding_check(filename):  # type: ignore
    """Open a file in read only mode using the encoding detected by
    detect_encoding().
    """
    raw = io.open(filename, 'rb')
    try:
        found, _ = detect_encoding(raw.readline)
        # Rewind so the text wrapper sees the whole file, cookie included.
        raw.seek(0)
        wrapper = io.TextIOWrapper(raw, found, line_buffering=True)
        wrapper.mode = 'r'
        return wrapper
    except:
        # Close the raw handle before propagating any failure.
        raw.close()
        raise