本文整理汇总了Python中tokenize.detect_encoding函数的典型用法代码示例。如果您正苦于以下问题:Python detect_encoding函数的具体用法?Python detect_encoding怎么用?Python detect_encoding使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了detect_encoding函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_filename_in_exception
def test_filename_in_exception(self):
    """detect_encoding() should name the offending file in its
    SyntaxError whenever the readline object carries a ``name``."""
    path = 'some_file_path'
    lines = (
        b'print("\xdf")',  # Latin-1: LATIN SMALL LETTER SHARP S
    )

    class Bunk:
        # Minimal stand-in for a file object: exposes name + readline().
        def __init__(self, lines, path):
            self.name = path
            self._lines = lines
            self._index = 0

        def readline(self):
            if self._index == len(lines):
                raise StopIteration
            current = lines[self._index]
            self._index += 1
            return current

    # Make sure lacking a name isn't an issue.
    ins = Bunk(lines, path)
    del ins.name
    with self.assertRaises(SyntaxError):
        detect_encoding(ins.readline)

    # With a name present, the path must show up in the message.
    ins = Bunk(lines, path)
    with self.assertRaisesRegex(SyntaxError, '.*{}'.format(path)):
        detect_encoding(ins.readline)
示例2: read_py_file
def read_py_file(filepath):
    """Return the text of the Python source file at *filepath*, decoded
    using the encoding the file itself declares (PEP 263 cookie / BOM).

    Raises CouldNotHandleEncoding when the declared or default encoding
    cannot decode the file's bytes.
    """
    if sys.version_info < (3, ):
        # Python 2: universal-newline text mode.  Use a context manager
        # instead of leaking the file handle (original did open().read()).
        with open(filepath, 'rU') as f:
            return f.read()
    # see https://docs.python.org/3/library/tokenize.html#tokenize.detect_encoding
    # First just see if the file is properly encoded.
    try:
        with open(filepath, 'rb') as f:
            tokenize.detect_encoding(f.readline)
    except SyntaxError as err:
        # detect_encoding raises SyntaxError when:
        # (1) the file is badly authored (non-utf8 bytes in a comment line),
        # (2) a coding is specified, but wrong,
        # (3) no coding is specified and the default 'utf8' fails to decode, or
        # (4) the encoding specified by a pep263 declaration did not match
        #     the encoding detected by inspecting the BOM.
        raise CouldNotHandleEncoding(filepath, err)
    try:
        # tokenize.open decodes with the detected encoding; close the
        # handle via the context manager instead of leaking it.
        with tokenize.open(filepath) as f:
            return f.read()
    except UnicodeDecodeError as err:
        # Raised e.g. if utf-8 is specified but latin-1 is used with
        # something like \x0e9 appearing
        # (see http://stackoverflow.com/a/5552623).
        raise CouldNotHandleEncoding(filepath, err)
示例3: update_fileinfo
def update_fileinfo(cls, fileinfo, document=None):
    """Detect the text encoding for *fileinfo* and record it.

    Without a *document* the file on disk is probed; otherwise the first
    1024 characters of the in-memory document are inspected.
    """
    import tokenize
    if document:
        # Probe the in-memory text rather than the file on disk.
        head = document.gettext(0, 1024).encode('utf-8', errors='ignore')
        stream = io.BytesIO(head)
        encoding, lines = tokenize.detect_encoding(stream.readline)
        fileinfo.encoding = encoding
    else:
        try:
            with open(fileinfo.fullpathname, 'rb') as stream:
                encoding, lines = tokenize.detect_encoding(stream.readline)
        except IOError:
            # Unreadable file: leave fileinfo.encoding untouched.
            pass
        else:
            fileinfo.encoding = encoding
示例4: _readSourceCodeFromFilename3
def _readSourceCodeFromFilename3(source_filename):
import tokenize
try:
with open(source_filename, "rb") as source_file:
encoding = tokenize.detect_encoding(source_file.readline)[0] # @UndefinedVariable
# Rewind to get the whole file.
source_file.seek(0)
source_code = source_file.read()
return source_code.decode(encoding)
except SyntaxError as e:
if Options.isFullCompat():
if PythonVersions.doShowUnknownEncodingName():
match = re.match("unknown encoding for '.*?': (.*)", e.args[0])
complaint = match.group(1)
else:
complaint = "with BOM"
e.args = (
"encoding problem: %s" % complaint,
(source_filename, 1, None, None)
)
if hasattr(e, "msg"):
e.msg = e.args[0]
raise
示例5: get_source
def get_source(self, fullname):
    """Concrete implementation of InspectLoader.get_source.

    Fetches the raw bytes via get_data() and, on Python 3, decodes them
    with the encoding declared in the source (PEP 263) while normalizing
    newlines.  Raises ImportError when the source is unavailable or
    cannot be decoded.  On Python 2 the raw bytes are returned as-is.
    """
    path = self.get_filename(fullname)
    try:
        source_bytes = self.get_data(path)
    except IOError:
        raise ImportError("source not available through get_data()")
    if py3k:
        # NOTE(review): py3k is a module-level flag defined elsewhere.
        import io, tokenize
        readsource = io.BytesIO(source_bytes).readline
        try:
            # detect_encoding returns (encoding_name, sample_lines).
            encoding = tokenize.detect_encoding(readsource)
        except SyntaxError as exc:
            # Bad/conflicting PEP 263 cookie or BOM.
            raise ImportError("Failed to detect encoding")
        # Translate \r\n and \r to \n while decoding.
        newline_decoder = io.IncrementalNewlineDecoder(None, True)
        try:
            return newline_decoder.decode(source_bytes.decode(encoding[0]))
        except UnicodeDecodeError as exc:
            raise ImportError("Failed to decode source file")
    else:
        return source_bytes  # XXX proper encoding
示例6: source_to_unicode
def source_to_unicode(txt, errors='replace', skip_encoding_cookie=True):
    """Converts a bytes string with python source code to unicode.

    Unicode strings are passed through unchanged.  Byte strings are
    checked for the python source file encoding cookie to determine the
    encoding.  *txt* can be either a bytes buffer or a string containing
    the source code.
    """
    if isinstance(txt, unicode):
        return txt
    buffer = BytesIO(txt) if isinstance(txt, bytes) else txt
    try:
        encoding, _ = detect_encoding(buffer.readline)
    except SyntaxError:
        # No usable cookie; fall back to a conservative default.
        encoding = "ascii"
    buffer.seek(0)
    text = TextIOWrapper(buffer, encoding, errors=errors, line_buffering=True)
    text.mode = 'r'
    if not skip_encoding_cookie:
        return text.read()
    # Strip the cookie line: compiling a unicode string that still
    # carries an encoding declaration is a SyntaxError on Python 2.
    return u"".join(strip_encoding_cookie(text))
示例7: read_py_url
def read_py_url(url, errors='replace', skip_encoding_cookie=True):
    """Read a Python file from a URL, using the encoding declared inside the file.

    Parameters
    ----------
    url : str
        The URL from which to fetch the file.
    errors : str
        How to handle decoding errors in the file. Options are the same as for
        bytes.decode(), but here 'replace' is the default.
    skip_encoding_cookie : bool
        If True (the default), and the encoding declaration is found in the first
        two lines, that line will be excluded from the output - compiling a
        unicode string with an encoding declaration is a SyntaxError in Python 2.

    Returns
    -------
    A unicode string containing the contents of the file.
    """
    response = urllib.request.urlopen(url)
    payload = io.BytesIO(response.read())
    # Let tokenize find the PEP 263 cookie / BOM, then rewind.
    encoding, lines = detect_encoding(payload.readline)
    payload.seek(0)
    wrapper = TextIOWrapper(payload, encoding, errors=errors, line_buffering=True)
    wrapper.mode = 'r'
    if not skip_encoding_cookie:
        return wrapper.read()
    return "".join(strip_encoding_cookie(wrapper))
示例8: execute
def execute(self):
    """Run the configured script file in a fresh ``__main__`` namespace.

    Returns True on success, False when the script raised; SystemExit and
    KeyboardInterrupt are re-raised to the caller.
    """
    # Try to detect the encoding for you.
    with open(self.script, 'rb') as stream:
        try:
            encoding = tokenize.detect_encoding(stream.readline)[0]
        except SyntaxError:
            encoding = "utf-8"

    # Set the global values for the module.
    global_values = {
        '__file__': self.script, # Use actual filename of the script.
        '__name__': '__main__'   # Make sure that 'if __name__ == "__main__"'-hook works
    }

    with open(self.script, 'r', encoding=encoding) as stream:
        # Do not inherit any 'from future import ...'-statements
        # that may be used by AnimaFX.
        # Additionally set the current filename.
        code = compile(stream.read(), self.script, 'exec', False)

    try:
        exec(code, global_values)
    # Reraise any occuring exceptions
    except (SystemExit, KeyboardInterrupt):
        raise
    # Print the exception
    except BaseException as e:
        traceback.print_exception(e.__class__, e, e.__traceback__)
        return False
    return True
示例9: roundtrip
def roundtrip(filename, output=sys.stdout):
    """Parse *filename* into an AST and unparse it to *output*."""
    # Honor the file's declared source encoding when re-reading it.
    with open(filename, "rb") as handle:
        source_encoding = tokenize.detect_encoding(handle.readline)[0]
    with open(filename, "r", encoding=source_encoding) as handle:
        tree = compile(handle.read(), filename, "exec", ast.PyCF_ONLY_AST)
    Unparser(tree, output)
示例10: read_text_file
def read_text_file(filename, encoding=None):
    """Read text file.

    Give back the contents, and the encoding we used.
    Unless specified manually, we have no way of knowing what text
    encoding this file may be in.
    The standard Python 'open' method uses the default system encoding
    to read text files in Python 3 or falls back to utf-8.
    On Python 3 we can use tokenize to detect the encoding.
    On Python 2 we can use chardet to detect the encoding.
    """
    # Only if the encoding is not manually specified, we may try to
    # detect it.
    if encoding is None and detect_encoding is not None:
        with open(filename, 'rb') as filehandler:
            encoding = detect_encoding(filehandler.readline)[0]
    with open(filename, 'rb') as filehandler:
        data = filehandler.read()
    if encoding is not None:
        return data.decode(encoding), encoding
    if HAVE_CHARDET:
        encoding_result = chardet.detect(data)
        if encoding_result and encoding_result['encoding'] is not None:
            encoding = encoding_result['encoding']
            return data.decode(encoding), encoding
    # Look for hints, PEP263-style.  A UTF-8 BOM wins outright.
    if data[:3] == b'\xef\xbb\xbf':
        encoding = 'utf-8'
        return data.decode(encoding), encoding
    data_len = len(data)
    for canary in ENCODING_HINTS:
        if canary not in data:
            continue
        pos = data.index(canary)
        # BUG FIX: on Python 3, indexing bytes yields an int, so the old
        # `data[pos - 1] not in (b' ', b'\n', b'\r')` was always true
        # (the guard never passed) and `coding += data[pos]` raised
        # TypeError.  One-byte slices yield bytes on both Python 2 and 3.
        if pos > 1 and data[pos - 1:pos] not in (b' ', b'\n', b'\r'):
            # The canary must start a word; otherwise try the next one.
            continue
        pos += len(canary)
        coding = b''
        while pos < data_len and data[pos:pos + 1] not in (b' ', b'\n'):
            coding += data[pos:pos + 1]
            pos += 1
        encoding = coding.decode('ascii').strip()
        try:
            return data.decode(encoding), encoding
        except (LookupError, UnicodeError):
            # Unknown or wrong codec name: try the next hint.
            pass
    # Fall back to utf-8
    encoding = 'utf-8'
    return data.decode(encoding), encoding
示例11: patch
def patch(self, filename):
    """Apply all registered operations to *filename*.

    Returns True when the content changed (and was written back or sent
    to stdout), False when no operation modified it.  The content is
    always passed to self.check() afterwards.
    """
    self.current_file = filename

    # tokenize.open honors the file's PEP 263 encoding declaration.
    with tokenize.open(filename) as fp:
        original = fp.read()

    patched = original
    for operation in self.operations:
        patched = operation.patch(patched)

    if patched == original:
        # no change
        self.check(patched)
        if self.options.to_stdout:
            self.write_stdout(patched)
        return False

    # Re-detect the encoding so the rewrite preserves it.
    with open(filename, "rb") as fp:
        encoding, _ = tokenize.detect_encoding(fp.readline)
    if not self.options.quiet:
        print("Patch %s" % filename)
    if self.options.to_stdout:
        self.write_stdout(patched)
    else:
        with open(filename, "w", encoding=encoding) as fp:
            fp.write(patched)
    self.check(patched)
    return True
示例12: _read_file
def _read_file(filename):
# read the file contents, obeying the python encoding marker
with open(filename, 'rb') as fp:
encoding, _ = tokenize.detect_encoding(fp.readline)
with open(filename, 'rt', encoding=encoding) as fp:
content = fp.read()
content += '\n\n'
return content
示例13: read_source_code
def read_source_code(filename):
    """Return the source of *filename* as a list of lines (keepends),
    decoded per its declared encoding with newlines normalized to \\n."""
    with open(filename, 'rb') as source_file:
        encoding, consumed_lines = tokenize.detect_encoding(source_file.readline)
        # detect_encoding consumed up to two lines; stitch them back on.
        raw = b''.join(consumed_lines) + source_file.read()
    decoder = io.IncrementalNewlineDecoder(None, translate=True)
    return decoder.decode(raw.decode(encoding)).splitlines(True)
示例14: read_pyfile
def read_pyfile(filename):
    """Read and return the contents of a Python source file (as a
    string), taking into account the file encoding."""
    # One binary pass to find the encoding, one text pass to read.
    with open(filename, "rb") as raw:
        file_encoding = tokenize.detect_encoding(raw.readline)[0]
    with open(filename, "r", encoding=file_encoding) as decoded:
        return decoded.read()
示例15: _stdin_get_value_py3
def _stdin_get_value_py3():
    """Read all of stdin as bytes and return a StringIO of the text,
    decoded with the encoding its PEP 263 cookie declares (falling back
    to utf-8 when detection or decoding fails)."""
    raw = sys.stdin.buffer.read()
    try:
        coding, _ = tokenize.detect_encoding(io.BytesIO(raw).readline)
        return io.StringIO(raw.decode(coding))
    except (LookupError, SyntaxError, UnicodeError):
        # Unknown codec, bogus cookie, or undecodable bytes.
        return io.StringIO(raw.decode("utf-8"))