

Python tokenize.detect_encoding Method Code Examples

This article collects typical usage examples of the lib2to3.pgen2.tokenize.detect_encoding method in Python. If you are wondering how to use tokenize.detect_encoding, what it is for, or what calling it looks like in practice, the curated code examples below may help. You can also explore further usage examples from its containing module, lib2to3.pgen2.tokenize.


The following presents 15 code examples of the tokenize.detect_encoding method, sorted by popularity by default. You can upvote examples you like or find useful; your feedback helps the system recommend better Python code examples.
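
Before diving into the examples, here is a minimal sketch (not taken from any of the projects below) of how detect_encoding is typically called: you pass it a readline callable over the raw bytes, and it returns the detected encoding together with the header lines it consumed while looking for a BOM or coding cookie.

import io
from lib2to3.pgen2.tokenize import detect_encoding

# A small byte buffer with an explicit coding cookie.
source_bytes = b"# -*- coding: latin-1 -*-\nname = 'value'\n"
encoding, consumed_lines = detect_encoding(io.BytesIO(source_bytes).readline)
print(encoding)        # typically 'iso-8859-1' (the normalized name for latin-1)
print(consumed_lines)  # the header lines read while detecting the encoding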

Example 1: test_all_project_files

# Required import: from lib2to3.pgen2 import tokenize [as alias]
# Or: from lib2to3.pgen2.tokenize import detect_encoding [as alias]
def test_all_project_files(self):
        for filepath in support.all_project_files():
            with open(filepath, "rb") as fp:
                encoding = tokenize.detect_encoding(fp.readline)[0]
            self.assertIsNotNone(encoding,
                                 "can't detect encoding for %s" % filepath)
            with open(filepath, "r", encoding=encoding) as fp:
                source = fp.read()
            try:
                tree = driver.parse_string(source)
            except ParseError:
                try:
                    tree = driver_no_print_statement.parse_string(source)
                except ParseError as err:
                    self.fail('ParseError on file %s (%s)' % (filepath, err))
            new = str(tree)
            if new != source:
                print(diff_texts(source, new, filepath))
                self.fail("Idempotency failed: %s" % filepath) 
Developer: remg427, Project: misp42splunk, Lines of code: 21, Source file: test_parser.py

Example 2: test_all_project_files

# Required import: from lib2to3.pgen2 import tokenize [as alias]
# Or: from lib2to3.pgen2.tokenize import detect_encoding [as alias]
def test_all_project_files(self):
        if sys.platform.startswith("win"):
            # XXX something with newlines goes wrong on Windows.
            return
        for filepath in support.all_project_files():
            with open(filepath, "rb") as fp:
                encoding = tokenize.detect_encoding(fp.readline)[0]
            self.assertIsNotNone(encoding,
                                 "can't detect encoding for %s" % filepath)
            # Python 2-style code: the text-mode read returns a byte string
            # here, so it is decoded manually with the detected encoding.
            with open(filepath, "r") as fp:
                source = fp.read()
                source = source.decode(encoding)
            tree = driver.parse_string(source)
            new = unicode(tree)
            if diff(filepath, new, encoding):
                self.fail("Idempotency failed: %s" % filepath) 
Developer: remg427, Project: misp42splunk, Lines of code: 18, Source file: test_parser.py

Example 3: test_all_project_files

# Required import: from lib2to3.pgen2 import tokenize [as alias]
# Or: from lib2to3.pgen2.tokenize import detect_encoding [as alias]
def test_all_project_files(self):
        if sys.platform.startswith("win"):
            # XXX something with newlines goes wrong on Windows.
            return
        for filepath in support.all_project_files():
            with open(filepath, "rb") as fp:
                encoding = tokenize.detect_encoding(fp.readline)[0]
            self.assertTrue(encoding is not None,
                            "can't detect encoding for %s" % filepath)
            # Python 2-style code, as in Example 2: bytes are decoded manually.
            with open(filepath, "r") as fp:
                source = fp.read()
                source = source.decode(encoding)
            tree = driver.parse_string(source)
            new = unicode(tree)
            if diff(filepath, new, encoding):
                self.fail("Idempotency failed: %s" % filepath) 
Developer: ktraunmueller, Project: Computable, Lines of code: 18, Source file: test_parser.py

Example 4: test_all_project_files

# Required import: from lib2to3.pgen2 import tokenize [as alias]
# Or: from lib2to3.pgen2.tokenize import detect_encoding [as alias]
def test_all_project_files(self):
        for filepath in support.all_project_files():
            with open(filepath, "rb") as fp:
                encoding = tokenize.detect_encoding(fp.readline)[0]
            self.assertIsNotNone(encoding,
                                 "can't detect encoding for %s" % filepath)
            with open(filepath, "r", encoding=encoding) as fp:
                source = fp.read()
            try:
                tree = driver.parse_string(source)
            except ParseError as err:
                if verbose > 0:
                    warnings.warn('ParseError on file %s (%s)' % (filepath, err))
                continue
            new = str(tree)
            x = diff(filepath, new)
            if x:
                self.fail("Idempotency failed: %s" % filepath) 
Developer: Microvellum, Project: Fluid-Designer, Lines of code: 20, Source file: test_parser.py

Example 5: open_with_encoding

# Required import: from lib2to3.pgen2 import tokenize [as alias]
# Or: from lib2to3.pgen2.tokenize import detect_encoding [as alias]
def open_with_encoding(filename, mode='r', encoding=None, limit_byte_check=-1):
    """Return opened file with a specific encoding."""
    if not encoding:
        encoding = detect_encoding(filename, limit_byte_check=limit_byte_check)

    return io.open(filename, mode=mode, encoding=encoding,
                   newline='')  # Preserve line endings 
Developer: sofia-netsurv, Project: python-netsurv, Lines of code: 9, Source file: autopep8.py

Example 6: detect_encoding

# Required import: from lib2to3.pgen2 import tokenize [as alias]
# Or: from lib2to3.pgen2.tokenize import detect_encoding [as alias]
def detect_encoding(filename, limit_byte_check=-1):
    """Return file encoding."""
    try:
        with open(filename, 'rb') as input_file:
            from lib2to3.pgen2 import tokenize as lib2to3_tokenize
            encoding = lib2to3_tokenize.detect_encoding(input_file.readline)[0]

        with open_with_encoding(filename, encoding=encoding) as test_file:
            test_file.read(limit_byte_check)

        return encoding
    except (LookupError, SyntaxError, UnicodeDecodeError):
        return 'latin-1' 
Developer: sofia-netsurv, Project: python-netsurv, Lines of code: 15, Source file: autopep8.py

Example 7: fix_file

# Required import: from lib2to3.pgen2 import tokenize [as alias]
# Or: from lib2to3.pgen2.tokenize import detect_encoding [as alias]
def fix_file(filename, options=None, output=None, apply_config=False):
    if not options:
        options = parse_args([filename], apply_config=apply_config)

    original_source = readlines_from_file(filename)

    fixed_source = original_source

    if options.in_place or output:
        encoding = detect_encoding(filename)

    if output:
        output = LineEndingWrapper(wrap_output(output, encoding=encoding))

    fixed_source = fix_lines(fixed_source, options, filename=filename)

    if options.diff:
        new = io.StringIO(fixed_source)
        new = new.readlines()
        diff = get_diff_text(original_source, new, filename)
        if output:
            output.write(diff)
            output.flush()
        return diff
    elif options.in_place:
        original = "".join(original_source).splitlines()
        fixed = fixed_source.splitlines()
        if original != fixed:
            with open_with_encoding(filename, 'w', encoding=encoding) as fp:
                fp.write(fixed_source)
            return fixed_source
        return None
    else:
        if output:
            output.write(fixed_source)
            output.flush()
    return fixed_source 
Developer: sofia-netsurv, Project: python-netsurv, Lines of code: 39, Source file: autopep8.py

Example 8: open_with_encoding

# Required import: from lib2to3.pgen2 import tokenize [as alias]
# Or: from lib2to3.pgen2.tokenize import detect_encoding [as alias]
def open_with_encoding(filename,
                       encoding=None, mode='r', limit_byte_check=-1):
    """Return opened file with a specific encoding."""
    if not encoding:
        encoding = detect_encoding(filename, limit_byte_check=limit_byte_check)

    return io.open(filename, mode=mode, encoding=encoding,
                   newline='')  # Preserve line endings 
Developer: fabioz, Project: PyDev.Debugger, Lines of code: 10, Source file: autopep8.py

Example 9: detect_encoding

# Required import: from lib2to3.pgen2 import tokenize [as alias]
# Or: from lib2to3.pgen2.tokenize import detect_encoding [as alias]
def detect_encoding(filename, limit_byte_check=-1):
    """Return file encoding."""
    try:
        with open(filename, 'rb') as input_file:
            from lib2to3.pgen2 import tokenize as lib2to3_tokenize
            encoding = lib2to3_tokenize.detect_encoding(input_file.readline)[0]

        with open_with_encoding(filename, encoding) as test_file:
            test_file.read(limit_byte_check)

        return encoding
    except (LookupError, SyntaxError, UnicodeDecodeError):
        return 'latin-1' 
Developer: fabioz, Project: PyDev.Debugger, Lines of code: 15, Source file: autopep8.py

Example 10: fix_file

# Required import: from lib2to3.pgen2 import tokenize [as alias]
# Or: from lib2to3.pgen2.tokenize import detect_encoding [as alias]
def fix_file(filename, options=None, output=None, apply_config=False):
    if not options:
        options = parse_args([filename], apply_config=apply_config)

    original_source = readlines_from_file(filename)

    fixed_source = original_source

    if options.in_place or output:
        encoding = detect_encoding(filename)

    if output:
        output = LineEndingWrapper(wrap_output(output, encoding=encoding))

    fixed_source = fix_lines(fixed_source, options, filename=filename)

    if options.diff:
        new = io.StringIO(fixed_source)
        new = new.readlines()
        diff = get_diff_text(original_source, new, filename)
        if output:
            output.write(diff)
            output.flush()
        else:
            return diff
    elif options.in_place:
        fp = open_with_encoding(filename, encoding=encoding, mode='w')
        fp.write(fixed_source)
        fp.close()
    else:
        if output:
            output.write(fixed_source)
            output.flush()
        else:
            return fixed_source 
Developer: fabioz, Project: PyDev.Debugger, Lines of code: 37, Source file: autopep8.py

Example 11: openpy

# Required import: from lib2to3.pgen2 import tokenize [as alias]
# Or: from lib2to3.pgen2.tokenize import detect_encoding [as alias]
def openpy(filename):
        from lib2to3.pgen2.tokenize import detect_encoding
        import io

        # The following is copied from tokenize.py in Python 3.2,
        # Copyright (c) 2001-2014 Python Software Foundation; All Rights Reserved
        buffer = io.open(filename, 'rb')
        encoding, lines = detect_encoding(buffer.readline)
        buffer.seek(0)
        text = io.TextIOWrapper(buffer, encoding, line_buffering=True)
        text.mode = 'r'
        return text 
Developer: Bachmann1234, Project: diff_cover, Lines of code: 14, Source file: snippets.py
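
For comparison, the comment in Example 11 notes the code was copied from Python 3.2's tokenize.py; the Python 3 standard-library tokenize module exposes this same detect-then-wrap pattern directly as tokenize.open(). A minimal sketch (the filename is a placeholder for illustration):

import tokenize  # standard-library tokenize module, Python 3

# 'example_module.py' is a hypothetical path used only for illustration.
with tokenize.open('example_module.py') as f:
    source = f.read()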

Example 12: IsPythonFile

# Required import: from lib2to3.pgen2 import tokenize [as alias]
# Or: from lib2to3.pgen2.tokenize import detect_encoding [as alias]
def IsPythonFile(filename):
  """Return True if filename is a Python file."""
  if os.path.splitext(filename)[1] == '.py':
    return True

  try:
    with open(filename, 'rb') as fd:
      encoding = tokenize.detect_encoding(fd.readline)[0]

    # Check for correctness of encoding.
    with py3compat.open_with_encoding(
        filename, mode='r', encoding=encoding) as fd:
      fd.read()
  except UnicodeDecodeError:
    encoding = 'latin-1'
  except (IOError, SyntaxError):
    # If we fail to detect encoding (or the encoding cookie is incorrect - which
    # will make detect_encoding raise SyntaxError), assume it's not a Python
    # file.
    return False

  try:
    with py3compat.open_with_encoding(
        filename, mode='r', encoding=encoding) as fd:
      first_line = fd.readline(256)
  except IOError:
    return False

  return re.match(r'^#!.*\bpython[23]?\b', first_line) 
Developer: google, Project: yapf, Lines of code: 31, Source file: file_resources.py

Example 13: FileEncoding

# Required import: from lib2to3.pgen2 import tokenize [as alias]
# Or: from lib2to3.pgen2.tokenize import detect_encoding [as alias]
def FileEncoding(filename):
  """Return the file's encoding."""
  with open(filename, 'rb') as fd:
    return tokenize.detect_encoding(fd.readline)[0] 
Developer: google, Project: yapf, Lines of code: 6, Source file: file_resources.py

Example 14: decode_source

# Required import: from lib2to3.pgen2 import tokenize [as alias]
# Or: from lib2to3.pgen2.tokenize import detect_encoding [as alias]
def decode_source(source):
        if isinstance(source, bytes):
            encoding, _ = detect_encoding(io.BytesIO(source).readline)
            source = source.decode(encoding)
        return source 
Developer: alexmojaki, Project: executing, Lines of code: 7, Source file: executing.py
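
A quick usage illustration for Example 14, assuming decode_source and its imports (io and detect_encoding) are in scope; the inputs are hypothetical:

# Assumes decode_source from Example 14 (with io and detect_encoding imported) is available.
cookie_bytes = b"# -*- coding: latin-1 -*-\nname = '\xe9'\n"
print(decode_source(cookie_bytes))    # decoded using the declared latin-1 cookie
print(decode_source("already text"))  # non-bytes input is returned unchanged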

Example 15: open_with_encoding_check

# Required import: from lib2to3.pgen2 import tokenize [as alias]
# Or: from lib2to3.pgen2.tokenize import detect_encoding [as alias]
def open_with_encoding_check(filename):  # type: ignore
        """Open a file in read only mode using the encoding detected by
        detect_encoding().
        """
        fp = io.open(filename, 'rb')
        try:
            encoding, lines = detect_encoding(fp.readline)
            fp.seek(0)
            text = io.TextIOWrapper(fp, encoding, line_buffering=True)
            text.mode = 'r'
            return text
        except:
            fp.close()
            raise 
Developer: alexmojaki, Project: executing, Lines of code: 16, Source file: utils.py


Note: The lib2to3.pgen2.tokenize.detect_encoding examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Please refer to each project's License for distribution and use, and do not reproduce without permission.