

Python tokenize.TokenError Code Examples

This article collects typical usage examples of lib2to3.pgen2.tokenize.TokenError (an exception class, raised when source text cannot be tokenized) from real Python projects. If you are wondering what tokenize.TokenError is for and how to handle it in your own code, the curated examples below should help. You can also explore the other members of the lib2to3.pgen2.tokenize module for related examples.


Fifteen code examples of tokenize.TokenError are shown below, ordered by popularity.
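Every example follows the same pattern: source text is fed to a tokenizer, and TokenError is caught to detect input that cannot be tokenized, typically an unexpected EOF inside a multi-line string or statement. A minimal, self-contained illustration of that pattern (written for this article, not taken from the projects below):

import io
from lib2to3.pgen2 import tokenize

def tokens_or_none(source):
    """Return the token list, or None if the source cannot be tokenized."""
    try:
        # lib2to3's generate_tokens() expects a readline callable,
        # so wrap the string in a file-like object.
        return list(tokenize.generate_tokens(io.StringIO(source).readline))
    except tokenize.TokenError:
        # Raised on EOF inside a multi-line string or statement.
        return None

print(tokens_or_none('x = 1\n') is not None)  # True
print(tokens_or_none('x = (1 +\n'))           # None ("EOF in multi-line statement")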

Example 1: fix_e402

# Required import: from lib2to3.pgen2 import tokenize [as alias]
# Alternatively: from lib2to3.pgen2.tokenize import TokenError [as alias]
def fix_e402(self, result):
        (line_index, offset, target) = get_index_offset_contents(result,
                                                                 self.source)
        # Grow the slice until it tokenizes as a complete (possibly
        # multi-line) import statement.
        for i in range(1, 100):
            line = "".join(self.source[line_index:line_index+i])
            try:
                generate_tokens(line)
            except (SyntaxError, tokenize.TokenError):
                continue
            break
        if not (target in self.imports and self.imports[target] != line_index):
            mod_offset = get_module_imports_on_top_of_file(self.source,
                                                           line_index)
            self.source[mod_offset] = line + self.source[mod_offset]
        # Blank out the original import lines, which have been relocated.
        for offset in range(i):
            self.source[line_index+offset] = ''
Author: sofia-netsurv | Project: python-netsurv | Lines: 18 | Source: autopep8.py
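Note that generate_tokens in these autopep8 snippets is not lib2to3's own function: autopep8 defines a module-level helper that accepts a source string directly and caches its result. A minimal sketch of such a wrapper (assuming lib2to3's tokenizer and omitting the caching):

import io
from lib2to3.pgen2 import tokenize as lib2to3_tokenize

def generate_tokens(source):
    """Tokenize a source string into a list of token 5-tuples
    (sketch of autopep8's string-accepting helper)."""
    readline = io.StringIO(source).readline
    return list(lib2to3_tokenize.generate_tokens(readline))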

Example 2: fix_long_line_physically

# Required import: from lib2to3.pgen2 import tokenize [as alias]
# Alternatively: from lib2to3.pgen2.tokenize import TokenError [as alias]
def fix_long_line_physically(self, result):
        """Try to make lines fit within --max-line-length characters."""
        line_index = result['line'] - 1
        target = self.source[line_index]

        previous_line = get_item(self.source, line_index - 1, default='')
        next_line = get_item(self.source, line_index + 1, default='')

        try:
            fixed = self.fix_long_line(
                target=target,
                previous_line=previous_line,
                next_line=next_line,
                original=target)
        except (SyntaxError, tokenize.TokenError):
            return []

        if fixed:
            self.source[line_index] = fixed
            return [line_index + 1]

        return [] 
Author: sofia-netsurv | Project: python-netsurv | Lines: 24 | Source: autopep8.py

Example 3: refactor_with_2to3

# Required import: from lib2to3.pgen2 import tokenize [as alias]
# Alternatively: from lib2to3.pgen2.tokenize import TokenError [as alias]
def refactor_with_2to3(source_text, fixer_names, filename=''):
    """Use lib2to3 to refactor the source.

    Return the refactored source code.

    """
    from lib2to3.refactor import RefactoringTool
    fixers = ['lib2to3.fixes.fix_' + name for name in fixer_names]
    tool = RefactoringTool(fixer_names=fixers, explicit=fixers)

    from lib2to3.pgen2 import tokenize as lib2to3_tokenize
    try:
        # The name parameter is necessary particularly for the "import" fixer.
        # Note: autopep8 aliases unicode = str when running under Python 3.
        return unicode(tool.refactor_string(source_text, name=filename))
    except lib2to3_tokenize.TokenError:
        return source_text 
Author: sofia-netsurv | Project: python-netsurv | Lines: 18 | Source: autopep8.py
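A short usage sketch (assuming unicode is aliased to str, as autopep8 does on Python 3): the fixer name 'print' maps to lib2to3.fixes.fix_print, and untokenizable input comes back unchanged instead of raising:

# Converts a Python 2 print statement into a function call.
print(refactor_with_2to3('print "hello"\n', fixer_names=['print']))
# -> print("hello")

# Input that raises TokenError is returned as-is.
print(refactor_with_2to3('x = """broken\n', fixer_names=['print']))
# -> x = """broken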

Example 4: fix_long_line_physically

# Required import: from lib2to3.pgen2 import tokenize [as alias]
# Alternatively: from lib2to3.pgen2.tokenize import TokenError [as alias]
def fix_long_line_physically(self, result):
        """Try to make lines fit within --max-line-length characters."""
        line_index = result['line'] - 1
        target = self.source[line_index]

        previous_line = get_item(self.source, line_index - 1, default='')
        next_line = get_item(self.source, line_index + 1, default='')

        try:
            fixed = self.fix_long_line(
                target=target,
                previous_line=previous_line,
                next_line=next_line,
                original=target)
        except (SyntaxError, tokenize.TokenError):
            return []

        if fixed:
            self.source[line_index] = fixed
            return [line_index + 1]
        else:
            return [] 
Author: fabioz | Project: PyDev.Debugger | Lines: 24 | Source: autopep8.py

Example 5: fix_w503

# Required import: from lib2to3.pgen2 import tokenize [as alias]
# Alternatively: from lib2to3.pgen2.tokenize import TokenError [as alias]
def fix_w503(self, result):
        (line_index, _, target) = get_index_offset_contents(result,
                                                            self.source)
        # W503: line break before binary operator. Move the operator that
        # begins this line to the end of the previous line.
        one_string_token = target.split()[0]
        try:
            ts = generate_tokens(one_string_token)
        except tokenize.TokenError:
            return
        if not _is_binary_operator(ts[0][0], one_string_token):
            return
        i = target.index(one_string_token)
        self.source[line_index] = '{0}{1}'.format(
            target[:i], target[i + len(one_string_token):])
        nl = find_newline(self.source[line_index - 1:line_index])
        before_line = self.source[line_index - 1]
        bl = before_line.index(nl)
        self.source[line_index - 1] = '{0} {1}{2}'.format(
            before_line[:bl], one_string_token,
            before_line[bl:]) 
Author: fabioz | Project: PyDev.Debugger | Lines: 21 | Source: autopep8.py

Example 6: refactor_with_2to3

# Required import: from lib2to3.pgen2 import tokenize [as alias]
# Alternatively: from lib2to3.pgen2.tokenize import TokenError [as alias]
def refactor_with_2to3(source_text, fixer_names, filename=''):
    """Use lib2to3 to refactor the source.

    Return the refactored source code.

    """
    check_lib2to3()
    from lib2to3.refactor import RefactoringTool
    fixers = ['lib2to3.fixes.fix_' + name for name in fixer_names]
    tool = RefactoringTool(fixer_names=fixers, explicit=fixers)

    from lib2to3.pgen2 import tokenize as lib2to3_tokenize
    try:
        # The name parameter is necessary particularly for the "import" fixer.
        # Note: autopep8 aliases unicode = str when running under Python 3.
        return unicode(tool.refactor_string(source_text, name=filename))
    except lib2to3_tokenize.TokenError:
        return source_text 
Author: mrknow | Project: filmkodi | Lines: 19 | Source: autopep8.py

Example 7: fix_e225

# Required import: from lib2to3.pgen2 import tokenize [as alias]
# Alternatively: from lib2to3.pgen2.tokenize import TokenError [as alias]
def fix_e225(self, result):
        """Fix missing whitespace around operator."""
        target = self.source[result['line'] - 1]
        offset = result['column'] - 1
        fixed = target[:offset] + ' ' + target[offset:]

        # Only proceed if non-whitespace characters match.
        # And make sure we don't break the indentation.
        if (
            fixed.replace(' ', '') == target.replace(' ', '') and
            _get_indentation(fixed) == _get_indentation(target)
        ):
            self.source[result['line'] - 1] = fixed
            error_code = result.get('id', 0)
            try:
                ts = generate_tokens(fixed)
            except (SyntaxError, tokenize.TokenError):
                return
            if not check_syntax(fixed.lstrip()):
                return
            errors = list(
                pycodestyle.missing_whitespace_around_operator(fixed, ts))
            for e in reversed(errors):
                if error_code != e[1].split()[0]:
                    continue
                offset = e[0][1]
                fixed = fixed[:offset] + ' ' + fixed[offset:]
            self.source[result['line'] - 1] = fixed
        else:
            return [] 
Author: sofia-netsurv | Project: python-netsurv | Lines: 32 | Source: autopep8.py

Example 8: fix_long_line_logically

# Required import: from lib2to3.pgen2 import tokenize [as alias]
# Alternatively: from lib2to3.pgen2.tokenize import TokenError [as alias]
def fix_long_line_logically(self, result, logical):
        """Try to make lines fit within --max-line-length characters."""
        if (
            not logical or
            len(logical[2]) == 1 or
            self.source[result['line'] - 1].lstrip().startswith('#')
        ):
            return self.fix_long_line_physically(result)

        start_line_index = logical[0][0]
        end_line_index = logical[1][0]
        logical_lines = logical[2]

        previous_line = get_item(self.source, start_line_index - 1, default='')
        next_line = get_item(self.source, end_line_index + 1, default='')

        single_line = join_logical_line(''.join(logical_lines))

        try:
            fixed = self.fix_long_line(
                target=single_line,
                previous_line=previous_line,
                next_line=next_line,
                original=''.join(logical_lines))
        except (SyntaxError, tokenize.TokenError):
            return self.fix_long_line_physically(result)

        if fixed:
            for line_index in range(start_line_index, end_line_index + 1):
                self.source[line_index] = ''
            self.source[start_line_index] = fixed
            return range(start_line_index + 1, end_line_index + 1)

        return [] 
Author: sofia-netsurv | Project: python-netsurv | Lines: 36 | Source: autopep8.py

Example 9: _get_indentword

# Required import: from lib2to3.pgen2 import tokenize [as alias]
# Alternatively: from lib2to3.pgen2.tokenize import TokenError [as alias]
def _get_indentword(source):
    """Return indentation type."""
    indent_word = '    '  # Default in case source has no indentation
    try:
        for t in generate_tokens(source):
            if t[0] == token.INDENT:
                indent_word = t[1]
                break
    except (SyntaxError, tokenize.TokenError):
        pass
    return indent_word 
Author: sofia-netsurv | Project: python-netsurv | Lines: 13 | Source: autopep8.py
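Usage is straightforward: the helper reports the text of the first INDENT token it finds, falling back to four spaces when the source has no indentation or cannot be tokenized (this assumes the string-accepting generate_tokens wrapper sketched earlier and the stdlib token module):

print(repr(_get_indentword('if True:\n\tpass\n')))  # '\t'
print(repr(_get_indentword('if True:\n  pass\n')))  # '  '
print(repr(_get_indentword('x = 1\n')))             # '    ' (default)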

Example 10: multiline_string_lines

# Required import: from lib2to3.pgen2 import tokenize [as alias]
# Alternatively: from lib2to3.pgen2.tokenize import TokenError [as alias]
def multiline_string_lines(source, include_docstrings=False):
    """Return line numbers that are within multiline strings.

    The line numbers are indexed at 1.

    Docstrings are ignored unless include_docstrings is True.

    """
    line_numbers = set()
    previous_token_type = ''
    try:
        for t in generate_tokens(source):
            token_type = t[0]
            start_row = t[2][0]
            end_row = t[3][0]

            if token_type == tokenize.STRING and start_row != end_row:
                if (
                    include_docstrings or
                    previous_token_type != tokenize.INDENT
                ):
                    # We increment by one since we want the contents of the
                    # string.
                    line_numbers |= set(range(1 + start_row, 1 + end_row))

            previous_token_type = token_type
    except (SyntaxError, tokenize.TokenError):
        pass

    return line_numbers 
Author: sofia-netsurv | Project: python-netsurv | Lines: 32 | Source: autopep8.py
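For example, a triple-quoted string opening on row 2 and closing on row 4 reports the rows after its opening line (again assuming the string-accepting generate_tokens wrapper sketched earlier):

source = 'x = 1\ny = """a\nb\nc"""\nz = 2\n'
print(multiline_string_lines(source))  # {3, 4}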

Example 11: commented_out_code_lines

# Required import: from lib2to3.pgen2 import tokenize [as alias]
# Alternatively: from lib2to3.pgen2.tokenize import TokenError [as alias]
def commented_out_code_lines(source):
    """Return line numbers of comments that are likely code.

    Commented-out code is bad practice, but modifying it just adds even
    more clutter.

    """
    line_numbers = []
    try:
        for t in generate_tokens(source):
            token_type = t[0]
            token_string = t[1]
            start_row = t[2][0]
            line = t[4]

            # Ignore inline comments.
            if not line.lstrip().startswith('#'):
                continue

            if token_type == tokenize.COMMENT:
                stripped_line = token_string.lstrip('#').strip()
                if (
                    ' ' in stripped_line and
                    '#' not in stripped_line and
                    check_syntax(stripped_line)
                ):
                    line_numbers.append(start_row)
    except (SyntaxError, tokenize.TokenError):
        pass

    return line_numbers 
Author: sofia-netsurv | Project: python-netsurv | Lines: 33 | Source: autopep8.py
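A quick check of the heuristic, assuming autopep8's check_syntax() helper (which returns a truthy code object when compile() succeeds and False otherwise):

source = '# x = 1\n# prose, not code at all\nx = 0  # inline comment\n'
print(commented_out_code_lines(source))  # [1] -- only 'x = 1' compiles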

Example 12: fix_w605

# Required import: from lib2to3.pgen2 import tokenize [as alias]
# Alternatively: from lib2to3.pgen2.tokenize import TokenError [as alias]
def fix_w605(self, result):
        (line_index, _, target) = get_index_offset_contents(result,
                                                            self.source)
        try:
            tokens = list(generate_tokens(target))
        except (SyntaxError, tokenize.TokenError):
            return
        # W605: invalid escape sequence. Make the string raw by inserting
        # an 'r' prefix at each position reported by get_w605_position().
        for (pos, _msg) in get_w605_position(tokens):
            self.source[line_index] = '{}r{}'.format(
                target[:pos], target[pos:])
Author: sofia-netsurv | Project: python-netsurv | Lines: 12 | Source: autopep8.py

Example 13: fix_long_line_logically

# Required import: from lib2to3.pgen2 import tokenize [as alias]
# Alternatively: from lib2to3.pgen2.tokenize import TokenError [as alias]
def fix_long_line_logically(self, result, logical):
        """Try to make lines fit within --max-line-length characters."""
        if (
            not logical or
            len(logical[2]) == 1 or
            self.source[result['line'] - 1].lstrip().startswith('#')
        ):
            return self.fix_long_line_physically(result)

        start_line_index = logical[0][0]
        end_line_index = logical[1][0]
        logical_lines = logical[2]

        previous_line = get_item(self.source, start_line_index - 1, default='')
        next_line = get_item(self.source, end_line_index + 1, default='')

        single_line = join_logical_line(''.join(logical_lines))

        try:
            fixed = self.fix_long_line(
                target=single_line,
                previous_line=previous_line,
                next_line=next_line,
                original=''.join(logical_lines))
        except (SyntaxError, tokenize.TokenError):
            return self.fix_long_line_physically(result)

        if fixed:
            for line_index in range(start_line_index, end_line_index + 1):
                self.source[line_index] = ''
            self.source[start_line_index] = fixed
            return range(start_line_index + 1, end_line_index + 1)
        else:
            return [] 
Author: fabioz | Project: PyDev.Debugger | Lines: 36 | Source: autopep8.py

Example 14: testBadCode

# Required import: from lib2to3.pgen2 import tokenize [as alias]
# Alternatively: from lib2to3.pgen2.tokenize import TokenError [as alias]
def testBadCode(self):
    code = 'x = """hello\n'
    self.assertRaises(tokenize.TokenError, yapf_api.FormatCode, code) 
Author: google | Project: yapf | Lines: 5 | Source: yapf_test.py

Example 15: _FormatFile

# Required import: from lib2to3.pgen2 import tokenize [as alias]
# Alternatively: from lib2to3.pgen2.tokenize import TokenError [as alias]
def _FormatFile(filename,
                lines,
                style_config=None,
                no_local_style=False,
                in_place=False,
                print_diff=False,
                verify=False,
                quiet=False,
                verbose=False):
  """Format an individual file."""
  if verbose and not quiet:
    print('Reformatting %s' % filename)

  if style_config is None and not no_local_style:
    style_config = file_resources.GetDefaultStyleForDir(
        os.path.dirname(filename))

  try:
    reformatted_code, encoding, has_change = yapf_api.FormatFile(
        filename,
        in_place=in_place,
        style_config=style_config,
        lines=lines,
        print_diff=print_diff,
        verify=verify,
        logger=logging.warning)
  except tokenize.TokenError as e:
    # TokenError carries (message, (line, column)); report file:line:message.
    raise errors.YapfError('%s:%s:%s' % (filename, e.args[1][0], e.args[0]))
  except SyntaxError as e:
    e.filename = filename
    raise

  if not in_place and not quiet and reformatted_code:
    file_resources.WriteReformattedCode(filename, reformatted_code, encoding,
                                        in_place)
  return has_change 
Author: google | Project: yapf | Lines: 38 | Source: __init__.py
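The except clause above relies on the shape of TokenError's arguments: args[0] is the message and args[1] is a (line, column) pair, which lets yapf report a file:line:message location. A small demonstration:

import io
from lib2to3.pgen2 import tokenize

try:
    list(tokenize.generate_tokens(io.StringIO('x = """hello\n').readline))
except tokenize.TokenError as e:
    message, (line, column) = e.args
    print('%s:%s:%s' % ('example.py', line, message))
    # -> example.py:1:EOF in multi-line string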


Note: The lib2to3.pgen2.tokenize.TokenError examples in this article were compiled by 纯净天空 from open-source code hosted on GitHub, MSDocs, and similar platforms. The snippets come from open-source projects contributed by their respective developers, and copyright remains with the original authors. Consult each project's license before using or redistributing the code; please do not republish without permission.