Python tokenize.TokenError Code Examples

This article collects typical usage examples of tokenize.TokenError in Python. Strictly speaking, TokenError is an exception class raised by the tokenize module (most often when end-of-file is reached inside an unterminated string or bracket), not a method. If you are wondering how tokenize.TokenError is used in practice, the curated examples below should help; you can also explore further usage examples from the tokenize module.


The following 15 code examples of tokenize.TokenError are presented, sorted by popularity by default. You can upvote the examples you find useful; your votes help the system recommend better Python samples.
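Before diving into the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of how tokenize.TokenError typically arises: the tokenizer reaches end-of-file while still inside an unterminated bracket or multi-line string.

import io
import tokenize

# An unclosed bracket leaves the tokenizer waiting for more input,
# so it raises TokenError when the source ends.
source = "x = (1,\n"
try:
    list(tokenize.generate_tokens(io.StringIO(source).readline))
except tokenize.TokenError as exc:
    print(exc)  # e.g.: ('EOF in multi-line statement', (2, 0))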

Example 1: file_tokens

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenError [as alias]
def file_tokens(self):
        """The complete set of tokens for a file.

        Accessing this attribute *may* raise an InvalidSyntax exception.

        :raises: flake8.exceptions.InvalidSyntax
        """
        if self._file_tokens is None:
            line_iter = iter(self.lines)
            try:
                self._file_tokens = list(tokenize.generate_tokens(
                    lambda: next(line_iter)
                ))
            except tokenize.TokenError as exc:
                raise exceptions.InvalidSyntax(exc.message, exception=exc)

        return self._file_tokens 
Author: AtomLinter | Project: linter-pylama | Lines: 19 | Source: processor.py
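Note: exc.message is a Python 2-era idiom; Python 3 exceptions no longer have a .message attribute, so the equivalent there would be exc.args[0].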

Example 2: _get_all_tokens

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenError [as alias]
def _get_all_tokens(line, lines):
    '''Starting from *line*, generate the necessary tokens which represent the
    shortest tokenization possible. This is done by catching
    :exc:`tokenize.TokenError` when a multi-line string or statement is
    encountered.
    :returns: tokens, lines
    '''
    buffer = line
    used_lines = [line]
    while True:
        try:
            tokens = _generate(buffer)
        except tokenize.TokenError:
            # A multi-line string or statement has been encountered:
            # start adding lines and stop when tokenize stops complaining
            pass
        else:
            if not any(t[0] == tokenize.ERRORTOKEN for t in tokens):
                return tokens, used_lines

        # Add another line
        next_line = next(lines)
        buffer = buffer + '\n' + next_line
        used_lines.append(next_line) 
Author: AtomLinter | Project: linter-pylama | Lines: 26 | Source: raw.py
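The helper _generate is not shown in this snippet. A minimal implementation consistent with how it is used above (an assumption for illustration, not the project's verbatim code) might look like:

import io
import tokenize

def _generate(code):
    # Tokenize a source string; propagates tokenize.TokenError for
    # incomplete input, which the caller above catches and retries.
    return list(tokenize.generate_tokens(io.StringIO(code).readline))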

Example 3: get_annotated_lines

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenError [as alias]
def get_annotated_lines(self):
        """Helper function that returns lines with extra information."""
        lines = [Line(idx + 1, x) for idx, x in enumerate(self.sourcelines)]

        # find function definition and mark lines
        if hasattr(self.code, "co_firstlineno"):
            lineno = self.code.co_firstlineno - 1
            while lineno > 0:
                if _funcdef_re.match(lines[lineno].code):
                    break
                lineno -= 1
            try:
                offset = len(inspect.getblock([x.code + "\n" for x in lines[lineno:]]))
            except TokenError:
                offset = 0
            for line in lines[lineno : lineno + offset]:
                line.in_frame = True

        # mark current line
        try:
            lines[self.lineno - 1].current = True
        except IndexError:
            pass

        return lines 
Author: Frank-qlu | Project: recruit | Lines: 27 | Source: tbtools.py

Example 4: _verify_pre_check

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenError [as alias]
def _verify_pre_check(filepath):
    """Check student code for certain issues."""
    # Make sure the program doesn't crash for students.
    # Could use some improvement for better logging and error reporting.
    try:
        # Check for inline "pylint:" comment, which may indicate a student
        # trying to disable a check.
        with tokenize.open(os.path.expanduser(filepath)) as f:
            for tok_type, content, _, _, _ in tokenize.generate_tokens(f.readline):
                if tok_type != tokenize.COMMENT:
                    continue
                match = pylint.constants.OPTION_RGX.search(content)
                if match is not None:
                    print('[ERROR] String "pylint:" found in comment. ' +
                          'No check run on file `{}.`\n'.format(filepath))
                    return False
    except IndentationError as e:
        print('[ERROR] python_ta could not check your code due to an ' +
              'indentation error at line {}.'.format(e.lineno))
        return False
    except tokenize.TokenError as e:
        print('[ERROR] python_ta could not check your code due to a ' +
              'syntax error in your file.')
        return False
    return True 
Author: pyta-uoft | Project: pyta | Lines: 27 | Source: __init__.py

Example 5: _parse

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenError [as alias]
def _parse(self, source):
        reader = StringIO(source)
        # parse until EOF or TokenError (allows incomplete modules)
        tokens = []
        try:
            tokens.extend(tokenize.generate_tokens(reader.readline))
        except tokenize.TokenError:
            # TokenError happens always at EOF, for unclosed strings or brackets.
            # We don't care about that here, since we still can recover the whole
            # source code.
            pass
        self._tokens = tokens
        it = Iterator(self._tokens)
        self._imports_begin, self._imports_end = self._find_import_range(it)
        it = Iterator(self._tokens, start=self._imports_begin, end=self._imports_end)
        self._parse_imports(it) 
Author: alecthomas | Project: importmagic | Lines: 18 | Source: importer.py

Example 6: find_doc_for

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenError [as alias]
def find_doc_for(ast_entry, body_lines):
    lineno = ast_entry.lineno - 1
    line_io = io.BytesIO(body_lines[lineno].encode())
    try:
        tokens = tokenize(line_io.readline) or []
        line_comments = [t.string for t in tokens if t.type == COMMENT]

        if line_comments:
            formatted_lcs = [l[1:].strip() for l in line_comments]
            filtered_lcs = [l for l in formatted_lcs if not is_ignored(l)]
            if filtered_lcs:
                return filtered_lcs[0]
    except TokenError:
        pass

    lineno -= 1
    while lineno >= 0:
        if iscomment(body_lines[lineno]):
            comment = body_lines[lineno].strip("# ")
            if not is_ignored(comment):
                return comment
        if not body_lines[lineno].strip() == "":
            return None
        lineno -= 1
    return None 
Author: IDSIA | Project: sacred | Lines: 27 | Source: config_scope.py

Example 7: fix_e402

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenError [as alias]
def fix_e402(self, result):
        (line_index, offset, target) = get_index_offset_contents(result,
                                                                 self.source)
        for i in range(1, 100):
            line = "".join(self.source[line_index:line_index+i])
            try:
                generate_tokens("".join(line))
            except (SyntaxError, tokenize.TokenError):
                continue
            break
        if not (target in self.imports and self.imports[target] != line_index):
            mod_offset = get_module_imports_on_top_of_file(self.source,
                                                           line_index)
            self.source[mod_offset] = line + self.source[mod_offset]
        for offset in range(i):
            self.source[line_index+offset] = '' 
Author: sofia-netsurv | Project: python-netsurv | Lines: 18 | Source: autopep8.py

Example 8: fix_long_line_physically

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenError [as alias]
def fix_long_line_physically(self, result):
        """Try to make lines fit within --max-line-length characters."""
        line_index = result['line'] - 1
        target = self.source[line_index]

        previous_line = get_item(self.source, line_index - 1, default='')
        next_line = get_item(self.source, line_index + 1, default='')

        try:
            fixed = self.fix_long_line(
                target=target,
                previous_line=previous_line,
                next_line=next_line,
                original=target)
        except (SyntaxError, tokenize.TokenError):
            return []

        if fixed:
            self.source[line_index] = fixed
            return [line_index + 1]

        return [] 
Author: sofia-netsurv | Project: python-netsurv | Lines: 24 | Source: autopep8.py

Example 9: refactor_with_2to3

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenError [as alias]
def refactor_with_2to3(source_text, fixer_names, filename=''):
    """Use lib2to3 to refactor the source.

    Return the refactored source code.

    """
    from lib2to3.refactor import RefactoringTool
    fixers = ['lib2to3.fixes.fix_' + name for name in fixer_names]
    tool = RefactoringTool(fixer_names=fixers, explicit=fixers)

    from lib2to3.pgen2 import tokenize as lib2to3_tokenize
    try:
        # The name parameter is necessary particularly for the "import" fixer.
        return unicode(tool.refactor_string(source_text, name=filename))
    except lib2to3_tokenize.TokenError:
        return source_text 
Author: sofia-netsurv | Project: python-netsurv | Lines: 18 | Source: autopep8.py
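Note that lib2to3.pgen2.tokenize defines its own TokenError class, distinct from the stdlib tokenize.TokenError; this is why the example imports the module under a separate name and catches lib2to3_tokenize.TokenError rather than the stdlib exception.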

Example 10: run

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenError [as alias]
def run(self):
        OPENERS = ('class', 'def', 'for', 'if', 'try', 'while')
        INDENT = tokenize.INDENT
        NAME = tokenize.NAME

        save_tabsize = tokenize.tabsize
        tokenize.tabsize = self.tabwidth
        try:
            try:
                for (typ, token, start, end, line) in token_generator(self.readline):
                    if typ == NAME and token in OPENERS:
                        self.blkopenline = line
                    elif typ == INDENT and self.blkopenline:
                        self.indentedline = line
                        break

            except (tokenize.TokenError, IndentationError):
                # since we cut off the tokenizer early, we can trigger
                # spurious errors
                pass
        finally:
            tokenize.tabsize = save_tabsize
        return self.blkopenline, self.indentedline 
Author: IronLanguages | Project: ironpython2 | Lines: 25 | Source: AutoIndent.py

Example 11: get_parse_error

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenError [as alias]
def get_parse_error(code):
    """
    Checks code for ambiguous tabs or other basic parsing issues.

    :param code: a string containing a file's worth of Python code
    :returns: a string containing a description of the first parse error encountered,
              or None if the code is ok
    """
    # note that this uses non-public elements from stdlib's tabnanny, because tabnanny
    # is (very frustratingly) written only to be used as a script, but using it that way
    # in this context requires writing temporarily files, running subprocesses, blah blah blah
    code_buffer = StringIO(code)
    try:
        tabnanny.process_tokens(tokenize.generate_tokens(code_buffer.readline))
    except tokenize.TokenError as err:
        return "Could not parse code: %s" % err
    except IndentationError as err:
        return "Indentation error: %s" % err
    except tabnanny.NannyNag as err:
        return "Ambiguous tab at line %d; line is '%s'." % (err.get_lineno(), err.get_line())
    return None 
Author: muhanzhang | Project: D-VAE | Lines: 23 | Source: check_whitespace.py
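A hypothetical usage of get_parse_error, assuming the example's imports (StringIO, tabnanny, tokenize) are in scope:

print(get_parse_error("x = (1,\n"))
# e.g.: Could not parse code: ('EOF in multi-line statement', (2, 0))
print(get_parse_error("x = 1\n"))  # None: the code parses cleanly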

Example 12: tabnanny

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenError [as alias]
def tabnanny(self, filename):
        f = open(filename, 'r')
        try:
            tabnanny.process_tokens(tokenize.generate_tokens(f.readline))
        except tokenize.TokenError as msg:
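            # Py2-style unpacking: TokenError's args are (message, (lineno, offset)).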
            msgtxt, (lineno, start) = msg
            self.editwin.gotoline(lineno)
            self.errorbox("Tabnanny Tokenizing Error",
                          "Token Error: %s" % msgtxt)
            return False
        except tabnanny.NannyNag as nag:
            # The error messages from tabnanny are too confusing...
            self.editwin.gotoline(nag.get_lineno())
            self.errorbox("Tab/space error", indent_message)
            return False
        return True 
Author: aliyun | Project: oss-ftp | Lines: 18 | Source: ScriptBinding.py

Example 13: fix_long_line_physically

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenError [as alias]
def fix_long_line_physically(self, result):
        """Try to make lines fit within --max-line-length characters."""
        line_index = result['line'] - 1
        target = self.source[line_index]

        previous_line = get_item(self.source, line_index - 1, default='')
        next_line = get_item(self.source, line_index + 1, default='')

        try:
            fixed = self.fix_long_line(
                target=target,
                previous_line=previous_line,
                next_line=next_line,
                original=target)
        except (SyntaxError, tokenize.TokenError):
            return []

        if fixed:
            self.source[line_index] = fixed
            return [line_index + 1]
        else:
            return [] 
Author: fabioz | Project: PyDev.Debugger | Lines: 24 | Source: autopep8.py

Example 14: fix_w503

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenError [as alias]
def fix_w503(self, result):
        (line_index, _, target) = get_index_offset_contents(result,
                                                            self.source)
        one_string_token = target.split()[0]
        try:
            ts = generate_tokens(one_string_token)
        except tokenize.TokenError:
            return
        if not _is_binary_operator(ts[0][0], one_string_token):
            return
        i = target.index(one_string_token)
        self.source[line_index] = '{0}{1}'.format(
            target[:i], target[i + len(one_string_token):])
        nl = find_newline(self.source[line_index - 1:line_index])
        before_line = self.source[line_index - 1]
        bl = before_line.index(nl)
        self.source[line_index - 1] = '{0} {1}{2}'.format(
            before_line[:bl], one_string_token,
            before_line[bl:]) 
Author: fabioz | Project: PyDev.Debugger | Lines: 21 | Source: autopep8.py

Example 15: parse_source

# Required import: import tokenize [as alias]
# Or: from tokenize import TokenError [as alias]
def parse_source(self):
        """Parse source text to find executable lines, excluded lines, etc.

        Sets the .excluded and .statements attributes, normalized to the first
        line of multi-line statements.

        """
        try:
            self._raw_parse()
        except (tokenize.TokenError, IndentationError) as err:
            if hasattr(err, "lineno"):
                lineno = err.lineno         # IndentationError
            else:
                lineno = err.args[1][0]     # TokenError
            raise NotPython(
                u"Couldn't parse '%s' as Python source: '%s' at line %d" % (
                    self.filename, err.args[0], lineno
                )
            )

        self.excluded = self.first_lines(self.raw_excluded)

        ignore = self.excluded | self.raw_docstrings
        starts = self.raw_statements - ignore
        self.statements = self.first_lines(starts) - ignore 
Author: nedbat | Project: coveragepy-bbmirror | Lines: 27 | Source: parser.py
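As Example 15 shows with err.args[1][0], a TokenError carries (message, (lineno, column)) in its args. A quick illustrative check (not part of the coverage.py source):

import io
import tokenize

try:
    list(tokenize.generate_tokens(io.StringIO("x = (\n").readline))
except tokenize.TokenError as err:
    message, (lineno, column) = err.args
    print(message, lineno, column)  # e.g.: EOF in multi-line statement 2 0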


Note: the tokenize.TokenError examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from community open-source projects; copyright in the source code remains with the original authors, and any redistribution or use should follow the corresponding project's license. Please do not reproduce this compilation without permission.