

Python tokenize.COMMENT Attribute Code Examples

This article compiles typical usage examples of the tokenize.COMMENT attribute in Python. If you are wondering what tokenize.COMMENT is for, how to use it, or what real code that uses it looks like, the curated examples below may help. You can also explore further usage examples from the tokenize module that this attribute belongs to.


The following presents 15 code examples of the tokenize.COMMENT attribute, ordered by popularity by default.
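Before diving into the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) showing what tokenize.COMMENT is: a token-type constant that you compare against the type of each token the tokenizer produces. The snippet tokenizes a small source string and prints only its comment tokens.

import io
import tokenize

source = "x = 1  # set x\n# a full-line comment\ny = 2\n"

# generate_tokens() works on text; tokenize.tokenize() expects bytes instead.
for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    if tok.type == tokenize.COMMENT:
        print(tok.start, tok.string)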

Example 1: get_type

# Required import: import tokenize [as alias]
# Or: from tokenize import COMMENT [as alias]
def get_type(tokens, start_index):
    """return the line type : docstring, comment, code, empty"""
    i = start_index
    tok_type = tokens[i][0]
    start = tokens[i][2]
    pos = start
    line_type = None
    while i < len(tokens) and tokens[i][2][0] == start[0]:
        tok_type = tokens[i][0]
        pos = tokens[i][3]
        if line_type is None:
            if tok_type == tokenize.STRING:
                line_type = 'docstring_lines'
            elif tok_type == tokenize.COMMENT:
                line_type = 'comment_lines'
            elif tok_type in JUNK:
                pass
            else:
                line_type = 'code_lines'
        i += 1
    if line_type is None:
        line_type = 'empty_lines'
    elif i < len(tokens) and tokens[i][0] == tokenize.NEWLINE:
        i += 1
    return i, pos[0] - start[0] + 1, line_type 
Author: AtomLinter, Project: linter-pylama, Lines: 27, Source: raw_metrics.py
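A possible driver for the excerpt above (not part of pylint's raw_metrics.py): it assumes get_type is in scope and that JUNK is a tuple of layout token types, which the original module defines but this excerpt omits; the JUNK value below is an approximation.

import io
import tokenize

# Assumption: the original module defines JUNK; something like this is enough
# for get_type to skip layout-only tokens.
JUNK = (tokenize.NL, tokenize.INDENT)

source = '"""module docstring"""\n# a comment\nx = 1\n'
tokens = list(tokenize.generate_tokens(io.StringIO(source).readline))

i = 0
while i < len(tokens) and tokens[i][0] != tokenize.ENDMARKER:
    i, line_count, line_type = get_type(tokens, i)
    print(line_count, line_type)  # docstring_lines, then comment_lines, then code_lines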

Example 2: _break_around_binary_operators

# Required import: import tokenize [as alias]
# Or: from tokenize import COMMENT [as alias]
def _break_around_binary_operators(tokens):
    """Private function to reduce duplication.

    This factors out the shared details between
    :func:`break_before_binary_operator` and
    :func:`break_after_binary_operator`.
    """
    line_break = False
    unary_context = True
    # Previous non-newline token types and text
    previous_token_type = None
    previous_text = None
    for token_type, text, start, end, line in tokens:
        if token_type == tokenize.COMMENT:
            continue
        if ('\n' in text or '\r' in text) and token_type != tokenize.STRING:
            line_break = True
        else:
            yield (token_type, text, previous_token_type, previous_text,
                   line_break, unary_context, start)
            unary_context = text in '([{,;'
            line_break = False
            previous_token_type = token_type
            previous_text = text 
Author: AtomLinter, Project: linter-pylama, Lines: 26, Source: pycodestyle.py
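A hedged usage sketch (not from pycodestyle itself): feed the generator the token stream of a logical line split across two physical lines and inspect what it yields. The token that follows the break inside the parentheses comes out with line_break set to True.

import io
import tokenize

source = "total = (1 +\n         2)\n"
tokens = tokenize.generate_tokens(io.StringIO(source).readline)

for (token_type, text, prev_type, prev_text,
     line_break, unary_context, start) in _break_around_binary_operators(tokens):
    print(repr(text), "line_break:", line_break, "unary_context:", unary_context)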

Example 3: _verify_pre_check

# Required import: import tokenize [as alias]
# Or: from tokenize import COMMENT [as alias]
def _verify_pre_check(filepath):
    """Check student code for certain issues."""
    # Make sure the program doesn't crash for students.
    # Could use some improvement for better logging and error reporting.
    try:
        # Check for inline "pylint:" comment, which may indicate a student
        # trying to disable a check.
        with tokenize.open(os.path.expanduser(filepath)) as f:
            for tok_type, content, _, _, _ in tokenize.generate_tokens(f.readline):
                if tok_type != tokenize.COMMENT:
                    continue
                match = pylint.constants.OPTION_RGX.search(content)
                if match is not None:
                    print('[ERROR] String "pylint:" found in comment. ' +
                          'No check run on file `{}.`\n'.format(filepath))
                    return False
    except IndentationError as e:
        print('[ERROR] python_ta could not check your code due to an ' +
              'indentation error at line {}.'.format(e.lineno))
        return False
    except tokenize.TokenError as e:
        print('[ERROR] python_ta could not check your code due to a ' +
              'syntax error in your file.')
        return False
    return True 
Author: pyta-uoft, Project: pyta, Lines: 27, Source: __init__.py
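The core of the check above can be reproduced without the pylint and python_ta machinery: scan every COMMENT token for the string "pylint:". In the sketch below a plain substring test stands in for pylint.constants.OPTION_RGX, and the file handling and error reporting of the original are omitted.

import io
import tokenize

def has_pylint_directive(source):
    """Return True if any comment in `source` mentions 'pylint:'."""
    for tok in tokenize.generate_tokens(io.StringIO(source).readline):
        if tok.type == tokenize.COMMENT and "pylint:" in tok.string:
            return True
    return False

print(has_pylint_directive("x = 1  # pylint: disable=invalid-name\n"))  # True
print(has_pylint_directive("x = 1  # just a note\n"))                   # False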

Example 4: find_doc_for

# Required import: import tokenize [as alias]
# Or: from tokenize import COMMENT [as alias]
def find_doc_for(ast_entry, body_lines):
    lineno = ast_entry.lineno - 1
    line_io = io.BytesIO(body_lines[lineno].encode())
    try:
        tokens = tokenize(line_io.readline) or []
        line_comments = [t.string for t in tokens if t.type == COMMENT]

        if line_comments:
            formatted_lcs = [l[1:].strip() for l in line_comments]
            filtered_lcs = [l for l in formatted_lcs if not is_ignored(l)]
            if filtered_lcs:
                return filtered_lcs[0]
    except TokenError:
        pass

    lineno -= 1
    while lineno >= 0:
        if iscomment(body_lines[lineno]):
            comment = body_lines[lineno].strip("# ")
            if not is_ignored(comment):
                return comment
        if not body_lines[lineno].strip() == "":
            return None
        lineno -= 1
    return None 
Author: IDSIA, Project: sacred, Lines: 27, Source: config_scope.py
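The key step in find_doc_for is tokenizing a single physical line and keeping only its COMMENT tokens. Isolated from the sacred-specific helpers (is_ignored, iscomment), and switched from tokenize.tokenize on bytes to generate_tokens on text for brevity, that step might look like this:

import io
from tokenize import generate_tokens, COMMENT, TokenError

def inline_comment(line):
    """Return the text of an inline comment on `line`, or None."""
    try:
        for tok in generate_tokens(io.StringIO(line).readline):
            if tok.type == COMMENT:
                return tok.string.lstrip("# ").rstrip()
    except TokenError:
        pass  # e.g. the line opens a bracket that never closes
    return None

print(inline_comment("lr = 0.01  # learning rate\n"))  # learning rate
print(inline_comment("x = 1\n"))                       # None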

Example 5: _find_logical

# Required import: import tokenize [as alias]
# Or: from tokenize import COMMENT [as alias]
def _find_logical(source_lines):
    # Make a variable which is the index of all the starts of lines.
    logical_start = []
    logical_end = []
    last_newline = True
    parens = 0
    for t in generate_tokens(''.join(source_lines)):
        if t[0] in [tokenize.COMMENT, tokenize.DEDENT,
                    tokenize.INDENT, tokenize.NL,
                    tokenize.ENDMARKER]:
            continue
        if not parens and t[0] in [tokenize.NEWLINE, tokenize.SEMI]:
            last_newline = True
            logical_end.append((t[3][0] - 1, t[2][1]))
            continue
        if last_newline and not parens:
            logical_start.append((t[2][0] - 1, t[2][1]))
            last_newline = False
        if t[0] == tokenize.OP:
            if t[1] in '([{':
                parens += 1
            elif t[1] in '}])':
                parens -= 1
    return (logical_start, logical_end) 
Author: sofia-netsurv, Project: python-netsurv, Lines: 26, Source: autopep8.py
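Note that generate_tokens in this excerpt appears to be autopep8's own wrapper that accepts a source string (it is called with ''.join(source_lines) rather than a readline callable). With a small shim standing in for that wrapper (an assumption, not autopep8's implementation), the function can be exercised like this:

import io
import tokenize

def generate_tokens(source):
    """Stand-in for autopep8's helper: tokenize a source *string*."""
    return tokenize.generate_tokens(io.StringIO(source).readline)

source_lines = [
    "x = (1 +\n",
    "     2)\n",
    "y = 3  # comment\n",
]
starts, ends = _find_logical(source_lines)
print(starts)  # [(0, 0), (2, 0)]  -- zero-based (row, column) of each logical line
print(ends)    # [(1, 7), (2, 16)]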

Example 6: process_tokens

# Required import: import tokenize [as alias]
# Or: from tokenize import COMMENT [as alias]
def process_tokens(self, tokens):
        encoding = "ascii"
        for i, (tok_type, token, start, _, line) in enumerate(tokens):
            if tok_type == tokenize.ENCODING:
                # this is always the first token processed
                encoding = token
            elif tok_type == tokenize.STRING:
                # 'token' is the whole un-parsed token; we can look at the start
                # of it to see whether it's a raw or unicode string etc.
                self.process_string_token(token, start[0])
                # We figure the next token, ignoring comments & newlines:
                j = i + 1
                while j < len(tokens) and tokens[j].type in (
                    tokenize.NEWLINE,
                    tokenize.NL,
                    tokenize.COMMENT,
                ):
                    j += 1
                next_token = tokens[j] if j < len(tokens) else None
                if encoding != "ascii":
                    # We convert `tokenize` character count into a byte count,
                    # to match with astroid `.col_offset`
                    start = (start[0], len(line[: start[1]].encode(encoding)))
                self.string_tokens[start] = (str_eval(token), next_token) 
Author: sofia-netsurv, Project: python-netsurv, Lines: 26, Source: strings.py

Example 7: _count_tokens

# Required import: import tokenize [as alias]
# Or: from tokenize import COMMENT [as alias]
def _count_tokens(code, string):
    """
    Return a count of how many times `string` appears as a keyword in `code`.
    """
    count = 0

    try:
        for ttyp, ttok, __, __, __ in _tokens(code):
            if ttyp in (tokenize.COMMENT, tokenize.STRING):
                continue
            if ttok == string:
                count += 1
    except:
        # The input code was bad in some way. It will fail later on.
        pass
    return count 
Author: edx, Project: xqueue-watcher, Lines: 18, Source: gradelib.py
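The _tokens helper is not shown in this excerpt. A self-contained equivalent of the same idea, counting occurrences of a name while skipping comments and string literals, could look like the following (the function name and error handling here are assumptions, not the original gradelib code):

import io
import tokenize

def count_token_occurrences(code, string):
    """Count tokens equal to `string`, ignoring comments and string literals."""
    count = 0
    try:
        for tok in tokenize.generate_tokens(io.StringIO(code).readline):
            if tok.type in (tokenize.COMMENT, tokenize.STRING):
                continue
            if tok.string == string:
                count += 1
    except (tokenize.TokenError, SyntaxError):
        pass  # malformed code; let later stages report the real error
    return count

print(count_token_occurrences("for i in range(3):\n    pass  # for looks\n", "for"))  # 1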

Example 8: count_non_comment_lines

# Required import: import tokenize [as alias]
# Or: from tokenize import COMMENT [as alias]
def count_non_comment_lines(at_least=None, at_most=None, exactly=None, error_msg=None):
    """
    Returns an input check function that checks that the number of non-comment,
    non-blank source lines conforms to the rules in the arguments.
    """
    def check(code):
        linenums = set()
        for ttyp, ttok, (srow, __), __, __ in _tokens(code):
            if ttyp in (tokenize.COMMENT, tokenize.STRING):
                # Comments and strings don't count toward line count. If a string
                # is the only thing on a line, then it's probably a docstring, so
                # don't count it.
                continue
            if not ttok.strip():
                # Tokens that are only whitespace don't count.
                continue
            linenums.add(srow)
        num = len(linenums)
        return _check_occurs(None, num, at_least, at_most, exactly, error_msg)
    return check 
Author: edx, Project: xqueue-watcher, Lines: 22, Source: gradelib.py
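Stripped of the grading-framework plumbing (_tokens, _check_occurs), the counting idea reduces to collecting the start rows of tokens that are neither comments, strings, nor pure whitespace; a minimal sketch under those assumptions:

import io
import tokenize

def count_code_lines(code):
    """Count source lines containing something other than comments/strings."""
    linenums = set()
    for tok in tokenize.generate_tokens(io.StringIO(code).readline):
        if tok.type in (tokenize.COMMENT, tokenize.STRING):
            continue
        if not tok.string.strip():
            continue  # whitespace-only tokens: NEWLINE, NL, INDENT, ...
        linenums.add(tok.start[0])
    return len(linenums)

sample = '"""docstring"""\n# comment\nx = 1\n\ny = 2\n'
print(count_code_lines(sample))  # 2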

Example 9: __waiting

# Required import: import tokenize [as alias]
# Or: from tokenize import COMMENT [as alias]
def __waiting(self, ttype, tstring, lineno):
        opts = self.__options
        # Do docstring extractions, if enabled
        if opts.docstrings and not opts.nodocstrings.get(self.__curfile):
            # module docstring?
            if self.__freshmodule:
                if ttype == tokenize.STRING:
                    self.__addentry(safe_eval(tstring), lineno, isdocstring=1)
                    self.__freshmodule = 0
                elif ttype not in (tokenize.COMMENT, tokenize.NL):
                    self.__freshmodule = 0
                return
            # class docstring?
            if ttype == tokenize.NAME and tstring in ('class', 'def'):
                self.__state = self.__suiteseen
                return
        if ttype == tokenize.NAME and tstring in opts.keywords:
            self.__state = self.__keywordseen 
Author: aliyun, Project: oss-ftp, Lines: 20, Source: pygettext.py

Example 10: __openseen

# Required import: import tokenize [as alias]
# Or: from tokenize import COMMENT [as alias]
def __openseen(self, ttype, tstring, lineno):
        if ttype == tokenize.OP and tstring == ')':
            # We've seen the last of the translatable strings.  Record the
            # line number of the first line of the strings and update the list
            # of messages seen.  Reset state for the next batch.  If there
            # were no strings inside _(), then just ignore this entry.
            if self.__data:
                self.__addentry(EMPTYSTRING.join(self.__data))
            self.__state = self.__waiting
        elif ttype == tokenize.STRING:
            self.__data.append(safe_eval(tstring))
        elif ttype not in [tokenize.COMMENT, token.INDENT, token.DEDENT,
                           token.NEWLINE, tokenize.NL]:
            # warn if we see anything else than STRING or whitespace
            print >> sys.stderr, _(
                '*** %(file)s:%(lineno)s: Seen unexpected token "%(token)s"'
                ) % {
                'token': tstring,
                'file': self.__curfile,
                'lineno': self.__lineno
                }
            self.__state = self.__waiting 
Author: aliyun, Project: oss-ftp, Lines: 24, Source: pygettext.py

Example 11: tokeneater

# Required import: import tokenize [as alias]
# Or: from tokenize import COMMENT [as alias]
def tokeneater(self, type, token, srowcol, erowcol, line):
        if not self.started:
            # look for the first "def", "class" or "lambda"
            if token in ("def", "class", "lambda"):
                if token == "lambda":
                    self.islambda = True
                self.started = True
            self.passline = True    # skip to the end of the line
        elif type == tokenize.NEWLINE:
            self.passline = False   # stop skipping when a NEWLINE is seen
            self.last = srowcol[0]
            if self.islambda:       # lambdas always end at the first NEWLINE
                raise EndOfBlock
        elif self.passline:
            pass
        elif type == tokenize.INDENT:
            self.indent = self.indent + 1
            self.passline = True
        elif type == tokenize.DEDENT:
            self.indent = self.indent - 1
            # the end of matching indent/dedent pairs end a block
            # (note that this only works for "def"/"class" blocks,
            #  not e.g. for "if: else:" or "try: finally:" blocks)
            if self.indent <= 0:
                raise EndOfBlock
        elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
            # any other token on the same indentation level end the previous
            # block as well, except the pseudo-tokens COMMENT and NL.
            raise EndOfBlock 
Author: war-and-code, Project: jawfish, Lines: 31, Source: inspect.py

Example 12: process_tokens

# Required import: import tokenize [as alias]
# Or: from tokenize import COMMENT [as alias]
def process_tokens(self, tokens):
        # type: (typing.Sequence[typing.Tuple]) -> None
        for _type, string, start, _, line in tokens:
            if _type == tokenize.NAME:
                self.__validate_name(string, start, line)
            elif _type == tokenize.COMMENT:
                self.__validate_comment(string, start) 
Author: Shopify, Project: shopify_python, Lines: 9, Source: shopify_styleguide.py

Example 13: build_logical_line_tokens

# Required import: import tokenize [as alias]
# Or: from tokenize import COMMENT [as alias]
def build_logical_line_tokens(self):
        """Build the mapping, comments, and logical line lists."""
        logical = []
        comments = []
        length = 0
        previous_row = previous_column = mapping = None
        for token_type, text, start, end, line in self.tokens:
            if token_type in SKIP_TOKENS:
                continue
            if not mapping:
                mapping = [(0, start)]
            if token_type == tokenize.COMMENT:
                comments.append(text)
                continue
            if token_type == tokenize.STRING:
                text = mutate_string(text)
            if previous_row:
                (start_row, start_column) = start
                if previous_row != start_row:
                    row_index = previous_row - 1
                    column_index = previous_column - 1
                    previous_text = self.lines[row_index][column_index]
                    if (previous_text == ',' or
                            (previous_text not in '{[(' and
                             text not in '}])')):
                        text = ' ' + text
                elif previous_column != start_column:
                    text = line[previous_column:start_column] + text
            logical.append(text)
            length += len(text)
            mapping.append((length, end))
            (previous_row, previous_column) = end
        return comments, logical, mapping 
Author: AtomLinter, Project: linter-pylama, Lines: 35, Source: processor.py

Example 14: is_eol_token

# Required import: import tokenize [as alias]
# Or: from tokenize import COMMENT [as alias]
def is_eol_token(token, _is_eol_token=is_eol_token):
        """Check if the token is an end-of-line token."""
        return (_is_eol_token(token) or
                (token[0] == tokenize.COMMENT and token[1] == token[4])) 
Author: AtomLinter, Project: linter-pylama, Lines: 6, Source: processor.py

Example 15: is_trailing_comma

# Required import: import tokenize [as alias]
# Or: from tokenize import COMMENT [as alias]
def is_trailing_comma(tokens, index):
    """Check if the given token is a trailing comma

    :param tokens: Sequence of modules tokens
    :type tokens: list[tokenize.TokenInfo]
    :param int index: Index of token under check in tokens
    :returns: True if the token is a comma which trails an expression
    :rtype: bool
    """
    token = tokens[index]
    if token.exact_type != tokenize.COMMA:
        return False
    # Must have remaining tokens on the same line such as NEWLINE
    left_tokens = itertools.islice(tokens, index + 1, None)
    same_line_remaining_tokens = list(itertools.takewhile(
        lambda other_token, _token=token: other_token.start[0] == _token.start[0],
        left_tokens
    ))
    # Note: If the newline is tokenize.NEWLINE and not tokenize.NL
    # then the newline denotes the end of expression
    is_last_element = all(
        other_token.type in (tokenize.NEWLINE, tokenize.COMMENT)
        for other_token in same_line_remaining_tokens
    )
    if not same_line_remaining_tokens or not is_last_element:
        return False
    def get_curline_index_start():
        """Get the index denoting the start of the current line"""
        for subindex, token in enumerate(reversed(tokens[:index])):
            # See Lib/tokenize.py and Lib/token.py in cpython for more info
            if token.type in (tokenize.NEWLINE, tokenize.NL):
                return index - subindex
        return 0
    curline_start = get_curline_index_start()
    for prevtoken in tokens[curline_start:index]:
        if '=' in prevtoken.string:
            return True
    return False 
Author: AtomLinter, Project: linter-pylama, Lines: 40, Source: refactoring.py
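A possible way to drive the checker above (assuming itertools is imported, as in the original module): tokenize two one-line statements, find the index of the first comma in each, and compare the results. The driver itself is not part of pylint.

import io
import itertools  # required by is_trailing_comma
import tokenize

def tokens_of(source):
    return list(tokenize.generate_tokens(io.StringIO(source).readline))

tokens = tokens_of("x = 1,  # deliberate one-tuple\n")
index = next(i for i, tok in enumerate(tokens) if tok.exact_type == tokenize.COMMA)
print(is_trailing_comma(tokens, index))  # True: only a comment and NEWLINE follow

tokens = tokens_of("point = (1, 2)\n")
index = next(i for i, tok in enumerate(tokens) if tok.exact_type == tokenize.COMMA)
print(is_trailing_comma(tokens, index))  # False: more of the expression follows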


Note: the tokenize.COMMENT examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are taken from open-source projects contributed by their respective authors, and copyright remains with the original authors; for distribution and use, refer to each project's license. Do not reproduce without permission.