

Python tokenize.COMMENT Attribute Code Examples

This article collects typical usage examples of the tokenize.COMMENT attribute in Python. If you are unsure what tokenize.COMMENT does, how to use it, or what it looks like in practice, the curated examples below may help. You can also explore further usage examples from the tokenize module, where this attribute is defined.


The following presents 15 code examples of the tokenize.COMMENT attribute, sorted by popularity by default.
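
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of how tokenize.COMMENT is typically used: tokenize a source string and keep only the COMMENT tokens. The sample source string is invented for illustration.

import io
import tokenize

source = "x = 1  # set x\ny = 2\n"
# generate_tokens() yields TokenInfo tuples; COMMENT marks '#' comments.
for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    if tok.type == tokenize.COMMENT:
        print(tok.string, "on line", tok.start[0])  # -> "# set x on line 1"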

Example 1: get_type

# Required module: import tokenize [as alias]
# Or: from tokenize import COMMENT [as alias]
def get_type(tokens, start_index):
    """return the line type : docstring, comment, code, empty"""
    i = start_index
    tok_type = tokens[i][0]
    start = tokens[i][2]
    pos = start
    line_type = None
    while i < len(tokens) and tokens[i][2][0] == start[0]:
        tok_type = tokens[i][0]
        pos = tokens[i][3]
        if line_type is None:
            if tok_type == tokenize.STRING:
                line_type = 'docstring_lines'
            elif tok_type == tokenize.COMMENT:
                line_type = 'comment_lines'
            elif tok_type in JUNK:
                pass
            else:
                line_type = 'code_lines'
        i += 1
    if line_type is None:
        line_type = 'empty_lines'
    elif i < len(tokens) and tokens[i][0] == tokenize.NEWLINE:
        i += 1
    return i, pos[0] - start[0] + 1, line_type 
Developer: AtomLinter, Project: linter-pylama, Lines: 27, Source file: raw_metrics.py
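
A hedged usage sketch for the example above: the excerpt does not show the JUNK constant it references, so the definition below (NL and INDENT tokens) is an assumption based on how the function is used, and the sample source string is invented for illustration.

import io
import tokenize

# Assumed definition of JUNK; not shown in the excerpt above.
JUNK = (tokenize.NL, tokenize.INDENT)

src = "# a comment\nx = 1\n"
toks = list(tokenize.generate_tokens(io.StringIO(src).readline))
i = 0
while i < len(toks) and toks[i][0] != tokenize.ENDMARKER:
    i, num_lines, line_type = get_type(toks, i)
    print(num_lines, line_type)  # -> "1 comment_lines", then "1 code_lines"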

Example 2: _break_around_binary_operators

# Required module: import tokenize [as alias]
# Or: from tokenize import COMMENT [as alias]
def _break_around_binary_operators(tokens):
    """Private function to reduce duplication.

    This factors out the shared details between
    :func:`break_before_binary_operator` and
    :func:`break_after_binary_operator`.
    """
    line_break = False
    unary_context = True
    # Previous non-newline token types and text
    previous_token_type = None
    previous_text = None
    for token_type, text, start, end, line in tokens:
        if token_type == tokenize.COMMENT:
            continue
        if ('\n' in text or '\r' in text) and token_type != tokenize.STRING:
            line_break = True
        else:
            yield (token_type, text, previous_token_type, previous_text,
                   line_break, unary_context, start)
            unary_context = text in '([{,;'
            line_break = False
            previous_token_type = token_type
            previous_text = text 
Developer: AtomLinter, Project: linter-pylama, Lines: 26, Source file: pycodestyle.py

Example 3: _verify_pre_check

# Required module: import tokenize [as alias]
# Or: from tokenize import COMMENT [as alias]
def _verify_pre_check(filepath):
    """Check student code for certain issues."""
    # Make sure the program doesn't crash for students.
    # Could use some improvement for better logging and error reporting.
    try:
        # Check for inline "pylint:" comment, which may indicate a student
        # trying to disable a check.
        with tokenize.open(os.path.expanduser(filepath)) as f:
            for tok_type, content, _, _, _ in tokenize.generate_tokens(f.readline):
                if tok_type != tokenize.COMMENT:
                    continue
                match = pylint.constants.OPTION_RGX.search(content)
                if match is not None:
                    print('[ERROR] String "pylint:" found in comment. ' +
                          'No check run on file `{}.`\n'.format(filepath))
                    return False
    except IndentationError as e:
        print('[ERROR] python_ta could not check your code due to an ' +
              'indentation error at line {}.'.format(e.lineno))
        return False
    except tokenize.TokenError as e:
        print('[ERROR] python_ta could not check your code due to a ' +
              'syntax error in your file.')
        return False
    return True 
Developer: pyta-uoft, Project: pyta, Lines: 27, Source file: __init__.py

Example 4: find_doc_for

# Required module: import tokenize [as alias]
# Or: from tokenize import COMMENT [as alias]
def find_doc_for(ast_entry, body_lines):
    lineno = ast_entry.lineno - 1
    line_io = io.BytesIO(body_lines[lineno].encode())
    try:
        tokens = tokenize(line_io.readline) or []
        line_comments = [t.string for t in tokens if t.type == COMMENT]

        if line_comments:
            formatted_lcs = [l[1:].strip() for l in line_comments]
            filtered_lcs = [l for l in formatted_lcs if not is_ignored(l)]
            if filtered_lcs:
                return filtered_lcs[0]
    except TokenError:
        pass

    lineno -= 1
    while lineno >= 0:
        if iscomment(body_lines[lineno]):
            comment = body_lines[lineno].strip("# ")
            if not is_ignored(comment):
                return comment
        if not body_lines[lineno].strip() == "":
            return None
        lineno -= 1
    return None 
Developer: IDSIA, Project: sacred, Lines: 27, Source file: config_scope.py

Example 5: _find_logical

# Required module: import tokenize [as alias]
# Or: from tokenize import COMMENT [as alias]
def _find_logical(source_lines):
    # Make a variable which is the index of all the starts of lines.
    logical_start = []
    logical_end = []
    last_newline = True
    parens = 0
    for t in generate_tokens(''.join(source_lines)):
        if t[0] in [tokenize.COMMENT, tokenize.DEDENT,
                    tokenize.INDENT, tokenize.NL,
                    tokenize.ENDMARKER]:
            continue
        if not parens and t[0] in [tokenize.NEWLINE, tokenize.SEMI]:
            last_newline = True
            logical_end.append((t[3][0] - 1, t[2][1]))
            continue
        if last_newline and not parens:
            logical_start.append((t[2][0] - 1, t[2][1]))
            last_newline = False
        if t[0] == tokenize.OP:
            if t[1] in '([{':
                parens += 1
            elif t[1] in '}])':
                parens -= 1
    return (logical_start, logical_end) 
Developer: sofia-netsurv, Project: python-netsurv, Lines: 26, Source file: autopep8.py

Example 6: process_tokens

# Required module: import tokenize [as alias]
# Or: from tokenize import COMMENT [as alias]
def process_tokens(self, tokens):
        encoding = "ascii"
        for i, (tok_type, token, start, _, line) in enumerate(tokens):
            if tok_type == tokenize.ENCODING:
                # this is always the first token processed
                encoding = token
            elif tok_type == tokenize.STRING:
                # 'token' is the whole un-parsed token; we can look at the start
                # of it to see whether it's a raw or unicode string etc.
                self.process_string_token(token, start[0])
                # We figure the next token, ignoring comments & newlines:
                j = i + 1
                while j < len(tokens) and tokens[j].type in (
                    tokenize.NEWLINE,
                    tokenize.NL,
                    tokenize.COMMENT,
                ):
                    j += 1
                next_token = tokens[j] if j < len(tokens) else None
                if encoding != "ascii":
                    # We convert `tokenize` character count into a byte count,
                    # to match with astroid `.col_offset`
                    start = (start[0], len(line[: start[1]].encode(encoding)))
                self.string_tokens[start] = (str_eval(token), next_token) 
Developer: sofia-netsurv, Project: python-netsurv, Lines: 26, Source file: strings.py

Example 7: _count_tokens

# Required module: import tokenize [as alias]
# Or: from tokenize import COMMENT [as alias]
def _count_tokens(code, string):
    """
    Return a count of how many times `string` appears as a keyword in `code`.
    """
    count = 0

    try:
        for ttyp, ttok, __, __, __ in _tokens(code):
            if ttyp in (tokenize.COMMENT, tokenize.STRING):
                continue
            if ttok == string:
                count += 1
    except:
        # The input code was bad in some way. It will fail later on.
        pass
    return count 
Developer: edx, Project: xqueue-watcher, Lines: 18, Source file: gradelib.py
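
Examples 7 and 8 both call a _tokens(code) helper that is not part of the excerpts. A minimal sketch of what such a helper might look like, assuming it simply wraps tokenize.generate_tokens over a source string:

import io
import tokenize

def _tokens(code):
    """Hypothetical helper: yield tokenize token tuples for a source string."""
    return tokenize.generate_tokens(io.StringIO(code).readline)

# With this helper in place, _count_tokens("while True:\n    pass\n", "while") returns 1.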

Example 8: count_non_comment_lines

# Required module: import tokenize [as alias]
# Or: from tokenize import COMMENT [as alias]
def count_non_comment_lines(at_least=None, at_most=None, exactly=None, error_msg=None):
    """
    Returns an input check function that checks that the number of non-comment,
    non-blank source lines conforms to the rules in the arguments.
    """
    def check(code):
        linenums = set()
        for ttyp, ttok, (srow, __), __, __ in _tokens(code):
            if ttyp in (tokenize.COMMENT, tokenize.STRING):
                # Comments and strings don't count toward line count. If a string
                # is the only thing on a line, then it's probably a docstring, so
                # don't count it.
                continue
            if not ttok.strip():
                # Tokens that are only whitespace don't count.
                continue
            linenums.add(srow)
        num = len(linenums)
        return _check_occurs(None, num, at_least, at_most, exactly, error_msg)
    return check 
Developer: edx, Project: xqueue-watcher, Lines: 22, Source file: gradelib.py

Example 9: __waiting

# Required module: import tokenize [as alias]
# Or: from tokenize import COMMENT [as alias]
def __waiting(self, ttype, tstring, lineno):
        opts = self.__options
        # Do docstring extractions, if enabled
        if opts.docstrings and not opts.nodocstrings.get(self.__curfile):
            # module docstring?
            if self.__freshmodule:
                if ttype == tokenize.STRING:
                    self.__addentry(safe_eval(tstring), lineno, isdocstring=1)
                    self.__freshmodule = 0
                elif ttype not in (tokenize.COMMENT, tokenize.NL):
                    self.__freshmodule = 0
                return
            # class docstring?
            if ttype == tokenize.NAME and tstring in ('class', 'def'):
                self.__state = self.__suiteseen
                return
        if ttype == tokenize.NAME and tstring in opts.keywords:
            self.__state = self.__keywordseen 
Developer: aliyun, Project: oss-ftp, Lines: 20, Source file: pygettext.py

Example 10: __openseen

# Required module: import tokenize [as alias]
# Or: from tokenize import COMMENT [as alias]
def __openseen(self, ttype, tstring, lineno):
        if ttype == tokenize.OP and tstring == ')':
            # We've seen the last of the translatable strings.  Record the
            # line number of the first line of the strings and update the list
            # of messages seen.  Reset state for the next batch.  If there
            # were no strings inside _(), then just ignore this entry.
            if self.__data:
                self.__addentry(EMPTYSTRING.join(self.__data))
            self.__state = self.__waiting
        elif ttype == tokenize.STRING:
            self.__data.append(safe_eval(tstring))
        elif ttype not in [tokenize.COMMENT, token.INDENT, token.DEDENT,
                           token.NEWLINE, tokenize.NL]:
            # warn if we see anything else than STRING or whitespace
            print >> sys.stderr, _(
                '*** %(file)s:%(lineno)s: Seen unexpected token "%(token)s"'
                ) % {
                'token': tstring,
                'file': self.__curfile,
                'lineno': self.__lineno
                }
            self.__state = self.__waiting 
Developer: aliyun, Project: oss-ftp, Lines: 24, Source file: pygettext.py

Example 11: tokeneater

# Required module: import tokenize [as alias]
# Or: from tokenize import COMMENT [as alias]
def tokeneater(self, type, token, srowcol, erowcol, line):
        if not self.started:
            # look for the first "def", "class" or "lambda"
            if token in ("def", "class", "lambda"):
                if token == "lambda":
                    self.islambda = True
                self.started = True
            self.passline = True    # skip to the end of the line
        elif type == tokenize.NEWLINE:
            self.passline = False   # stop skipping when a NEWLINE is seen
            self.last = srowcol[0]
            if self.islambda:       # lambdas always end at the first NEWLINE
                raise EndOfBlock
        elif self.passline:
            pass
        elif type == tokenize.INDENT:
            self.indent = self.indent + 1
            self.passline = True
        elif type == tokenize.DEDENT:
            self.indent = self.indent - 1
            # the end of matching indent/dedent pairs end a block
            # (note that this only works for "def"/"class" blocks,
            #  not e.g. for "if: else:" or "try: finally:" blocks)
            if self.indent <= 0:
                raise EndOfBlock
        elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
            # any other token on the same indentation level end the previous
            # block as well, except the pseudo-tokens COMMENT and NL.
            raise EndOfBlock 
Developer: war-and-code, Project: jawfish, Lines: 31, Source file: inspect.py

Example 12: process_tokens

# Required module: import tokenize [as alias]
# Or: from tokenize import COMMENT [as alias]
def process_tokens(self, tokens):
        # type: (typing.Sequence[typing.Tuple]) -> None
        for _type, string, start, _, line in tokens:
            if _type == tokenize.NAME:
                self.__validate_name(string, start, line)
            elif _type == tokenize.COMMENT:
                self.__validate_comment(string, start) 
Developer: Shopify, Project: shopify_python, Lines: 9, Source file: shopify_styleguide.py

Example 13: build_logical_line_tokens

# Required module: import tokenize [as alias]
# Or: from tokenize import COMMENT [as alias]
def build_logical_line_tokens(self):
        """Build the mapping, comments, and logical line lists."""
        logical = []
        comments = []
        length = 0
        previous_row = previous_column = mapping = None
        for token_type, text, start, end, line in self.tokens:
            if token_type in SKIP_TOKENS:
                continue
            if not mapping:
                mapping = [(0, start)]
            if token_type == tokenize.COMMENT:
                comments.append(text)
                continue
            if token_type == tokenize.STRING:
                text = mutate_string(text)
            if previous_row:
                (start_row, start_column) = start
                if previous_row != start_row:
                    row_index = previous_row - 1
                    column_index = previous_column - 1
                    previous_text = self.lines[row_index][column_index]
                    if (previous_text == ',' or
                            (previous_text not in '{[(' and
                             text not in '}])')):
                        text = ' ' + text
                elif previous_column != start_column:
                    text = line[previous_column:start_column] + text
            logical.append(text)
            length += len(text)
            mapping.append((length, end))
            (previous_row, previous_column) = end
        return comments, logical, mapping 
Developer: AtomLinter, Project: linter-pylama, Lines: 35, Source file: processor.py

Example 14: is_eol_token

# Required module: import tokenize [as alias]
# Or: from tokenize import COMMENT [as alias]
def is_eol_token(token, _is_eol_token=is_eol_token):
        """Check if the token is an end-of-line token."""
        return (_is_eol_token(token) or
                (token[0] == tokenize.COMMENT and token[1] == token[4])) 
Developer: AtomLinter, Project: linter-pylama, Lines: 6, Source file: processor.py

Example 15: is_trailing_comma

# Required module: import tokenize [as alias]
# Or: from tokenize import COMMENT [as alias]
def is_trailing_comma(tokens, index):
    """Check if the given token is a trailing comma

    :param tokens: Sequence of modules tokens
    :type tokens: list[tokenize.TokenInfo]
    :param int index: Index of token under check in tokens
    :returns: True if the token is a comma which trails an expression
    :rtype: bool
    """
    token = tokens[index]
    if token.exact_type != tokenize.COMMA:
        return False
    # Must have remaining tokens on the same line such as NEWLINE
    left_tokens = itertools.islice(tokens, index + 1, None)
    same_line_remaining_tokens = list(itertools.takewhile(
        lambda other_token, _token=token: other_token.start[0] == _token.start[0],
        left_tokens
    ))
    # Note: If the newline is tokenize.NEWLINE and not tokenize.NL
    # then the newline denotes the end of expression
    is_last_element = all(
        other_token.type in (tokenize.NEWLINE, tokenize.COMMENT)
        for other_token in same_line_remaining_tokens
    )
    if not same_line_remaining_tokens or not is_last_element:
        return False
    def get_curline_index_start():
        """Get the index denoting the start of the current line"""
        for subindex, token in enumerate(reversed(tokens[:index])):
            # See Lib/tokenize.py and Lib/token.py in cpython for more info
            if token.type in (tokenize.NEWLINE, tokenize.NL):
                return index - subindex
        return 0
    curline_start = get_curline_index_start()
    for prevtoken in tokens[curline_start:index]:
        if '=' in prevtoken.string:
            return True
    return False 
Developer: AtomLinter, Project: linter-pylama, Lines: 40, Source file: refactoring.py
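
A hedged usage sketch for the function above (which additionally assumes itertools and tokenize are imported in its module): tokenize a one-line snippet that ends in a bare trailing comma and check that comma's index. The sample snippet is invented for illustration.

import io
import tokenize

src = "x = 1,\n"
toks = list(tokenize.generate_tokens(io.StringIO(src).readline))
# Find the index of the comma token, then ask whether it trails the expression.
comma_index = next(i for i, tok in enumerate(toks) if tok.exact_type == tokenize.COMMA)
print(is_trailing_comma(toks, comma_index))  # -> True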


Note: The tokenize.COMMENT attribute examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors. Refer to each project's License for distribution and use; do not reproduce without permission.