

Python tokenize.STRING Attribute Code Examples

This article collects and summarizes typical usage examples of the tokenize.STRING attribute in Python. If you are wondering what tokenize.STRING does, how to use it, or what real-world code that uses it looks like, the curated examples below should help. You can also explore further usage examples from the tokenize module itself.


The following presents 15 code examples of the tokenize.STRING attribute, sorted by popularity by default.
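
Before the project examples, here is a minimal, self-contained sketch (not taken from any of the repositories below) showing what tokenize.STRING marks in a token stream: each STRING token carries the raw, unparsed literal, quotes and prefixes included.

import io
import tokenize

# Tokenize a small snippet and pick out the STRING tokens.
source = 'greeting = "hello"\ndoc = """multi\nline"""\n'
for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    if tok.type == tokenize.STRING:
        # tok.string is the raw literal text, quotes included
        print(tok.start, repr(tok.string))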

Example 1: _has_valid_type_annotation

# Required import: import tokenize [as alias]
# Or alternatively: from tokenize import STRING [as alias]
def _has_valid_type_annotation(self, tokens, i):
        """Extended check of PEP-484 type hint presence"""
        if not self._inside_brackets('('):
            return False
        bracket_level = 0
        for token in tokens[i-1::-1]:
            if token[1] == ':':
                return True
            if token[1] == '(':
                return False
            if token[1] == ']':
                bracket_level += 1
            elif token[1] == '[':
                bracket_level -= 1
            elif token[1] == ',':
                if not bracket_level:
                    return False
            elif token[1] == '.':
                continue
            elif token[0] not in (tokenize.NAME, tokenize.STRING):
                return False
        return False 
Contributor: AtomLinter, Project: linter-pylama, Lines: 24, Source: format.py
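
The method scans the token stream backwards from an `=` sign inside a parameter list, looking for a `:` that would make the `=` part of a PEP 484 annotated default rather than a plain keyword argument. The 5-tuple layout it indexes into can be seen with a small illustration (assumed driver code, not pylint's own):

import io
import tokenize

# Each token is a 5-tuple (type, string, start, end, line), so token[0] is the
# token type and token[1] is its text -- exactly what the backwards scan compares.
tokens = list(tokenize.generate_tokens(
    io.StringIO("def f(x: int = 1): pass\n").readline))
for tok in tokens:
    print(tokenize.tok_name[tok[0]], repr(tok[1]))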

Example 2: _break_around_binary_operators

# Required import: import tokenize [as alias]
# Or alternatively: from tokenize import STRING [as alias]
def _break_around_binary_operators(tokens):
    """Private function to reduce duplication.

    This factors out the shared details between
    :func:`break_before_binary_operator` and
    :func:`break_after_binary_operator`.
    """
    line_break = False
    unary_context = True
    # Previous non-newline token types and text
    previous_token_type = None
    previous_text = None
    for token_type, text, start, end, line in tokens:
        if token_type == tokenize.COMMENT:
            continue
        if ('\n' in text or '\r' in text) and token_type != tokenize.STRING:
            line_break = True
        else:
            yield (token_type, text, previous_token_type, previous_text,
                   line_break, unary_context, start)
            unary_context = text in '([{,;'
            line_break = False
            previous_token_type = token_type
            previous_text = text 
Contributor: AtomLinter, Project: linter-pylama, Lines: 26, Source: pycodestyle.py
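
A hedged usage sketch (assumed driver code, not pycodestyle's checker loop): feeding a real token stream through the generator shows which (token, previous token) pairs the binary-operator checks would inspect, and where a line break was seen.

import io
import tokenize

code = "total = (1 +\n         2)\n"
tokens = list(tokenize.generate_tokens(io.StringIO(code).readline))
for tok_type, text, prev_type, prev_text, line_break, unary, start in \
        _break_around_binary_operators(tokens):
    print(repr(text), "after", repr(prev_text), "line_break:", line_break)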

Example 3: _has_valid_type_annotation

# Required import: import tokenize [as alias]
# Or alternatively: from tokenize import STRING [as alias]
def _has_valid_type_annotation(self, tokens, i):
        """Extended check of PEP-484 type hint presence"""
        if not self._inside_brackets("("):
            return False
        # token_info
        # type string start end line
        #  0      1     2    3    4
        bracket_level = 0
        for token in tokens[i - 1 :: -1]:
            if token[1] == ":":
                return True
            if token[1] == "(":
                return False
            if token[1] == "]":
                bracket_level += 1
            elif token[1] == "[":
                bracket_level -= 1
            elif token[1] == ",":
                if not bracket_level:
                    return False
            elif token[1] in (".", "..."):
                continue
            elif token[0] not in (tokenize.NAME, tokenize.STRING, tokenize.NL):
                return False
        return False 
Contributor: sofia-netsurv, Project: python-netsurv, Lines: 27, Source: format.py

Example 4: process_tokens

# Required import: import tokenize [as alias]
# Or alternatively: from tokenize import STRING [as alias]
def process_tokens(self, tokens):
        encoding = "ascii"
        for i, (tok_type, token, start, _, line) in enumerate(tokens):
            if tok_type == tokenize.ENCODING:
                # this is always the first token processed
                encoding = token
            elif tok_type == tokenize.STRING:
                # 'token' is the whole un-parsed token; we can look at the start
                # of it to see whether it's a raw or unicode string etc.
                self.process_string_token(token, start[0])
                # We figure the next token, ignoring comments & newlines:
                j = i + 1
                while j < len(tokens) and tokens[j].type in (
                    tokenize.NEWLINE,
                    tokenize.NL,
                    tokenize.COMMENT,
                ):
                    j += 1
                next_token = tokens[j] if j < len(tokens) else None
                if encoding != "ascii":
                    # We convert `tokenize` character count into a byte count,
                    # to match with astroid `.col_offset`
                    start = (start[0], len(line[: start[1]].encode(encoding)))
                self.string_tokens[start] = (str_eval(token), next_token) 
Contributor: sofia-netsurv, Project: python-netsurv, Lines: 26, Source: strings.py
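
The method relies on the fact that, when tokenizing bytes, the very first token is always tokenize.ENCODING, so the encoding is known before any STRING token arrives; the byte-offset conversion then makes the recorded start positions comparable with astroid's .col_offset. A small illustration of that ordering (assumed input handling, not pylint's):

import io
import tokenize

data = b'# -*- coding: latin-1 -*-\ns = "caf\xe9"\n'
for tok in tokenize.tokenize(io.BytesIO(data).readline):
    print(tokenize.tok_name[tok.type], repr(tok.string))
    if tok.type == tokenize.STRING:
        break  # ENCODING was emitted first, before this STRING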

Example 5: check_for_concatenated_strings

# Required import: import tokenize [as alias]
# Or alternatively: from tokenize import STRING [as alias]
def check_for_concatenated_strings(self, iterable_node, iterable_type):
        for elt in iterable_node.elts:
            if isinstance(elt, Const) and elt.pytype() in _AST_NODE_STR_TYPES:
                if elt.col_offset < 0:
                    # This can happen in case of escaped newlines
                    continue
                if (elt.lineno, elt.col_offset) not in self.string_tokens:
                    # This may happen with Latin1 encoding
                    # cf. https://github.com/PyCQA/pylint/issues/2610
                    continue
                matching_token, next_token = self.string_tokens[
                    (elt.lineno, elt.col_offset)
                ]
                # We detect string concatenation: the AST Const is the
                # combination of 2 string tokens
                if matching_token != elt.value and next_token is not None:
                    if next_token.type == tokenize.STRING and (
                        next_token.start[0] == elt.lineno
                        or self.config.check_str_concat_over_line_jumps
                    ):
                        self.add_message(
                            "implicit-str-concat-in-sequence",
                            line=elt.lineno,
                            args=(iterable_type,),
                        ) 
Contributor: sofia-netsurv, Project: python-netsurv, Lines: 27, Source: strings.py
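
For context, this is the pattern the implicit-str-concat-in-sequence message targets: a missing comma between adjacent string literals silently merges them into one element. A minimal reproduction (illustrative only):

FRUIT = [
    "apple",
    "banana"   # <- missing comma: the next literal is concatenated onto this one
    "cherry",
]
print(FRUIT)   # ['apple', 'bananacherry'] -- only two elements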

Example 6: get_type

# Required import: import tokenize [as alias]
# Or alternatively: from tokenize import STRING [as alias]
def get_type(tokens, start_index):
    """return the line type : docstring, comment, code, empty"""
    i = start_index
    tok_type = tokens[i][0]
    start = tokens[i][2]
    pos = start
    line_type = None
    while i < len(tokens) and tokens[i][2][0] == start[0]:
        tok_type = tokens[i][0]
        pos = tokens[i][3]
        if line_type is None:
            if tok_type == tokenize.STRING:
                line_type = "docstring_lines"
            elif tok_type == tokenize.COMMENT:
                line_type = "comment_lines"
            elif tok_type in JUNK:
                pass
            else:
                line_type = "code_lines"
        i += 1
    if line_type is None:
        line_type = "empty_lines"
    elif i < len(tokens) and tokens[i][0] == tokenize.NEWLINE:
        i += 1
    return i, pos[0] - start[0] + 1, line_type 
Contributor: sofia-netsurv, Project: python-netsurv, Lines: 27, Source: raw_metrics.py
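
A rough usage sketch. JUNK is a constant defined elsewhere in pylint's raw_metrics module; here it is assumed to cover whitespace-like token types for the demo, and the driver loop below is likewise an assumption, not pylint's own.

import io
import tokenize

JUNK = (tokenize.NL, tokenize.NEWLINE, tokenize.INDENT,
        tokenize.DEDENT, tokenize.ENDMARKER)
source = '"""module docstring"""\n# a comment\nx = 1\n'
tokens = list(tokenize.generate_tokens(io.StringIO(source).readline))
i = 0
while i < len(tokens) - 1:
    i, line_count, line_type = get_type(tokens, i)
    print(line_type, line_count)
# docstring_lines 1 / comment_lines 1 / code_lines 1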

Example 7: _count_tokens

# Required import: import tokenize [as alias]
# Or alternatively: from tokenize import STRING [as alias]
def _count_tokens(code, string):
    """
    Return a count of how many times `string` appears as a keyword in `code`.
    """
    count = 0

    try:
        for ttyp, ttok, __, __, __ in _tokens(code):
            if ttyp in (tokenize.COMMENT, tokenize.STRING):
                continue
            if ttok == string:
                count += 1
    except:
        # The input code was bad in some way. It will fail later on.
        pass
    return count 
Contributor: edx, Project: xqueue-watcher, Lines: 18, Source: gradelib.py
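
The `_tokens` helper is not shown in the snippet; a plausible stand-in (an assumption, not xqueue-watcher's actual implementation) is a thin wrapper around tokenize.generate_tokens. With it in place, keywords inside strings and comments are not counted:

import io
import tokenize

def _tokens(code):
    # Assumed helper: yield tokenize 5-tuples for a source string.
    return tokenize.generate_tokens(io.StringIO(code).readline)

print(_count_tokens('x = "while"  # while\nwhile x:\n    pass\n', "while"))  # 1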

Example 8: count_non_comment_lines

# Required import: import tokenize [as alias]
# Or alternatively: from tokenize import STRING [as alias]
def count_non_comment_lines(at_least=None, at_most=None, exactly=None, error_msg=None):
    """
    Returns an input check function that checks that the number of non-comment,
    non-blank source lines conforms to the rules in the arguments.
    """
    def check(code):
        linenums = set()
        for ttyp, ttok, (srow, __), __, __ in _tokens(code):
            if ttyp in (tokenize.COMMENT, tokenize.STRING):
                # Comments and strings don't count toward line count. If a string
                # is the only thing on a line, then it's probably a docstring, so
                # don't count it.
                continue
            if not ttok.strip():
                # Tokens that are only whitespace don't count.
                continue
            linenums.add(srow)
        num = len(linenums)
        return _check_occurs(None, num, at_least, at_most, exactly, error_msg)
    return check 
Contributor: edx, Project: xqueue-watcher, Lines: 22, Source: gradelib.py

Example 9: decistmt

# Required import: import tokenize [as alias]
# Or alternatively: from tokenize import STRING [as alias]
def decistmt(tokens):
    """Substitute Decimals for floats in a string of statements.

    Based on an example from the tokenize module docs.
    """
    result = []
    for toknum, tokval, _, _, _  in tokens:
        if toknum == tokenize.NUMBER and '.' in tokval:  # replace NUMBER tokens
            for newtok in [
                (tokenize.NAME, 'Decimal'),
                (tokenize.OP, '('),
                (tokenize.STRING, repr(tokval)),
                (tokenize.OP, ')')
            ]:
                yield newtok
        else:
            yield (toknum, tokval) 
Contributor: ktraunmueller, Project: Computable, Lines: 19, Source: test_inputtransformer.py
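
A usage sketch (assumed driver code, not IPython's test harness): round-trip a statement through decistmt and tokenize.untokenize, which accepts the 2-tuples the generator yields, then evaluate the rewritten source.

import io
import tokenize
from decimal import Decimal

src = "print(3.20 + 1.10)\n"
toks = tokenize.generate_tokens(io.StringIO(src).readline)
rewritten = tokenize.untokenize(decistmt(toks))
print(rewritten)  # roughly: print (Decimal ('3.20')+Decimal ('1.10'))
exec(rewritten)   # 4.30 -- exact, unlike the float result 4.300000000000001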

Example 10: __waiting

# Required import: import tokenize [as alias]
# Or alternatively: from tokenize import STRING [as alias]
def __waiting(self, ttype, tstring, lineno):
        opts = self.__options
        # Do docstring extractions, if enabled
        if opts.docstrings and not opts.nodocstrings.get(self.__curfile):
            # module docstring?
            if self.__freshmodule:
                if ttype == tokenize.STRING:
                    self.__addentry(safe_eval(tstring), lineno, isdocstring=1)
                    self.__freshmodule = 0
                elif ttype not in (tokenize.COMMENT, tokenize.NL):
                    self.__freshmodule = 0
                return
            # class docstring?
            if ttype == tokenize.NAME and tstring in ('class', 'def'):
                self.__state = self.__suiteseen
                return
        if ttype == tokenize.NAME and tstring in opts.keywords:
            self.__state = self.__keywordseen 
Contributor: aliyun, Project: oss-ftp, Lines: 20, Source: pygettext.py

Example 11: __openseen

# Required import: import tokenize [as alias]
# Or alternatively: from tokenize import STRING [as alias]
def __openseen(self, ttype, tstring, lineno):
        if ttype == tokenize.OP and tstring == ')':
            # We've seen the last of the translatable strings.  Record the
            # line number of the first line of the strings and update the list
            # of messages seen.  Reset state for the next batch.  If there
            # were no strings inside _(), then just ignore this entry.
            if self.__data:
                self.__addentry(EMPTYSTRING.join(self.__data))
            self.__state = self.__waiting
        elif ttype == tokenize.STRING:
            self.__data.append(safe_eval(tstring))
        elif ttype not in [tokenize.COMMENT, token.INDENT, token.DEDENT,
                           token.NEWLINE, tokenize.NL]:
            # warn if we see anything else than STRING or whitespace
            print >> sys.stderr, _(
                '*** %(file)s:%(lineno)s: Seen unexpected token "%(token)s"'
                ) % {
                'token': tstring,
                'file': self.__curfile,
                'lineno': self.__lineno
                }
            self.__state = self.__waiting 
Contributor: aliyun, Project: oss-ftp, Lines: 24, Source: pygettext.py
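
These two pygettext states are driven by the raw token stream; for orientation, this is the sequence of (type, text) pairs the state machine walks for a typical translatable call (illustration only, using Python 3's tokenize, while pygettext itself is Python 2 code):

import io
import tokenize

# NAME '_' triggers __keywordseen, OP '(' opens the call, STRING tokens are
# collected by __openseen, and OP ')' flushes the entry.
src = '_("hello %s") % name\n'
for tok in tokenize.generate_tokens(io.StringIO(src).readline):
    print(tokenize.tok_name[tok.type], repr(tok.string))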

Example 12: remove_docstrings

# Required import: import tokenize [as alias]
# Or alternatively: from tokenize import STRING [as alias]
def remove_docstrings(tokens):
    """
    Removes docstrings from *tokens* which is expected to be a list equivalent
    of `tokenize.generate_tokens()` (so we can update in-place).
    """
    prev_tok_type = None
    for index, tok in enumerate(tokens):
        token_type = tok[0]
        if token_type == tokenize.STRING:
            if prev_tok_type == tokenize.INDENT:
                # Definitely a docstring
                tokens[index][1] = ''  # Remove it
                # Remove the leftover indentation and newline:
                tokens[index - 1][1] = ''
                tokens[index - 2][1] = ''
            elif prev_tok_type == tokenize.NL:
                # This captures whole-module docstrings:
                if tokens[index + 1][0] == tokenize.NEWLINE:
                    tokens[index][1] = ''
                    # Remove the trailing newline:
                    tokens[index + 1][1] = ''
        prev_tok_type = token_type 
Contributor: riusksk, Project: shellsploit-library, Lines: 24, Source: minification.py
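
Usage sketch (assumed driver code, not the project's own pipeline): the function mutates a list of mutable token rows in place, so convert the TokenInfo tuples to lists first and then inspect which strings were blanked.

import io
import tokenize

source = 'def f():\n    """doc"""\n    return 1\n'
rows = [list(tok) for tok in tokenize.generate_tokens(io.StringIO(source).readline)]
remove_docstrings(rows)
print([r[1] for r in rows if r[0] == tokenize.STRING])  # [''] -- docstring blanked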

Example 13: convert_toplevel_docstring

# Required import: import tokenize [as alias]
# Or alternatively: from tokenize import STRING [as alias]
def convert_toplevel_docstring(tokens):
    for token in tokens:
        # For each string
        if token.type == tokenize.STRING:
            text = token.string
            # Must be a docstring
            if text.startswith('"""') or text.startswith("'''"):
                startline, startcol = token.start
                # Starting column MUST be 0
                if startcol == 0:
                    endline, endcol = token.end
                    lines = ['# ' + line
                             for line in text.strip('"\' \n').split('\n')]
                    text = '\n'.join(lines)
                    fmt = '# <markdowncell>\n{0}\n# <codecell>'.format(text)
                    yield TokenInfo(type=tokenize.COMMENT,
                                    start=(startline, startcol),
                                    end=(endline, endcol),
                                    string=fmt,
                                    line='#')
                    # To next token
                    continue
        # Return untouched
        yield token 
Contributor: sklam, Project: py2nb, Lines: 26, Source: reader.py
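
Usage sketch (assumed driver code; TokenInfo is taken to be tokenize.TokenInfo): rewrite a top-level docstring into markdown/code cell markers and reassemble the source with tokenize.untokenize.

import io
import tokenize
from tokenize import TokenInfo

source = '"""Top-level notes."""\nx = 1\n'
tokens = tokenize.generate_tokens(io.StringIO(source).readline)
print(tokenize.untokenize(convert_toplevel_docstring(tokens)))
# The docstring comes back as '# <markdowncell>' / '# <codecell>' comment lines.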

Example 14: prg2py_after_preproc

# Required import: import tokenize [as alias]
# Or alternatively: from tokenize import STRING [as alias]
def prg2py_after_preproc(data, parser_start, input_filename):
    input_stream = antlr4.InputStream(data)
    lexer = VisualFoxpro9Lexer(input_stream)
    stream = antlr4.CommonTokenStream(lexer)
    parser = VisualFoxpro9Parser(stream)
    tree = run_parser(stream, parser, parser_start)
    TreeCleanVisitor().visit(tree)
    output_tree = PythonConvertVisitor(input_filename).visit(tree)
    if not isinstance(output_tree, list):
        return output_tree
    output = add_indents(output_tree, 0)
    options = autopep8.parse_args(['--max-line-length', '100000', '-'])
    output = autopep8.fix_code(output, options)
    tokens = list(tokenize.generate_tokens(io.StringIO(output).readline))
    for i, token in enumerate(tokens):
        token = list(token)
        if token[0] == tokenize.STRING and token[1].startswith('u'):
            token[1] = token[1][1:]
        tokens[i] = tuple(token)
    return tokenize.untokenize(tokens) 
Contributor: mwisslead, Project: vfp2py, Lines: 22, Source: vfp2py.py
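
The end of the function is plain tokenize plumbing and can be exercised without the VFP toolchain; a narrowed-down sketch of just that final loop (an illustration, not vfp2py's actual code path) strips the redundant u-prefix from string literals in already-valid source:

import io
import tokenize

src = "s = u'hello'\n"
toks = [list(t) for t in tokenize.generate_tokens(io.StringIO(src).readline)]
for t in toks:
    if t[0] == tokenize.STRING and t[1].startswith('u'):
        t[1] = t[1][1:]
print(tokenize.untokenize([tuple(t) for t in toks]), end='')  # s = 'hello'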

Example 15: remove_docstrings

# Required import: import tokenize [as alias]
# Or alternatively: from tokenize import STRING [as alias]
def remove_docstrings(tokens):
    """
    Removes docstrings from *tokens* which is expected to be a list equivalent
    of `tokenize.generate_tokens()` (so we can update in-place).
    """
    prev_tok_type = None
    for index, tok in enumerate(tokens):
        token_type = tok[0]
        if token_type == tokenize.STRING:
            if prev_tok_type == tokenize.INDENT:
                # Definitely a docstring
                tokens[index][1] = '' # Remove it
                # Remove the leftover indentation and newline:
                tokens[index-1][1] = ''
                tokens[index-2][1] = ''
            elif prev_tok_type == tokenize.NL:
                # This captures whole-module docstrings:
                if tokens[index+1][0] == tokenize.NEWLINE:
                    tokens[index][1] = ''
                    # Remove the trailing newline:
                    tokens[index+1][1] = ''
        prev_tok_type = token_type 
Contributor: liftoff, Project: pyminifier, Lines: 24, Source: minification.py


Note: The tokenize.STRING attribute examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their original authors; the source code copyright belongs to those authors, and distribution and use should follow the corresponding project's license. Please do not reproduce without permission.