当前位置: 首页>>代码示例>>Python>>正文


Python tokenize.NAME属性代码示例

本文整理汇总了Python中tokenize.NAME属性的典型用法代码示例。如果您正苦于以下问题:Python tokenize.NAME属性的具体用法?Python tokenize.NAME怎么用?Python tokenize.NAME使用的例子?那么, 这里精选的属性代码示例或许可以为您提供帮助。您也可以进一步了解该属性所在tokenize的用法示例。


在下文中一共展示了tokenize.NAME属性的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: _has_valid_type_annotation

# 需要导入模块: import tokenize [as 别名]
# 或者: from tokenize import NAME [as 别名]
def _has_valid_type_annotation(self, tokens, i):
    """Extended check for the presence of a PEP-484 type hint.

    Scans the token stream backwards from position *i*, staying inside a
    single parameter of a parenthesized argument list, and reports whether
    an annotation colon appears before the parameter boundary (an opening
    ``(`` or an unbracketed ``,``).
    """
    if not self._inside_brackets('('):
        return False
    square_depth = 0  # nesting of ``[...]`` seen while walking backwards
    for entry in tokens[i - 1::-1]:
        kind, text = entry[0], entry[1]
        if text == ':':
            return True  # found the annotation colon
        if text == '(':
            return False  # reached the start of the argument list
        if text == ']':
            square_depth += 1
        elif text == '[':
            square_depth -= 1
        elif text == ',':
            if not square_depth:
                return False  # previous parameter boundary, no colon seen
        elif text == '.':
            continue  # dotted names are allowed inside an annotation
        elif kind not in (tokenize.NAME, tokenize.STRING):
            return False
    return False
开发者ID:AtomLinter,项目名称:linter-pylama,代码行数:24,代码来源:format.py

示例2: _replace_booleans

# 需要导入模块: import tokenize [as 别名]
# 或者: from tokenize import NAME [as 别名]
def _replace_booleans(tok):
    """Replace ``&`` with ``and`` and ``|`` with ``or`` so that bitwise
    precedence is changed to boolean precedence.

    Parameters
    ----------
    tok : tuple of int, str
        ints correspond to the all caps constants in the tokenize module

    Returns
    -------
    t : tuple of int, str
        Either the input or token or the replacement values
    """
    toknum, tokval = tok
    if toknum == tokenize.OP:
        if tokval == '&':
            return tokenize.NAME, 'and'
        elif tokval == '|':
            return tokenize.NAME, 'or'
        return toknum, tokval
    return toknum, tokval 
开发者ID:Frank-qlu,项目名称:recruit,代码行数:24,代码来源:expr.py

示例3: _has_valid_type_annotation

# 需要导入模块: import tokenize [as 别名]
# 或者: from tokenize import NAME [as 别名]
def _has_valid_type_annotation(self, tokens, i):
    """Extended check of PEP-484 type hint presence.

    Walks the token stream in reverse from position *i* looking for an
    annotation colon before the current parameter's boundary.  Each token
    is a token_info tuple laid out as (type, string, start, end, line).
    """
    if not self._inside_brackets("("):
        return False
    depth = 0  # tracks ``[...]`` nesting while scanning backwards
    for kind, text in ((t[0], t[1]) for t in tokens[i - 1 :: -1]):
        if text == ":":
            return True
        if text == "(":
            # hit the opening paren of the argument list first
            return False
        if text == "]":
            depth += 1
        elif text == "[":
            depth -= 1
        elif text == ",":
            if not depth:
                # comma outside brackets ends this parameter
                return False
        elif text in (".", "..."):
            continue  # dotted names and Ellipsis are valid in annotations
        elif kind not in (tokenize.NAME, tokenize.STRING, tokenize.NL):
            return False
    return False
开发者ID:sofia-netsurv,项目名称:python-netsurv,代码行数:27,代码来源:format.py

示例4: scanvars

# 需要导入模块: import tokenize [as 别名]
# 或者: from tokenize import NAME [as 别名]
def scanvars(reader, frame, locals):
    """Scan one logical line of Python and look up values of variables used.

    Tokenizes source read from *reader* until the end of the logical line
    (the NEWLINE token) and resolves each NAME token via the module-level
    ``lookup`` helper against *frame*/*locals*.  Dotted names are resolved
    one attribute at a time with ``getattr``.  Returns a list of
    ``(name, where, value)`` triples; the ``__UNDEF__`` sentinel marks
    values that could not be resolved.
    """
    vars, lasttoken, parent, prefix, value = [], None, None, '', __UNDEF__
    for ttype, token, start, end, line in tokenize.generate_tokens(reader):
        if ttype == tokenize.NEWLINE: break  # logical line ends here
        if ttype == tokenize.NAME and token not in keyword.kwlist:
            if lasttoken == '.':
                # attribute access: resolve against the object before the dot
                if parent is not __UNDEF__:
                    value = getattr(parent, token, __UNDEF__)
                    vars.append((prefix + token, prefix, value))
            else:
                where, value = lookup(token, frame, locals)
                vars.append((token, where, value))
        elif token == '.':
            # extend the dotted-name chain for the next NAME token
            prefix += lasttoken + '.'
            parent = value
        else:
            # any other token breaks the dotted-name chain
            parent, prefix = None, ''
        lasttoken = token
    return vars
开发者ID:glmcdona,项目名称:meddle,代码行数:22,代码来源:cgitb.py

示例5: run

# 需要导入模块: import tokenize [as 别名]
# 或者: from tokenize import NAME [as 别名]
def run(self):
    """Find the first block-opening line and its first indented line.

    Tokenizes source read from ``self.readline`` until a block opener
    (``class``/``def``/``for``/``if``/``try``/``while``) followed by an
    INDENT token is seen, recording the opener's source line and the first
    indented line.  Returns ``(self.blkopenline, self.indentedline)``.
    """
    OPENERS = ('class', 'def', 'for', 'if', 'try', 'while')
    INDENT = tokenize.INDENT
    NAME = tokenize.NAME

    # tokenize.tabsize is a module global; patch it for this scan and
    # restore it afterwards no matter what happens.
    save_tabsize = tokenize.tabsize
    tokenize.tabsize = self.tabwidth
    try:
        try:
            for (typ, token, start, end, line) in token_generator(self.readline):
                if typ == NAME and token in OPENERS:
                    self.blkopenline = line
                # BUG FIX: the original tested ``type == INDENT`` -- comparing
                # the *builtin* ``type`` to a token constant, which is always
                # False, so indentedline was never recorded.  The token type
                # variable ``typ`` is what must be compared.
                elif typ == INDENT and self.blkopenline:
                    self.indentedline = line
                    break

        except (tokenize.TokenError, IndentationError):
            # since we cut off the tokenizer early, we can trigger
            # spurious errors
            pass
    finally:
        tokenize.tabsize = save_tabsize
    return self.blkopenline, self.indentedline
开发者ID:IronLanguages,项目名称:ironpython2,代码行数:25,代码来源:AutoIndent.py

示例6: add_token

# 需要导入模块: import tokenize [as 别名]
# 或者: from tokenize import NAME [as 别名]
def add_token(self, token_type, token):
    """Accumulate one token of a function-call expression.

    Tracks bracket/paren/brace depth; tokens seen before the first ``(``
    must be NAME or ``.`` tokens and are collected as the dotted function
    name.  The call is marked ``done`` once the opening paren's depth
    returns to zero.
    """
    if self.done:
        return
    self.tokens.append((token_type, token))
    if token in ("(", "{", "["):
        self.paren_depth += 1
    elif token in (")", "}", "]"):
        self.paren_depth -= 1
    assert self.paren_depth >= 0
    if not self.started:
        if token == "(":
            self.started = True
        else:
            # everything before the call parens is part of the dotted name
            assert token_type == tokenize.NAME or token == "."
            self.func.append(token)
    if self.started and self.paren_depth == 0:
        self.done = True

# This is not a very general function -- it assumes that all references to the
# given object are of the form '<obj_name>.something(method call)'. 
开发者ID:birforce,项目名称:vnpy_crypto,代码行数:22,代码来源:eval.py

示例7: decistmt

# 需要导入模块: import tokenize [as 别名]
# 或者: from tokenize import NAME [as 别名]
def decistmt(tokens):
    """Substitute ``Decimal(...)`` calls for float literals in a token stream.

    Based on an example from the tokenize module docs.  Yields the input
    tokens unchanged, except that every NUMBER token whose text contains a
    ``.`` is expanded into the four tokens ``Decimal ( '<literal>' )``.
    (The original body also built an unused ``result`` list; that dead
    local has been removed.)
    """
    for toknum, tokval, _, _, _ in tokens:
        if toknum == tokenize.NUMBER and '.' in tokval:  # replace NUMBER tokens
            yield tokenize.NAME, 'Decimal'
            yield tokenize.OP, '('
            yield tokenize.STRING, repr(tokval)
            yield tokenize.OP, ')'
        else:
            yield (toknum, tokval)
开发者ID:ktraunmueller,项目名称:Computable,代码行数:19,代码来源:test_inputtransformer.py

示例8: __waiting

# 需要导入模块: import tokenize [as 别名]
# 或者: from tokenize import NAME [as 别名]
def __waiting(self, ttype, tstring, lineno):
    """State handler used while not inside a keyword call or suite header.

    Extracts the module docstring (when docstring extraction is enabled
    and this file is not excluded), then watches for ``class``/``def``
    headers and for configured translation keywords, switching the
    extractor state accordingly.
    """
    opts = self.__options
    docstrings_on = opts.docstrings and not opts.nodocstrings.get(self.__curfile)
    if docstrings_on and self.__freshmodule:
        # The module docstring is the first STRING token of the file;
        # COMMENT/NL tokens before it keep the "fresh module" phase alive.
        if ttype == tokenize.STRING:
            self.__addentry(safe_eval(tstring), lineno, isdocstring=1)
        if ttype not in (tokenize.COMMENT, tokenize.NL):
            self.__freshmodule = 0
        return
    if docstrings_on and ttype == tokenize.NAME and tstring in ('class', 'def'):
        # A suite header may be followed by a class/function docstring.
        self.__state = self.__suiteseen
        return
    if ttype == tokenize.NAME and tstring in opts.keywords:
        self.__state = self.__keywordseen
开发者ID:aliyun,项目名称:oss-ftp,代码行数:20,代码来源:pygettext.py

示例9: enumerate_keyword_args

# 需要导入模块: import tokenize [as 别名]
# 或者: from tokenize import NAME [as 别名]
def enumerate_keyword_args(tokens):
    """
    Iterates over *tokens* and returns a dictionary with function names as the
    keys and lists of keyword arguments as the values.
    """
    keyword_args = {}
    current_function = False  # name of the enclosing def, or False
    for position, tok in enumerate(tokens):
        tok_type, tok_string = tok[0], tok[1]
        if tok_type == tokenize.NEWLINE:
            current_function = False  # a logical line ended the def header
        if tok_type != tokenize.NAME:
            continue
        if tok_string == "def":
            # the NAME following ``def`` is the function being defined
            current_function = tokens[position + 1][1]
            keyword_args[current_function] = []
        elif current_function and tokens[position + 1][1] == '=':
            # NAME directly followed by ``=`` is a keyword argument
            keyword_args[current_function].append(tok_string)
    return keyword_args
开发者ID:riusksk,项目名称:shellsploit-library,代码行数:23,代码来源:analyze.py

示例10: obfuscatable_class

# 需要导入模块: import tokenize [as 别名]
# 或者: from tokenize import NAME [as 别名]
def obfuscatable_class(tokens, index, **kwargs):
    """
    Given a list of *tokens* and an *index* (representing the current position),
    returns the token string if it is a class name that can be safely
    obfuscated.
    """
    if index > 0:
        preceding = tokens[index - 1]
    else:
        # Fake a newline token so index 0 has a well-defined predecessor.
        preceding = (54, '\n', (1, 1), (1, 2), '#\n')
    tok_type, tok_string = tokens[index][0], tokens[index][1]
    if tok_type != tokenize.NAME:
        return None  # only identifiers can name a class
    if tok_string.startswith('__'):
        return None  # leave dunder names untouched
    if preceding[1] == "class":
        return tok_string
开发者ID:riusksk,项目名称:shellsploit-library,代码行数:22,代码来源:obfuscate.py

示例11: obfuscatable_function

# 需要导入模块: import tokenize [as 别名]
# 或者: from tokenize import NAME [as 别名]
def obfuscatable_function(tokens, index, **kwargs):
    """
    Given a list of *tokens* and an *index* (representing the current position),
    returns the token string if it is a function or method name that can be
    safely obfuscated.
    """
    if index > 0:
        preceding = tokens[index - 1]
    else:
        # Fake a newline token so index 0 has a well-defined predecessor.
        preceding = (54, '\n', (1, 1), (1, 2), '#\n')
    tok_type, tok_string = tokens[index][0], tokens[index][1]
    if tok_type != tokenize.NAME:
        return None  # only identifiers can name a function
    if tok_string.startswith('__'):
        return None  # leave dunder names untouched
    if preceding[1] == "def":
        return tok_string
开发者ID:riusksk,项目名称:shellsploit-library,代码行数:22,代码来源:obfuscate.py

示例12: obfuscate_class

# 需要导入模块: import tokenize [as 别名]
# 或者: from tokenize import NAME [as 别名]
def obfuscate_class(tokens, index, replace, replacement, *args):
    """
    If the token string (a class) inside *tokens[index]* matches *replace*,
    return *replacement*.
    """
    def _record(new_name):
        # Remember the substitution so the original name can be recovered.
        CLASS_REPLACEMENTS[new_name] = replace
        return new_name
    tok_type, tok_string = tokens[index][0], tokens[index][1]
    if tok_type != tokenize.NAME:
        return None  # only identifiers are candidates
    if tok_string.startswith('__'):
        return None  # leave dunder names untouched
    if tokens[index - 1][1] == '.':
        return None  # attribute access on another object -- don't touch
    if tok_string == replace:
        return _record(replacement)
开发者ID:riusksk,项目名称:shellsploit-library,代码行数:22,代码来源:obfuscate.py

示例13: obfuscate_unique

# 需要导入模块: import tokenize [as 别名]
# 或者: from tokenize import NAME [as 别名]
def obfuscate_unique(tokens, index, replace, replacement, *args):
    """
    If the token string (a unique value anywhere) inside *tokens[index]*
    matches *replace*, return *replacement*.

    .. note::

        This function is only for replacing absolutely unique ocurrences of
        *replace* (where we don't have to worry about their position).
    """
    def _record(new_name):
        # Remember the substitution so the original name can be recovered.
        UNIQUE_REPLACEMENTS[new_name] = replace
        return new_name
    tok_type, tok_string = tokens[index][0], tokens[index][1]
    if tok_type != tokenize.NAME:
        return None  # only identifiers are candidates
    if tok_string == replace:
        return _record(replacement)
开发者ID:riusksk,项目名称:shellsploit-library,代码行数:22,代码来源:obfuscate.py

示例14: enumerate_keyword_args

# 需要导入模块: import tokenize [as 别名]
# 或者: from tokenize import NAME [as 别名]
def enumerate_keyword_args(tokens):
    """
    Iterates over *tokens* and returns a dictionary with function names as the
    keys and lists of keyword arguments as the values.
    """
    result = {}
    active = False  # name of the def currently being scanned, or False
    index = 0
    for tok in tokens:
        kind, text = tok[0], tok[1]
        if kind == tokenize.NEWLINE:
            # end of the logical line holding the def header
            active = False
        if kind == tokenize.NAME:
            if text == "def":
                # the NAME right after ``def`` is the function's name
                active = tokens[index + 1][1]
                result[active] = []
            elif active and tokens[index + 1][1] == '=':
                # NAME immediately followed by ``=`` is a keyword argument
                result[active].append(text)
        index += 1
    return result
开发者ID:liftoff,项目名称:pyminifier,代码行数:23,代码来源:analyze.py

示例15: process_tokens

# 需要导入模块: import tokenize [as 别名]
# 或者: from tokenize import NAME [as 别名]
def process_tokens(self, tokens):
    # type: (typing.Sequence[typing.Tuple]) -> None
    """Dispatch each token: NAME tokens go to name validation, COMMENT
    tokens to comment validation; all other token types are ignored."""
    for tok in tokens:
        kind, text, position = tok[0], tok[1], tok[2]
        if kind == tokenize.NAME:
            self.__validate_name(text, position, tok[4])
        elif kind == tokenize.COMMENT:
            self.__validate_comment(text, position)
开发者ID:Shopify,项目名称:shopify_python,代码行数:9,代码来源:shopify_styleguide.py


注:本文中的tokenize.NAME属性示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。