

Python tokenize.NAME Attribute Code Examples

This article collects typical code examples of the tokenize.NAME attribute in Python. If you are unsure what tokenize.NAME is for or how to use it, the curated examples below should help. You can also explore other usage examples from the tokenize module.


The following presents 15 code examples of the tokenize.NAME attribute, sorted by popularity.
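
Before the examples, here is a minimal standalone sketch (not taken from any of the projects below) of what tokenize.NAME matches: it is the token type that the tokenize module assigns to identifiers and keywords.

import io
import tokenize

src = "spam = len(eggs)"
for tok in tokenize.generate_tokens(io.StringIO(src).readline):
    if tok.type == tokenize.NAME:
        print(tok.string)  # spam, len, eggs -- one per line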

Example 1: _has_valid_type_annotation

# Required module: import tokenize [as alias]
# Or: from tokenize import NAME [as alias]
def _has_valid_type_annotation(self, tokens, i):
        """Extended check of PEP-484 type hint presence"""
        if not self._inside_brackets('('):
            return False
        bracket_level = 0
        for token in tokens[i-1::-1]:
            if token[1] == ':':
                return True
            if token[1] == '(':
                return False
            if token[1] == ']':
                bracket_level += 1
            elif token[1] == '[':
                bracket_level -= 1
            elif token[1] == ',':
                if not bracket_level:
                    return False
            elif token[1] == '.':
                continue
            elif token[0] not in (tokenize.NAME, tokenize.STRING):
                return False
        return False 
Author: AtomLinter, Project: linter-pylama, Lines: 24, Source: format.py

Example 2: _replace_booleans

# Required module: import tokenize [as alias]
# Or: from tokenize import NAME [as alias]
def _replace_booleans(tok):
    """Replace ``&`` with ``and`` and ``|`` with ``or`` so that bitwise
    precedence is changed to boolean precedence.

    Parameters
    ----------
    tok : tuple of int, str
        ints correspond to the all caps constants in the tokenize module

    Returns
    -------
    t : tuple of int, str
        Either the input token or the replacement values
    """
    toknum, tokval = tok
    if toknum == tokenize.OP:
        if tokval == '&':
            return tokenize.NAME, 'and'
        elif tokval == '|':
            return tokenize.NAME, 'or'
        return toknum, tokval
    return toknum, tokval 
Author: Frank-qlu, Project: recruit, Lines: 24, Source: expr.py
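
To see _replace_booleans in action, the driver below is a minimal sketch of our own (the rewrite helper is an illustration, not pandas API): it pipes every token of an expression through the function and reassembles the text with tokenize.untokenize.

import io
import tokenize

def rewrite(source):
    # Hypothetical driver: feed each (type, string) pair through
    # _replace_booleans, then rebuild the expression text.
    tokens = tokenize.generate_tokens(io.StringIO(source).readline)
    return tokenize.untokenize(
        _replace_booleans((tok.type, tok.string)) for tok in tokens
    )

print(rewrite("a & b | c"))  # -> a and b or c  (modulo whitespace)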

Example 3: _has_valid_type_annotation

# Required module: import tokenize [as alias]
# Or: from tokenize import NAME [as alias]
def _has_valid_type_annotation(self, tokens, i):
        """Extended check of PEP-484 type hint presence"""
        if not self._inside_brackets("("):
            return False
        # token_info
        # type string start end line
        #  0      1     2    3    4
        bracket_level = 0
        for token in tokens[i - 1 :: -1]:
            if token[1] == ":":
                return True
            if token[1] == "(":
                return False
            if token[1] == "]":
                bracket_level += 1
            elif token[1] == "[":
                bracket_level -= 1
            elif token[1] == ",":
                if not bracket_level:
                    return False
            elif token[1] in (".", "..."):
                continue
            elif token[0] not in (tokenize.NAME, tokenize.STRING, tokenize.NL):
                return False
        return False 
Author: sofia-netsurv, Project: python-netsurv, Lines: 27, Source: format.py

Example 4: scanvars

# Required module: import tokenize [as alias]
# Or: from tokenize import NAME [as alias]
def scanvars(reader, frame, locals):
    """Scan one logical line of Python and look up values of variables used."""
    vars, lasttoken, parent, prefix, value = [], None, None, '', __UNDEF__
    for ttype, token, start, end, line in tokenize.generate_tokens(reader):
        if ttype == tokenize.NEWLINE: break
        if ttype == tokenize.NAME and token not in keyword.kwlist:
            if lasttoken == '.':
                if parent is not __UNDEF__:
                    value = getattr(parent, token, __UNDEF__)
                    vars.append((prefix + token, prefix, value))
            else:
                where, value = lookup(token, frame, locals)
                vars.append((token, where, value))
        elif token == '.':
            prefix += lasttoken + '.'
            parent = value
        else:
            parent, prefix = None, ''
        lasttoken = token
    return vars 
Author: glmcdona, Project: meddle, Lines: 22, Source: cgitb.py
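
For a quick experiment, note that scanvars is an (undocumented) module-level helper of cgitb, so it can be called directly; the sketch below uses our own variable names and assumes a CPython version before 3.13, where the deprecated cgitb module still ships.

import cgitb
import io
import sys

spam = 42
frame = sys._getframe()
reader = io.StringIO("total = spam + 1\n").readline
for name, where, value in cgitb.scanvars(reader, frame, frame.f_locals):
    print(name, where, value)
# 'spam' resolves ("spam local 42"); 'total' is unbound, so where is
# None and value is cgitb's __UNDEF__ sentinel.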

Example 5: run

# Required module: import tokenize [as alias]
# Or: from tokenize import NAME [as alias]
def run(self):
        OPENERS=('class', 'def', 'for', 'if', 'try', 'while')
        INDENT=tokenize.INDENT
        NAME=tokenize.NAME

        save_tabsize = tokenize.tabsize
        tokenize.tabsize = self.tabwidth
        try:
            try:
                # token_generator is defined elsewhere in AutoIndent.py
                # (presumably a thin wrapper around tokenize.generate_tokens)
                for (typ, token, start, end, line) in token_generator(self.readline):
                    if typ == NAME and token in OPENERS:
                        self.blkopenline = line
                    elif typ == INDENT and self.blkopenline:  # 'typ', not the builtin 'type' (bug in the original)
                        self.indentedline = line
                        break

            except (tokenize.TokenError, IndentationError):
                # since we cut off the tokenizer early, we can trigger
                # spurious errors
                pass
        finally:
            tokenize.tabsize = save_tabsize
        return self.blkopenline, self.indentedline 
Author: IronLanguages, Project: ironpython2, Lines: 25, Source: AutoIndent.py

Example 6: add_token

# Required module: import tokenize [as alias]
# Or: from tokenize import NAME [as alias]
def add_token(self, token_type, token):
        if self.done:
            return
        self.tokens.append((token_type, token))
        if token in ["(", "{", "["]:
            self.paren_depth += 1
        if token in [")", "}", "]"]:
            self.paren_depth -= 1
        assert self.paren_depth >= 0
        if not self.started:
            if token == "(":
                self.started = True
            else:
                assert token_type == tokenize.NAME or token == "."
                self.func.append(token)
        if self.started and self.paren_depth == 0:
            self.done = True

# This is not a very general function -- it assumes that all references to the
# given object are of the form '<obj_name>.something(method call)'. 
Author: birforce, Project: vnpy_crypto, Lines: 22, Source: eval.py
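
Since add_token is written as a method, trying it out needs a small harness supplying the state it expects on self; the CallCollector class below is our own minimal sketch, not part of the original eval.py.

import io
import tokenize

class CallCollector:
    def __init__(self):
        # State that add_token reads and writes on *self*.
        self.tokens = []
        self.func = []
        self.started = False
        self.done = False
        self.paren_depth = 0

    add_token = add_token  # reuse the function above as a method

cc = CallCollector()
for tok in tokenize.generate_tokens(io.StringIO("model.fit(x, y=1)").readline):
    cc.add_token(tok.type, tok.string)
print("".join(cc.func), cc.done)  # -> model.fit True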

Example 7: decistmt

# Required module: import tokenize [as alias]
# Or: from tokenize import NAME [as alias]
def decistmt(tokens):
    """Substitute Decimals for floats in a string of statements.

    Based on an example from the tokenize module docs.
    """
    for toknum, tokval, _, _, _  in tokens:
        if toknum == tokenize.NUMBER and '.' in tokval:  # replace NUMBER tokens
            for newtok in [
                (tokenize.NAME, 'Decimal'),
                (tokenize.OP, '('),
                (tokenize.STRING, repr(tokval)),
                (tokenize.OP, ')')
            ]:
                yield newtok
        else:
            yield (toknum, tokval) 
Author: ktraunmueller, Project: Computable, Lines: 19, Source: test_inputtransformer.py
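
Driving the generator end to end follows the example in the tokenize docs: tokenize the source, pass the stream through decistmt, and reassemble it with tokenize.untokenize.

import io
import tokenize

src = "print(+21.3e-5*-.1234/81.7)"
tokens = tokenize.generate_tokens(io.StringIO(src).readline)
print(tokenize.untokenize(decistmt(tokens)))
# roughly: print (+Decimal ('21.3e-5')*-Decimal ('.1234')/Decimal ('81.7'))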

Example 8: __waiting

# Required module: import tokenize [as alias]
# Or: from tokenize import NAME [as alias]
def __waiting(self, ttype, tstring, lineno):
        opts = self.__options
        # Do docstring extractions, if enabled
        if opts.docstrings and not opts.nodocstrings.get(self.__curfile):
            # module docstring?
            if self.__freshmodule:
                if ttype == tokenize.STRING:
                    self.__addentry(safe_eval(tstring), lineno, isdocstring=1)
                    self.__freshmodule = 0
                elif ttype not in (tokenize.COMMENT, tokenize.NL):
                    self.__freshmodule = 0
                return
            # class docstring?
            if ttype == tokenize.NAME and tstring in ('class', 'def'):
                self.__state = self.__suiteseen
                return
        if ttype == tokenize.NAME and tstring in opts.keywords:
            self.__state = self.__keywordseen 
Author: aliyun, Project: oss-ftp, Lines: 20, Source: pygettext.py

Example 9: enumerate_keyword_args

# Required module: import tokenize [as alias]
# Or: from tokenize import NAME [as alias]
def enumerate_keyword_args(tokens):
    """
    Iterates over *tokens* and returns a dictionary with function names as the
    keys and lists of keyword arguments as the values.
    """
    keyword_args = {}
    inside_function = False
    for index, tok in enumerate(tokens):
        token_type = tok[0]
        token_string = tok[1]
        if token_type == tokenize.NEWLINE:
            inside_function = False
        if token_type == tokenize.NAME:
            if token_string == "def":
                function_name = tokens[index + 1][1]
                inside_function = function_name
                keyword_args.update({function_name: []})
            elif inside_function:
                if tokens[index + 1][1] == '=':  # keyword argument
                    keyword_args[function_name].append(token_string)
    return keyword_args 
Author: riusksk, Project: shellsploit-library, Lines: 23, Source: analyze.py
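
A short usage sketch (the input source is our own): only parameters that carry a default value are collected.

import io
import tokenize

src = "def greet(name, greeting='hi', punct='!'):\n    pass\n"
tokens = list(tokenize.generate_tokens(io.StringIO(src).readline))
print(enumerate_keyword_args(tokens))
# {'greet': ['greeting', 'punct']}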

Example 10: obfuscatable_class

# Required module: import tokenize [as alias]
# Or: from tokenize import NAME [as alias]
def obfuscatable_class(tokens, index, **kwargs):
    """
    Given a list of *tokens* and an *index* (representing the current position),
    returns the token string if it is a class name that can be safely
    obfuscated.
    """
    tok = tokens[index]
    token_type = tok[0]
    token_string = tok[1]
    if index > 0:
        prev_tok = tokens[index - 1]
    else:  # Pretend it's a newline (for simplicity)
        prev_tok = (54, '\n', (1, 1), (1, 2), '#\n')
    prev_tok_string = prev_tok[1]
    if token_type != tokenize.NAME:
        return None  # Skip this token
    if token_string.startswith('__'):  # Don't mess with specials
        return None
    if prev_tok_string == "class":
        return token_string 
Author: riusksk, Project: shellsploit-library, Lines: 22, Source: obfuscate.py
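
A usage sketch (the input source is our own): every index is probed, and only the name directly after the class keyword survives the filters.

import io
import tokenize

src = "class Spam:\n    def eggs(self):\n        pass\n"
tokens = [(t.type, t.string, t.start, t.end, t.line)
          for t in tokenize.generate_tokens(io.StringIO(src).readline)]
names = (obfuscatable_class(tokens, i) for i in range(len(tokens)))
print([n for n in names if n])  # ['Spam']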

Example 11: obfuscatable_function

# Required module: import tokenize [as alias]
# Or: from tokenize import NAME [as alias]
def obfuscatable_function(tokens, index, **kwargs):
    """
    Given a list of *tokens* and an *index* (representing the current position),
    returns the token string if it is a function or method name that can be
    safely obfuscated.
    """
    tok = tokens[index]
    token_type = tok[0]
    token_string = tok[1]
    if index > 0:
        prev_tok = tokens[index - 1]
    else:  # Pretend it's a newline (for simplicity)
        prev_tok = (54, '\n', (1, 1), (1, 2), '#\n')
    prev_tok_string = prev_tok[1]
    if token_type != tokenize.NAME:
        return None  # Skip this token
    if token_string.startswith('__'):  # Don't mess with specials
        return None
    if prev_tok_string == "def":
        return token_string 
Author: riusksk, Project: shellsploit-library, Lines: 22, Source: obfuscate.py

Example 12: obfuscate_class

# Required module: import tokenize [as alias]
# Or: from tokenize import NAME [as alias]
def obfuscate_class(tokens, index, replace, replacement, *args):
    """
    If the token string (a class) inside *tokens[index]* matches *replace*,
    return *replacement*.
    """
    def return_replacement(replacement):
        CLASS_REPLACEMENTS[replacement] = replace
        return replacement
    tok = tokens[index]
    token_type = tok[0]
    token_string = tok[1]
    prev_tok = tokens[index - 1]
    prev_tok_string = prev_tok[1]
    if token_type != tokenize.NAME:
        return None  # Skip this token
    if token_string.startswith('__'):
        return None
    if prev_tok_string != '.':
        if token_string == replace:
            return return_replacement(replacement) 
Author: riusksk, Project: shellsploit-library, Lines: 22, Source: obfuscate.py
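
The function expects a module-level CLASS_REPLACEMENTS registry (defined elsewhere in the source obfuscate.py); the sketch below declares one locally so the call can run, and starts at index 1 because the function reads tokens[index - 1].

import io
import tokenize

CLASS_REPLACEMENTS = {}  # normally lives in obfuscate.py

src = "class Spam:\n    pass\n\nx = Spam()\n"
tokens = [(t.type, t.string, t.start, t.end, t.line)
          for t in tokenize.generate_tokens(io.StringIO(src).readline)]
for i in range(1, len(tokens)):
    if obfuscate_class(tokens, i, "Spam", "O_1"):
        print("replace token at", tokens[i][2])  # both 'Spam' occurrences
print(CLASS_REPLACEMENTS)  # {'O_1': 'Spam'}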

Example 13: obfuscate_unique

# Required module: import tokenize [as alias]
# Or: from tokenize import NAME [as alias]
def obfuscate_unique(tokens, index, replace, replacement, *args):
    """
    If the token string (a unique value anywhere) inside *tokens[index]*
    matches *replace*, return *replacement*.

    .. note::

        This function is only for replacing absolutely unique occurrences of
        *replace* (where we don't have to worry about their position).
    """
    def return_replacement(replacement):
        UNIQUE_REPLACEMENTS[replacement] = replace
        return replacement
    tok = tokens[index]
    token_type = tok[0]
    token_string = tok[1]
    if token_type != tokenize.NAME:
        return None  # Skip this token
    if token_string == replace:
        return return_replacement(replacement) 
Author: riusksk, Project: shellsploit-library, Lines: 22, Source: obfuscate.py

Example 14: enumerate_keyword_args

# Required module: import tokenize [as alias]
# Or: from tokenize import NAME [as alias]
def enumerate_keyword_args(tokens):
    """
    Iterates over *tokens* and returns a dictionary with function names as the
    keys and lists of keyword arguments as the values.
    """
    keyword_args = {}
    inside_function = False
    for index, tok in enumerate(tokens):
        token_type = tok[0]
        token_string = tok[1]
        if token_type == tokenize.NEWLINE:
            inside_function = False
        if token_type == tokenize.NAME:
            if token_string == "def":
                function_name = tokens[index+1][1]
                inside_function = function_name
                keyword_args.update({function_name: []})
            elif inside_function:
                if tokens[index+1][1] == '=': # keyword argument
                    keyword_args[function_name].append(token_string)
    return keyword_args 
Author: liftoff, Project: pyminifier, Lines: 23, Source: analyze.py

Example 15: process_tokens

# Required module: import tokenize [as alias]
# Or: from tokenize import NAME [as alias]
def process_tokens(self, tokens):
        # type: (typing.Sequence[typing.Tuple]) -> None
        for _type, string, start, _, line in tokens:
            if _type == tokenize.NAME:
                self.__validate_name(string, start, line)
            elif _type == tokenize.COMMENT:
                self.__validate_comment(string, start) 
Author: Shopify, Project: shopify_python, Lines: 9, Source: shopify_styleguide.py


Note: The tokenize.NAME examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.