

Python token.string Method Code Examples

This article collects typical usage examples of the token.string method in Python. If you are wondering what exactly token.string does, how to use it, or what real examples look like, the hand-picked code examples below may help. You can also explore further usage examples from the token module, where this method lives.


Seven code examples of the token.string method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
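Before the examples, a note on what is being demonstrated: string is an attribute of the TokenInfo named tuple produced by the standard-library tokenize module, holding the raw source text of each token. A minimal sketch of reading it (standard library only, my own illustration rather than one of the collected examples):

import io
import tokenize

source = b"x = 1 + 2\n"
for tok in tokenize.tokenize(io.BytesIO(source).readline):
    # tok is a TokenInfo named tuple; tok.string is the raw token text.
    print(tokenize.tok_name[tok.type], repr(tok.string))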

Example 1: __repr__

# Required import: import token [as alias]
# Or: from token import string [as alias]
def __repr__(self):
        # tok_name (from the token module) maps numeric token types to their names.
        annotated_type = '%d (%s)' % (self.type, tok_name[self.type])
        return ('TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)' %
                self._replace(type=annotated_type))
Developer: war-and-code, Project: jawfish, Lines of code: 6, Source: tokenize.py
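As a quick check (a hypothetical snippet, not taken from the jawfish project), printing a TokenInfo produced by the standard tokenize module exercises this __repr__ and shows the numeric type annotated with its name:

import io
import tokenize

tok = next(tokenize.tokenize(io.BytesIO(b"pass\n").readline))
print(tok)
# e.g. TokenInfo(type=63 (ENCODING), string='utf-8', start=(0, 0), end=(0, 0), line='')
# (the numeric value of ENCODING varies across Python versions)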

Example 2: exact_type

# Required import: import token [as alias]
# Or: from token import string [as alias]
def exact_type(self):
        # Generic OP tokens carry the operator text in self.string; map it
        # to the specific token type (PLUS, MINUS, ...) when known.
        if self.type == OP and self.string in EXACT_TOKEN_TYPES:
            return EXACT_TOKEN_TYPES[self.string]
        else:
            return self.type
Developer: war-and-code, Project: jawfish, Lines of code: 7, Source: tokenize.py
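For illustration (again a standard-library snippet of my own, not from the project above), exact_type refines a generic OP token into its specific operator type:

import io
import tokenize
from token import OP, tok_name

for tok in tokenize.tokenize(io.BytesIO(b"a + b\n").readline):
    if tok.type == OP:
        print(repr(tok.string), tok_name[tok.exact_type])  # '+' PLUS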

Example 3: tokenize

# Required import: import token [as alias]
# Or: from token import string [as alias]
def tokenize(readline):
    """
    The tokenize() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects.  Each call to the function
    should return one line of input as bytes.  Alternatively, readline
    can be a callable function terminating with StopIteration:
        readline = open(myfile, 'rb').__next__  # Example of alternate readline

    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found.  The line passed is the
    logical line; continuation lines are included.

    The first token sequence will always be an ENCODING token
    which tells you which encoding was used to decode the bytes stream.
    """
    # This import is here to avoid problems when the itertools module is not
    # built yet and tokenize is imported.
    from itertools import chain, repeat
    encoding, consumed = detect_encoding(readline)
    rl_gen = iter(readline, b"")
    empty = repeat(b"")
    return _tokenize(chain(consumed, rl_gen, empty).__next__, encoding) 
Developer: war-and-code, Project: jawfish, Lines of code: 28, Source: tokenize.py
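A short usage sketch (my addition, standard library only): feed tokenize() the readline of an in-memory bytes buffer and confirm that the ENCODING token comes first, as the docstring promises:

import io
import tokenize

code = b"def f():\n    return 42\n"
tokens = list(tokenize.tokenize(io.BytesIO(code).readline))
assert tokens[0].type == tokenize.ENCODING
print(tokens[0].string)  # 'utf-8'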

Example 4: _all_string_prefixes

# Required import: import token [as alias]
# Or: from token import string [as alias]
def _all_string_prefixes():
    # The valid string prefixes. Only contain the lower case versions,
    #  and don't contain any permutations (include 'fr', but not
    #  'rf'). The various permutations will be generated.
    _valid_string_prefixes = ['b', 'r', 'u', 'f', 'br', 'fr']
    # if we add binary f-strings, add: ['fb', 'fbr']
    result = set([''])
    for prefix in _valid_string_prefixes:
        for t in _itertools.permutations(prefix):
            # create a list with upper and lower versions of each
            #  character
            for u in _itertools.product(*[(c, c.upper()) for c in t]):
                result.add(''.join(u))
    return result 
Developer: Xython, Project: YAPyPy, Lines of code: 16, Source: yapypy_tokenize36.py
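To see what this helper actually produces, here is the same logic re-run standalone with the public itertools module (_itertools in the snippet above is simply tokenize's private alias for it):

import itertools

prefixes = {''}
for prefix in ['b', 'r', 'u', 'f', 'br', 'fr']:
    for t in itertools.permutations(prefix):
        for u in itertools.product(*[(c, c.upper()) for c in t]):
            prefixes.add(''.join(u))

# Every case/order permutation of 'br' is present:
print(sorted(p for p in prefixes if p.lower() in ('br', 'rb')))
# ['BR', 'Br', 'RB', 'Rb', 'bR', 'br', 'rB', 'rb']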

Example 5: _compile

# Required import: import token [as alias]
# Or: from token import string [as alias]
def _compile(expr):
    return re.compile(expr, re.UNICODE)

# Note that since _all_string_prefixes includes the empty string,
#  StringPrefix can be the empty string (making it optional). 
Developer: Xython, Project: YAPyPy, Lines of code: 7, Source: yapypy_tokenize36.py
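A hypothetical use of the helper (the alternation below is abbreviated by hand; the real pattern is built from the full _all_string_prefixes() result), showing how the empty alternative makes the prefix optional:

import re

def _compile(expr):
    return re.compile(expr, re.UNICODE)

pattern = _compile("(?:rb|Rb|br|)'")  # empty alternative last
assert pattern.match("rb'")
assert pattern.match("'")  # bare quote: the empty prefix matched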

Example 6: tokenize

# Required import: import token [as alias]
# Or: from token import string [as alias]
def tokenize(readline):
    """
    The tokenize() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects.  Each call to the function
    should return one line of input as bytes.  Alternatively, readline
    can be a callable function terminating with StopIteration:
        readline = open(myfile, 'rb').__next__  # Example of alternate readline

    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found.  The line passed is the
    logical line; continuation lines are included.

    The first token sequence will always be an ENCODING token
    which tells you which encoding was used to decode the bytes stream.
    """
    # This import is here to avoid problems when the itertools module is not
    # built yet and tokenize is imported.
    from itertools import chain, repeat
    encoding, consumed = detect_encoding(readline)
    rl_gen = iter(readline, b"")
    empty = repeat(b"")
    return _tokenize(chain(consumed, rl_gen, empty).__next__, encoding) 
Developer: Xython, Project: YAPyPy, Lines of code: 28, Source: yapypy_tokenize36.py

Example 7: _all_string_prefixes

# Required import: import token [as alias]
# Or: from token import string [as alias]
def _all_string_prefixes():
    # The valid string prefixes. Only contain the lower case versions,
    #  and don't contain any permutations (include 'fr', but not
    #  'rf'). The various permutations will be generated.
    _valid_string_prefixes = ['b', 'r', 'u', 'f', 'br', 'fr']
    # if we add binary f-strings, add: ['fb', 'fbr']
    result = {''}
    for prefix in _valid_string_prefixes:
        for t in _itertools.permutations(prefix):
            # create a list with upper and lower versions of each
            #  character
            for u in _itertools.product(*[(c, c.upper()) for c in t]):
                result.add(''.join(u))
    return result 
Developer: Xython, Project: YAPyPy, Lines of code: 16, Source: yapypy_tokenize37.py


Note: The token.string method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright of the source code remains with them. Please consult each project's License before redistributing or using the code, and do not reproduce this article without permission.