

Python tokenize.NUMBER attribute code examples

This article collects typical usage examples of the tokenize.NUMBER attribute in Python. If you are wondering what tokenize.NUMBER does, how to use it, or want to see it in real code, the selected examples below may help. You can also explore other usage examples from the tokenize module.


The following shows 13 code examples of the tokenize.NUMBER attribute, drawn from open-source projects and sorted by popularity by default.
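For orientation, here is a minimal sketch (not taken from the examples below) that tokenizes a small source string and reports every NUMBER token using the standard library API:

import tokenize
from io import StringIO

source = "x = 3 + 4.5 * 2"
for tok in tokenize.generate_tokens(StringIO(source).readline):
    if tok.type == tokenize.NUMBER:
        print(tok.string, tok.start)
# 3 (1, 4)
# 4.5 (1, 8)
# 2 (1, 14)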

Example 1: decistmt

# Required import: import tokenize [as alias]
# Or: from tokenize import NUMBER [as alias]
def decistmt(tokens):
    """Substitute Decimals for floats in a string of statements.

    Based on an example from the tokenize module docs.
    """
    result = []
    for toknum, tokval, _, _, _  in tokens:
        if toknum == tokenize.NUMBER and '.' in tokval:  # replace NUMBER tokens
            for newtok in [
                (tokenize.NAME, 'Decimal'),
                (tokenize.OP, '('),
                (tokenize.STRING, repr(tokval)),
                (tokenize.OP, ')')
            ]:
                yield newtok
        else:
            yield (toknum, tokval) 
Author: ktraunmueller, Project: Computable, Lines: 19, Source: test_inputtransformer.py
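A quick way to exercise decistmt above, following the pattern from the tokenize module documentation that its docstring mentions (the input string and the printed result are illustrative):

from io import StringIO
import tokenize

src = "print(+21.3e-5 * -.1234 / 81.7)"
tokens = tokenize.generate_tokens(StringIO(src).readline)
print(tokenize.untokenize(decistmt(tokens)))
# Prints roughly: print (+Decimal ('21.3e-5')*-Decimal ('.1234')/Decimal ('81.7'))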

Example 2: get_codepoints

# Required import: import tokenize [as alias]
# Or: from tokenize import NUMBER [as alias]
def get_codepoints(cps):
    results = []
    for cp in cps:
        if not cp.type == tokenize.NUMBER:
            continue
        results.append(int(cp.string, 16))
    return results 
Author: googlefonts, Project: gftools, Lines: 9, Source: gftools-rangify.py
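get_codepoints expects already-tokenized input; a self-contained sketch of how it could be driven (the codepoint line below is made up for illustration):

import tokenize
from io import StringIO

line = "0x0041 0x005A  # A..Z\n"
toks = list(tokenize.generate_tokens(StringIO(line).readline))
print(get_codepoints(toks))  # [65, 90]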

Example 3: process_tokens

# Required import: import tokenize [as alias]
# Or: from tokenize import NUMBER [as alias]
def process_tokens(self, tokens):
        for idx, (tok_type, token, start, _, _) in enumerate(tokens):
            if tok_type == tokenize.NUMBER:
                if token.lower().endswith('l'):
                    # This has a different semantic than lowercase-l-suffix.
                    self.add_message('long-suffix', line=start[0])
                elif _is_old_octal(token):
                    self.add_message('old-octal-literal', line=start[0])
            if tokens[idx][1] == '<>':
                self.add_message('old-ne-operator', line=tokens[idx][2][0])
            if tok_type == tokenize.STRING and token.startswith('b'):
                if any(elem for elem in token if ord(elem) > 127):
                    self.add_message('non-ascii-bytes-literal', line=start[0])
            if tok_type == tokenize.STRING and token.startswith('ur'):
                self.add_message('invalid-unicode-literal', line=start[0]) 
Author: AtomLinter, Project: linter-pylama, Lines: 17, Source: python3.py

Example 4: _filter_header

# Required import: import tokenize [as alias]
# Or: from tokenize import NUMBER [as alias]
def _filter_header(s):
    """Clean up 'L' in npz header ints.

    Cleans up the 'L' in strings representing integers. Needed to allow npz
    headers produced in Python2 to be read in Python3.

    Parameters
    ----------
    s : byte string
        Npy file header.

    Returns
    -------
    header : str
        Cleaned up header.

    """
    import tokenize
    if sys.version_info[0] >= 3:
        from io import StringIO
    else:
        from StringIO import StringIO

    tokens = []
    last_token_was_number = False
    # adding newline as python 2.7.5 workaround
    string = asstr(s) + "\n"
    for token in tokenize.generate_tokens(StringIO(string).readline):
        token_type = token[0]
        token_string = token[1]
        if (last_token_was_number and
                token_type == tokenize.NAME and
                token_string == "L"):
            continue
        else:
            tokens.append(token)
        last_token_was_number = (token_type == tokenize.NUMBER)
    # removing newline (see above) as python 2.7.5 workaround
    return tokenize.untokenize(tokens)[:-1] 
Author: Frank-qlu, Project: recruit, Lines: 41, Source: format.py

Example 5: process_tokens

# Required import: import tokenize [as alias]
# Or: from tokenize import NUMBER [as alias]
def process_tokens(self, tokens):
        for idx, (tok_type, token, start, _, _) in enumerate(tokens):
            if tok_type == tokenize.NUMBER:
                if token.lower().endswith("l"):
                    # This has a different semantic than lowercase-l-suffix.
                    self.add_message("long-suffix", line=start[0])
                elif _is_old_octal(token):
                    self.add_message("old-octal-literal", line=start[0])
            if tokens[idx][1] == "<>":
                self.add_message("old-ne-operator", line=tokens[idx][2][0])
            if tok_type == tokenize.STRING and token.startswith("b"):
                if any(elem for elem in token if ord(elem) > 127):
                    self.add_message("non-ascii-bytes-literal", line=start[0]) 
Author: sofia-netsurv, Project: python-netsurv, Lines: 15, Source: python3.py

Example 6: _filter_header

# Required import: import tokenize [as alias]
# Or: from tokenize import NUMBER [as alias]
def _filter_header(s):
    """Clean up 'L' in npz header ints.

    Cleans up the 'L' in strings representing integers. Needed to allow npz
    headers produced in Python2 to be read in Python3.

    Parameters
    ----------
    s : byte string
        Npy file header.

    Returns
    -------
    header : str
        Cleaned up header.

    """
    import tokenize
    if sys.version_info[0] >= 3:
        from io import StringIO
    else:
        from StringIO import StringIO

    tokens = []
    last_token_was_number = False
    for token in tokenize.generate_tokens(StringIO(asstr(s)).read):
        token_type = token[0]
        token_string = token[1]
        if (last_token_was_number and
                token_type == tokenize.NAME and
                token_string == "L"):
            continue
        else:
            tokens.append(token)
        last_token_was_number = (token_type == tokenize.NUMBER)
    return tokenize.untokenize(tokens) 
Author: ryfeus, Project: lambda-packs, Lines: 38, Source: format.py

Example 7: _ProcessToken

# Required import: import tokenize [as alias]
# Or: from tokenize import NUMBER [as alias]
def _ProcessToken(self, type, token, spos, epos, line):
        srow, scol = spos
        erow, ecol = epos
        self.GetText() # Prime us.
        linenum = srow - 1 # Lines zero based for us too.
        realCharPos = self.lineOffsets[linenum] + scol
        numskipped = realCharPos - self.lastPos
        if numskipped==0:
            pass
        elif numskipped==1:
            self.attrs.append(axdebug.SOURCETEXT_ATTR_COMMENT)
        else:
            self.attrs.append((axdebug.SOURCETEXT_ATTR_COMMENT, numskipped))
        kwSize = len(token)
        self.lastPos = realCharPos + kwSize
        attr = 0

        if type==tokenize.NAME:
            if token in _keywords:
                attr = axdebug.SOURCETEXT_ATTR_KEYWORD
        elif type==tokenize.STRING:
            attr = axdebug.SOURCETEXT_ATTR_STRING
        elif type==tokenize.NUMBER:
            attr = axdebug.SOURCETEXT_ATTR_NUMBER
        elif type==tokenize.OP:
            attr = axdebug.SOURCETEXT_ATTR_OPERATOR
        elif type==tokenize.COMMENT:
            attr = axdebug.SOURCETEXT_ATTR_COMMENT
        # else attr remains zero...
        if kwSize==0:
            pass
        elif kwSize==1:
            self.attrs.append(attr)
        else:
            self.attrs.append((attr, kwSize)) 
Author: IronLanguages, Project: ironpython2, Lines: 37, Source: codecontainer.py

Example 8: test_python_tokenize

# Required import: import tokenize [as alias]
# Or: from tokenize import NUMBER [as alias]
def test_python_tokenize():
    code = "a + (foo * -1)"
    tokens = list(python_tokenize(code))
    expected = [(tokenize.NAME, "a", Origin(code, 0, 1)),
                (tokenize.OP, "+", Origin(code, 2, 3)),
                (tokenize.OP, "(", Origin(code, 4, 5)),
                (tokenize.NAME, "foo", Origin(code, 5, 8)),
                (tokenize.OP, "*", Origin(code, 9, 10)),
                (tokenize.OP, "-", Origin(code, 11, 12)),
                (tokenize.NUMBER, "1", Origin(code, 12, 13)),
                (tokenize.OP, ")", Origin(code, 13, 14))]
    assert tokens == expected

    code2 = "a + (b"
    tokens2 = list(python_tokenize(code2))
    expected2 = [(tokenize.NAME, "a", Origin(code2, 0, 1)),
                 (tokenize.OP, "+", Origin(code2, 2, 3)),
                 (tokenize.OP, "(", Origin(code2, 4, 5)),
                 (tokenize.NAME, "b", Origin(code2, 5, 6))]
    assert tokens2 == expected2

    from nose.tools import assert_raises
    assert_raises(PatsyError, list, python_tokenize("a b # c"))

    from nose.tools import assert_raises
    assert_raises(PatsyError, list, python_tokenize("a b \"c")) 
Author: birforce, Project: vnpy_crypto, Lines: 28, Source: tokens.py

Example 9: getLineOfTokens

# Required import: import tokenize [as alias]
# Or: from tokenize import NUMBER [as alias]
def getLineOfTokens(gen):
    tokens = []
    nextNeg = 0
    token = gen.next()
    if token[0] == tokenize.ENDMARKER:
        return None
    while token[0] != tokenize.NEWLINE and token[0] != tokenize.NL:
        if token[0] == tokenize.COMMENT:
            pass
        elif token[0] == tokenize.OP and token[1] == '-':
            nextNeg = 1
        elif token[0] == tokenize.NUMBER:
            if nextNeg:
                tokens.append(-eval(token[1]))
                nextNeg = 0
            else:
                tokens.append(eval(token[1]))
        elif token[0] == tokenize.STRING:
            tokens.append(eval(token[1]))
        elif token[0] == tokenize.NAME:
            tokens.append(token[1])
        else:
            notify.warning('Ignored token type: %s on line: %s' % (tokenize.tok_name[token[0]], token[2][0]))
        token = gen.next()

    return tokens 
Author: PiratesOnlineRewritten, Project: Pirates-Online-Rewritten, Lines: 28, Source: QuestParser.py
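The example above targets Python 2 (note gen.next()). Purely as an illustration, a rough Python 3 sketch of the same idea, with the project-specific notify warning dropped and names chosen here for clarity, might look like this:

import tokenize
from io import StringIO

def get_line_of_tokens(gen):
    # Collect one logical line, evaluating numbers and strings and
    # folding a preceding unary minus into the following number.
    tokens = []
    negate = False
    token = next(gen)
    if token[0] == tokenize.ENDMARKER:
        return None
    while token[0] not in (tokenize.NEWLINE, tokenize.NL):
        if token[0] == tokenize.OP and token[1] == '-':
            negate = True
        elif token[0] == tokenize.NUMBER:
            value = eval(token[1])
            tokens.append(-value if negate else value)
            negate = False
        elif token[0] == tokenize.STRING:
            tokens.append(eval(token[1]))
        elif token[0] == tokenize.NAME:
            tokens.append(token[1])
        token = next(gen)
    return tokens

gen = tokenize.generate_tokens(StringIO("spawnNPC -3 'north'\n").readline)
print(get_line_of_tokens(gen))  # ['spawnNPC', -3, 'north']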

Example 10: _maybe_parse_basic_type

# Required import: import tokenize [as alias]
# Or: from tokenize import NUMBER [as alias]
def _maybe_parse_basic_type(self):
    """Try to parse a basic type (str, bool, number)."""
    token_value = ''
    # Allow a leading dash to handle negative numbers.
    if self._current_token.value == '-':
      token_value += self._current_token.value
      self._advance()

    basic_type_tokens = [tokenize.NAME, tokenize.NUMBER, tokenize.STRING]
    continue_parsing = self._current_token.kind in basic_type_tokens
    if not continue_parsing:
      return False, None

    while continue_parsing:
      token_value += self._current_token.value

      try:
        value = ast.literal_eval(token_value)
      except Exception as e:  # pylint: disable=broad-except
        err_str = "{}\n    Failed to parse token '{}'"
        self._raise_syntax_error(err_str.format(e, token_value))

      was_string = self._current_token.kind == tokenize.STRING
      self._advance()
      is_string = self._current_token.kind == tokenize.STRING
      continue_parsing = was_string and is_string

    return True, value 
Author: google, Project: gin-config, Lines: 30, Source: config_parser.py

Example 11: build

# Required import: import tokenize [as alias]
# Or: from tokenize import NUMBER [as alias]
def build(data):
    result = []
    for num, val, _, _, _  in split(StringIO(data).readline):
        if num == NUMBER: 
            result.extend([(NAME, 'Num'), (OP, '('),
            (STRING, str(val)), (OP, ')')])
        elif num == NAME and not val.startswith('_'):
            result.extend([(NAME, 'Chk'), 
            (OP, '('), (OP, "'"),
            (STRING, str(val)), (OP, "'"), (OP, ')')])
        else:
            result.append((num, val))
    return untokenize(result) 
Author: iogf, Project: lax, Lines: 15, Source: parser.py

Example 12: preprocess

# Required import: import tokenize [as alias]
# Or: from tokenize import NUMBER [as alias]
def preprocess(tokentype, tokenval):
    if tokentype == tokenize.NUMBER:
        return number_token

    elif tokentype == tokenize.INDENT:
        return "<indent>"

    elif tokentype == tokenize.DEDENT:
        return "<dedent>"

    # Need to replace spaces with some other character because the ngram processor
    # splits on spaces
    return tokenval.replace(" ", "§").replace("\n", "<newline>") 
Author: uclnlp, Project: pycodesuggest, Lines: 15, Source: reader.py

Example 13: preprocess

# Required import: import tokenize [as alias]
# Or: from tokenize import NUMBER [as alias]
def preprocess(tokentype, tokenval):
    if tokentype == tokenize.NUMBER:
        return number_token

    elif tokentype == tokenize.INDENT:
        return indent_token

    elif tokentype == tokenize.DEDENT:
        return dedent_token

    return tokenval 
Author: uclnlp, Project: pycodesuggest, Lines: 13, Source: pyreader.py
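Both preprocess variants above rely on placeholder strings (number_token and friends) defined elsewhere in their projects; a minimal, self-contained way to run the second variant (Example 13) over a token stream, with placeholder values assumed here for illustration, could be:

import tokenize
from io import StringIO

# Placeholder values assumed for illustration; the real project defines these elsewhere.
number_token = "<number>"
indent_token = "<indent>"
dedent_token = "<dedent>"

src = "x = 42\n"
print([preprocess(tok.type, tok.string)
       for tok in tokenize.generate_tokens(StringIO(src).readline)])
# ['x', '=', '<number>', '\n', '']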


Note: The tokenize.NUMBER attribute examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors. Please consult each project's license before distributing or using the code; do not reproduce without permission.