

Python tokenize.NUMBER Attribute Code Examples

This article collects typical usage examples of the tokenize.NUMBER attribute in Python. If you are wondering what tokenize.NUMBER does, how to use it, or what real-world code that uses it looks like, the curated examples below may help. You can also explore further usage examples from the tokenize module, where this attribute is defined.


The following shows 13 code examples of the tokenize.NUMBER attribute, drawn from open-source projects and sorted by popularity by default.
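
Before the project-specific examples, here is a minimal, self-contained sketch of what tokenize.NUMBER is: it is the token type that the tokenize module assigns to numeric literals, so you can filter a token stream for numbers. The source string below is only illustrative.

import tokenize
from io import StringIO

source = "answer = 42 + 3.14\n"
for tok in tokenize.generate_tokens(StringIO(source).readline):
    if tok.type == tokenize.NUMBER:
        print(tok.string)  # prints 42, then 3.14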

Example 1: decistmt

# Required import: import tokenize [as alias]
# Or: from tokenize import NUMBER [as alias]
def decistmt(tokens):
    """Substitute Decimals for floats in a string of statements.

    Based on an example from the tokenize module docs.
    """
    result = []
    for toknum, tokval, _, _, _  in tokens:
        if toknum == tokenize.NUMBER and '.' in tokval:  # replace NUMBER tokens
            for newtok in [
                (tokenize.NAME, 'Decimal'),
                (tokenize.OP, '('),
                (tokenize.STRING, repr(tokval)),
                (tokenize.OP, ')')
            ]:
                yield newtok
        else:
            yield (toknum, tokval) 
Developer: ktraunmueller | Project: Computable | Lines: 19 | Source file: test_inputtransformer.py
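
A minimal usage sketch for the generator above, following the tokenize module docs example it is based on; the source string is only illustrative, and untokenize may emit slightly different whitespace:

import tokenize
from io import StringIO

src = "print(+21.3e-5 * -.1234 / 81.7)"
tokens = tokenize.generate_tokens(StringIO(src).readline)
print(tokenize.untokenize(decistmt(tokens)))
# roughly: print (+Decimal ('21.3e-5')*-Decimal ('.1234')/Decimal ('81.7'))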

Example 2: get_codepoints

# Required import: import tokenize [as alias]
# Or: from tokenize import NUMBER [as alias]
def get_codepoints(cps):
    results = []
    for cp in cps:
        if not cp.type == tokenize.NUMBER:
            continue
        results.append(int(cp.string, 16))
    return results 
Developer: googlefonts | Project: gftools | Lines: 9 | Source file: gftools-rangify.py

Example 3: process_tokens

# Required import: import tokenize [as alias]
# Or: from tokenize import NUMBER [as alias]
def process_tokens(self, tokens):
        for idx, (tok_type, token, start, _, _) in enumerate(tokens):
            if tok_type == tokenize.NUMBER:
                if token.lower().endswith('l'):
                    # This has a different semantic than lowercase-l-suffix.
                    self.add_message('long-suffix', line=start[0])
                elif _is_old_octal(token):
                    self.add_message('old-octal-literal', line=start[0])
            if tokens[idx][1] == '<>':
                self.add_message('old-ne-operator', line=tokens[idx][2][0])
            if tok_type == tokenize.STRING and token.startswith('b'):
                if any(elem for elem in token if ord(elem) > 127):
                    self.add_message('non-ascii-bytes-literal', line=start[0])
            if tok_type == tokenize.STRING and token.startswith('ur'):
                self.add_message('invalid-unicode-literal', line=start[0]) 
Developer: AtomLinter | Project: linter-pylama | Lines: 17 | Source file: python3.py

Example 4: _filter_header

# Required import: import tokenize [as alias]
# Or: from tokenize import NUMBER [as alias]
def _filter_header(s):
    """Clean up 'L' in npz header ints.

    Cleans up the 'L' in strings representing integers. Needed to allow npz
    headers produced in Python2 to be read in Python3.

    Parameters
    ----------
    s : byte string
        Npy file header.

    Returns
    -------
    header : str
        Cleaned up header.

    """
    import tokenize
    if sys.version_info[0] >= 3:
        from io import StringIO
    else:
        from StringIO import StringIO

    tokens = []
    last_token_was_number = False
    # adding newline as python 2.7.5 workaround
    string = asstr(s) + "\n"
    for token in tokenize.generate_tokens(StringIO(string).readline):
        token_type = token[0]
        token_string = token[1]
        if (last_token_was_number and
                token_type == tokenize.NAME and
                token_string == "L"):
            continue
        else:
            tokens.append(token)
        last_token_was_number = (token_type == tokenize.NUMBER)
    # removing newline (see above) as python 2.7.5 workaround
    return tokenize.untokenize(tokens)[:-1] 
Developer: Frank-qlu | Project: recruit | Lines: 41 | Source file: format.py
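
A minimal usage sketch for the helper above; the header string is only illustrative, and _filter_header also needs sys and numpy.compat's asstr in scope, as in the original numpy module:

header = "{'descr': '<i8', 'fortran_order': False, 'shape': (3L, 4L)}"
print(_filter_header(header))
# The Python-2-style 'L' suffixes after the NUMBER tokens are dropped;
# untokenize may leave extra spaces where the removed 'L' names were.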

Example 5: process_tokens

# Required import: import tokenize [as alias]
# Or: from tokenize import NUMBER [as alias]
def process_tokens(self, tokens):
        for idx, (tok_type, token, start, _, _) in enumerate(tokens):
            if tok_type == tokenize.NUMBER:
                if token.lower().endswith("l"):
                    # This has a different semantic than lowercase-l-suffix.
                    self.add_message("long-suffix", line=start[0])
                elif _is_old_octal(token):
                    self.add_message("old-octal-literal", line=start[0])
            if tokens[idx][1] == "<>":
                self.add_message("old-ne-operator", line=tokens[idx][2][0])
            if tok_type == tokenize.STRING and token.startswith("b"):
                if any(elem for elem in token if ord(elem) > 127):
                    self.add_message("non-ascii-bytes-literal", line=start[0]) 
Developer: sofia-netsurv | Project: python-netsurv | Lines: 15 | Source file: python3.py

Example 6: _filter_header

# Required import: import tokenize [as alias]
# Or: from tokenize import NUMBER [as alias]
def _filter_header(s):
    """Clean up 'L' in npz header ints.

    Cleans up the 'L' in strings representing integers. Needed to allow npz
    headers produced in Python2 to be read in Python3.

    Parameters
    ----------
    s : byte string
        Npy file header.

    Returns
    -------
    header : str
        Cleaned up header.

    """
    import tokenize
    if sys.version_info[0] >= 3:
        from io import StringIO
    else:
        from StringIO import StringIO

    tokens = []
    last_token_was_number = False
    for token in tokenize.generate_tokens(StringIO(asstr(s)).read):
        token_type = token[0]
        token_string = token[1]
        if (last_token_was_number and
                token_type == tokenize.NAME and
                token_string == "L"):
            continue
        else:
            tokens.append(token)
        last_token_was_number = (token_type == tokenize.NUMBER)
    return tokenize.untokenize(tokens) 
Developer: ryfeus | Project: lambda-packs | Lines: 38 | Source file: format.py

Example 7: _ProcessToken

# Required import: import tokenize [as alias]
# Or: from tokenize import NUMBER [as alias]
def _ProcessToken(self, type, token, spos, epos, line):
        srow, scol = spos
        erow, ecol = epos
        self.GetText() # Prime us.
        linenum = srow - 1 # Lines zero based for us too.
        realCharPos = self.lineOffsets[linenum] + scol
        numskipped = realCharPos - self.lastPos
        if numskipped==0:
            pass
        elif numskipped==1:
            self.attrs.append(axdebug.SOURCETEXT_ATTR_COMMENT)
        else:
            self.attrs.append((axdebug.SOURCETEXT_ATTR_COMMENT, numskipped))
        kwSize = len(token)
        self.lastPos = realCharPos + kwSize
        attr = 0

        if type==tokenize.NAME:
            if token in _keywords:
                attr = axdebug.SOURCETEXT_ATTR_KEYWORD
        elif type==tokenize.STRING:
            attr = axdebug.SOURCETEXT_ATTR_STRING
        elif type==tokenize.NUMBER:
            attr = axdebug.SOURCETEXT_ATTR_NUMBER
        elif type==tokenize.OP:
            attr = axdebug.SOURCETEXT_ATTR_OPERATOR
        elif type==tokenize.COMMENT:
            attr = axdebug.SOURCETEXT_ATTR_COMMENT
        # else attr remains zero...
        if kwSize==0:
            pass
        elif kwSize==1:
            self.attrs.append(attr)
        else:
            self.attrs.append((attr, kwSize)) 
Developer: IronLanguages | Project: ironpython2 | Lines: 37 | Source file: codecontainer.py

Example 8: test_python_tokenize

# Required import: import tokenize [as alias]
# Or: from tokenize import NUMBER [as alias]
def test_python_tokenize():
    code = "a + (foo * -1)"
    tokens = list(python_tokenize(code))
    expected = [(tokenize.NAME, "a", Origin(code, 0, 1)),
                (tokenize.OP, "+", Origin(code, 2, 3)),
                (tokenize.OP, "(", Origin(code, 4, 5)),
                (tokenize.NAME, "foo", Origin(code, 5, 8)),
                (tokenize.OP, "*", Origin(code, 9, 10)),
                (tokenize.OP, "-", Origin(code, 11, 12)),
                (tokenize.NUMBER, "1", Origin(code, 12, 13)),
                (tokenize.OP, ")", Origin(code, 13, 14))]
    assert tokens == expected

    code2 = "a + (b"
    tokens2 = list(python_tokenize(code2))
    expected2 = [(tokenize.NAME, "a", Origin(code2, 0, 1)),
                 (tokenize.OP, "+", Origin(code2, 2, 3)),
                 (tokenize.OP, "(", Origin(code2, 4, 5)),
                 (tokenize.NAME, "b", Origin(code2, 5, 6))]
    assert tokens2 == expected2

    from nose.tools import assert_raises
    assert_raises(PatsyError, list, python_tokenize("a b # c"))

    from nose.tools import assert_raises
    assert_raises(PatsyError, list, python_tokenize("a b \"c")) 
Developer: birforce | Project: vnpy_crypto | Lines: 28 | Source file: tokens.py

Example 9: getLineOfTokens

# Required import: import tokenize [as alias]
# Or: from tokenize import NUMBER [as alias]
def getLineOfTokens(gen):
    tokens = []
    nextNeg = 0
    token = gen.next()  # Python 2 iterator protocol; use next(gen) on Python 3
    if token[0] == tokenize.ENDMARKER:
        return None
    while token[0] != tokenize.NEWLINE and token[0] != tokenize.NL:
        if token[0] == tokenize.COMMENT:
            pass
        elif token[0] == tokenize.OP and token[1] == '-':
            nextNeg = 1
        elif token[0] == tokenize.NUMBER:
            if nextNeg:
                tokens.append(-eval(token[1]))
                nextNeg = 0
            else:
                tokens.append(eval(token[1]))
        elif token[0] == tokenize.STRING:
            tokens.append(eval(token[1]))
        elif token[0] == tokenize.NAME:
            tokens.append(token[1])
        else:
            notify.warning('Ignored token type: %s on line: %s' % (tokenize.tok_name[token[0]], token[2][0]))
        token = gen.next()

    return tokens 
Developer: PiratesOnlineRewritten | Project: Pirates-Online-Rewritten | Lines: 28 | Source file: QuestParser.py

Example 10: _maybe_parse_basic_type

# Required import: import tokenize [as alias]
# Or: from tokenize import NUMBER [as alias]
def _maybe_parse_basic_type(self):
    """Try to parse a basic type (str, bool, number)."""
    token_value = ''
    # Allow a leading dash to handle negative numbers.
    if self._current_token.value == '-':
      token_value += self._current_token.value
      self._advance()

    basic_type_tokens = [tokenize.NAME, tokenize.NUMBER, tokenize.STRING]
    continue_parsing = self._current_token.kind in basic_type_tokens
    if not continue_parsing:
      return False, None

    while continue_parsing:
      token_value += self._current_token.value

      try:
        value = ast.literal_eval(token_value)
      except Exception as e:  # pylint: disable=broad-except
        err_str = "{}\n    Failed to parse token '{}'"
        self._raise_syntax_error(err_str.format(e, token_value))

      was_string = self._current_token.kind == tokenize.STRING
      self._advance()
      is_string = self._current_token.kind == tokenize.STRING
      continue_parsing = was_string and is_string

    return True, value 
Developer: google | Project: gin-config | Lines: 30 | Source file: config_parser.py

Example 11: build

# Required import: import tokenize [as alias]
# Or: from tokenize import NUMBER [as alias]
def build(data):
    result = []
    for num, val, _, _, _ in split(StringIO(data).readline):
        if num == NUMBER:
            result.extend([(NAME, 'Num'), (OP, '('),
                           (STRING, str(val)), (OP, ')')])
        elif num == NAME and not val.startswith('_'):
            result.extend([(NAME, 'Chk'),
                           (OP, '('), (OP, "'"),
                           (STRING, str(val)), (OP, "'"), (OP, ')')])
        else:
            result.append((num, val))
    return untokenize(result)
Developer: iogf | Project: lax | Lines: 15 | Source file: parser.py
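
A hypothetical usage sketch for build; the snippet above does not show its imports, so this assumes split is tokenize.generate_tokens and that NUMBER, NAME, OP, STRING, and untokenize come from tokenize, with StringIO from io:

from io import StringIO
from tokenize import generate_tokens as split, untokenize, NUMBER, NAME, OP, STRING

print(build("total = price * 2\n"))
# Every NUMBER literal is wrapped in Num(...) and every public NAME in Chk('...');
# the exact whitespace emitted by untokenize may differ.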

Example 12: preprocess

# Required import: import tokenize [as alias]
# Or: from tokenize import NUMBER [as alias]
def preprocess(tokentype, tokenval):
    if tokentype == tokenize.NUMBER:
        return number_token

    elif tokentype == tokenize.INDENT:
        return "<indent>"

    elif tokentype == tokenize.DEDENT:
        return "<dedent>"

    # Need to replace spaces with some other character because the ngram processor
    # splits on spaces
    return tokenval.replace(" ", "§").replace("\n", "<newline>") 
Developer: uclnlp | Project: pycodesuggest | Lines: 15 | Source file: reader.py

Example 13: preprocess

# Required import: import tokenize [as alias]
# Or: from tokenize import NUMBER [as alias]
def preprocess(tokentype, tokenval):
    if tokentype == tokenize.NUMBER:
        return number_token

    elif tokentype == tokenize.INDENT:
        return indent_token

    elif tokentype == tokenize.DEDENT:
        return dedent_token

    return tokenval 
Developer: uclnlp | Project: pycodesuggest | Lines: 13 | Source file: pyreader.py
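
A minimal usage sketch for the preprocess helper above; number_token, indent_token, and dedent_token are placeholder strings defined elsewhere in the project, so the values here are only illustrative:

import tokenize
from io import StringIO

number_token, indent_token, dedent_token = "<number>", "<indent>", "<dedent>"

src = "if x > 0:\n    y = 42\n"
for tok in tokenize.generate_tokens(StringIO(src).readline):
    print(preprocess(tok.type, tok.string))  # 42 is printed as <number>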


Note: The tokenize.NUMBER attribute examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. For distribution and use, please refer to each project's license; do not reproduce without permission.