

Python token.tok_name usage examples

This article collects typical usage examples of token.tok_name from open-source Python code. token.tok_name is a dictionary in the standard-library token module that maps numeric token types to descriptive names such as 'NAME' and 'OP'. If you are wondering what token.tok_name is or how to use it, the curated examples below should help; you can also explore the other members of the token module from there.


The sections below present 15 code examples involving token.tok_name, drawn from different open-source projects and sorted by popularity by default.
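
Before diving into the project code, here is a minimal self-contained sketch of what token.tok_name does (written for this article, not taken from any project below):

import io
import token
import tokenize

source = "x = 1 + 2\n"
# tokenize.generate_tokens expects a readline callable yielding str lines
for tok in tokenize.generate_tokens(io.StringIO(source).readline):
    # tok.type is a small int; token.tok_name maps it to 'NAME', 'OP', 'NUMBER', ...
    print(token.tok_name[tok.type], repr(tok.string))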

Example 1: translate_symbols

# Required import: import token [as alias]
# Or: from token import tok_name [as alias]
def translate_symbols(ast_tuple):
    """ Translate numeric grammar symbols in an ast_tuple descriptive names.

        This simply traverses the tree converting any integer value to values
        found in symbol.sym_name or token.tok_name.
    """
    new_list = []
    for item in ast_tuple:
        if isinstance(item, int):
            new_list.append(int_to_symbol(item))
        elif issequence(item):
            new_list.append(translate_symbols(item))
        else:
            new_list.append(item)
    if isinstance(ast_tuple, tuple):
        return tuple(new_list)
    else:
        return new_list 
Author: ktraunmueller, Project: Computable, Lines: 20, Source file: ast_tools.py
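
A possible way to exercise translate_symbols, assuming Python 3.9 or older (the deprecated parser module was removed in 3.10) and the int_to_symbol helper shown in Example 6; issequence is not on this page, so a hypothetical stand-in is sketched here:

import parser  # deprecated; removed in Python 3.10

def issequence(obj):
    # hypothetical stand-in for the module's own issequence helper
    return isinstance(obj, (list, tuple))

tree = parser.suite("a = 1\n").totuple()
print(translate_symbols(tree))
# Prints nested tuples of names like 'file_input' and 'NAME' instead of raw ints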

Example 2: extract_line_count

# Required import: import token [as alias]
# Or: from token import tok_name [as alias]
def extract_line_count(filename, target_dir):
    # Extract the line count of a file
    example_file = os.path.join(target_dir, filename)
    lines = open(example_file).readlines()
    start_row = 0
    if lines and lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1
    line_iterator = iter(lines)
    tokens = tokenize.generate_tokens(lambda: next(line_iterator))
    check_docstring = True
    erow_docstring = 0
    for tok_type, _, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif ((tok_type == 'STRING') and check_docstring):
            erow_docstring = erow
            check_docstring = False
    return erow_docstring+1+start_row, erow+1+start_row 
Author: sklearn-theano, Project: sklearn-theano, Lines: 22, Source file: gen_rst.py
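
A hypothetical call (the file and directory names are illustrative, not from the project):

# Returns two 1-based rows: roughly where the docstring ends, and the file's last token row
doc_end, last_row = extract_line_count('plot_demo.py', 'examples')
print('docstring ends near line %d; last token row is %d' % (doc_end, last_row))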

Example 3: extract_line_count

# Required import: import token [as alias]
# Or: from token import tok_name [as alias]
def extract_line_count(filename, target_dir):
    # Extract the line count of a file
    example_file = os.path.join(target_dir, filename)
    if six.PY2:
        lines = open(example_file).readlines()
    else:
        lines = open(example_file, encoding='utf-8').readlines()
    start_row = 0
    if lines and lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1
    line_iterator = iter(lines)
    tokens = tokenize.generate_tokens(lambda: next(line_iterator))
    check_docstring = True
    erow_docstring = 0
    for tok_type, _, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif (tok_type == 'STRING') and check_docstring:
            erow_docstring = erow
            check_docstring = False
    return erow_docstring+1+start_row, erow+1+start_row 
Author: X-DataInitiative, Project: tick, Lines: 25, Source file: gen_rst.py

Example 4: __init__

# Required import: import token [as alias]
# Or: from token import tok_name [as alias]
def __init__(self, code, value, start=(0,0), stop=(0,0), line=''):
        """
        Args:
            code (string|int): Token code. Ints are translated using token.tok_name.
            value (string): Token value
            start (tuple): Pair of values describing token start line, start position
            stop (tuple): Pair of values describing token stop line, stop position
            line (string): String containing the line the token was parsed from
        """
        try:
            self.code = token.tok_name[code]
        except KeyError:
            # code was already a string name; keep it unchanged
            self.code = code
        self.value = value
        self.start = start
        self.stop = stop
        self.line = line 
Author: jhuapl-boss, Project: heaviside, Lines: 19, Source file: lexer.py
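
Assuming the __init__ above sits on a class named Token, as it does in heaviside's lexer, usage might look like:

import token as token_module

t = Token(token_module.NAME, 'foo', start=(1, 0), stop=(1, 3), line='foo = 1\n')
print(t.code)                      # 'NAME' -- the int was translated via token.tok_name
print(Token('NUMBER', '42').code)  # 'NUMBER' -- string codes pass through unchanged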

Example 5: extract_line_count

# Required import: import token [as alias]
# Or: from token import tok_name [as alias]
def extract_line_count(filename, target_dir):
    # Extract the line count of a file
    example_file = os.path.join(target_dir, filename)
    # Python 2 code: file() and the bound .next method below were removed in Python 3
    lines = file(example_file).readlines()
    start_row = 0
    if lines and lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1
    tokens = tokenize.generate_tokens(lines.__iter__().next)
    check_docstring = True
    erow_docstring = 0
    for tok_type, _, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif ((tok_type == 'STRING') and check_docstring):
            erow_docstring = erow
            check_docstring = False
    return erow_docstring+1+start_row, erow+1+start_row 
Author: nguy, Project: artview, Lines: 21, Source file: gen_rst.py

Example 6: int_to_symbol

# Required import: import token [as alias]
# Or: from token import tok_name [as alias]
def int_to_symbol(i):
    """ Convert numeric symbol or token to a desriptive name.
    """
    try:
        return symbol.sym_name[i]
    except KeyError:
        return token.tok_name[i] 
Author: ktraunmueller, Project: Computable, Lines: 9, Source file: ast_tools.py
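
For example (the symbol module, like parser, is deprecated and absent from Python 3.10+):

import symbol
import token

print(int_to_symbol(token.NAME))         # 'NAME' (token numbers are below 256)
print(int_to_symbol(symbol.file_input))  # 'file_input' (grammar symbols are >= 256)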

Example 7: __call__

# Required import: import token [as alias]
# Or: from token import tok_name [as alias]
def __call__(self, ttype, tstring, stup, etup, line):
        # dispatch to the current state handler (the ## lines below are
        # commented-out Python 2 debug output left in the original source)
##        import token
##        print >> sys.stderr, 'ttype:', token.tok_name[ttype], \
##              'tstring:', tstring
        self.__state(ttype, tstring, stup[0]) 
Author: aliyun, Project: oss-ftp, Lines: 8, Source file: pygettext.py

Example 8: extract_docstring

# Required import: import token [as alias]
# Or: from token import tok_name [as alias]
def extract_docstring(filename, ignore_heading=False):
    """ Extract a module-level docstring, if any
    """
    lines = open(filename).readlines()
    start_row = 0
    if lines[0].startswith('#!'):
        lines.pop(0)
        start_row = 1
    docstring = ''
    first_par = ''
    line_iterator = iter(lines)
    tokens = tokenize.generate_tokens(lambda: next(line_iterator))
    for tok_type, tok_content, _, (erow, _), _ in tokens:
        tok_type = token.tok_name[tok_type]
        if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
            continue
        elif tok_type == 'STRING':
            docstring = eval(tok_content)
            # If the docstring is formatted with several paragraphs, extract
            # the first one:
            paragraphs = '\n'.join(
                line.rstrip() for line
                in docstring.split('\n')).split('\n\n')
            if paragraphs:
                if ignore_heading:
                    if len(paragraphs) > 1:
                        first_par = re.sub('\n', ' ', paragraphs[1])
                        first_par = ((first_par[:95] + '...')
                                     if len(first_par) > 95 else first_par)
                    else:
                        raise ValueError("Docstring not found by gallery",
                                         "Please check your example's layout",
                                         " and make sure it's correct")
                else:
                    first_par = paragraphs[0]

        break
    return docstring, first_par, erow + 1 + start_row 
Author: sklearn-theano, Project: sklearn-theano, Lines: 40, Source file: gen_rst.py
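
A hypothetical call on an example script (the filename is illustrative):

docstring, first_par, end_row = extract_docstring('plot_demo.py')
print('summary: %s (docstring ends near line %d)' % (first_par, end_row))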

Example 9: type_name

# Required import: import token [as alias]
# Or: from token import tok_name [as alias]
def type_name(self):
    return token.tok_name[self._type] 
Author: FSecureLABS, Project: Jandroid, Lines: 4, Source file: offset_token.py

Example 10: type_name

# Required import: import token [as alias]
# Or: from token import tok_name [as alias]
def type_name(self):
    return token.tok_name[self.type] 
Author: FSecureLABS, Project: Jandroid, Lines: 4, Source file: snippet.py

Example 11: extract_docstring

# Required import: import token [as alias]
# Or: from token import tok_name [as alias]
def extract_docstring(self):
        """ Extract a module-level docstring
        """
        lines = open(self.filename).readlines()
        start_row = 0
        if lines[0].startswith('#!'):
            lines.pop(0)
            start_row = 1

        docstring = ''
        first_par = ''
        # Python 2 idiom: the bound .next method was renamed __next__ in Python 3
        tokens = tokenize.generate_tokens(lines.__iter__().next)
        for tok_type, tok_content, _, (erow, _), _ in tokens:
            tok_type = token.tok_name[tok_type]
            if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
                continue
            elif tok_type == 'STRING':
                docstring = eval(tok_content)
                # If the docstring is formatted with several paragraphs,
                # extract the first one:
                paragraphs = '\n'.join(line.rstrip()
                                       for line in docstring.split('\n')
                                       ).split('\n\n')
                if len(paragraphs) > 0:
                    first_par = paragraphs[0]
            break

        thumbloc = None
        for i, line in enumerate(docstring.split("\n")):
            m = re.match(r"^_thumb: (\.\d+),\s*(\.\d+)", line)
            if m:
                thumbloc = float(m.group(1)), float(m.group(2))
                break
        if thumbloc is not None:
            self.thumbloc = thumbloc
            docstring = "\n".join([l for l in docstring.split("\n")
                                   if not l.startswith("_thumb")])

        self.docstring = docstring
        self.short_desc = first_par
        self.end_line = erow + 1 + start_row 
Author: matplotlib, Project: mpl-probscale, Lines: 43, Source file: plot_generator.py

Example 12: check_tokenize

# Required import: import token [as alias]
# Or: from token import tok_name [as alias]
def check_tokenize(self, s, expected):
        # Format the tokens in s in a table format.
        # The ENDMARKER is omitted.
        result = []
        f = BytesIO(s.encode('utf-8'))
        for type, token, start, end, line in tokenize(f.readline):
            if type == ENDMARKER:
                break
            type = tok_name[type]
            result.append("    %(type)-10.10s %(token)-13.13r %(start)s %(end)s" %
                          locals())
        self.assertEqual(result,
                         ["    ENCODING   'utf-8'       (0, 0) (0, 0)"] +
                         expected.rstrip().splitlines()) 
Author: Microvellum, Project: Fluid-Designer, Lines: 16, Source file: test_tokenize.py
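
Inside the same TestCase, CPython's test suite drives this helper with calls such as the one below; note that newer interpreters also emit an implicit NEWLINE token for source lacking a trailing newline, which would add a row to the expected table:

self.check_tokenize("1 + 1", """\
    NUMBER     '1'           (1, 0) (1, 1)
    OP         '+'           (1, 2) (1, 3)
    NUMBER     '1'           (1, 4) (1, 5)
    """)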

Example 13: assertExactTypeEqual

# Required import: import token [as alias]
# Or: from token import tok_name [as alias]
def assertExactTypeEqual(self, opstr, *optypes):
        tokens = list(tokenize(BytesIO(opstr.encode('utf-8')).readline))
        num_optypes = len(optypes)
        self.assertEqual(len(tokens), 2 + num_optypes)
        self.assertEqual(token.tok_name[tokens[0].exact_type],
                         token.tok_name[ENCODING])
        for i in range(num_optypes):
            self.assertEqual(token.tok_name[tokens[i + 1].exact_type],
                             token.tok_name[optypes[i]])
        self.assertEqual(token.tok_name[tokens[1 + num_optypes].exact_type],
                         token.tok_name[token.ENDMARKER]) 
Author: Microvellum, Project: Fluid-Designer, Lines: 13, Source file: test_tokenize.py
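
Typical assertions from the same test file pair an operator string with its expected exact token types:

self.assertExactTypeEqual('+', token.PLUS)
self.assertExactTypeEqual('//', token.DOUBLESLASH)
self.assertExactTypeEqual('**=', token.DOUBLESTAREQUAL)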

Example 14: cvrtr

# Required import: import token [as alias]
# Or: from token import tok_name [as alias]
def cvrtr(tuple):
    """debug method returning an ast string in a readable fashion"""
    # Python 2 code: TupleType comes from the types module, and the
    # parameter name shadows the tuple builtin in the original source
    if type(tuple) is TupleType:
        try:
            try:
                txt = 'token.' + token.tok_name[tuple[0]]
            except KeyError:
                txt = 'symbol.' + symbol.sym_name[tuple[0]]
        except KeyError:
            txt = 'Unknown token/symbol'
        return [txt] + list(map(cvrtr, tuple[1:]))
    else:
        return tuple 
Author: jlachowski, Project: clonedigger, Lines: 15, Source file: astutils.py

Example 15: token_repr

# Required import: import token [as alias]
# Or: from token import tok_name [as alias]
def token_repr(tok_type, string):
  """Returns a human-friendly representation of a token with the given type and string."""
  # repr() prefixes unicode with 'u' on Python2 but not Python3; strip it out for consistency.
  return '%s:%s' % (token.tok_name[tok_type], repr(string).lstrip('u')) 
Author: gristlabs, Project: asttokens, Lines: 6, Source file: util.py
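
For instance:

import token

print(token_repr(token.NAME, u'hello'))  # NAME:'hello'
print(token_repr(token.OP, '+'))         # OP:'+'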


Note: the token.tok_name examples above were compiled by 纯净天空 from open-source projects and documentation hosted on GitHub, MSDocs, and similar platforms. Copyright of each snippet remains with its original authors; when using or redistributing the code, follow the corresponding project's license, and do not republish this compilation without permission.