Python token.NAME attribute code examples

This article collects typical usage examples of the lib2to3.pygram.token.NAME attribute in Python. If you are wondering what the token.NAME attribute is for and how to use it in practice, the curated code examples below may help. You can also explore further usage examples from the module it belongs to, lib2to3.pygram.token.


The following shows 11 code examples of the token.NAME attribute, sorted by popularity by default.
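
Before the examples, here is a minimal orientation sketch (not taken from any of the projects below): token.NAME is the integer token type that lib2to3 assigns to identifier leaves, so it is typically used either to test the type of a Leaf or to construct new identifier leaves.

from lib2to3.pygram import token
from lib2to3.pytree import Leaf

# Build an identifier leaf; its prefix holds any whitespace or comments before it.
name_leaf = Leaf(token.NAME, u"cos", prefix=u" ")
print(name_leaf.type == token.NAME)   # True
print(repr(str(name_leaf)))           # ' cos' -- prefix + value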

Example 1: parse_args

# Required import: from lib2to3.pygram import token [as alias]
# Or: from lib2to3.pygram.token import NAME [as alias]
def parse_args(arglist, scheme):
    u"""
    Parse a list of arguments into a dict
    """
    arglist = [i for i in arglist if i.type != token.COMMA]

    ret_mapping = dict([(k, None) for k in scheme])

    for i, arg in enumerate(arglist):
        if arg.type == syms.argument and arg.children[1].type == token.EQUAL:
            # argument < NAME '=' any >
            slot = arg.children[0].value
            ret_mapping[slot] = arg.children[2]
        else:
            slot = scheme[i]
            ret_mapping[slot] = arg

    return ret_mapping


# def is_import_from(node):
#     """Returns true if the node is a statement "from ... import ..."
#     """
#     return node.type == syms.import_from 
Developer: remg427, Project: misp42splunk, Lines of code: 26, Source file: fixer_util.py
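
To see what parse_args() returns, here is a small hypothetical call (the argument leaves and parameter names are invented for illustration; syms is assumed to be lib2to3.pygram.python_symbols, as in the original fixer_util module):

from lib2to3.pygram import token, python_symbols as syms  # syms is referenced inside parse_args
from lib2to3.pytree import Leaf

# Positional arguments only, roughly what a call like f(data, encoding) would yield.
args = [Leaf(token.NAME, u"data"),
        Leaf(token.COMMA, u","),
        Leaf(token.NAME, u"encoding")]
mapping = parse_args(args, [u"obj", u"encoding"])
# mapping maps u"obj" to the NAME leaf 'data' and u"encoding" to the NAME leaf 'encoding'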

Example 2: parse_args

# Required import: from lib2to3.pygram import token [as alias]
# Or: from lib2to3.pygram.token import NAME [as alias]
def parse_args(arglist, scheme):
    u"""
    Parse a list of arguments into a dict
    """
    arglist = [i for i in arglist if i.type != token.COMMA]
    
    ret_mapping = dict([(k, None) for k in scheme])

    for i, arg in enumerate(arglist):
        if arg.type == syms.argument and arg.children[1].type == token.EQUAL:
            # argument < NAME '=' any >
            slot = arg.children[0].value
            ret_mapping[slot] = arg.children[2]
        else:
            slot = scheme[i]
            ret_mapping[slot] = arg

    return ret_mapping


# def is_import_from(node):
#     """Returns true if the node is a statement "from ... import ..."
#     """
#     return node.type == syms.import_from 
Developer: QData, Project: deepWordBug, Lines of code: 26, Source file: fixer_util.py

Example 3: test_is_shebang_comment

# Required import: from lib2to3.pygram import token [as alias]
# Or: from lib2to3.pygram.token import NAME [as alias]
def test_is_shebang_comment(self):
        """
        Tests whether the fixer_util.is_encoding_comment() function is working.
        """
        shebang_comments = [u'#!/usr/bin/env python\n'
                             u"#!/usr/bin/python2\n",
                             u"#! /usr/bin/python3\n",
                            ]
        not_shebang_comments = [u"# I saw a giant python\n",
                                 u"# I have never seen a python2\n",
                               ]
        for comment in shebang_comments:
            node = FromImport(u'math', [Leaf(token.NAME, u'cos', prefix=" ")])
            node.prefix = comment
            self.assertTrue(is_shebang_comment(node))

        for comment in not_shebang_comments:
            node = FromImport(u'math', [Leaf(token.NAME, u'cos', prefix=" ")])
            node.prefix = comment
            self.assertFalse(is_shebang_comment(node)) 
Developer: hughperkins, Project: kgsgo-dataset-preprocessor, Lines of code: 22, Source file: test_futurize.py

Example 4: test_is_encoding_comment

# Required import: from lib2to3.pygram import token [as alias]
# Or: from lib2to3.pygram.token import NAME [as alias]
def test_is_encoding_comment(self):
        """
        Tests whether the fixer_util.is_encoding_comment() function is working.
        """
        encoding_comments = [u"# coding: utf-8",
                             u"# encoding: utf-8",
                             u"# -*- coding: latin-1 -*-",
                             u"# vim: set fileencoding=iso-8859-15 :",
                            ]
        not_encoding_comments = [u"# We use the file encoding utf-8",
                                 u"coding = 'utf-8'",
                                 u"encoding = 'utf-8'",
                                ]
        for comment in encoding_comments:
            node = FromImport(u'math', [Leaf(token.NAME, u'cos', prefix=" ")])
            node.prefix = comment
            self.assertTrue(is_encoding_comment(node))

        for comment in not_encoding_comments:
            node = FromImport(u'math', [Leaf(token.NAME, u'cos', prefix=" ")])
            node.prefix = comment
            self.assertFalse(is_encoding_comment(node)) 
Developer: hughperkins, Project: kgsgo-dataset-preprocessor, Lines of code: 24, Source file: test_futurize.py

Example 5: has_metaclass

# Required import: from lib2to3.pygram import token [as alias]
# Or: from lib2to3.pygram.token import NAME [as alias]
def has_metaclass(parent):
    results = None
    for node in parent.children:
        kids = node.children
        if node.type == syms.argument:
            if kids[0] == Leaf(token.NAME, u"metaclass") and \
                kids[1] == Leaf(token.EQUAL, u"=") and \
                kids[2]:
                #Hack to avoid "class X(=):" with this case.
                results = [node] + kids
                break
        elif node.type == syms.arglist:
            # Argument list... loop through it looking for:
            # Node(*, [*, Leaf(token.NAME, u"metaclass"), Leaf(token.EQUAL, u"="), Leaf(*, *)])
            for child in node.children:
                if results: break
                if child.type == token.COMMA:
                    #Store the last comma, which precedes the metaclass
                    comma = child
                elif type(child) == Node:
                    meta = equal = name = None
                    for arg in child.children:
                        if arg == Leaf(token.NAME, u"metaclass"):
                            #We have the (metaclass) part
                            meta = arg
                        elif meta and arg == Leaf(token.EQUAL, u"="):
                            #We have the (metaclass=) part
                            equal = arg
                        elif meta and equal:
                            #Here we go, we have (metaclass=X)
                            name = arg
                            results = (comma, meta, equal, name)
                            break
    return results 
Developer: remg427, Project: misp42splunk, Lines of code: 36, Source file: fix_metaclass.py
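
As a rough illustration of the pattern has_metaclass() detects, here is a sketch that parses a class header and inspects it (it assumes the has_metaclass() above plus the usual "from lib2to3.pygram import python_symbols as syms" import from the surrounding fix_metaclass module):

from lib2to3 import pygram, pytree
from lib2to3.pgen2 import driver

d = driver.Driver(pygram.python_grammar, convert=pytree.convert)
tree = d.parse_string(u"class X(metaclass=Meta): pass\n")
classdef = tree.children[0]   # the classdef node for 'class X(...)'
found = has_metaclass(classdef)
# found holds the argument node plus its leaves: NAME 'metaclass', '=', NAME 'Meta'
print(found)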

Example 6: transform

# Required import: from lib2to3.pygram import token [as alias]
# Or: from lib2to3.pygram.token import NAME [as alias]
def transform(self, node, results):
        meta_results = has_metaclass(node)
        if not meta_results: return
        for meta in meta_results:
            meta.remove()
        target = Leaf(token.NAME, u"__metaclass__")
        equal = Leaf(token.EQUAL, u"=", prefix=u" ")
        # meta is the last item in what was returned by has_metaclass(): name
        name = meta
        name.prefix = u" "
        stmt_node = Node(syms.atom, [target, equal, name])

        suitify(node)
        for item in node.children:
            if item.type == syms.suite:
                for stmt in item.children:
                    if stmt.type == token.INDENT:
                        # Insert, in reverse order, the statement, a newline,
                        # and an indent right after the first indented line
                        loc = item.children.index(stmt) + 1
                        # Keep consistent indentation form
                        ident = Leaf(token.INDENT, stmt.value)
                        item.insert_child(loc, ident)
                        item.insert_child(loc, Newline())
                        item.insert_child(loc, stmt_node)
                        break 
Developer: remg427, Project: misp42splunk, Lines of code: 28, Source file: fix_metaclass.py

Example 7: future_import

# Required import: from lib2to3.pygram import token [as alias]
# Or: from lib2to3.pygram.token import NAME [as alias]
def future_import(feature, node):
    """
    This seems to work
    """
    root = find_root(node)

    if does_tree_import(u"__future__", feature, node):
        return

    # Look for a shebang or encoding line
    shebang_encoding_idx = None

    for idx, node in enumerate(root.children):
        # Is it a shebang or encoding line?
        if is_shebang_comment(node) or is_encoding_comment(node):
            shebang_encoding_idx = idx
        if is_docstring(node):
            # skip over docstring
            continue
        names = check_future_import(node)
        if not names:
            # not a future statement; need to insert before this
            break
        if feature in names:
            # already imported
            return

    import_ = FromImport(u'__future__', [Leaf(token.NAME, feature, prefix=" ")])
    if shebang_encoding_idx == 0 and idx == 0:
        # If this __future__ import would go on the first line,
        # detach the shebang / encoding prefix from the current first line.
        # and attach it to our new __future__ import node.
        import_.prefix = root.children[0].prefix
        root.children[0].prefix = u''
        # End the __future__ import line with a newline and add a blank line
        # afterwards:
    children = [import_ , Newline()]
    root.insert_child(idx, Node(syms.simple_stmt, children)) 
Developer: remg427, Project: misp42splunk, Lines of code: 40, Source file: fixer_util.py
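
A hedged end-to-end sketch of future_import() (it assumes the python-future package is installed, which ships this function as libfuturize.fixer_util.future_import; the printed result is approximate):

from lib2to3 import pygram, pytree
from lib2to3.pgen2 import driver
from libfuturize.fixer_util import future_import  # assumption: python-future is installed

d = driver.Driver(pygram.python_grammar, convert=pytree.convert)
tree = d.parse_string(u"x = 1 / 2\n")
future_import(u"division", tree.children[0])
print(str(tree))
# roughly: from __future__ import division
#          x = 1 / 2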

Example 8: future_import2

# Required import: from lib2to3.pygram import token [as alias]
# Or: from lib2to3.pygram.token import NAME [as alias]
def future_import2(feature, node):
    """
    An alternative to future_import() which might not work ...
    """
    root = find_root(node)

    if does_tree_import(u"__future__", feature, node):
        return

    insert_pos = 0
    for idx, node in enumerate(root.children):
        if node.type == syms.simple_stmt and node.children and \
           node.children[0].type == token.STRING:
            insert_pos = idx + 1
            break

    for thing_after in root.children[insert_pos:]:
        if thing_after.type == token.NEWLINE:
            insert_pos += 1
            continue

        prefix = thing_after.prefix
        thing_after.prefix = u""
        break
    else:
        prefix = u""

    import_ = FromImport(u"__future__", [Leaf(token.NAME, feature, prefix=u" ")])

    children = [import_, Newline()]
    root.insert_child(insert_pos, Node(syms.simple_stmt, children, prefix=prefix)) 
Developer: remg427, Project: misp42splunk, Lines of code: 33, Source file: fixer_util.py

Example 9: transform

# Required import: from lib2to3.pygram import token [as alias]
# Or: from lib2to3.pygram.token import NAME [as alias]
def transform(self, node, results):
        meta_results = has_metaclass(node)
        if not meta_results: return
        for meta in meta_results:
            meta.remove()
        target = Leaf(token.NAME, u"__metaclass__")
        equal = Leaf(token.EQUAL, u"=", prefix=u" ")
        # meta is the last item in what was returned by has_metaclass(): name
        name = meta
        name.prefix = u" "
        stmt_node = Node(syms.atom, [target, equal, name])
        
        suitify(node)
        for item in node.children:
            if item.type == syms.suite:
                for stmt in item.children:
                    if stmt.type == token.INDENT:
                        # Insert, in reverse order, the statement, a newline,
                        # and an indent right after the first indented line
                        loc = item.children.index(stmt) + 1
                        # Keep consistent indentation form
                        ident = Leaf(token.INDENT, stmt.value)
                        item.insert_child(loc, ident)
                        item.insert_child(loc, Newline())
                        item.insert_child(loc, stmt_node)
                        break 
Developer: QData, Project: deepWordBug, Lines of code: 28, Source file: fix_metaclass.py

Example 10: future_import2

# Required import: from lib2to3.pygram import token [as alias]
# Or: from lib2to3.pygram.token import NAME [as alias]
def future_import2(feature, node):
    """
    An alternative to future_import() which might not work ...
    """
    root = find_root(node)
    
    if does_tree_import(u"__future__", feature, node):
        return

    insert_pos = 0
    for idx, node in enumerate(root.children):
        if node.type == syms.simple_stmt and node.children and \
           node.children[0].type == token.STRING:
            insert_pos = idx + 1
            break

    for thing_after in root.children[insert_pos:]:
        if thing_after.type == token.NEWLINE:
            insert_pos += 1
            continue

        prefix = thing_after.prefix
        thing_after.prefix = u""
        break
    else:
        prefix = u""

    import_ = FromImport(u"__future__", [Leaf(token.NAME, feature, prefix=u" ")])

    children = [import_, Newline()]
    root.insert_child(insert_pos, Node(syms.simple_stmt, children, prefix=prefix)) 
Developer: QData, Project: deepWordBug, Lines of code: 33, Source file: fixer_util.py

Example 11: future_import

# Required import: from lib2to3.pygram import token [as alias]
# Or: from lib2to3.pygram.token import NAME [as alias]
def future_import(feature, node):
    """
    This seems to work
    """
    root = find_root(node)

    if does_tree_import(u"__future__", feature, node):
        return

    # Look for a shebang or encoding line
    shebang_encoding_idx = None

    for idx, node in enumerate(root.children):
        # Is it a shebang or encoding line?
        if is_shebang_comment(node) or is_encoding_comment(node):
            shebang_encoding_idx = idx
        if node.type == syms.simple_stmt and \
           len(node.children) > 0 and node.children[0].type == token.STRING:
            # skip over docstring
            continue
        names = check_future_import(node)
        if not names:
            # not a future statement; need to insert before this
            break
        if feature in names:
            # already imported
            return

    import_ = FromImport(u'__future__', [Leaf(token.NAME, feature, prefix=" ")])
    if shebang_encoding_idx == 0 and idx == 0:
        # If this __future__ import would go on the first line,
        # detach the shebang / encoding prefix from the current first line.
        # and attach it to our new __future__ import node.
        import_.prefix = root.children[0].prefix
        root.children[0].prefix = u''
        # End the __future__ import line with a newline and add a blank line
        # afterwards:
    children = [import_ , Newline()]
    root.insert_child(idx, Node(syms.simple_stmt, children)) 
Developer: hughperkins, Project: kgsgo-dataset-preprocessor, Lines of code: 41, Source file: fixer_util.py


Note: The lib2to3.pygram.token.NAME attribute examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and any distribution or use should follow each project's license. Do not reproduce without permission.