

Python PythonLexer.add_filter method code examples

This article collects typical usage examples of the pygments.lexers.PythonLexer.add_filter method in Python. If you are wondering what PythonLexer.add_filter does, how to call it, or what real-world uses look like, the curated code examples below should help. You can also look further into other usage examples of the containing class, pygments.lexers.PythonLexer.


The following presents 10 code examples of the PythonLexer.add_filter method, sorted by popularity by default.
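Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of how add_filter is typically called: it attaches a token-stream filter to the lexer, either by the name of a built-in filter (such as 'tokenmerge', which several examples below use) or by passing a filter instance with options.

import pygments
from pygments.lexers import PythonLexer
from pygments.filters import KeywordCaseFilter

lexer = PythonLexer()
lexer.add_filter('tokenmerge')                     # built-in filter, referenced by name
lexer.add_filter(KeywordCaseFilter(case='upper'))  # filter instance with options

# Every filter added this way is applied to the token stream the lexer produces
for ttype, value in pygments.lex("def f(x):\n    return x\n", lexer):
    print(ttype, repr(value))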

Example 1: findBadUseImport

# Required import: from pygments.lexers import PythonLexer [as alias]
# Or: from pygments.lexers.PythonLexer import add_filter [as alias]
def findBadUseImport(code):
    """
    Find when use from foo import *
    Documentation: http://python.net/~goodger/projects/pycon/2007/idiomatic/handout.html#importing
                   https://docs.python.org/2/howto/doanddont.html#from-module-import
    """
    sequence = [(Token.Keyword.Namespace, '^from$'),
                (Token.Name.Namespace, '.*'),
                (Token.Keyword.Namespace, '^import$'),
                (Token.Operator, '\*')]
    lexer = PythonLexer()
    lexer.add_filter('tokenmerge')
    tokens = pygments.lex(code, lexer)
    badUseImport = PythonIdiom('badImport')

    lineNumber = 1
    while True:
        lineAux = _findSeqInTokens(sequence, tokens)
        if lineAux < 0:
            break
        lineNumber += lineAux -1
        badUseImport.addNew(lineNumber)
    log("badUseImport found in lines {0}".format(badUseImport.getLines()))

    return badUseImport
Developer: jjmerchante, Project: Pythonic, Lines: 27, Source: pythonic.py

Example 2: checkNotRange

# Required import: from pygments.lexers import PythonLexer [as alias]
# Or: from pygments.lexers.PythonLexer import add_filter [as alias]
def checkNotRange(code):
    """
    Check if there is: for xx in [0,1,2] instead of for xxx in (x)range
    Documentation: https://youtu.be/OSGv2VnC0go?t=3m4s
    """
    sequence = [(Token.Keyword, '^for$'),
                (Token.Name, '^\w+$'),
                (Token.Operator.Word, '^in$'),
                (Token.Punctuation, '^\[$'),
                (Token.Literal.Number.Integer, '^\d$')]

    lexer = PythonLexer()
    lexer.add_filter('tokenmerge')
    tokens = pygments.lex(code, lexer)
    notRangeIdiom = PythonIdiom('notRange')

    lineNumber = 1
    while True:
        lineAux = _findSeqInTokens(sequence, tokens)
        if lineAux < 0:
            break
        lineNumber += lineAux -1
        notRangeIdiom.addNew(lineNumber)
    log("badForIn found in lines {0}".format(notRangeIdiom.getLines()))
    return notRangeIdiom
Developer: jjmerchante, Project: Pythonic, Lines: 27, Source: pythonic.py

Example 3: checkBadLoopCollect

# Required import: from pygments.lexers import PythonLexer [as alias]
# Or: from pygments.lexers.PythonLexer import add_filter [as alias]
def checkBadLoopCollect(code):
    """
    Look for bad loop like 'for i in range(len(list))'
    Documentation: https://youtu.be/OSGv2VnC0go?t=4m47s
    """
    sequence = [(Token.Keyword, '^for$'),
                (Token.Name, '^\w+$'),
                (Token.Operator.Word, '^in$'),
                (Token.Name.Builtin, '^range$|^xrange$'),
                (Token.Punctuation, '^\($'),
                (Token.Name.Builtin, '^len$'),
                (Token.Punctuation, '^\($'),
                (Token.Name, '^\w+$')]
    lexer = PythonLexer()
    lexer.add_filter('tokenmerge')
    tokens = pygments.lex(code, lexer)
    badLoopCollectIdiom = PythonIdiom('badLoop')

    lineNumber = 1
    while True:
        lineAux = _findSeqInTokens(sequence, tokens)
        if lineAux < 0:
            break
        lineNumber += lineAux -1
        badLoopCollectIdiom.addNew(lineNumber)
    log("badLoopCollect found in lines {0}".format(badLoopCollectIdiom.getLines()))

    return badLoopCollectIdiom
Developer: jjmerchante, Project: Pythonic, Lines: 30, Source: pythonic.py

Example 4: findDocstring

# Required import: from pygments.lexers import PythonLexer [as alias]
# Or: from pygments.lexers.PythonLexer import add_filter [as alias]
def findDocstring(code):
    """Find the use of documentation in the functions, classes or script
    Documentation: https://www.python.org/dev/peps/pep-0257/
    """
    lexer = PythonLexer()
    lexer.add_filter('tokenmerge')

    classDefToken = (Token.Keyword, '^class$')
    functDefToken = (Token.Keyword, '^def$')
    tokens = pygments.lex(code, lexer)

    docIdiom = PythonIdiom('docstring')
    docstringFound = defaultdict(int)
    typeDoc = 'module'
    lineNumber = 1


    for ttype, word in tokens:
        if _sameToken((ttype, word), classDefToken):
            typeDoc = 'class'
        elif _sameToken((ttype, word), functDefToken):
            typeDoc = 'function'
        elif ttype == Token.Literal.String.Doc:
            docstringFound[typeDoc] += 1
            docIdiom.addNew(lineNumber)
        lineNumber += _getNewLines((ttype, word))

    for typeDoc in docstringFound:
        log("type %s: %d found" % (typeDoc, docstringFound[typeDoc]))
    log('DocString found in lines: ' + str(docIdiom.getLines()))
    return docIdiom
Developer: jjmerchante, Project: Pythonic, Lines: 33, Source: pythonic.py

Example 5: basicStructure

# Required import: from pygments.lexers import PythonLexer [as alias]
# Or: from pygments.lexers.PythonLexer import add_filter [as alias]
def basicStructure(code):
    sequence = []
    lexer = PythonLexer()
    lexer.add_filter('tokenmerge')
    tokens = pygments.lex(code, lexer)
    for token in tokens:
        print token
Developer: jjmerchante, Project: Pythonic, Lines: 9, Source: pythonicSimple.py

Example 6: redent

# Required import: from pygments.lexers import PythonLexer [as alias]
# Or: from pygments.lexers.PythonLexer import add_filter [as alias]
def redent(s):
    """
    Shamelessly stolen from infobob(#python bot) code
    https://code.launchpad.net/~pound-python/infobat/infobob
    """
    lexer = PythonLexer()
    lexer.add_filter(_RedentFilter())
    return highlight(s, lexer, NullFormatter())
Developer: stranac, Project: raritythebot, Lines: 10, Source: utils.py
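The _RedentFilter used in this example (and again in Example 10 below) is a project-specific filter whose implementation is not part of the excerpt. As a rough, hypothetical illustration of the interface such a custom filter implements before it is handed to add_filter, a pygments Filter subclass looks roughly like this (the class name and its keyword-uppercasing behavior are invented for illustration and are not from the original projects):

from pygments.filter import Filter
from pygments.token import Keyword

class UppercaseKeywordsFilter(Filter):
    """Hypothetical filter: passes tokens through, uppercasing keyword values."""
    def filter(self, lexer, stream):
        # stream yields (token_type, value) pairs produced by the lexer
        for ttype, value in stream:
            if ttype in Keyword:
                yield ttype, value.upper()
            else:
                yield ttype, value

# An instance of a class like this is what gets passed to lexer.add_filter(...)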

Example 7: python_prettify

# Required import: from pygments.lexers import PythonLexer [as alias]
# Or: from pygments.lexers.PythonLexer import add_filter [as alias]
def python_prettify(code, style):
    lexer = PythonLexer()
    lexer.add_filter(VisibleWhitespaceFilter(spaces="&nbsp"))
    pretty_code = highlight(
        code, lexer, HtmlFormatter(
            linenos=style, linenostart=0))
    # print(pretty_code)
    return format_html('{}', mark_safe(pretty_code))
Developer: ifjorissen, Project: vrfy, Lines: 10, Source: pretty_code.py

Example 8: findUseMapFilterReduce

# Required import: from pygments.lexers import PythonLexer [as alias]
# Or: from pygments.lexers.PythonLexer import add_filter [as alias]
def findUseMapFilterReduce(code):
    """
    Find the use of map, filter and reduce builtins in the code.
    A better option is the use of generators and list comprehensions
    Documentation: Fluent Python page 142
                   https://docs.python.org/2/library/functions.html#map
                   https://docs.python.org/2/library/functions.html#filter
                   https://docs.python.org/2/library/functions.html#reduce
    """
    filterToken = (Token.Name.Builtin, '^filter$')
    mapToken = (Token.Name.Builtin, '^map$')
    reduceToken = (Token.Name.Builtin, '^reduce$')
    tokensFound = {'filter': 0,
                   'map': 0,
                   'reduce': 0}

    lexer = PythonLexer()
    lexer.add_filter('tokenmerge')
    tokens = pygments.lex(code, lexer)
    lineNumber = 1

    mapIdiom = PythonIdiom('map')
    filterIdiom = PythonIdiom('filter')
    reduceIdiom = PythonIdiom('reduce')

    for token in tokens:
        lineNumber += _getNewLines(token)
        if _sameToken(token, filterToken):
            tokensFound['filter'] += 1
            filterIdiom.addNew(lineNumber)
        elif _sameToken(token, reduceToken):
            tokensFound['reduce'] += 1
            reduceIdiom.addNew(lineNumber)
        elif _sameToken(token, mapToken):
            tokensFound['map'] += 1
            mapIdiom.addNew(lineNumber)
    log('filter found in lines: ' + str(filterIdiom.getLines()))
    log('map found in lines: ' + str(mapIdiom.getLines()))
    log('reduce found in lines: ' + str(reduceIdiom.getLines()))
    return [mapIdiom, filterIdiom, reduceIdiom]
Developer: jjmerchante, Project: Pythonic, Lines: 42, Source: pythonic.py

Example 9: PythonAPIFilter (truncated excerpt)

# Required import: from pygments.lexers import PythonLexer [as alias]
# Or: from pygments.lexers.PythonLexer import add_filter [as alias]
                        if (not indents) and seen_def:
                            yield token.Text, "\n"

                    decorator_started = False

                    for t, v in indents:
                        yield t, v
                    indents = []

                    seen_def = True # print leading new lines

            if def_started or decorator_started:
                yield ttype, value

            if (ttype is token.Punctuation) and (value == ':'):
                def_started = False

if __name__ == "__main__":
    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters import NullFormatter

    lex = PythonLexer()
    lex.add_filter(PythonAPIFilter())

    for f in [__file__, "../views.py", '../admin.py']:
        code = open(f, 'r').read()
        print "---------- %s ----------" % f
        print highlight(code, lex, NullFormatter())
Developer: Acidburn0zzz, Project: difio, Lines: 31, Source: python.py

Example 10: redent

# Required import: from pygments.lexers import PythonLexer [as alias]
# Or: from pygments.lexers.PythonLexer import add_filter [as alias]
def redent(s):
    lexer = PythonLexer()
    lexer.add_filter(_RedentFilter())
    return highlight(s, lexer, NullFormatter())
Developer: dustinlacewell, Project: PySession, Lines: 6, Source: redent.py


Note: The pygments.lexers.PythonLexer.add_filter examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors. Please refer to the corresponding project's License before distributing or using the code. Do not reproduce this article without permission.