

Python pygments.lex Method Code Examples

This article collects typical usage examples of the pygments.lex method in Python. If you are wondering what pygments.lex does, how to call it, or how it is used in practice, the curated code examples below may help. You can also explore further usage examples from the pygments package.


The following presents 15 code examples of the pygments.lex method, drawn from open-source projects and sorted by popularity by default.
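
Before the individual examples, here is a minimal usage sketch (the sample source string is illustrative and not taken from any of the projects below): pygments.lex(code, lexer) takes a string of source code plus a lexer instance and returns an iterator of (token_type, value) tuples.

import pygments
from pygments.lexers import PythonLexer

# Minimal sketch: lex() expects a lexer *instance*; passing the lexer class
# raises TypeError (see Example 9 below). It yields (token_type, value) tuples.
source = 'print("hello")\n'
for token_type, value in pygments.lex(source, PythonLexer()):
    print(token_type, repr(value))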

Example 1: print_lexer

# Required import: import pygments [as alias]
# Or: from pygments import lex [as alias]
def print_lexer(
    body: str, lexer: Lexer, label: str = None, prefix: str = None, indent: int = None
):
    if COLORIZE:
        prefix_str = prefix + " " if prefix else ""
        if prefix_str or indent:
            prefix_body = prefix_str + " " * (indent or 0)
            lexer.add_filter(PrefixFilter(prefix=prefix_body))
        tokens = list(pygments.lex(body, lexer=lexer))
        if label:
            fmt_label = [("fg:ansimagenta", label)]
            if prefix_str:
                fmt_label.insert(0, ("", prefix_str))
            print_formatted(FormattedText(fmt_label))
        print_formatted(PygmentsTokens(tokens))
    else:
        print_ext(body, label=label, prefix=prefix) 
Author: hyperledger, Project: aries-cloudagent-python, Lines: 19, Source: utils.py

Example 2: _parse_src

# Required import: import pygments [as alias]
# Or: from pygments import lex [as alias]
def _parse_src(cls, src_contents, src_filename):
        """
        Return a stream of `(token_type, value)` tuples
        parsed from `src_contents` (str)

        Uses `src_filename` to guess the type of file
        so it can highlight syntax correctly.
        """

        # Parse the source into tokens
        try:
            lexer = guess_lexer_for_filename(src_filename, src_contents)
        except ClassNotFound:
            lexer = TextLexer()

        # Ensure that we don't strip newlines from
        # the source file when lexing.
        lexer.stripnl = False

        return pygments.lex(src_contents, lexer) 
Author: Bachmann1234, Project: diff_cover, Lines: 22, Source: snippets.py

Example 3: __init__

# Required import: import pygments [as alias]
# Or: from pygments import lex [as alias]
def __init__(self, disassembly, lexer=lexer, msg=None):

        self.lines = []
        if isinstance(disassembly, list):
            self.lines = disassembly
        elif disassembly:
            line = []
            if msg:
                current_function = msg.rsplit(None, 1)[-1][:-1]
            else:
                current_function = None
            with currentfunctiontfilter.current_function(current_function):
                for ttype, value in pygments.lex(disassembly, lexer):
                    if '\n' in value:
                        self.lines.append(DisassemblyLine(line))
                        line = []
                    else:
                        line.append((ttype, value))

        self.linenos = {}
        for i, line in enumerate(self.lines):
            self.linenos[line.address] = line, i

        self.lexer = lexer
        self.msg = msg 
Author: wapiflapi, Project: gxf, Lines: 27, Source: disassembly.py

Example 4: tokenize_first_pass

# Required import: import pygments [as alias]
# Or: from pygments import lex [as alias]
def tokenize_first_pass(self, code):
        """Tokenize input code for Comments and Delimit blocks

        Args:
            code (str):
                Input string. Should use `\\n` for end of lines.

        Return:
            (List[Tuple[Token, str]]):
                List of token tuples. The only token types currently used in the
                lexer are:
                - Text (plain text)
                - Comment.Single (// and *)
                - Comment.Special (///)
                - Comment.Multiline (/* */)
                - Keyword.Namespace (code inside #delimit ; block)
                - Keyword.Reserved (; delimiter)
        """
        comment_lexer = CommentAndDelimitLexer(stripall=False, stripnl=False)
        return [x for x in lex(code, comment_lexer)] 
Author: kylebarron, Project: stata_kernel, Lines: 22, Source: code_manager.py

Example 5: tokenize_second_pass

# Required import: import pygments [as alias]
# Or: from pygments import lex [as alias]
def tokenize_second_pass(self, code):
        """Tokenize clean code for syntactic blocks

        Args:
            code (str):
                Input string. Should have `\\n` as the delimiter. Should have no
                comments. Should use `\\n` for end of lines.

        Return:
            (List[Tuple[Token, str]]):
                List of token tuples. Some of the token types used in the lexer are:
                - Text (plain text)
                - Comment.Single (// and *)
                - Comment.Special (///)
                - Comment.Multiline (/* */)
                - Keyword.Namespace (code inside #delimit ; block)
                - Keyword.Reserved (; delimiter)
        """
        block_lexer = StataLexer(stripall=False, stripnl=False)
        return [x for x in lex(code, block_lexer)] 
Author: kylebarron, Project: stata_kernel, Lines: 23, Source: code_manager.py

Example 6: print_packets

# Required import: import pygments [as alias]
# Or: from pygments import lex [as alias]
def print_packets(path: list, nodes: dict) -> None:
    tokens = []
    for e in path[:-1]:
        node = nodes[e.dst]
        p = node.render()
        line = '{} = {}'.format(node.name.replace('-', '_'), repr(p))
        tokens.extend(list(pygments.lex(line, lexer=Python3Lexer())))

    # p = self.fuzz_node.render()
    node = nodes[path[-1].dst]
    p = node.render()
    line = '{} = {}'.format(node.name.replace('-', '_'), repr(p))

    print(pygments.highlight(line, Python3Lexer(), Terminal256Formatter(style='rrt')))

    # tokens.extend(list(pygments.lex(line, lexer=Python3Lexer())))
    # style = style_from_pygments_cls(get_style_by_name('colorful'))
    # print_formatted_text(PygmentsTokens(tokens), style=style)


# --------------------------------------------------------------- # 
Author: nccgroup, Project: fuzzowski, Lines: 23, Source: printers.py

Example 7: __iter__

# Required import: import pygments [as alias]
# Or: from pygments import lex [as alias]
def __iter__(self):
        """parse code string and yield "clasified" tokens
        """
        try:
            tokens = self.lex()
        except IOError:
            log.info("Pygments lexer not found, using fallback")
            # TODO: write message to INFO
            yield ('', self.code)
            return

        for ttype, value in self.join(tokens):
            yield (_get_ttype_class(ttype), value)


# code_block_directive
# --------------------
# :: 
Author: rst2pdf, Project: rst2pdf, Lines: 20, Source: pygments_code_block_directive.py

Example 8: __iter__

# Required import: import pygments [as alias]
# Or: from pygments import lex [as alias]
def __iter__(self):
        """Parse self.code and yield "classified" tokens.
        """
        if self.lexer is None:
            yield ([], self.code)
            return
        tokens = pygments.lex(self.code, self.lexer)
        for tokentype, value in self.merge(tokens):
            if self.tokennames == 'long': # long CSS class args
                classes = str(tokentype).lower().split('.')
            else: # short CSS class args
                classes = [_get_ttype_class(tokentype)]
            classes = [cls for cls in classes if cls not in unstyled_tokens]
            yield (classes, value) 
Author: skarlekar, Project: faces, Lines: 16, Source: code_analyzer.py

Example 9: test_bare_class_handler

# Required import: import pygments [as alias]
# Or: from pygments import lex [as alias]
def test_bare_class_handler():
    from pygments.formatters import HtmlFormatter
    from pygments.lexers import PythonLexer
    try:
        lex('test\n', PythonLexer)
    except TypeError as e:
        assert 'lex() argument must be a lexer instance' in str(e)
    else:
        assert False, 'nothing raised'
    try:
        format([], HtmlFormatter)
    except TypeError as e:
        assert 'format() argument must be a formatter instance' in str(e)
    else:
        assert False, 'nothing raised' 
Author: pygments, Project: pygments, Lines: 17, Source: test_basic_api.py

Example 10: main

# Required import: import pygments [as alias]
# Or: from pygments import lex [as alias]
def main():
    # Printing a manually constructed list of (Token, text) tuples.
    text = [
        (Token.Keyword, "print"),
        (Token.Punctuation, "("),
        (Token.Literal.String.Double, '"'),
        (Token.Literal.String.Double, "hello"),
        (Token.Literal.String.Double, '"'),
        (Token.Punctuation, ")"),
        (Token.Text, "\n"),
    ]

    print_formatted_text(PygmentsTokens(text))

    # Printing the output of a pygments lexer.
    tokens = list(pygments.lex('print("Hello")', lexer=PythonLexer()))
    print_formatted_text(PygmentsTokens(tokens))

    # With a custom style.
    style = Style.from_dict(
        {
            "pygments.keyword": "underline",
            "pygments.literal.string": "bg:#00ff00 #ffffff",
        }
    )
    print_formatted_text(PygmentsTokens(tokens), style=style) 
Author: prompt-toolkit, Project: python-prompt-toolkit, Lines: 28, Source: pygments-tokens.py

Example 11: test_print_hunter

# Required import: import pygments [as alias]
# Or: from pygments import lex [as alias]
def test_print_hunter():
    """Verify that there are no print statements in the codebase."""
    root_directory = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
    assert 'tests' in os.listdir(root_directory)
    generator = (os.path.join(r, s) for r, d, f in os.walk(root_directory) if '.egg/' not in r and '/.tox/' not in r
                 for s in f if s.endswith('.py') and not s.startswith('example_'))
    regex_print = re.compile(r'^(.*)(?<!\w)print(\(|\s)(.*)$', re.MULTILINE)

    # Find all potential prints in Python files. May or may not be in strings.
    potential_prints = set()
    for file_path in generator:
        with open(file_path) as f:
            for line in f:
                if regex_print.search(line):
                    potential_prints.add(file_path)
                    break
    if not potential_prints:
        return

    # Perform lexical analysis on the source code and find all valid print statements/function calls.
    current_line = list()
    actual_prints = dict()
    for file_path in potential_prints:
        with open(file_path) as f:
            code = f.read(52428800)  # Up to 50 MiB.
        for token, code_piece in lex(code, get_lexer_by_name('Python')):
            if code_piece == '\n':
                current_line = list()  # References new list, doesn't necessarily remove old list.
                continue
            current_line.append(code_piece)
            if (str(token), code_piece) != ('Token.Keyword', 'print'):
                continue
            # If this is reached, there is a print statement in the library!
            if file_path not in actual_prints:
                actual_prints[file_path] = list()
            actual_prints[file_path].append(current_line)  # Keeps reference to current list() alive.
    actual_prints = dict((f, [''.join(l) for l in lst]) for f, lst in actual_prints.items())
    assert not actual_prints 
Author: Robpol86, Project: libnl, Lines: 40, Source: test__meta.py

Example 12: lex

# Required import: import pygments [as alias]
# Or: from pygments import lex [as alias]
def lex(self, code, lex):
        """Return tokenified code.

        Return a list of tuples (scope, word) where word is the word to be
        printed and scope the scope name representing the context.

        :param str code: Code to tokenify.
        :param lex: Lexer to use.
        :return:
        """
        if lex is None:
            if not type(code) is str:
                # if no suitable lexer is found, return decoded code
                code = code.decode("utf-8")
            return (("global", code),)

        words = pygments.lex(code, lex)

        scopes = []
        for word in words:
            token = word[0]
            scope = "global"

            if token in self.token_map.keys():
                scope = self.token_map[token]

            scopes.append((scope, word[1]))
        return scopes 
Author: richrd, Project: suplemon, Lines: 30, Source: lexer.py

Example 13: print_python

# Required import: import pygments [as alias]
# Or: from pygments import lex [as alias]
def print_python(path: list) -> None:
    tokens = []
    block_code = path_to_python(path)
    print(pygments.highlight(block_code, Python3Lexer(), Terminal256Formatter(style='rrt')))

    # tokens.extend(list(pygments.lex(block_code, lexer=Python3Lexer())))
    # print_formatted_text(PygmentsTokens(tokens))

# --------------------------------------------------------------- # 
Author: nccgroup, Project: fuzzowski, Lines: 11, Source: printers.py

Example 14: print_poc

# Required import: import pygments [as alias]
# Or: from pygments import lex [as alias]
def print_poc(target: Target, path: list,
              receive_data_after_each_request, receive_data_after_fuzz) -> None:
    tokens = []

    exploit_code = get_exploit_code(target, path, receive_data_after_each_request, receive_data_after_fuzz)
    print(pygments.highlight(exploit_code, Python3Lexer(), Terminal256Formatter(style='rrt')))

    # tokens.extend(list(pygments.lex(exploit_code, lexer=Python3Lexer())))
    # print_formatted_text(PygmentsTokens(tokens))

# --------------------------------------------------------------- # 
Author: nccgroup, Project: fuzzowski, Lines: 13, Source: printers.py

Example 15: lex

# Required import: import pygments [as alias]
# Or: from pygments import lex [as alias]
def lex(self):
        # Get lexer for language (use text as fallback)
        try:
            if self.language and str(self.language).lower() != 'none':
                lexer = get_lexer_by_name(self.language.lower(), **self.custom_args)
            else:
                lexer = get_lexer_by_name('text', **self.custom_args)
        except ValueError:
            log.info("no pygments lexer for %s, using 'text'" % self.language)
            # what happens if pygments isn't present?
            lexer = get_lexer_by_name('text')
        return pygments.lex(self.code, lexer) 
Author: rst2pdf, Project: rst2pdf, Lines: 14, Source: pygments_code_block_directive.py


Note: The pygments.lex method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors, and copyright remains with the original authors. Please consult each project's license before using or redistributing the code; do not reproduce this article without permission.