

Python pygments.lex Method Code Examples

This article collects typical usage examples of Python's pygments.lex method. If you are wondering what pygments.lex does, how to call it, or what real-world uses look like, the curated examples below should help; each one also points to the project it was taken from, in case you want to explore how pygments is used there.


The following shows 15 code examples of pygments.lex, ordered by popularity by default.
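
Before turning to the examples, here is a minimal, self-contained sketch of the basic call (not drawn from any of the projects below): pygments.lex takes a source string and a lexer instance and yields (token_type, value) tuples.

import pygments
from pygments.lexers import PythonLexer

code = 'print("hello")\n'
for token_type, value in pygments.lex(code, PythonLexer()):
    print(token_type, repr(value))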

Example 1: print_lexer

# Required import: import pygments [as alias]
# Or: from pygments import lex [as alias]
def print_lexer(
    body: str, lexer: Lexer, label: str = None, prefix: str = None, indent: int = None
):
    if COLORIZE:
        prefix_str = prefix + " " if prefix else ""
        if prefix_str or indent:
            prefix_body = prefix_str + " " * (indent or 0)
            lexer.add_filter(PrefixFilter(prefix=prefix_body))
        tokens = list(pygments.lex(body, lexer=lexer))
        if label:
            fmt_label = [("fg:ansimagenta", label)]
            if prefix_str:
                fmt_label.insert(0, ("", prefix_str))
            print_formatted(FormattedText(fmt_label))
        print_formatted(PygmentsTokens(tokens))
    else:
        print_ext(body, label=label, prefix=prefix) 
Author: hyperledger | Project: aries-cloudagent-python | Lines: 19 | Source: utils.py
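
PrefixFilter above is a helper from the aries-cloudagent-python project. As a rough, hypothetical stand-in, a Pygments filter that injects a prefix at line starts could look like the sketch below (illustrative only, not the project's implementation); a lexer instance would pick it up via lexer.add_filter(SimplePrefixFilter(prefix="| ")).

from pygments.filter import Filter
from pygments.token import Token

class SimplePrefixFilter(Filter):
    """Illustrative filter: emit a prefix token at the start of each line."""

    def __init__(self, prefix="", **options):
        Filter.__init__(self, **options)
        self.prefix = prefix

    def filter(self, lexer, stream):
        at_line_start = True
        for ttype, value in stream:
            if at_line_start and self.prefix:
                yield Token.Text, self.prefix
            # Treat a token that ends with a newline as closing the line.
            at_line_start = value.endswith("\n")
            yield ttype, value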

Example 2: _parse_src

# Required import: import pygments [as alias]
# Or: from pygments import lex [as alias]
def _parse_src(cls, src_contents, src_filename):
        """
        Return a stream of `(token_type, value)` tuples
        parsed from `src_contents` (str)

        Uses `src_filename` to guess the type of file
        so it can highlight syntax correctly.
        """

        # Parse the source into tokens
        try:
            lexer = guess_lexer_for_filename(src_filename, src_contents)
        except ClassNotFound:
            lexer = TextLexer()

        # Ensure that we don't strip newlines from
        # the source file when lexing.
        lexer.stripnl = False

        return pygments.lex(src_contents, lexer) 
Author: Bachmann1234 | Project: diff_cover | Lines: 22 | Source: snippets.py
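
The guess-then-fall-back pattern in example 2 can be exercised on its own; a minimal standalone sketch (not part of diff_cover):

import pygments
from pygments.lexers import guess_lexer_for_filename, TextLexer
from pygments.util import ClassNotFound

def tokens_for_file(filename, contents):
    # Guess a lexer from the file name, falling back to plain text.
    try:
        lexer = guess_lexer_for_filename(filename, contents)
    except ClassNotFound:
        lexer = TextLexer()
    lexer.stripnl = False  # keep newlines in the token stream
    return pygments.lex(contents, lexer)

for ttype, value in tokens_for_file("demo.py", "x = 1\n"):
    print(ttype, repr(value))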

Example 3: __init__

# Required import: import pygments [as alias]
# Or: from pygments import lex [as alias]
def __init__(self, disassembly, lexer=lexer, msg=None):

        self.lines = []
        if isinstance(disassembly, list):
            self.lines = disassembly
        elif disassembly:
            line = []
            if msg:
                current_function = msg.rsplit(None, 1)[-1][:-1]
            else:
                current_function = None
            with currentfunctiontfilter.current_function(current_function):
                for ttype, value in pygments.lex(disassembly, lexer):
                    if '\n' in value:
                        self.lines.append(DisassemblyLine(line))
                        line = []
                    else:
                        line.append((ttype, value))

        self.linenos = {}
        for i, line in enumerate(self.lines):
            self.linenos[line.address] = line, i

        self.lexer = lexer
        self.msg = msg 
Author: wapiflapi | Project: gxf | Lines: 27 | Source: disassembly.py
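
Example 3 leans on gxf internals (DisassemblyLine, currentfunctiontfilter). The core pattern, regrouping a lexed stream into per-line token lists, can be sketched generically; the snippet below mirrors the loop above, including the detail that the newline-bearing token itself is not kept.

import pygments
from pygments.lexers import NasmLexer

def split_into_lines(text, lexer):
    lines, current = [], []
    for ttype, value in pygments.lex(text, lexer):
        if "\n" in value:
            lines.append(current)
            current = []
        else:
            current.append((ttype, value))
    return lines

print(split_into_lines("mov eax, 1\nret\n", NasmLexer()))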

Example 4: tokenize_first_pass

# Required import: import pygments [as alias]
# Or: from pygments import lex [as alias]
def tokenize_first_pass(self, code):
        """Tokenize input code for Comments and Delimit blocks

        Args:
            code (str):
                Input string. Should use `\\n` for end of lines.

        Return:
            (List[Tuple[Token, str]]):
                List of token tuples. The only token types currently used in the
                lexer are:
                - Text (plain text)
                - Comment.Single (// and *)
                - Comment.Special (///)
                - Comment.Multiline (/* */)
                - Keyword.Namespace (code inside #delimit ; block)
                - Keyword.Reserved (; delimiter)
        """
        comment_lexer = CommentAndDelimitLexer(stripall=False, stripnl=False)
        return [x for x in lex(code, comment_lexer)] 
Author: kylebarron | Project: stata_kernel | Lines: 22 | Source: code_manager.py

Example 5: tokenize_second_pass

# Required import: import pygments [as alias]
# Or: from pygments import lex [as alias]
def tokenize_second_pass(self, code):
        """Tokenize clean code for syntactic blocks

        Args:
            code (str):
                Input string. Should have `\\n` as the delimiter. Should have no
                comments. Should use `\\n` for end of lines.

        Return:
            (List[Tuple[Token, str]]):
                List of token tuples. Some of the token types used in the
                lexer are:
                - Text (plain text)
                - Comment.Single (// and *)
                - Comment.Special (///)
                - Comment.Multiline (/* */)
                - Keyword.Namespace (code inside #delimit ; block)
                - Keyword.Reserved (; delimiter)
        """
        block_lexer = StataLexer(stripall=False, stripnl=False)
        return [x for x in lex(code, block_lexer)] 
Author: kylebarron | Project: stata_kernel | Lines: 23 | Source: code_manager.py
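
Both passes above depend on stata_kernel's own lexers (CommentAndDelimitLexer and StataLexer). The underlying idea, lex once and then act on token types, can be sketched with a stock lexer; the snippet below drops comment tokens from Python source and is illustrative only.

from pygments import lex
from pygments.lexers import PythonLexer
from pygments.token import Comment

code = "x = 1  # set x\ny = 2\n"
tokens = list(lex(code, PythonLexer()))
# Token types support subtype tests, so `t in Comment` matches Comment.Single etc.
without_comments = [(t, v) for t, v in tokens if t not in Comment]
print("".join(v for _, v in without_comments))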

Example 6: print_packets

# Required import: import pygments [as alias]
# Or: from pygments import lex [as alias]
def print_packets(path: list, nodes: dict) -> None:
    tokens = []
    for e in path[:-1]:
        node = nodes[e.dst]
        p = node.render()
        line = '{} = {}'.format(node.name.replace('-', '_'), repr(p))
        tokens.extend(list(pygments.lex(line, lexer=Python3Lexer())))

    # p = self.fuzz_node.render()
    node = nodes[path[-1].dst]
    p = node.render()
    line = '{} = {}'.format(node.name.replace('-', '_'), repr(p))

    print(pygments.highlight(line, Python3Lexer(), Terminal256Formatter(style='rrt')))

    # tokens.extend(list(pygments.lex(line, lexer=Python3Lexer())))
    # style = style_from_pygments_cls(get_style_by_name('colorful'))
    # print_formatted_text(PygmentsTokens(tokens), style=style)


# --------------------------------------------------------------- # 
Author: nccgroup | Project: fuzzowski | Lines: 23 | Source: printers.py
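
Example 6 mixes pygments.lex (collecting tokens for later formatting) with pygments.highlight (one-shot ANSI output). The highlight call on its own, as a minimal sketch:

import pygments
from pygments.lexers import Python3Lexer
from pygments.formatters import Terminal256Formatter

line = "packet = request.render()"
print(pygments.highlight(line, Python3Lexer(), Terminal256Formatter(style='rrt')))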

Example 7: __iter__

# Required import: import pygments [as alias]
# Or: from pygments import lex [as alias]
def __iter__(self):
        """parse code string and yield "clasified" tokens
        """
        try:
            tokens = self.lex()
        except IOError:
            log.info("Pygments lexer not found, using fallback")
            # TODO: write message to INFO
            yield ('', self.code)
            return

        for ttype, value in self.join(tokens):
            yield (_get_ttype_class(ttype), value)


# code_block_directive
# --------------------
# :: 
Author: rst2pdf | Project: rst2pdf | Lines: 20 | Source: pygments_code_block_directive.py

Example 8: __iter__

# Required import: import pygments [as alias]
# Or: from pygments import lex [as alias]
def __iter__(self):
        """Parse self.code and yield "classified" tokens.
        """
        if self.lexer is None:
            yield ([], self.code)
            return
        tokens = pygments.lex(self.code, self.lexer)
        for tokentype, value in self.merge(tokens):
            if self.tokennames == 'long': # long CSS class args
                classes = str(tokentype).lower().split('.')
            else: # short CSS class args
                classes = [_get_ttype_class(tokentype)]
            classes = [cls for cls in classes if cls not in unstyled_tokens]
            yield (classes, value) 
Author: skarlekar | Project: faces | Lines: 16 | Source: code_analyzer.py

Example 9: test_bare_class_handler

# Required import: import pygments [as alias]
# Or: from pygments import lex [as alias]
def test_bare_class_handler():
    from pygments.formatters import HtmlFormatter
    from pygments.lexers import PythonLexer
    try:
        lex('test\n', PythonLexer)
    except TypeError as e:
        assert 'lex() argument must be a lexer instance' in str(e)
    else:
        assert False, 'nothing raised'
    try:
        format([], HtmlFormatter)
    except TypeError as e:
        assert 'format() argument must be a formatter instance' in str(e)
    else:
        assert False, 'nothing raised' 
Author: pygments | Project: pygments | Lines: 17 | Source: test_basic_api.py
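
The test above pins down a common pitfall: pygments.lex wants a lexer instance, not the lexer class. As a quick reminder:

from pygments import lex
from pygments.lexers import PythonLexer

tokens = list(lex("x = 1\n", PythonLexer()))  # correct: pass an instance
# lex("x = 1\n", PythonLexer)                 # raises TypeError: lex() argument must be a lexer instance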

Example 10: main

# Required import: import pygments [as alias]
# Or: from pygments import lex [as alias]
def main():
    # Printing a manually constructed list of (Token, text) tuples.
    text = [
        (Token.Keyword, "print"),
        (Token.Punctuation, "("),
        (Token.Literal.String.Double, '"'),
        (Token.Literal.String.Double, "hello"),
        (Token.Literal.String.Double, '"'),
        (Token.Punctuation, ")"),
        (Token.Text, "\n"),
    ]

    print_formatted_text(PygmentsTokens(text))

    # Printing the output of a pygments lexer.
    tokens = list(pygments.lex('print("Hello")', lexer=PythonLexer()))
    print_formatted_text(PygmentsTokens(tokens))

    # With a custom style.
    style = Style.from_dict(
        {
            "pygments.keyword": "underline",
            "pygments.literal.string": "bg:#00ff00 #ffffff",
        }
    )
    print_formatted_text(PygmentsTokens(tokens), style=style) 
Author: prompt-toolkit | Project: python-prompt-toolkit | Lines: 28 | Source: pygments-tokens.py

Example 11: test_print_hunter

# Required import: import pygments [as alias]
# Or: from pygments import lex [as alias]
def test_print_hunter():
    """Verify that there are no print statements in the codebase."""
    root_directory = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
    assert 'tests' in os.listdir(root_directory)
    generator = (os.path.join(r, s) for r, d, f in os.walk(root_directory) if '.egg/' not in r and '/.tox/' not in r
                 for s in f if s.endswith('.py') and not s.startswith('example_'))
    regex_print = re.compile(r'^(.*)(?<!\w)print(\(|\s)(.*)$', re.MULTILINE)

    # Find all potential prints in Python files. May or may not be in strings.
    potential_prints = set()
    for file_path in generator:
        with open(file_path) as f:
            for line in f:
                if regex_print.search(line):
                    potential_prints.add(file_path)
                    break
    if not potential_prints:
        return

    # Perform lexical analysis on the source code and find all valid print statements/function calls.
    current_line = list()
    actual_prints = dict()
    for file_path in potential_prints:
        with open(file_path) as f:
            code = f.read(52428800)  # Up to 50 MiB.
        for token, code_piece in lex(code, get_lexer_by_name('Python')):
            if code_piece == '\n':
                current_line = list()  # References new list, doesn't necessarily remove old list.
                continue
            current_line.append(code_piece)
            if (str(token), code_piece) != ('Token.Keyword', 'print'):
                continue
            # If this is reached, there is a print statement in the library!
            if file_path not in actual_prints:
                actual_prints[file_path] = list()
            actual_prints[file_path].append(current_line)  # Keeps reference to current list() alive.
    actual_prints = dict((f, [''.join(l) for l in lst]) for f, lst in actual_prints.items())
    assert not actual_prints 
Author: Robpol86 | Project: libnl | Lines: 40 | Source: test__meta.py

Example 12: lex

# Required import: import pygments [as alias]
# Or: from pygments import lex [as alias]
def lex(self, code, lex):
        """Return tokenified code.

        Return a list of tuples (scope, word) where word is the word to be
        printed and scope the scope name representing the context.

        :param str code: Code to tokenify.
        :param lex: Lexer to use.
        :return:
        """
        if lex is None:
            if not type(code) is str:
                # if no suitable lexer is found, return decoded code
                code = code.decode("utf-8")
            return (("global", code),)

        words = pygments.lex(code, lex)

        scopes = []
        for word in words:
            token = word[0]
            scope = "global"

            if token in self.token_map.keys():
                scope = self.token_map[token]

            scopes.append((scope, word[1]))
        return scopes 
Author: richrd | Project: suplemon | Lines: 30 | Source: lexer.py
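
The token_map in example 12 is defined elsewhere in suplemon; a trimmed-down, hypothetical mapping shows the same scope-assignment idea outside the class (here using subtype matching rather than the exact-key lookup above):

import pygments
from pygments.lexers import PythonLexer
from pygments.token import Comment, Keyword, String

token_map = {Keyword: "keyword", String: "string", Comment: "comment"}

def to_scopes(code, lexer):
    scopes = []
    for ttype, value in pygments.lex(code, lexer):
        scope = "global"
        for token, name in token_map.items():
            if ttype in token:  # matches subtypes such as String.Double
                scope = name
                break
        scopes.append((scope, value))
    return scopes

print(to_scopes('x = "hi"  # note\n', PythonLexer()))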

Example 13: print_python

# Required import: import pygments [as alias]
# Or: from pygments import lex [as alias]
def print_python(path: list) -> None:
    tokens = []
    block_code = path_to_python(path)
    print(pygments.highlight(block_code, Python3Lexer(), Terminal256Formatter(style='rrt')))

    # tokens.extend(list(pygments.lex(block_code, lexer=Python3Lexer())))
    # print_formatted_text(PygmentsTokens(tokens))

# --------------------------------------------------------------- # 
Author: nccgroup | Project: fuzzowski | Lines: 11 | Source: printers.py

Example 14: print_poc

# Required import: import pygments [as alias]
# Or: from pygments import lex [as alias]
def print_poc(target: Target, path: list,
              receive_data_after_each_request, receive_data_after_fuzz) -> None:
    tokens = []

    exploit_code = get_exploit_code(target, path, receive_data_after_each_request, receive_data_after_fuzz)
    print(pygments.highlight(exploit_code, Python3Lexer(), Terminal256Formatter(style='rrt')))

    # tokens.extend(list(pygments.lex(exploit_code, lexer=Python3Lexer())))
    # print_formatted_text(PygmentsTokens(tokens))

# --------------------------------------------------------------- # 
Author: nccgroup | Project: fuzzowski | Lines: 13 | Source: printers.py

Example 15: lex

# Required import: import pygments [as alias]
# Or: from pygments import lex [as alias]
def lex(self):
        # Get lexer for language (use text as fallback)
        try:
            if self.language and str(self.language).lower() != 'none':
                lexer = get_lexer_by_name(self.language.lower(), **self.custom_args)
            else:
                lexer = get_lexer_by_name('text', **self.custom_args)
        except ValueError:
            log.info("no pygments lexer for %s, using 'text'" % self.language)
            # what happens if pygments isn't present?
            lexer = get_lexer_by_name('text')
        return pygments.lex(self.code, lexer) 
Author: rst2pdf | Project: rst2pdf | Lines: 14 | Source: pygments_code_block_directive.py
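
The name-based lookup with a 'text' fallback in example 15 can be tried standalone; a minimal sketch:

import pygments
from pygments.lexers import get_lexer_by_name

try:
    lexer = get_lexer_by_name('python')
except ValueError:  # ClassNotFound is a ValueError subclass
    lexer = get_lexer_by_name('text')

print(list(pygments.lex("x = 1\n", lexer))[:5])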


Note: The pygments.lex method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Please consult each project's license before distributing or using the code, and do not reproduce this material without permission.