

Python PythonLexer.get_tokens Method Code Examples

This article collects typical usage examples of the Python method pygments.lexers.PythonLexer.get_tokens. If you are wondering what PythonLexer.get_tokens does, how to call it, or how it is used in practice, the curated examples below should help. You can also explore further usage examples of the containing class, pygments.lexers.PythonLexer.


The 15 code examples of PythonLexer.get_tokens below are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
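
For orientation before the examples, here is a minimal sketch of the method itself: get_tokens(text) lexes a string and yields (token_type, value) pairs that together cover the entire input.

from pygments.lexers import PythonLexer

lexer = PythonLexer()
for token_type, value in lexer.get_tokens("x = 1\n"):
    print(token_type, repr(value))
# prints pairs such as (Token.Name, 'x'), (Token.Operator, '='),
# (Token.Literal.Number.Integer, '1'), ...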

Example 1: python_line_tokens

# Required module import: from pygments.lexers import PythonLexer [as alias]
# Or: from pygments.lexers.PythonLexer import get_tokens [as alias]
def python_line_tokens(code_lines, blank_lines=False):
    from pygments.lexers import PythonLexer
    lexer = PythonLexer()
    code_str = "".join(code_lines)
    all_tokens = list(lexer.get_tokens(code_str, unfiltered=True))
    line_tokens = []
    current_line = []

    for t in all_tokens:
        if t[1] == u"\n":
            line_tokens.append(current_line)
            current_line = []
        else:
            current_line.append(t)

    for i, tokens in enumerate(line_tokens):
        # Check for blank line
        line_str = code_lines[i].rstrip()
        if (not blank_lines) and len(line_str.strip()) == 0:
            continue

        for t in tokens:
            kind, value = str(t[0]), t[1]
            yield line_str, i, kind, value, t
Author: synesthesiam, Project: eyecode, Lines: 27, Source: __init__.py
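
A quick usage sketch for the generator above (the sample source lines are invented for illustration):

source_lines = ["def add(a, b):\n", "    return a + b\n"]
for line_str, line_no, kind, value, token in python_line_tokens(source_lines):
    print(line_no, kind, repr(value))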

Example 2: PythonTest

# Required module import: from pygments.lexers import PythonLexer [as alias]
# Or: from pygments.lexers.PythonLexer import get_tokens [as alias]
import unittest

from pygments.lexers import PythonLexer
from pygments.token import Token


class PythonTest(unittest.TestCase):
    def setUp(self):
        self.lexer = PythonLexer()

    def test_cls_builtin(self):
        """
        Tests that a cls token gets interpreted as a Token.Name.Builtin.Pseudo

        """
        fragment = 'class TestClass():\n    @classmethod\n    def hello(cls):\n        pass\n'
        tokens = [
            (Token.Keyword, 'class'),
            (Token.Text, ' '),
            (Token.Name.Class, 'TestClass'),
            (Token.Punctuation, '('),
            (Token.Punctuation, ')'),
            (Token.Punctuation, ':'),
            (Token.Text, '\n'),
            (Token.Text, '    '),
            (Token.Name.Decorator, '@classmethod'),
            (Token.Text, '\n'),
            (Token.Text, '    '),
            (Token.Keyword, 'def'),
            (Token.Text, ' '),
            (Token.Name.Function, 'hello'),
            (Token.Punctuation, '('),
            (Token.Name.Builtin.Pseudo, 'cls'),
            (Token.Punctuation, ')'),
            (Token.Punctuation, ':'),
            (Token.Text, '\n'),
            (Token.Text, '        '),
            (Token.Keyword, 'pass'),
            (Token.Text, '\n'),
        ]
        self.assertEqual(tokens, list(self.lexer.get_tokens(fragment)))
Author: sol, Project: pygments, Lines: 37, Source: test_python.py
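
The same check can be run outside unittest; a minimal sketch asserting that cls is the only Name.Builtin.Pseudo token in a fragment:

from pygments.lexers import PythonLexer
from pygments.token import Token

fragment = 'class C:\n    @classmethod\n    def hello(cls):\n        pass\n'
pseudo = [value for ttype, value in PythonLexer().get_tokens(fragment)
          if ttype is Token.Name.Builtin.Pseudo]
assert pseudo == ['cls']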

Example 3: PygmentsHighlighter

# Required module import: from pygments.lexers import PythonLexer [as alias]
# Or: from pygments.lexers.PythonLexer import get_tokens [as alias]
from pygments.formatters import HtmlFormatter
from pygments.lexers import PythonLexer
# NOTE: QtGui comes from the project's Qt bindings and is not imported in
# this excerpt.


class PygmentsHighlighter(object):
    """ Highlight Python code for a QSyntaxHighlighter: a callable class
        (i.e. a function with state). """

    def __init__(self):
        """ constructor """
        self._lexer = PythonLexer()
        self._formatter = HtmlFormatter()
        self._document = QtGui.QTextDocument()
        self._document.setDefaultStyleSheet(self._formatter.get_style_defs())
        self._format_cache = dict()

    def __call__(self, code):
        """ Makes the class callable; performs the actual highlighting. """
        index = 0
        for token, text in self._lexer.get_tokens(code):
            length = len(text)
            char_format = self._get_format(token)
            # Assumption: _setFormat is supplied by the host QSyntaxHighlighter
            # (it is not defined in this excerpt).
            self._setFormat(index, length, char_format)
            index += length

    def _get_format(self, token):
        """ get the QTextCharFormat for a token """
        if token in self._format_cache:
            return self._format_cache[token]

        # get format from document
        code, html = next(self._formatter._format_lines([(token, u"dummy")]))
        self._document.setHtml(html)
        char_format = QtGui.QTextCursor(self._document).charFormat()

        # cache result
        self._format_cache[token] = char_format

        return char_format
Author: knossos-project, Project: PythonQt, Lines: 37, Source: PygmentsHighlighter.py

Example 4: _analyse_source_code

# Required module import: from pygments.lexers import PythonLexer [as alias]
# Or: from pygments.lexers.PythonLexer import get_tokens [as alias]
    def _analyse_source_code(self, source_code):
        from pygments.lexers import PythonLexer

        lexer = PythonLexer()
        token_source = lexer.get_tokens(source_code)
        for token_type, value in token_source:
            if len(value) > 3 and value.startswith('gl') and ord('A') <= ord(value[2]) <= ord('Z'):
                self.gl_functions.add(value)
            elif len(value) > 3 and value.startswith('GL_'):
                self.gl_constants.add(value)
Author: rougier, Project: PyOpenGLng, Lines: 11, Source: opengl-usage.py
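
The same scan also works outside the class. A standalone sketch (find_gl_names is a hypothetical helper name, and the sample source is invented):

from pygments.lexers import PythonLexer

def find_gl_names(source_code):
    """Collect glXxx call names and GL_ constants from Python source."""
    gl_functions, gl_constants = set(), set()
    for token_type, value in PythonLexer().get_tokens(source_code):
        if len(value) > 3 and value.startswith('gl') and value[2].isupper():
            gl_functions.add(value)
        elif len(value) > 3 and value.startswith('GL_'):
            gl_constants.add(value)
    return gl_functions, gl_constants

print(find_gl_names("glClearColor(0, 0, 0, 1)\nmode = GL_TRIANGLES\n"))
# ({'glClearColor'}, {'GL_TRIANGLES'})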

Example 5: python_token_metrics

# Required module import: from pygments.lexers import PythonLexer [as alias]
# Or: from pygments.lexers.PythonLexer import get_tokens [as alias]
def python_token_metrics(code_lines, indent_size=4):
    import re
    import pandas
    from pygments.lexers import PythonLexer
    indent_regex = re.compile(r"^\s*")

    lexer = PythonLexer()
    code_str = "".join(code_lines)
    all_tokens = list(lexer.get_tokens(code_str, unfiltered=True))
    line_tokens = []
    current_line = []

    for t in all_tokens:
        if t[1] == u"\n":
            line_tokens.append(current_line)
            current_line = []
        else:
            current_line.append(t)

    rows = []
    for i, tokens in enumerate(line_tokens):
        line_number = i + 1

        # Check for blank line
        line_str = code_lines[i].rstrip()
        if len(line_str.strip()) == 0:
            rows.append([line_number, 0, 0, 0, 0, 0, 0])
            continue

        assert len(tokens) > 0, "No tokens for line"

        num_keywords = 0
        num_identifiers = 0
        num_operators = 0
        line_length = len(line_str)
        line_indent = len(indent_regex.findall(line_str)[0]) / indent_size

        # Indentation is not considered
        line_str_noindent = line_str.lstrip()
        line_length_noindent = len(line_str_noindent)
        whitespace_prop = line_str_noindent.count(" ") / float(line_length_noindent)

        for t in tokens:
            kind, value = str(t[0]), t[1]
            if kind.startswith(u"Token.Keyword"):
                num_keywords += 1
            elif kind.startswith(u"Token.Name"):
                num_identifiers += 1
            elif kind.startswith(u"Token.Operator"):
                num_operators += 1

        rows.append([line_number, line_length_noindent, num_keywords,
            num_identifiers, num_operators, whitespace_prop,
            line_indent])

    columns = ["line", "line_length", "keywords",
               "identifiers", "operators", "whitespace_prop",
               "line_indent"]
    return pandas.DataFrame(rows, columns=columns)
Author: synesthesiam, Project: eyecode, Lines: 59, Source: __init__.py
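
A usage sketch for the metrics function above (assumes pandas is installed; the sample lines are invented):

lines = ["def double(x):\n", "\n", "    return x * 2\n"]
df = python_token_metrics(lines)
print(df)  # one row per line: length, keyword/identifier/operator counts,
           # whitespace proportion, and indent level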

Example 6: __init__

# Required module import: from pygments.lexers import PythonLexer [as alias]
# Or: from pygments.lexers.PythonLexer import get_tokens [as alias]
import curses

from pygments.lexers import PythonLexer

# NOTE: standardcols (the default token -> (color, attr) style map) is
# defined elsewhere in the original module and not shown in this excerpt.
class CursesParser:
    def __init__(self, makecolors=True, style=standardcols):
        if makecolors:
            self.makecolorpairs()
        if style is None:
            style = standardcols
        self.style = style
        self.lexer = PythonLexer()
    
    @classmethod
    def makecolorpairs(cls):
        """Initializes curses for colors, makes a color pair of 
        (color, defaultbg) for every color, and initializes self.colorpairs as
        a dictionary with a color -> colorpair mapping"""
        if hasattr(cls, 'colorpairs'):
            return cls.colorpairs
        curses.start_color()
        curses.use_default_colors()
        maxcolors = curses.COLORS
        maxpairs = curses.COLOR_PAIRS
        totalmax = min(maxcolors+1, maxpairs)
        cls.colorpairs = {}
        for colpr in range(1,totalmax):
            if colpr >= maxpairs:
                break
            col = colpr % maxcolors
            curses.init_pair(colpr, col, -1)
            cls.colorpairs[col] = curses.color_pair(colpr)
        return cls.colorpairs
    
    def get_colors(self, raw):
        """Uses pygments to parse the text, and yields (text, color, attr)
        tuples"""
        for tkn, txt in self.lexer.get_tokens(raw):
            notyielded = True
            while notyielded:
                if tkn is None:
                    yield (txt, None, None)
                    notyielded = False
                elif tkn in self.style:
                    col, attr = self.style[tkn]
                    yield (txt, col, attr)
                    notyielded = False
                else:
                    tkn = tkn.parent
    
    def parsetoscr(self, scr, raw):
        """Parses text, and uses scr.addstr to print the text directly."""
        self.makecolorpairs()
        for (txt, col, attr) in self.get_colors(raw):
            fullattr = attr
            if attr is None:
                fullattr = curses.A_NORMAL
            if col is not None: # and col in self.colorpairs:
                fullattr |= self.colorpairs[col]
            scr.addstr(txt, fullattr)
Author: wackywendell, Project: ipycurses, Lines: 58, Source: cursesparser.py
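
A hedged usage sketch: since standardcols is project-specific, this passes an explicit minimal style mapping the root Token (every token type's ancestor) to plain white text, so get_colors resolves every token.

import curses
from pygments.token import Token

def demo(scr):
    minimal_style = {Token: (curses.COLOR_WHITE, curses.A_NORMAL)}
    parser = CursesParser(style=minimal_style)
    parser.parsetoscr(scr, "def greet():\n    print('hi')\n")
    scr.getkey()  # wait for a keypress before the terminal is restored

curses.wrapper(demo)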

Example 7: testWorksAsExpected

# Required module import: from pygments.lexers import PythonLexer [as alias]
# Or: from pygments.lexers.PythonLexer import get_tokens [as alias]
    def testWorksAsExpected(self):
        from pygments.lexers import PythonLexer
        from pygments.token import Token

        code = '''
        """ Increment number of decision points in function."""
        #if tok and tok.text in McCabeKeywords:
        if (tok[0][0] == b'Keyword') and tok[1] in McCabeKeywords:
            self.metrics['mccabe'] += 1
        '''
        result = [
            # line: """ Increment number of decision points in function."""
            (Token.Text, u'        '),
            (Token.Literal.String.Doc, u'""" Increment number of decision points in function."""'),
            (Token.Text, u'\n'),
            # line: #if tok and tok.text in McCabeKeywords:
            (Token.Text, u'        '),
            (Token.Comment, u'#if tok and tok.text in McCabeKeywords:'),
            (Token.Text, u'\n'),
            # line: if (tok[0][0] == b'Keyword') and tok[1] in McCabeKeywords:
            (Token.Text, u'        '), (Token.Keyword, u'if'), (Token.Text, u' '),
            (Token.Punctuation, u'('), (Token.Name, u'tok'),
            (Token.Punctuation, u'['), (Token.Literal.Number.Integer, u'0'), (Token.Punctuation, u']'),
            (Token.Punctuation, u'['), (Token.Literal.Number.Integer, u'0'), (Token.Punctuation, u']'),
            (Token.Text, u' '), (Token.Operator, u'=='), (Token.Text, u' '),
            (Token.Name, u'b'), (Token.Literal.String, u"'"), (Token.Literal.String, u'Keyword'),
            (Token.Literal.String, u"'"), (Token.Punctuation, u')'), (Token.Text, u' '),
            (Token.Operator.Word, u'and'), (Token.Text, u' '), (Token.Name, u'tok'),
            (Token.Punctuation, u'['), (Token.Literal.Number.Integer, u'1'), (Token.Punctuation, u']'),
            (Token.Text, u' '), (Token.Operator.Word, u'in'), (Token.Text, u' '),
            (Token.Name, u'McCabeKeywords'), (Token.Punctuation, u':'), (Token.Text, u'\n'),
            # line: self.metrics['mccabe'] += 1
            (Token.Text, u'            '), (Token.Name.Builtin.Pseudo, u'self'),
            (Token.Operator, u'.'), (Token.Name, u'metrics'),
            (Token.Punctuation, u'['), (Token.Literal.String, u"'"), (Token.Literal.String, u'mccabe'),
            (Token.Literal.String, u"'"), (Token.Punctuation, u']'), (Token.Text, u' '),
            (Token.Operator, u'+'), (Token.Operator, u'='), (Token.Text, u' '),
            (Token.Literal.Number.Integer, u'1'), (Token.Text, u'\n'),
            # trailing blank line
            (Token.Text, u'        '), (Token.Text, u'\n'),
        ]

        lex = PythonLexer()
        tokenList = lex.get_tokens(code)
        self.assertEqual(list(tokenList), result)
Author: GadgetSteve, Project: metrics, Lines: 14, Source: test_lexer.py

Example 8: get_context

# Required module import: from pygments.lexers import PythonLexer [as alias]
# Or: from pygments.lexers.PythonLexer import get_tokens [as alias]
def get_context(string):
    """ Assuming the cursor is at the end of the specified string, get the
        context (a list of names) for the symbol at cursor position.
    """
    from pygments.lexers import PythonLexer
    from pygments.token import Token, is_token_subtype

    lexer = PythonLexer()
    context = []
    reversed_tokens = list(lexer.get_tokens(string))
    reversed_tokens.reverse()

    # Pygments often tacks on a newline when none is specified in the input.
    # Remove this newline.
    if reversed_tokens and reversed_tokens[0][1].endswith('\n') and \
            not string.endswith('\n'):
        reversed_tokens.pop(0)

    current_op = ''
    for token, text in reversed_tokens:

        if is_token_subtype(token, Token.Name):

            # Handle a trailing separator, e.g 'foo.bar.'
            if current_op == '.':
                if not context:
                    context.insert(0, '')

            # Handle non-separator operators and punctuation.
            elif current_op:
                break

            context.insert(0, text)
            current_op = ''

        # Pygments doesn't understand that, e.g., '->' is a single operator
        # in C++. This is why we have to build up an operator from
        # potentially several tokens.
        elif token is Token.Operator or token is Token.Punctuation:
            current_op = text + current_op

        # Break on anything that is not an Operator, Punctuation, or Name.
        else:
            break

    return context
Author: bkerler, Project: PythonQt, Lines: 45, Source: PythonCompleter.py
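
Usage sketch, imagining the cursor sits at the end of each string:

print(get_context("obj.attr.meth"))  # -> ['obj', 'attr', 'meth']
print(get_context("foo(bar."))       # -> ['bar', '']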

Example 9: PygmentsHighlighter

# Required module import: from pygments.lexers import PythonLexer [as alias]
# Or: from pygments.lexers.PythonLexer import get_tokens [as alias]
class PygmentsHighlighter(QtGui.QSyntaxHighlighter):

    """ Syntax highlighter that uses Pygments for parsing. """

    style = DefaultStyle  # Could be MonokaiStyle, FriendlyStyle, etc.

    def __init__(self, parent, lexer=None):
        super(PygmentsHighlighter, self).__init__(parent)

        try:
            self._lexer = get_lexer_by_name(lexer)
        except Exception:
            self._lexer = PythonLexer()

        # Caches for formats and brushes.
        self._brushes = {}
        self._formats = {}

    @handle_exception_in_method
    def highlightBlock(self, qstring):
        """ Highlight a block of text.
        """
        qstring = compat.unicode(qstring)
        prev_data = self.previous_block_data()

        if prev_data is not None:
            self._lexer._epd_state_stack = prev_data.syntax_stack
        elif hasattr(self._lexer, '_epd_state_stack'):
            del self._lexer._epd_state_stack

        index = 0
        # Lex the text using Pygments
        for token, text in self._lexer.get_tokens(qstring):
            length = len(text)
            format = self._get_format(token)
            if format is not None:
                self.setFormat(index, length, format)
            index += length

        if hasattr(self._lexer, '_epd_state_stack'):
            data = BlockUserData(syntax_stack=self._lexer._epd_state_stack)
            self.currentBlock().setUserData(data)

            # There is a bug in PySide: it will crash unless we hold on to
            # the reference a little longer.
            data = self.currentBlock().userData()

            # Clean up for the next go-round.
            del self._lexer._epd_state_stack

    def previous_block_data(self):
        """ Convenience method for returning the previous block's user data.
        """
        return self.currentBlock().previous().userData()

    def _get_format(self, token):
        """ Returns a QTextCharFormat for token or None.
        """
        if token in self._formats:
            return self._formats[token]
        result = None
        for key, value in self.style.style_for_token(token).items():
            if value:
                if result is None:
                    result = QtGui.QTextCharFormat()
                if key == 'color':
                    result.setForeground(self._get_brush(value))
                elif key == 'bgcolor':
                    result.setBackground(self._get_brush(value))
                elif key == 'bold':
                    result.setFontWeight(QtGui.QFont.Bold)
                elif key == 'italic':
                    result.setFontItalic(True)
                elif key == 'underline':
                    result.setUnderlineStyle(
                        QtGui.QTextCharFormat.SingleUnderline)
                elif key == 'sans':
                    result.setFontStyleHint(QtGui.QFont.SansSerif)
                elif key == 'roman':
                    result.setFontStyleHint(QtGui.QFont.Times)
                elif key == 'mono':
                    result.setFontStyleHint(QtGui.QFont.TypeWriter)
                elif key == 'border':
                    # Borders are normally used for errors. We can't do a border
                    # so instead we do a wavy underline
                    result.setUnderlineStyle(
                        QtGui.QTextCharFormat.WaveUnderline)
                    result.setUnderlineColor(self._get_color(value))
        self._formats[token] = result
        return result

    def _get_brush(self, color):
        """ Returns a brush for the color.
        """
        result = self._brushes.get(color)
        if result is None:
            qcolor = self._get_color(color)
            result = QtGui.QBrush(qcolor)
            self._brushes[color] = result

#......... part of the code omitted here .........
Author: fabioz, Project: pyvmmonitor-qt, Lines: 103, Source: pygments_highlighter.py

Example 10: _lex_python_result

# Required module import: from pygments.lexers import PythonLexer [as alias]
# Or: from pygments.lexers.PythonLexer import get_tokens [as alias]
def _lex_python_result(tb):
    " Return the token stream for a Python string. "
    from pygments.lexers import PythonLexer
    lexer = PythonLexer()
    return lexer.get_tokens(tb)
Author: nickos556, Project: ptpython, Lines: 6, Source: repl.py
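
Usage sketch with an invented traceback string:

tb = "Traceback (most recent call last):\nZeroDivisionError: division by zero\n"
for token_type, text in _lex_python_result(tb):
    print(token_type, repr(text))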

Example 11: Console

# Required module import: from pygments.lexers import PythonLexer [as alias]
# Or: from pygments.lexers.PythonLexer import get_tokens [as alias]

#......... part of the code omitted here .........
            it = self.prompt_frame.begin()
            while not it.atEnd():
                bl = it.currentBlock()
                t = unicode(bl.text())
                if strip_prompt_strings:
                    t = t[self.prompt_len:]
                yield t
                it += 1

    def set_prompt(self, lines):
        self.render_current_prompt(lines)

    def clear_current_prompt(self):
        if self.prompt_frame is None:
            c = self.root_frame.lastCursorPosition()
            self.prompt_frame = c.insertFrame(self.prompt_frame_format)
            self.setTextCursor(c)
        else:
            c = self.prompt_frame.firstCursorPosition()
            self.setTextCursor(c)
            c.setPosition(self.prompt_frame.lastPosition(), c.KeepAnchor)
            c.removeSelectedText()
            c.setPosition(self.prompt_frame.firstPosition())

    def render_current_prompt(self, lines=None, restore_cursor=False):
        row, col = self.cursor_pos
        cp = list(self.prompt()) if lines is None else lines
        self.clear_current_prompt()

        for i, line in enumerate(cp):
            start = i == 0
            end = i == len(cp) - 1
            self.formatter.render_prompt(not start, self.cursor)
            self.formatter.render(self.lexer.get_tokens(line), self.cursor)
            if not end:
                self.cursor.insertBlock()

        if row > -1 and restore_cursor:
            self.cursor_pos = (row, col)

        self.ensureCursorVisible()

    # }}}

    # Non-prompt Rendering {{{

    def render_block(self, text, restore_prompt=True):
        self.formatter.render(self.lexer.get_tokens(text), self.cursor)
        self.cursor.insertBlock()
        self.cursor.movePosition(self.cursor.End)
        if restore_prompt:
            self.render_current_prompt()

    def show_error(self, is_syntax_err, tb, controller=None):
        if self.prompt_frame is not None:
            # At a prompt, so redirect output
            return prints(tb, end='')
        try:
            self.buf.append(tb)
            if is_syntax_err:
                self.formatter.render_syntax_error(tb, self.cursor)
            else:
                self.formatter.render(self.tb_lexer.get_tokens(tb), self.cursor)
        except:
            prints(tb, end='')
        self.ensureCursorVisible()
Author: AEliu, Project: calibre, Lines: 70, Source: console.py

Example 12: PygmentsHighlighter

# Required module import: from pygments.lexers import PythonLexer [as alias]
# Or: from pygments.lexers.PythonLexer import get_tokens [as alias]
class PygmentsHighlighter(QtGui.QSyntaxHighlighter):
    """ Syntax highlighter that uses Pygments for parsing. """

    #---------------------------------------------------------------------------
    # 'QSyntaxHighlighter' interface
    #---------------------------------------------------------------------------

    def __init__(self, parent, lexer=None):
        super(PygmentsHighlighter, self).__init__(parent)

        self._document = self.document()
        self._formatter = HtmlFormatter(nowrap=True)
        self.set_style('default')
        if lexer is not None:
            self._lexer = lexer
        else:
            if PY3:
                self._lexer = Python3Lexer()
            else:
                self._lexer = PythonLexer()

    def highlightBlock(self, string):
        """ Highlight a block of text.
        """
        prev_data = self.currentBlock().previous().userData()
        if prev_data is not None:
            self._lexer._saved_state_stack = prev_data.syntax_stack
        elif hasattr(self._lexer, '_saved_state_stack'):
            del self._lexer._saved_state_stack

        # Lex the text using Pygments
        index = 0
        for token, text in self._lexer.get_tokens(string):
            length = len(text)
            self.setFormat(index, length, self._get_format(token))
            index += length

        if hasattr(self._lexer, '_saved_state_stack'):
            data = PygmentsBlockUserData(
                syntax_stack=self._lexer._saved_state_stack)
            self.currentBlock().setUserData(data)
            # Clean up for the next go-round.
            del self._lexer._saved_state_stack

    #---------------------------------------------------------------------------
    # 'PygmentsHighlighter' interface
    #---------------------------------------------------------------------------

    def set_style(self, style):
        """ Sets the style to the specified Pygments style.
        """
        if isinstance(style, string_types):
            style = get_style_by_name(style)
        self._style = style
        self._clear_caches()

    def set_style_sheet(self, stylesheet):
        """ Sets a CSS stylesheet. The classes in the stylesheet should
        correspond to those generated by:

            pygmentize -S <style> -f html

        Note that 'set_style' and 'set_style_sheet' completely override each
        other, i.e. they cannot be used in conjunction.
        """
        self._document.setDefaultStyleSheet(stylesheet)
        self._style = None
        self._clear_caches()

    #---------------------------------------------------------------------------
    # Protected interface
    #---------------------------------------------------------------------------

    def _clear_caches(self):
        """ Clear caches for brushes and formats.
        """
        self._brushes = {}
        self._formats = {}

    def _get_format(self, token):
        """ Returns a QTextCharFormat for token or None.
        """
        if token in self._formats:
            return self._formats[token]

        if self._style is None:
            result = self._get_format_from_document(token, self._document)
        else:
            result = self._get_format_from_style(token, self._style)

        self._formats[token] = result
        return result

    def _get_format_from_document(self, token, document):
        """ Returns a QTextCharFormat for token by rendering it through the
        HTML formatter and reading the char format back from the document.
        """
        code, html = next(self._formatter._format_lines([(token, u'dummy')]))
        self._document.setHtml(html)
        return QtGui.QTextCursor(self._document).charFormat()

#......... part of the code omitted here .........
Author: SylvainCorlay, Project: qtconsole, Lines: 103, Source: pygments_highlighter.py

Example 13: make_code_aois

# Required module import: from pygments.lexers import PythonLexer [as alias]
# Or: from pygments.lexers.PythonLexer import get_tokens [as alias]
def make_code_aois(code_file, font_size=(14, 25), line_offset=5,
        syntax_categories=SYNTAX_CATEGORIES):
    import pandas

    aoi_df = pandas.DataFrame(columns=("aoi_kind", "name",
        "x", "y", "width", "height", "note"))

    # Needed for syntax-based AOIs
    from pygments.lexers import PythonLexer
    lexer = PythonLexer()

    # Parse each file and generate AOIs
    code_lines = code_file.readlines()
    code_str = "".join(code_lines)

    # Add extra newline token to trigger last AOI block
    code_lines += [""]
    tokens = list(lexer.get_tokens(code_str, unfiltered=True)) + [("Token.Text", u"\n")]

    col = 0            # Current column
    line = 0           # Current line number
    block_start = 0    # Current whitespace separated block
    last_blank = False # Was last line blank?
    block_lines = []   # Lines in current block

    for t in tokens:
        kind = str(t[0])
        val = t[1]

        # Check if end of line
        if val == u"\n":
            line_str = code_lines[line].rstrip()
            if len(line_str.strip()) > 0:
                # Non-blank line: add AOI for whole line
                aoi_df = aoi_df.append({
                    "aoi_kind" : "line",
                    "name"     : "line {0}".format(line + 1),
                    "x"        : 0,
                    "y"        : (line * font_size[1]) + (line * line_offset) - (line_offset / 2),
                    "width"    : len(line_str) * font_size[0],
                    "height"   : font_size[1] + line_offset - 1,
                    "note"     : line_str
                }, ignore_index=True)

                # Add to current block
                last_blank = False
                block_lines.append(line_str)
            else:
                # Blank line
                if not last_blank:
                    # Add AOI for whitespace separated block of lines
                    aoi_df = aoi_df.append({
                        "aoi_kind" : "block",
                        "name"     : "lines {0}-{1}".format(block_start + 1, line + 1),
                        "x"        : 0,
                        "y"        : (block_start * font_size[1]) + (block_start * line_offset) - (line_offset / 2),
                        "width"    : max([len(l) for l in block_lines]) * font_size[0],
                        "height"   : len(block_lines) * (font_size[1] + line_offset),
                        "note"     : "\n".join(block_lines)
                    }, ignore_index=True)

                # Reset block variables
                last_blank = True
                block_lines = []
                block_start = line + 1

            # Next line
            col = 0
            line += 1
            continue

        # Add AOI for syntax token
        if (kind == "Token.Text") and (col == 0):
            kind += ".Indentation"

        aoi_df = aoi_df.append({
            "aoi_kind" : "syntax",
            "name"     : syntax_categories[kind],
            "x"        : col * font_size[0],
            "y"        : (line * font_size[1]) + (line * line_offset) - (line_offset / 2),
            "width"    : len(val) * font_size[0],
            "height"   : font_size[1] + line_offset - 1,
            "note"     : val
        }, ignore_index=True)

        col += len(val)

    return aoi_df
Author: synesthesiam, Project: eyecode, Lines: 89, Source: aoi.py
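
A usage sketch with an in-memory file. SYNTAX_CATEGORIES (a token-kind -> category-name mapping) is not shown in the excerpt, so a permissive stub is substituted here; also note that DataFrame.append was removed in pandas 2.0, so the excerpt as written needs an older pandas.

import io
from collections import defaultdict

stub_categories = defaultdict(lambda: "other")  # stand-in for SYNTAX_CATEGORIES
code = io.StringIO("x = 1\n\ny = x + 1\n")
aois = make_code_aois(code, syntax_categories=stub_categories)
print(aois[["aoi_kind", "name", "width", "height"]])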

Example 14: python_line_categories

# Required module import: from pygments.lexers import PythonLexer [as alias]
# Or: from pygments.lexers.PythonLexer import get_tokens [as alias]
def python_line_categories(code_lines):
    from pygments.lexers import PythonLexer

    lexer = PythonLexer()
    code_str = "".join(code_lines)
    all_tokens = list(lexer.get_tokens(code_str, unfiltered=True))
    line_tokens = []
    current_line = []

    for t in all_tokens:
        if t[1] == u"\n":
            line_tokens.append(current_line)
            current_line = []
        else:
            current_line.append(t)

    line_categories = []
    for i, tokens in enumerate(line_tokens):
        # Check for blank line
        line_str = code_lines[i].rstrip()
        if len(line_str.strip()) == 0:
            line_categories.append(["blank line"])
            continue

        assert len(tokens) > 0, "No tokens for line"
        categories = []
        last_kind, last_value = None, None

        for t in tokens:
            kind, value = str(t[0]), t[1]

            if kind == u"Token.Keyword" and value == u"def":
                categories.append("function definition")
            elif kind == u"Token.Keyword" and value == u"if":
                categories.append("if statement")
            elif kind == u"Token.Keyword" and value == u"for":
                categories.append("for loop")
            elif kind == u"Token.Keyword" and value == u"return":
                categories.append("return statement")
            elif kind == u"Token.Keyword" and value == u"print":
                categories.append("print statement")
            elif kind == u"Token.Keyword" and value == u"class":
                categories.append("class definition")
            elif kind == u"Token.Operator" and value == u"=":
                categories.append("assignment")
            elif kind == u"Token.Operator" and value == u".":
                categories.append("object access")
            elif kind == u"Token.Operator" and value in [u"+", u"*"]:
                categories.append("mathematical operation")
            elif last_kind == u"Token.Operator" and last_value == u"-" and kind == "Token.Whitespace":
                categories.append("mathematical operation")
            elif kind == u"Token.Operator" and value in [u"<", u">"]:
                categories.append("comparison")
            elif last_kind == u"Token.Name" and kind == "Token.Punctuation" and value == u"(":
                categories.append("function call")
            elif kind == "Token.Punctuation" and value == u"[":
                categories.append("list creation")

            last_kind, last_value = kind, value

        if len(categories) == 0:
            categories.append("unknown")

        line_categories.append(set(categories))

    return line_categories
Author: synesthesiam, Project: eyecode, Lines: 68, Source: __init__.py
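
Usage sketch with invented lines; blank lines come back as ["blank line"], other lines as a set of categories:

lines = ["def double(x):\n", "    return x * 2\n", "\n", "x = double(4)\n"]
for number, cats in enumerate(python_line_categories(lines), start=1):
    print(number, sorted(cats))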

Example 15: PythonLexer

# Required module import: from pygments.lexers import PythonLexer [as alias]
# Or: from pygments.lexers.PythonLexer import get_tokens [as alias]
"""
import re
import sys
from pygments.lexers import PythonLexer
from pygments.token import Punctuation, Comment

if __name__ == "__main__":
    lexer = PythonLexer()
    pattern = re.compile(r".*(given|and_|when|then)\((.+)")
    steps = []
    print "# -*- coding: UTF-8 -*-"
    print "from pyfeature import step"
    for l in sys.stdin.xreadlines():
        m = pattern.match(l)
        if m:
            s = lexer.get_tokens(m.group(2))
            desc = ""
            for t in s:
                if t[0] != Punctuation and t[1] != u",":
                    desc += "".join(chr(c) for c in [ord(b) for b in t[1]])
                else:
                    break
            steps.append([desc.strip()])
            omit_next_token = False
            for t in s:
                if t[1] == "=":
                    omit_next_token = True
                elif not omit_next_token and t[1] != "," and t[1] != ")" and t[0] != Comment:
                    steps[-1].append(t[1])
                else:
                    omit_next_token = False
Author: xiechao06, Project: pyfeature, Lines: 33, Source: pyfeature_step_gen.py


Note: The pygments.lexers.PythonLexer.get_tokens examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright for the source code remains with the original authors. For distribution and use, refer to each project's license. Do not reproduce without permission.