

Python Token.Operator Code Examples

This article collects typical usage examples of pygments.token.Token.Operator in Python. If you are struggling with questions such as: What exactly does Token.Operator do? How is it used? What do real examples look like? Then the curated code samples below should help. (Strictly speaking, Token.Operator is a standard pygments token type, accessed as an attribute of pygments.token.Token, rather than a method.) You can also explore further usage examples of pygments.token.Token, where it is defined.


The sections below present 15 code examples of Token.Operator, sorted by popularity by default. You can upvote any example you like or find useful; your votes help the system recommend better Python code samples.
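Before walking through the examples, here is a minimal sketch of where Token.Operator shows up in practice: it is one of pygments' standard token types, and lexers emit it for operator characters in a token stream (the choice of PythonLexer below is arbitrary):

from pygments.lexers import PythonLexer
from pygments.token import Token

# Lex a tiny fragment and print only the operator tokens.
for index, ttype, value in PythonLexer().get_tokens_unprocessed("a + b"):
    if ttype is Token.Operator:
        print(index, repr(value))  # -> 2 '+'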

Example 1: color_mapping

# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Operator [as alias]
from prompt_toolkit.styles import style_from_dict  # prompt_toolkit 1.x, as used by azure-cli-shell


def color_mapping(curr_completion, completion, prompt, command, subcommand,
                  param, text, line, example, toolbar):

    return style_from_dict({
        # Completion colors
        Token.Menu.Completions.Completion.Current: curr_completion,
        Token.Menu.Completions.Completion: completion,
        Token.Menu.Completions.ProgressButton: 'bg:#b78991',
        Token.Menu.Completions.ProgressBar: 'bg:#ffc0cb',

        Token.Az: prompt,
        Token.Prompt.Arg: prompt,

        # Pretty Words
        Token.Keyword: command,
        Token.Keyword.Declaration: subcommand,
        Token.Name.Class: param,
        Token.Text: text,

        Token.Line: line,
        Token.Number: example,
        # toolbar
        Token.Operator: toolbar,
        Token.Toolbar: toolbar
    }) 
Developer: Azure, Project: azure-cli-shell, Lines: 27, Source: color_styles.py
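A hedged usage sketch for color_mapping (the hex color strings below are invented placeholders, not the shell's actual palette; any prompt_toolkit style strings would do):

# Hypothetical palette purely for illustration.
style = color_mapping(
    curr_completion='bg:#7c2c80 #ffffff',
    completion='bg:#b7b7b7 #000000',
    prompt='#7c2c80',
    command='#5f9ea0',
    subcommand='#98fb98',
    param='#9370db',
    text='#ffffff',
    line='#e500e5',
    example='#3d79db',
    toolbar='bg:#000000 #ffffff',
)
# The returned style object is then passed to the prompt_toolkit application.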

Example 2: test_gcd_expr

# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Operator [as alias]
def test_gcd_expr(lexer):
    fragment = u'1^3+(5-5)*gcd(a,b)\n'
    tokens = [
        (Token.Number.Integer, u'1'),
        (Token.Operator, u'^'),
        (Token.Literal.Number.Integer, u'3'),
        (Token.Operator, u'+'),
        (Token.Punctuation, u'('),
        (Token.Literal.Number.Integer, u'5'),
        (Token.Operator, u'-'),
        (Token.Literal.Number.Integer, u'5'),
        (Token.Punctuation, u')'),
        (Token.Operator, u'*'),
        (Token.Name, u'gcd'),
        (Token.Punctuation, u'('),
        (Token.Name, u'a'),
        (Token.Operator, u','),
        (Token.Name, u'b'),
        (Token.Punctuation, u')'),
        (Token.Text, u'\n')
    ]
    assert list(lexer.get_tokens(fragment)) == tokens 
Developer: pygments, Project: pygments, Lines: 24, Source: test_ezhil.py
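Examples 2 through 11 are pytest functions that receive a lexer (or lexer_peg) fixture. A sketch of how such a fixture is typically defined at module level in pygments' test suite, using example 2's Ezhil lexer (the fixture body here is an assumption, though it matches the common pattern):

import pytest
from pygments.lexers import EzhilLexer

@pytest.fixture(scope='module')
def lexer():
    yield EzhilLexer()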

Example 3: test_escaped_bracestring

# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Operator [as alias]
def test_escaped_bracestring(lexer):
    fragment = u'str.gsub(%r{\\\\\\\\}, "/")\n'
    tokens = [
        (Token.Name, u'str'),
        (Token.Operator, u'.'),
        (Token.Name, u'gsub'),
        (Token.Punctuation, u'('),
        (Token.Literal.String.Regex, u'%r{'),
        (Token.Literal.String.Regex, u'\\\\'),
        (Token.Literal.String.Regex, u'\\\\'),
        (Token.Literal.String.Regex, u'}'),
        (Token.Punctuation, u','),
        (Token.Text, u' '),
        (Token.Literal.String.Double, u'"'),
        (Token.Literal.String.Double, u'/'),
        (Token.Literal.String.Double, u'"'),
        (Token.Punctuation, u')'),
        (Token.Text, u'\n'),
    ]
    assert list(lexer.get_tokens(fragment)) == tokens 
Developer: pygments, Project: pygments, Lines: 22, Source: test_ruby.py

Example 4: test_operator_multiple_space

# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Operator [as alias]
def test_operator_multiple_space(lexer):
    """
    Test that multiple spaces around an operator do not get lexed as a string.
    """
    fragment = 'x  > 100;\n'
    tokens = [
        (Token.Name, 'x'),
        (Token.Text, ' '),
        (Token.Text, ' '),
        (Token.Operator, '>'),
        (Token.Text, ' '),
        (Token.Literal.Number.Integer, '100'),
        (Token.Punctuation, ';'),
        (Token.Text, '\n'),
    ]
    assert list(lexer.get_tokens(fragment)) == tokens 
Developer: pygments, Project: pygments, Lines: 18, Source: test_matlab.py

Example 5: test_string_escaping_run

# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Operator [as alias]
def test_string_escaping_run(lexer):
    fragment = '<?php $x="{\\""; ?>\n'
    tokens = [
        (Token.Comment.Preproc, '<?php'),
        (Token.Text, ' '),
        (Token.Name.Variable, '$x'),
        (Token.Operator, '='),
        (Token.Literal.String.Double, '"'),
        (Token.Literal.String.Double, '{'),
        (Token.Literal.String.Escape, '\\"'),
        (Token.Literal.String.Double, '"'),
        (Token.Punctuation, ';'),
        (Token.Text, ' '),
        (Token.Comment.Preproc, '?>'),
        (Token.Other, '\n'),
    ]
    assert list(lexer.get_tokens(fragment)) == tokens 
Developer: pygments, Project: pygments, Lines: 19, Source: test_php.py

Example 6: test_peg_basic

# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Operator [as alias]
def test_peg_basic(lexer_peg):
    fragment = u'rule<-("terminal"/nonterminal/[cls])*\n'
    tokens = [
        (Token.Name.Class, u'rule'),
        (Token.Operator, u'<-'),
        (Token.Punctuation, u'('),
        (Token.String.Double, u'"terminal"'),
        (Token.Operator, u'/'),
        (Token.Name.Class, u'nonterminal'),
        (Token.Operator, u'/'),
        (Token.Punctuation, u'['),
        (Token.String, u'cls'),
        (Token.Punctuation, u']'),
        (Token.Punctuation, u')'),
        (Token.Operator, u'*'),
        (Token.Text, u'\n'),
    ]
    assert list(lexer_peg.get_tokens(fragment)) == tokens 
Developer: pygments, Project: pygments, Lines: 20, Source: test_grammar_notation.py

Example 7: test_peg_modified_strings

# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Operator [as alias]
def test_peg_modified_strings(lexer_peg):
    # see for example:
    # - http://textx.github.io/Arpeggio/
    # - https://nim-lang.org/docs/pegs.html
    # - https://github.com/erikrose/parsimonious
    fragment = u'~"regex" i"insensitive" "multimod"ilx ("not modified")\n'
    tokens = [
        # can't handle parsimonious-style regex while ~ is a cut operator
        (Token.Operator, u'~'),
        (Token.String.Double, u'"regex"'),
        (Token.Text, u' '),
        (Token.String.Double, u'i"insensitive"'),
        (Token.Text, u' '),
        (Token.String.Double, u'"multimod"ilx'),
        (Token.Text, u' '),
        (Token.Punctuation, u'('),
        (Token.String.Double, u'"not modified"'),
        (Token.Punctuation, u')'),
        (Token.Text, u'\n'),
    ]
    assert list(lexer_peg.get_tokens(fragment)) == tokens 
Developer: pygments, Project: pygments, Lines: 23, Source: test_grammar_notation.py

Example 8: test_keywords_with_dollar

# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Operator [as alias]
def test_keywords_with_dollar(lexer):
    fragment = u'DIM x\nx = RIGHT$("abc", 1)\n'
    expected = [
        (Token.Keyword.Declaration, u'DIM'),
        (Token.Text.Whitespace, u' '),
        (Token.Name.Variable.Global, u'x'),
        (Token.Text, u'\n'),
        (Token.Name.Variable.Global, u'x'),
        (Token.Text.Whitespace, u' '),
        (Token.Operator, u'='),
        (Token.Text.Whitespace, u' '),
        (Token.Keyword.Reserved, u'RIGHT$'),
        (Token.Punctuation, u'('),
        (Token.Literal.String.Double, u'"abc"'),
        (Token.Punctuation, u','),
        (Token.Text.Whitespace, u' '),
        (Token.Literal.Number.Integer.Long, u'1'),
        (Token.Punctuation, u')'),
        (Token.Text, u'\n'),
    ]
    assert list(lexer.get_tokens(fragment)) == expected 
Developer: pygments, Project: pygments, Lines: 23, Source: test_qbasiclexer.py

Example 9: test_nested_curly

# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Operator [as alias]
def test_nested_curly(lexer):
    fragment = u'{templateFunction param={anotherFunction} param2=$something}\n'
    tokens = [
        (Token.Comment.Preproc, u'{'),
        (Token.Name.Function, u'templateFunction'),
        (Token.Text, u' '),
        (Token.Name.Attribute, u'param'),
        (Token.Operator, u'='),
        (Token.Comment.Preproc, u'{'),
        (Token.Name.Attribute, u'anotherFunction'),
        (Token.Comment.Preproc, u'}'),
        (Token.Text, u' '),
        (Token.Name.Attribute, u'param2'),
        (Token.Operator, u'='),
        (Token.Name.Variable, u'$something'),
        (Token.Comment.Preproc, u'}'),
        (Token.Other, u'\n'),
    ]
    assert list(lexer.get_tokens(fragment)) == tokens 
Developer: pygments, Project: pygments, Lines: 21, Source: test_smarty.py

Example 10: test_function_call

# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Operator [as alias]
def test_function_call(lexer):
    fragment = u'selected("Sound", i+(a*b))\n'
    tokens = [
        (Token.Name.Function, u'selected'),
        (Token.Punctuation, u'('),
        (Token.Literal.String, u'"'),
        (Token.Literal.String, u'Sound'),
        (Token.Literal.String, u'"'),
        (Token.Punctuation, u','),
        (Token.Text, u' '),
        (Token.Text, u'i'),
        (Token.Operator, u'+'),
        (Token.Text, u'('),
        (Token.Text, u'a'),
        (Token.Operator, u'*'),
        (Token.Text, u'b'),
        (Token.Text, u')'),
        (Token.Punctuation, u')'),
        (Token.Text, u'\n'),
    ]
    assert list(lexer.get_tokens(fragment)) == tokens 
Developer: pygments, Project: pygments, Lines: 23, Source: test_praat.py

Example 11: test_inner_class

# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Operator [as alias]
def test_inner_class(lexer):
    fragment = "class InnerClass:\n\tvar a = 5"
    tokens = [
        (Token.Keyword, "class"),
        (Token.Text, " "),
        (Token.Name, "InnerClass"),
        (Token.Punctuation, ":"),
        (Token.Text, "\n"),
        (Token.Text, "\t"),
        (Token.Keyword, "var"),
        (Token.Text, " "),
        (Token.Name, "a"),
        (Token.Text, " "),
        (Token.Operator, "="),
        (Token.Text, " "),
        (Token.Literal.Number.Integer, "5"),
        (Token.Text, "\n"),
    ]
    assert list(lexer.get_tokens(fragment)) == tokens 
Developer: pygments, Project: pygments, Lines: 21, Source: test_gdscript.py

Example 12: get_tokens_unprocessed

# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Operator [as alias]
def get_tokens_unprocessed(self, text):
    for index, token, value in super().get_tokens_unprocessed(text):
        if token is Token.Name.Variable and value in self.EXTRA_BUILTINS:
            yield index, Token.Name.Builtin, value
        elif token is Token.Name.Variable and value in self.EXTRA_KEYWORDS:
            yield index, Token.Keyword.Type, value
        elif token is Token.Punctuation:
            # Re-tag arithmetic characters inside punctuation runs as
            # operators, emitting one token per character.
            for c in value:
                if c in "+-*/%^&":
                    yield index, Token.Operator, c
                else:
                    yield index, token, c
                index += 1
        else:
            yield index, token, value
Developer: wapiflapi, Project: gxf, Lines: 17, Source: disassembly.py
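The method above is an excerpt from a lexer subclass in gxf. A minimal sketch of the surrounding context (the base lexer and the word sets are assumptions for illustration; gxf defines its own):

from pygments.lexers import NasmLexer
from pygments.token import Token

class DisasmLexer(NasmLexer):  # hypothetical base lexer choice
    # Hypothetical word sets; the real ones live in gxf's source.
    EXTRA_BUILTINS = frozenset({"rip", "rsp", "rbp"})
    EXTRA_KEYWORDS = frozenset({"BYTE", "WORD", "DWORD", "QWORD"})

    def get_tokens_unprocessed(self, text):
        for index, token, value in super().get_tokens_unprocessed(text):
            if token is Token.Name.Variable and value in self.EXTRA_BUILTINS:
                yield index, Token.Name.Builtin, value
            else:
                yield index, token, value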

Example 13: filter

# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Operator [as alias]
def filter(self, lexer, stream):
    maybe = False
    for ttype, value in stream:
        if maybe:
            maybe = False
            # gdb-style disassembly writes in-function offsets as "<+N>";
            # when "<" is directly followed by "+", inject the current
            # function's name so the location reads "<func+N>".
            if ttype is Token.Operator and value == "+":
                yield Token.Name.Variable, self._current_function
        elif ttype is Token.Operator and value == "<":
            maybe = True
        yield ttype, value
Developer: wapiflapi, Project: gxf, Lines: 12, Source: disassembly.py
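This method implements pygments' Filter interface; self._current_function is state kept elsewhere in gxf. A minimal sketch of how such a filter could be constructed and attached to a lexer (the class name and constructor wiring are assumptions; Filter and add_filter are pygments' real API):

from pygments.filter import Filter
from pygments.lexers import CObjdumpLexer
from pygments.token import Token

class CurrentFunctionFilter(Filter):
    """Inject the current function name into gdb-style "<+N>" locations."""

    def __init__(self, current_function, **options):
        super().__init__(**options)
        self._current_function = current_function

    def filter(self, lexer, stream):
        maybe = False
        for ttype, value in stream:
            if maybe:
                maybe = False
                if ttype is Token.Operator and value == "+":
                    yield Token.Name.Variable, self._current_function
            elif ttype is Token.Operator and value == "<":
                maybe = True
            yield ttype, value

lexer = CObjdumpLexer()
lexer.add_filter(CurrentFunctionFilter("main"))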

Example 14: _convert_intel

# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Operator [as alias]
def _convert_intel(self, tokens):
    # Example of the net effect: the operand "[rip]" (with
    # self.length == 3) converts to tokens spelling *(void**)(($rip+3)).

    relative = False
    ctokens = []

    for t, v in tokens:

        if v == "[":
            # An Intel memory operand "[...]" becomes a C-style
            # dereference: *(void **)( ... ).
            ctokens.append((Token.Operator, "*"))
            ctokens.append((Token.Punctuation, "("))

            # should convert types to cast here.
            ctokens.append((Token.Keyword.Type, "void"))
            ctokens.append((Token.Operator, "*"))

            ctokens.append((Token.Operator, "*"))
            ctokens.append((Token.Punctuation, ")"))
            ctokens.append((Token.Punctuation, "("))

        elif v == "]":
            ctokens.append((Token.Punctuation, ")"))

        elif t in (Token.Name.Variable, Token.Name.Builtin):
            if v in ("eip", "rip", "pc"):
                # The program counter is relative to the next instruction,
                # so add this instruction's length.
                ctokens.append((Token.Punctuation, "("))
                ctokens.append((t, "$%s" % v))
                ctokens.append((Token.Operator, "+"))
                ctokens.append((Token.Literal.Number, "%d" % self.length))
                ctokens.append((Token.Punctuation, ")"))
            else:
                # Other registers become gdb convenience variables: $reg.
                ctokens.append((t, "$%s" % v))

        else:
            ctokens.append((t, v))

    return ctokens, relative
Developer: wapiflapi, Project: gxf, Lines: 39, Source: disassembly.py

Example 15: get_expression

# Required import: from pygments.token import Token [as alias]
# Or: from pygments.token.Token import Operator [as alias]
def get_expression(self):

    types = []
    tokens = []

    # Advance to the first whitespace token after the mnemonic.
    start = self.instidx
    while self.tokens[start][0] is not Token.Text:
        start += 1

    # Collect the operand tokens, keeping type keywords separate.
    for t, v in self.tokens[start:]:
        if tokens and t is Token.Text:
            break
        if t is Token.Text:
            pass
        elif t is Token.Keyword.Type:
            types.append((t, v))
        else:
            tokens.append((t, v))

    if disassemblyflavor.value == "intel":
        ctokens, relative = self._convert_intel(tokens)
    elif disassemblyflavor.value == "att":
        ctokens, relative = self._convert_att(tokens)
    else:
        assert False, "not intel or att."

    if relative:
        if not len(ctokens) == 1 and ctokens[0][0] is Token.Number.Integer:
            ctokens = [(Token.Name.Variable, "$pc"),
                       (Token.Operator, "+")] + ctokens

    return gxf.Expression(ctokens)
Developer: wapiflapi, Project: gxf, Lines: 36, Source: disassembly.py


Note: the pygments.token.Token.Operator examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by many developers; copyright remains with the original authors, and distribution or use should follow each project's license. Do not repost without permission.