This article collects typical usage examples of pygments.token.Operator in Python. If you have been wondering what token.Operator is for, how to use it, and what it looks like in practice, the curated examples below should help; you can also read further about the enclosing pygments.token module. (Strictly speaking, Operator is a token type defined in pygments.token, not a method.)
The following 15 code examples are shown, sorted by popularity by default.
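As a quick orientation before the examples, here is a minimal, self-contained sketch of how Operator tokens show up when lexing a snippet. The choice of PythonLexer is purely for illustration; any Pygments lexer behaves the same way:

from pygments.lexers import PythonLexer
from pygments.token import Operator

# Lex a small expression and print only the Operator tokens.
for ttype, value in PythonLexer().get_tokens('a += b * 2\n'):
    if ttype in Operator:  # 'in' matches Operator and its subtypes
        print(ttype, repr(value))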
Example 1: test_gcd_expr
# Module to import: from pygments import token [as alias]
# Or: from pygments.token import Operator [as alias]
def test_gcd_expr(lexer):
fragment = u'1^3+(5-5)*gcd(a,b)\n'
tokens = [
(Token.Number.Integer, u'1'),
(Token.Operator, u'^'),
(Token.Literal.Number.Integer, u'3'),
(Token.Operator, u'+'),
(Token.Punctuation, u'('),
(Token.Literal.Number.Integer, u'5'),
(Token.Operator, u'-'),
(Token.Literal.Number.Integer, u'5'),
(Token.Punctuation, u')'),
(Token.Operator, u'*'),
(Token.Name, u'gcd'),
(Token.Punctuation, u'('),
(Token.Name, u'a'),
(Token.Operator, u','),
(Token.Name, u'b'),
(Token.Punctuation, u')'),
(Token.Text, u'\n')
]
assert list(lexer.get_tokens(fragment)) == tokens
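One detail worth flagging: the expected list mixes Token.Number.Integer and Token.Literal.Number.Integer, yet the assertion still holds. pygments.token installs aliases (for instance Token.Number = Token.Literal.Number), and token types are singletons, so both spellings name the same object. A quick check:

from pygments.token import Token, Number

# Token.Number is an alias for Token.Literal.Number, and token types
# are singletons, so identity comparison holds.
assert Token.Number.Integer is Token.Literal.Number.Integer
assert Number.Integer is Token.Literal.Number.Integer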
Example 2: test_escaped_bracestring
# Module to import: from pygments import token [as alias]
# Or: from pygments.token import Operator [as alias]
def test_escaped_bracestring(lexer):
fragment = u'str.gsub(%r{\\\\\\\\}, "/")\n'
tokens = [
(Token.Name, u'str'),
(Token.Operator, u'.'),
(Token.Name, u'gsub'),
(Token.Punctuation, u'('),
(Token.Literal.String.Regex, u'%r{'),
(Token.Literal.String.Regex, u'\\\\'),
(Token.Literal.String.Regex, u'\\\\'),
(Token.Literal.String.Regex, u'}'),
(Token.Punctuation, u','),
(Token.Text, u' '),
(Token.Literal.String.Double, u'"'),
(Token.Literal.String.Double, u'/'),
(Token.Literal.String.Double, u'"'),
(Token.Punctuation, u')'),
(Token.Text, u'\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
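Tests in this pytest style receive the lexer through a fixture that is not shown in the excerpt. Since this fragment is Ruby (gsub, %r{} regex literals), the fixture presumably looks something like this sketch; the exact scope is an assumption:

import pytest
from pygments.lexers import RubyLexer

@pytest.fixture(scope='module')
def lexer():
    yield RubyLexer()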
Example 3: test_unquoted_querystring
# Module to import: from pygments import token [as alias]
# Or: from pygments.token import Operator [as alias]
def test_unquoted_querystring(self):
self.assertEqual(self.get_tokens('`echo name`==john'), [
(Text, '`'),
(Name.Builtin, 'echo'),
(Text, 'name'),
(Text, '`'),
(Operator, '=='),
(String, 'john')
])
self.assertEqual(self.get_tokens('name==`echo john`'), [
(Name, 'name'),
(Operator, '=='),
(Text, '`'),
(Name.Builtin, 'echo'),
(Text, 'john'),
(Text, '`')
])
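Examples 3, 4, and 15 are unittest-style and call a self.get_tokens helper that is not shown. A plausible sketch, assuming the test case holds its lexer in self.lexer and filters out whitespace-only tokens so the expected lists stay compact:

from pygments.token import Text

class LexerTestMixin:
    # Hypothetical helper; the name and the filtering are assumptions.
    def get_tokens(self, text):
        return [(ttype, value)
                for ttype, value in self.lexer.get_tokens(text)
                if not (ttype is Text and not value.strip())]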
Example 4: test_unquoted_bodystring
# Module to import: from pygments import token [as alias]
# Or: from pygments.token import Operator [as alias]
def test_unquoted_bodystring(self):
self.assertEqual(self.get_tokens('`echo name`=john'), [
(Text, '`'),
(Name.Builtin, 'echo'),
(Text, 'name'),
(Text, '`'),
(Operator, '='),
(String, 'john')
])
self.assertEqual(self.get_tokens('name=`echo john`'), [
(Name, 'name'),
(Operator, '='),
(Text, '`'),
(Name.Builtin, 'echo'),
(Text, 'john'),
(Text, '`')
])
Example 5: get_next_token
# Module to import: from pygments import token [as alias]
# Or: from pygments.token import Operator [as alias]
def get_next_token(self, raw_tokens: List[RawToken]) -> Tuple[Token, int]:
    # Merge runs of single-character Operator tokens into one
    # multi-character operator (e.g. '>' followed by '=' becomes '>=').
for chars_count, operators in self.MULTI_CHARS_OPS:
if len(raw_tokens) <= chars_count:
continue
if not all(v[0] == token.Operator for v in raw_tokens[:chars_count]):
continue
value = "".join(v[1] for v in raw_tokens[:chars_count])
if value in operators:
return self.transform_raw_token((token.Operator, value)), chars_count
return super(JavaTokenizer, self).get_next_token(raw_tokens)
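self.MULTI_CHARS_OPS is not part of this excerpt. A hypothetical shape consistent with the loop above, ordered longest-first so three-character operators are tried before their two-character prefixes:

# Hypothetical constant; the exact contents are assumptions.
MULTI_CHARS_OPS = (
    (3, {'>>>', '>>=', '<<='}),
    (2, {'==', '!=', '<=', '>=', '&&', '||', '++', '--', '>>', '<<'}),
)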
Example 6: header_callback
# Module to import: from pygments import token [as alias]
# Or: from pygments.token import Operator [as alias]
def header_callback(self, match):
if match.group(1).lower() == 'content-type':
content_type = match.group(5).strip()
if ';' in content_type:
content_type = content_type[:content_type.find(';')].strip()
self.content_type = content_type
yield match.start(1), Name.Attribute, match.group(1)
yield match.start(2), Text, match.group(2)
yield match.start(3), Operator, match.group(3)
yield match.start(4), Text, match.group(4)
yield match.start(5), Literal, match.group(5)
yield match.start(6), Text, match.group(6)
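For context, a callback like this is referenced from a RegexLexer rule whose regex captures six groups (name, spacing, separator, spacing, value, line end), the way Pygments' own HttpLexer wires its header rule. A minimal runnable sketch; the class name and the exact regex are assumptions:

from pygments.lexer import RegexLexer
from pygments.token import Name, Text, Operator, Literal

class HeaderLexerSketch(RegexLexer):  # hypothetical minimal lexer
    def header_callback(self, match):
        # Same six-group shape as the callback above.
        yield match.start(1), Name.Attribute, match.group(1)
        yield match.start(2), Text, match.group(2)
        yield match.start(3), Operator, match.group(3)
        yield match.start(4), Text, match.group(4)
        yield match.start(5), Literal, match.group(5)
        yield match.start(6), Text, match.group(6)

    tokens = {
        'root': [
            # groups: (1 name)(2 spaces)(3 ':')(4 spaces)(5 value)(6 EOL)
            (r'([^\s:]+)( *)(:)( *)([^\r\n]*)(\r?\n|\Z)', header_callback),
        ],
    }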
Example 7: header_callback
# Module to import: from pygments import token [as alias]
# Or: from pygments.token import Operator [as alias]
def header_callback(self, match):
if match.group(1).lower() == "content-type":
content_type = match.group(5).strip()
if ";" in content_type:
content_type = content_type[: content_type.find(";")].strip()
self.content_type = content_type
yield match.start(1), Name.Attribute, match.group(1)
yield match.start(2), Text, match.group(2)
yield match.start(3), Operator, match.group(3)
yield match.start(4), Text, match.group(4)
yield match.start(5), Literal, match.group(5)
yield match.start(6), Text, match.group(6)
Example 8: analyze
# Module to import: from pygments import token [as alias]
# Or: from pygments.token import Operator [as alias]
def analyze(self, text):
    # Bail out with (None, None) if the input does not lex cleanly.
    if any(token is Error for token, value in self.get_tokens(text)):
        return None, None
    # get_tokens yields (token, value) pairs; keep an iterator so the
    # Name branch can consume the '=' and its value with next().
    tokens, args, kwargs = iter(self.get_tokens(text)), [], {}
    for token, value in tokens:
        if token is Keyword:
            value = value in ('true', 'True')  # keyword literal -> bool
        elif token is Number:
            value = int(value)                 # number literal -> int
        if token in (Keyword, Number, String):
            args.append(value)
        if token is Name:
            next(tokens)  # skip the Operator '='
            kwargs[value] = next(tokens)[1]
    return args, kwargs
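The intended contract is easiest to see on a concrete, hypothetical token stream:

# Hypothetical illustration: for input 'true 42 name=x' tokenized as
#   (Keyword, 'true'), (Number, '42'), (Name, 'name'),
#   (Operator, '='), (String, 'x')
# analyze returns:
#   args   == [True, 42]
#   kwargs == {'name': 'x'}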
Example 9: get_content_type_subtokens
# Module to import: from pygments import token [as alias]
# Or: from pygments.token import Operator [as alias]
def get_content_type_subtokens(self, match):
yield match.start(1), Text, match.group(1)
yield match.start(2), Text.Whitespace, match.group(2)
yield match.start(3), Name.Attribute, match.group(3)
yield match.start(4), Operator, match.group(4)
yield match.start(5), String, match.group(5)
if match.group(3).lower() == "boundary":
boundary = match.group(5).strip()
if boundary[0] == '"' and boundary[-1] == '"':
boundary = boundary[1:-1]
self.boundary = boundary
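The stored boundary is what downstream code uses to cut a multipart body into parts. A hypothetical helper showing the conventional delimiter handling:

def split_multipart(body: str, boundary: str):
    # Hypothetical helper: parts are delimited by '--' + boundary and the
    # message is terminated by '--' + boundary + '--'.
    delimiter = '--' + boundary
    chunks = body.split(delimiter)
    # chunks[0] is the preamble; the final chunk is the closing epilogue.
    return [c.strip('\r\n') for c in chunks[1:-1]]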
Example 10: test_sum
# Module to import: from pygments import token [as alias]
# Or: from pygments.token import Operator [as alias]
def test_sum(lexer):
fragment = u'1+3\n'
tokens = [
(Number.Integer, u'1'),
(Operator, u'+'),
(Number.Integer, u'3'),
(Text, u'\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
Example 11: test_if_statement
# Module to import: from pygments import token [as alias]
# Or: from pygments.token import Operator [as alias]
def test_if_statement(lexer):
fragment = u"""@( 0 > 3 ) ஆனால்
பதிப்பி "wont print"
முடி"""
tokens = [
(Token.Operator, u'@'),
(Token.Punctuation, u'('),
(Token.Text, u' '),
(Token.Literal.Number.Integer, u'0'),
(Token.Text, u' '),
(Token.Operator, u'>'),
(Token.Text, u' '),
(Token.Literal.Number.Integer, u'3'),
(Token.Text, u' '),
(Token.Punctuation, u')'),
(Token.Text, u' '),
(Token.Keyword, u'ஆனால்'),
(Token.Text, u'\n'),
(Token.Text, u' '),
(Token.Keyword, u'பதிப்பி'),
(Token.Text, u' '),
(Token.Literal.String, u'"wont print"'),
(Token.Text, u'\n'),
(Token.Keyword, u'முடி'),
(Token.Text, u'\n')
]
assert list(lexer.get_tokens(fragment)) == tokens
Example 12: test_range_syntax1
# Module to import: from pygments import token [as alias]
# Or: from pygments.token import Operator [as alias]
def test_range_syntax1(lexer):
fragment = u'1..3\n'
tokens = [
(Number.Integer, u'1'),
(Operator, u'..'),
(Number.Integer, u'3'),
(Text, u'\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
Example 13: test_range_syntax3
# Module to import: from pygments import token [as alias]
# Or: from pygments.token import Operator [as alias]
def test_range_syntax3(lexer):
fragment = u'1 .. 3\n'
tokens = [
(Number.Integer, u'1'),
(Text, u' '),
(Operator, u'..'),
(Text, u' '),
(Number.Integer, u'3'),
(Text, u'\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
Example 14: test_operator_methods
# Module to import: from pygments import token [as alias]
# Or: from pygments.token import Operator [as alias]
def test_operator_methods(lexer):
fragment = u'x.==4\n'
tokens = [
(Token.Name, u'x'),
(Token.Operator, u'.'),
(Token.Name.Operator, u'=='),
(Token.Literal.Number.Integer, u'4'),
(Token.Text, u'\n'),
]
assert list(lexer.get_tokens(fragment)) == tokens
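The Token.Name.Operator here reflects Ruby's operator methods: after the '.', '==' is a method name rather than an infix operator. Assuming the lexer under test is Pygments' RubyLexer, the same check runs standalone:

from pygments.lexers import RubyLexer
from pygments.token import Name, Operator

tokens = list(RubyLexer().get_tokens('x.==4\n'))
assert (Operator, '.') in tokens
assert (Name.Operator, '==') in tokens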
Example 15: test_querystring
# Module to import: from pygments import token [as alias]
# Or: from pygments.token import Operator [as alias]
def test_querystring(self):
self.assertEqual(self.get_tokens('foo==bar'), [
(Name, 'foo'),
(Operator, '=='),
(String, 'bar')
])