本文整理汇总了Python中tokenizer.Tokenizer.parse方法的典型用法代码示例。如果您正苦于以下问题:Python Tokenizer.parse方法的具体用法?Python Tokenizer.parse怎么用?Python Tokenizer.parse使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类tokenizer.Tokenizer
的用法示例。
在下文中一共展示了Tokenizer.parse方法的13个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: interpret_line
# 需要导入模块: from tokenizer import Tokenizer [as 别名]
# 或者: from tokenizer.Tokenizer import parse [as 别名]
def interpret_line(self, line):
    """Interpret a single line of BASIC source.

    If the line starts with a number it is a program line: store the
    remainder of the source (everything after the line number) under that
    line number and keep ``self.lines`` sorted.  Otherwise the line is a
    direct command and is executed immediately.
    """
    tokenizer = Tokenizer()
    tokenizer.parse(line)
    first_token = tokenizer.getNextToken()
    if first_token.type == Token.NUMBER:
        # Program line: key by line number, value is the raw source after
        # the number (tokenizer.pos points just past the consumed token).
        self.lines[int(first_token.value)] = tokenizer.prog[tokenizer.pos:]
        self.sort_lines()
    else:
        # Immediate mode: run the whole line right away.
        self.run_line(line)
示例2: test_ast_opts
# 需要导入模块: from tokenizer import Tokenizer [as 别名]
# 或者: from tokenizer.Tokenizer import parse [as 别名]
def test_ast_opts(self):
    """Template ``opts`` mapping is reachable both in {{ }} and {@ @} blocks."""
    a = AST()
    t = Tokenizer()
    opts = {}
    opts['get-me'] = 'I am superman'
    # Reference expression: value is emitted into the output buffer.
    a.parse(t.parse('{{ opts.get("get-me") }}'))
    c = a.traverse(opts=opts)
    self.assertEqual(c.buffer, 'I am superman')
    # Code block: opts is usable as a truthy condition.
    a.parse(t.parse('{@ if opts.get("get-me"): @}I am superman{@ end @}'))
    c = a.traverse(opts=opts)
    self.assertEqual(c.buffer, 'I am superman')
示例3: test_ast_expr
# 需要导入模块: from tokenizer import Tokenizer [as 别名]
# 或者: from tokenizer.Tokenizer import parse [as 别名]
def test_ast_expr(self):
    """Arithmetic in {@ ... @} follows the usual precedence and grouping."""
    t = Tokenizer()
    a = AST()
    a.parse(t.parse('{@ 1 + 2 @}'))
    c = a.traverse()
    self.assertEqual(c.last_expr_val, 3)
    a.parse(t.parse('{@ 2 - 1 @}'))
    c = a.traverse()
    self.assertEqual(c.last_expr_val, 1)
    a.parse(t.parse('{@ 2 * 3 @}'))
    c = a.traverse()
    self.assertEqual(c.last_expr_val, 6)
    a.parse(t.parse('{@ 4 / 2 @}'))
    c = a.traverse()
    self.assertEqual(c.last_expr_val, 2)
    # '*' binds tighter than '+'.
    a.parse(t.parse('{@ 1 + 2 * 3 @}'))
    c = a.traverse()
    self.assertEqual(c.last_expr_val, 7)
    a.parse(t.parse('{@ 1 + 2 * 3 / 2 @}'))
    c = a.traverse()
    self.assertEqual(c.last_expr_val, 4)
    # Parentheses override precedence.
    a.parse(t.parse('{@ (1 + 2) * 3 @}'))
    c = a.traverse()
    self.assertEqual(c.last_expr_val, 9)
    # An unbalanced parenthesis is a syntax error.
    with self.assertRaises(AST.SyntaxError):
        a.parse(t.parse('{@ (1 + 2 @}'))
    # Assignment stores the value and also yields it as last_expr_val.
    a.parse(t.parse('{@ v = 1 + 2 @}'))
    c = a.traverse()
    self.assertEqual(c.last_expr_val, 3)
    self.assertEqual(c.syms['v'], 3)
    # Multi-statement block: later statements can read earlier symbols.
    a.parse(t.parse('''{@
a = 1 + 2
v = a + 3
@}'''))
    c = a.traverse()
    self.assertEqual(c.last_expr_val, 6)
    self.assertEqual(c.syms['v'], 6)
示例4: test_ast_basic
# 需要导入模块: from tokenizer import Tokenizer [as 别名]
# 或者: from tokenizer.Tokenizer import parse [as 别名]
def test_ast_basic(self):
    """Basic AST parsing: empty input, plain text, and malformed blocks."""
    t = Tokenizer()
    a = AST()
    # Empty source yields an empty tree.
    a.parse(t.parse(''))
    self.assertEqual(a.root, None)
    # Plain text becomes a text block at the root.
    a.parse(t.parse('abc'))
    c = a.traverse()
    self.assertEqual(a.root.text_block.text, 'abc')
    # Unterminated blocks are tolerated by the parser...
    a.parse(t.parse('{@'))
    a.parse(t.parse('{@ v = "v"'))
    # ...but an invalid statement raises.
    with self.assertRaises(AST.SyntaxError):
        a.parse(t.parse('{@ 1: @}'))
示例5: main
# 需要导入模块: from tokenizer import Tokenizer [as 别名]
# 或者: from tokenizer.Tokenizer import parse [as 别名]
def main():
    """Read template source from stdin, render it, and print the result.

    Exit codes: 0 on success, 1 on a tokenizer parse error, 2 on an AST
    syntax error.
    """
    # Force UTF-8 on the standard streams regardless of the locale.
    sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
    sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8')
    sys.stdin = io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8')
    try:
        opts = create_opts()
        src = sys.stdin.read()
        tokenizer = Tokenizer()
        tokens = tokenizer.parse(src)
        ast = AST()
        ast.parse(tokens)
        context = ast.traverse(opts=opts)
        print(context.buffer)
    except Tokenizer.ParseError as e:
        print(e)
        sys.exit(1)
    except AST.SyntaxError as e:
        print(e)
        sys.exit(2)
    sys.exit(0)
示例6: test_ast_import
# 需要导入模块: from tokenizer import Tokenizer [as 别名]
# 或者: from tokenizer.Tokenizer import parse [as 别名]
def test_ast_import(self):
    """``import`` inside {@ @} loads built-in modules (alias, config)."""
    t = Tokenizer()
    a = AST()
    # Bare 'import' with no module name is a syntax error.
    with self.assertRaises(AST.SyntaxError):
        a.parse(t.parse('{@ import @}'))
    # An unterminated block still imports.
    a.parse(t.parse('{@ import alias'))
    c = a.traverse()
    self.assertEqual(c.imported_alias, True)
    a.parse(t.parse('{@ import alias @}'))
    c = a.traverse()
    self.assertEqual(c.imported_alias, True)
    # Multiple imports across separate blocks, mixed with text.
    a.parse(t.parse('aaa{@ import alias @}bbb{@ import config @}ccc'))
    c = a.traverse()
    self.assertEqual(c.imported_alias, True)
    self.assertEqual(c.imported_config, True)
    a.parse(t.parse('{@ import alias @}{@ import config @}'))
    c = a.traverse()
    self.assertEqual(c.imported_alias, True)
    self.assertEqual(c.imported_config, True)
    # Imported modules expose callables usable in the same block.
    a.parse(t.parse('''{@
import alias
alias.set("dtl", "run bin/date-line/date-line.py")
@}'''))
    c = a.traverse()
    self.assertEqual(c.alias_map['dtl'], 'run bin/date-line/date-line.py')
    a.parse(t.parse('''{@
import config
config.set("editor", "subl")
@}'''))
    c = a.traverse()
    self.assertEqual(c.config_map['editor'], 'subl')
示例7: run_line
# 需要导入模块: from tokenizer import Tokenizer [as 别名]
# 或者: from tokenizer.Tokenizer import parse [as 别名]
def run_line(self, line):
    """Tokenize *line* and execute it immediately as a single statement."""
    tokenizer = Tokenizer()
    tokenizer.parse(line)
    self.execute_statement(tokenizer)
示例8: test_tokenizer
# 需要导入模块: from tokenizer import Tokenizer [as 别名]
# 或者: from tokenizer.Tokenizer import parse [as 别名]
def test_tokenizer(self):
    """Tokenizer.parse splits template source into typed tokens."""
    t = Tokenizer()
    # Empty input -> no tokens.
    ts = t.parse('')
    self.assertEqual(len(ts), 0)
    # Plain text (no markers) is one text-block token.
    ts = t.parse('abc+/%123')
    self.assertEqual(len(ts), 1)
    self.assertEqual(ts[0].kind, 'text-block')
    self.assertEqual(ts[0].value, 'abc+/%123')
    # Opening code marker alone.
    ts = t.parse('{@')
    self.assertEqual(len(ts), 1)
    self.assertEqual(ts[0].kind, 'lbraceat')
    self.assertEqual(ts[0].value, '{@')
    # Matched code markers.
    ts = t.parse('{@@}')
    self.assertEqual(len(ts), 2)
    self.assertEqual(ts[0].kind, 'lbraceat')
    self.assertEqual(ts[0].value, '{@')
    self.assertEqual(ts[1].kind, 'rbraceat')
    self.assertEqual(ts[1].value, '@}')
    # Reference markers {{ }}.
    ts = t.parse('{{')
    self.assertEqual(len(ts), 1)
    self.assertEqual(ts[0].kind, 'ldbrace')
    self.assertEqual(ts[0].value, '{{')
    ts = t.parse('{{}}')
    self.assertEqual(len(ts), 2)
    self.assertEqual(ts[0].kind, 'ldbrace')
    self.assertEqual(ts[0].value, '{{')
    self.assertEqual(ts[1].kind, 'rdbrace')
    self.assertEqual(ts[1].value, '}}')
    ts = t.parse('{{ a }}')
    self.assertEqual(len(ts), 3)
    self.assertEqual(ts[0].kind, 'ldbrace')
    self.assertEqual(ts[0].value, '{{')
    self.assertEqual(ts[1].kind, 'identifier')
    self.assertEqual(ts[1].value, 'a')
    self.assertEqual(ts[2].kind, 'rdbrace')
    self.assertEqual(ts[2].value, '}}')
    # A method call inside a reference block.
    ts = t.parse('{{ opts.get("") }}')
    self.assertEqual(len(ts), 8)
    self.assertEqual(ts[0].kind, 'ldbrace')
    self.assertEqual(ts[0].value, '{{')
    self.assertEqual(ts[1].kind, 'identifier')
    self.assertEqual(ts[1].value, 'opts')
    self.assertEqual(ts[2].kind, 'operator')
    self.assertEqual(ts[2].value, '.')
    self.assertEqual(ts[3].kind, 'identifier')
    self.assertEqual(ts[3].value, 'get')
    self.assertEqual(ts[4].kind, 'lparen')
    self.assertEqual(ts[4].value, '(')
    self.assertEqual(ts[5].kind, 'string')
    self.assertEqual(ts[5].value, '')
    self.assertEqual(ts[6].kind, 'rparen')
    self.assertEqual(ts[6].value, ')')
    self.assertEqual(ts[7].kind, 'rdbrace')
    self.assertEqual(ts[7].value, '}}')
    # Text and code blocks interleaved.
    # NOTE(review): the scraped listing showed this literal mangled by an
    # email-protection filter; reconstructed from the asserted tokens below.
    ts = t.parse('aaa{@bbb@}ccc{@ddd@}eee')
    self.assertEqual(len(ts), 9)
    self.assertEqual(ts[0].kind, 'text-block')
    self.assertEqual(ts[0].value, 'aaa')
    self.assertEqual(ts[1].kind, 'lbraceat')
    self.assertEqual(ts[1].value, '{@')
    self.assertEqual(ts[2].kind, 'identifier')
    self.assertEqual(ts[2].value, 'bbb')
    self.assertEqual(ts[3].kind, 'rbraceat')
    self.assertEqual(ts[3].value, '@}')
    self.assertEqual(ts[4].kind, 'text-block')
    self.assertEqual(ts[4].value, 'ccc')
    self.assertEqual(ts[5].kind, 'lbraceat')
    self.assertEqual(ts[5].value, '{@')
    self.assertEqual(ts[6].kind, 'identifier')
    self.assertEqual(ts[6].value, 'ddd')
    self.assertEqual(ts[7].kind, 'rbraceat')
    self.assertEqual(ts[7].value, '@}')
    self.assertEqual(ts[8].kind, 'text-block')
    self.assertEqual(ts[8].value, 'eee')
    # A stray '@' inside a code block is a parse error.
    with self.assertRaises(Tokenizer.ParseError):
        t.parse('{@@@}')
    # Operators as standalone tokens.
    ts = t.parse('{@ . @}')
    self.assertEqual(len(ts), 3)
    self.assertEqual(ts[0].kind, 'lbraceat')
    self.assertEqual(ts[0].value, '{@')
    self.assertEqual(ts[1].kind, 'operator')
    self.assertEqual(ts[1].value, '.')
    self.assertEqual(ts[2].kind, 'rbraceat')
    self.assertEqual(ts[2].value, '@}')
    ts = t.parse('{@ , @}')
    self.assertEqual(len(ts), 3)
    self.assertEqual(ts[0].kind, 'lbraceat')
    self.assertEqual(ts[0].value, '{@')
    # ... (remainder of this test was omitted in the original listing) ...
示例9: test_ast_if
# 需要导入模块: from tokenizer import Tokenizer [as 别名]
# 或者: from tokenizer.Tokenizer import parse [as 别名]
def test_ast_if(self):
    """if / elif / else / end blocks, including nesting and text bodies."""
    t = Tokenizer()
    a = AST()
    # A truthy condition emits its text body.
    a.parse(t.parse('{@ if 1: @}abc{@ end @}'))
    c = a.traverse()
    self.assertEqual(c.buffer, 'abc')
    # Missing colon, missing condition, and missing 'end' all raise.
    with self.assertRaises(AST.SyntaxError):
        a.parse(t.parse('{@ if 1 @}{@ end @}'))
    with self.assertRaises(AST.SyntaxError):
        a.parse(t.parse('{@ if @}{@ end @}'))
    with self.assertRaises(AST.SyntaxError):
        a.parse(t.parse('{@ if 1: @}{@ @}'))
    # Single-line forms with statements in the branch bodies.
    a.parse(t.parse('{@ if 1: v = "v" end @}'))
    c = a.traverse()
    self.assertEqual(c.syms['v'], 'v')
    a.parse(t.parse('{@ if 0: v = "v" else: v = "v2" end @}'))
    c = a.traverse()
    self.assertEqual(c.syms['v'], 'v2')
    a.parse(t.parse('{@ if 0: v = "v" elif 1: v = "v2" end @}'))
    c = a.traverse()
    self.assertEqual(c.syms['v'], 'v2')
    a.parse(t.parse('{@ if 0: v = "v" elif 0: v = "v2" else: v = "v3" end @}'))
    c = a.traverse()
    self.assertEqual(c.syms['v'], 'v3')
    # Multi-line forms.
    a.parse(t.parse('''{@
if 1:
v = "s"
end
@}'''))
    c = a.traverse()
    self.assertEqual(c.syms['v'], 's')
    a.parse(t.parse('''{@
if 1:
v = "a"
elif 2:
v = "b"
else:
v = "c"
end
@}'''))
    c = a.traverse()
    self.assertEqual(c.syms['v'], 'a')
    # Nested if blocks.
    a.parse(t.parse('''{@
if 1:
if 2:
v = "a"
end
end
@}'''))
    c = a.traverse()
    self.assertEqual(c.syms['v'], 'a')
    a.parse(t.parse('''{@
if 0:
else:
if 2:
v = "abc"
end
end
@}'''))
    c = a.traverse()
    self.assertEqual(c.syms['v'], 'abc')
    # Branch bodies split across separate {@ @} blocks.
    a.parse(t.parse('''{@ if 1: @}{@ end @}'''))
    c = a.traverse()
    a.parse(t.parse('''{@ if 0: @}{@ elif 1: @}{@ v = "a" @}{@ end @}'''))
    c = a.traverse()
    self.assertEqual(c.syms['v'], 'a')
    a.parse(t.parse('''{@ if 0: @}{@ elif 0: @}{@ else: @}{@ v = "a" @}{@ end @}'''))
    c = a.traverse()
    self.assertEqual(c.syms['v'], 'a')
    # References inside nested if bodies render into the buffer.
    a.parse(t.parse('''{@ v = "a" @}{@ if 1: @}{@ if 2: @}{{ v }}{@ end @}{@ end @}bbb'''))
    c = a.traverse()
    self.assertEqual(c.syms['v'], 'a')
    self.assertEqual(c.buffer, "abbb")
    a.parse(t.parse('''{@ v = "a" @}{@ if 1: @}{{ v }}{{ v }}{@ end @}'''))
    a.parse(t.parse('''{@
v = "cat"
if 1:
@}{{ v }}{@
end
if 1:
end
@}'''))
    # ... (remainder of this test was omitted in the original listing) ...
示例10: test_ast_comparison
# 需要导入模块: from tokenizer import Tokenizer [as 别名]
# 或者: from tokenizer.Tokenizer import parse [as 别名]
def test_ast_comparison(self):
    """Comparison operators yield 1 (true) or 0 (false) for ints and strings."""
    t = Tokenizer()
    a = AST()
    a.parse(t.parse('{@ 0 == 0 @}'))
    c = a.traverse()
    self.assertEqual(c.last_expr_val, 1)
    a.parse(t.parse('{@ 0 != 0 @}'))
    c = a.traverse()
    self.assertEqual(c.last_expr_val, 0)
    a.parse(t.parse('{@ 1 > 0 @}'))
    c = a.traverse()
    self.assertEqual(c.last_expr_val, 1)
    a.parse(t.parse('{@ 1 < 0 @}'))
    c = a.traverse()
    self.assertEqual(c.last_expr_val, 0)
    a.parse(t.parse('{@ 1 >= 0 @}'))
    c = a.traverse()
    self.assertEqual(c.last_expr_val, 1)
    a.parse(t.parse('{@ 1 <= 0 @}'))
    c = a.traverse()
    self.assertEqual(c.last_expr_val, 0)
    # Variables on either side of the comparison.
    a.parse(t.parse('''{@
v = 0
v == 0
@}'''))
    c = a.traverse()
    self.assertEqual(c.last_expr_val, 1)
    a.parse(t.parse('''{@
lhs = 0
rhs = 0
lhs == rhs
@}'''))
    c = a.traverse()
    self.assertEqual(c.last_expr_val, 1)
    # Mixed string/int comparisons are simply unequal.
    a.parse(t.parse('''{@
lhs = "a"
rhs = 0
lhs == rhs
@}'''))
    c = a.traverse()
    self.assertEqual(c.last_expr_val, 0)
    a.parse(t.parse('''{@
lhs = 0
rhs = "a"
lhs == rhs
@}'''))
    c = a.traverse()
    self.assertEqual(c.last_expr_val, 0)
    # String-to-string comparisons.
    a.parse(t.parse('{@ "a" == "b" @}'))
    c = a.traverse()
    self.assertEqual(c.last_expr_val, 0)
    a.parse(t.parse('{@ "a" != "b" @}'))
    c = a.traverse()
    self.assertEqual(c.last_expr_val, 1)
    a.parse(t.parse('{@ "a" < "b" @}'))
    c = a.traverse()
    self.assertEqual(c.last_expr_val, 1)
    a.parse(t.parse('{@ "a" > "b" @}'))
    c = a.traverse()
    self.assertEqual(c.last_expr_val, 0)
    a.parse(t.parse('{@ "a" <= "b" @}'))
    c = a.traverse()
    self.assertEqual(c.last_expr_val, 1)
    a.parse(t.parse('{@ "a" >= "b" @}'))
    c = a.traverse()
    self.assertEqual(c.last_expr_val, 0)
    # Chained equality. (Translated from the original Japanese note:)
    # This expression is True in Python and False in C; PHP and Ruby reject
    # it as a parse error.  Since '==' yields a bool (or int), evaluating
    # left-to-right suggests the result should be False, though at a glance
    # True also looks plausible.  Cap treats it as False for implementation
    # simplicity.
    a.parse(t.parse('{@ "a" == "a" == "a" @}'))
    c = a.traverse()
    self.assertEqual(c.last_expr_val, 0)
示例11: test_ast_assign
# 需要导入模块: from tokenizer import Tokenizer [as 别名]
# 或者: from tokenizer.Tokenizer import parse [as 别名]
def test_ast_assign(self):
    """Assignment in code blocks: creation, overwrite, expressions, opts."""
    t = Tokenizer()
    a = AST()
    a.parse(t.parse('''{@
a = "s"
@}'''))
    c = a.traverse()
    self.assertEqual(c.syms['a'], 's')
    a.parse(t.parse('''{@
a = "s"
@}{{ a }}'''))
    c = a.traverse()
    self.assertEqual(c.syms['a'], 's')
    # Re-assignment overwrites, including with the empty string.
    a.parse(t.parse('''{@
v = "v"
v = ""
@}'''))
    c = a.traverse()
    self.assertEqual(c.syms['v'], '')
    a.parse(t.parse('''{@
v = "v"
v = "v2"
@}'''))
    c = a.traverse()
    self.assertEqual(c.syms['v'], 'v2')
    a.parse(t.parse('''{@
v = "v"
v = ""
v = "v2"
@}'''))
    c = a.traverse()
    self.assertEqual(c.syms['v'], 'v2')
    # Assignments spread over separate code blocks still share one scope.
    a.parse(t.parse('''{@ v = "v" @}
{@ v = "" @}
'''))
    c = a.traverse()
    self.assertEqual(c.syms['v'], '')
    a.parse(t.parse('''{@ v = "v" @}
{@ v = "" @}
{@ v = "v2" @}
'''))
    c = a.traverse()
    self.assertEqual(c.syms['v'], 'v2')
    a.parse(t.parse('''{@ v = "" @}
{@ v = "v" @}
'''))
    c = a.traverse()
    self.assertEqual(c.syms['v'], 'v')
    # Expressions on the right-hand side.
    a.parse(t.parse('{@ v = 1 + 2 @}'))
    c = a.traverse()
    self.assertEqual(c.syms['v'], 3)
    a.parse(t.parse('{@ v = v = 1 @}'))
    c = a.traverse()
    self.assertEqual(c.syms['v'], 1)
    a.parse(t.parse('{@ v = 1 + 2 * 3 @}'))
    c = a.traverse()
    self.assertEqual(c.syms['v'], 7)
    a.parse(t.parse('{@ v = (1 + 2) * 3 @}'))
    c = a.traverse()
    self.assertEqual(c.syms['v'], 9)
    # Values can come from the opts mapping passed to traverse().
    a.parse(t.parse('{@ v = opts.get("a") @}'))
    c = a.traverse(opts={ 'a': 'b' })
    self.assertEqual(c.syms['v'], "b")
示例12: TestParser
# 需要导入模块: from tokenizer import Tokenizer [as 别名]
# 或者: from tokenizer.Tokenizer import parse [as 别名]
class TestParser(TestCase):
    """Tests for the BASIC Parser's match_* helpers, fed via Tokenizer.parse."""

    def setUp(self):
        self.tokenizer = Tokenizer()
        self.parser = Parser({})

    def test_match_var_list(self):
        self.tokenizer.parse('A, B, C')
        self.assertEqual(['A', 'B', 'C'], self.parser.match_var_list(self.tokenizer))

    def test_match_relop(self):
        # Relational operators evaluate to 1 (true) or 0 (false).
        self.tokenizer.parse('2 > 1')
        self.assertEqual(1, self.parser.match_relop(self.tokenizer))
        self.tokenizer.parse('2 < 1')
        self.assertEqual(0, self.parser.match_relop(self.tokenizer))
        self.tokenizer.parse('2 <= 2')
        self.assertEqual(1, self.parser.match_relop(self.tokenizer))
        self.tokenizer.parse('2 <= 1')
        self.assertEqual(0, self.parser.match_relop(self.tokenizer))
        self.tokenizer.parse('2 >= 2')
        self.assertEqual(1, self.parser.match_relop(self.tokenizer))
        self.tokenizer.parse('2 >= 3')
        self.assertEqual(0, self.parser.match_relop(self.tokenizer))
        self.tokenizer.parse('2 = 2')
        self.assertEqual(1, self.parser.match_relop(self.tokenizer))
        self.tokenizer.parse('2 = 3')
        self.assertEqual(0, self.parser.match_relop(self.tokenizer))
        # BASIC accepts both '<>' and '><' as "not equal".
        self.tokenizer.parse('2 <> 3')
        self.assertEqual(1, self.parser.match_relop(self.tokenizer))
        self.tokenizer.parse('2 <> 2')
        self.assertEqual(0, self.parser.match_relop(self.tokenizer))
        self.tokenizer.parse('2 >< 3')
        self.assertEqual(1, self.parser.match_relop(self.tokenizer))
        self.tokenizer.parse('2 >< 2')
        self.assertEqual(0, self.parser.match_relop(self.tokenizer))

    def test_match_expression_list(self):
        self.tokenizer.parse('2+3+2*2, 1+2, 3, "abcd"')
        self.assertEqual([9,3,3, 'abcd'], self.parser.match_expression_list(self.tokenizer))

    def test_match_expression(self):
        self.tokenizer.parse('2+3+2*2')
        self.assertEqual(9, self.parser.match_expression(self.tokenizer))

    def test_match_term(self):
        self.tokenizer.parse('2*3*4')
        self.assertEqual(24, self.parser.match_term(self.tokenizer))

    def test_match_factor(self):
        self.tokenizer.parse('123')
        self.assertEqual(123, self.parser.match_factor(self.tokenizer))
        # A factor may also be a variable lookup.
        self.parser._variables['A'] = 456
        self.tokenizer.parse('A')
        self.assertEqual(456, self.parser.match_factor(self.tokenizer))

    def test_match_brackets(self):
        self.tokenizer.parse('(1 + 2) * (3 + 5)')
        self.assertEqual(24, self.parser.match_expression(self.tokenizer))

    def test_left_associativity(self):
        # 10-2+2 must be (10-2)+2 = 10, not 10-(2+2) = 6.
        self.tokenizer.parse('10-2+2')
        self.assertEqual(10, self.parser.match_term(self.tokenizer))
示例13: TestTokeniser
# 需要导入模块: from tokenizer import Tokenizer [as 别名]
# 或者: from tokenizer.Tokenizer import parse [as 别名]
class TestTokeniser(TestCase):
    """Tests for the BASIC Tokenizer: parse() plus getNextToken/peekNextToken."""

    def setUp(self):
        self.tokenizer = Tokenizer()

    def test_number(self):
        self.tokenizer.parse('100')
        token = self.tokenizer.getNextToken()
        self.assertEqual(token.NUMBER, token.type)
        self.assertEqual(100, token.value)

    def test_string(self):
        self.tokenizer.parse('"ABC"')
        token = self.tokenizer.getNextToken()
        self.assertEqual(token.STRING, token.type)
        self.assertEqual('ABC', token.value)

    def test_peek_token(self):
        # peekNextToken returns the upcoming token without consuming it.
        self.tokenizer.parse('1+2')
        token = self.tokenizer.getNextToken()
        self.assertEqual(token.NUMBER, token.type)
        self.assertEqual(1, token.value)
        token = self.tokenizer.getNextToken()
        self.assertEqual(token.OPERATOR, token.type)
        self.assertEqual('+', token.value)
        token = self.tokenizer.peekNextToken()
        self.assertEqual(token.NUMBER, token.type)
        self.assertEqual(2, token.value)
        token = self.tokenizer.getNextToken()
        self.assertEqual(token.NUMBER, token.type)
        self.assertEqual(2, token.value)

    def test_operator(self):
        self.tokenizer.parse('1+2-4')
        token = self.tokenizer.getNextToken()
        self.assertEqual(token.NUMBER, token.type)
        self.assertEqual(1, token.value)
        token = self.tokenizer.getNextToken()
        self.assertEqual(token.OPERATOR, token.type)
        self.assertEqual('+', token.value)
        token = self.tokenizer.getNextToken()
        self.assertEqual(token.NUMBER, token.type)
        self.assertEqual(2, token.value)
        token = self.tokenizer.getNextToken()
        self.assertEqual(token.OPERATOR, token.type)
        self.assertEqual('-', token.value)
        token = self.tokenizer.getNextToken()
        self.assertEqual(token.NUMBER, token.type)
        self.assertEqual(4, token.value)

    def test_operator2(self):
        # '+' between strings is still an OPERATOR token.
        self.tokenizer.parse('"A"+"B"')
        token = self.tokenizer.getNextToken()
        self.assertEqual(token.STRING, token.type)
        self.assertEqual('A', token.value)
        token = self.tokenizer.getNextToken()
        self.assertEqual(token.OPERATOR, token.type)
        self.assertEqual('+', token.value)
        token = self.tokenizer.getNextToken()
        self.assertEqual(token.STRING, token.type)
        self.assertEqual('B', token.value)

    def test_mult_operator(self):
        # '*' and '/' get their own MULTOPERATOR type (tighter precedence).
        self.tokenizer.parse('2*3')
        token = self.tokenizer.getNextToken()
        self.assertEqual(token.NUMBER, token.type)
        self.assertEqual(2, token.value)
        token = self.tokenizer.getNextToken()
        self.assertEqual(token.MULTOPERATOR, token.type)
        self.assertEqual('*', token.value)
        token = self.tokenizer.getNextToken()
        self.assertEqual(token.NUMBER, token.type)
        self.assertEqual(3, token.value)

    def test_mult_operator2(self):
        self.tokenizer.parse('2/3')
        token = self.tokenizer.getNextToken()
        self.assertEqual(token.NUMBER, token.type)
        self.assertEqual(2, token.value)
        token = self.tokenizer.getNextToken()
        # ... (remainder of this test was omitted in the original listing) ...