

Python Lexer.lex Method Code Examples

This article collects typical usage examples of the Python method lexer.Lexer.lex, drawn from open-source projects. If you are unsure what Lexer.lex does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples of the containing class, lexer.Lexer.


Twelve code examples of the Lexer.lex method are presented below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
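
Two calling patterns recur across the examples: construct a Lexer and feed it text via lex(text), or construct it over a file or prompt source and call lex() with no arguments. The sketch below shows the first pattern for orientation only; this Lexer is hypothetical, since each project defines its own Lexer with a different constructor and lex() signature.

import re

class Lexer(object):
    # Hypothetical minimal lexer: split input into numbers, words, and
    # single punctuation characters. The real Lexer classes below differ.
    TOKEN_RE = re.compile(r'\s*(\d+|\w+|\S)')

    def lex(self, text):
        # Return the flat list of tokens found in the input string.
        return self.TOKEN_RE.findall(text)

l = Lexer()
print(l.lex('if bob then 0 else "bob1"'))
# ['if', 'bob', 'then', '0', 'else', '"', 'bob1', '"']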

Example 1: test_lexer_extracts_single_integer

# Required import: from lexer import Lexer [as alias]
# Or: from lexer.Lexer import lex [as alias]
def test_lexer_extracts_single_integer():
    text = "11"

    l = Lexer()
    l.lex(text)

    assert l.get_next_token().value == 11
Author: MrLokans, Project: SimpleInterpreter, Lines: 9, Source: test_lexer.py

Example 2: main

# Required import: from lexer import Lexer [as alias]
# Or: from lexer.Lexer import lex [as alias]
def main():
    if len(sys.argv) > 1:
        fname = sys.argv[1]
        lexer = Lexer(fname)
        parser = pg.build()
        mypar = parser.parse(lexer.lex())
        recprint(mypar, 0)
    else:
        while True:
            lexer = Lexer("hw.scala")
            parser = pg.build()
            mypar = parser.parse(lexer.lex(raw_input("scaladoll> ")))
            # print mypar
            recprint(mypar, 0)
Author: tvanicraath, Project: scaladoll, Lines: 17, Source: parser_bkp.py

Example 3: __init__

# Required import: from lexer import Lexer [as alias]
# Or: from lexer.Lexer import lex [as alias]
def __init__(self, afterparse, debug):
    parser = self.pg.build()
    if len(sys.argv) > 1:
        fname = sys.argv[1]
        lexer = Lexer(fname, debug)
        mypar = parser.parse(lexer.lex())
        if debug:
            self.recprint(mypar, 0)
        afterparse(mypar)
    else:
        while True:
            lexer = Lexer(None, debug)
            mypar = parser.parse(lexer.lex(self.readfromprompt()))
            if debug:
                self.recprint(mypar, 0)
            afterparse(mypar)
Author: tvanicraath, Project: scaladoll, Lines: 17, Source: parser_bkp2.py

Example 4: __init__

# Required import: from lexer import Lexer [as alias]
# Or: from lexer.Lexer import lex [as alias]
def __init__(self, afterparse, debug):
    parser = self.pg.build()
    if len(sys.argv) > 1:
        fname = sys.argv[1]
        lexer = Lexer(fname, debug)
        mypar = parser.parse(lexer.lex())
        if debug:
            self.recprint(mypar, 0)
        afterparse(mypar)
    else:
        oldlines = ""
        while True:
            lexer = Lexer(None, debug)
            newline = self.readfromprompt()
            if newline == "CLEAR":
                oldlines = ""
                os.system('clear')
                continue
            if newline == "EXIT":
                return
            oldlines = oldlines + newline
            mypar = parser.parse(lexer.lex(oldlines))
            if debug:
                self.recprint(mypar, 0)
            afterparse(mypar)
Author: tvanicraath, Project: scaladoll, Lines: 26, Source: parser.py

Example 5: eval

# Required import: from lexer import Lexer [as alias]
# Or: from lexer.Lexer import lex [as alias]
    def eval(self, line):
        if line.startswith("save "):
            filename = line.replace("save ", "")
            self.save(filename)
            return

        if line.startswith("load "):
            filename = line.replace("load ", "")
            self.load(filename)
            return

        tokens = Lexer.lex(line)
        try:
            parse_tree = Parser(tokens).parse()
            if parse_tree:
                output = self.context.evaluate(parse_tree)
                if output:
                    print(Fore.GREEN + output)

        except MatlabetteError as e:
            print(Fore.RED)
            print(" Error: " + e.message)
            print()
Author: andela-hthuo, Project: bc-6-matlabette, Lines: 25, Source: repl.py

Example 6: Lexer

# Required import: from lexer import Lexer [as alias]
# Or: from lexer.Lexer import lex [as alias]
from lexer import Lexer
string = 'if bob then 0  else "bob1" '

l = Lexer()
result = l.lex(string)
for x in result:
    print(x)
Author: macmata, Project: mini-lexer-full-lazer, Lines: 9, Source: test.py

Example 7: Parser

# Required import: from lexer import Lexer [as alias]
# Or: from lexer.Lexer import lex [as alias]
class Parser(object):

    def __init__(self, code, whitespaces=False):
        self.lexer = Lexer(code)
        self.lexer.lex()
        self.curtok = 0
        self.start_symbol = None
        self.rules = {}
        self.whitespaces = whitespaces

    def __repr__(self):
        s = []
        for r in self.rules:
            s.append(r.__repr__())
        return "\n".join(s)

    def parse(self):
        while self.curtok < len(self.lexer.tokens):
            rule = self.parse_rule()

            if not self.start_symbol:
                self.start_symbol = rule.symbol

            self.rules[rule.symbol] = rule
            self.transform_ebnf(rule)

        # add whitespace rule
        if self.whitespaces:
            ws_rule = Rule()
            ws_rule.symbol = Nonterminal("WS")
            ws_rule.add_alternative([Terminal("<ws>", "^"), Nonterminal("WS", "^")])
            ws_rule.add_alternative([Terminal("<return>", "^"), Nonterminal("WS", "^")])
            ws_rule.add_alternative([]) # or empty
            self.rules[ws_rule.symbol] = ws_rule

            self.start_symbol.folding = "^^"
            # allow whitespace/comments at beginning of file
            start_rule = Rule()
            start_rule.symbol = Nonterminal("Startrule")
            start_rule.add_alternative([Nonterminal("WS", "^"), self.start_symbol])
            self.rules[start_rule.symbol] = start_rule
            self.start_symbol = start_rule.symbol

    def transform_ebnf(self, original_rule):
        # XXX can be made faster by setting a flag if there is a ebnf token
        # in the rule or not (can be done in first parse)
        new_rules = []
        for a in original_rule.alternatives:
            symbols = []
            i = 0
            for s in a:
                if isinstance(s, ExtendedSymbol):
                    if s.name == "loop":
                        # Example: A ::= a {b} c
                        remaining_tokens = a[i+1:] # [c]
                        loop_symbol = Nonterminal("%s_loop" % (original_rule.symbol.name,))
                        a[i:] = [loop_symbol] # A ::= a A_loop

                        newrule = Rule()
                        newrule.symbol = loop_symbol
                        newrule.add_alternative(s.children + [loop_symbol]) # A_loop ::= b A_loop
                        newrule.add_alternative(remaining_tokens)           #          | c (or epsilon)
                        new_rules.append(newrule)
                    if s.name == "option":
                        # Example: A ::= a [b] c
                        remaining_tokens = a[i+1:] # [c]
                        option_symbol = Nonterminal("%s_option" % (original_rule.symbol.name,))
                        a[i:] = [option_symbol] # A ::= a A_option

                        newrule = Rule()
                        newrule.symbol = option_symbol
                        newrule.add_alternative(s.children + remaining_tokens) # A_option ::= b c
                        newrule.add_alternative(remaining_tokens)              #            | c
                        new_rules.append(newrule)
                    if s.name == "group":
                        # Example: A ::= a [b | c] d
                        remaining_tokens = a[i+1:] # [c]
                        group1_symbol = Nonterminal("%s_group1" % (original_rule.symbol.name,))
                        group2_symbol = Nonterminal("%s_group2" % (original_rule.symbol.name,))
                        a[i:] = [group1_symbol] # A ::= a A_group

                        newrule = Rule()
                        newrule.symbol = group1_symbol
                        for c in s.children:
                            newrule.add_alternative([c, group2_symbol]) # A_option ::= b A_option2 | c A_option2
                        new_rules.append(newrule)

                        newrule = Rule()
                        newrule.symbol = group2_symbol
                        newrule.add_alternative(remaining_tokens)              # A_option2 ::= d
                        new_rules.append(newrule)
                i += 1
        for rule in new_rules:
            self.rules[rule.symbol] = rule
            self.transform_ebnf(rule)

    def inc(self):
        self.curtok += 1

    def next_token(self):
#......... remaining code omitted .........
Author: snim2, Project: eco, Lines: 103, Source: gparser.py
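
The transform_ebnf method above desugars EBNF constructs into plain BNF: a repetition A ::= a {b} c becomes A ::= a A_loop with A_loop ::= b A_loop | c, and options and groups are expanded along the same lines. Below is a standalone sketch of the loop rewrite using a plain dict grammar instead of the project's Rule/Nonterminal classes; all names here are illustrative.

def desugar_loops(grammar):
    # grammar maps a nonterminal to a list of alternatives; each alternative
    # is a list of symbols, and ("loop", [...]) marks an EBNF repetition.
    new_rules = {}
    for symbol, alternatives in grammar.items():
        for alt in alternatives:
            for i, s in enumerate(alt):
                if isinstance(s, tuple) and s[0] == "loop":
                    rest = alt[i + 1:]            # symbols after the loop
                    loop_sym = symbol + "_loop"
                    alt[i:] = [loop_sym]          # A ::= a A_loop
                    new_rules[loop_sym] = [
                        s[1] + [loop_sym],        # A_loop ::= b A_loop
                        rest,                     #          | c (or epsilon)
                    ]
    grammar.update(new_rules)
    return grammar

g = {"A": [["a", ("loop", ["b"]), "c"]]}
print(desugar_loops(g))
# {'A': [['a', 'A_loop']], 'A_loop': [['b', 'A_loop'], ['c']]}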

Example 8: Tokenizer

# Required import: from lexer import Lexer [as alias]
# Or: from lexer.Lexer import lex [as alias]
class Tokenizer(object):
    def __init__(self, stmt):
        self.ll = Lexer(stmt)
        self.tokens = []
        self._get_tokens()
        self.idx = 0

    def pop(self):
        try:
            token = self.tokens[self.idx]
            self.idx += 1
            return token
        except IndexError:
            return None

    def top(self):
        try:
            token = self.tokens[self.idx]
            return token
        except IndexError:
            return None

    def peek(self):
        try:
            token = self.tokens[self.idx + 1]
            return token
        except IndexError:
            return None

    # List of types
    # Operators And precedence rules
    patterns = [
        ('uop', re.compile("^(!)$"), PR_UOP),  # NOT Unary Operator
        ('lparen', re.compile("^(\()$"), PR_LPAREN),  # Left paren
        ('rparen', re.compile("^(\))$"), PR_RPAREN),  # Right paren
        ('bop', re.compile(
            "(and)$", re.IGNORECASE), PR_AND),  # AND Binary Operator
        ('bop', re.compile("(or)$", re.IGNORECASE), PR_OR),
        # OR Binary Operator
        ('term', re.compile("^(.*)$"), PR_TERM)  # Everything else is a term
    ]

    def n(self):
        term = self.ll.lex()
        col = self.ll.pos
        if not term:
            return None
        for type_, pattern, precedence in self.patterns:
            match = pattern.match(term)
            if match:
                if type_ == 'uop' or type_ == 'bop':
                    opr = match.groups(1)[0]
                    return Token(type_, opr.upper(), precedence, col)
                if type_ in ('lparen', 'rparen'):
                    paren = match.groups(1)[0]
                    return Token(type_, paren, precedence, col)
                if type_ == 'term':
                    term = match.groups(1)[0]
                    return Token(type_, term, precedence, col)

    def _get_tokens(self):
        first = True
        while True:
            token = self.n()
            if not token:
                self.tokens.append(None)
                break
            else:
                self.tokens.append(token)

        def add_ands(tokens):
            # When no operator exists we need to put an AND.
            new_tokens = []
            seen_term = False
            i = 0
            while i < (len(tokens) - 1):
                cur = tokens[i]
                nxt = tokens[i + 1]
                if not nxt:
                    new_tokens.append(cur)
                    break

                if ((cur.type_ == 'Term' and nxt.type_ == 'Term')
                    or (cur.type_ == 'Term' and nxt.type_ == 'Lparen')
                        or (cur.type_ == 'Rparen' and nxt.type_ == 'Term')):
                    new_tokens.append(cur)
                    new_tokens.append(Token('Bop', 'AND', PR_AND, None))
                    i += 1
                else:
                    new_tokens.append(cur)
                    i += 1
            return new_tokens

        def add_parens(tokens):
            new_tokens = []
            for token in self.tokens:
                if token.precedence > -1:
                    if token.type_ == 'Lparen':
                        for i in xrange(token.precedence):
                            new_tokens.append(Token('Lparen', '(',
#......... remaining code omitted .........
Author: Ziaunys, Project: chili, Lines: 103, Source: tokenizer.py
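
The add_ands helper above inserts an implicit AND between adjacent operands, so a query like foo bar parses as foo AND bar. Here is a standalone sketch of the same idea on (type, value) tuples; the tuple representation and function name are illustrative, not the project's Token class.

def insert_implicit_ands(tokens):
    # Insert an AND operator wherever two operands sit side by side:
    # term term, term lparen, or rparen term.
    out = []
    for cur, nxt in zip(tokens, tokens[1:] + [None]):
        out.append(cur)
        if nxt is not None and (
                (cur[0] == "term" and nxt[0] in ("term", "lparen"))
                or (cur[0] == "rparen" and nxt[0] == "term")):
            out.append(("bop", "AND"))
    return out

query = [("term", "foo"), ("term", "bar")]
print(insert_implicit_ands(query))
# [('term', 'foo'), ('bop', 'AND'), ('term', 'bar')]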

Example 9: expr_op

# Required import: from lexer import Lexer [as alias]
# Or: from lexer.Lexer import lex [as alias]
    return p[0]

@pg.production("expr : expr PLUS expr")
@pg.production("expr : expr MINUS expr")
def expr_op(p):
    lhs = p[0].getint()
    rhs = p[2].getint()
    if p[1].gettokentype() == "PLUS":
        return BoxInt(lhs + rhs)
    elif p[1].gettokentype() == "MINUS":
        return BoxInt(lhs - rhs)
    else:
        raise AssertionError("This is impossible, abort the time machine!")

@pg.production("expr : NUMBER")
def expr_num(p):
    return BoxInt(int(p[0].getstr()))

lexer = Lexer("rac")
parser = pg.build()

class BoxInt(BaseBox):
    def __init__(self, value):
        self.value = value

    def getint(self):
        return self.value

mypar=parser.parse(lexer.lex("7"))
print mypar.getint()
Author: tvanicraath, Project: scaladoll, Lines: 32, Source: new.py

Example 10: Output

# Required import: from lexer import Lexer [as alias]
# Or: from lexer.Lexer import lex [as alias]
    blocking_factor = '10'
    format_code = '1'
    destination_name = 'ABCDEFGHIJKLMNOPQRSTUVW'
    origin_name = 'ABCDEFGHIJKLMNOPQRSTUVW'
    reference_code = 'ABCDEFGH'

    out = Output()
    lexer = Lexer(initial_state=Main, output=out)

    in_ = (record_type + priority_code + immediate_destination +
           immediate_origin + transmission_date + transmission_time +
           file_id_modifier + record_size + blocking_factor + format_code +
           destination_name + origin_name + reference_code)

    print in_, len(in_)
    lexer.lex(in_)
    g = NachaGrammar()
    tokens = out.tokens #filter(lambda _: not isinstance(_, (Whitespace, NewLine, Comment)), out.tokens)
    a = g.derive(tokens)
    if a.is_matchable():
        print a.ast()
    else:
        print "Error"


        # ('1', RecordTypeCode),
        # ('##', PriorityCode),
        # ('#' * 10, ImmediateDestination),
        # ('#' * 10, ImmediateOrigin),
        # ('#' * 6, TransmissionDate),
        # ('#' * 4, TransmissionTime),
Author: selkhateeb, Project: syntax, Lines: 33, Source: nacha.py

Example 11: test_lexer_extracts_signs

# Required import: from lexer import Lexer [as alias]
# Or: from lexer.Lexer import lex [as alias]
def test_lexer_extracts_signs(test_input, expected_value):
    l = Lexer()
    l.lex(test_input)
    assert l.get_next_token().value == expected_value
Author: MrLokans, Project: SimpleInterpreter, Lines: 6, Source: test_lexer.py

Example 12: test_lexer_extracts_integer_with_spaces

# Required import: from lexer import Lexer [as alias]
# Or: from lexer.Lexer import lex [as alias]
def test_lexer_extracts_integer_with_spaces():
    text = "  11 "

    l = Lexer()
    l.lex(text)
    assert l.get_next_token().value == 11
Author: MrLokans, Project: SimpleInterpreter, Lines: 8, Source: test_lexer.py


Note: the lexer.Lexer.lex examples in this article were curated by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their authors; copyright remains with the original authors, and distribution and use are governed by each project's license. Please do not reproduce without permission.