

Python Lexer.tokens method: code examples

This article collects typical usage examples of the lexer.Lexer.tokens method in Python. If you are wondering how Lexer.tokens is used in practice, what it looks like in real code, or how to call it, the curated examples below may help. You can also explore further usage examples of the containing class, lexer.Lexer.


Four code examples of the Lexer.tokens method are shown below, sorted by popularity by default.

Example 1: vhdl_unit_name

# Required import: from lexer import Lexer [as alias]
# Or: from lexer.Lexer import tokens [as alias]
def vhdl_unit_name(file):
    """ Given the name of a VHDL file, attempts to find the unit
        (entity or package) name in this file.
        
        If several units are present, the first is returned.
        None is returned if no unit name is found.
    """
    rules = [
        (r'--[^\n]*\n',  'COMMENT'),
        (r'\n+',         'NEWLINE'),
        (r'\w+',         'ID'),
        (r'\s+',         'WHITESPACE'),
        (r'[^\w\n]+',    'NONE'),
    ]

    lx = Lexer(rules, skip_whitespace=False)
    with open(file) as f:
        lx.input(f.read())
    
    window = [None, None, None]
    
    try:
        for tok in lx.tokens():
            # Implements a simple sliding window looking for 
            # (entity|package) <name> is
            # as 3 consecutive IDs
            #
            if tok.type == 'ID':
                window = window[1:3] + [tok.val.lower()]
                
                if (    window[0] in ('entity', 'package') and 
                        window[2] == 'is'):
                    return window[1]
    except LexerError:  # presumably also exported by the same lexer module
        return None
Developer: duongbaoduy, Project: luz-cpu, Lines of code: 36, Source file: vhdl_utils.py
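
For illustration, here is a minimal usage sketch that is not part of the original project: it writes a tiny VHDL entity to a temporary file and asks vhdl_unit_name for the unit name. It assumes vhdl_unit_name and the lexer module shown above are importable.

import os
import tempfile

# Hypothetical usage sketch; the VHDL snippet and file handling are not from luz-cpu.
vhdl_source = """-- a tiny example design
entity counter is
    port (clk : in bit);
end entity counter;
"""

# vhdl_unit_name expects a file name, so write the snippet to a temporary file first.
with tempfile.NamedTemporaryFile("w", suffix=".vhd", delete=False) as f:
    f.write(vhdl_source)
    path = f.name

try:
    print(vhdl_unit_name(path))  # expected output: counter
finally:
    os.remove(path)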

Example 2: __init__

# Required import: from lexer import Lexer [as alias]
# Or: from lexer.Lexer import tokens [as alias]
	def __init__(self, query = None, options = None):
		# Avoid a mutable default argument: each call gets its own options dict.
		if options is None:
			options = {}
		options.update({"anon_base" : "b0", "validate" : False})
		if not options.get('prefixes'):
			options['prefixes'] = {}
		self.options = options
		self.productions = []
		self.vars = {}
		self.nd_var_gen = 0
		if isinstance(query, list):
			self.input = None
			self.tokens = query
		else:
			if isinstance(query, Lexer):
				lexer = query
			else:
				lexer = Lexer(query, options)
			self.input = lexer.query
			self.tokens = [token for token in lexer.tokens()]
Developer: huyphan, Project: pysparql, Lines of code: 20, Source file: parser.py
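
As a rough usage sketch, the constructor accepts either a raw query string, an existing Lexer, or a pre-tokenized list. The class name Parser and the empty options dict are assumptions; the snippet above does not show the enclosing class.

# Hypothetical usage; Parser as the enclosing class name is an assumption.
query = "SELECT ?s WHERE { ?s ?p ?o }"

p1 = Parser(query)                            # a new Lexer tokenizes the string internally
p2 = Parser(Lexer(query, {}))                 # an existing Lexer instance is reused
p3 = Parser(list(Lexer(query, {}).tokens()))  # a pre-built token list: self.input stays None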

Example 3: StateLinker

# Required import: from lexer import Lexer [as alias]
# Or: from lexer.Lexer import tokens [as alias]
class StateLinker(Visitor):
	
	def __init__(self):
		self.visiting_statechart = None
		self.visiting_node = None
		self.lexer = Lexer()
	
	def visit_ClassDiagram(self, class_diagram): 
		for c in class_diagram.classes :
			c.accept(self)

	def visit_Class(self, c):
		if c.statechart:
			c.statechart.accept(self)
		
	def visit_StateChart(self, statechart):
		self.visiting_statechart = statechart
		for node in statechart.basics + statechart.composites:
			node.accept(self)
					 
	def visit_StateChartNode(self, node):
		self.visiting_node = node
		node.enter_action.accept(self)
		node.exit_action.accept(self)
		for transition in node.transitions :
			transition.accept(self)
			
	def visit_StateChartTransition(self, transition):
		try :
			transition.target.accept(self)
		except StateReferenceException as exception :
			raise StateReferenceException("Transition from <" + self.visiting_node.full_name + "> has invalid target. " + exception.message)
		try :
			transition.action.accept(self)
		except StateReferenceException as exception :
			raise StateReferenceException("Transition from <" + self.visiting_node.full_name + "> has invalid action. " + exception.message)
		try :
			if transition.guard :
				transition.guard.accept(self)
		except StateReferenceException as exception :
			raise StateReferenceException("Transition from <" + self.visiting_node.full_name  + "> has invalid guard. " + exception.message)
		
	def visit_StateReference(self, state_reference):
		state_reference.target_nodes = []
		
		current_node = None #Will be used to find the target state(s)
		split_stack = [] #used for branching

		self.lexer.input(state_reference.path_string)

		for token in self.lexer.tokens() :
			
			if current_node is None : #current_node is not set yet or has been reset, the CHILD token can now have a special meaning
				if token.type == TokenType.SLASH :
					#Root detected
					current_node = self.visiting_statechart.root
					#Token consumed so continue
					continue
				else :
					current_node = self.visiting_node
					
			if token.type == TokenType.DOT :
				#Advance to next token
				token = self.lexer.nextToken()
				
				if token is None or token.type == TokenType.SLASH :
					#CURRENT operator "." detected
					continue
				elif token.type == TokenType.DOT :
					#Advance to next token
					token = self.lexer.nextToken()
					if token is None or token.type == TokenType.SLASH :
						#PARENT operator ".." detected
						current_node = current_node.parent
						if current_node is None :
							raise StateReferenceException("Illegal use of PARENT \"..\" operator at position " + str(token.pos) + " in state reference. Root of statechart reached.")
					
					else :
						raise StateReferenceException("Illegal use of PARENT \"..\" operator at position " + str(token.pos) + " in state reference.")
	
				else :
					raise StateReferenceException("Illegal use of CURRENT \".\" operator at position " + str(token.pos) + " in state reference.")
					
			elif token.type == TokenType.SLASH :
				continue
			elif token.type == TokenType.WORD :
				#try to advance to next child state
				cname = token.val
				found = False
				for child in current_node.children :
					if child.name == cname : 
						found = True
						current_node = child
						break
				if not found :
					raise StateReferenceException("Refering to non exiting node at posisition " + str(token.pos) + " in state reference.")
			elif token.type == TokenType.LBRACKET :
				split_stack.append(current_node)
			elif token.type == TokenType.RBRACKET :
				if len(split_stack) > 0 :
#......... remainder of the code omitted here .........
Developer: hergin, Project: AToMPM, Lines of code: 103, Source file: state_linker.py
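
Based on the token handling above (SLASH, DOT, WORD, LBRACKET/RBRACKET), visit_StateReference resolves filesystem-like state reference paths. The path strings below are illustrative assumptions, not taken from the AToMPM sources:

# Hypothetical state reference paths and how the visitor above would resolve them:
#   "/root/child_a"       absolute: a leading SLASH starts at the statechart root
#   "child_a/grandchild"  relative: starts at the node that owns the transition
#   "../sibling"          the PARENT operator ".." climbs one level before descending
#   "./child_a"           the CURRENT operator "." stays at the current node
# LBRACKET/RBRACKET push and pop the current node on split_stack, so one
# reference can branch out to several target nodes.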

Example 4: Lexer

# Required import: from lexer import Lexer [as alias]
# Or: from lexer.Lexer import tokens [as alias]
from time import time
from lexer import Lexer

webpage = '<h1>this is<a href="lala">my</a>page 9 la</h1>'
tokens_spec = [("left_angle_slash", r"</"), ("left_angle", r"<"), ("right_angle", r">"), ("word", r"[^</>]+")]
lexer = Lexer(tokens_spec)
start_time = time()
print(webpage)
print(lexer.tokens(webpage))
print("time: %s ms" % ((time() - start_time) * 1000))
Developer: stanfeldman, Project: samples, Lines of code: 12, Source file: main.py
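
Note the API difference between the examples: in examples 1-3 the lexer is fed its input separately (via input() or the constructor) and tokens() is called without arguments, whereas the Lexer in example 4 receives the token specification in its constructor and takes the input string directly as an argument to tokens(). The method name is the same, but the classes come from different projects.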


Note: The lexer.Lexer.tokens examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs; the code snippets were selected from open-source projects contributed by various developers. Copyright of the source code belongs to the original authors; for redistribution and use, please refer to each project's license. Do not reproduce without permission.