

Python Tokenizer.next Method Code Examples

This article collects typical usage examples of the tokenizer.Tokenizer.next method in Python. If you are looking for concrete examples of how to call Tokenizer.next in practice, the hand-picked snippets below should help. You can also explore further usage examples of the containing class, tokenizer.Tokenizer.


The following presents 12 code examples of the Tokenizer.next method, sorted by popularity by default.
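
Across the examples that follow, the same pattern recurs: the expression text is loaded with tokenize(), Tokenizer.next() is called once to position the tokenizer on the first token, and the tokenizer is then handed to a compiler. The sketch below illustrates that pattern; only "from tokenizer import Tokenizer" appears in the examples themselves, so the ExpressionCompiler import path used here is an assumption.

# Minimal sketch of the usage pattern shared by the examples below.
# Assumes the project-specific tokenizer module is importable; the
# expr_compiler module name is hypothetical (taken from the source file
# name listed in the example footers), not a confirmed import path.
from tokenizer import Tokenizer
from expr_compiler import ExpressionCompiler  # hypothetical import path

compiler = ExpressionCompiler()
tokenizer = Tokenizer()

tokenizer.tokenize("1+2+3+4")        # load the expression text
tokenizer.next()                     # advance to the first token before compiling

expr = compiler.compile(tokenizer)   # build the expression / execution tree
result = expr.evaluate()             # the tests below expect 10.0 for this input
print("result = %s" % result)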

Example 1: testExecutionTreeWithItemAssignment

# Required import: from tokenizer import Tokenizer [as alias]
# Or: from tokenizer.Tokenizer import next [as alias]
    def testExecutionTreeWithItemAssignment(self):

        c = ExpressionCompiler()
        tokenizer = Tokenizer()

        tokenizer.tokenize("A[B]= 1 + R")

        tokenizer.next()

        expr = c.compile(tokenizer)

        exec_tree = expr.get_execution_tree()

        print "Expression Tree %s\n" % (exec_tree)

        self.assertEqual(
            "( = ( item_assign ( literal A ) ( index ( literal B ) ) ) ( + ( literal 1.0 ) ( literal R ) ) )", exec_tree
        )

        # a little bit more complex
        tokenizer.tokenize("A[B+(C*3)+1]= 1 + R")

        tokenizer.next()

        expr = c.compile(tokenizer)

        exec_tree = expr.get_execution_tree()

        print "Expression Tree %s\n" % (exec_tree)

        self.assertEqual(
            "( = ( item_assign ( literal A ) ( index ( + ( + ( literal B ) ( * ( literal C ) ( literal 3.0 ) ) ) ( literal 1.0 ) ) ) ) ( + ( literal 1.0 ) ( literal R ) ) )",
            exec_tree,
        )
Developer: pombredanne, Project: java-balivernes, Lines: 36, Source: expr_compiler.py

Example 2: testEvaluateFactors

# Required import: from tokenizer import Tokenizer [as alias]
# Or: from tokenizer.Tokenizer import next [as alias]
    def testEvaluateFactors(self):

        c = ExpressionCompiler()

        tokenizer = Tokenizer()
        tokenizer.tokenize("7*7")
        tokenizer.next()

        expr = c.compile(tokenizer)

        result = expr.evaluate()

        print "result = %s\n" % (result)

        self.assertEqual(49.0, result)

        tokenizer.tokenize("7*7/7")
        tokenizer.next()

        expr = c.compile(tokenizer)

        result = expr.evaluate()

        print "result = %s\n" % (result)

        self.assertEqual(7.0, result)
Developer: pombredanne, Project: java-balivernes, Lines: 28, Source: expr_compiler.py

Example 3: interpretStatement

# Required import: from tokenizer import Tokenizer [as alias]
# Or: from tokenizer.Tokenizer import next [as alias]
 def interpretStatement(self):
     tokens = Tokenizer(self.IR)
     instr = tokens.next().lower()
     stmt = ""
     while tokens.peek() is not None:
         stmt += tokens.next()
     if instr[0] == 's':
         self.interpretSet(stmt)
     elif instr[0] == 'j':
         if len(instr) == 5:
             self.interpretJumpt(stmt)
         elif len(instr) == 4:
             self.interpretJump(stmt)
     elif instr[0] == 'h':
         self.halt(tokens)
Developer: aaronlaikh, Project: Projects, Lines: 17, Source: INTERPRETER.py

Example 4: testEvaluateNegation

# Required import: from tokenizer import Tokenizer [as alias]
# Or: from tokenizer.Tokenizer import next [as alias]
    def testEvaluateNegation(self):

        c = ExpressionCompiler()
        tokenizer = Tokenizer()

        tokenizer.tokenize("not 0")
        tokenizer.next()

        expr = c.compile(tokenizer)

        result = expr.evaluate()

        print "result = %s\n" % (result)

        self.assertEqual(1, result)
Developer: pombredanne, Project: java-balivernes, Lines: 17, Source: expr_compiler.py

Example 5: correct_macro_syntax_test

# Required import: from tokenizer import Tokenizer [as alias]
# Or: from tokenizer.Tokenizer import next [as alias]
def correct_macro_syntax_test():
    macro_string = """
!config {
output: pdf, html
table_of_contents: true
}"""
    tokenizer = Tokenizer(macro_string)
    for token in tokenizer:
        if token[0] == "!":
            open_brackets = tokenizer.next()
            if open_brackets != "{":
                raise DMLSyntaxError(open_brackets, "{")
Developer: Ed-von-Schleck, Project: dml, Lines: 14, Source: test-function-syntax.py

Example 6: testExecutionTreeWithTerms

# Required import: from tokenizer import Tokenizer [as alias]
# Or: from tokenizer.Tokenizer import next [as alias]
    def testExecutionTreeWithTerms(self):

        c = ExpressionCompiler()
        tokenizer = Tokenizer()

        tokenizer.tokenize("A=1")
        tokenizer.next()

        expr = c.compile(tokenizer)

        self.assertEqual("( = ( literal A ) ( literal 1.0 ) )", expr.get_execution_tree())

        tokenizer.tokenize("A=1 and B>10")
        tokenizer.next()

        expr = c.compile(tokenizer)

        print "Execution Tree = %s\n" % (expr.get_execution_tree())

        self.assertEqual(
            "( and ( = ( literal A ) ( literal 1.0 ) ) ( > ( literal B ) ( literal 10.0 ) ) )",
            expr.get_execution_tree(),
        )

        tokenizer.tokenize("(A=1 and B>10) or (C > 10)")
        tokenizer.next()

        expr = c.compile(tokenizer)

        print "Execution Tree = %s\n" % (expr.get_execution_tree())
Developer: pombredanne, Project: java-balivernes, Lines: 32, Source: expr_compiler.py

Example 7: __init__

# Required import: from tokenizer import Tokenizer [as alias]
# Or: from tokenizer.Tokenizer import next [as alias]
class NLReader:
	'''Class for reading neurolucida files'''
	def __init__(self, f=None):
		if f:
			self.read(f)

	def read(self, f):
		try:
			filename=f.name
		except:
			try:
				filename=f.geturl()
			except:
				filename='unknown'
		self.filename=filename
		self.object_list=[]
		self.comment_list=[]
		self.current_object=None
		self.depth=0
		self.toke =  Tokenizer(f, NL_TOKENS)
		self.waiting = None
		self.accumulate=None
		self.cellnames=[]
		while 1:
			t = self.toke.next()
			if t=="EOF":
				break
			elif self.depth==0:
				if t[0]==";" and len(self.object_list)==0:
					self.comment_list.append(t[1:].strip())
				elif t[0]=="(":
					self.current_object={'sections':[], 'current_section':[],
										 'parents':{}, 'attribs':{}, 'csd':1}
					self.depth+=1
			else:
				if t[0]==";":
					continue
				elif t=="(":
					self.increase_depth()
				elif t==")":
					self.decrease_depth()
				elif t=="|":
					self.close_section()
				elif t[0]=='"':
					if not t[-1] =='"':
						raise StandardError("Unmatched Quotes")
					self.handle_string(t[1:-1])
				else:
					try:
						t=int(t)
						self.handle_number(t)
					except:
						try:
							t=float(t)
							self.handle_number(t)
						except ValueError:
							self.handle_word(t)
					
					

	def increase_depth(self):
		self.depth+=1
		self.accumulate=[]

	def handle_string(self, s):
		if self.depth==1:
			self.current_object["attribs"]["Name"]=s
		elif self.waiting:
			self.current_object["attribs"][self.waiting]=s
			self.waiting = None	
			
	def handle_word(self, s):
		self.accumulate=None
		if not self.current_object:
			return
		if s in ["Color", "Resolution", "Name", "RGB", "Font"]:
			self.waiting = s
		elif s in ["Incomplete", "Axon"]:
			self.current_object["attribs"][s]=1
		elif self.waiting:
			self.current_object["attribs"][self.waiting]=s
			self.waiting=None
		elif self.depth==1:
			self.current_object["attribs"]["MarkerType"]=s
		elif self.depth==2 and not self.current_object["attribs"].get("Name"):
			self.current_object["attribs"]["Name"]=s

			
	def handle_number(self, s):
		if self.waiting and  self.waiting!="RGB":
			self.current_object["attribs"][self.waiting]="%.4s" % s
			self.waiting = None
		elif self.accumulate!=None:
			self.accumulate.append(s)

	def close_section(self):
		if not self.current_object["current_section"]:
			self.current_object["current_section"]=[]
			self.current_object["csd"]=self.depth
			return
#......... remainder of the code omitted here .........
Developer: gic888, Project: MIEN, Lines: 103, Source: neurolucida.py

Example 8: Compiler

# Required import: from tokenizer import Tokenizer [as alias]
# Or: from tokenizer.Tokenizer import next [as alias]
class Compiler(object):
    """ create tokens for parsing the grammar. 
        This class is a wrapper around the python tokenizer adapt to the DSL that is going to be used.
    """
    
    # Class members
    c_log = logging.getLogger("query.compiler")
    c_log.setLevel(logging.DEBUG)
    
    def __init__(self):
        """ constructor """
        
        self._tokenizer = None

        # the current statement used to create a pseudo iterator
        self._current_statement = None
        
        #use delegation
        self._expr_compiler = ExpressionCompiler()
 
    def compile(self,program):
        """ compile the passed program.
        
            Args:
               program: the program to parse
               
            Returns:
               return 
        
            Raises:
               exception 
        """ 
        self._tokenizer = Tokenizer()
        self._tokenizer.tokenize(program)
        self._tokenizer.next()
        
        return self._compile()
        
    def _compile(self):
        """ private compilation method .
        
            Args:
               program: the program to parse
               
            Returns:
               return 
        
            Raises:
               exception 
        """ 
        # need to add a block statement for the future
        block = BlockStatement()
        
        for s in self._read_statements():
            block.add(s)
        
        return block
    
    def _read_criteria_statement(self):
        """ private compilation method .
        
            Args:
               program: the program to parse
               
            Returns:
               return 
        
            Raises:
               exception 
        """
        statement = CriteriaStatement() 
        
        token = self._tokenizer.current_token()
        
        # Can have a destination or origin statement or nothing
        while token.type != 'ENDMARKER':
            
            expr = self._expr_compiler.compile(self._tokenizer)
            print "expr = %s\n"%(expr)
            statement.add_criteria(expr)
            
            token = self._tokenizer.current_token()
            # if find to or from there is a destination or origin statement
            if token.value == 'to' or token.value == 'from':
                break
                
        return statement
    
    def _read_destination_statement(self):
        """ destination statement. This is where to store the data.
        
            Args:
               None
               
            Returns:
               return 
        
            Raises:
               exception 
        """ 
#......... remainder of the code omitted here .........
Developer: gaubert, Project: java-balivernes, Lines: 103, Source: compiler.py

Example 9: __init__

# Required import: from tokenizer import Tokenizer [as alias]
# Or: from tokenizer.Tokenizer import next [as alias]
class Interpreter:

  def __init__(self, code_string=None):
    self._KEYWORDS = ['read', 'write']

    self._token = None
    self._line = 0

    self._tokenizer = Tokenizer(code_string, ['+','-','/','*','(',')',':='], ['\n',' '])
    self._symboltable = SymbolTable()

  def reset(self):
    self._line = 0
    self._token = None
    self._tokenizer.clear()

  def interpret(self, code_string=None):
    if code_string is not None:
      self._tokenizer.append(code_string)

    self._consume()
    self.program()

  def _consume(self, _nomable=None):
    if _nomable == '$$':
      self.reset()
      return True

    if _nomable == 'id':
      self._symboltable.add(self._token, self._line)
    # TODO: add current token to AST
    self._token = self._tokenizer.next()

  def _is_token_id(self, _id=None):
    if self._token is None:
      raise ParseError(self._line, 'unexpected EOF')

    if _id is None:
      _id = self._token

    if self._symboltable.has(_id):
      return True
    elif _id.isalpha() and _id not in self._KEYWORDS:
      return True
    else:
      return False

  def _is_token_num(self, _num=None):
    if self._token is None:
      raise ParseError(self._line, 'unexpected EOF')

    if _num is None:
      _num = self._token
    if _num.isdigit():
      return True
    else:
      return False

  def _is_token_id_or_num(self, _token=None):
    if _token is None:
      _token = self._token
    if self._is_token_id(_token) or self._is_token_num(_token):
      return True
    else:
      return False

  def _match(self, expected):
    # TODO: might conflict with id's named 'id' or 'number'
    if expected == self._token or expected in ['id', 'number']:
      self._consume(self._token)
    else:
      raise TokenError(self._line, self._token, expected)

  def _skip(self):
    pass

  def program(self):
    if self._token in ['read', 'write', '$$'] or self._is_token_id():
      self._stmt_list()
      self._match('$$')
    else:
      raise ParseError(self._line, 'program')

  def _stmt_list(self):
    if self._token == '$$':
      self._skip()
    elif self._token in ['read', 'write'] or self._is_token_id():
      self._line += 1
      self._stmt()
      self._stmt_list()
    else:
      raise ParseError(self._line, 'stmt_list')

  def _stmt(self):
    if self._token == 'read':
      self._match('read')
      self._match('id')
    elif self._token == 'write':
      self._match('write')
      self._expr()
#......... remainder of the code omitted here .........
Developer: pegurnee, Project: 2016-01-542, Lines: 103, Source: interpreter.py

Example 10: __init__

# Required import: from tokenizer import Tokenizer [as alias]
# Or: from tokenizer.Tokenizer import next [as alias]
class HocReader:
	'''Class for reading Hoc files.
Currently reads only connectivity and pt3dadd morphology'''
	def __init__(self, fobj, name):
		self.fname = name
		self.toke =  Tokenizer(fobj, HOC_TOKENS)
	

	def read(self):
		cname = re.sub(r"\.[^.]*$", "", self.fname)
		cname = re.sub(r"\W", "_", cname)
		cell = mien.nmpml.elements['Cell'](attribs={"Name":cname})
		cell.addComment("Imported from hoc file %s" % self.fname)
		self.current_section = None
		secnames = []
		srefs ={}
		while 1:
			t = self.toke.next()
			if t=="EOF":
				break
			if t.startswith("/*"):
				cell.addComment(t[2:-2])
			elif t.startswith("//"):
				cell.addComment(t[2:])
			elif t=="create":
				t = self.toke.next()
				n,i = getNameIndex(t)
				secnames.append(n)
				regl = []
				for s in range(i):
					sec = mien.nmpml.elements["Section"](attribs={'Name':"%s[%i]" % (n,s),
										   'Parent':"None"}, container=cell)
					regl.append(sec.name())
					srefs[sec.name()]=sec
					cell.elements.append(sec)
			elif t=="connect":
				first = self.toke.next()
				first = srefs[first]
				floc = float(self.toke.next())
				second = self.toke.next()
				if getNameIndex(second)[0] in secnames:
					sloc = float(self.toke.next())
					second = srefs[second]
				else:
					sloc = float(second)
					second = self.current_section
				if floc>sloc:
					floc, sloc = sloc, floc  # swap so the smaller location comes first
					second, first = first, second
				first.attributes["Parent"]=second.name()
			elif getNameIndex(t)[0] in secnames:
				self.current_section = srefs[t]
			elif not self.current_section:
				continue
			else:
				if t == "pt3dadd":
					pt = []
					for i in range(4):
						pt.append(float(self.toke.next()))
					self.current_section.setPoints(pt, 1)	
				else:
					#print t
					pass
		return cell		
Developer: gic888, Project: MIEN, Lines: 66, Source: hoc.py

Example 11: testEvaluateAdditivity

# Required import: from tokenizer import Tokenizer [as alias]
# Or: from tokenizer.Tokenizer import next [as alias]
    def testEvaluateAdditivity(self):

        c = ExpressionCompiler()

        tokenizer = Tokenizer()
        tokenizer.tokenize("1+2")
        tokenizer.next()

        expr = c.compile(tokenizer)

        result = expr.evaluate()  # IGNORE:E1103

        print ("result = %s\n" % (result))

        self.assertEqual(3.0, result)

        print ("Test multiple additions \n")

        tokenizer.tokenize("1+2+3+4")
        tokenizer.next()

        expr = c.compile(tokenizer)

        result = expr.evaluate()

        print ("result = %s\n" % (result))

        self.assertEqual(10.0, result)

        print ("Test multiple substractions \n")

        tokenizer.tokenize("1-3+4+5")
        tokenizer.next()

        expr = c.compile(tokenizer)

        result = expr.evaluate()

        print ("result = %s\n" % (result))

        self.assertEqual(7.0, result)

        tokenizer.tokenize("1-7-3+4+5")
        tokenizer.next()

        expr = c.compile(tokenizer)

        result = expr.evaluate()

        print ("result = %s\n" % (result))

        self.assertEqual(0.0, result)

        tokenizer.tokenize("7-7+17-18")
        tokenizer.next()

        expr = c.compile(tokenizer)

        result = expr.evaluate()

        print ("result = %s\n" % (result))

        self.assertEqual(-1.0, result)

        tokenizer.tokenize("-7-7+17-4")
        tokenizer.next()

        expr = c.compile(tokenizer)

        result = expr.evaluate()

        print ("result = %s\n" % (result))

        self.assertEqual(-1.0, result)
Developer: pombredanne, Project: java-balivernes, Lines: 76, Source: expr_compiler.py

Example 12: testEvaluateBooleanExpression

# Required import: from tokenizer import Tokenizer [as alias]
# Or: from tokenizer.Tokenizer import next [as alias]
    def testEvaluateBooleanExpression(self):

        c = ExpressionCompiler()

        tokenizer = Tokenizer()
        tokenizer.tokenize("3 < 2")
        tokenizer.next()

        expr = c.compile(tokenizer)

        result = expr.evaluate()  # IGNORE:E1103

        print ("result = %s\n" % (result))

        self.assertEqual(0, result)

        tokenizer.tokenize("1 < 2")
        tokenizer.next()

        expr = c.compile(tokenizer)

        result = expr.evaluate()  # IGNORE:E1103

        print ("result = %s\n" % (result))

        self.assertEqual(1, result)

        tokenizer.tokenize("3 <= 2")
        tokenizer.next()

        expr = c.compile(tokenizer)

        result = expr.evaluate()  # IGNORE:E1103

        print ("result = %s\n" % (result))

        self.assertEqual(0, result)

        tokenizer.tokenize("3 <= 10")
        tokenizer.next()

        expr = c.compile(tokenizer)

        result = expr.evaluate()  # IGNORE:E1103

        print ("result = %s\n" % (result))

        self.assertEqual(1, result)

        tokenizer.tokenize("100 <= 100")
        tokenizer.next()

        expr = c.compile(tokenizer)

        result = expr.evaluate()  # IGNORE:E1103

        print ("result = %s\n" % (result))

        self.assertEqual(1, result)

        tokenizer.tokenize("10 > 20")
        tokenizer.next()

        expr = c.compile(tokenizer)

        result = expr.evaluate()  # IGNORE:E1103

        print ("result = %s\n" % (result))

        self.assertEqual(0, result)

        tokenizer.tokenize("1000 > 20")
        tokenizer.next()

        expr = c.compile(tokenizer)

        result = expr.evaluate()  # IGNORE:E1103

        print ("result = %s\n" % (result))

        self.assertEqual(1, result)

        tokenizer.tokenize("10 >= 20")
        tokenizer.next()

        expr = c.compile(tokenizer)

        result = expr.evaluate()  # IGNORE:E1103

        print ("result = %s\n" % (result))

        self.assertEqual(0, result)

        tokenizer.tokenize("1000 >= 20")
        tokenizer.next()

        expr = c.compile(tokenizer)

        result = expr.evaluate()  # IGNORE:E1103

#......... remainder of the code omitted here .........
Developer: pombredanne, Project: java-balivernes, Lines: 103, Source: expr_compiler.py


Note: The tokenizer.Tokenizer.next examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers; copyright remains with the original authors, and distribution and use are subject to each project's license. Please do not reproduce without permission.