本文整理汇总了Python中Scanner.lookahead方法的典型用法代码示例。如果您正苦于以下问题:Python Scanner.lookahead方法的具体用法?Python Scanner.lookahead怎么用?Python Scanner.lookahead使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Scanner的用法示例。
在下文中一共展示了Scanner.lookahead方法的1个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: __init__
# 需要导入模块: import Scanner [as 别名]
# 或者: from Scanner import lookahead [as 别名]
class Lexer:
def __init__(self, sourceText, verbose=False):
    """Build a lexer over *sourceText* and prime the first character.

    A Scanner is constructed over the source text; its end-of-input
    sentinel is cached on the instance, and the first character is
    fetched so that c1/c2 are valid before the first call to get().
    """
    self.scanner = Scanner(sourceText, verbose)
    self.ENDMARK = self.scanner.ENDMARK
    self.verbose = verbose
    # Prime c1/c2 so get() can start scanning immediately.
    self.getChar()
def dq(self, s):
    """Return *s* wrapped in double quotes, e.g. dq('abc') -> '"abc"'.

    Used for readable diagnostics. Non-string values are converted
    with str(), exactly as the old '%s' formatting did.
    """
    # f-string replaces the dated '"%s"' % s formatting; conversion
    # semantics (implicit str()) are identical.
    return f'"{s}"'
def getChar(self):
"""
get the next character
"""
self.character = self.scanner.get()
self.c1 = self.character.char
#---------------------------------------------------------------
# Every time we get a character from the scanner, we also
# lookahead to the next character and save the results in c2.
# This makes it easy to lookahead 2 characters.
#---------------------------------------------------------------
self.c2 = self.c1 + self.scanner.lookahead(1)
def get(self):
    """Construct and return the next token from sourceText.

    Runs of whitespace and /* ... */ comments are consumed (and
    discarded) first; the returned token is then EOF, a VAR (promoted
    to its own keyword type when the spelling is in `keywords`), or
    the start of a LITERAL.

    NOTE(review): this excerpt is truncated after the LITERAL branch
    ("remainder omitted" below), so any later token kinds and the
    method's final return are not visible here.
    """
    #---------------------------------------------------------
    # PROCESS WHITE SPACE / COMMENTS
    #---------------------------------------------------------
    #ignore any whitespace or comments
    # print self.character
    # Loop until c1/c2 point at something that is neither whitespace
    # nor a comment opener; whitespace and comments may alternate.
    while self.c1 in WHITESPACE_CHARS or self.c2 == "/*":
        # process whitespace
        while self.c1 in WHITESPACE_CHARS:
            token = Token(self.character)
            token.type = WHITESPACE
            self.getChar()
            # Accumulate the full whitespace run into one token
            # (built but not returned — whitespace is discarded).
            while self.c1 in WHITESPACE_CHARS:
                token.cargo += self.c1
                self.getChar()
        # process comments
        while self.c2 == "/*":
            # we found comment start
            token = Token(self.character)
            token.type = COMMENT
            token.cargo = self.c2
            self.getChar() # read past the first character of a 2-character token
            self.getChar() # read past the second character of a 2-character token
            # Copy everything up to the closing */; an unterminated
            # comment at end of file is a hard error.
            while not (self.c2 == "*/"):
                if self.c1 == self.ENDMARK:
                    token.abort("Found end of file before end of comment")
                token.cargo += self.c1
                self.getChar()
            token.cargo += self.c2 # append the */ to the token cargo
            self.getChar() # read past the first character of a 2-character token
            self.getChar() # read past the second character of a 2-character token
            # return token # only if we want the lexer to return comments
    #---------------------------------------------------------
    # END PROCESS WHITE SPACE / COMMENTS
    #---------------------------------------------------------
    # Create a new token. It will remember position and line info from character
    token = Token(self.character)
    # End of file
    if self.c1 == self.ENDMARK:
        token.type = EOF
        return token
    # A variable starts
    if self.c1 in VAR_START_CHARS:
        token.type = VAR
        # get the whole variable
        self.getChar()
        while self.c1 in VAR_CHARS:
            token.cargo += self.c1
            self.getChar()
        # We found a keyword: the token type becomes the keyword's
        # own spelling rather than the generic VAR.
        if token.cargo in keywords:
            token.type = token.cargo
        return token
    # A literal starts
    if self.c1 in LITERAL_START_CHARS:
        token.type = LITERAL
        self.getChar()
    #......... remainder of this code omitted .........