本文整理匯總了Python中ply.lex.LexToken方法的典型用法代碼示例。如果您正苦於以下問題:Python lex.LexToken方法的具體用法?Python lex.LexToken怎麽用?Python lex.LexToken使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類ply.lex的用法示例。
在下文中一共展示了lex.LexToken方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: from_token
# 需要導入模塊: from ply import lex [as 別名]
# 或者: from ply.lex import LexToken [as 別名]
def from_token(
    cls: Type[TAssign],
    name_token: lex.LexToken,
    equal_token: lex.LexToken = None,
    value_token: lex.LexToken = None,
) -> TAssign:
    """Build an assignment node out of the parser's raw tokens.

    The EQUAL token is mandatory (it carries the raw source text);
    the VALUE token may legitimately be absent.
    """
    if equal_token is None:
        raise ValueError('Empty EQUAL node is not allowed')
    # A missing value token means the assignment has an empty right side.
    value_item = None if value_token is None else Value.from_token(value_token)
    return cls(
        left=Name.from_token(name_token),
        right=value_item,
        lineno=name_token.lineno,
        raw_text=equal_token.value,
    )
示例2: include_end
# 需要導入模塊: from ply import lex [as 別名]
# 或者: from ply.lex import LexToken [as 別名]
def include_end(self):
    """ Finish the current include file: restore the including file's
    lexer state and emit an ``_ENDFILE_`` token, or return None when
    the whole input stack is exhausted.
    """
    # Restore the outer file's lexer and input buffer *before* popping,
    # so the marker token below reports positions in the outer file.
    top = self.filestack[-1]
    self.lex = top[2]
    self.input_data = top[3]
    self.filestack.pop()
    if not self.filestack:  # Stack empty: end of all input.
        return
    self.filestack[-1][1] += 1  # Resume line counting in the previous file
    token = lex.LexToken()  # Synthesize the end-of-file marker token
    token.value = self.put_current_line()
    token.type = '_ENDFILE_'
    token.lineno = self.lex.lineno
    token.lexpos = self.lex.lexpos
    return token
示例3: include_end
# 需要導入模塊: from ply import lex [as 別名]
# 或者: from ply.lex import LexToken [as 別名]
def include_end(self):
    """ Wrap up an included file and hand back an ``_ENDFILE_`` marker
    token (or None once every file on the stack has been consumed).
    """
    previous = self.filestack[-1]
    self.lex = previous[2]          # restore the outer file's lexer
    self.input_data = previous[3]   # ...and its input buffer
    self.filestack.pop()
    if not self.filestack:
        # Nothing left on the stack: the whole input is exhausted.
        return None
    # Resume counting lines in the file that performed the include.
    self.filestack[-1][1] += 1
    marker = lex.LexToken()
    marker.value = self.put_current_line(suffix='\n')
    marker.type = '_ENDFILE_'
    marker.lineno = self.lex.lineno
    marker.lexpos = self.lex.lexpos
    return marker
示例4: p_datainst3op_error
# 需要導入模塊: from ply import lex [as 別名]
# 或者: from ply.lex import LexToken [as 別名]
def p_datainst3op_error(p):
    # NOTE: the docstring below is functional — ply.yacc reads it as the
    # grammar productions for this error rule. Do not edit it as prose.
    """datainst3op : OPDATA3OP logmnemonic flagscondandspace REG error REG COMMA op2
                   | OPDATA3OP logmnemonic flagscondandspace REG COMMA REG error op2
                   | OPDATA3OP logmnemonic flagscondandspace REG COMMA REG
                   | OPDATA3OP logmnemonic flagscondandspace REG error COMMA REG COMMA op2
                   | OPDATA3OP logmnemonic flagscondandspace REG COMMA REG error COMMA op2"""
    # Each branch identifies which production matched by the number of
    # symbols in ``p`` and raises a French-language diagnostic.
    if len(p) == 9:
        # 8 symbols: an ``error`` token sits where a comma was expected.
        raise YaccError("Les registres et/ou constantes utilisés dans une opération doivent être séparés par une virgule")
    elif len(p) == 7:
        # Only ``REG COMMA REG`` matched: the third operand is missing.
        raise YaccError("L'instruction {} requiert 3 arguments".format(p[1]))
    elif len(p) == 10:
        # 9 symbols: looks like an invalid register name; the stray token
        # follows either the first (p[5]) or second (p[7]) register —
        # TODO confirm against the lexer's REG rule.
        if isinstance(p[5], LexToken):
            raise YaccError("Le registre R{}{} n'existe pas".format(p[4], p[5].value))
        else:
            raise YaccError("Le registre R{}{} n'existe pas".format(p[6], p[7].value))
    elif len(p) == 11:
        # NOTE(review): placeholder message — presumably a debug leftover;
        # verify whether this production is reachable.
        raise YaccError("TEST")
示例5: __init__
# 需要導入模塊: from ply import lex [as 別名]
# 或者: from ply.lex import LexToken [as 別名]
def __init__(self, msg, parser_token=None, cim_error=None):
    """
    Store a CIM repository error alongside the base MOF error data.

    Parameters:

      msg (:term:`string`):
        Human-readable description of the failure.

      parser_token (lex.LexToken or yacc.YaccProduction):
        PLY token (the ``t`` argument of a lex function or the ``p``
        argument of a yacc function) from which MOF source text and
        location information are derived; ``None`` skips that
        information.

      cim_error (:class:`~pywbem.CIMError`):
        The error the CIM repository reported.
    """
    # Let the base class record the message and source location.
    super(MOFRepositoryError, self).__init__(msg, parser_token)
    self._cim_error = cim_error
示例6: token
# 需要導入模塊: from ply import lex [as 別名]
# 或者: from ply.lex import LexToken [as 別名]
def token(self) -> lex.LexToken:
    """
    Fetch the next token from the wrapped lexer.

    Part of the ``ply`` lexer API; not meant to be called directly.
    """
    next_token = self._lexer.token()
    return next_token
示例7: t_NAME
# 需要導入模塊: from ply import lex [as 別名]
# 或者: from ply.lex import LexToken [as 別名]
def t_NAME(self, token: lex.LexToken) -> lex.LexToken:
    """Handle a NAME token and switch the lexer into the ``name`` state."""
    # Subsequent tokens are matched by the ``name``-state rules.
    token.lexer.push_state('name')
    return token
示例8: t_COMMENT
# 需要導入模塊: from ply import lex [as 別名]
# 或者: from ply.lex import LexToken [as 別名]
def t_COMMENT(self, token: lex.LexToken) -> lex.LexToken:
    """Pass COMMENT tokens through unchanged (no state transition)."""
    return token
示例9: t_name_EQUAL
# 需要導入模塊: from ply import lex [as 別名]
# 或者: from ply.lex import LexToken [as 別名]
def t_name_EQUAL(self, token: lex.LexToken) -> lex.LexToken:
    """Handle an EQUAL token inside the ``name`` state."""
    # After ``=`` the lexer expects a value next.
    token.lexer.push_state('value')
    return token
示例10: t_value_VALUE
# 需要導入模塊: from ply import lex [as 別名]
# 或者: from ply.lex import LexToken [as 別名]
def t_value_VALUE(self, token: lex.LexToken) -> lex.LexToken:
    """Handle a VALUE token and leave the ``value`` state."""
    # The value terminates the assignment; return to the previous state.
    token.lexer.pop_state()
    return token
示例11: t_ANY_error
# 需要導入模塊: from ply import lex [as 別名]
# 或者: from ply.lex import LexToken [as 別名]
def t_ANY_error(self, token: lex.LexToken) -> None:
    """
    Error rule shared by every lexer state.

    Any input the lexer cannot match is fatal for the whole file.
    """
    raise ParsingError(token.value)
示例12: _get_token
# 需要導入模塊: from ply import lex [as 別名]
# 或者: from ply.lex import LexToken [as 別名]
def _get_token(
    parsed: yacc.YaccProduction,
    index: int,
) -> Optional[lex.LexToken]:  # TODO: lex.LexToken is in fact just `Any`
    """Read a raw slice entry from a production.

    ``YaccProduction.__getitem__`` has a broken type definition, so the
    underlying ``slice`` sequence is accessed directly instead.
    """
    token = parsed.slice[index]
    return token
示例13: token
# 需要導入模塊: from ply import lex [as 別名]
# 或者: from ply.lex import LexToken [as 別名]
def token(self):
    """ Returns a token from the current input. If tok is None
    from the current input, it means we are at end of current input
    (e.g. at end of include file). If so, closes the current input
    and discards it; then pops the previous input and lexer from
    the input stack, and gets another token.
    If the new token is again None, repeat the process described above
    until the token is either not None, or self.lex is None, which
    means we must effectively return None, because parsing has
    ended.
    """
    tok = None
    # A pending token type queued in self.next_token takes priority:
    # synthesize a LexToken of that type at the current lexer position.
    if self.next_token is not None:
        tok = lex.LexToken()
        tok.value = ''
        tok.lineno = self.lex.lineno
        tok.lexpos = self.lex.lexpos
        tok.type = self.next_token
        self.next_token = None  # consume the pending token
    # Otherwise keep pulling from the active lexer; on end-of-file fall
    # back to include_end(), which pops the include stack and may yield
    # an _ENDFILE_ token (or None when all input is done).
    while self.lex is not None and tok is None:
        tok = self.lex.token()
        if tok is not None:
            break
        tok = self.include_end()
    return tok
示例14: token
# 需要導入模塊: from ply import lex [as 別名]
# 或者: from ply.lex import LexToken [as 別名]
def token(self):
    """
    Returns the next LexToken. Returns None when all tokens have been
    exhausted.
    """
    if self.tokens_queue:
        # Serve tokens buffered earlier (multi-token expansions or the
        # synthetic NEWLINE/DEDENT sequence generated below).
        self.last_token = self.tokens_queue.pop(0)
    else:
        r = self.lex.token()
        if isinstance(r, MultiToken):
            # One lexer match expanded into several tokens: queue them
            # all and emit the first one now.
            self.tokens_queue.extend(r.tokens)
            self.last_token = self.tokens_queue.pop(0)
        else:
            if r is None and self.cur_indent > 0:
                # End of input while indentation levels are still open:
                # synthesize a closing NEWLINE (unless the last emitted
                # token already ended the line) plus one DEDENT per
                # open level, then reset the indent tracker.
                if (self.last_token and
                        self.last_token.type not in ('NEWLINE', 'LINE')):
                    newline_token = _create_token(
                        'NEWLINE', '\n', self.lex.lineno, self.lex.lexpos)
                    self.tokens_queue.append(newline_token)
                dedent_count = self.cur_indent
                dedent_token = _create_token(
                    'DEDENT', '\t', self.lex.lineno, self.lex.lexpos)
                self.tokens_queue.extend([dedent_token] * dedent_count)
                self.cur_indent = 0
                self.last_token = self.tokens_queue.pop(0)
            else:
                self.last_token = r
    return self.last_token
示例15: _create_token
# 需要導入模塊: from ply import lex [as 別名]
# 或者: from ply.lex import LexToken [as 別名]
def _create_token(token_type, value, lineno, lexpos):
    """
    Build a ``ply.lex.LexToken`` from its four attributes.

    ``LexToken`` defines no convenience constructor, so the fields have
    to be assigned after instantiation.
    """
    tok = lex.LexToken()
    tok.type, tok.value = token_type, value
    tok.lineno, tok.lexpos = lineno, lexpos
    return tok