This article collects typical usage examples of the Python antlr4.InputStream method. If you have been wondering how antlr4.InputStream is used in practice, the curated code examples below may help. You can also explore further usage examples of the antlr4 module that this class belongs to.
The following presents 15 code examples of antlr4.InputStream, sorted by popularity by default.
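Before the individual examples, here is a minimal sketch of the pattern they all share: wrap a source string in antlr4.InputStream, feed it to a generated lexer, buffer the tokens in a CommonTokenStream, and hand that to the parser. MyGrammarLexer, MyGrammarParser, and startRule are placeholders for classes and rules that ANTLR would generate from your own grammar; they are not part of the antlr4 runtime.

# A minimal sketch of the shared pattern; MyGrammarLexer / MyGrammarParser are
# hypothetical classes generated by ANTLR from your own grammar.
import antlr4
# from my_grammar import MyGrammarLexer, MyGrammarParser  # generated by ANTLR

def parse_text(source: str):
    input_stream = antlr4.InputStream(source)   # wrap the raw string
    lexer = MyGrammarLexer(input_stream)        # tokenize it
    tokens = antlr4.CommonTokenStream(lexer)    # buffer the tokens
    parser = MyGrammarParser(tokens)            # parse from the buffer
    return parser.startRule()                   # invoke the grammar's start rule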
Example 1: __init__

# Required module: import antlr4
# Or: from antlr4 import InputStream
def __init__(self, source):
    self._stream = CommonTokenStream(LuaLexer(InputStream(source)))
    # contains a list of CommonTokens
    self._line_count: int = 0
    self._right_index: int = 0
    self._last_expr_type: int = None
    # the following stacks are used to back up values
    self._index_stack: List[int] = []
    self._right_index_stack: List[int] = []
    self.text: str = ''  # last token text
    self.type: int = -1  # last token type
    # contains expected tokens in case of invalid input code
    self._expected = []
    # comments waiting to be inserted into ast nodes
    self._comments_index_stack: List[int] = []
    self.comments: List[Comment] = []
    self._hidden_handled: bool = False
    self._hidden_handled_stack: List[bool] = []
Example 2: prg2py_after_preproc

# Required module: import antlr4
# Or: from antlr4 import InputStream
def prg2py_after_preproc(data, parser_start, input_filename):
    input_stream = antlr4.InputStream(data)
    lexer = VisualFoxpro9Lexer(input_stream)
    stream = antlr4.CommonTokenStream(lexer)
    parser = VisualFoxpro9Parser(stream)
    tree = run_parser(stream, parser, parser_start)
    TreeCleanVisitor().visit(tree)
    output_tree = PythonConvertVisitor(input_filename).visit(tree)
    if not isinstance(output_tree, list):
        return output_tree
    output = add_indents(output_tree, 0)
    options = autopep8.parse_args(['--max-line-length', '100000', '-'])
    output = autopep8.fix_code(output, options)
    tokens = list(tokenize.generate_tokens(io.StringIO(output).readline))
    for i, token in enumerate(tokens):
        token = list(token)
        if token[0] == tokenize.STRING and token[1].startswith('u'):
            # strip the u'' prefix from unicode string literals
            token[1] = token[1][1:]
        tokens[i] = tuple(token)
    return tokenize.untokenize(tokens)
Example 3: parseCSharp

# Required module: import antlr4
# Or: from antlr4 import InputStream
def parseCSharp(code):
    code = code.replace('\\n', '\n')
    parsedVersion = []
    stream = antlr4.InputStream(code)
    lexer = CSharp4Lexer(stream)
    toks = antlr4.CommonTokenStream(lexer)
    toks.fetch(500)
    identifiers = {}
    identCount = 0
    for token in toks.tokens:
        if token.type == 109:
            parsedVersion += ["CODE_INTEGER"]
        elif token.type == 111:
            parsedVersion += ["CODE_REAL"]
        elif token.type == 112:
            parsedVersion += ["CODE_CHAR"]
        elif token.type == 113:
            parsedVersion += ["CODE_STRING"]
        elif token.type == 9 or token.type == 7 or token.type == 6:
            # whitespace, comments and newlines
            pass
        else:
            parsedVersion += [str(token.text)]
    return parsedVersion
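For illustration, a hedged usage sketch, assuming the CSharp4Lexer used above is importable: literal tokens are replaced by placeholder strings, everything else is kept verbatim.

# Hypothetical call, assuming parseCSharp and CSharp4Lexer are in scope.
tokens = parseCSharp('int x = 42;')
# If 42 lexes as an integer literal (token type 109 in this grammar), the
# result would look like: ['int', 'x', '=', 'CODE_INTEGER', ';']
print(tokens)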
Example 4: run_parser

# Required module: import antlr4
# Or: from antlr4 import InputStream
def run_parser(quil):
    # type: (str) -> List[AbstractInstruction]
    """
    Run the ANTLR parser.

    :param str quil: a single or multiline Quil program
    :return: list of instructions that were parsed
    """
    # Step 1: Run the Lexer
    input_stream = InputStream(quil)
    lexer = QuilLexer(input_stream)
    stream = CommonTokenStream(lexer)

    # Step 2: Run the Parser
    parser = QuilParser(stream)
    parser.removeErrorListeners()
    parser.addErrorListener(CustomErrorListener())
    tree = parser.quil()

    # Step 3: Run the Listener
    pyquil_listener = PyQuilListener()
    walker = ParseTreeWalker()
    walker.walk(pyquil_listener, tree)

    return pyquil_listener.result
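A hedged usage sketch, assuming the Quil grammar classes used above (QuilLexer, QuilParser, PyQuilListener, CustomErrorListener) are importable; the input is an ordinary Quil program string.

# Hypothetical call; "H 0\nCNOT 0 1" is a two-instruction Quil program.
instructions = run_parser("H 0\nCNOT 0 1")
for instr in instructions:
    print(instr)  # parsed instruction objects, per the docstring above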
Example 5: _parse_input_stream

# Required module: import antlr4
# Or: from antlr4 import InputStream
def _parse_input_stream(input_stream: antlr4.InputStream) -> RootNode:
    error_listener = _ConsoleErrorListener()

    lexer = JSONPathLexer(input_stream)
    lexer.addErrorListener(error_listener)

    token_stream = antlr4.CommonTokenStream(lexer)
    parser = _JSONPathParser(token_stream)
    parser.addErrorListener(error_listener)

    tree = parser.jsonpath()
    listener = _JSONPathListener(_stack=[])
    walker = antlr4.ParseTreeWalker()
    walker.walk(listener, tree)

    # pylint: disable=protected-access
    return listener._stack.pop()
    # pylint: enable=protected-access
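For illustration only, a hedged usage sketch; the path expression is a guess at what this JSONPath grammar accepts, and since the function is private, real callers would normally go through the library's public parse entry point.

# Hypothetical call with a common JSONPath expression.
root_node = _parse_input_stream(antlr4.InputStream('$.store.book[0].title'))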
Example 6: main

# Required module: import antlr4
# Or: from antlr4 import InputStream
def main():
    infilename = os.path.join(os.path.dirname(__file__), 'வெண்பாinput.txt')
    outfilename = os.path.join(os.path.dirname(__file__), 'வெண்பாoutput.txt')
    data = open(infilename).read()
    input_stream = antlr4.InputStream(data)
    lexer = வெண்பாLexer(input_stream)
    stream = CommonTokenStream(lexer)
    parser = வெண்பாParser(stream)
    tree = parser.வெண்பா()
    # print(tree.toStringTree())
    strtree = Trees.toStringTree(tree, None, parser)
    print(strtree)
    t = nltkTree.fromstring(strtree)
    # t.pretty_print()
    a = TreePrettyPrinter(t).text()
    print(a)
    # t.pprint(margin=70, indent=0, nodesep=u'', parens=u'()', quotes=False)
    # pprint(Trees.toStringTree(tree, None, parser), width=20, indent=4)
    with open(outfilename, 'w', encoding='utf8') as f:
        f.write(a)
Example 7: run_validator

# Required module: import antlr4
# Or: from antlr4 import InputStream
def run_validator(pattern, stix_version=DEFAULT_VERSION):
    """
    Validates a pattern against the STIX Pattern grammar. Error messages are
    returned in a list. The test passes if the returned list is empty.
    """
    start = ''
    if isinstance(pattern, six.string_types):
        start = leading_characters(pattern, 2)
        pattern = InputStream(pattern)

    if not start:
        start = leading_characters(pattern.readline(), 2)
        pattern.seek(0)

    if stix_version == '2.1':
        return run_validator21(pattern, start)
    else:
        return run_validator20(pattern, start)
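A hedged usage sketch, assuming the helpers referenced above (leading_characters, run_validator20/21, DEFAULT_VERSION) are in scope. The argument is a simple STIX 2 comparison expression, and per the docstring an empty list means the pattern is valid.

# Hypothetical call with a basic STIX 2 pattern.
errors = run_validator("[ipv4-addr:value = '198.51.100.1']")
if not errors:
    print("pattern is valid")
else:
    print("\n".join(errors))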
Example 8: process_sympy

# Required module: import antlr4
# Or: from antlr4 import InputStream
def process_sympy(sympy):
    matherror = MathErrorListener(sympy)

    stream = antlr4.InputStream(sympy)
    lex = PSLexer(stream)
    lex.removeErrorListeners()
    lex.addErrorListener(matherror)

    tokens = antlr4.CommonTokenStream(lex)
    parser = PSParser(tokens)

    # remove default console error listener
    parser.removeErrorListeners()
    parser.addErrorListener(matherror)

    relation = parser.math().relation()
    expr = convert_relation(relation)
    return expr
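A hedged usage sketch, assuming the LaTeX grammar classes used above (PSLexer, PSParser, MathErrorListener) and convert_relation are importable. The input is a LaTeX math string and the return value is a SymPy expression.

# Hypothetical call: convert a LaTeX fraction into a SymPy expression.
expr = process_sympy(r"\frac{x^2}{2}")
print(expr)  # e.g. x**2/2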
Example 9: _parse_boolexpr

# Required module: import antlr4
# Or: from antlr4 import InputStream
def _parse_boolexpr(self, s):
    lexer = BoolExprLexer(
        antlr4.InputStream(s)
    )
    stream = antlr4.CommonTokenStream(lexer)
    parser = BoolExprParser(stream)
    tree = parser.booleanExpression()

    eb = _BECompiler()
    walker = antlr4.ParseTreeWalker()
    walker.walk(eb, tree)

    return eb.expression, eb.comparator, eb.value
Example 10: _parse_string

# Required module: import antlr4
# Or: from antlr4 import InputStream
def _parse_string(s):
    lexer = minemeld.ft.condition.BoolExprLexer(
        antlr4.InputStream(s)
    )
    stream = antlr4.CommonTokenStream(lexer)
    parser = minemeld.ft.condition.BoolExprParser(stream)
    tree = parser.booleanExpression()

    eb = ExprBuilder()
    walker = antlr4.ParseTreeWalker()
    walker.walk(eb, tree)

    return eb
Example 11: get_token_stream

# Required module: import antlr4
# Or: from antlr4 import InputStream
def get_token_stream(source: str) -> CommonTokenStream:
    """Get the ANTLR token stream."""
    lexer = LuaLexer(InputStream(source))
    stream = CommonTokenStream(lexer)
    return stream
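A hedged usage sketch, assuming LuaLexer from the example above is importable; the argument is Lua source code.

# Hypothetical call: tokenize a one-line Lua chunk.
stream = get_token_stream('local x = 1')
stream.fill()  # force the lexer to produce all tokens
print([t.text for t in stream.tokens])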
Example 12: tokenize

# Required module: import antlr4
# Or: from antlr4 import InputStream
def tokenize(self, data):
    """Takes in a file and uses the antlr lexer to return a list of tokens."""
    # Antlr expects a string, but test cases are not necessarily valid utf-8.
    try:
        lexer_input = antlr4.InputStream(data.decode('utf-8'))
    except UnicodeDecodeError:
        raise errors.AntlrDecodeError

    stream = antlr4.CommonTokenStream(self._lexer(lexer_input))
    end = self.fill(stream)
    tokens = stream.getTokens(0, end)
    return [token.text for token in tokens]
Example 13: parseEQUEL

# Required module: import antlr4
# Or: from antlr4 import InputStream
def parseEQUEL(self, equel, inputclass=InputStream, **kwargs):
    """Parse an EQUEL expression and return an elasticsearch_dsl Search object according to the query expression."""
    inp = inputclass(equel)
    parser = EQUELParser(self)
    parsetree = parser.parse(inp)
    return EQUELRequest(parsetree, self)
Example 14: parse

# Required module: import antlr4
# Or: from antlr4 import InputStream
def parse(self, query_string: str) -> list:
    lexer = evaql_lexer(InputStream(query_string))
    stream = CommonTokenStream(lexer)
    parser = evaql_parser(stream)

    # Attach an error listener for debugging parser errors
    # parser._listeners = [self._error_listener]

    tree = parser.root()
    return self._visitor.visit(tree)
Example 15: preprocess_code

# Required module: import antlr4
# Or: from antlr4 import InputStream
def preprocess_code(data, encoding):
    input_stream = antlr4.InputStream(data)
    lexer = VisualFoxpro9Lexer(input_stream)
    stream = antlr4.CommonTokenStream(lexer)
    parser = VisualFoxpro9Parser(stream)
    tree = run_parser(stream, parser, 'preprocessorCode')
    visitor = PreprocessVisitor(encoding)
    visitor.tokens = visitor.visit(tree)
    return visitor