本文整理汇总了Python中grammar.Grammar类的典型用法代码示例。如果您正苦于以下问题：Python Grammar类的具体用法？Python Grammar怎么用？Python Grammar使用的例子？那么恭喜您，这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Grammar类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: buildgrammar
def buildgrammar(self):
g = Grammar()
g.nonterminals = self.states
g.terminals = self.symbols
g.startsymbol = str(self.initialstate)
tf = self.transitions
if len(tf) > 0:
for t in tf:
if len(t) == 3:
g.add_production(t[0], t[1] + t[2])
if t[2] in self.finalstates:
g.add_production(t[0], t[1] + '')
if g.starsymbol in self.finalstates:
g.add_production(g.starsymbol, 'e')
self.grammar = g
print 'Nonterminals: ', self.grammar.nonterminals
print 'Terminals: ', self.grammar.terminals
print 'Start symbol: ', self.grammar.startsymbol
print 'Productions: ', self.grammar.productions
return
示例2: generate
def generate(input_str):
    ''' Parses an input string and returns another one
        containing the generated program skeleton.

        The parser yields (HEADER, L, ENDCODE); only the grammar rules
        in L and the optional ENDCODE trailer contribute to the output.
        NOTE(review): the original code accumulated the skel import,
        HEADER and a "def generate" preamble into `result` and then
        discarded all of it with `result = ''`; that dead accumulation
        has been removed — the returned string is unchanged.  Confirm
        whether HEADER was actually meant to be included.
    '''
    HEADER, L, ENDCODE = parser.parse(input_str)
    result = ''
    grammar = Grammar(Parser.START_SYMBOL)
    if L:
        for T in L:
            grammar.addRule(T)
    result += grammar.generate(Parser.START_SYMBOL)
    if ENDCODE is not None:
        result += ENDCODE + '\n'
    return result
示例3: generate_samples
def generate_samples(grammar_dir, outfiles):
    """Generates a set of samples and writes them to the output files.

    Args:
        grammar_dir: directory to load grammar files from.
        outfiles: A list of output filenames.
    """
    # `with` guarantees the template handle is closed even if read() raises.
    with open(os.path.join(grammar_dir, 'template.html')) as f:
        template = f.read()
    jsgrammar = Grammar()
    err = jsgrammar.parse_from_file(os.path.join(grammar_dir, 'jscript.txt'))
    if err > 0:
        print('There were errors parsing grammar')
        return
    for outfile in outfiles:
        result = GenerateNewSample(template, jsgrammar)
        if result is not None:
            print('Writing a sample to ' + outfile)
            try:
                with open(outfile, 'w') as f:
                    f.write(result)
            except IOError:
                print('Error writing to output')
示例4: ShellParser
class ShellParser(object):
    """Parses shell command strings into (commandtype, servicetype, action) tokens.

    NOTE(review): parse() calls `grammar.parseString`, a module-level
    `grammar` object, NOT the instance grammar created by buildGrammar()
    — confirm this is intentional.
    """

    # Class-level default; not assigned anywhere in the visible methods.
    services = None

    def __init__(self):
        # Tokens from the most recent parse() call; None until first parse.
        self.tokens = None

    def __call__(self, command_string):
        # Make the parser instance directly callable on a command string.
        self.parse(command_string)

    def buildGrammar(self):
        """Create a Grammar linked back to this parser and build its BNF."""
        self.grammar = Grammar()
        self.grammar.parser = self
        self.grammar.makeBNF()
        return self.grammar

    def parse(self, cmd):
        """Parse `cmd`, remember the tokens, and return a summary string."""
        tokens = grammar.parseString(cmd)
        out = """
command: %s
service type: %s
action: %s
""" % (tokens.commandtype, tokens.servicetype, tokens.action)
        self.tokens = tokens
        return out
示例5: delete_nonderivable_nonterminals
def delete_nonderivable_nonterminals(grammar):
    """Return a copy of `grammar` restricted to nonterminals reachable from the axiom.

    Performs a breadth-first traversal starting at the axiom, following
    the nonterminals on the right-hand side of each rule, then keeps only
    the rules whose left-hand side was reached.
    """
    new_grammar = Grammar()
    new_grammar.axiom = grammar.axiom
    new_grammar.terminals = grammar.terminals
    unwatched = [new_grammar.axiom]
    watched = set()
    while unwatched:
        # BUG FIX: the original did `unwatched = unwatched.remove(x) or []`.
        # list.remove() returns None, so the worklist was rebound to a
        # fresh EMPTY list every iteration, dropping every pending
        # nonterminal and reducing the BFS to a single chain.
        nonterminal = unwatched.pop(0)
        watched.add(nonterminal)
        rules = find_rules_for_nonterminal(grammar.rules, nonterminal)
        for rule in rules:
            for symbol in rule.right_side:
                if isinstance(symbol, Nonterminal):
                    if symbol not in watched and symbol not in unwatched:
                        unwatched.append(symbol)
    new_grammar.nonterminals = watched
    # Keep only rules whose left-hand side survived the reachability pass.
    new_grammar.rules = [rule for rule in grammar.rules
                         if rule.left_side[0] in watched]
    return new_grammar
示例6: __init__
def __init__(self, canvas, namespace = None):
    """Initialise the drawing state on top of the Grammar base class.

    Args:
        canvas: drawing surface, forwarded to Grammar.__init__ (which
            presumably stores it as self._canvas — used below).
        namespace: optional namespace, forwarded to Grammar.__init__.
    """
    Grammar.__init__(self, canvas, namespace)
    self._autoclosepath = True   # paths are closed automatically by default
    self._path = None            # no path is under construction yet
    self._canvas.size = None     # canvas size not decided yet
    self._frame = 1              # frame counter starts at 1
    self._set_initial_defaults() ### TODO Look at these
示例7: inicia
def inicia(self):
    """Build lexer automata for every token class, union them into one
    automaton, determinize it, and pickle the result to 'automato.pkl'.

    `case` is a running offset of state counts so that every automaton
    gets uniquely numbered states before the union.
    """
    case = 0
    lex = Lex()
    # 1. Regular expression: reserved words
    a1 = lex.lexer('reservado', case)
    # Renamed from `dict`, which shadowed the builtin.
    state_map = a1.getDictAutomato()
    case += len(state_map)
    # 2. Regular expression: identifiers
    a2 = lex.lexer('identificadores', case)
    # 3. Grammar for special symbols (operators)
    terminais = ['+', '-', '=', '/', '*', '>', '<', '!']
    nTerminais = ['S']
    producoes = {'S': ['+', '-', '=', '/', '*', '>', '<', '!']}
    inicial = 'S'
    g = Grammar(producoes, terminais, nTerminais, inicial)
    s, i, f = g.convertGtoAF()
    a3 = Automato(s, i, f)
    a3.determina()
    a3.printAtomato()
    print("\n")
    state_map = a2.getDictAutomato()
    case += len(state_map)
    a3 = lex.renameState(a3, case)
    # 4. Grammar for separators
    terminais2 = [':', ';', ' ', '(', ')', '[', ']', ',', '\n']
    nTerminais2 = ['S']
    producoes2 = {'S': [':', ';', ' ', '(', ')', '[', ']', ',', '\n']}
    inicial2 = 'S'
    g = Grammar(producoes2, terminais2, nTerminais2, inicial2)
    s2, i2, f2 = g.convertGtoAF()
    a4 = Automato(s2, i2, f2)
    a4.determina()
    a4.printAtomato()
    print("\n")
    state_map = a3.getDictAutomato()
    case += len(state_map)
    a4 = lex.renameState(a4, case)
    # 5. Regular expression: constants
    state_map = a4.getDictAutomato()
    case += len(state_map)
    a5 = lex.lexer('constantes', case)
    # Union of all five automata, then determinize and persist.
    # (The original dead assignment `r = a5` was removed.)
    r = a1.oU([a2, a3, a4, a5])
    print("\n")
    r.determina()
    r.printAtomato()
    with open('automato.pkl', 'wb') as output:
        pickle.dump(r, output, pickle.HIGHEST_PROTOCOL)
示例8: make_grammar
def make_grammar(self):
    """Build and return a Grammar with a single rule: NP -> ART N.

    The AGR feature on all three symbols shares the variable "?a", so
    the article and noun are forced to agree.
    """
    grammar = Grammar()
    r1 = Rule(
        Symbol("NP", {"AGR": "?a"}), [
            Symbol("ART", {"AGR": "?a"}), Symbol("N", {"AGR": "?a"})])
    r1.set_variable_code("?a", -1L)
    # -1L should be default for any undefined variable
    # that is referenced while constructing
    grammar.add_rule(r1)
    return grammar
示例9: test_grammar_productions
def test_grammar_productions(self):
    """Known variables yield their productions; unknown ones yield nothing."""
    grammar = Grammar("grammars/grammar1.json")
    # Start variable S and non-start variable A have the expected rules.
    self.assertEqual(grammar.produces("S"), ["aAb"])
    self.assertEqual(grammar.produces("A"), ["aAb", "#"])
    # A variable absent from the grammar produces nothing truthy.
    self.assertFalse(grammar.produces("N"))
示例10: generate_samples
def _load_grammar(grammar_dir, filename):
    """Parse one grammar file; return the Grammar, or None on parse errors."""
    g = Grammar()
    err = g.parse_from_file(os.path.join(grammar_dir, filename))
    # CheckGrammar(g)
    if err > 0:
        print('There were errors parsing grammar')
        return None
    return g

def generate_samples(grammar_dir, outfiles):
    """Generates a set of samples and writes them to the output files.

    Args:
        grammar_dir: directory to load grammar files from.
        outfiles: A list of output filenames.
    """
    # `with` guarantees the template handle is closed even if read() raises.
    with open(os.path.join(grammar_dir, 'template.html')) as f:
        template = f.read()
    # Load each grammar in turn, stopping at the first failure (same
    # order and early-return behavior as the original inline code).
    htmlgrammar = _load_grammar(grammar_dir, 'html.txt')
    if htmlgrammar is None:
        return
    cssgrammar = _load_grammar(grammar_dir, 'css.txt')
    if cssgrammar is None:
        return
    jsgrammar = _load_grammar(grammar_dir, 'js.txt')
    if jsgrammar is None:
        return
    # JS and HTML grammar need access to CSS grammar.
    # Add it as import
    htmlgrammar.add_import('cssgrammar', cssgrammar)
    jsgrammar.add_import('cssgrammar', cssgrammar)
    for outfile in outfiles:
        result = generate_new_sample(template, htmlgrammar, cssgrammar,
                                     jsgrammar)
        if result is not None:
            print('Writing a sample to ' + outfile)
            try:
                with open(outfile, 'w') as f:
                    f.write(result)
            except IOError:
                print('Error writing to output')
示例11: test2
def test2():
    """Parse an Icelandic test sentence with a grammar read from a file
    and print the resulting parse forest."""
    print("\n\n------ Test 2 ---------")
    # Test grammar 2 - read from file
    g = Grammar()
    g.read("Reynir.test.grammar")
    #print("Grammar:")
    #print(str(g))
    #print()
    # s = "Villi leit út eða Anna og köttur komu beint heim og kona eða maður fóru snemma inn"
    s = "kona með kött myrti mann með hálsbindi með hund og Páll fór út"
    # s = "kona með kött myrti mann með hund og Villi fór út"
    # s = "Villi leit út"

    class NameToken(Token):
        # Known proper names in nominative (NF), accusative (ÞF) and
        # dative (ÞGF) cases.
        NÖFN_NF = ["Villi", "Anna", "Hlín", "Páll"]
        NÖFN_ÞF = ["Villa", "Önnu", "Hlín", "Pál"]
        NÖFN_ÞGF = ["Villa", "Önnu", "Hlín", "Páli"]

        def matches(self, terminal):
            """ Does this token match the given terminal? """
            # Only name terminals ("nafn_*") can match; the suffix
            # selects which case list the token value must appear in.
            if not terminal.name().startswith("nafn_"):
                return False
            if terminal.name().endswith("_nf"):
                return self._val in NameToken.NÖFN_NF
            if terminal.name().endswith("_þf"):
                return self._val in NameToken.NÖFN_ÞF
            if terminal.name().endswith("_þgf"):
                return self._val in NameToken.NÖFN_ÞGF
            return False

    def make_token(w):
        # Capitalised words become name tokens; everything else is a
        # plain word token.
        if w[0].isupper():
            return NameToken('nafn', w)
        return Token('orð', w)

    toklist = [make_token(w) for w in s.split()]
    p = Parser.for_grammar(g)
    forest = p.go(toklist)
    print("Parse combinations: {0}".format(Parser.num_combinations(forest)))
    Parser.print_parse_forest(forest)
示例12: test
def test(string='balance.txt', spec='g1.txt'):
from balance import BalanceLexer
G = Grammar()
source = open(spec,'r')
G.generate(source)
G.bnf2cnf()
print "grammer==",G
lexer= BalanceLexer()
balance=open(string,'r')
lexer.scanFile(balance)
S=lexer.getStream()
print "stream ===",S
C=CYKChart()
C.Build_CYK_Chart(G,S)
print C
示例13: btn_parser_clicked
def btn_parser_clicked(self):
    """Qt slot: generate a recursive-descent parser from the grammar text.

    Runs only if verify_grammar_ll1() succeeds; the generated code is
    shown in the parser text box and a confirmation dialog is popped.
    """
    if(self.verify_grammar_ll1()):
        g = Grammar.text_to_grammar(self.ui.text_grammar.toPlainText())
        r = RecursiveDescentParser(g)
        # Remember the most recently generated parser on the instance.
        self._current_parser = r
        # Tabs in the generated code are replaced for display purposes.
        self.ui.text_parser.setText(r.parser_code(self.log).strip().replace('\t',' '))
        QMessageBox.information(self,'Geração do parser descendente recursivo','O parser foi gerado!')
示例14: test_parser_code_nonterminal
def test_parser_code_nonterminal(self):
    """The code generated for nonterminals S and A matches the expected text.

    Grammar under test ('&' denotes epsilon):
        S -> A B C
        A -> a A | &
        B -> b B | A C d
        C -> c C | D
        D -> &
    Comparisons use .strip() on both sides, so only the interior of the
    expected strings must match the generator's output exactly.
    """
    s = "S -> A B C\n"
    s +="A -> a A | &\n"
    s +="B -> b B | A C d\n"
    s +="C -> c C | D\n"
    s +="D -> &"
    g = Grammar.text_to_grammar(s)
    r = RecursiveDescentParser(g)
    # Expected code for S: first(S) = {a, b, c, d} since A, B, C are
    # all nullable or start with those terminals.
    c = '''\
def S():
    global current_symbol
    if current_symbol in ['a', 'b', 'c', 'd']:
        A()
        B()
        C()
        \t
    else:
        raise Exception('S',['a', 'b', 'c', 'd'],current_symbol)'''
    self.assertEqual(c.strip(),r._parser_code_nonterminal('S').strip())
    # Expected code for A: a single 'a'-prefixed recursive alternative.
    c = '''\
def A():
    global current_symbol
    if current_symbol in ['a']:
        if current_symbol == 'a':
            next_lexic_symbol()
        else:
            raise Exception('A','a',current_symbol)
        A()'''
    self.assertEqual(c.strip(),r._parser_code_nonterminal('A').strip())
示例15: verify_grammar_ll1
def verify_grammar_ll1(self):
    """Check whether the grammar in the UI text box is LL(1).

    Returns True when the grammar parses and g.is_ll1() succeeds.
    Otherwise shows a critical QMessageBox describing the failure
    (left recursion, missing left factoring, first/follow conflict, or
    an unexpected error) and re-raises an Exception; the error kind is
    dispatched on err.args[0] as set by is_ll1.
    (User-facing strings are in Portuguese and left untouched.)
    """
    self.log('Verificando se a gramática é LL(1)')
    try:
        g = Grammar.text_to_grammar(self.ui.text_grammar.toPlainText())
    except Exception:
        # The text could not be converted into a grammar at all.
        QMessageBox.critical(self,'Erro durante criação da gramática','O texto que foi tentado a conversão para gramática não é válido')
        raise Exception('Erro durante criação da gramática','O texto que foi tentado a conversão para gramática não é válido')
    try:
        g.is_ll1(self.log)
        self.log('A gramática é LL(1)!')
        return True
    except Exception as err:
        if err.args[0] == 'LEFT_RECURSION':
            # err.args[1]: nonterminals that lead to left recursion.
            nts = ', '.join(err.args[1])
            QMessageBox.critical(self,'Recursão à esquerda','Os seguintes não terminais levam a uma recursão à esquerda:\n\t%s'%(nts))
            self.log('Recursão a esquerda encontrada encontrada nos não terminais: %s'%(nts))
            raise Exception('Recursão à esquerda','Os seguintes não terminais levam a uma recursão à esquerda: %s'%(nts))
        elif err.args[0] == 'LEFT_FACTORING':
            # err.args[1]: nonterminals that are not left-factored.
            nts = ', '.join(err.args[1])
            QMessageBox.critical(self,'Fatoração à esquerda','Os seguintes não terminais não estão fatorados à esquerda:\n\t%s'%(nts))
            self.log('Não fatoração encontrada nos não terminais: %s'%(nts))
            raise Exception('Fatoração à esquerda','Os seguintes não terminais não estão fatorados à esquerda: %s'%(nts))
        elif err.args[0] == 'FIRST_FOLLOW_CONFLICT':
            # err.args[1]: nonterminals with first/follow conflicts.
            nts = ', '.join(err.args[1])
            QMessageBox.critical(self,'Conflito first/follow','Houve conflito entre o first e o follow dos seguintes não terminais:\n\t%s'%(nts))
            self.log('Conflito first/follow encontrado nos não terminais: %s'%(nts))
            raise Exception('Conflito first/follow','Houve conflito entre o first e o follow dos seguintes não terminais: %s'%(nts))
        else:
            # Any other failure is surfaced verbatim.
            QMessageBox.critical(self,'Erro inesperado durante verificação LL(1)',err.__repr__())
            raise Exception('Erro inesperado durante verificação LL(1)',err.__repr__())