This article collects typical usage examples of the Python function nltk.draw.tree.draw_trees. If you have been wondering what draw_trees does, how to call it, and what real code that uses it looks like, the curated examples below should help.
15 code examples of the draw_trees function are presented below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
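As a warm-up, here is a minimal, self-contained sketch of calling draw_trees (assuming NLTK 3 and a display where Tkinter can open a window):

from nltk import Tree
from nltk.draw.tree import draw_trees

t1 = Tree.fromstring('(S (NP I) (VP (V saw) (NP him)))')
t2 = Tree.fromstring('(S (NP you) (VP (V heard) (NP me)))')
draw_trees(t1, t2)  # opens one window showing both trees side by side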
Example 1: demo
def demo(draw_parses=None, print_parses=None):
    """
    A simple demo showing some basic functionality.
    """
    import sys  # used by the interactive prompts below
    demos = ['aandeelhoudersvergadering', 'hardloopwedstrijd']
    trees = []
    with MBMA() as program:
        for word in demos:
            print 'Parsing: %s' % word
            results = program.classify(word)
            trees.extend(program.trees(results))
    if draw_parses is None:
        print
        print 'Draw parses (y/n)?',
        draw_parses = sys.stdin.readline().strip().lower().startswith('y')
    if draw_parses:
        from nltk.draw.tree import draw_trees
        print '  please wait...'
        draw_trees(*trees)
    if print_parses is None:
        print
        print 'Print parses (y/n)?',
        print_parses = sys.stdin.readline().strip().lower().startswith('y')
    if print_parses:
        for parse in trees:
            print parse
Example 2: demo
def demo():
    """
    A demonstration showing how each tree transform can be used.
    """
    from nltk.draw.tree import draw_trees
    from nltk import tree, treetransforms
    from copy import deepcopy

    # original tree from WSJ bracketed text
    sentence = """(TOP
  (S
    (S
      (VP
        (VBN Turned)
        (ADVP (RB loose))
        (PP
          (IN in)
          (NP
            (NP (NNP Shane) (NNP Longman) (POS 's))
            (NN trading)
            (NN room)))))
    (, ,)
    (NP (DT the) (NN yuppie) (NNS dealers))
    (VP (AUX do) (NP (NP (RB little)) (ADJP (RB right))))
    (. .)))"""
    t = tree.Tree.parse(sentence, remove_empty_top_bracketing=True)

    # collapse subtrees with only one child
    collapsedTree = deepcopy(t)
    treetransforms.collapse_unary(collapsedTree)

    # convert the tree to CNF
    cnfTree = deepcopy(collapsedTree)
    treetransforms.chomsky_normal_form(cnfTree)

    # convert the tree to CNF with parent annotation (one level)
    # and horizontal smoothing of order two
    parentTree = deepcopy(collapsedTree)
    treetransforms.chomsky_normal_form(parentTree, horzMarkov=2, vertMarkov=1)

    # convert the tree back to its original form (used to make CYK results comparable)
    original = deepcopy(parentTree)
    treetransforms.un_chomsky_normal_form(original)

    # convert tree back to bracketed text
    sentence2 = original.pprint()
    print sentence
    print sentence2
    print "Sentences the same? ", sentence == sentence2

    draw_trees(t, collapsedTree, cnfTree, parentTree, original)
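This example targets NLTK 2, where tree.Tree.parse and pprint() exist; in NLTK 3 they became Tree.fromstring and pformat(). A hedged sketch of the same markovize-and-restore round trip on a small tree:

from copy import deepcopy
from nltk import Tree, treetransforms

t = Tree.fromstring('(S (NP (DT the) (NN boy)) (VP (VBD ran) (ADVP (RB fast))))')
transformed = deepcopy(t)
treetransforms.chomsky_normal_form(transformed, horzMarkov=2, vertMarkov=1)
treetransforms.un_chomsky_normal_form(transformed)
print(t == transformed)  # expected True: the transform should round-trip losslessly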
Example 3: testTreeFilter
def testTreeFilter(tree=None):
    """
    :param tree: Sample tree string in bracket notation.
    """
    if tree:
        t = Tree(tree)
    else:
        t = Tree(
            '((S(NP-SBJ (PRP They))(ADVP-TMP (RB never))(VP (VBD considered)(S (NP-SBJ (PRP themselves) (VP (TO to) (VP (VB be) (NP-PRD (NN anything) (RB else)))))))))')
    t2 = t.copy(deep=True)
    filterLexical(t2)
    from nltk.draw.tree import draw_trees
    draw_trees(t, t2)
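filterLexical comes from the surrounding project and is not shown. A hedged guess at a transform in the same spirit (strip_leaves is illustrative, not the project's actual helper, and assumes NLTK 3): it drops the word-level leaves so the drawn tree shows only the syntactic skeleton.

from nltk import Tree

def strip_leaves(t):
    # keep every nonterminal node, drop the lexical leaves
    return Tree(t.label(), [strip_leaves(c) for c in t if isinstance(c, Tree)])

With it, t2 = strip_leaves(t) would replace the in-place filterLexical(t2) call above.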
Example 4: parse
def parse(self, sentence_string):
    """Extra credit: parse an arbitrary string.
    This is actually what we want at the end. Given an arbitrary string:
    0) split it into sentences (if you want to accept multiple sentences)
    1) tokenize
    2) POS-tag and apply other preprocessing techniques
    3) parse it!
    4) draw it using nltk's draw_trees as in the example
       (it does not support labeled arcs, though)
    """
    # draw a tree
    from nltk.draw.tree import draw_trees
    from nltk.tree import Tree
    import nltk

    words = nltk.pos_tag(nltk.word_tokenize(sentence_string))
    sentence = {'tokens': ['ROOT'], 'arcs': [], 'pos': ['ROOT']}
    for word, pos in words:
        sentence['tokens'].append(word)
        sentence['pos'].append(pos)
    indices = range(len(sentence['tokens']))
    fcg = self.make_fully_connected_graph(sentence)
    weighted = self._featurized_to_weighted(fcg)
    max_spanning_tree = mst(0, weighted)
    wlist = sentence['tokens']
    # print the dependencies
    for i in max_spanning_tree.keys():
        for j in max_spanning_tree[i].keys():
            print "%s->%s" % (i, j)
    t = self._build_tree(max_spanning_tree, 0, wlist)
    draw_trees(Tree(t))  # Tree(t) parses a bracketed string under NLTK 2; NLTK 3 uses Tree.fromstring(t)
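_build_tree is internal to this project; here is a hedged guess at a helper in the same spirit, turning the MST's head -> dependents map into a nested nltk.Tree (build_tree, heads, and words below are illustrative, not the author's actual implementation):

from nltk.tree import Tree

def build_tree(heads, i, words):
    # recursively attach each dependent under its head word
    children = [build_tree(heads, j, words) for j in sorted(heads.get(i, {}))]
    return Tree(words[i], children) if children else words[i]

heads = {0: {2: 1.0}, 2: {1: 0.5, 3: 0.5}}  # ROOT -> saw, saw -> {I, him}
words = ['ROOT', 'I', 'saw', 'him']
print(build_tree(heads, 0, words))  # (ROOT (saw I him))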
Example 5: demo
def demo(choice=None, draw_parses=None, print_parses=None):
    """
    A demonstration of the probabilistic parsers. The user is
    prompted to select which demo to run and how many parses should
    be found; then each parser is run on the same demo, and a
    summary of the results is displayed.
    """
    import sys, time
    from nltk import tokenize, toy_pcfg1, toy_pcfg2
    from nltk.parse import pchart

    # Define two demos. Each demo has a sentence and a grammar.
    demos = [('I saw John with my telescope', toy_pcfg1),
             ('the boy saw Jack with Bob under the table with a telescope',
              toy_pcfg2)]

    if choice is None:
        # Ask the user which demo they want to use.
        print
        for i in range(len(demos)):
            print '%3s: %s' % (i+1, demos[i][0])
            print '     %r' % demos[i][1]
            print
        print 'Which demo (%d-%d)? ' % (1, len(demos)),
        choice = int(sys.stdin.readline().strip())-1
    try:
        sent, grammar = demos[choice]
    except:
        print 'Bad sentence number'
        return

    # Tokenize the sentence.
    tokens = sent.split()

    # Define a list of parsers. We'll use all parsers.
    parsers = [
        pchart.InsideChartParser(grammar),
        pchart.RandomChartParser(grammar),
        pchart.UnsortedChartParser(grammar),
        pchart.LongestChartParser(grammar),
        pchart.InsideChartParser(grammar, beam_size=len(tokens)+1)  # was BeamParser
    ]

    # Run the parsers on the tokenized sentence.
    times = []
    average_p = []
    num_parses = []
    all_parses = {}
    for parser in parsers:
        print '\ns: %s\nparser: %s\ngrammar: %s' % (sent, parser, grammar)
        parser.trace(3)
        t = time.time()
        parses = parser.nbest_parse(tokens)
        times.append(time.time()-t)
        if parses: p = reduce(lambda a, b: a+b.prob(), parses, 0)/len(parses)
        else: p = 0
        average_p.append(p)
        num_parses.append(len(parses))
        for p in parses: all_parses[p.freeze()] = 1

    # Print some summary statistics
    print
    print '       Parser      Beam | Time (secs)   # Parses   Average P(parse)'
    print '------------------------+------------------------------------------'
    for i in range(len(parsers)):
        print '%18s %4d |%11.4f%11d%19.14f' % (parsers[i].__class__.__name__,
                                               parsers[i].beam_size,
                                               times[i], num_parses[i], average_p[i])
    parses = all_parses.keys()
    if parses: p = reduce(lambda a, b: a+b.prob(), parses, 0)/len(parses)
    else: p = 0
    print '------------------------+------------------------------------------'
    print '%18s      |%11s%11d%19.14f' % ('(All Parses)', 'n/a', len(parses), p)

    if draw_parses is None:
        # Ask the user if we should draw the parses.
        print
        print 'Draw parses (y/n)? ',
        draw_parses = sys.stdin.readline().strip().lower().startswith('y')
    if draw_parses:
        from nltk.draw.tree import draw_trees
        print '  please wait...'
        draw_trees(*parses)
    if print_parses is None:
        # Ask the user if we should print the parses.
        print
        print 'Print parses (y/n)? ',
        print_parses = sys.stdin.readline().strip().lower().startswith('y')
    if print_parses:
        for parse in parses:
            print parse
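This demo targets NLTK 2, where parsers expose nbest_parse. In NLTK 3 that method is gone and parse() returns an iterator of trees; a minimal sketch of the equivalent call (toy_pcfg1 ships with NLTK):

from nltk.grammar import toy_pcfg1
from nltk.parse import pchart
from nltk.draw.tree import draw_trees

parser = pchart.InsideChartParser(toy_pcfg1)
parses = list(parser.parse('I saw John with my telescope'.split()))
print(len(parses))   # number of parses found
draw_trees(*parses)  # draw them all in one window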
Example 6: main
def main():
    import sys
    from optparse import OptionParser, OptionGroup
    usage = """%%prog [options] [grammar_file]""" % globals()

    opts = OptionParser(usage=usage)
    opts.add_option("-c", "--components",
                    action="store_true", dest="show_components", default=0,
                    help="show hole semantics components")
    opts.add_option("-r", "--raw",
                    action="store_true", dest="show_raw", default=0,
                    help="show the raw hole semantics expression")
    opts.add_option("-d", "--drawtrees",
                    action="store_true", dest="draw_trees", default=0,
                    help="show formula trees in a GUI window")
    opts.add_option("-v", "--verbose",
                    action="count", dest="verbosity", default=0,
                    help="show more information during parse")

    (options, args) = opts.parse_args()

    if len(args) > 0:
        filename = args[0]
    else:
        filename = 'grammars/hole.fcfg'

    print 'Reading grammar file', filename
    #grammar = data.load(filename)
    parser = load_earley(filename, trace=options.verbosity)

    # Prompt the user for a sentence.
    print 'Sentence: ',
    line = sys.stdin.readline()[:-1]

    # Parse the sentence.
    tokens = line.split()
    trees = parser.nbest_parse(tokens)
    print 'Got %d different parses' % len(trees)

    for tree in trees:
        # Get the semantic feature from the top of the parse tree.
        sem = tree[0].node['sem'].simplify()

        # Skolemise away all quantifiers. All variables become unique.
        sem = sem.skolemise()

        # Reparse the semantic representation from its bracketed string format.
        # I find this uniform structure easier to handle. It also makes the
        # code mostly independent of the lambda calculus classes.
        usr = bracket_parse(str(sem))

        # Break the hole semantics representation down into its components,
        # i.e. holes, labels, formula fragments and constraints.
        hole_sem = HoleSemantics(usr)

        # Maybe print the raw semantic representation.
        if options.show_raw:
            print
            print 'Raw expression'
            print usr

        # Maybe show the details of the semantic representation.
        if options.show_components:
            print
            print 'Holes:       ', hole_sem.holes
            print 'Labels:      ', hole_sem.labels
            print 'Constraints: ', hole_sem.constraints
            print 'Top hole:    ', hole_sem.top_hole
            print 'Top labels:  ', hole_sem.top_most_labels
            print 'Fragments:'
            for (l, f) in hole_sem.fragments.items():
                print '\t%s: %s' % (l, f)

        # Find all the possible ways to plug the formulas together.
        pluggings = hole_sem.pluggings()

        # Build FOL formula trees using the pluggings.
        trees = map(hole_sem.formula_tree, pluggings)

        # Print out the formulas in a textual format.
        n = 1
        for tree in trees:
            print
            print '%d. %s' % (n, tree)
            n += 1

        # Maybe draw the formulas as trees.
        if options.draw_trees:
            draw_trees(*trees)

    print
    print 'Done.'
Example 7: demo
def demo():
    """
    A demonstration of the probabilistic parsers. The user is
    prompted to select which demo to run and how many parses should
    be found; then each parser is run on the same demo, and a
    summary of the results is displayed.
    """
    import sys, time
    import nltk
    from nltk import tokenize
    from nltk.parse import ViterbiParser

    # Define two demos. Each demo has a sentence and a grammar.
    demos = [('I saw the man with my telescope', nltk.toy_pcfg1),
             ('the boy saw Jack with Bob under the table with a telescope', nltk.toy_pcfg2)]

    # Ask the user which demo they want to use.
    print
    for i in range(len(demos)):
        print '%3s: %s' % (i+1, demos[i][0])
        print '     %r' % demos[i][1]
        print
    print 'Which demo (%d-%d)? ' % (1, len(demos)),
    try:
        snum = int(sys.stdin.readline().strip())-1
        sent, grammar = demos[snum]
    except:
        print 'Bad sentence number'
        return

    # Tokenize the sentence.
    tokens = sent.split()

    parser = ViterbiParser(grammar)
    all_parses = {}

    print '\nsent: %s\nparser: %s\ngrammar: %s' % (sent, parser, grammar)
    parser.trace(3)
    t = time.time()
    parses = parser.nbest_parse(tokens)
    elapsed = time.time() - t  # renamed from 'time' to avoid shadowing the time module
    if parses:
        average = reduce(lambda a, b: a+b.prob(), parses, 0)/len(parses)
    else:
        average = 0
    num_parses = len(parses)
    for p in parses:
        all_parses[p.freeze()] = 1

    # Print some summary statistics
    print
    print 'Time (secs)   # Parses   Average P(parse)'
    print '-----------------------------------------'
    print '%11.4f%11d%19.14f' % (elapsed, num_parses, average)
    parses = all_parses.keys()
    if parses:
        p = reduce(lambda a, b: a+b.prob(), parses, 0)/len(parses)
    else: p = 0
    print '------------------------------------------'
    print '%11s%11d%19.14f' % ('n/a', len(parses), p)

    # Ask the user if we should draw the parses.
    print
    print 'Draw parses (y/n)? ',
    if sys.stdin.readline().strip().lower().startswith('y'):
        from nltk.draw.tree import draw_trees
        print '  please wait...'
        draw_trees(*parses)

    # Ask the user if we should print the parses.
    print
    print 'Print parses (y/n)? ',
    if sys.stdin.readline().strip().lower().startswith('y'):
        for parse in parses:
            print parse
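The reduce-based averaging recurs across several of these demos; in Python 3, reduce must be imported from functools, and a sum() generator expression is the clearer equivalent. A sketch:

def average_prob(parses):
    # mean probability over all parses, 0 when the list is empty
    return sum(p.prob() for p in parses) / len(parses) if parses else 0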
Example 8: draw
def draw(self):
    t = self.sem_labeled()
    from nltk.draw.tree import draw_trees
    draw_trees(t)
示例9: demo
#.........这里部分代码省略.........
NP -> Det N [.41]
NP -> Name [.28]
NP -> NP PP [.31]
PP -> P NP [1.0]
V -> 'saw' [.21]
V -> 'ate' [.51]
V -> 'ran' [.28]
N -> 'boy' [.11]
N -> 'cookie' [.12]
N -> 'table' [.13]
N -> 'telescope' [.14]
N -> 'hill' [.5]
Name -> 'Jack' [.52]
Name -> 'Bob' [.48]
P -> 'with' [.61]
P -> 'under' [.39]
Det -> 'the' [.41]
Det -> 'a' [.31]
Det -> 'my' [.28]
""")
demos = [('I saw John with my telescope', toy_pcfg1),
('the boy saw Jack with Bob under the table with a telescope',
toy_pcfg2)]
if choice is None:
# Ask the user which demo they want to use.
print()
for i in range(len(demos)):
print('%3s: %s' % (i+1, demos[i][0]))
print(' %r' % demos[i][1])
print()
print('Which demo (%d-%d)? ' % (1, len(demos)), end=' ')
choice = int(sys.stdin.readline().strip())-1
try:
sent, grammar = demos[choice]
except:
print('Bad sentence number')
return
# Tokenize the sentence.
tokens = sent.split()
# Define a list of parsers. We'll use all parsers.
parsers = [
pchart.InsideChartParser(grammar),
pchart.RandomChartParser(grammar),
pchart.UnsortedChartParser(grammar),
pchart.LongestChartParser(grammar),
pchart.InsideChartParser(grammar, beam_size = len(tokens)+1) # was BeamParser
]
# Run the parsers on the tokenized sentence.
times = []
average_p = []
num_parses = []
all_parses = {}
for parser in parsers:
print('\ns: %s\nparser: %s\ngrammar: %s' % (sent,parser,grammar))
parser.trace(3)
t = time.time()
parses = list(parser.parse(tokens))
times.append(time.time()-t)
p = (reduce(lambda a,b:a+b.prob(), parses, 0)/len(parses) if parses else 0)
average_p.append(p)
num_parses.append(len(parses))
for p in parses: all_parses[p.freeze()] = 1
# Print some summary statistics
print()
print(' Parser Beam | Time (secs) # Parses Average P(parse)')
print('------------------------+------------------------------------------')
for i in range(len(parsers)):
print('%18s %4d |%11.4f%11d%19.14f' % (parsers[i].__class__.__name__,
parsers[i].beam_size,
times[i],num_parses[i],average_p[i]))
parses = all_parses.keys()
if parses: p = reduce(lambda a,b:a+b.prob(), parses, 0)/len(parses)
else: p = 0
print('------------------------+------------------------------------------')
print('%18s |%11s%11d%19.14f' % ('(All Parses)', 'n/a', len(parses), p))
if draw_parses is None:
# Ask the user if we should draw the parses.
print()
print('Draw parses (y/n)? ', end=' ')
draw_parses = sys.stdin.readline().strip().lower().startswith('y')
if draw_parses:
from nltk.draw.tree import draw_trees
print(' please wait...')
draw_trees(*parses)
if print_parses is None:
# Ask the user if we should print the parses.
print()
print('Print parses (y/n)? ', end=' ')
print_parses = sys.stdin.readline().strip().lower().startswith('y')
if print_parses:
for parse in parses:
print(parse)
Example 10: draw
def draw(self):
    """
    Open a new window containing a graphical diagram of this tree.
    """
    from nltk.draw.tree import draw_trees
    draw_trees(self)
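This is the Tree.draw method from NLTK itself, so drawing a single tree is a one-liner equivalent to calling draw_trees on it:

from nltk import Tree
Tree.fromstring('(S (NP Mary) (VP (V saw) (NP Bob)))').draw()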
Example 11: demo
def demo():
    """
    A demonstration of the probabilistic parsers. The user is
    prompted to select which demo to run and how many parses should
    be found; then each parser is run on the same demo, and a
    summary of the results is displayed.
    """
    import sys, time
    from functools import reduce  # needed on Python 3
    from nltk import tokenize
    from nltk.parse import ViterbiParser
    from nltk.grammar import toy_pcfg1, toy_pcfg2

    # Define the demos. Each demo has a sentence and a grammar.
    # (learned_pcfg comes from surrounding code not shown here.)
    demos = [('حرك الكرة الخضراء في أسفل الزاوية اليسرى', learned_pcfg),
             ('حرك الكرة', learned_pcfg),
             ('take the green pyramid and put it in the top left corner', learned_pcfg),
             ('move the pink triangle on top of the black square', learned_pcfg),
             ('move the red block and place it on top of the blue block that is on top of a green block', learned_pcfg),
             ('move the green block on top of the blue block', learned_pcfg)]

    # Ask the user which demo they want to use.
    print()
    for i in range(len(demos)):
        print('%3s: %s' % (i+1, demos[i][0]))
        # print('     %r' % demos[i][1])
    print()
    print('Which demo (%d-%d)? ' % (1, len(demos)), end=' ')
    try:
        snum = int(sys.stdin.readline().strip())-1
        sent, grammar = demos[snum]
    except:
        print('Bad sentence number')
        return

    # Tokenize the sentence.
    tokens = sent.split()

    print(grammar)
    parser = ViterbiParser(grammar)
    all_parses = {}

    # print('\nsent: %s\nparser: %s\ngrammar: %s' % (sent, parser, grammar))
    parser.trace(3)
    t = time.time()
    parses = parser.parse_all(tokens)
    elapsed = time.time() - t  # renamed from 'time' to avoid shadowing the time module
    average = (reduce(lambda a, b: a+b.prob(), parses, 0)/len(parses)
               if parses else 0)
    num_parses = len(parses)
    for p in parses:
        all_parses[p.freeze()] = 1

    # Print some summary statistics
    print()
    print('Time (secs)   # Parses   Average P(parse)')
    print('-----------------------------------------')
    print('%11.4f%11d%19.14f' % (elapsed, num_parses, average))
    parses = all_parses.keys()
    if parses:
        p = reduce(lambda a, b: a+b.prob(), parses, 0)/len(parses)
    else: p = 0
    print('------------------------------------------')
    print('%11s%11d%19.14f' % ('n/a', len(parses), p))

    # Ask the user if we should draw the parses.
    print()
    print('Draw parses (y/n)? ', end=' ')
    if sys.stdin.readline().strip().lower().startswith('y'):
        from nltk.draw.tree import draw_trees
        print('  please wait...')
        draw_trees(*parses)

    # Ask the user if we should print the parses.
    print()
    print('Print parses (y/n)? ', end=' ')
    if sys.stdin.readline().strip().lower().startswith('y'):
        for parse in parses:
            print(parse)
Example 12: print
import nltk

# The opening lines of this snippet were cut off in extraction; the grammar is
# the well-known groucho_grammar from the NLTK book (chapter 8), restored here.
groucho_grammar = nltk.CFG.fromstring("""
S -> NP VP
PP -> P NP
NP -> Det N | Det N PP | 'I'
VP -> V NP | VP PP
Det -> 'an' | 'my'
N -> 'elephant' | 'pajamas'
V -> 'shot'
P -> 'in'
""")
sent = ['I', 'shot', 'an', 'elephant', 'in', 'my', 'pajamas']
parser = nltk.ChartParser(groucho_grammar)
for tree1 in parser.parse(sent):
    print(tree1)

grammar1 = nltk.CFG.fromstring("""
S -> NP VP
VP -> V NP | V NP PP
PP -> P NP
V -> "saw" | "ate" | "walked"
NP -> "John" | "Mary" | "Bob" | Det N | Det N PP
Det -> "a" | "an" | "the" | "my"
N -> "man" | "dog" | "cat" | "telescope" | "park"
P -> "in" | "on" | "by" | "with"
""")
sent = "Mary saw Bob".split()
rd_parser = nltk.RecursiveDescentParser(grammar1)
for tree2 in rd_parser.parse(sent):
    print(tree2)

from nltk.draw.tree import draw_trees
draw_trees(tree1, tree2)  # tree1/tree2 hold the last parse from each loop
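One caveat: draw_trees is handed the loop variables tree1 and tree2, which stay undefined if either parser yields no parse. Collecting the parses first is safer; a sketch reusing parser and rd_parser from above:

trees1 = list(parser.parse(['I', 'shot', 'an', 'elephant', 'in', 'my', 'pajamas']))
trees2 = list(rd_parser.parse('Mary saw Bob'.split()))
if trees1 and trees2:
    draw_trees(trees1[0], trees2[0])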
Example 13: Tree
    return Tree(t.node, [excise_empty_nodes(st) for st in t])

# nb: returns tree with blank nodes excised
def parse_ccgbank_tree(s):
    t = Tree.parse(s,
                   parse_node=parse_ccgbank_node,
                   parse_leaf=parse_ccgbank_leaf,
                   node_pattern=ccgbank_node_pattern,
                   leaf_pattern=ccgbank_leaf_pattern)
    return excise_empty_nodes(t)

print
print 'parsing: ' + deriv
t = parse_ccgbank_tree(deriv)
print t

t2 = None
if deriv2 is not None:
    print
    print 'parsing: ' + deriv2
    t2 = parse_ccgbank_tree(deriv2)
    print t2

print
if t2 is None:
    print 'drawing tree'
    draw_trees(t)
else:
    print 'drawing trees'
    draw_trees(t, t2)
Example 14: parseAllTestXmls
def parseAllTestXmls(fileList, grammar, allTestSolutionsDict, verbose=False, displayTrees=False):
    testPitchLists = []
    testIntervalLists = []
    totalCorrect = 0
    totalCorrectNonN = 0
    totalProductions = 0
    totalLeaves = 0
    parseTreeStrings = {}
    for filepath in fileList:
        curPitchList = getPitchListFromFile(filepath)
        testPitchLists.append(curPitchList)
        testIntervalLists.append(getIntervalStringsFromPitchList(curPitchList, verbose))
        if verbose:
            print(testIntervalLists[-1])
        listLen = len(testIntervalLists[-1])
        if verbose:
            print(tree)
        parser = ViterbiParser(grammar)
        if verbose:
            parser.trace(0)  # 3
        else:
            parser.trace(0)
        parses = []  # pre-initialize so a failed parse below doesn't leave this undefined
        try:
            parses = parser.parse_all(testIntervalLists[-1])
        except Exception as errorMsg:
            print("error parsing file " + filepath)
            print(errorMsg)
        numTrees = sum(1 for _ in parses)
        if numTrees > 0 and displayTrees:
            from nltk.draw.tree import draw_trees
            draw_trees(*parses)
        if numTrees == 0:
            print("Couldn't find a valid parse; this is bad, very very bad")
            return 0, 0
        numCorrect = 0
        numCorrectNonN = 0
        bottomCorrect = 0
        bottomCorrectNonN = 0
        solutionTree = None
        try:
            solutionTreeStr = allTestSolutionsDict[filepath]
            solutionTree = Tree.fromstring(solutionTreeStr)
        except Exception as errorMsg:
            print("couldn't find solution for file " + filepath)
            print(errorMsg)
        if solutionTree is not None and solutionTree != '':
            parseTreeStrings[filepath] = str(parses[0])
            numCorrect, numCorrectNonN = validate_tree.compareTrees(solutionTree, parses[0])
            numProductions = len(solutionTree.productions())
            totalProductions += numProductions
            #solutionTree.draw()
            #parses[0].draw()
            bottomCorrect, bottomCorrectNonN = validate_tree.compareTreesBottomUp(solutionTree, parses[0])
            parseTreeStrings[filepath+'_afterComparison'] = str(parses[0])
            totalLeaves += len(solutionTree.leaves())
            #parses[0].draw()
        totalCorrect += bottomCorrect
        totalCorrect += numCorrect
        totalCorrectNonN += numCorrectNonN
        totalCorrectNonN += bottomCorrectNonN
    return totalCorrect, totalCorrectNonN, totalProductions, totalLeaves, parseTreeStrings
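A note on the error handling here: ViterbiParser raises ValueError when a token falls outside the grammar's coverage, so the broad except can be narrowed and the parse list guarded, as in this sketch:

parses = []
try:
    parses = parser.parse_all(tokens)
except ValueError as err:  # e.g. "Grammar does not cover some of the input words"
    print("error parsing:", err)
if parses:
    draw_trees(*parses)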
Example 15: make
def make(op):
    if op == 0:
        for name in case.keys():
            if name != "Corpus":
                print name
                print len(inst[name].grammar.productions()), "productions in grammar"
                print len(inst[name].parser.grammar().productions()), "productions used in parser"
                print "---"
    elif op == 1:
        cant_oraciones = inst["Corpus"].cant_oraciones()
        print CR, "Number of sentences:"
        print cant_oraciones
    elif op == 2:
        oracion_mas_larga = inst["Corpus"].oracion_mas_larga()
        print CR, "Longest sentence:"
        print len(oracion_mas_larga.split(' ')), "words"
        print oracion_mas_larga
    elif op == 3:
        largo_promedio_oracion = inst["Corpus"].largo_promedio_oracion()
        print CR, "Average sentence length:"
        print largo_promedio_oracion, "letters"
    elif op == 4:
        palabras_frecs = inst["Corpus"].palabras_frecs()
        print CR, "Frequent words:"
        print '\n\t'.join(map(str, sorted(palabras_frecs.items(), key=lambda x: x[1], reverse=True)[:20]))
    elif op == 5:
        palabras_frecs_cat = inst["Corpus"].palabras_frecs_cat()
        print CR, "Frequent words by category:"
        print '\n\t'.join(map(str, sorted(palabras_frecs_cat.items(), key=lambda x: len(x[1]), reverse=True)[:20]))
    elif op == 6:
        arbol_min_nodos = inst["Corpus"].arbol_min_nodos()
        print CR, "Tree with the minimum number of nodes:"
        print len(arbol_min_nodos.treepositions()), "nodes"
        draw_trees(arbol_min_nodos)
    elif op == 7:
        arbol_max_nodos = inst["Corpus"].arbol_max_nodos()
        print CR, "Tree with the maximum number of nodes:"
        print len(arbol_max_nodos.treepositions()), "nodes"
        draw_trees(arbol_max_nodos)
    elif op == 8:
        lema = raw_input('\r' + ' '*20 + '\rLemma > ')
        if not lema: lema = "mostrar"
        print "Processing..."
        arboles_con_lema = inst["Corpus"].arboles_con_lema(lema)
        if arboles_con_lema:
            print CR, "Trees with lemma '", lema, "'"
            print len(arboles_con_lema), "trees"
            if raw_input("Draw? [y/n] ") == 'y': draw_trees(*arboles_con_lema)
            print "** Example sentence **"
            print ' '.join(arboles_con_lema[randint(0, len(arboles_con_lema)-1)].leaves())
        else: print CR, "No trees with lemma '", lema, "'"
    elif op == 9:
        print CR, len(inst["PCFG"].reglas_no_lexicas()), "non-lexical rules"
    elif op == 10:
        print CR, len(inst["PCFG"].categorias_lexicas()), "lexical categories"
    elif op == 11:
        cat = raw_input(CR + 'Category > ')
        if not cat: cat = "vmip3s0"
        print CR, len(inst["PCFG"].reglas_lexicas(cat)), "lexical rules with category '", cat, "'"
    elif op == 12:
        sent = inst["PCFG"].sents[0]
        parsed = inst["PCFG"].parse(sent)
    elif op == 13:
        sent = inst["PCFG"].sents[1]
        parsed = inst["PCFG"].parse(sent)
    elif op == 14:
        sent = inst["PCFG"].sents[2]
        parsed = inst["PCFG"].parse(sent)
    elif op == 15:
        sent = inst["PCFG_UNK"].sents[0]
        parsed = inst["PCFG_UNK"].parse(sent)
    elif op == 16:
        sent = inst["PCFG_UNK"].sents[1]
        parsed = inst["PCFG_UNK"].parse(sent)
    elif op == 17:
        sent = inst["PCFG_LEX"].sents[0]
        parsed = inst["PCFG"].parse(sent)
    elif op == 18:
        sent = inst["PCFG_LEX"].sents[0]
        parsed = inst["PCFG_LEX"].parse(sent)
    elif op == 19:
        sent = inst["PCFG_LEX_VERB"].sents[0]
        parsed = inst["PCFG_LEX"].parse(sent)
    elif op == 20:
        sent = inst["PCFG_LEX_VERB"].sents[1]
        parsed = inst["PCFG_LEX"].parse(sent)
    elif op == 21:
        sent = inst["PCFG_LEX_VERB"].sents[0]
        parsed = inst["PCFG_LEX_VERB"].parse(sent)
    elif op == 22:
        sent = inst["PCFG_LEX_VERB"].sents[1]
        parsed = inst["PCFG_LEX_VERB"].parse(sent)
    elif op == 23:
        sent = inst["PCFG_LEX_VERB"].sents[2]
        parsed = inst["PCFG_LEX"].parse(sent)
    elif op == 24:
        sent = inst["PCFG_LEX_VERB"].sents[2]
        parsed = inst["PCFG_LEX_VERB"].parse(sent)
    else:
        print "Invalid command!"
#......... part of the code omitted .........