This article collects typical usage examples of Python's nltk.parse.load_parser function. If you have been wondering what load_parser does, how to call it, or what real-world uses look like, the curated code examples below should help.
The article presents 13 code examples of load_parser, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
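All of the examples below follow the same pattern: load_parser reads a (feature-based) grammar from a file or from the NLTK data directory and returns a chart parser for it. A minimal sketch of that pattern, assuming NLTK 3 with the book_grammars data package installed (nltk.download('book_grammars')):

from nltk.parse import load_parser

parser = load_parser('grammars/book_grammars/feat0.fcfg', trace=0)
for tree in parser.parse('Kim likes children'.split()):  # parse() yields nltk.tree.Tree objects
    print(tree)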
Example 1: __init__
def __init__(self, gramfile=None):
"""
:param gramfile: name of file where grammar can be loaded
:type gramfile: str
"""
self._gramfile = (gramfile if gramfile else 'grammars/book_grammars/discourse.fcfg')
self._parser = load_parser(self._gramfile)
Example 2: batch_parse
def batch_parse(inputs, grammar, trace=0):
"""
Convert input sentences into syntactic trees.
:param inputs: sentences to be parsed
:type inputs: list of str
:param grammar: L{FeatureGrammar} or name of feature-based grammar
    :rtype: list
    :return: one list of L{Tree}s per input sentence
    """
    # put imports here to avoid circular dependencies
from nltk.grammar import FeatureGrammar
from nltk.parse import FeatureChartParser, load_parser
if isinstance(grammar, FeatureGrammar):
cp = FeatureChartParser(grammar)
else:
cp = load_parser(grammar, trace=trace)
parses = []
for sent in inputs:
tokens = sent.split() # use a tokenizer?
syntrees = cp.nbest_parse(tokens)
parses.append(syntrees)
return parses
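Note that nbest_parse is the old NLTK 2 API. Example 3 below is the same function as it appears after the port to NLTK 3, where parsing goes through cp.parse, which returns an iterator of trees.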
Example 3: parse_sents
def parse_sents(inputs, grammar, trace=0):
"""
Convert input sentences into syntactic trees.
:param inputs: sentences to be parsed
:type inputs: list(str)
:param grammar: ``FeatureGrammar`` or name of feature-based grammar
    :type grammar: nltk.grammar.FeatureGrammar or str
    :rtype: list(list(nltk.tree.Tree))
    :return: one list of ``Tree``s per input sentence
    """
    # put imports here to avoid circular dependencies
from nltk.grammar import FeatureGrammar
from nltk.parse import FeatureChartParser, load_parser
if isinstance(grammar, FeatureGrammar):
cp = FeatureChartParser(grammar)
else:
cp = load_parser(grammar, trace=trace)
parses = []
for sent in inputs:
tokens = sent.split() # use a tokenizer?
syntrees = list(cp.parse(tokens))
parses.append(syntrees)
return parses
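A hedged usage sketch for parse_sents, assuming the book_grammars data package is installed ('Kim likes children' is covered by the NLTK book's feat0 grammar):

for trees in parse_sents(['Kim likes children'], 'grammars/book_grammars/feat0.fcfg'):
    for tree in trees:
        print(tree)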
Example 4: hole_readings (Python 2)
def hole_readings(sentence, grammar_filename=None, verbose=False):
if not grammar_filename:
grammar_filename = 'grammars/sample_grammars/hole.fcfg'
if verbose: print 'Reading grammar file', grammar_filename
parser = load_parser(grammar_filename)
# Parse the sentence.
tokens = sentence.split()
trees = parser.nbest_parse(tokens)
if verbose: print 'Got %d different parses' % len(trees)
all_readings = []
for tree in trees:
# Get the semantic feature from the top of the parse tree.
sem = tree.node['SEM'].simplify()
# Print the raw semantic representation.
if verbose: print 'Raw: ', sem
# Skolemize away all quantifiers. All variables become unique.
while isinstance(sem, logic.LambdaExpression):
sem = sem.term
skolemized = skolemize(sem)
if verbose: print 'Skolemized:', skolemized
# Break the hole semantics representation down into its components
# i.e. holes, labels, formula fragments and constraints.
hole_sem = HoleSemantics(skolemized)
# Maybe show the details of the semantic representation.
if verbose:
print 'Holes: ', hole_sem.holes
print 'Labels: ', hole_sem.labels
print 'Constraints: ', hole_sem.constraints
print 'Top hole: ', hole_sem.top_hole
print 'Top labels: ', hole_sem.top_most_labels
print 'Fragments:'
for (l,f) in hole_sem.fragments.items():
print '\t%s: %s' % (l, f)
# Find all the possible ways to plug the formulas together.
pluggings = hole_sem.pluggings()
# Build FOL formula trees using the pluggings.
readings = map(hole_sem.formula_tree, pluggings)
# Print out the formulas in a textual format.
if verbose:
for i,r in enumerate(readings):
print
print '%d. %s' % (i, r)
print
all_readings.extend(readings)
return all_readings
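Example 11 below is the same function ported to Python 3 and the NLTK 3 API: print() calls, tree.label() instead of tree.node, and list(map(...)) to force the lazy map.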
Example 5: sentence_analysis
def sentence_analysis(sent, out=True):
    cp = parse.load_parser('pt_grammar.fcfg', trace=1 if out else 0)
    san = sent.strip(',.').lower()
tokens = san.split()
try:
trees = cp.parse(tokens)
for tree in trees:
if out:
print(tree)
return True
    except Exception:
if out:
print("Esta sentenca nao e valida ou a gramatica ainda nao esta completa...")
return False
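A usage sketch for sentence_analysis; pt_grammar.fcfg is a project-local Portuguese grammar that is not shipped with NLTK, and the example sentence is a hypothetical stand-in:

ok = sentence_analysis('o cachorro perseguiu o gato.', out=False)  # hypothetical sentence
print('covered by the grammar' if ok else 'not covered')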
Example 6: demo
def demo():
cp = parse.load_parser('file:rdf.fcfg', trace=0)
tokens = 'list the actors in the_shining'.split()
    trees = list(cp.parse(tokens))  # NLTK 3; the original used cp.nbest_parse(tokens)
tree = trees[0]
semrep = sem.root_semrep(tree)
trans = SPARQLTranslator()
trans.translate(semrep)
    print(trans.query)
Example 7: parse_with_bindops
def parse_with_bindops(sentence, grammar=None, trace=0):
"""
Use a grammar with Binding Operators to parse a sentence.
"""
if not grammar:
grammar = 'grammars/book_grammars/storage.fcfg'
parser = load_parser(grammar, trace=trace, chart_class=InstantiateVarsChart)
# Parse the sentence.
tokens = sentence.split()
    return list(parser.parse(tokens))  # NLTK 3; the original returned parser.nbest_parse(tokens)
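A usage sketch, assuming the book_grammars data package is installed; 'every girl chases a dog' is the sentence NLTK's own cooper_storage demo feeds to this grammar:

trees = parse_with_bindops('every girl chases a dog')
for tree in trees:
    print(tree.label()['SEM'])  # the semantics, with quantifiers held as binding operators in storage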
Example 8: __init__
def __init__(self, gramfile=None):
"""
:param gramfile: name of file where grammar can be loaded
:type gramfile: str
"""
if gramfile is None:
self._gramfile = "grammars/book_grammars/discourse.fcfg"
else:
self._gramfile = gramfile
self._parser = load_parser(self._gramfile)
Example 9: load_parser
'''
Created on 09 Dec 2012
@author: burakkerim
'''
import sys
from nltk.parse import load_parser
cp = load_parser('file:extended.fcfg')
sentences = [
#----------------------------------
# POSITIVES - already covered by the grammar
#----------------------------------
## ' ALREADY POSITIVES',
## 'Mary likes John',
## 'a boy disappeared',
## 'John eats sandwiches',
## 'a boy finds cats',
## 'the boy finds cats',
## 'Kim believes John likes her',
## 'the students vanished with the telescope',
## 'every woman likes John',
## 'Kim believes John likes her',
#----------------------------------
# MISSING - add these to the grammar
    #----------------------------------
## ' POSITIVES',
'the dog chased the cat which ate the mouse',
    'people chase Sue who ate the unicorn which Tom saw',
]
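The scraped snippet cuts off inside the sentence list. A plausible completion (an assumption, not the original code) parses each of the remaining sentences:

for sent in sentences:
    tokens = sent.split()
    for tree in cp.parse(tokens):  # NLTK 3; older NLTK versions would use cp.nbest_parse(tokens)
        print(tree)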
Example 10: mute
import sys
import codecs
import nltk
from nltk.corpus import machado
from nltk import grammar, parse
from nltk.parse.featurechart import InstantiateVarsChart
sent_tokenizer = nltk.data.load('tokenizers/punkt/portuguese.pickle')
raw_text1 = machado.raw('romance/marm05.txt')
raw_text2 = machado.raw('romance/marm04.txt')
raw_text3 = machado.raw('romance/marm03.txt')
ptext1 = nltk.Text(machado.words('romance/marm01.txt'))
ptext2 = nltk.Text(machado.words('romance/marm02.txt'))
ptext3 = nltk.Text(machado.words('romance/marm03.txt'))
ptext4 = nltk.Text(machado.words('romance/marm04.txt'))
cp = parse.load_parser('grammars/book_grammars/feat0.fcfg', trace=1)
stemmer = nltk.stem.RSLPStemmer()
## Check which Python implementation is running (benchmarking setup)
if 'PyPy' in sys.version:
version = 'PyPy {}'.format(sys.version)
else:
version = 'CPython {}'.format(sys.version)
report.setup('PyPy' in version)  # 'report' is an external helper from the original benchmark project (not shown)
def mute():
sys.stdout = codecs.open('/dev/null','w','utf8') #use codecs to avoid decoding errors
def unmute():
sys.stdout = sys.__stdout__
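The mute/unmute pair exists because cp was loaded with trace=1, which makes every parse print chart edges. A sketch of silencing one parse (assumes a POSIX /dev/null, as in the original):

mute()  # trace output now goes to /dev/null
trees = list(cp.parse('Kim likes children'.split()))
unmute()  # restore normal stdout
print(len(trees), 'parse(s) found')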
Example 11: hole_readings
def hole_readings(sentence, grammar_filename=None, verbose=False):
if not grammar_filename:
grammar_filename = "grammars/sample_grammars/hole.fcfg"
if verbose:
print("Reading grammar file", grammar_filename)
parser = load_parser(grammar_filename)
# Parse the sentence.
tokens = sentence.split()
    trees = list(parser.parse(tokens))  # NLTK 3; nbest_parse was removed from the API
if verbose:
print("Got %d different parses" % len(trees))
all_readings = []
for tree in trees:
# Get the semantic feature from the top of the parse tree.
sem = tree.label()["SEM"].simplify()
# Print the raw semantic representation.
if verbose:
print("Raw: ", sem)
# Skolemize away all quantifiers. All variables become unique.
while isinstance(sem, LambdaExpression):
sem = sem.term
skolemized = skolemize(sem)
if verbose:
print("Skolemized:", skolemized)
# Break the hole semantics representation down into its components
# i.e. holes, labels, formula fragments and constraints.
hole_sem = HoleSemantics(skolemized)
# Maybe show the details of the semantic representation.
if verbose:
print("Holes: ", hole_sem.holes)
print("Labels: ", hole_sem.labels)
print("Constraints: ", hole_sem.constraints)
print("Top hole: ", hole_sem.top_hole)
print("Top labels: ", hole_sem.top_most_labels)
print("Fragments:")
for (l, f) in hole_sem.fragments.items():
print("\t%s: %s" % (l, f))
# Find all the possible ways to plug the formulas together.
pluggings = hole_sem.pluggings()
# Build FOL formula trees using the pluggings.
readings = list(map(hole_sem.formula_tree, pluggings))
# Print out the formulas in a textual format.
if verbose:
for i, r in enumerate(readings):
print()
print("%d. %s" % (i, r))
print()
all_readings.extend(readings)
return all_readings
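A usage sketch, assuming the sample_grammars data package (which ships hole.fcfg) is installed; the sentence follows nltk.sem.hole's own demo:

for i, reading in enumerate(hole_readings('every girl chases a dog')):
    print(i, reading)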
Example 12: sorted
tbwc = tb.word_counts  # tb: likely a TextBlob built earlier in the original script (not shown)
srtd = sorted(tbwc, key=tbwc.get, reverse=True)
for w in srtd:
    if w not in fnagl:
notinlist.append(w)
with open(r'notingloss.txt', 'w', encoding='utf-8') as f:
for w in notinlist:
print(w, file=f)
if False:
from nltk import grammar, parse
sent = ' to 1·5–2·3 cm. tall'
tokens = ['to', '15', '-', '23', 'cm', '.', 'in', 'diam.']
# tokens = ['to','23','m','tall']
cp = parse.load_parser('../resources/simplerange.fcfg', trace=2)
trees = cp.parse(tokens)
for tree in trees:
print(tree)
if False:
    import re  # needed for re.split below
    import linkgrammar as lg
sents = re.split(r'(?<=\.)\s+(?=[A-Z])|;\s+', testtext)
p = lg.Parser(lang="en", verbosity=1, max_null_count=10)
for sent in sents:
print(sent)
linkages = p.parse_sent(sent)
for linkage in linkages[0:1]:
print(linkage.num_of_links, linkage.constituent_phrases_nested)
Example 13: nbest_parse
def nbest_parse(self, xx):
    parser = parse.load_parser('file:hw2.fcfg', trace=2)
    wordlist = xx.split()
    trees = list(parser.parse(wordlist))  # NLTK 3; the original Python 2 code called parser.nbest_parse(wordlist)
    for tree in trees:
        print(tree)
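A closing note on the trace keyword seen throughout (Examples 5, 10, 12 and 13): load_parser forwards it to the chart parser it builds, where 0 is silent and higher values print progressively more detail about the edges added to the chart during parsing.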