

Python nltk.parse_cfg Function Code Examples

This article collects typical usage examples of the Python function nltk.parse_cfg. If you are wondering how exactly parse_cfg is used, how to call it, or what real-world usage looks like, the curated code examples below may help.


Fifteen code examples of the parse_cfg function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
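Note that nltk.parse_cfg and nbest_parse belong to the NLTK 2.x API, which the examples below target. In NLTK 3 the same pattern uses nltk.CFG.fromstring and the parsers' parse() method; a minimal sketch of the modern equivalent (not taken from the examples below) would be:

import nltk

# NLTK 3 equivalent of the parse_cfg / nbest_parse pattern shown in the examples below.
grammar = nltk.CFG.fromstring("""
    S -> NP VP
    VP -> V NP
    NP -> 'Mary' | 'Bob'
    V -> 'saw'
    """)
parser = nltk.RecursiveDescentParser(grammar)
for tree in parser.parse("Mary saw Bob".split()):
    print(tree)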

Example 1: contextFreeGrammar

def contextFreeGrammar():
    print "page 298 Context-Free Grammar"
    print "=============== A Simple Grammar ==============="
    grammar1 = nltk.parse_cfg("""
        S -> NP VP
        VP -> V NP | V NP PP  
        PP -> P NP  
        V -> "saw" | "ate" | "walked"  
        NP -> "John" | "Mary" | "Bob" | Det N | Det N PP  
        Det -> "a" | "an" | "the" | "my"  
        N -> "man" | "dog" | "cat" | "telescope" | "park"  
        P -> "in" | "on" | "by" | "with"  
        """)
    sent = "Mary saw Bob".split()
    rd_parser = nltk.RecursiveDescentParser(grammar1)
    for tree in rd_parser.nbest_parse(sent):
        print tree

    print "=============== Writing Your Own Grammars ==============="
    grammar1 = nltk.data.load('file:mygrammar.cfg')
    sent = "Mary saw Bob".split()
    rd_parser = nltk.RecursiveDescentParser(grammar1)
    for tree in rd_parser.nbest_parse(sent):
        print tree

    print "=============== Recursion in Syntactic Structure ==============="
    grammar2 = nltk.parse_cfg("""
        S -> NP VP
        NP -> Det Nom | PropN
        Nom -> Adj Nom | N
        VP -> V Adj | V NP | V S | V NP PP
        PP -> P NP
        PropN -> 'Buster' | 'Chatterer' | 'Joe'
        Det -> 'the' | 'a'
        N -> 'bear' | 'squirrel' | 'tree' | 'fish' | 'log'
        Adj -> 'angry' | 'frightened' | 'little' | 'tall'
        V -> 'chased' | 'saw' | 'said' | 'thought' | 'was' | 'put'
        P -> 'on'
        """)
Author: hbdhj, Project: python, Lines: 27, Source: chapter8.py
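Example 1 loads a grammar from the file mygrammar.cfg, whose contents are not shown in the snippet. A hypothetical file mirroring grammar1 above (an illustration, not the original author's file) could be written out like this:

grammar_text = """
S -> NP VP
VP -> V NP | V NP PP
PP -> P NP
V -> "saw" | "ate" | "walked"
NP -> "John" | "Mary" | "Bob" | Det N | Det N PP
Det -> "a" | "an" | "the" | "my"
N -> "man" | "dog" | "cat" | "telescope" | "park"
P -> "in" | "on" | "by" | "with"
"""
# Write the hypothetical grammar to disk so nltk.data.load('file:mygrammar.cfg') can find it.
with open('mygrammar.cfg', 'w') as f:
    f.write(grammar_text)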

Example 2: cfgMatch

def cfgMatch ( nlQuery ):
    terminalList = [ 'find','search','display','tell','faculty','student','staff','other' ]
    grammar = nltk.parse_cfg("""
                    S -> A B
                    A -> 'find'|'search'|'display'|'tell'
                    B -> 'faculty'|'student'|'staff'|'other'
                    """)
# The parser crashes if a token that is not in the grammar is used,
# so we check and report an error if such a word appears anywhere.
##################################################################
# Check and errors reporting here
    tokenizedList = list( word_tokenize( nlQuery  ) )
    for word in tokenizedList:
        if word not in terminalList:
            print "ERROR"
            return -1
##################################################################
    parser = nltk.RecursiveDescentParser ( grammar )
    parseTree = parser.nbest_parse ( tokenizedList, 1 )

    for tree in parseTree:
        print tree
        for elem in tree:
            for i in tree.node:
                print i
Author: amoghtolay, Project: iitgminiUID, Lines: 25, Source: queryParser.py

Example 3: generate_grammar

def generate_grammar(sentence):
    grammar = "\n".join([r for r, freq in frequent_rules])
    for (word, pos_tag) in sentence:
        grammar += "%s -> '%s' \n" %(pos_tag, word)

    #print grammar
    return nltk.parse_cfg(grammar)
Author: phdowling, Project: CompLingApplications, Lines: 7, Source: parseNP.py

Example 4: demo

def demo():
    from nltk import Nonterminal, parse_cfg
    nonterminals = 'S VP NP PP P N Name V Det'
    (S, VP, NP, PP, P, N, Name, V, Det) = [Nonterminal(s)
                                           for s in nonterminals.split()]

    grammar = parse_cfg("""
    S -> NP VP
    PP -> P NP
    NP -> Det N
    NP -> NP PP
    VP -> V NP
    VP -> VP PP
    Det -> 'a'
    Det -> 'the'
    Det -> 'my'
    NP -> 'I'
    N -> 'dog'
    N -> 'man'
    N -> 'park'
    N -> 'statue'
    V -> 'saw'
    P -> 'in'
    P -> 'up'
    P -> 'over'
    P -> 'with'
    """)

    def cb(grammar): print(grammar)
    top = Tk()
    editor = CFGEditor(top, grammar, cb)
    Label(top, text='\nTesting CFG Editor\n').pack()
    Button(top, text='Quit', command=top.destroy).pack()
    top.mainloop()
Author: Arttii, Project: TextBlob, Lines: 34, Source: cfg.py

Example 5: parseSimile

def parseSimile(tokensWithIndices):
    #The grammar used to check a simile
    grammar = nltk.parse_cfg("""
    S -> NP "like" NP | "ADJ" "like" "NP" | NP V "like" NP | "EX" "like" "NP" | NP "as" "ADJ" "as" NP | V "as" "ADJ" "as" NP |OTH
    NP -> N | "ADJ" N | "DET" NP 
    N -> "NP" | "PRO" | "N"
    V -> "VD" | "V" | "VG"
    OTH -> "OTH" "PUNC" "FW" "WH" "TO" "NUM" "ADV" "VD" "VG" "L" "VN" "N" "P" "S" "EX" "V" "CNJ" "UH" "PRO" "MOD"  
    """)  
    tokens = map(lambda i: i[0], tokensWithIndices)
    indices = map(lambda i: i[1], tokensWithIndices)
    parser = nltk.ChartParser(grammar)
    simile_indices = []
    start_token = 0
    while (start_token < len(tokens) - 2):
        end_token = start_token + 2 #can't have simile smaller than 4 words
        simile = False
        while ( (not simile) and (end_token <= len(tokens))):
            if (len(parser.nbest_parse(tokens[start_token:end_token])) > 0): #If a parse tree was formed
                simile_indices.extend(indices[start_token:end_token])
                start_token = end_token
                simile = True            
            else:    
                end_token += 1
        start_token += 1
    return simile_indices
Author: HalleyYoung, Project: Poetry_Analysis, Lines: 26, Source: similes.py

Example 6: parse

def parse(wordlist, grammar, generator):
    """
    Parse this thang. Call off to nltk's chart parser (which is
    the only one fast enough to parse the massive grammar). Only
    use the top best tree. If no parse tree is found, the program
    dies. Then pass the tree along for actual semantic analysis,
    and then print out the parse and we're done!
    """

    import nltk

    try:
        gr = nltk.parse_cfg(grammar)
        parts = [w.reduced() for w in wordlist]

        parser = nltk.BottomUpChartParser(gr)
        trees = parser.nbest_parse(parts)

        classifiers = ClassifierCollection(generator)
        ct = 0
        for tree in trees:
            rparse(tree, classifiers, False)
            ct += 1
            break

        if ct == 0:
            raise ParserException('No parse trees found')

        classifiers.finish()
        classifiers.pprint()

    except ValueError, e:
        raise ParserException(str(e))
Author: eberle1080, Project: newman, Lines: 33, Source: parser.py

Example 7: test_returnRelevantTuples_2

	def test_returnRelevantTuples_2(self):
		# arrange
		testGrammar = """
S -> NP VP

VP -> VP PP
VP -> V NP
VP -> 'eats'

PP -> P NP

NP -> Det N
NP -> 'she'

V -> 'eats'

P -> 'with'

N -> 'fish'
N -> 'fork'

Det -> 'a'
"""
		grammar = nltk.parse_cfg(testGrammar)

		sent = ['she', 'eats', 'a', 'fish', 'with', 'a', 'fork']

		inst = cyk.Cyk(sent, grammar.productions())

		# act		
		inst.executeAlgorithm()

		# assert
		self.assertTrue(inst.isInGrammar())
Author: roylanceMichael, Project: compling_571_deepprocessing_washington, Lines: 34, Source: test.py

Example 8: __init__

	def __init__(self, cfgGrammar):
		self.pb = productionBuilder.ProductionBuilder()

		self.grammar = nltk.parse_cfg(cfgGrammar)
		self.terminalTransformProductions = []
		self.nonTerminalTransformProductions = []
		self.singleNonTerminalTransformProductions = []
Author: roylanceMichael, Project: compling_571_deepprocessing_washington, Lines: 7, Source: cfgToCnfBuilder.py

Example 9: __init__

    def __init__(self, blackboard, tense = "present", person = 1):
        super(SentenceExpert, self).__init__(blackboard, "Sentence Expert", tense, person,5)
        self.eva = ["be", "look", "feel"]
        self.atv = ["like", "hate", "love", "know", "need", "see"]

        """ eva - emotional verb active
            evp - emotional verb passive
            ej - emotion adjective
            en - emotional noun
            atv - attitude verb
        """
        self.grammar = nltk.parse_cfg("""
            S -> P | EP | Person ATV NP
            P -> NP VP 
            EP -> Person EVA EJ | NP EVP Pron EJ | ENP VP
            ENP ->  EN OF NP 
            NP -> Det N | Det JJ N | Det EJ JJ N | Det EJ N | Det EN
            VP -> V | V ERB | ERB V
            Det -> 'the'
            N -> 'n'
            V -> 'v' 
            EVA -> 'eva'
            EVP -> 'makes' 
            EN -> 'en'
            EJ -> 'ej'
            JJ -> 'adj'
            ERB -> 'erb'
            ATV -> 'atv'
            Person -> 'person'
            Pron -> 'pron'
            OF -> 'of'
            CC -> 'and' | 'but' | 'because' | 'so'
            """)
Author: JoannaMisztalRadecka, Project: Blackboard-Poetry-Generator, Lines: 33, Source: SentenceExpert.py

Example 10: build_grammar

    def build_grammar(self):
        '''Use the corpus data and return an NLTK grammar.'''

        grammer_def = self.build_grammar_text().getvalue()
        grammar = nltk.parse_cfg(grammer_def.encode('utf8'))

        return grammar
Author: chfoo, Project: CompFacts, Lines: 7, Source: grammar.py

Example 11: Solution_parse

def Solution_parse(args):
  try:
    print "Parser option: %s " % args.parseOption
    gstring = open('solutiongrammar.cfg', 'r').read()
    grammar1 = nltk.parse_cfg(gstring)
    #print grammar1 , '\n'
    
    if (args.parseOption == 'rd'):
      parser = nltk.RecursiveDescentParser(grammar1)
    elif(args.parseOption == 'sr'):
      parser = nltk.ShiftReduceParser(grammar1)
    elif(args.parseOption == 'ec'):
      parser = nltk.parse.EarleyChartParser(grammar1)
    elif(args.parseOption == 'td'):
      parser = nltk.parse.TopDownChartParser(grammar1)
    elif(args.parseOption == 'bu'):
      parser = nltk.parse.BottomUpChartParser(grammar1)
    else:
      raise Exception("Unknown parseOption: %s" % args.parseOption)

    i = 0
    for line in open('inputfile.txt','r'):
      i += 1
      pass
      if i == 1:
        print line
        sent = wordpunct_tokenize(line)
        print sent , '\n'
        pp = parser.parse(sent)
        print pp, '\n'
        pass

  except Exception, err:
    sys.stderr.write('ERROR: %s\n' % str(err))
    raise
Author: jtmhom88, Project: LING571_HW1, Lines: 35, Source: multiparser.py

Example 12: test_ctor

	def test_ctor(self):
		# arrange
		testGrammar = """
S -> NP VP

VP -> VP PP
VP -> V NP
VP -> 'eats'

PP -> P NP

NP -> Det N
NP -> 'she'

V -> 'eats'

P -> 'with'

N -> 'fish'
N -> 'fork'

Det -> 'a'
"""
		grammar = nltk.parse_cfg(testGrammar)

		sent = ['she', 'eats', 'a', 'fish', 'with', 'a', 'fork']

		# act
		inst = cyk.Cyk(sent, grammar.productions())

		# assert
		self.assertTrue(inst != None)
		self.assertTrue(inst.sentence == sent)
Author: roylanceMichael, Project: compling_571_deepprocessing_washington, Lines: 33, Source: test.py

Example 13: demo

def demo():
    """
    A demonstration of the shift-reduce parser.
    """

    from nltk import parse, parse_cfg

    grammar = parse_cfg(
        """
    S -> NP VP
    NP -> Det N | Det N PP
    VP -> V NP | V NP PP
    PP -> P NP
    NP -> 'I'
    N -> 'man' | 'park' | 'telescope' | 'dog'
    Det -> 'the' | 'a'
    P -> 'in' | 'with'
    V -> 'saw'
    """
    )

    sent = "I saw a man in the park".split()

    parser = parse.ShiftReduceParser(grammar, trace=2)
    for p in parser.nbest_parse(sent):
        print p
Author: Razin-Tailor, Project: ChatterBot, Lines: 26, Source: sr.py

Example 14: test

def test():
	import nltk
	grammar1 = nltk.parse_cfg("""

	""")
	sr_parse = nltk.ShiftReduceParser(grammar1)

	sent = "Lee ran away home".split()
	return sr_parse.parse(sent)
Author: sprotsai, Project: SolomiyaProtsay, Lines: 9, Source: 8.9.18.py

Example 15: __init__

 def __init__(self, blackboard, tense = "present"):
     super(RhetoricalExpert, self).__init__(blackboard, "Rhetorical Expert", tense, 3)
     self.grammar = nltk.parse_cfg("""
         S -> WHAT BE Det NP | WHY BE Det N SO JJ
         NP -> JJ N | N
         JJ -> 'adj'
         N -> 'n'
         Det -> 'the'
         BE -> 'be'
         SO -> 'so'
         WHAT -> 'what'
         WHY -> 'why'
         """)
Author: JoannaMisztalRadecka, Project: Blackboard-Poetry-Generator, Lines: 13, Source: RhetoricalExpert.py


Note: The nltk.parse_cfg examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other open-source code and documentation platforms. The code snippets are taken from open-source projects contributed by various developers; copyright in the source code remains with the original authors. For distribution and use, please refer to the license of the corresponding project; do not reproduce without permission.