

Python nltk.CFG Class Code Examples

This article collects typical usage examples of the nltk.CFG class in Python. If you are unsure what the CFG class does, how to use it, or what real code that uses it looks like, the selected examples below should help.


The following presents 15 code examples of the CFG class, ordered by popularity by default.
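All of the examples share the same basic pattern: build a grammar with CFG.fromstring, then either parse token lists or enumerate sentences with nltk.parse.generate. A minimal sketch of that pattern (the toy grammar here is illustrative, not taken from any of the projects below):

from nltk import CFG
from nltk.parse.generate import generate

toy_grammar = CFG.fromstring("""
S -> NP V
NP -> 'the' N
N -> 'dog' | 'cat'
V -> 'sleeps' | 'barks'
""")

# enumerate every sentence licensed by this small, finite grammar
for sentence in generate(toy_grammar, n=4):
    print(' '.join(sentence))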

Example 1: respondQuestion

# This excerpt assumes: from nltk import CFG; from nltk.parse.generate import generate; from random import randint
def respondQuestion(sentence, keyWord, POS):
	if "Tell me" not in sentence:
		grammar = ""

		if POS == "NNPS" or POS == "NNS":
			grammar = CFG.fromstring("""
			S -> H-NP1 Adj VP'?' | Wh-NP VP'?'
			H-NP1 -> 'How'
			Wh-NP -> 'Who' | 'What' | 'Where' | 'What'
			Adj -> 'big' | 'small' | 'happy' | 'sad' | 'large' | 'difficult' | 'emotional' | 'old' | 'healthy' | 'strong' | 'cute' | 'hungry'
			NP -> Pronoun | Proper-Noun | Noun
			Pronoun -> 'they' | 'those'
			Proper-Noun -> '[]'
			Noun -> 'the <>'
			VP -> Verb NP  
			Verb -> 'are' 
			""")
		elif POS == "NN" or POS == "NNP":
			grammar = CFG.fromstring("""
			S -> H-NP1 Adj VP'?' | Wh-NP VP'?'
			H-NP1 -> 'How'
			Wh-NP -> 'Who' | 'What' | 'Where' | 'What'
			Adj -> 'big' | 'small' | 'happy' | 'sad' | 'large' | 'difficult' | 'emotional' | 'old' | 'healthy' | 'strong' | 'cute' | 'hungry'
			NP -> Pronoun | Proper-Noun | Noun
			Pronoun -> 'it' | 'that'
			Proper-Noun -> '[]'
			Noun -> 'the <>'
			VP -> Verb NP  
			Verb -> 'is' 
			""")

		rand_sent_list = []
		response = ""
		for sentence in generate(grammar):
		    rand_sent_list.append(' '.join(sentence))
		while True:
			num = randint(0, len(rand_sent_list)-1)
			response = rand_sent_list[num]
			if "<>" in response and (POS == "NNS" or POS == "NN"):
				index = response.index("<>")
				response = response[:index] + keyWord + response[index+2:]
				break
			if "[]" in response and (POS == "NNPS" or POS == "NNP"):
				index = response.index("[]")
				response = response[:index] + keyWord + response[index+2:]
				break
			if "<>" not in response and "[]" not in response:
				break
		return response
	else:
		knowledgeRep(sentence)
Developer: Roceso1337, Project: FriendBot, Lines of code: 51, Source file: parser.py
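A minimal usage sketch for Example 1, with made-up inputs; in practice the keyword and POS tag would come from a tagger such as nltk.pos_tag:

from nltk import CFG
from nltk.parse.generate import generate
from random import randint

# hypothetical call: answer a question about the plural common noun "dogs" (tag NNS)
print(respondQuestion("What do you think of dogs", "dogs", "NNS"))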

Example 2: Tweet_content1

def Tweet_content1():
  # demo_grammar is a module-level grammar string (as in Example 15 below)
  grammar = CFG.fromstring(demo_grammar)

  # generate up to n=4 sentences; print and return the first one
  for sentence in generate(grammar, n=4):
    print(' '.join(sentence))
    return sentence
Developer: BelhalK, Project: twitterbot, Lines of code: 7, Source file: Tweet_content1.py

Example 3: __init__

    def __init__(self, phonemes=None, onset=None, coda=None):
        self.phonemes = phonemes or Phoneme()

        # use CFG to structure syllables
        if onset is None: # optional onset
            onset = 'C | C C | \' \''
        elif onset: # mandatory onset
            onset = 'C | C C'
        else: # no onset
            onset = '\' \''

        if coda is None: # optional coda
            coda = 'C | \' \''
        elif coda: # mandatory coda
            coda = 'C'
        else: # no coda
            coda = '\' \''
        # nucleus is always present

        # based on the "typical model"
        grammar = '''
        S -> O V K
        O -> %s
        K -> %s
        C -> \'c\'
        V -> \'v\'
        ''' % (onset, coda)
        self.grammar = CFG.fromstring(grammar)
        self.syllables = self.generate_syllables()
Developer: mouse-reeve, Project: langmaker, Lines of code: 29, Source file: syllable.py
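The excerpt calls self.generate_syllables(), which is not shown. A plausible sketch of that method under the same grammar, using nltk.parse.generate to enumerate the 'c'/'v' syllable templates; note the empty onset/coda terminal is a bare space, stripped here:

    # plausible sketch, not part of the excerpt; assumes
    # `from nltk.parse.generate import generate` at module level
    def generate_syllables(self):
        # the syllable grammar is finite (no recursion), so generate() terminates;
        # each derivation is a token list such as ['c', 'v', 'c'] or [' ', 'v']
        return [''.join(tokens).strip() for tokens in generate(self.grammar)]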

Example 4: gen_grammar3_past_plural

def gen_grammar3_past_plural(verb, direct_object, count):
	g1 ="""
	S -> W TR SUB V '?' | WA TR SUB V DO '?' 
	W -> 'who' | 'what' | 'when' | 'where' | 'why' | 'how'
	WA -> 'when' | 'where' | 'why' | 'how'
	TR -> 'have'
	SUB -> PRO
	PRO -> 'they' |'you'
	V -> '%s'
	DO -> 'the %s'
	"""%(verb, direct_object)
	grammar1 = CFG.fromstring(g1)
	multiplier = 0
	with open('sentences.csv', 'ab') as csvwriter:
		writer = csv.writer(csvwriter)
		for sentence in generate(grammar1, n=999):
			# generate() yields token lists; join them into a string before matching prefixes
			sentence = ' '.join(sentence)
			if sentence.find('who') == 0:
				multiplier = 1
			if sentence.find('what') == 0:
				multiplier = 1
			if sentence.find('when') == 0:
				multiplier = 2
			if sentence.find('where') == 0:
				multiplier = 2
			if sentence.find('why') == 0:
				multiplier = 4
			if sentence.find('how') == 0:
				multiplier = 4
			writer.writerow((sentence, multiplier*count))
Developer: danielzuot, Project: honeyencryption, Lines of code: 29, Source file: message_generator.py

Example 5: demo

def demo():
    """
    A demonstration of the recursive descent parser.
    """

    from nltk import parse, CFG

    grammar = CFG.fromstring("""
    S -> NP VP
    NP -> Det N | Det N PP
    VP -> V NP | V NP PP
    PP -> P NP
    NP -> 'I'
    N -> 'man' | 'park' | 'telescope' | 'dog'
    Det -> 'the' | 'a'
    P -> 'in' | 'with'
    V -> 'saw'
    """)

    for prod in grammar.productions():
        print(prod)

    sent = 'I saw a man in the park'.split()
    parser = parse.RecursiveDescentParser(grammar, trace=2)
    for p in parser.parse(sent):
        print(p)
Developer: Weiming-Hu, Project: text-based-six-degree, Lines of code: 26, Source file: recursivedescent.py

Example 6: demo

def demo():
    """
    A demonstration of the shift-reduce parser.
    """

    from nltk import parse, CFG

    grammar = CFG.fromstring(
        """
    S -> NP VP
    NP -> Det N | Det N PP
    VP -> V NP | V NP PP
    PP -> P NP
    NP -> 'I'
    N -> 'man' | 'park' | 'telescope' | 'dog'
    Det -> 'the' | 'a'
    P -> 'in' | 'with'
    V -> 'saw'
    """
    )

    sent = 'I saw a man in the park'.split()

    parser = parse.ShiftReduceParser(grammar, trace=2)
    for p in parser.parse(sent):
        print(p)
Developer: prz3m, Project: kind2anki, Lines of code: 26, Source file: shiftreduce.py

Example 7: setUp

    def setUp(self):
        if not exists(self.LEXICON_FILE_NAME):
            self.skipTest("Unable to find file {} as lexicon".format(
                self.LEXICON_FILE_NAME))
        if not exists(self.GRAMMAR_FILE_NAME):
            self.skipTest("Unable to find file {} as grammar".format(
                self.GRAMMAR_FILE_NAME))
        assert exists(self.PARSE_TREES_FILE_NAME)

        valid, lexiconText = q1utils.sanitizeAndValidateLexicon(
            self.LEXICON_FILE_NAME)
        if not valid:
            self.skipTest("Lexicon {} is invalid.".format(
                self.LEXICON_FILE_NAME))

        valid, grammarText = q1utils.sanitizeAndValidateGrammar(
            self.GRAMMAR_FILE_NAME)
        if not valid:
            self.skipTest("Grammar {} is invalid.".format(
                self.GRAMMAR_FILE_NAME))

        allRules = grammarText + '\n' + lexiconText

        try:
            grammar = CFG.fromstring(allRules)
            self._parser = BottomUpChartParser(grammar)
        except Exception as e:
            self.skipTest(str(e))
Developer: smearle, Project: wrking-w-wordnet, Lines of code: 28, Source file: unittest_prefix.py

Example 8: get_pos_tags

def get_pos_tags(pos_tuples):
    """
    Returns the POS tags from POS tuples of (word, tag)
    Updates the grammar for unknown tags
    """

    global grammar_string
    global grammar
    global terminals

    changed_grammar = False
    pos_tags = []

    for pos_tuple in pos_tuples:
        tag = pos_tuple[1]

        if tag not in terminals:

            if tag == '\'\'':
                tag = 'APOS'

            grammar_string += ' | \'' + tag + '\''

            terminals[tag] = None
            changed_grammar = True

        pos_tags.append(tag)

    if changed_grammar:
        grammar = CFG.fromstring(grammar_string)

    return pos_tags
Developer: adithyap, Project: mlcs16, Lines of code: 32, Source file: generate_n_grams.py
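The function relies on module-level globals that are not part of the excerpt. A plausible initialization, assuming a flat tag-level grammar whose last production is extended in place as new tags appear (the productions and seed tags here are illustrative, not taken from the original generate_n_grams.py):

from nltk import CFG

# a sentence is one or more POS tags; the TAG production grows when
# get_pos_tags() appends " | '<tag>'" to grammar_string for unseen tags
grammar_string = "S -> TAG S | TAG\nTAG -> 'NN' | 'VB' | 'DT'"
terminals = {'NN': None, 'VB': None, 'DT': None}
grammar = CFG.fromstring(grammar_string)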

Example 9: gen_grammar_plural

def gen_grammar_plural(verb, direct_object, count):
	try:
		verb = en.verb.present_participle(verb)
	except KeyError:
		return
	if verb != "":
		g1 ="""
		S -> WA TR SUB V DO '?' | W TR SUB V '?' 
		W -> 'who' | 'what' | 'when' | 'where' | 'why' | 'how'
		WA -> 'when' | 'where' | 'why' | 'how'
		TR -> 'are' | 'were'
		SUB -> 'they' | 'you'
		V -> '%s'
		DO -> 'the %s'
		"""%(verb, direct_object)
		grammar1 = CFG.fromstring(g1)
		multiplier = 1
		with open('sentences.csv', 'ab') as csvwriter:
			writer = csv.writer(csvwriter)
			for sentence in generate(grammar1, n=999):
				sentence = ' '.join(sentence)
				if sentence.find('who') == 0:
					multiplier = 1
				if sentence.find('what') == 0:
					multiplier = 1
				if sentence.find('when') == 0:
					multiplier = 2
				if sentence.find('where') == 0:
					multiplier = 2
				if sentence.find('why') == 0:
					multiplier = 4
				if sentence.find('how') == 0:
					multiplier = 4
				writer.writerow((sentence, multiplier*count))
Developer: danielzuot, Project: honeyencryption, Lines of code: 34, Source file: message_generator.py

Example 10: chart_parse

def chart_parse(in_file, grammar_file, out_file):
    text = unicode(open(in_file, 'r').read(), errors='ignore')
    output = open(out_file, 'w')
    grammar_string = unicode(open(grammar_file, 'r').read(), errors='ignore')
    try:
        grammar = CFG.fromstring(grammar_string)
        parser = nltk.ChartParser(grammar)
        sentences = nltk.sent_tokenize(text)
        for sentence in sentences:
            words = nltk.word_tokenize(sentence)
            tree = parser.parse(words)
            for item in tree:
                output.write(str(item))
                output.write('\n')
    except Exception, e:
        message = "Error with parsing. Check the input files are correct and the grammar contains every word in the input sequence. \n----\n" + str(e)
        sys.stderr.write(message)
        sys.exit()
Developer: IntersectAustralia, Project: hcsvlab-galaxy, Lines of code: 18, Source file: g_chart_parser.py

Example 11: someGrammaticalDilemmas

def someGrammaticalDilemmas():
    print "page 292 8.1  Some Grammatical Dilemmas"
    print "=============== Linguistic Data and Unlimited Possibilities ==============="
    from nltk import CFG
    groucho_grammar = CFG.fromstring(""" 
        S -> NP VP
        PP -> P NP
        NP -> Det N | Det N PP | 'I' 
        VP -> V NP | VP PP 
        Det -> 'an' | 'my' 
        N -> 'elephant' | 'pajamas' 
        V -> 'shot'
        P -> 'in'
        """)
    sent = ['I', 'shot', 'an', 'elephant', 'in', 'my', 'pajamas']
    parser = nltk.ChartParser(groucho_grammar)
    trees = parser.nbest_parse(sent)
    for tree in trees:
        print tree
Developer: hbdhj, Project: python, Lines of code: 19, Source file: chapter8.py

Example 12: main

def main():
    parser = argparse.ArgumentParser(description='CKY and PCKY')
    parser.add_argument('-g', '--grammar', help='Input file name', required=True)
    parser.add_argument('-s', '--sentence', help='Input sentence', required=True)
    args = parser.parse_args()

    grammar_text = None
    with open(args.grammar, 'r') as f:
        grammar_text = f.read()

    grammar = None
    result = None
    try:
        grammar = CFG.fromstring(grammar_text)
    except ValueError:
        grammar = PCFG.fromstring(grammar_text)

    if type(grammar) is CFG:
        result = cky(args.sentence, grammar)
    elif type(grammar) is PCFG:
        result = pcky(args.sentence, grammar)
Developer: BabisK, Project: M36209P, Lines of code: 21, Source file: cky.py

Example 13: __init__

    def __init__(self, cfg_grammar=None, origin_file='save/origin.txt', oracle_file='save/oracle.txt',
                 wi_dict='save/word_index_dict.json', iw_dict='save/index_word_dict.json',
                 sequence_length=None):
        if cfg_grammar is None:
            cfg_grammar = """
              S -> S PLUS x | S SUB x |  S PROD x | S DIV x | x | '(' S ')'
              PLUS -> '+'
              SUB -> '-'
              PROD -> '*'
              DIV -> '/'
              x -> 'x' | 'y'
            """

        self.grammar = CFG.fromstring(cfg_grammar)
        self.origin_file = origin_file
        self.oracle_file = oracle_file
        self.wi_dict = wi_dict
        self.iw_dict = iw_dict
        self.sequence_length = sequence_length
        self.vocab_size = None
        return
Developer: IshJ, Project: Texygen, Lines of code: 21, Source file: OracleCfg.py
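The constructor only stores the grammar; the actual oracle generation happens elsewhere in Texygen and is not shown here. A minimal sketch of how sentences could be produced from self.grammar (the helper name and parameters are illustrative):

from nltk.parse.generate import generate

def write_oracle_corpus(oracle, n=100, depth=6):
    # depth bounds the recursive S productions; n caps the number of expressions
    with open(oracle.origin_file, 'w') as f:
        for tokens in generate(oracle.grammar, n=n, depth=depth):
            f.write(' '.join(tokens) + '\n')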

Example 14: CFG_grammar

def CFG_grammar():
    GOAL_FIND,ENTITY_PLACE = nonterminals('GOAL_FIND,ENTITY_PLACE')
    usr_goal = ENTITY_PLACE
    usr_find = GOAL_FIND
    VP,NP,O = nonterminals('VP,NP,O')

    # Build a CFG based on the symbols that generated above.
    grammar = CFG.fromstring("""
    VP -> GOAL_FIND O ENTITY_PLACE | GOAL_FIND ENTITY_PLACE
    NP -> P ENTITY_PLACE | ENTITY_PLACE
    GOAL_FIND -> 'find'
    GOAL_FIND  -> 'show'
    GOAL_FIND  -> 'tell'
    O -> 'me'
    P -> 'in'
    ENTITY_PLACE -> 'starbucks'
    ENTITY_PLACE -> 'the starbucks'
    ENTITY_PLACE -> 'a starbucks'
    ENTITY_PLACE -> 'coffee bean'
    ENTITY_PLACE -> 'the coffee bean'
    ENTITY_PLACE -> 'a coffee bean'

    """)
    return grammar
Developer: EMCSlabs, Project: SpokenDialogueSystem, Lines of code: 24, Source file: grammar_generator.py
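A brief usage sketch for the grammar returned above. The start symbol is VP (the first left-hand side), and multi-word places such as 'a starbucks' are single terminals, so the input token list has to keep them as one token:

import nltk

grammar = CFG_grammar()
parser = nltk.ChartParser(grammar)
# 'a starbucks' must stay a single token to match its terminal
for tree in parser.parse(['find', 'me', 'a starbucks']):
    print(tree)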

Example 15: generate


from nltk.parse.generate import generate #, demo_grammar
from nltk import CFG


demo_grammar = """
  S -> NP VP
  NP -> Det N
  PP -> P NP
  VP -> 'slept' | 'saw' NP | 'walked' PP
  Det -> 'the' | 'a'
  N -> 'man' | 'park' | 'dog'
  P -> 'in' | 'with'
"""
grammar = CFG.fromstring(demo_grammar)
print(grammar)


# Join words and generate sentences from the grammar (up to n results)
for sentence in generate(grammar, n=12):
    print(' '.join(sentence))

'''
Notes: 
Need to symbolize the grammar
Have the machine process the language
Need to integrate with Markov chain - file 'agiliq-markov.py'
'''
for sentence in generate(grammar, depth=4):
    print(' '.join(sentence))
Developer: johnsonbui, Project: Hermes-Python-Proto, Lines of code: 31, Source file: grammar_test.py


Note: The nltk.CFG class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright in the source code remains with the original authors. For redistribution and use, please refer to the corresponding project's license. Do not reproduce without permission.