

Python TextBlob.parse Method Code Examples

This article collects typical usage examples of the textblob.TextBlob.parse method in Python. If you are wondering what TextBlob.parse does, how to call it, or what real-world uses look like, the selected code examples below should help. You can also explore further usage examples of the containing class, textblob.TextBlob.


The following shows 8 code examples of the TextBlob.parse method, sorted by popularity by default.
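Before the examples, here is a minimal sketch of what TextBlob.parse produces with the default PatternParser; the sample sentence and the exact output shown are illustrative.

from textblob import TextBlob

blob = TextBlob("I eat pizza with a fork.")
print(blob.parse())
# prints roughly: I/PRP/B-NP/O eat/VBP/B-VP/O pizza/NN/B-NP/O with/IN/B-PP/B-PNP
#                 a/DT/B-NP/I-PNP fork/NN/I-NP/I-PNP ././O/O
# i.e. each token is rendered as word/POS-tag/chunk-tag/PNP-role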

Example 1: getEntities

# Required module import: from textblob import TextBlob [as alias]
# Or: from textblob.TextBlob import parse [as alias]
from textblob import TextBlob
from pattern.en import tag

def getEntities(parser, tweet, xEntities):
	try:
		# run three taggers over the tweet: spaCy, TextBlob's pattern parser, and pattern.en.tag
		spacyParsedObject = parser(tweet)
		sentence = TextBlob(tweet)
		textblobTaggedObject = sentence.parse().split()
		patterntaggedObject = tag(tweet, tokenize=True)
		# collect proper nouns, nouns, and pronouns found by pattern.en.tag
		for word, wordtag in patterntaggedObject:
			if wordtag in ("NNP", "NN", "PRP"):
				v = str(word).strip()
				if v not in xEntities:
					xEntities[v] = str(wordtag)
		# parse().split() yields one list per sentence, each token being [word, POS, chunk, ...]
		for taggedObject in textblobTaggedObject:
			for token in taggedObject:
				word, wordtag = token[0], token[1]
				if wordtag in ("NNP", "NN", "PRP"):
					v = str(word).strip()
					if v not in xEntities:
						xEntities[v] = str(wordtag)
		# spaCy tokens expose their fine-grained POS tag as .tag_
		for word in spacyParsedObject:
			if word.tag_ in ("NNP", "NN", "PRP"):
				v = str(word).strip()
				if v not in xEntities:
					xEntities[v] = str(word.tag_)
		return xEntities
	except Exception as e:
		# on failure the exception object itself is returned (original behaviour)
		return e
		
Author: project-spinoza-dev, Project: tsakpy, Lines: 32, Source file: getEntities.py
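A hypothetical driver for getEntities, assuming a spaCy pipeline is passed as the `parser` argument; the model name, tweet text, and expected output are illustrative, not part of the original project (pattern must also be installed for `tag`).

import spacy

nlp = spacy.load("en_core_web_sm")          # spaCy pipeline passed as `parser`
entities = getEntities(nlp, "Barack Obama visited Berlin today", {})
print(entities)
# roughly: {'Barack': 'NNP', 'Obama': 'NNP', 'Berlin': 'NNP', 'today': 'NN'}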

Example 2: tag_documents_text

# Required module import: from textblob import TextBlob [as alias]
# Or: from textblob.TextBlob import parse [as alias]
from textblob import TextBlob
# PerceptronTagger's import path depends on the setup; it historically shipped
# as `from textblob_aptagger import PerceptronTagger`

def tag_documents_text(client):
	documents = client['cornell']['documents']
	for doc in documents.find():
		# POS-tag the stored text and keep the chunked parse alongside it
		blob = TextBlob(doc['text'], pos_tagger=PerceptronTagger())
		parsed_blob = blob.parse()
		# note: parse() uses the blob's parser (PatternParser by default); the pos_tagger above affects .tags
		documents.update({'name': doc['name']}, {'$set': {'parsed_perceptron': parsed_blob}})
Author: matheuscas, Project: fuzzy_opinion_mining, Lines: 9, Source file: model.py
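A minimal, MongoDB-free sketch of the same idea; PatternTagger stands in for PerceptronTagger here, and the document dict is purely illustrative. Converting the parse to a plain string is roughly what ends up stored in the parsed_perceptron field.

from textblob import TextBlob
from textblob.taggers import PatternTagger

doc = {"name": "review_001", "text": "The movie was surprisingly good."}
blob = TextBlob(doc["text"], pos_tagger=PatternTagger())
parsed = str(blob.parse())
print(doc["name"], "->", parsed)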

Example 3: extract_trigrams

# Required module import: from textblob import TextBlob [as alias]
# Or: from textblob.TextBlob import parse [as alias]
from textblob import TextBlob

def extract_trigrams(client):
	documents = client['cornell']['documents']
	for doc in documents.find():
		blob = TextBlob(doc['text'])
		valid_trigrams = []
		for s in blob.sentences:
			# re-wrap the raw sentence, parse it, and take trigrams of the
			# resulting word/POS/chunk/PNP tokens
			sentence = TextBlob(s.dict['raw'])
			sentence = TextBlob(sentence.parse())
			trigrams = sentence.ngrams(n=3)
			# get_valid_trigrams() is a project-level filter defined elsewhere
			valid_trigrams = valid_trigrams + get_valid_trigrams(trigrams)
		documents.update({'name': doc['name']}, {'$set': {'trigrams': valid_trigrams}})
Author: matheuscas, Project: fuzzy_opinion_mining, Lines: 14, Source file: model.py
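A standalone sketch of the trigram step: because the parse() output is wrapped in a new TextBlob, each element of an n-gram is a word/POS/chunk/PNP token rather than a plain word (sentence and output are illustrative).

from textblob import TextBlob

sentence = TextBlob("The plot is thin but the acting is great.")
tagged = TextBlob(str(sentence.parse()))
for trigram in tagged.ngrams(n=3):
    print(trigram)
# first trigram looks roughly like: ['The/DT/B-NP/O', 'plot/NN/I-NP/O', 'is/VBZ/B-VP/O']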

Example 4: TextBlob

# Required module import: from textblob import TextBlob [as alias]
# Or: from textblob.TextBlob import parse [as alias]
# maybe need more than two headlines
# print(sys.argv[1])
# print(sys.argv[2])

# headlines 1 and 2 - analyze, mix and send back to node
# blob = TextBlob(sys.argv[1])
# print(blob.tags)
# blob2 = TextBlob(sys.argv[2])
# print(blob2.tags)

# `news`, `extractor`, and the accumulator lists are created earlier in the script
for i, val in enumerate(news):
    headline = news[i]['title']
    headlines.append(headline)
    # extract noun phrases and a full parse for every headline
    headblob = TextBlob(headline, np_extractor=extractor)
    headblobs.append(headblob.noun_phrases)
    parsed = headblob.parse()
    headParsed.append(parsed)


# for item in headParsed:
#     print(item)

# get the first noun phrase from each headline and swap them
# grab a random noun phrase from each headline
h1i = int(random.random() * 20)      # pick one of (presumably) 20 headlines
h1 = headlines[h1i]
r1 = int(random.random() * len(headblobs[h1i]))
np1 = headblobs[h1i][r1]             # a random noun phrase from that headline
# capitalize the noun phrase
# np1 = ' '.join(word[0].upper() + word[1:] for word in np1.split())
Author: studiocjp, Project: lsf, Lines: 33, Source file: headline.py
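The snippet above relies on objects created earlier in the script; a hedged sketch of that missing setup might look like this (ConllExtractor is a guess at the noun-phrase extractor, and the news item is invented for illustration).

import random
from textblob import TextBlob
from textblob.np_extractors import ConllExtractor

extractor = ConllExtractor()
news = [{"title": "Local team wins championship after dramatic final"}]
headlines, headblobs, headParsed = [], [], []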

Example 5: parse_second

# Required module import: from textblob import TextBlob [as alias]
# Or: from textblob.TextBlob import parse [as alias]
    # (fragment: the matching `try:` for this block sits above the excerpt)
    except:
        first_attempt = ""
    if first_attempt != "":
        return first_attempt
    try:
        # fall back to a second parsing strategy, then to keyword heuristics
        second_attempt = parse_second(q, bigblob, uncommon, mode)
    except:
        second_attempt = ""
    if second_attempt != "":
        return second_attempt
    third_attempt = b.backup_answer(q, n.nps, raw)
    if third_attempt != "":
        return third_attempt
    if len(n.nps) > 0:
        return n.nps[0]
    else:
        return "Yes"  # guess

if __name__ == "__main__":
    # extractor, b, n, and p are module-level objects from the original project
    q = input("Ask a question\n")
    q = TextBlob(q, np_extractor=extractor)
    print(q.noun_phrases)
    noun_phrases, idxs = n.get_nps_from_blob(q)
    print(noun_phrases)
    print(q.words)
    first = noun_phrases[0]
    print(n.get_np_tags(first, q))
    print(q.tags)
    print(q.parse())
    # print(p.extract_generic_relations(q))
Author: tomshen, Project: sherlock, Lines: 32, Source file: question_answer_util.py
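The helpers b, n, p, and parse_second belong to the original project; the TextBlob calls themselves can be exercised in isolation, as in this minimal sketch (the question text is illustrative).

from textblob import TextBlob

q = TextBlob("Who wrote the novel Moby-Dick?")
print(q.noun_phrases)   # noun phrases from the default extractor
print(q.tags)           # (word, POS) pairs
print(q.parse())        # word/POS/chunk/PNP string from the pattern parser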

Example 6: TextBlob

# Required module import: from textblob import TextBlob [as alias]
# Or: from textblob.TextBlob import parse [as alias]
from textblob import TextBlob
from nltk import Tree
from nltk.grammar import CFG
from nltk.parse.generate import generate, demo_grammar
import string
import re
import wordpolarity  # project-local helper module

s = "I do bad things for good people."
# strip punctuation (Python 3 form of s.translate(None, string.punctuation))
mystring = s.translate(str.maketrans('', '', string.punctuation))

b = TextBlob(mystring)
print(b.sentiment.polarity)


# parse() yields space-separated word/POS/chunk/PNP tokens
g = str(b.parse())
x = g.split()

word_list = []

mystr = mystring.split()

space_list = x
main_list = []
# print(space_list)

# split each token on '/' so main_list holds [word, POS, chunk, PNP] lists
for word in space_list:
    new_list = word.split("/")
    main_list.append(new_list)

a = {}
Author: ak795, Project: Academic-Project, Lines: 33, Source file: situation.py
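For reference, a compact sketch of what the splitting above leaves in main_list (sentence and tags are illustrative).

from textblob import TextBlob

b = TextBlob("I do bad things for good people")
main_list = [token.split("/") for token in str(b.parse()).split()]
print(main_list[:2])
# roughly: [['I', 'PRP', 'B-NP', 'O'], ['do', 'VBP', 'B-VP', 'O']]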

Example 7: check_sarc

# Required module import: from textblob import TextBlob [as alias]
# Or: from textblob.TextBlob import parse [as alias]
from collections import defaultdict
from textblob import TextBlob
from textblob.parsers import PatternParser

def check_sarc(tweet):
	# chunk the tweet into word/POS/chunk/PNP tokens
	blob = TextBlob(tweet, parser=PatternParser())
	tokens = blob.parse().split(' ')
	dic = defaultdict(list)	# stores all phrases by chunk category
	temp = ''
	ctag = ''
	phrases = []	# list of (chunk tag, phrase) pairs
	for t in tokens:
		word = t.split('/')[0]
		chunk = t.split('/')[2]
		if chunk == 'O':
			# token outside any chunk: close the current phrase
			if temp:
				phrases.append((ctag, temp))
			dic[chunk].append(temp)
			temp = word + ' '
			ctag = chunk
		elif 'B-' in chunk:
			# beginning of a new chunk: close the previous phrase
			if temp:
				phrases.append((ctag, temp))
			temp = word + ' '
			dic[chunk.split('-')[1]].append(temp)
			ctag = chunk.split('-')[1]
		elif 'I-' in chunk:
			# continuation of the current chunk
			dic[chunk.split('-')[1]][-1] += word + ' '
			temp += word + ' '
			ctag = chunk.split('-')[1]
		else:
			pass
	if temp:
		phrases.append((ctag, temp))
	SF = []		# fragments built from noun/adjective phrases
	sf = []		# fragments built around verb phrases
	for i in phrases:
		if i[0] in ['NP', 'ADJP']:	# original compared against the typo 'ADjP'
			SF.append(i[1])
		elif i[0] == 'VP':
			sf.append(i[1])
	# two-phrase combinations
	for i in range(len(phrases) - 1):
		if phrases[i][0] == 'NP' and phrases[i+1][0] == 'VP':
			SF.append(phrases[i][1] + ' ' + phrases[i+1][1])
		elif phrases[i][0] == 'ADVP' and phrases[i+1][0] == 'VP':
			sf.append(phrases[i][1] + ' ' + phrases[i+1][1])
		elif phrases[i][0] == 'VP' and phrases[i+1][0] == 'ADVP':
			sf.append(phrases[i][1] + ' ' + phrases[i+1][1])
		elif phrases[i][0] == 'ADJP' and phrases[i+1][0] == 'VP':
			sf.append(phrases[i][1] + ' ' + phrases[i+1][1])
		elif phrases[i][0] == 'VP' and phrases[i+1][0] == 'NP':
			sf.append(phrases[i][1] + ' ' + phrases[i+1][1])
	# three-phrase combinations
	for i in range(len(phrases) - 2):
		if phrases[i][0] == 'VP' and phrases[i+1][0] == 'ADVP' and phrases[i+2][0] == 'ADJP':
			# last term was phrases[i+1][1] in the original (likely a typo)
			sf.append(phrases[i][1] + ' ' + phrases[i+1][1] + ' ' + phrases[i+2][1])
		elif phrases[i][0] == 'VP' and phrases[i+1][0] == 'ADJP' and phrases[i+2][0] == 'NP':
			sf.append(phrases[i][1] + ' ' + phrases[i+1][1] + ' ' + phrases[i+2][1])
		elif phrases[i][0] == 'ADVP' and phrases[i+1][0] == 'ADJP' and phrases[i+2][0] == 'NP':
			sf.append(phrases[i][1] + ' ' + phrases[i+1][1] + ' ' + phrases[i+2][1])
	print(SF)
	print(sf)
	PSF = []	# positive noun/adjective fragments
	NSF = []	# negative noun/adjective fragments
	psf = []	# positive verb fragments
	nsf = []	# negative verb fragments
	for i in SF:
		blob = TextBlob(i)
		if blob.polarity > 0:
			PSF.append(i)
		elif blob.polarity < 0:
			NSF.append(i)
	for i in sf:
		blob = TextBlob(i)
		if blob.polarity > 0:
			psf.append(i)
		elif blob.polarity < 0:
			nsf.append(i)	# original appended to psf here, leaving nsf always empty
	print(PSF)
	print(NSF)
	print(psf)
	print(nsf)
	# sarcasm signal: positive and negative fragments of opposite kinds co-occur
	if (PSF and nsf) or (psf and NSF):
		return 1
	else:
		return 0
Author: rkp768, Project: BigSarc, Lines: 84, Source file: pblga.py
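A hypothetical call to check_sarc; the tweet is invented for illustration. The function returns 1 when positively and negatively charged phrase groups of opposite kinds co-occur, which the author treats as a sarcasm signal.

print(check_sarc("I just love being stuck in traffic for three hours"))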

Example 8: analyze

# Required module import: from textblob import TextBlob [as alias]
# Or: from textblob.TextBlob import parse [as alias]
    def analyze(self, text):
        # wrap the raw string in a TextBlob and return its chunked parse
        text = TextBlob(text)
        return text.parse()
Author: benhoff, Project: languageprocessing, Lines: 5, Source file: parser.py
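Hypothetical usage, assuming analyze is a method of a small wrapper class that is not shown in the excerpt (the class name here is invented for illustration).

from textblob import TextBlob

class ParserWrapper:
    def analyze(self, text):
        text = TextBlob(text)
        return text.parse()

print(ParserWrapper().analyze("TextBlob parsing in one line"))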


Note: The textblob.TextBlob.parse examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective authors, and copyright remains with the original authors; consult each project's License before distributing or using the code. Do not reproduce this article without permission.