This page collects typical usage examples of the Python method glove.Glove.getVecOfTweet. If you are wondering what Glove.getVecOfTweet does, how to call it, or are looking for concrete examples of its use, the hand-picked code samples here may help. You can also browse further usage examples of the containing class, glove.Glove.
One code example of the Glove.getVecOfTweet method is shown below; examples are sorted by popularity by default. You can upvote the examples you like or find useful, and your feedback helps the system recommend better Python code examples.
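Before the full example, here is a minimal sketch of the call pattern that Example 1 relies on: an already-built glove.Glove wrapper exposes getVecOfTweet(tokens), which maps a tokenized tweet to a fixed-length vector. Everything else in the sketch is an assumption made for illustration; in particular, the constructor arguments of this project-local Glove class are not shown in the excerpt, and the sample tweets are invented.

import numpy as np
from glove import Glove  # the project's own wrapper class, as imported in Example 1

# NOTE: the no-argument constructor is a placeholder; the excerpt does not show
# how the wrapper is actually built, only that getVecOfTweet(tokens) returns a vector.
glove_vec_model = Glove()

# tokenized tweets, matching the tweet[0] token lists used in Example 1
tweets = [['great', 'game', 'tonight'], ['worst', 'service', 'ever']]

# one vector per tweet, stacked into a feature matrix as Example 1 does with np.asarray
vecs = np.asarray([glove_vec_model.getVecOfTweet(tokens) for tokens in tweets])
print(vecs.shape)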
Example 1: __init__
# Required import: from glove import Glove [as alias]
# Or: from glove.Glove import getVecOfTweet [as alias]
#......... (part of the code is omitted here) .........
            features.append(topics_f)
        # each of the branches below builds one feature block for the current
        # feature name `feat` and appends it to the `features` list
        elif feat == 'topicVecs':
            # look up the precomputed topic vector for each tweet (indexed by tweet[1])
            topics = []
            for tweet in dataset:
                topics.append(self.topicVecs[tweet[1]])
            features.append(np.asarray(topics))
        elif feat == 'lexiconsbyword':
            # three lexicon scores per corpus word (subjectivity, polarity, Liu sentiment),
            # accumulated per tweet
            lex_f = np.zeros((len(dataset), len(self.corpus) * 3))
            for count, sample in enumerate(dataset):
                for word in sample[0]:
                    lex_f[count][self.getWordIndex(word)] += self.getSubjectivity(word, sample[0])
                    lex_f[count][self.getWordIndex(word) + len(self.corpus)] += self.getPolarity(word, sample[0])
                    lex_f[count][self.getWordIndex(word) + 2 * len(self.corpus)] += self.getLiuSentiment(word, sample[0])
            features.append(lex_f)
        elif feat == 'pos':
            # POS tagging needs the actual tweet text, not the tokenized list
            if mode == 'train':
                features.append(self.getPOSTags(self.data.trainTweetsText))
            elif mode == 'test':
                features.append(self.getPOSTags(self.data.testTweetsText))
        elif feat == 'words2vec':
            vecs = []
            for tweet in dataset:
                vecs.append(self.getWord2Vec(tweet[0]))
            features.append(np.asarray(vecs))
        elif feat == 'glove':
            # one GloVe vector per tweet, computed from its tokens
            vecs = []
            for tweet in dataset:
                vecs.append(self.glove_vec_model.getVecOfTweet(tweet[0]))
            features.append(np.asarray(vecs))
        elif feat == 'polarity':
            vecs = []
            for tweet in dataset:
                vecs.append(self.clusters.getPolarity(self.getWords2Vectors(tweet[0])))
            features.append(np.asarray(vecs))
        elif feat == 'subjectivity':
            vecs = []
            for tweet in dataset:
                vecs.append(self.clusters.getSubjectivity(self.getWords2Vectors(tweet[0])))
            features.append(np.asarray(vecs))
        elif feat == 'sentiment':
            vecs = []
            for tweet in dataset:
                vecs.append(self.clusters.getSentiment(self.getWords2Vectors(tweet[0])))
            features.append(np.asarray(vecs))
        elif feat == 'clusteredLexicons':
            # sentiment, subjectivity and polarity scores from the clustered word vectors,
            # appended as three separate feature blocks
            vecs = [[], [], []]
            for tweet in dataset:
                wordsAsVecs = self.getWords2Vectors(tweet[0])
                vecs[0].append(self.clusters.getSentiment(wordsAsVecs))
                vecs[1].append(self.clusters.getSubjectivity(wordsAsVecs))
                vecs[2].append(self.clusters.getPolarity(wordsAsVecs))
            # allvecs = np.concatenate(tuple(vecs))
            for vecsi in vecs:
                # print 'Adding ', np.asarray(vecsi).shape
                features.append(np.asarray(vecsi))