本文整理汇总了Python中wordcloud.WordCloud.font_path方法的典型用法代码示例。如果您正苦于以下问题:Python WordCloud.font_path方法的具体用法?Python WordCloud.font_path怎么用?Python WordCloud.font_path使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类wordcloud.WordCloud
的用法示例。
在下文中一共展示了WordCloud.font_path方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: make_word_cloud
# 需要导入模块: from wordcloud import WordCloud [as 别名]
# 或者: from wordcloud.WordCloud import font_path [as 别名]
def make_word_cloud(content):
    """Render a masked word cloud from word frequencies, save it, and show it.

    Builds a WordCloud shaped by the module-level ``mask_img`` image, renders
    *content* into it, writes the result to ``img/output.png`` next to this
    file, and finally displays both the cloud and the raw mask with matplotlib.

    -> content: mapping of word -> frequency, passed straight to
       ``WordCloud.generate_from_frequencies``.

    NOTE(review): relies on module-level ``mask_img`` (mask image path) and
    ``abs_font_dir`` (absolute path to a .ttf font) being defined — confirm
    against the module top, which is not visible here.
    """
    # Resolve mask and output paths relative to this file's directory.
    d = path.dirname(__file__)
    # Load the mask image as an array; white regions are excluded from drawing.
    mask = np.array(Image.open(path.join(d, mask_img)))
    wc = WordCloud(background_color="white", max_words=1000, mask=mask)
    # Absolute path to the font file — needed so non-Latin (e.g. CJK) words render.
    # brush options: {'shoujin_brush.ttf','Japan_brush.ttf','qingke_fangzheng.ttf','KouzanBrushFont.ttf'}
    wc.font_path = abs_font_dir
    wc.generate_from_frequencies(content)
    # Persist the rendered cloud next to this file.
    wc.to_file(path.join(d, "img/output.png"))
    # Show the cloud, then the raw mask in a second figure for comparison.
    plt.imshow(wc)
    plt.axis("off")
    plt.figure()
    plt.imshow(mask, cmap=plt.cm.gray)
    plt.axis("off")
    plt.show()
示例2: unknowncoll
# 需要导入模块: from wordcloud import WordCloud [as 别名]
# 或者: from wordcloud.WordCloud import font_path [as 别名]
def unknowncoll(filename='unknownwords.p', stem=False):
    """
    Word cloud from sentiment analysis.

    Finds the bi-collocation of unknown words (words without sentiment)
    and displays the 10 most common words based on frequency in a word-cloud,
    colored green for words seen mostly in positive sentiments and red
    for the opposite. Comparison is made on all comments concatenated.

    -> filename: name of the file to load unknown words from
    -> stem: stem the words
    """
    bigram_measures = nltk.collocations.BigramAssocMeasures()
    subreddits = scraper.load_data('sub-reddits.txt')
    fullcomment = []
    print('building comment')
    for name, data in subreddits.items():
        for sub_id, sub in data.items():
            fullcomment += [fixer(comment, True, stem).split() for comment in sub.comments]
    print('getting unknowns')
    unknownwords = unknownsent(filename)
    # Flatten the per-comment word lists into one token stream.
    fullcomment = [word for comment in fullcomment for word in comment]
    basefinder = BigramCollocationFinder.from_words(fullcomment)
    count = 0
    for unknown, unknownscore in unknownwords:
        # Copy the base finder so each word's ngram filter starts fresh
        # instead of accumulating across iterations.
        finder = copy.copy(basefinder)
        print('\n' + unknown)
        # Keep only bigrams that contain the unknown word.
        finder.apply_ngram_filter(lambda w1, w2: unknown != w1 and unknown != w2)
        wordcloud = WordCloud()
        wordcloud.font_path = 'C:\Windows\Fonts\comic.ttf'
        # Trick the wordcloud into initializing so fit_words can be used below.
        wordcloud.generate('generate')
        # score_ngrams returns (bigram, score) pairs sorted by descending score;
        # keep the 10 most frequent.
        colls = finder.score_ngrams(bigram_measures.raw_freq)[:10]
        if not colls:
            # No collocations found for this word — nothing to draw.
            continue
        # BUGFIX: normalize against the TOP score. The original used
        # colls[1][1] (the second-highest frequency), which produced
        # normalized scores > 1 for the best bigram and raised IndexError
        # when only one collocation existed.
        maximum = colls[0][1]
        # Build (word, score) pairs from whichever side of each bigram is
        # not the unknown word itself.
        cloudwords = [(word, score) for ((word, _), score) in colls if word != unknown]
        cloudwords += [(word, score) for ((_, word), score) in colls if word != unknown]
        # Normalize the scores to the [0, 1] range expected by fit_words.
        cloudwords = [(word, score / maximum) for (word, score) in cloudwords]
        # Tricking part 2: feed the custom (word, weight) pairs directly.
        wordcloud.fit_words(cloudwords)
        wordcloud.to_image()
        # Green for net-positive sentiment words, red otherwise.
        if unknownscore > 0:
            wordcloud = wordcloud.recolor(color_func=green_color_func, random_state=3)
        else:
            wordcloud = wordcloud.recolor(color_func=red_color_func, random_state=3)
        count += 1
        plt.figure(count)
        plt.title(unknown)
        plt.imshow(wordcloud)
        plt.axis("off")
        plt.savefig('plots/' + unknown + '.png', bbox_inches='tight')
        plt.close()