本文整理汇总了Python中nltk.corpus.gutenberg.words方法的典型用法代码示例。如果您正苦于以下问题:Python gutenberg.words方法的具体用法?Python gutenberg.words怎么用?Python gutenberg.words使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块nltk.corpus.gutenberg的用法示例。
在下文中一共展示了gutenberg.words方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: app
# 需要导入模块: from nltk.corpus import gutenberg [as 别名]
# 或者: from nltk.corpus.gutenberg import words [as 别名]
def app():
    """Plot the word-frequency distribution for Moby Dick."""
    # Wrap the Gutenberg token stream in an nltk Text before plotting.
    moby_tokens = Text(gutenberg.words('melville-moby_dick.txt'))
    plot_word_freq_dist(moby_tokens)
示例2: dispersion_plot
# 需要导入模块: from nltk.corpus import gutenberg [as 别名]
# 或者: from nltk.corpus.gutenberg import words [as 别名]
def dispersion_plot(text, words, ignore_case=False, title="Lexical Dispersion Plot"):
    """
    Generate a lexical dispersion plot.

    :param text: The source text
    :type text: list(str) or enum(str)
    :param words: The target words
    :type words: list of str
    :param ignore_case: flag to set if case should be ignored when searching text
    :type ignore_case: bool
    :param title: title of the plot
    :type title: str
    """
    try:
        from matplotlib import pylab
    except ImportError:
        # Fixed: the two fragments previously concatenated without a space
        # ("installed.See ...").
        raise ValueError('The plot function requires matplotlib to be installed. '
                         'See http://matplotlib.org/')
    text = list(text)
    # Work on a reversed copy: the original reversed the caller's list in
    # place. Reversing puts the first target word at the top of the y axis.
    words = list(reversed(words))
    if ignore_case:
        words_to_comp = list(map(str.lower, words))
        text_to_comp = list(map(str.lower, text))
    else:
        words_to_comp = words
        text_to_comp = text
    # Map each target word to its y position(s) for O(1) lookup per token
    # instead of rescanning the whole word list at every text offset.
    # A list of positions preserves the original output for duplicate words.
    positions = {}
    for y, w in enumerate(words_to_comp):
        positions.setdefault(w, []).append(y)
    points = [(x, y)
              for x, token in enumerate(text_to_comp)
              for y in positions.get(token, ())]
    if points:
        x, y = list(zip(*points))
    else:
        x = y = ()
    pylab.plot(x, y, "b|", scalex=.1)
    pylab.yticks(list(range(len(words))), words, color="b")
    pylab.ylim(-1, len(words))
    pylab.title(title)
    pylab.xlabel("Word Offset")
    pylab.show()
示例3: get_nltk_freq_words
# 需要导入模块: from nltk.corpus import gutenberg [as 别名]
# 或者: from nltk.corpus.gutenberg import words [as 别名]
def get_nltk_freq_words():
    """Collect words seen more than 10 times in the Brown and Gutenberg corpora.

    More corpora: https://www.nltk.org/book/ch02.html

    :return: (freq_words, freq_dict) — the list of words with count > 10,
        and the full FreqDist mapping word -> count.
    """
    counts = nltk.FreqDist(brown.words())
    # Fold every Gutenberg text into the same distribution.
    for gutenberg_id in gutenberg.fileids():
        counts.update(nltk.FreqDist(gutenberg.words(gutenberg_id)))
    frequent = [word for word, count in counts.items() if count > 10]
    return frequent, counts
示例4: app
# 需要导入模块: from nltk.corpus import gutenberg [as 别名]
# 或者: from nltk.corpus.gutenberg import words [as 别名]
def app():
    """Display a word-frequency distribution plot for Moby Dick."""
    moby = nltk.Text(gutenberg.words('melville-moby_dick.txt'))
    plot_word_freq_dist(moby)
示例5: dispersion_plot
# 需要导入模块: from nltk.corpus import gutenberg [as 别名]
# 或者: from nltk.corpus.gutenberg import words [as 别名]
def dispersion_plot(text, words, ignore_case=False):
    """
    Generate a lexical dispersion plot.

    :param text: The source text
    :type text: list(str) or enum(str)
    :param words: The target words
    :type words: list of str
    :param ignore_case: flag to set if case should be ignored when searching text
    :type ignore_case: bool
    """
    try:
        import pylab
    except ImportError:
        # Fixed: the two fragments previously concatenated without a space.
        raise ValueError('The plot function requires the matplotlib package (aka pylab). '
                         'See http://matplotlib.sourceforge.net/')
    text = list(text)
    # Work on a reversed copy instead of reversing the caller's list in place.
    words = list(reversed(words))
    if ignore_case:
        # list(...) is required on Python 3, where map() returns a lazy
        # iterator that has no len() and cannot be indexed — the original
        # Python-2-era code crashed in the comprehension below.
        words_to_comp = list(map(str.lower, words))
        text_to_comp = list(map(str.lower, text))
    else:
        words_to_comp = words
        text_to_comp = text
    points = [(x, y) for x in range(len(text_to_comp))
              for y in range(len(words_to_comp))
              if text_to_comp[x] == words_to_comp[y]]
    if points:
        x, y = zip(*points)
    else:
        x = y = ()
    pylab.plot(x, y, "b|", scalex=.1)
    # list(range(...)) keeps the ticks explicit on Python 3 as well.
    pylab.yticks(list(range(len(words))), words, color="b")
    pylab.ylim(-1, len(words))
    pylab.title("Lexical Dispersion Plot")
    pylab.xlabel("Word Offset")
    pylab.show()
示例6: setup_class
# 需要导入模块: from nltk.corpus import gutenberg [as 别名]
# 或者: from nltk.corpus.gutenberg import words [as 别名]
def setup_class(cls):
    """Load the Moby Dick token stream once for the whole test class."""
    moby_dick_tokens = gutenberg.words('melville-moby_dick.txt')
    cls.corpus = moby_dick_tokens
示例7: unusual_words
# 需要导入模块: from nltk.corpus import gutenberg [as 别名]
# 或者: from nltk.corpus.gutenberg import words [as 别名]
def unusual_words(text):
    """Return the sorted lowercase words of *text* that are absent from the
    NLTK English wordlist. Only alphabetic tokens are considered."""
    vocab = {token.lower() for token in text if token.isalpha()}
    known = {entry.lower() for entry in nltk.corpus.words.words()}
    # Set difference: words in the text but not in the English wordlist.
    return sorted(vocab - known)
示例8: content_fraction
# 需要导入模块: from nltk.corpus import gutenberg [as 别名]
# 或者: from nltk.corpus.gutenberg import words [as 别名]
def content_fraction(text):
    """Return the fraction of tokens in *text* that are not English stopwords.

    :param text: sequence of word tokens (must support len())
    :return: float in [0, 1]
    :raises ZeroDivisionError: if *text* is empty (original behavior kept)
    """
    # A set makes each membership test O(1); the original checked every
    # token against a ~180-element list.
    stopwords = set(nltk.corpus.stopwords.words('english'))
    content = [w for w in text if w.lower() not in stopwords]
    return len(content) / len(text)
示例9: dispersion_plot
# 需要导入模块: from nltk.corpus import gutenberg [as 别名]
# 或者: from nltk.corpus.gutenberg import words [as 别名]
def dispersion_plot(text, words, ignore_case=False, title="Lexical Dispersion Plot"):
    """
    Generate a lexical dispersion plot.

    :param text: The source text
    :type text: list(str) or enum(str)
    :param words: The target words
    :type words: list of str
    :param ignore_case: flag to set if case should be ignored when searching text
    :type ignore_case: bool
    :param title: title of the plot
    :type title: str
    """
    try:
        from matplotlib import pylab
    except ImportError:
        raise ValueError(
            # Fixed: the fragments previously concatenated without a space.
            'The plot function requires matplotlib to be installed. '
            'See http://matplotlib.org/'
        )
    text = list(text)
    # Reverse a copy: the original called words.reverse(), mutating the
    # caller's list. Reversal puts the first word at the top of the y axis.
    words = list(reversed(words))
    if ignore_case:
        words_to_comp = list(map(str.lower, words))
        text_to_comp = list(map(str.lower, text))
    else:
        words_to_comp = words
        text_to_comp = text
    # word -> list of y positions: O(1) per token instead of scanning all
    # target words at every offset; a list preserves duplicate-word output.
    positions = {}
    for y, w in enumerate(words_to_comp):
        positions.setdefault(w, []).append(y)
    points = [
        (x, y)
        for x, token in enumerate(text_to_comp)
        for y in positions.get(token, ())
    ]
    if points:
        x, y = list(zip(*points))
    else:
        x = y = ()
    pylab.plot(x, y, "b|", scalex=0.1)
    pylab.yticks(list(range(len(words))), words, color="b")
    pylab.ylim(-1, len(words))
    pylab.title(title)
    pylab.xlabel("Word Offset")
    pylab.show()