

Python gutenberg.sents Function Code Examples

This article collects typical usage examples of the Python function nltk.corpus.gutenberg.sents. If you are wondering what exactly sents does, how to call it, or want to see it used in real code, the curated examples below should help.


Below are 15 code examples of the sents function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
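Before the examples, here is a minimal sketch of the basic call pattern (assuming the Gutenberg data has been downloaded, e.g. via nltk.download('gutenberg')): gutenberg.sents(fileid) returns the sentences of one corpus file as a list-like view, where each sentence is a list of word tokens.

from nltk.corpus import gutenberg

# List the available corpus files, then load one file's sentences.
print(gutenberg.fileids())
sentences = gutenberg.sents('austen-emma.txt')
print(len(sentences))   # number of sentences in the file
print(sentences[0])     # first sentence, as a list of word tokens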

Example 1: exercise_gutenberg

import nltk
from nltk.corpus import gutenberg

def exercise_gutenberg():
    # Print the list of files in the Gutenberg corpus
    print(gutenberg.fileids())

    # Pick one text: Jane Austen's "Emma"
    emma = gutenberg.words("austen-emma.txt")

    # Check the length of the book
    print(len(emma))

    # Wrap the tokens in an nltk.Text for concordance searches
    emma_text = nltk.Text(emma)
    emma_text.concordance("surprize")

    for file_id in gutenberg.fileids():
        chars_list = gutenberg.raw(file_id)
        words_list = gutenberg.words(file_id)
        sents_list = gutenberg.sents(file_id)

        # Total number of characters in the file
        num_chars = len(chars_list)
        # Total number of words in the file
        num_words = len(words_list)
        # Total number of sentences in the file
        num_sents = len(sents_list)
        # Number of distinct (case-folded) words in the file
        num_vocab = len(set(w.lower() for w in words_list))
        # Print average word length, average sentence length,
        # average occurrences per word, and the file name
        print(num_chars // num_words, num_words // num_sents,
              num_words // num_vocab, file_id)
Author: BurnellLiu, Project: LiuProject, Lines: 29, Source: chapter_02.py

Example 2: main

def main(num_couplets, num_syllables, rhyme_depth):
  # TEXTS, addSentence, getCouplet and pretty are defined elsewhere in poet.py
  for text in TEXTS:
    for sentence in gutenberg.sents(text):
      addSentence(sentence, rhyme_depth)

  for couplet_number in range(0, num_couplets):
    # Get a randomly selected couplet
    attempts = 0
    while True:
      couplet = getCouplet(num_syllables)
      if couplet is not None: break
      # Prevent an infinite loop if parameters are off
      attempts += 1
      if attempts == 1000: return
    couplet = [ pretty(line) for line in couplet ]

    # A little hack for adjusting punctuation and capitalization
    couplet[0] = couplet[0][0].upper() + couplet[0][1:]
    if couplet[0][-1] == '.' or couplet[0][-1] == ',':
      couplet[0] = couplet[0][:-1] + ','
      char = couplet[1][0].lower() if couplet[1][:2] != 'I ' else 'I'
      couplet[1] = char + couplet[1][1:]
    else:
      couplet[1] = couplet[1][0].upper() + couplet[1][1:]

    # Dump to stdout
    print(couplet[0])
    print(couplet[1])
Author: amshenoy, Project: permutation-poetry, Lines: 28, Source: poet.py

Example 3: get_book_sents

def get_book_sents(word_list):
    """Searches Jane Austen's 'Emma' for the words in the word list.
    The sentences are modified to highlight the found words by changing them to uppercase.
    Then the sentence number (in order from the book) is appended to the front
    of the sentence string.
    Returns a list of strings ("<sentence number> <sentence string>").
    """
    book = 'austen-emma.txt'
    book_sents = gutenberg.sents(book)
    sent_nums = set()
    sents_to_return = []
    s_count = 0
    for s in book_sents:
        s_count += 1
        s_str = " ".join(s)
        for w in word_list:
            if ' '+w+' ' in s_str.lower():
                if s_count not in sent_nums:
                    sent_nums.add(s_count)
                    s_str = s_str.replace(' '+w+' ', ' '+w.upper()+' ')
                    s_str = s_str.replace(' '+w.title()+' ', ' '+w.upper()+' ')
                    sents_to_return.append(str(s_count)+' '+s_str)
                else:
                    s_str = s_str.replace(' '+w+' ', ' '+w.upper()+' ')
                    s_str = s_str.replace(' '+w.title()+' ', ' '+w.upper()+' ')
                    sents_to_return[-1] = str(s_count)+' '+s_str
    return sents_to_return
Author: imladenoff, Project: lexploration, Lines: 27, Source: search.py

Example 4: tagged_sentences

import nltk
from nltk.corpus import gutenberg

def tagged_sentences(book):
    '''
    Generator yielding one POS-tagged sentence at a time, filtering out
    tokens tagged ':' or '-NONE-', which are anomalies among the words.
    '''
    for sentence in gutenberg.sents(book):
        yield [(word, tag) for word, tag in nltk.pos_tag(sentence)
               if tag not in (':', '-NONE-')]
Author: snakecharmer1024, Project: poetry, Lines: 8, Source: luau.py
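A hypothetical way to consume this generator (not part of the excerpt; any Gutenberg file id works):

# Print the first few (word, tag) pairs of the first tagged sentence.
for tagged in tagged_sentences('austen-emma.txt'):
    print(tagged[:5])
    break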

Example 5: gutenberg

def gutenberg():
    from nltk.corpus import gutenberg
    for t in gutenberg.fileids():
        num_chars = len(gutenberg.raw(t))
        num_words = len(gutenberg.words(t))
        num_sents = len(gutenberg.sents(t))
        num_vocab = len(set([w.lower() for w in gutenberg.words(t)]))
        print(int(num_chars/num_words), int(num_words/num_sents), int(num_words/num_vocab), t)
Author: kwdhd, Project: nlp, Lines: 8, Source: main.py

Example 6: plot_sentiment_flow

import matplotlib.pyplot as plt
from nltk.corpus import gutenberg

def plot_sentiment_flow(title):
    # partial_sentiment is defined elsewhere in SentimentAnalysis.py
    sents = gutenberg.sents(title)
    positive_flow = [partial_sentiment(x) for x in sents]
    negative_flow = [partial_sentiment(x, positive=False) for x in sents]
    plt.plot(range(len(sents)), positive_flow, label='Positive')
    plt.plot(range(len(sents)), negative_flow, label='Negative')
    plt.legend()  # show the line labels
    plt.ylabel('Sentiment Score')
    plt.xlabel(title)
    plt.show()
Author: chandlerzuo, Project: chandlerzuo.github.io, Lines: 9, Source: SentimentAnalysis.py

Example 7: gutenberg

def gutenberg():
    # Import locally so the corpus module is not shadowed by this function's name
    from nltk.corpus import gutenberg

    emma = gutenberg.words('austen-emma.txt')
    print(len(emma))

    print(gutenberg.fileids())

    macbeth_sentences = gutenberg.sents('shakespeare-macbeth.txt')
    print(macbeth_sentences[1037])
    longest_len = max(len(s) for s in macbeth_sentences)
    print([s for s in macbeth_sentences if len(s) == longest_len])

    for fileid in gutenberg.fileids():
        num_chars = len(gutenberg.raw(fileid))
        num_words = len(gutenberg.words(fileid))
        num_sents = len(gutenberg.sents(fileid))
        num_vocab = len(set(w.lower() for w in gutenberg.words(fileid)))
        print(int(num_chars/num_words), int(num_words/num_sents),
              int(num_words/num_vocab), fileid)
Author: AkiraKane, Project: Python, Lines: 19, Source: c02_text_corpora.py

Example 8: structure

from nltk.corpus import gutenberg

def structure():

    raw = gutenberg.raw("burgess-busterbrown.txt")
    print(raw[1:20])      # first few characters

    words = gutenberg.words("burgess-busterbrown.txt")
    print(words[1:20])    # first few word tokens

    sents = gutenberg.sents("burgess-busterbrown.txt")
    print(sents[1:20])    # first few sentences
Author: AkiraKane, Project: Python, Lines: 10, Source: c02_text_corpora.py

Example 9: page59

def page59():
    """Prints the longest sentence from Macbeth"""
    from nltk.corpus import gutenberg

    macbeth_sentences = gutenberg.sents("shakespeare-macbeth.txt")
    print("macbeth_sentences=", macbeth_sentences)
    print("macbeth_sentences[1037]=", macbeth_sentences[1037])
    longest_len = max(len(s) for s in macbeth_sentences)
    print("longest sentence=", end=" ")
    print([s for s in macbeth_sentences if len(s) == longest_len])
Author: andreoliwa, Project: nlp-book, Lines: 10, Source: book_examples.py

Example 10: create_model_from_NLTK

from os.path import isfile

def create_model_from_NLTK():
    # create_model is defined elsewhere in semantics.py
    filepath = "nltkcorpus.txt"
    if isfile(filepath):
        return create_model(filepath=filepath, save=False)
    else:
        from nltk.corpus import reuters, brown, gutenberg
        # Corpus views concatenate lazily, so this does not load
        # every sentence into memory up front.
        sents = reuters.sents() + brown.sents()
        for fid in gutenberg.fileids():
            sents += gutenberg.sents(fid)
        return create_model(sentences=sents, savename=filepath)
Author: ieaalto, Project: CCProject, Lines: 11, Source: semantics.py

Example 11: fun02

from nltk.corpus import gutenberg

def fun02():
    """fun02"""
    for fileid in gutenberg.fileids():
        num_chars = len(gutenberg.raw(fileid))
        num_words = len(gutenberg.words(fileid))
        num_sents = len(gutenberg.sents(fileid))
        num_vocab = len(set(w.lower() for w in gutenberg.words(fileid)))
        # average word length, average sentence length, and the
        # number of times each vocabulary item appears in the text
        print(int(num_chars/num_words), int(num_words/num_sents),
              int(num_words/num_vocab), fileid)
Author: gree2, Project: hobby, Lines: 11, Source: ch02.py

Example 12: page57

def page57():
    """Statistics from the Gutenberg corpus"""
    from nltk.corpus import gutenberg

    for fileid in gutenberg.fileids():
        num_chars = len(gutenberg.raw(fileid))
        num_words = len(gutenberg.words(fileid))
        num_sents = len(gutenberg.sents(fileid))
        num_vocab = len(set(w.lower() for w in gutenberg.words(fileid)))
        print(int(num_chars / num_words), int(num_words / num_sents),
              int(num_words / num_vocab), fileid)
Author: andreoliwa, Project: nlp-book, Lines: 11, Source: book_examples.py

Example 13: for_print

from nltk.corpus import gutenberg

def for_print():
    '''
    Print three statistics for each text.
    '''
    for fileid in gutenberg.fileids():
        num_chars = len(gutenberg.raw(fileid))
        num_words = len(gutenberg.words(fileid))
        num_sents = len(gutenberg.sents(fileid))
        num_vocab = len(set(w.lower() for w in gutenberg.words(fileid)))
        print(int(num_chars/num_words), int(num_words/num_sents),
              int(num_words/num_vocab), fileid)
Author: Paul-Lin, Project: misc, Lines: 11, Source: toturial.py

Example 14: train

    # Method of a language-model class (excerpt from LanguageModel.py).
    # Requires: from nltk import FreqDist, ConditionalFreqDist, bigrams
    #           from nltk.corpus import gutenberg
    # wordRE is a word-matching regex defined elsewhere in the file.
    def train(self):
        self.vocabulary = set()

        this_bigrams = []
        self.unigrams = FreqDist()

        for fileid in gutenberg.fileids():
            for sentence in gutenberg.sents(fileid):
                words = ["<s>"] + [x.lower() for x in sentence if wordRE.search(x)] + ["</s>"]
                this_bigrams += bigrams(words)
                self.vocabulary.update(words)
                self.unigrams.update(words)
        self.bigrams = ConditionalFreqDist(this_bigrams)
        self.V = len(self.vocabulary)
Author: slee17, Project: NLP, Lines: 14, Source: LanguageModel.py
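As a follow-up, here is a sketch of how the trained counts might be queried for a maximum-likelihood bigram probability; model is a hypothetical instance of the surrounding class (not shown in the excerpt) after calling train():

def bigram_prob(model, w1, w2):
    # MLE estimate of P(w2 | w1) from the counts built in train();
    # hypothetical helper, not part of the original excerpt.
    if model.unigrams[w1] == 0:
        return 0.0
    return model.bigrams[w1][w2] / model.unigrams[w1]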

Example 15: benchmark_sbd

import re

import numpy as np
from nltk.corpus import gutenberg

def benchmark_sbd():
    # split_text and get_prf are defined elsewhere in readability.py
    ps = []
    rs = []
    f1s = []
    for fileid in gutenberg.fileids():
        # Gold-standard sentences, keeping only alphabetic tokens
        sents_gold = [[w for w in sent if w.isalpha()]
                      for sent in gutenberg.sents(fileid)]
        # Sentences from our own sentence-boundary detector, tokenized the same way
        text = gutenberg.raw(fileid)
        sents_obtained = [[m.group() for m in re.finditer(r'\w+', sent)
                           if m.group().isalpha()]
                          for sent in split_text(text)]
        c_common = sum(1 for sent in sents_obtained if sent in sents_gold)
        p, r, f1 = get_prf(c_common, len(sents_obtained), len(sents_gold))
        print('\n\n', fileid)
        print('Precision: {:0.2f}, Recall: {:0.2f}, F1: {:0.2f}'.format(p, r, f1))
        ps.append(p)
        rs.append(r)
        f1s.append(f1)

    print('\n\nPrecision stats: {:0.3f} +- {:0.4f}'.format(np.mean(ps), np.std(ps)))
    print('Recall stats: {:0.3f} +- {:0.4f}'.format(np.mean(rs), np.std(rs)))
    print('F1 stats: {:0.3f} +- {:0.4f}'.format(np.mean(f1s), np.std(f1s)))
    print(len(f1s))

    good_ps = [p for p in ps if p >= 0.8]
    good_rs = [r for r in rs if r >= 0.8]
    good_f1s = [f1 for f1 in f1s if f1 >= 0.8]
    print('\nGood precision stats: {:0.3f} +- {:0.4f}'.format(np.mean(good_ps), np.std(good_ps)))
    print('Good recall stats: {:0.3f} +- {:0.4f}'.format(np.mean(good_rs), np.std(good_rs)))
    print('Good F1 stats: {:0.3f} +- {:0.4f}'.format(np.mean(good_f1s), np.std(good_f1s)))
    print(len(good_f1s))
Author: artreven, Project: assessment_tools, Lines: 49, Source: readability.py
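For completeness, a plausible shape for the get_prf helper used above (hypothetical; the original is defined elsewhere in readability.py):

def get_prf(n_common, n_obtained, n_gold):
    # Hypothetical precision/recall/F1 helper matching the call site above.
    p = n_common / n_obtained if n_obtained else 0.0
    r = n_common / n_gold if n_gold else 0.0
    f1 = 2 * p * r / (p + r) if (p + r) else 0.0
    return p, r, f1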


Note: The nltk.corpus.gutenberg.sents examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their developers, and copyright remains with the original authors; for distribution and use, please refer to each project's license. Do not reproduce without permission.