

Python util.LazyCorpusLoader Class Code Examples

This article collects typical usage examples of the Python class nltk.corpus.util.LazyCorpusLoader. If you have been wondering what LazyCorpusLoader does, how to use it, or what real-world usage looks like, the curated class examples below should help.


The following shows 15 code examples of the LazyCorpusLoader class, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
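Before the examples, here is a minimal sketch of the pattern they all share, reusing the state_union corpus and PlaintextCorpusReader combination that appears in Examples 12 and 15 below. LazyCorpusLoader only records a corpus name, a reader class, and the arguments to forward to that reader; no I/O happens until the first attribute access, at which point it locates the corpus under NLTK's data path, constructs the real reader, and delegates to it from then on.

from nltk.corpus.util import LazyCorpusLoader
from nltk.corpus.reader import PlaintextCorpusReader

# No I/O yet: only the corpus name, reader class, and arguments are stored.
state_union = LazyCorpusLoader(
    'state_union', PlaintextCorpusReader, r'(?!\.svn).*\.txt')

# The first real access triggers nltk.data.find('corpora/state_union'),
# builds the PlaintextCorpusReader, and delegates this call to it
# (a LookupError with a download hint is raised if the corpus is missing).
print(state_union.words()[:10])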

Example 1: test

def test():

    from nltk.corpus.util import LazyCorpusLoader
    from nltk.corpus.reader.chasen import ChasenCorpusReader

    # Lazily load the JEITA corpus through the ChaSen reader.
    jeita = LazyCorpusLoader("jeita", ChasenCorpusReader, r".*chasen", encoding="utf-8")

    # Each tagged word is a (surface form, morphological info) pair of strings.
    assert isinstance(jeita.tagged_words()[0][1], str)
Developer: Kuew, Project: hashtagify, Lines of code: 7, Source file: chasen.py

Example 2: test

def test():

    from nltk.corpus.util import LazyCorpusLoader
    from nltk.corpus.reader.chasen import ChasenCorpusReader

    jeita = LazyCorpusLoader(
        'jeita', ChasenCorpusReader, r'.*chasen', encoding='utf-8')

    assert isinstance(jeita.tagged_words()[0][1], str)
Developer: Akira55, Project: nltk, Lines of code: 8, Source file: chasen.py

Example 3: test

def test():

    from nltk.corpus.util import LazyCorpusLoader
    from nltk.corpus.reader.knbc import KNBCorpusReader

    # The KNB corpus is EUC-JP encoded; the reader decodes it transparently.
    knbc = LazyCorpusLoader("knbc/corpus1", KNBCorpusReader, r".*/KN.*", encoding="euc-jp")
    assert isinstance(knbc.words()[0], str)
    assert isinstance(knbc.sents()[0][0], str)
    assert isinstance(knbc.tagged_words()[0], tuple)
    assert isinstance(knbc.tagged_sents()[0][0], tuple)
Developer: ongxuanhong, Project: jazzparser-master-thesis, Lines of code: 9, Source file: knbc.py

Example 4: test

def test():

    from nltk.corpus.util import LazyCorpusLoader
    from nltk.corpus.reader.knbc import KNBCorpusReader
    from six import string_types

    knbc = LazyCorpusLoader(
        'knbc/corpus1', KNBCorpusReader, r'.*/KN.*', encoding='euc-jp')
    assert isinstance(knbc.words()[0], string_types)
    assert isinstance(knbc.sents()[0][0], string_types)
    assert isinstance(knbc.tagged_words()[0], tuple)
    assert isinstance(knbc.tagged_sents()[0][0], tuple)
Developer: DrDub, Project: nltk, Lines of code: 9, Source file: knbc.py

Example 5: demo

def demo():

    from nltk.corpus.util import LazyCorpusLoader
    from nltk.corpus.reader.chasen import ChasenCorpusReader

    jeita = LazyCorpusLoader("jeita", ChasenCorpusReader, r".*chasen", encoding="utf-8")
    print("/".join(jeita.words()[22100:22140]))

    # Print a few sentences as word/POS lines, separated by an EOS marker.
    print("\nEOS\n".join(
        ["\n".join("%s/%s" % (w[0], w[1].split("\t")[2]) for w in sent) for sent in jeita.tagged_sents()[2170:2173]]
    ))
Developer: Kuew, Project: hashtagify, Lines of code: 11, Source file: chasen.py

Example 6: demo

def demo():

    from nltk.corpus.util import LazyCorpusLoader
    from nltk.corpus.reader.chasen import ChasenCorpusReader

    jeita = LazyCorpusLoader(
        'jeita', ChasenCorpusReader, r'.*chasen', encoding='utf-8')
    print('/'.join(jeita.words()[22100:22140]))

    print('\nEOS\n'.join(['\n'.join("%s/%s" % (w[0], w[1].split('\t')[2]) for w in sent)
                          for sent in jeita.tagged_sents()[2170:2173]]))
Developer: Akira55, Project: nltk, Lines of code: 12, Source file: chasen.py

Example 7: demo

def demo():

    import re

    import nltk
    from nltk.corpus.util import LazyCorpusLoader
    from nltk.corpus.reader.knbc import KNBCorpusReader
    from nltk.corpus.reader.util import find_corpus_fileids
    from nltk.data import FileSystemPathPointer

    root = nltk.data.find('corpora/knbc/corpus1')
    fileids = [f for f in find_corpus_fileids(FileSystemPathPointer(root), ".*")
               if re.search(r"\d\-\d\-[\d]+\-[\d]+", f)]

    def _knbc_fileids_sort(x):
        # Sort fileids numerically on their dash-separated id components.
        cells = x.split('-')
        return (cells[0], int(cells[1]), int(cells[2]), int(cells[3]))

    knbc = LazyCorpusLoader('knbc/corpus1', KNBCorpusReader,
                            sorted(fileids, key=_knbc_fileids_sort), encoding='euc-jp')

    print(knbc.fileids()[:10])
    print(''.join(knbc.words()[:100]))

    print('\n\n'.join('%s' % tree for tree in knbc.parsed_sents()[:2]))

    # Customize how morphemes are rendered inside parse trees.
    knbc.morphs2str = lambda morphs: '/'.join(
        "%s(%s)" % (m[0], m[1].split(' ')[2]) for m in morphs if m[0] != 'EOS')

    print('\n\n'.join('%s' % tree for tree in knbc.parsed_sents()[:2]))

    print('\n'.join(' '.join("%s/%s" % (w[0], w[1].split(' ')[2]) for w in sent)
                    for sent in knbc.tagged_sents()[0:2]))
Developer: B-Rich, Project: Fem-Coding-Challenge, Lines of code: 29, Source file: knbc.py

Example 8: parse_wsj

def parse_wsj(processes=8):
    from multiprocessing import Pool
    from nltk.corpus.util import LazyCorpusLoader
    from nltk.corpus.reader import CategorizedBracketParseCorpusReader

    ptb = LazyCorpusLoader(  # Penn Treebank v3: WSJ portions
        'ptb', CategorizedBracketParseCorpusReader, r'wsj/\d\d/wsj_\d\d\d\d.mrg',
        cat_file='allcats.txt', tagset='wsj')

    # Collect one (fileid, index, parse tree, tagged sentence) tuple per sentence.
    fileids = ptb.fileids()
    params = []
    for f in fileids:
        corpus = zip(ptb.parsed_sents(f), ptb.tagged_sents(f))
        for i, (parsed, tagged) in enumerate(corpus):
            params.append((f, i, parsed, tagged))

    # get_best_parse is defined elsewhere in run_parser.py.
    p = Pool(processes)
    p.starmap(get_best_parse, sorted(params, key=lambda x: (x[0], x[1])))
Developer: jonpiffle, Project: ltag_parser, Lines of code: 14, Source file: run_parser.py

Example 9: main

import copy
import random
import string
from itertools import chain

from nltk import download, downloader
from nltk.corpus.reader import EuroparlCorpusReader
from nltk.corpus.util import LazyCorpusLoader
from nltk.probability import (ConditionalFreqDist, ConditionalProbDist,
                              FreqDist, LaplaceProbDist)

# ngrams_sentences and viterbi are helper functions defined elsewhere in ex3.py.


def main():
    # matplotlib.use('Qt5Agg')
    # import matplotlib.pyplot as plt

    download('punkt')
    # Download and load the English europarl corpus
    downloader.download('europarl_raw')
    english = LazyCorpusLoader('europarl_raw/english', EuroparlCorpusReader, r'ep-.*\.en', encoding='utf-8')

    words = english.words()

    # Calculate the frequency distribution of the words in the corpus
    word_frequency_distribution = FreqDist([word.lower() for word in words])

    # Get the sentences of the corpus, all in lower case, with infrequent words replaced by the token "<unknown>"
    sentences = [
        ['start0'] + [word.lower() if word_frequency_distribution[word.lower()] >= 10 else '<unknown>' for word in
                      sentence] + ['end0']
        for sentence in english.sents()]

    # Create train and test datasets with an 80/20 split
    train = sentences[0:int(len(sentences) * 0.8)]
    test = sentences[int(len(sentences) * 0.8):]

    vocabulary = list(word_frequency_distribution)
    vocabulary_length = word_frequency_distribution.B()

    # Calculate bigrams
    bigrams_train = list(chain.from_iterable(ngrams_sentences(train, 2)))

    # Calculate the conditional frequency distribution for bigrams
    bigrams_fd = ConditionalFreqDist(((f,), s) for f, s in bigrams_train)

    # Calculate the conditional probability distribution for bigrams, with Laplace smoothing
    cpd_bigram = ConditionalProbDist(bigrams_fd, LaplaceProbDist, vocabulary_length)

    # Inject two errors into each test sentence: replace one random word with a
    # random vocabulary word, then corrupt one letter of another random word.
    lower_case_letters = string.ascii_lowercase
    error_test = copy.deepcopy(test)
    for sentence in error_test:
        word = random.randrange(1, len(sentence) - 1)
        sentence[word] = random.choice(vocabulary)
        word = random.randrange(1, len(sentence) - 1)
        letter = random.randrange(0, len(sentence[word]))
        sentence[word] = sentence[word][0:letter] + random.choice(lower_case_letters) + sentence[word][letter+1:]

    corrected = viterbi(error_test[25][:-1], vocabulary, cpd_bigram)

    print('Corrected:{}'.format(corrected))
    print('Original:{}'.format(test[25]))
Developer: BabisK, Project: M36209P, Lines of code: 50, Source file: ex3.py

Example 10: read_knbc

def read_knbc(train_file, test_file, reference_file):

    root = nltk.data.find('corpora/knbc/corpus1')
    fileids = [f for f in find_corpus_fileids(FileSystemPathPointer(root), ".*")
               if re.search(r"\d\-\d\-[\d]+\-[\d]+", f)]

    knbc = LazyCorpusLoader('knbc/corpus1', KNBCorpusReader,
                            sorted(fileids, key=_knbc_fileids_sort), encoding='euc-jp')

    sentences = knbc.sents()

    # _knbc_fileids_sort and the write_* helpers are defined elsewhere in knbc_to_xml.py.
    write_train(sentences[0:4000], train_file)
    write_test(sentences[4000:-1], test_file)
    write_reference(sentences[4000:-1], reference_file)
Developer: LeopoldC, Project: cross-language_IR, Lines of code: 14, Source file: knbc_to_xml.py

Example 11: demo

def demo():

    import re

    import nltk
    from nltk.corpus.util import LazyCorpusLoader
    from nltk.corpus.reader.knbc import KNBCorpusReader
    from nltk.corpus.reader.util import find_corpus_fileids
    from nltk.data import FileSystemPathPointer

    root = nltk.data.find("corpora/knbc/corpus1")
    fileids = [
        f for f in find_corpus_fileids(FileSystemPathPointer(root), ".*") if re.search(r"\d\-\d\-[\d]+\-[\d]+", f)
    ]

    def _knbc_fileids_sort(x):
        cells = x.split("-")
        return (cells[0], int(cells[1]), int(cells[2]), int(cells[3]))

    knbc = LazyCorpusLoader("knbc/corpus1", KNBCorpusReader, sorted(fileids, key=_knbc_fileids_sort), encoding="euc-jp")

    print(knbc.fileids()[:10])
    print("".join(knbc.words()[:100]))

    print("\n\n".join("%s" % tree for tree in knbc.parsed_sents()[:2]))

    knbc.morphs2str = lambda morphs: "/".join(
        "%s(%s)" % (m[0], m[1].split(" ")[2]) for m in morphs if m[0] != "EOS"
    )

    print("\n\n".join("%s" % tree for tree in knbc.parsed_sents()[:2]))

    print("\n".join(" ".join("%s/%s" % (w[0], w[1].split(" ")[2]) for w in sent) for sent in knbc.tagged_sents()[0:2]))
Developer: ongxuanhong, Project: jazzparser-master-thesis, Lines of code: 28, Source file: knbc.py

Example 12: treebank_tagger_demo

def treebank_tagger_demo():
    from nltk.corpus.util import LazyCorpusLoader
    from nltk.corpus.reader import PlaintextCorpusReader
    from nltk_contrib.coref.util import TreebankTaggerCorpusReader

    # Wrap the plaintext corpus in a reader that adds Treebank-style tagging.
    state_union = LazyCorpusLoader(
        'state_union', PlaintextCorpusReader, r'(?!\.svn).*\.txt')
    state_union = TreebankTaggerCorpusReader(state_union)

    print('Treebank tagger demo...')
    print('Tagged sentences:')
    for sent in state_union.tagged_sents()[500:505]:
        print(sent)
        print()
    print()
    print('Tagged words:')
    for word in state_union.tagged_words()[500:505]:
        print(word)
    print()
Developer: Sandy4321, Project: nltk_contrib, Lines of code: 19, Source file: util.py

Example 13: __init__

    def __init__(self, languages=['nl', 'en', 'fr', 'de', 'es', 'th', 'pt', 'pl', 'id', 'ru', 'it', 'tr']):
        logger.info("Build " + self.__class__.__name__ + " ... ")
        self.language_trigrams = {}
        # LangIdCorpusReader and logger are project-level definitions from pynews.
        self.langid = LazyCorpusLoader('langid', LangIdCorpusReader, r'(?!\.).*\.txt')
        self.__mutex = threading.Semaphore()
        for lang in languages:
            self.language_trigrams[lang] = FreqDist()
            # FreqDist.inc() is the old NLTK 2.x API; under NLTK 3 this would be
            # self.language_trigrams[lang][f[0]] += f[1].
            for f in self.langid.freqs(fileids=lang + "-3grams.txt"):
                self.language_trigrams[lang].inc(f[0], f[1])
        logger.info("Build " + self.__class__.__name__ + ": done!")
Developer: soldierkam, Project: pynews, Lines of code: 10, Source file: lang.py

Example 14: from_nltk

    @classmethod
    def from_nltk(cls):
        """Returns a fully populated Propbank with the help of NLTK's interface."""
        # CategorizedBracketParseCorpusReader and PropbankCorpusReader come from
        # nltk.corpus.reader; Role, PropbankInstance, and Propbank are defined
        # elsewhere in propbank.py.
        ptb = LazyCorpusLoader(
            'ptb',
            CategorizedBracketParseCorpusReader,
            r'wsj/\d\d/wsj_\d\d\d\d.mrg',
            cat_file='allcats.txt'
        )

        propbank_ptb = LazyCorpusLoader(
            'propbank', PropbankCorpusReader,
            'prop.txt', r'frames/.*\.xml', 'verbs.txt',
            lambda filename: filename.upper(),
            ptb
        )  # Must be defined *after* the ptb corpus.

        role_dict = {}
        for roleset_xml in propbank_ptb.rolesets():
            role = Role.fromxml(roleset_xml)
            role_dict[role.roleset_id] = role

        instance_dict = defaultdict(dict)
        pb_instances = propbank_ptb.instances()
        for instance in pb_instances:
            instance.fileid = instance.fileid.lower()
            file_num = instance.fileid.split("/")[-1].split(".")[0].replace("wsj_", "")
            sentnum = str(instance.sentnum)
            predicate = instance.predicate
            tree = instance.tree

            if isinstance(predicate, nltk.corpus.reader.propbank.PropbankTreePointer):
                key = Propbank.pointer_to_word(predicate, tree)
            elif isinstance(predicate, nltk.corpus.reader.propbank.PropbankSplitTreePointer):
                key = tuple([Propbank.pointer_to_word(p, tree) for p in predicate.pieces])
            else:
                ### TODO: Investigate when this is the case ###
                #assert False
                continue

            pb_instance = PropbankInstance(instance.fileid, file_num, sentnum, key, instance.roleset, instance.arguments)
            instance_dict[(file_num, sentnum)][key] = pb_instance

        return Propbank(role_dict, instance_dict)
Developer: jonpiffle, Project: xtag_verbnet, Lines of code: 43, Source file: propbank.py

Example 15: treebank_chunk_tagger_demo

def treebank_chunk_tagger_demo():
    from nltk.corpus.util import LazyCorpusLoader
    from nltk.corpus.reader import PlaintextCorpusReader
    from nltk_contrib.coref.util import TreebankChunkTaggerCorpusReader

    state_union = LazyCorpusLoader(
        'state_union', PlaintextCorpusReader, r'(?!\.svn).*\.txt')
    state_union = TreebankChunkTaggerCorpusReader(state_union)

    print('Treebank chunker demo...')
    print('Chunked sentences:')
    for sent in state_union.chunked_sents()[500:505]:
        print(sent)
        print()
    print()
    print('Parsed sentences:')
    for tree in state_union.parsed_sents()[500:505]:
        print(tree)
        print()
    print()
Developer: Sandy4321, Project: nltk_contrib, Lines of code: 20, Source file: util.py


Note: the nltk.corpus.util.LazyCorpusLoader class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please consult the corresponding project's license before redistributing or using the code. Do not reproduce this article without permission.