本文整理汇总了Python中nltk.corpus.util.LazyCorpusLoader.parsed_sents方法的典型用法代码示例。如果您正苦于以下问题:Python LazyCorpusLoader.parsed_sents方法的具体用法?Python LazyCorpusLoader.parsed_sents怎么用?Python LazyCorpusLoader.parsed_sents使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类nltk.corpus.util.LazyCorpusLoader
的用法示例。
在下文中一共展示了LazyCorpusLoader.parsed_sents方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: demo
# 需要导入模块: from nltk.corpus.util import LazyCorpusLoader [as 别名]
# 或者: from nltk.corpus.util.LazyCorpusLoader import parsed_sents [as 别名]
def demo():
    """Demo: lazily load the KNB Corpus (corpus1) and print sample output.

    Shows fileids, raw words, parsed trees, a customized ``morphs2str``
    rendering, and tagged sentences.
    """
    import nltk
    from nltk.corpus.util import LazyCorpusLoader

    root = nltk.data.find('corpora/knbc/corpus1')
    # Keep only fileids matching the KNBC naming scheme d-d-nnn-nnn.
    fileids = [f for f in find_corpus_fileids(FileSystemPathPointer(root), ".*")
               if re.search(r"\d\-\d\-[\d]+\-[\d]+", f)]

    def _knbc_fileids_sort(x):
        # Sort key: first cell lexicographically, remaining cells numerically.
        cells = x.split('-')
        return (cells[0], int(cells[1]), int(cells[2]), int(cells[3]))

    knbc = LazyCorpusLoader('knbc/corpus1', KNBCorpusReader,
                            sorted(fileids, key=_knbc_fileids_sort), encoding='euc-jp')

    print(knbc.fileids()[:10])
    print(''.join(knbc.words()[:100]))

    print('\n\n'.join('%s' % tree for tree in knbc.parsed_sents()[:2]))

    # Override morph rendering: "surface(POS)" joined with '/', skipping EOS markers.
    knbc.morphs2str = lambda morphs: '/'.join(
        "%s(%s)" % (m[0], m[1].split(' ')[2]) for m in morphs if m[0] != 'EOS'
    ).encode('utf-8')

    print('\n\n'.join('%s' % tree for tree in knbc.parsed_sents()[:2]))

    print('\n'.join(' '.join("%s/%s" % (w[0], w[1].split(' ')[2]) for w in sent)
                    for sent in knbc.tagged_sents()[0:2]))
示例2: demo
# 需要导入模块: from nltk.corpus.util import LazyCorpusLoader [as 别名]
# 或者: from nltk.corpus.util.LazyCorpusLoader import parsed_sents [as 别名]
def demo():
    """Demo: lazily load the KNB Corpus (corpus1) and print sample output.

    Same walkthrough as the double-quoted variant: fileids, words, parsed
    trees, a custom ``morphs2str``, and tagged sentences. Python 2 ``print``
    statements converted to Python 3 ``print()`` calls.
    """
    import nltk
    from nltk.corpus.util import LazyCorpusLoader

    root = nltk.data.find("corpora/knbc/corpus1")
    # Keep only fileids matching the KNBC naming scheme d-d-nnn-nnn.
    fileids = [
        f for f in find_corpus_fileids(FileSystemPathPointer(root), ".*")
        if re.search(r"\d\-\d\-[\d]+\-[\d]+", f)
    ]

    def _knbc_fileids_sort(x):
        # Sort key: first cell lexicographically, remaining cells numerically.
        cells = x.split("-")
        return (cells[0], int(cells[1]), int(cells[2]), int(cells[3]))

    knbc = LazyCorpusLoader("knbc/corpus1", KNBCorpusReader,
                            sorted(fileids, key=_knbc_fileids_sort), encoding="euc-jp")

    print(knbc.fileids()[:10])
    print("".join(knbc.words()[:100]))

    print("\n\n".join("%s" % tree for tree in knbc.parsed_sents()[:2]))

    # Override morph rendering: "surface(POS)" joined with '/', skipping EOS markers.
    knbc.morphs2str = lambda morphs: "/".join(
        "%s(%s)" % (m[0], m[1].split(" ")[2]) for m in morphs if m[0] != "EOS"
    ).encode("utf-8")

    print("\n\n".join("%s" % tree for tree in knbc.parsed_sents()[:2]))

    print("\n".join(" ".join("%s/%s" % (w[0], w[1].split(" ")[2]) for w in sent)
                    for sent in knbc.tagged_sents()[0:2]))
示例3: parse_wsj
# 需要导入模块: from nltk.corpus.util import LazyCorpusLoader [as 别名]
# 或者: from nltk.corpus.util.LazyCorpusLoader import parsed_sents [as 别名]
def parse_wsj(processes=8):
    """Parse all WSJ sections of Penn Treebank v3 in parallel.

    Pairs each parsed sentence with its tagged counterpart and fans the
    work out to a multiprocessing pool running ``get_best_parse``.

    :param processes: number of worker processes in the pool (default 8).
    """
    ptb = LazyCorpusLoader(  # Penn Treebank v3: WSJ portions
        'ptb', CategorizedBracketParseCorpusReader, r'wsj/\d\d/wsj_\d\d\d\d.mrg',
        cat_file='allcats.txt', tagset='wsj')
    fileids = ptb.fileids()
    params = []
    for f in fileids:
        # Lock-step iterate the parse trees and tagged sentences of one file.
        corpus = zip(ptb.parsed_sents(f), ptb.tagged_sents(f))
        for i, (parsed, tagged) in enumerate(corpus):
            params.append((f, i, parsed, tagged))
    p = Pool(processes)
    # Sort by (fileid, sentence index) for a deterministic work order.
    p.starmap(get_best_parse, sorted(params, key=lambda x: (x[0], x[1])))
示例4: treebank_chunk_tagger_demo
# 需要导入模块: from nltk.corpus.util import LazyCorpusLoader [as 别名]
# 或者: from nltk.corpus.util.LazyCorpusLoader import parsed_sents [as 别名]
def treebank_chunk_tagger_demo():
    """Demo: chunk and parse State of the Union addresses.

    Wraps the plaintext ``state_union`` corpus in a
    ``TreebankChunkTaggerCorpusReader`` and prints a few chunked and
    parsed sentences. Python 2 ``print`` statements converted to
    Python 3 ``print()`` calls.
    """
    from nltk.corpus.util import LazyCorpusLoader
    from nltk.corpus.reader import PlaintextCorpusReader
    from nltk_contrib.coref.util import TreebankChunkTaggerCorpusReader

    state_union = LazyCorpusLoader(
        'state_union', PlaintextCorpusReader, r'(?!\.svn).*\.txt')
    state_union = TreebankChunkTaggerCorpusReader(state_union)

    print('Treebank chunker demo...')
    print('Chunked sentences:')
    for sent in state_union.chunked_sents()[500:505]:
        print(sent)
        print()
    print()
    print('Parsed sentences:')
    for tree in state_union.parsed_sents()[500:505]:
        print(tree)
        print()
    print()
示例5: find_corpus_fileids
# 需要导入模块: from nltk.corpus.util import LazyCorpusLoader [as 别名]
# 或者: from nltk.corpus.util.LazyCorpusLoader import parsed_sents [as 别名]
#! /usr/bin/python
# -*- coding: utf-8 -*-
# Script: load the KNB Corpus (KNBC_v1.0_090925) and print sample parses.
# Python 2 print statements converted to Python 3 print() calls.
import nltk
import util
from knbc import *
from nltk.corpus.util import LazyCorpusLoader

root = nltk.data.find('corpora/KNBC_v1.0_090925/corpus1')
# Keep only fileids matching the KNBC naming scheme d-d-nnn-nnn.
fileids = [f for f in find_corpus_fileids(FileSystemPathPointer(root), ".*")
           if re.search(r"\d\-\d\-[\d]+\-[\d]+", f)]


def _knbc_fileids_sort(x):
    # Sort key: first cell lexicographically, remaining cells numerically.
    cells = x.split('-')
    return (cells[0], int(cells[1]), int(cells[2]), int(cells[3]))


knbc = LazyCorpusLoader('KNBC_v1.0_090925/corpus1', KNBCorpusReader,
                        sorted(fileids, key=_knbc_fileids_sort), encoding='euc-jp')

# print(knbc.fileids())
# print('\n'.join(''.join(sent) for sent in knbc.words()))

print('\n\n'.join('%s' % tree for tree in knbc.parsed_sents()[0:2]))
print(type(knbc.parsed_sents()[0]))

# print('\n'.join(' '.join("%s/%s" % (w[0], w[1][2]) for w in sent)
#                 for sent in knbc.tagged_words()[0:20]))
示例6: _knbc_fileids_sort
# 需要导入模块: from nltk.corpus.util import LazyCorpusLoader [as 别名]
# 或者: from nltk.corpus.util.LazyCorpusLoader import parsed_sents [as 别名]
#!/usr/bin/env python
# encoding: utf-8
# Sample of reading the KNBC corpus with NLTK.
# Python 2 print statements converted to Python 3 print() calls.
from nltk_jp import *
from nltk.corpus.reader import *
from nltk.corpus.util import LazyCorpusLoader


def _knbc_fileids_sort(x):
    # Sort key: first cell lexicographically, remaining cells numerically.
    cells = x.split('-')
    return (cells[0], int(cells[1]), int(cells[2]), int(cells[3]))


# Load the corpus: keep only fileids matching the KNBC naming scheme d-d-nnn-nnn.
root = nltk.data.find('corpora/knbc/corpus1')
fileids = [f for f in find_corpus_fileids(FileSystemPathPointer(root), ".*")
           if re.search(r"\d\-\d\-[\d]+\-[\d]+", f)]
knbc = LazyCorpusLoader('knbc/corpus1', KNBCorpusReader,
                        sorted(fileids, key=_knbc_fileids_sort), encoding='euc-jp')

# print("fileids :", knbc.fileids())
print("words :", pp(knbc.words()[:10]))
print("parsed_sents :", str(knbc.parsed_sents()[0]))
print("tagged_words :", pp(knbc.tagged_words()[:5]))