This article collects typical usage examples of the tagged_sents method of Python's nltk.corpus.util.LazyCorpusLoader. If you are wondering how to call LazyCorpusLoader.tagged_sents or what it returns, the curated examples below should help; you may also want to explore the containing class, nltk.corpus.util.LazyCorpusLoader, further.
The following shows 10 code examples of LazyCorpusLoader.tagged_sents, sorted by popularity by default.
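Before the examples, here is a minimal sketch of the pattern they all share. A LazyCorpusLoader is a stand-in that only builds the real corpus reader on first access; tagged_sents() then returns a lazy sequence of sentences, each a list of (word, tag) tuples. The constructor arguments below mirror how NLTK defines its own Brown corpus object, but treat the exact file regex and cat_file as illustrative rather than authoritative:
from nltk.corpus.util import LazyCorpusLoader
from nltk.corpus.reader import CategorizedTaggedCorpusReader
# No corpus files are read at construction time; the loader only
# records how to build the real reader later.
brown = LazyCorpusLoader(
    'brown', CategorizedTaggedCorpusReader, r'c[a-z]\d\d',
    cat_file='cats.txt', tagset='brown')
# First access triggers loading (the corpus must be installed, e.g.
# via nltk.download('brown')); each sentence is a list of (word, tag) tuples.
print(brown.tagged_sents()[0][:5])
Every example below follows this shape, varying only the reader class (KNBCorpusReader, ChasenCorpusReader, ...) and the corpus it points at.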
Example 1: demo
# Required import: from nltk.corpus.util import LazyCorpusLoader [as alias]
# Or: from nltk.corpus.util.LazyCorpusLoader import tagged_sents [as alias]
import re

from nltk.corpus.reader.knbc import KNBCorpusReader
from nltk.corpus.reader.util import find_corpus_fileids
from nltk.data import FileSystemPathPointer

def demo():
    import nltk
    from nltk.corpus.util import LazyCorpusLoader

    root = nltk.data.find('corpora/knbc/corpus1')
    fileids = [f for f in find_corpus_fileids(FileSystemPathPointer(root), ".*")
               if re.search(r"\d\-\d\-[\d]+\-[\d]+", f)]

    def _knbc_fileids_sort(x):
        cells = x.split('-')
        return (cells[0], int(cells[1]), int(cells[2]), int(cells[3]))

    knbc = LazyCorpusLoader('knbc/corpus1', KNBCorpusReader,
                            sorted(fileids, key=_knbc_fileids_sort), encoding='euc-jp')

    print(knbc.fileids()[:10])
    print(''.join(knbc.words()[:100]))
    print('\n\n'.join('%s' % tree for tree in knbc.parsed_sents()[:2]))

    # Render morphemes as word(POS); the original's trailing .encode('utf-8')
    # (a Python 2 leftover) is dropped so the result stays a str on Python 3.
    knbc.morphs2str = lambda morphs: '/'.join(
        "%s(%s)" % (m[0], m[1].split(' ')[2]) for m in morphs if m[0] != 'EOS')

    print('\n\n'.join('%s' % tree for tree in knbc.parsed_sents()[:2]))
    print('\n'.join(' '.join("%s/%s" % (w[0], w[1].split(' ')[2]) for w in sent)
                    for sent in knbc.tagged_sents()[0:2]))
Example 2: test
# Required import: from nltk.corpus.util import LazyCorpusLoader [as alias]
# Or: from nltk.corpus.util.LazyCorpusLoader import tagged_sents [as alias]
from six import string_types  # assumption: the source project used a py2/py3 compat shim

from nltk.corpus.reader.knbc import KNBCorpusReader

def test():
    from nltk.corpus.util import LazyCorpusLoader

    knbc = LazyCorpusLoader(
        'knbc/corpus1', KNBCorpusReader, r'.*/KN.*', encoding='euc-jp')

    assert isinstance(knbc.words()[0], string_types)
    assert isinstance(knbc.sents()[0][0], string_types)
    assert isinstance(knbc.tagged_words()[0], tuple)
    assert isinstance(knbc.tagged_sents()[0][0], tuple)
Example 3: test
# Required import: from nltk.corpus.util import LazyCorpusLoader [as alias]
# Or: from nltk.corpus.util.LazyCorpusLoader import tagged_sents [as alias]
from nltk.corpus.reader.knbc import KNBCorpusReader

def test():
    from nltk.corpus.util import LazyCorpusLoader

    knbc = LazyCorpusLoader("knbc/corpus1", KNBCorpusReader, r".*/KN.*", encoding="euc-jp")

    # str replaces Python 2's basestring from the original snippet
    assert isinstance(knbc.words()[0], str)
    assert isinstance(knbc.sents()[0][0], str)
    assert isinstance(knbc.tagged_words()[0], tuple)
    assert isinstance(knbc.tagged_sents()[0][0], tuple)
Example 4: demo
# Required import: from nltk.corpus.util import LazyCorpusLoader [as alias]
# Or: from nltk.corpus.util.LazyCorpusLoader import tagged_sents [as alias]
from nltk.corpus.reader.chasen import ChasenCorpusReader

def demo():
    import nltk
    from nltk.corpus.util import LazyCorpusLoader

    jeita = LazyCorpusLoader("jeita", ChasenCorpusReader, r".*chasen", encoding="utf-8")
    print("/".join(jeita.words()[22100:22140]))
    # The ChaSen tag is a tab-separated string; field 2 is the part of speech.
    print("\nEOS\n".join(
        ["\n".join("%s/%s" % (w[0], w[1].split("\t")[2]) for w in sent)
         for sent in jeita.tagged_sents()[2170:2173]]
    ))
Example 5: demo
# Required import: from nltk.corpus.util import LazyCorpusLoader [as alias]
# Or: from nltk.corpus.util.LazyCorpusLoader import tagged_sents [as alias]
from nltk.corpus.reader.chasen import ChasenCorpusReader

def demo():
    import nltk
    from nltk.corpus.util import LazyCorpusLoader

    jeita = LazyCorpusLoader(
        'jeita', ChasenCorpusReader, r'.*chasen', encoding='utf-8')

    print('/'.join(jeita.words()[22100:22140]))
    print('\nEOS\n'.join(['\n'.join("%s/%s" % (w[0], w[1].split('\t')[2]) for w in sent)
                          for sent in jeita.tagged_sents()[2170:2173]]))
Example 6: parse_wsj
# Required import: from nltk.corpus.util import LazyCorpusLoader [as alias]
# Or: from nltk.corpus.util.LazyCorpusLoader import tagged_sents [as alias]
from multiprocessing import Pool

from nltk.corpus.reader import CategorizedBracketParseCorpusReader
from nltk.corpus.util import LazyCorpusLoader

def parse_wsj(processes=8):
    # Penn Treebank v3: WSJ portion
    ptb = LazyCorpusLoader(
        'ptb', CategorizedBracketParseCorpusReader, r'wsj/\d\d/wsj_\d\d\d\d.mrg',
        cat_file='allcats.txt', tagset='wsj')

    fileids = ptb.fileids()
    params = []
    for f in fileids:
        corpus = zip(ptb.parsed_sents(f), ptb.tagged_sents(f))
        for i, (parsed, tagged) in enumerate(corpus):
            params.append((f, i, parsed, tagged))

    # get_best_parse is defined elsewhere in the source project.
    p = Pool(processes)
    p.starmap(get_best_parse, sorted(params, key=lambda x: (x[0], x[1])))
Example 7: treebank_tagger_demo
# Required import: from nltk.corpus.util import LazyCorpusLoader [as alias]
# Or: from nltk.corpus.util.LazyCorpusLoader import tagged_sents [as alias]
def treebank_tagger_demo():
    from nltk.corpus.util import LazyCorpusLoader
    from nltk.corpus.reader import PlaintextCorpusReader
    from nltk_contrib.coref.util import TreebankTaggerCorpusReader

    state_union = LazyCorpusLoader(
        'state_union', PlaintextCorpusReader, r'(?!\.svn).*\.txt')
    # Wrap the plaintext corpus so tagged_sents()/tagged_words() run
    # the Treebank tagger on demand.
    state_union = TreebankTaggerCorpusReader(state_union)

    print('Treebank tagger demo...')
    print('Tagged sentences:')
    for sent in state_union.tagged_sents()[500:505]:
        print(sent)
    print()
    print()
    print('Tagged words:')
    for word in state_union.tagged_words()[500:505]:
        print(word)
    print()
Example 8: demo
# Required import: from nltk.corpus.util import LazyCorpusLoader [as alias]
# Or: from nltk.corpus.util.LazyCorpusLoader import tagged_sents [as alias]
import re

from nltk.corpus.reader.knbc import KNBCorpusReader
from nltk.corpus.reader.util import find_corpus_fileids
from nltk.data import FileSystemPathPointer

def demo():
    import nltk
    from nltk.corpus.util import LazyCorpusLoader

    root = nltk.data.find("corpora/knbc/corpus1")
    fileids = [
        f for f in find_corpus_fileids(FileSystemPathPointer(root), ".*")
        if re.search(r"\d\-\d\-[\d]+\-[\d]+", f)
    ]

    def _knbc_fileids_sort(x):
        cells = x.split("-")
        return (cells[0], int(cells[1]), int(cells[2]), int(cells[3]))

    knbc = LazyCorpusLoader("knbc/corpus1", KNBCorpusReader,
                            sorted(fileids, key=_knbc_fileids_sort), encoding="euc-jp")

    print(knbc.fileids()[:10])
    print("".join(knbc.words()[:100]))
    print("\n\n".join("%s" % tree for tree in knbc.parsed_sents()[:2]))

    # Render morphemes as word(POS); the original's trailing .encode("utf-8")
    # (a Python 2 leftover) is dropped so the result stays a str on Python 3.
    knbc.morphs2str = lambda morphs: "/".join(
        "%s(%s)" % (m[0], m[1].split(" ")[2]) for m in morphs if m[0] != "EOS")

    print("\n\n".join("%s" % tree for tree in knbc.parsed_sents()[:2]))
    print("\n".join(" ".join("%s/%s" % (w[0], w[1].split(" ")[2]) for w in sent)
                    for sent in knbc.tagged_sents()[0:2]))
Example 9: reload
# Required import: from nltk.corpus.util import LazyCorpusLoader [as alias]
# Or: from nltk.corpus.util.LazyCorpusLoader import tagged_sents [as alias]
import re
import pprint

from nltk.corpus.reader import *
from nltk.corpus.reader.util import *
from nltk.corpus.util import LazyCorpusLoader

# The original Python 2 script wrapped sys.stdout/sys.stdin in UTF-8 codecs
# and used reload(sys); sys.setdefaultencoding('utf-8') to handle Japanese
# text on standard I/O; Python 3 already defaults to UTF-8, so that setup
# is unnecessary here.

# Pretty-print without escaping non-ASCII (Unicode) characters
def pp(obj):
    printer = pprint.PrettyPrinter(indent=4, width=160)
    formatted = printer.pformat(obj)
    return re.sub(r"\\u([0-9a-f]{4})",
                  lambda x: chr(int("0x" + x.group(1), 16)), formatted)

# Download jeita first, along the lines of: import nltk; nltk.download()
# The downloaded zip file can be read directly.
jeita = LazyCorpusLoader("jeita", ChasenCorpusReader, r".*chasen", encoding="utf-8")

# print('/'.join(jeita.words()[22100:22140]))

# The tag is a tab-separated string containing each morpheme's reading,
# base form, POS 1, POS 2, and conjugation.
print('\nEOS\n'.join(['\n'.join("%s/%s" % (w[0], w[1].split('\t')[2]) for w in sent)
                      for sent in jeita.tagged_sents()[2170:2173]]))
Example 10: ChasenCorpusReader
# Required import: from nltk.corpus.util import LazyCorpusLoader [as alias]
# Or: from nltk.corpus.util.LazyCorpusLoader import tagged_sents [as alias]
#!/usr/bin/env python
# encoding: utf-8
# Sample that reads the JEITA corpus with NLTK
from nltk_jp import *  # the source project's helper module; presumably provides pp()
from nltk.corpus.reader import *
from nltk.corpus.util import LazyCorpusLoader

# Load the corpus
# jeita = ChasenCorpusReader('/home/ubuntu/nltk_data/corpora/jeita', r'.*chasen', encoding='utf-8')
jeita = LazyCorpusLoader('jeita', ChasenCorpusReader, r'.*chasen', encoding='utf-8')

print(pp(jeita.words()[:10]))
print(pp(jeita.tagged_sents()[1]))