本文整理汇总了Python中nltk.corpus.reader.ChunkedCorpusReader类的典型用法代码示例。如果您正苦于以下问题:Python ChunkedCorpusReader类的具体用法?Python ChunkedCorpusReader怎么用?Python ChunkedCorpusReader使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了ChunkedCorpusReader类的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: __init__
def __init__(self, directory="",fileids=r"haaretz.bgu",myEncoding="utf-8"):
ChunkedCorpusReader.__init__(self, directory ,fileids , str2chunktree=self.__str2BguTree,sent_tokenizer=RegexpTokenizer('\n\n', gaps=True),encoding=myEncoding)
self._format = format
示例2: chunked_paras
def chunked_paras(self, fileids=None, categories=None):
    """Return chunked paragraphs, letting categories narrow the fileids.

    Resolves the (fileids, categories) pair to a concrete fileid list via
    the mixin's _resolve helper, then delegates to the base reader.
    """
    resolved = self._resolve(fileids, categories)
    return ChunkedCorpusReader.chunked_paras(self, resolved)
示例3: __init__
def __init__(self, *args, **kwargs):
    """Combine category support with chunked-corpus reading.

    CategorizedCorpusReader must run first: it consumes its own
    category-related keyword arguments from `kwargs` before the
    remaining args/kwargs are handed to ChunkedCorpusReader.
    """
    CategorizedCorpusReader.__init__(self, kwargs)
    ChunkedCorpusReader.__init__(self, *args, **kwargs)
示例4: tagged_paras
def tagged_paras(self, fileids=None, categories=None, simplify_tags=False):
    """Return tagged paragraphs, with categories resolved to fileids.

    simplify_tags is forwarded unchanged to the base reader.
    """
    file_list = self._resolve(fileids, categories)
    return ChunkedCorpusReader.tagged_paras(self, file_list, simplify_tags)
示例5: sents
def sents(self, fileids=None, categories=None):
    """Return sentences for the fileids selected by `categories`/`fileids`."""
    selected = self._resolve(fileids, categories)
    return ChunkedCorpusReader.sents(self, selected)
示例6: Tree
########## CHUNKED CORPUS READER ###############
### Implementing CCR: read a chunked (bracketed) corpus from disk.
from nltk.corpus.reader import ChunkedCorpusReader

root = "C:\\Users\\Matrix\\AppData\\Roaming\\nltk_data\\corpora\\cookbook\\"
reader = ChunkedCorpusReader(root, r'.*\.chunk')

# Each chunk — represented in braces — is considered as a word.
print(reader.chunked_words())
# Each sentence will be included in a Tree().
print(reader.chunked_sents())
print(reader.chunked_paras())

# Getting tagged tokens for each chunk
# (each chunk is a word but each word is not a chunk).
print(reader.chunked_words()[0].leaves())
print(reader.chunked_sents()[1].leaves())
# Can't apply leaves() directly to a para — but we can access
# a sentence of a given para.
# Fix: the original called the nonexistent `chunked_para()`;
# the method is `chunked_paras()`.
print(reader.chunked_paras()[0][0].leaves())

### Implementing CCCR: read an IOB-formatted (CoNLL-style) corpus,
### declaring which chunk types to extract.
from nltk.corpus.reader import ConllChunkCorpusReader

root = "C:\\Users\\Matrix\\AppData\\Roaming\\nltk_data\\corpora\\cookbook\\"
# Fix: the original tuple had a '.' between 'VP' and 'PP' (SyntaxError).
reader = ConllChunkCorpusReader(root, r'.*\.iob', ('NP', 'VP', 'PP'))
print(reader.chunked_words())
print(reader.chunked_sents())
print(reader.iob_words())
print(reader.iob_sents())
示例7: createChunker
def createChunker():
    """Build a TagChunker trained on the chunked sentences under data/chunks/.

    Reads the 'text_search.pos' corpus file and trains a chunker backed by
    a unigram tagger with a bigram backoff.
    """
    corpus = ChunkedCorpusReader('data/chunks/','text_search.pos')
    backoff_chain = [UnigramTagger, BigramTagger]
    return TagChunker(corpus.chunked_sents(), backoff_chain)
示例8: tagged_sents
def tagged_sents(self, fileids=None, categories=None):
    """Return tagged sentences, with categories resolved to fileids.

    Fix: the original referenced the undefined name `self_resolve`
    (missing dot) — it must be the mixin helper `self._resolve`,
    matching the sibling sents()/chunked_paras() overrides.
    """
    return ChunkedCorpusReader.tagged_sents(
        self, self._resolve(fileids, categories))