本文整理匯總了Python中nltk.corpus.reader.ChunkedCorpusReader類的典型用法代碼示例。如果您正苦於以下問題:Python ChunkedCorpusReader類的具體用法?Python ChunkedCorpusReader怎麽用?Python ChunkedCorpusReader使用的例子?那麽, 這裏精選的類代碼示例或許可以為您提供幫助。
在下文中一共展示了ChunkedCorpusReader類的8個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: __init__
def __init__(self, directory="",fileids=r"haaretz.bgu",myEncoding="utf-8"):
    """Initialize a reader for a BGU-format chunked corpus.

    Args:
        directory: root directory of the corpus files.
        fileids: regex/list of corpus file ids (defaults to "haaretz.bgu").
        myEncoding: text encoding used to decode the corpus files.

    Sentences are split on blank lines ('\n\n' gaps tokenizer) and each
    sentence string is parsed into a chunk tree by self.__str2BguTree.
    """
    ChunkedCorpusReader.__init__(self, directory ,fileids , str2chunktree=self.__str2BguTree,sent_tokenizer=RegexpTokenizer('\n\n', gaps=True),encoding=myEncoding)
    # NOTE(review): `format` here is the *builtin* function, not a parameter
    # or local — this likely meant to store a format string/identifier.
    # Confirm the intended value before relying on self._format.
    self._format = format
示例2: chunked_paras
def chunked_paras(self, fileids=None, categories=None):
    """Return chunked paragraphs, optionally filtered by fileids or categories."""
    resolved_ids = self._resolve(fileids, categories)
    return ChunkedCorpusReader.chunked_paras(self, resolved_ids)
示例3: __init__
def __init__(self, *args, **kwargs):
    """Initialize a categorized, chunked corpus reader.

    CategorizedCorpusReader is handed the kwargs dict itself (not **kwargs):
    per NLTK convention it pops its own category options out of that dict,
    so the remaining kwargs are then safe to forward to ChunkedCorpusReader.
    Call order therefore matters — do not swap these two lines.
    """
    CategorizedCorpusReader.__init__(self, kwargs)
    ChunkedCorpusReader.__init__(self, *args, **kwargs)
示例4: tagged_paras
def tagged_paras(self, fileids=None, categories=None, simplify_tags=False):
    """Return tagged paragraphs, optionally filtered by fileids or categories."""
    resolved_ids = self._resolve(fileids, categories)
    return ChunkedCorpusReader.tagged_paras(self, resolved_ids, simplify_tags)
示例5: sents
def sents(self, fileids=None, categories=None):
    """Return sentences, optionally filtered by fileids or categories."""
    resolved_ids = self._resolve(fileids, categories)
    return ChunkedCorpusReader.sents(self, resolved_ids)
示例6: Tree
########## CHUNKED CORPUS READER ###############
###Implementing CCR
from nltk.corpus.reader import ChunkedCorpusReader
root="C:\\Users\\Matrix\\AppData\\Roaming\\nltk_data\\corpora\\cookbook\\"
reader=ChunkedCorpusReader(root,r'.*\.chunk')
#Each chunk-represented in braces is considered as a word
print reader.chunked_words()
#Each sentence will be included in a Tree()
print reader.chunked_sents()
print reader.chunked_paras()
#Getting tagged tokens for each chunk (each chunk is a word but each word is not a chunk)
print reader.chunked_words()[0].leaves()
print reader.chunked_sents()[1].leaves()
#Cant apply leaves directly to a para - but we can access a sentence of a given para.
print reader.chunked_para()[0][0].leaves()
###Implementing CCCR
from nltk.corpus.reader import ConllChunkCorpusReader
root="C:\\Users\\Matrix\\AppData\\Roaming\\nltk_data\\corpora\\cookbook\\"
reader=ConllChunkCorpusReader(root,r'.*\.iob',('NP','VP'.'PP'))
print reader.chunked_words()
print reader.chunked_sents()
print reader.iob_words()
print reader.iob_sents()
示例7: createChunker
def createChunker():
    """Build and return a TagChunker trained on the local chunked corpus.

    Reads chunked sentences from data/chunks/text_search.pos and trains
    a unigram-then-bigram backoff tag chunker on them.
    """
    corpus = ChunkedCorpusReader('data/chunks/', 'text_search.pos')
    training_sents = corpus.chunked_sents()
    return TagChunker(training_sents, [UnigramTagger, BigramTagger])
示例8: tagged_sents
def tagged_sents(self, fileids=None, categories=None):
    """Return tagged sentences, optionally filtered by fileids or categories.

    BUG FIX: the original called ``self_resolve(...)`` — an undefined name
    that would raise NameError at call time. The helper is ``self._resolve``,
    matching the sibling wrappers (sents, tagged_paras, chunked_paras).
    """
    return ChunkedCorpusReader.tagged_sents(
        self, self._resolve(fileids, categories))