This article collects typical usage examples of the Python method persistent.mapping.PersistentMapping.clear. If you have been wondering what exactly PersistentMapping.clear does, how to call it, or what it looks like in real code, the curated examples below may help. You can also look further into usage examples of the containing class, persistent.mapping.PersistentMapping.
Three code examples of the PersistentMapping.clear method are shown below, ordered by popularity by default.
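Before the examples taken from real projects, here is a minimal, self-contained sketch of what PersistentMapping.clear does. It is illustrative only: it assumes a throw-away in-memory ZODB created with ZODB.DB(None), and the key names are made up for the demonstration.

    import transaction
    from ZODB import DB
    from persistent.mapping import PersistentMapping

    db = DB(None)                      # in-memory storage, for illustration only
    conn = db.open()
    root = conn.root()

    root['scores'] = PersistentMapping({'alpha': 1, 'beta': 2})
    transaction.commit()

    root['scores'].clear()             # empties the mapping and marks it as changed
    transaction.commit()
    print(len(root['scores']))         # -> 0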
Example 1: clear
# Required import: from persistent.mapping import PersistentMapping

    def clear(self):
        # Method of a dict-like class (its base class BaseDict is not shown here):
        # clear the underlying mapping, then reset the ordered key list.
        BaseDict.clear(self)
        self._keys = []
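The method above belongs to a dict-like class that keeps a separate list of its keys; BaseDict itself is not shown on this page. A minimal self-contained sketch of the same pattern, assuming the base class were PersistentMapping itself and using a hypothetical OrderedPersistentDict name, could look like this:

    from persistent.mapping import PersistentMapping

    class OrderedPersistentDict(PersistentMapping):
        # Illustrative only: keeps a separate list of keys in insertion order.
        def __init__(self):
            PersistentMapping.__init__(self)
            self._keys = []

        def __setitem__(self, key, value):
            if key not in self._keys:
                self._keys.append(key)
            PersistentMapping.__setitem__(self, key, value)

        def clear(self):
            # Delegate to the base clear, then reset the key-order bookkeeping.
            PersistentMapping.clear(self)
            self._keys = []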
Example 2: NounPhraseStorage
# Required import: from persistent.mapping import PersistentMapping
# The class below additionally relies on:
#   from operator import itemgetter
#   from persistent import Persistent
#   from persistent.list import PersistentList
#   from zope.interface import implements
#   from zope.component import getUtility
#   ranks_from_scores (presumably nltk.metrics.spearman.ranks_from_scores),
#   INounPhraseStorage and ITermExtractor are provided by the surrounding package (not shown).

    class NounPhraseStorage(Persistent):
        """A storage utility to keep noun-phrases in the ZODB.
        """
        implements(INounPhraseStorage)

        def __init__(self):
            self.rankedNouns = PersistentMapping()
            self.rankedNPs = PersistentMapping()
            self.extractor = getUtility(ITermExtractor)
            self.friendlyTypes = PersistentList()

        def _scoresToRanks(self, rankdict):
            # Sort terms by score (highest first) and convert the scores to ranks.
            scored_items = sorted(rankdict.items(), key=itemgetter(1), reverse=True)
            ranked_items = [
                ranked_item
                for ranked_item in
                ranks_from_scores(scored_items)]
            return ranked_items

        def addDocument(self, doc_id, text):
            """Extract nouns and noun phrases from the text and store their ranks."""
            (noun_scores, noun_phrase_scores) = self.extractor.extract(text)
            if noun_scores:
                ranked_nouns = self._scoresToRanks(noun_scores)
                self.rankedNouns[doc_id] = ranked_nouns
            if noun_phrase_scores:
                ranked_nps = self._scoresToRanks(noun_phrase_scores)
                self.rankedNPs[doc_id] = ranked_nps

        def _derankTerms(self, rankedTerms):
            # Drop the rank, keeping only the terms.
            return [term for (term, rank) in rankedTerms]

        def getRankedTerms(self, doc_id, ranksToKeep=0):
            """Return (nouns, noun phrases) with their ranks for a document."""
            ranked_nouns = self.rankedNouns.get(doc_id, [])
            ranked_nps = self.rankedNPs.get(doc_id, [])
            if ranksToKeep:
                ranked_nouns = [
                    (noun, score)
                    for (noun, score) in ranked_nouns
                    if score < ranksToKeep]
                ranked_nps = [
                    (np, score)
                    for (np, score) in ranked_nps
                    if score < ranksToKeep]
            return (ranked_nouns, ranked_nps)

        def getTerms(self, doc_id, ranksToKeep=0):
            (ranked_nouns, ranked_nps) = self.getRankedTerms(doc_id, ranksToKeep)
            ranked_nouns = self._derankTerms(ranked_nouns)
            ranked_nps = self._derankTerms(ranked_nps)
            return (ranked_nouns, ranked_nps)

        def getRankedNounTerms(self, doc_id, ranksToKeep=0):
            """Return the ranked nouns for a document."""
            ranked_nouns = self.rankedNouns.get(doc_id, [])
            if ranksToKeep:
                ranked_nouns = [
                    (noun, score)
                    for (noun, score) in ranked_nouns
                    if score < ranksToKeep]
            return ranked_nouns

        def getRankedNPTerms(self, doc_id, ranksToKeep=0):
            """Return the ranked noun phrases for a document."""
            ranked_nps = self.rankedNPs.get(doc_id, [])
            if ranksToKeep:
                ranked_nps = [
                    (np, score)
                    for (np, score) in ranked_nps
                    if score < ranksToKeep]
            return ranked_nps

        def getNounTerms(self, doc_id, ranksToKeep=0):
            ranked_nouns = self.getRankedTerms(doc_id, ranksToKeep)[0]
            ranked_nouns = self._derankTerms(ranked_nouns)
            return ranked_nouns

        def getNPTerms(self, doc_id, ranksToKeep=0):
            ranked_nps = self.getRankedTerms(doc_id, ranksToKeep)[1]
            ranked_nps = self._derankTerms(ranked_nps)
            return ranked_nps

        def clear(self):
            """Wipes the storage."""
            self.rankedNouns.clear()
            self.rankedNPs.clear()
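A hedged sketch of how this storage's clear method might be exercised: the utility is looked up through zope.component and the change is committed like any other ZODB write. INounPhraseStorage is the project-specific interface used above, and the function name reset_noun_phrase_storage is made up for the example.

    import transaction
    from zope.component import getUtility

    def reset_noun_phrase_storage():
        # Look up the registered utility and wipe both PersistentMapping attributes.
        storage = getUtility(INounPhraseStorage)
        storage.clear()        # PersistentMapping.clear() on rankedNouns and rankedNPs
        transaction.commit()   # persist the emptied mappings in the ZODB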
Example 3: NounBayesClassifier
# Required import: from persistent.mapping import PersistentMapping
# The class below additionally relies on:
#   from persistent import Persistent
#   from BTrees.OOBTree import OOSet, union
#   from zope.interface import implements
#   from zope.component import getUtility
#   from nltk import NaiveBayesClassifier
#   IContentClassifier and INounPhraseStorage are project-specific interfaces (not shown).

    class NounBayesClassifier(Persistent):
        """A naive-Bayes content classifier built on noun terms stored in the ZODB.
        """
        implements(IContentClassifier)

        def __init__(self, tagger=None, noNounRanksToKeep=20):
            self.noNounRanksToKeep = noNounRanksToKeep
            self.trainingDocs = PersistentMapping()
            self.allNouns = OOSet()
            self.classifier = None
            self.trainAfterUpdate = True

        def addTrainingDocument(self, doc_id, tags):
            """Record a document's important nouns and tags for training."""
            storage = getUtility(INounPhraseStorage)
            importantNouns = storage.getNounTerms(doc_id, self.noNounRanksToKeep)
            self.trainingDocs[doc_id] = (importantNouns, tags)
            self.allNouns = union(self.allNouns, OOSet(importantNouns))

        def train(self):
            """(Re)build the NLTK naive-Bayes classifier from the training documents."""
            presentNouns = dict()
            trainingData = []
            if not self.allNouns:
                storage = getUtility(INounPhraseStorage)
                for key in self.trainingDocs.keys():
                    importantNouns = storage.getNounTerms(
                        key,
                        self.noNounRanksToKeep)
                    self.allNouns = union(self.allNouns, OOSet(importantNouns))
            for item in self.allNouns:
                presentNouns.setdefault(item, 0)
            for (nouns, tags) in self.trainingDocs.values():
                # One feature dict per document: 1 if the noun occurs, 0 otherwise.
                nounPresence = presentNouns.copy()
                for noun in nouns:
                    nounPresence[noun] = 1
                for tag in tags:
                    trainingData.append((nounPresence, tag,))
            if trainingData:
                self.classifier = NaiveBayesClassifier.train(trainingData)

        def classify(self, doc_id):
            """Return the most likely tag for a document, or [] if untrained."""
            if not self.classifier:
                return []
            presentNouns = dict()
            for item in self.allNouns:
                presentNouns.setdefault(item, 0)
            storage = getUtility(INounPhraseStorage)
            importantNouns = storage.getNounTerms(doc_id, self.noNounRanksToKeep)
            for noun in importantNouns:
                if noun in presentNouns.keys():
                    presentNouns[noun] = 1
            return self.classifier.classify(presentNouns)

        def probabilityClassify(self, doc_id):
            """Return the tag probability distribution for a document, or [] if untrained."""
            if not self.classifier:
                return []
            presentNouns = dict()
            for item in self.allNouns:
                presentNouns.setdefault(item, 0)
            storage = getUtility(INounPhraseStorage)
            importantNouns = storage.getNounTerms(doc_id, self.noNounRanksToKeep)
            for noun in importantNouns:
                if noun in presentNouns.keys():
                    presentNouns[noun] = 1
            return self.classifier.prob_classify(presentNouns)

        def clear(self):
            """Wipes the classifier's data."""
            self.allNouns.clear()
            self.trainingDocs.clear()

        def tags(self):
            if not self.classifier:
                return []
            return self.classifier.labels()
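To round off, a hedged sketch of a typical train/classify/clear cycle with this class. IContentClassifier is the project-specific interface implemented above, and retag_document is a made-up helper name for the example.

    from zope.component import getUtility

    def retag_document(doc_id, tags):
        classifier = getUtility(IContentClassifier)
        classifier.addTrainingDocument(doc_id, tags)  # store nouns and tags for the document
        classifier.train()                            # rebuild the NLTK NaiveBayesClassifier
        return classifier.classify(doc_id)            # most likely tag for the document

    # Starting over: clear() empties both the OOSet of nouns and the
    # PersistentMapping of training documents.
    #     classifier.clear()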