This article collects typical usage examples of the keys method of Python's BTrees.IIBTree.IISet. If you are wondering exactly what IISet.keys does or how to call it, the selected examples below may help; you can also read further about the containing class, BTrees.IIBTree.IISet.
The following shows 2 code examples of the IISet.keys method, ordered by popularity by default.
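Before the examples, here is a minimal sketch of IISet and its keys method; the set contents are invented purely for illustration.

from BTrees.IIBTree import IISet, intersection

# An IISet is a persistent, sorted set of integer keys.
s = IISet([3, 1, 2])

# keys() returns the elements in sorted order as a lazy sequence.
print(list(s.keys()))                        # [1, 2, 3]

# A pattern used in Example 1 below: rebuild an IISet from another
# mapping's keys so it can be intersected at the C level.
other = IISet([2, 3, 5])
print(list(intersection(s, other).keys()))   # [2, 3]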
Example 1: count
# Required import: from BTrees.IIBTree import IISet [as alias]
# Or alternatively: from BTrees.IIBTree.IISet import keys [as alias]
def count(self, context, facet, intersect=None):
    if IQueryResults.providedBy(intersect):
        intersect = IISet(intersect.keys())
    sm = sitemanager_for(context)
    unique_name = '%s.%s' % (facet.name, self.name)
    cache_tools = queryUtility(ISetCacheTools, context=sm)
    invalidated = cache_tools.invalidated_records
    if not isinstance(invalidated, IISet):
        invalidated = IISet(invalidated)
    invalid = False  # default, so the cache check below never hits an unbound name
    if isinstance(intersect, IISet):
        invalid = len(intersection(intersect, invalidated)) > 0
    if unique_name in cache_tools.filter_setid_cache:
        setid = cache_tools.filter_setid_cache[unique_name]
        if setid in cache_tools.set_cache:
            if invalid:
                del cache_tools.set_cache[setid]
                del cache_tools.filter_setid_cache[unique_name]
            else:
                records = cache_tools.set_cache[setid]
                if intersect is None:
                    return len(records)
                if isinstance(intersect, IISet):
                    # optimal to cast smaller set to match IISet.
                    return len(intersection(intersect, IISet(records)))
                return len(set(intersect) & records)
    # otherwise, at this point, no cached value, so query catalog...
    qf = self(unique_name)
    runner = AdvancedQueryRunner(context)
    result = runner(qf)
    setid = result.setid
    cache_tools.set_cache[setid] = result.frozen
    cache_tools.filter_setid_cache[unique_name] = setid
    if intersect is None:
        return len(result)
    if isinstance(intersect, IISet):
        return len(intersection(intersect, IISet(result.frozen)))
    return len(set(intersect) & result.frozen)
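The heart of Example 1 is counting how many cached record ids survive an intersection with the caller's result set. Below is a minimal sketch of that counting step with invented ids; the cache wiring (ISetCacheTools, AdvancedQueryRunner, setid bookkeeping) is application-specific and omitted.

from BTrees.IIBTree import IISet, intersection

cached_records = IISet([10, 20, 30, 40])   # stand-in for set_cache[setid]
query_result = IISet([20, 40, 50])         # stand-in for the intersect argument

# Casting both operands to IISet keeps the intersection in BTrees itself.
print(len(intersection(query_result, cached_records)))        # 2

# Fallback used when intersect is an ordinary Python iterable:
print(len(set([20, 40, 50]) & set(cached_records.keys())))    # 2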
Example 2: index_object
# Required import: from BTrees.IIBTree import IISet [as alias]
# Or alternatively: from BTrees.IIBTree.IISet import keys [as alias]
def index_object(self, documentId, obj, threshold=None):
    """ Index an object:
    'documentId' is the integer id of the document
    'obj' is the object to be indexed
    'threshold' is the number of words to process between
    committing subtransactions.  If 'None' subtransactions are
    disabled. """
    # sniff the object for our 'id', the 'document source' of the
    # index is this attribute.  If it smells callable, call it.
    try:
        source = getattr(obj, self.id)
        if safe_callable(source):
            source = source()
        if not isinstance(source, UnicodeType):
            source = str(source)
    except (AttributeError, TypeError):
        return 0
    # sniff the object for 'id'+'_encoding'
    try:
        encoding = getattr(obj, self.id + '_encoding')
        if safe_callable(encoding):
            encoding = str(encoding())
        else:
            encoding = str(encoding)
    except (AttributeError, TypeError):
        encoding = 'latin1'
    lexicon = self.getLexicon()
    splitter = lexicon.Splitter
    wordScores = OIBTree()
    last = None
    # Run through the words and score them
    for word in list(splitter(source, encoding=encoding)):
        if word[0] == '\"':
            last = self._subindex(word[1:-1], wordScores, last, splitter)
        else:
            if word == last:
                continue
            last = word
            wordScores[word] = wordScores.get(word, 0) + 1
    # Convert scores to use wids:
    widScores = IIBucket()
    getWid = lexicon.getWordId
    for word, score in wordScores.items():
        widScores[getWid(word)] = score
    del wordScores
    currentWids = IISet(self._unindex.get(documentId, []))
    # Get rid of document words that are no longer indexed
    self.unindex_objectWids(documentId, difference(currentWids, widScores))
    # Now index the words. Note that the new xIBTrees are clever
    # enough to do nothing when there isn't a change. Woo hoo.
    insert = self.insertForwardIndexEntry
    for wid, score in widScores.items():
        insert(wid, documentId, score)
    # Save the unindexing info if it's changed:
    wids = widScores.keys()
    if wids != currentWids.keys():
        self._unindex[documentId] = wids
    return len(wids)
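Example 2 leans on two IISet-family operations: difference, to find word ids that have dropped out of a document, and a keys() comparison, to decide whether the stored unindex entry needs rewriting. A small illustration with invented word ids follows; the wid values are made up.

from BTrees.IIBTree import IISet, IIBucket, difference

current_wids = IISet([1, 2, 3])      # wids previously stored for the document
new_scores = IIBucket()              # freshly computed wid -> score mapping
new_scores.update({2: 1, 3: 2, 4: 1})

# Wids present before but missing from the new scores must be unindexed.
print(list(difference(current_wids, new_scores)))             # [1]

# Comparing keys() snapshots mirrors the 'did the entry change?' test.
print(list(new_scores.keys()) != list(current_wids.keys()))   # True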