

Python IIBTree.IISet class code examples

This article collects typical usage examples of BTrees.IIBTree.IISet in Python. If you are wondering how the IISet class is used in practice, or are looking for real-world examples, the hand-picked class examples below may help.


The following presents 15 code examples of the IISet class, sorted by popularity by default.
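
A quick primer before the examples: they all rely on the same small IISet API. The following is a minimal sketch (variable names and values are illustrative, not taken from any of the projects below) of constructing IISets and combining them with the module-level set operations from BTrees.IIBTree:

from BTrees.IIBTree import IISet, intersection, union, difference, multiunion

# Build sets of integer document ids (rids); IISet accepts any iterable of ints.
a = IISet([1, 2, 3, 5, 8])
b = IISet(range(4, 10))

a.insert(13)             # add a single key
print(3 in a)            # membership test -> True
print(len(a))            # number of keys -> 6

# The binary set operations return new IISets.
print(list(intersection(a, b)))   # keys present in both sets
print(list(union(a, b)))          # keys present in either set
print(list(difference(a, b)))     # keys in a but not in b

# multiunion() merges a whole sequence of sets in one call and is much faster
# than chaining union() -- the trick used by setOperation() in Example 11.
print(list(multiunion([a, b, IISet([42])])))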

Example 1: test_lookup

    def test_lookup(self):
        bigsize = 1000000
        smallsize = 1000
        large = IISet(xrange(bigsize))
        small = IISet(xrange(0, bigsize, bigsize/smallsize))

        start = time()
        for i in small:
            a = large[i]
        print "\ngetitem distributed %.6f" % (time()-start)

        start = time()
        for i in small:
            a = large[bigsize-1]
        print "getitem end %.6f" % (time()-start)

        start = time()
        for i in small:
            a = large[0]
        print "getitem start %.6f" % (time()-start)

        start = time()
        for i in small:
            a = large.has_key(i)
        print "\nhas_key distributed %.6f" % (time()-start)

        start = time()
        for i in small:
            a = large.has_key(bigsize-1)
        print "has_key end %.6f" % (time()-start)

        start = time()
        for i in small:
            a = large.has_key(0)
        print "has_key start %.6f" % (time()-start)
Developer: hannosch, Project: experimental.catalogqueryplan, Lines: 35, Source: test_performance.py

Example 2: FilteredSetBase

class FilteredSetBase(Persistent):

    def __init__(self, id, expr):
        self.id   = id
        self.expr = expr
        self.clear()


    def clear(self):
        self.ids  = IISet()


    def index_object(self, documentId, obj):
        raise RuntimeError,'index_object not defined'


    def unindex_object(self, documentId):
        # documentId may not be indexed
        try: self.ids.remove(documentId)
        except KeyError: pass


    def getId(self):            return self.id
    def getExpression(self):    return self.expr
    def getIds(self):           return self.ids
    def getType(self):          return self.meta_type

    def setExpression(self, expr): self.expr = expr

    def __repr__(self):
        return '%s: (%s) %s' % (self.id,self.expr,map(None,self.ids))

    __str__ = __repr__
Developer: OS2World, Project: APP-SERVER-Zope, Lines: 32, Source: FilteredSet.py

Example 3: _eval

def _eval(query, cat):
  '''evaluate *query* in the context of *cat* (a 'Products.ZCatalog.Catalog.Catalog').'''
  rs = query._eval(_QueryContext(cat))
  if isinstance(rs, ISearch):
    if hasattr(rs, 'asSet'): rs = rs.asSet()
    elif isinstance(rs, IBTree): rs = rs.getTree()
    else: hits = tuple(rs); rs = IISet(); rs.__setstate__((hits,))
  return rs
Developer: Vinsurya, Project: Plone, Lines: 8, Source: eval.py

Example 4: testFixed1843

 def testFixed1843(self):
     from BTrees.IIBTree import IISet
     t = IISet()
     t.insert(1)
     # This one used to fail to raise the TypeError when it occurred.
     self.assertRaises(TypeError, t.keys, "")
     # This one used to segfault.
     self.assertRaises(TypeError, t.keys, 0, "")
Developer: zopefoundation, Project: BTrees, Lines: 8, Source: testBTrees.py
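
For context, the two-argument form of keys() that the regression test above abuses is the range-query form. A minimal sketch of its intended use (the set contents are illustrative):

from BTrees.IIBTree import IISet

s = IISet(range(100))
# keys(min, max) returns the keys in the inclusive range [min, max];
# passing a non-integer bound is what used to segfault before this fix.
print(list(s.keys(10, 15)))   # -> [10, 11, 12, 13, 14, 15]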

Example 5: get

    def get(self, pattern):
        """ Query the lexicon for words matching a pattern."""

        # A single-character pattern produces a slicing problem below.
        # Because the splitter throws away single characters we can
        # return an empty tuple here.

        if len(pattern)==1: return ()

        wc_set = [self.multi_wc, self.single_wc]

        digrams = []
        globbing = 0
        for i in range(len(pattern)):
            if pattern[i] in wc_set:
                globbing = 1
                continue

            if i == 0:
                digrams.insert(i, (self.eow + pattern[i]) )
                digrams.append((pattern[i] + pattern[i+1]))
            else:
                try:
                    if pattern[i+1] not in wc_set:
                        digrams.append( pattern[i] + pattern[i+1] )

                except IndexError:
                    digrams.append( (pattern[i] + self.eow) )

        if not globbing:
            result =  self._lexicon.get(pattern, None)
            if result is None:
                return ()
            return (result, )

        ## now get all of the intsets that contain the result digrams
        result = None
        for digram in digrams:
            result=union(result, self._digrams.get(digram, None))

        if not result:
            return ()
        else:
            ## now we have narrowed the list of possible candidates
            ## down to those words which contain digrams.  However,
            ## some words may have been returned that match digrams,
            ## but do not match 'pattern'.  This is because some words
            ## may contain all matching digrams, but in the wrong
            ## order.

            expr = re.compile(self.createRegex(pattern))
            words = []
            hits = IISet()
            for x in result:
                if expr.match(self._inverseLex[x]):
                    hits.insert(x)
            return hits
Developer: OS2World, Project: APP-SERVER-Zope, Lines: 57, Source: GlobbingLexicon.py

Example 6: items

 def items(self):
     # return a list of (value, IISet of rids) pairs
     indexed = self._index_value
     items = [(bool(indexed), self._index)]
     false = IISet()
     for rid, value in self._unindex.iteritems():
         if value != indexed:
             false.add(rid)
     items.append((not bool(indexed), false))
     return items
Developer: eprigorodov, Project: Products.ZCatalog, Lines: 10, Source: BooleanIndex.py

Example 7: group

 def group(self, seq):
   sortIndex = self._sortIndex; sortReverse = self._sortReverse
   ns = len(seq); ni = len(sortIndex)
   if ns >= 0.1 * ni:
     # result large compared to index -- sort via index
     handled = IISet(); hn = 0
     _load = getattr(sortIndex, '_load', None)
     if _load is None:
       # not an optimized index
       items = sortIndex.items()
       
       _load = lambda (x1, x2): x2
       if sortReverse: items.reverse()
     elif sortReverse:
       gRO = getattr(sortIndex, 'getReverseOrder', None)
       items = gRO and gRO()
       if items is None:
         items = list(sortIndex._index.keys()); items.reverse()
     else: items = sortIndex._index.keys()
     for i in items:
       ids = intersection(seq, _load(i))
       if ids:
         handled.update(ids); hn += len(ids)
         yield i, ids
     if hn != len(seq): yield None, difference(seq, handled)
   else:
     # result relatively small -- sort via result
     m = OOBTree()
     keyFor = getattr(sortIndex, 'keyForDocument', None)
     # work around "nogopip" bug: it defines "keyForDocument" as an integer
     if not callable(keyFor):
       # this will fail, when the index neither defines a reasonable
       # "keyForDocument" nor "documentToKeyMap". In this case,
       # the index cannot be used for sorting.
       keyFor = lambda doc, map=sortIndex.documentToKeyMap(): map[doc]
     noValue = IITreeSet()
     for doc in seq.keys():
       try: k = keyFor(doc)
       except KeyError: noValue.insert(doc); continue
       l = m.get(k)
       if l is None: l = m[k] = IITreeSet()
       l.insert(doc)
     items = m.items()
     if sortReverse: items = list(items); items.reverse()
     for i in items: yield i
     if noValue: yield None, noValue
Developer: Vinsurya, Project: Plone, Lines: 46, Source: sorting.py

Example 8: lookupWordsBySimilarity

    def lookupWordsBySimilarity(self, word):       
        """ perform a similarity lookup """

        lst = self._lexicon.getSimiliarWords(word)

        docids = IISet()
        used_words = {} 

        getwid = self._lexicon.getWordId

        for word, threshold in lst:
            used_words[word] = threshold
            wid = getwid(word)

            docids.update( self._storage.get(wid) )

        return ResultSet(docids, used_words)
Developer: eaudeweb, Project: naaya, Lines: 17, Source: TextIndexNG.py

Example 9: AccountingFolder

class AccountingFolder(BaseFolder, BrowserDefaultMixin):
    """
    """
    security = ClassSecurityInfo()

    implements(interfaces.IAccountingFolder)

    meta_type = 'AccountingFolder'
    _at_rename_after_creation = True

    schema = AccountingFolder_schema

    ##code-section class-header #fill in your manual code here
    ##/code-section class-header

    # Methods

    # Manually created methods

    def __init__(self, oid, **kwargs):
        BaseFolder.__init__(self, oid, **kwargs)
        self._closing_transfers = IISet()

    security.declareProtected(permissions.View, 'getAccountingRoot')
    def getAccountingRoot(self):
        ''' Return 'self' as accounting root
        '''
        return self

    def displayContentsTab(self):
        """ Hide contents tab
        """
        return False

    def registerClosingDate(self, date):
        """ register closing transfer date
        """
        # strip time before insert
        date = int(DateTime(date.Date()))
        self._closing_transfers.insert(date)

    def getClosingDates(self):
        """ return all registered closing dates
        """
        return self._closing_transfers
Developer: rochecompaan, Project: Products.UpfrontAccounting, Lines: 45, Source: AccountingFolder.py

Example 10: _lookup

    def _lookup(self, words, do_autoexpand=1):
        """ search a word or a list of words in the lexicon and 
            return a ResultSet of found documents.
        """

        docids = IISet()
        used_words = {} 

        #  remove stopwords from data
        if self.use_stopwords:
            words = self.use_stopwords.process( words ) 

        if self.use_thesaurus and self.thesaurus_mode == 'expand_always':
            TH = ThesaurusRegistry.get(self.use_thesaurus)
            for word in words[:]:
                r = TH.getTermsFor(word)
                words.extend(r)

        for word in words:

            # perform casefolding if necessary
            if self.splitter_casefolding:
                word = word.lower()

            if self.use_normalizer:
                word = NormalizerRegistry.get(self.use_normalizer).process(word)    
 
            used_words[word] = 1.0

            wid = self._lexicon.getWordId(word)

            # Retrieve list of docIds for this wordid
            if wid is not None:
                docids.update( self._storage.get(wid) )

            # perform autoexpansion of terms by performing
            # a search using right-truncation
            if do_autoexpand and self.autoexpand and len(word) >= self.autoexpand_limit:
                rs = self.lookupWordsByTruncation(word, right=1)
                docids.update(rs.docIds())
                wlen = len(word)
                for w in rs.words().keys():
                    used_words[w] = TRUNC_WEIGHT[len(w)-wlen]

        return ResultSet(docids, used_words)
Developer: eaudeweb, Project: naaya, Lines: 45, Source: TextIndexNG.py

Example 11: setOperation

def setOperation(op, sets, isearch):
  '''perform *op* on *sets*. if *isearch*, return an incremental search.

  *op* may be '"and"' or '"or"'.

  Uses 'IncrementalSearch', if available.
  '''
  if not sets:
    if op == 'and': return # None means all results
    if isearch: search = IOr(); search.complete(); return search
    return IISet()
  # Note: "multiunion" is *much* faster than "IOr"!
  #if IAnd is not None and (isearch or len(sets) > 1):
  if IAnd is not None and (isearch or (op == 'and' and len(sets) > 1)):
    isets = []
    for set in sets:
      if set is None:
        # all results
        if op == 'and': continue
        else: return
      if not isinstance(set, ISearch): set = IBTree(set)
      isets.append(set)
    if op == 'and' and not isets: return # empty 'and'
    cl = op == 'and' and IAnd or IOr
    if len(isets) == 1:
      # do not wrap a one element search
      search = isets[0]
    else: search = cl(*isets); search.complete()
    if isearch: return search
    if hasattr(search, 'asSet'): r = search.asSet()
    else: r = IISet(); r.__setstate__((tuple(search),))
    return r
  if op == 'or' and len(sets) > 5:
    r = multiunion(sets)
  else:
    combine = op == 'and' and intersection or union
    r= None
    for set in sets: r= combine(r,set)
    if r is None:
      if combine is union: r = IISet()
      else: return
    if isearch: r = IBTree(r)
  return r
Developer: socialplanning, Project: Products.ManageableIndex, Lines: 43, Source: ManagableIndex.py
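
The comment above that "multiunion" is much faster than "IOr" refers to BTrees.IIBTree.multiunion, which merges a whole sequence of sets in a single call instead of building pairwise unions. A rough micro-benchmark sketch in the spirit of Example 1 (the set sizes are illustrative, not from the original project):

from time import time
from BTrees.IIBTree import IISet, union, multiunion

sets = [IISet(range(i, i + 1000)) for i in range(0, 100000, 1000)]

start = time()
r = None
for s in sets:
    r = union(r, s)            # pairwise union allocates an intermediate set per step
print("chained union %.6f (%d keys)" % (time() - start, len(r)))

start = time()
r = multiunion(sets)           # one call merges all the sets at once
print("multiunion    %.6f (%d keys)" % (time() - start, len(r)))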

Example 12: group

 def group(self, seq):
   sortIndex = self._sortIndex; sortReverse = self._sortReverse
   ns = len(seq); ni = len(sortIndex)
   if ns >= 0.1 * ni:
     # result large compared to index -- sort via index
     handled = IISet(); hn = 0
     _load = getattr(sortIndex, '_load', None)
     if _load is None:
       # not an optimized index
       items = sortIndex.items()
       
       _load = lambda (x1, x2): x2
       if sortReverse: items.reverse()
     elif sortReverse:
       gRO = getattr(sortIndex, 'getReverseOrder', None)
       items = gRO and gRO()
       if items is None:
         items = list(sortIndex._index.keys()); items.reverse()
     else: items = sortIndex._index.keys()
     for i in items:
       ids = intersection(seq, _load(i))
       if ids:
         handled.update(ids); hn += len(ids)
         yield i, ids
     if hn != len(seq): yield None, difference(seq, handled)
   else:
     # result relatively small -- sort via result
     keyFor = sortIndex.keyForDocument; m = OOBTree()
     noValue = IITreeSet()
     for doc in seq.keys():
       try: k = keyFor(doc)
       except KeyError: noValue.insert(doc); continue
       l = m.get(k)
       if l is None: l = m[k] = IITreeSet()
       l.insert(doc)
     items = m.items()
     if sortReverse: items = list(items); items.reverse()
     for i in items: yield i
     if noValue: yield None, noValue
Developer: a25kk, Project: stv2, Lines: 39, Source: sorting.py

Example 13: count

 def count(self, context, facet, intersect=None):
     if IQueryResults.providedBy(intersect):
         intersect = IISet(intersect.keys())
     sm = sitemanager_for(context)
     unique_name = '%s.%s' % (facet.name, self.name)
     cache_tools = queryUtility(ISetCacheTools, context=sm)
     invalidated = cache_tools.invalidated_records
     if not isinstance(invalidated, IISet):
         invalidated = IISet(invalidated)
     invalid = False  # stays False when intersect is not an IISet (e.g. None)
     if isinstance(intersect, IISet):
         invalid = len(intersection(intersect, invalidated)) > 0
     if unique_name in cache_tools.filter_setid_cache:
         setid = cache_tools.filter_setid_cache[unique_name]
         if setid in cache_tools.set_cache:
             if invalid:
                 del(cache_tools.set_cache[setid])
                 del(cache_tools.filter_setid_cache[unique_name])
             else:
                 records = cache_tools.set_cache[setid]
                 if intersect is None:
                     return len(records)
                 if isinstance(intersect, IISet):
                     #optimal to cast smaller set to match IISet.
                     return len(intersection(intersect, IISet(records)))
                 return len(set(intersect) & records)
     #otherwise, at this point, no cached value, so query catalog...
     qf = self(unique_name)
     runner = AdvancedQueryRunner(context)
     result = runner(qf)
     setid = result.setid
     cache_tools.set_cache[setid] = result.frozen
     cache_tools.filter_setid_cache[unique_name] = setid
     if intersect is None:
         return len(result)
     if isinstance(intersect, IISet):
         return len(intersection(intersect, IISet(result.frozen)))
     return len(set(intersect) & result.frozen)
Developer: gu-eresearch, Project: plone.app.folderui, Lines: 37, Source: defaults.py

Example 14: nearResultSets

def nearResultSets(sets, index, distance=5, bidirectional=1):
    """ perform near search on results sets """
    
    # One result set consists of an IISet() of documentIds and a
    # tuple whose first element is the word (from LexiconLookup()).
    # First we perform an intersection to get the documentIds of
    # those documents that contain all the words.

    docids =  intersectResultSets(sets).docIds()

    # Now we determine, for every document, the positions of all
    # the words inside the document. Then we compare all the positions
    # to determine whether the words are near each other.
    
    words = []
    for set in sets:
        for word in set.words().keys():
            words.append(word)

    res_docids = IISet()

    for docId in docids:
        # the posMap is a list of tuples(word,IISet[positions])
        posMap = index.positionsFromDocumentLookup(docId, words)

        if bidirectional:
            if len(posMap.checkPositionMapBidirectional(distance)) > 0:
                res_docids.insert(docId)
        else:
            if len(posMap.checkPositionMapUnidirectional(distance)) > 0:
                res_docids.insert(docId)

    d = {}
    for w in words: d[w] = 1.0

    return ResultSet(res_docids, d)       
Developer: eaudeweb, Project: naaya, Lines: 36, Source: ResultSet.py

Example 15: __init__

    def __init__(self, id, title, skelton, fileattache, parent, elements):
        if elements:
            from Products.ZCTextIndex.ZCTextIndex import manage_addLexicon
            manage_addLexicon(self,id='lexicon',elements = elements)

        self.__of__(parent)._buildIndexing(id,title)

        t=time()     
        self.created  = t     
        self.modified = t     

        self.fileattache = fileattache

        self.data     =IOBTree()  # id -> Message     
        self.ids      =IISet() # ids of children

        self.loadSkelton(None, skelton)
        self.loadProperties(skelton)
        self.skelton = skelton
Developer: nakagami, Project: Products.Zch, Lines: 19, Source: ZchSite.py


Note: The BTrees.IIBTree.IISet class examples in this article were collected by 纯净天空 from GitHub, MSDocs and other open-source code and documentation platforms. The snippets are taken from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors, and distribution and use should follow each project's License. Do not reproduce without permission.