

Python cache.sgm Function Code Examples

This article collects typical usage examples of the Python function r2.lib.cache.sgm. If you have been wondering what sgm does, how to call it, or what real uses of it look like, the curated examples below may help.


Fifteen code examples of the sgm function are shown below, ordered by popularity by default.
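
Across these examples, sgm follows a cache-aside pattern: for each key it looks up prefix + key in the given cache, calls miss_fn once with the keys that were not found, writes those results back to the cache, and returns a merged dict. The sketch below only illustrates that call shape, as suggested by the examples; some_cache is a hypothetical stand-in for a cache object such as g.cache and is not part of r2.lib.cache.

# A minimal usage sketch, assuming the cache-aside behavior suggested by the
# examples below; some_cache is a hypothetical cache object (e.g. g.cache).
from r2.lib.cache import sgm

def load_from_db(missing_ids):
    # Called once with only the keys that were not found in the cache; must
    # return a dict mapping each missing key to its value. A real miss_fn
    # would hit the database here (compare items_db in the examples below).
    return dict((i, "value-for-%s" % i) for i in missing_ids)

# Looks up "thing:1", "thing:2", "thing:3" in some_cache, calls load_from_db
# for the misses, caches those results for 60 seconds, and returns the merged
# {id: value} dict.
things = sgm(some_cache, [1, 2, 3], miss_fn=load_from_db,
             prefix="thing:", time=60, stale=True)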

Example 1: _byID

    def _byID(cls, ids, data=False, return_dict=True, extra_props=None,
              stale=False, ignore_missing=False):
        ids, single = tup(ids, True)
        prefix = thing_prefix(cls.__name__)

        if not all(x <= tdb.MAX_THING_ID for x in ids):
            raise NotFound('huge thing_id in %r' % ids)

        def count_found(ret, still_need):
            cls._cache.stats.cache_report(
                hits=len(ret), misses=len(still_need),
                cache_name='sgm.%s' % cls.__name__)

        if not cls._cache.stats:
            count_found = None

        def items_db(ids):
            items = cls._get_item(cls._type_id, ids)
            for i in items.keys():
                items[i] = cls._build(i, items[i])

            return items

        bases = sgm(cls._cache, ids, items_db, prefix, stale=stale,
                    found_fn=count_found)

        # Check to see if we found everything we asked for
        missing = []
        for i in ids:
            if i not in bases:
                missing.append(i)
            elif bases[i] and bases[i]._id != i:
                g.log.error("thing.py: Doppleganger on byID: %s got %s for %s" %
                            (cls.__name__, bases[i]._id, i))
                bases[i] = items_db([i]).values()[0]
                bases[i]._cache_myself()
        if missing and not ignore_missing:
            raise NotFound, '%s %s' % (cls.__name__, missing)
        for i in missing:
            ids.remove(i)

        if data:
            need = []
            for v in bases.itervalues():
                if not v._loaded:
                    need.append(v)
            if need:
                cls._load_multi(need)

        if extra_props:
            for _id, props in extra_props.iteritems():
                for k, v in props.iteritems():
                    bases[_id].__setattr__(k, v, False)

        if single:
            return bases[ids[0]] if ids else None
        elif return_dict:
            return bases
        else:
            return filter(None, (bases.get(i) for i in ids))
Author: Acceto, Project: reddit, Lines: 60, Source: thing.py

Example 2: get_live_promotions

def get_live_promotions(sr_names):
    sanitized_names = [SPECIAL_NAMES.get(name, name) for name in sr_names]
    promos_by_sanitized_name = sgm(
        g.cache, sanitized_names, miss_fn=_get_live_promotions, prefix="live_promotions", time=60, stale=True
    )
    promos_by_srname = {REVERSED_NAMES.get(name, name): val for name, val in promos_by_sanitized_name.iteritems()}
    return itertools.chain.from_iterable(promos_by_srname.itervalues())
Author: karthikv, Project: reddit, Lines: 7, Source: promote.py

Example 3: _byID

    def _byID(cls, ids):
        ids, is_single = tup(ids, True)

        if not len(ids):
            if is_single:
                raise InvariantException("whastis?")
            else:
                return {}

        # all keys must be strings or directly convertable to strings
        assert all(isinstance(_id, basestring) and str(_id) for _id in ids)

        def lookup(l_ids):
            rows = cls.cf.multiget(l_ids, column_count=max_column_count)

            l_ret = {}
            for t_id, row in rows.iteritems():
                t = cls._from_serialized_columns(t_id, row)
                l_ret[t._id] = t

            return l_ret

        ret = cache.sgm(thing_cache, ids, lookup, prefix=cls._cache_prefix())

        if is_single and not ret:
            raise NotFound("<%s %r>" % (cls.__name__,
                                        ids[0]))
        elif is_single:
            assert len(ret) == 1
            return ret.values()[0]

        return ret
Author: JediWatchman, Project: reddit, Lines: 32, Source: tdb_cassandra.py

Example 4: _byID

    def _byID(cls, ids, data=False, return_dict=True, extra_props=None):
        ids, single = tup(ids, True)
        prefix = thing_prefix(cls.__name__)

        def items_db(ids):
            items = cls._get_item(cls._type_id, ids)
            for i in items.keys():
                items[i] = cls._build(i, items[i])

            return items

        bases = sgm(cache, ids, items_db, prefix)

        #check to see if we found everything we asked for
        if any(i not in bases for i in ids):
            missing = [i for i in ids if i not in bases]
            raise NotFound, '%s %s' % (cls.__name__, missing)

        if data:
            need = [v for v in bases.itervalues() if not v._loaded]
            if need:
                cls._load_multi(need)

        #e.g. add the sort prop
        if extra_props:
            for _id, props in extra_props.iteritems():
                for k, v in props.iteritems():
                    bases[_id].__setattr__(k, v, False)

        if single:
            return bases[ids[0]]
        elif return_dict:
            return bases
        else:
            return filter(None, (bases.get(i) for i in ids))
Author: rajbot, Project: tikical, Lines: 35, Source: thing.py

Example 5: location_by_ips

def location_by_ips(ips):
    ips, is_single = tup(ips, ret_is_single=True)
    location_by_ip = sgm(g.cache, ips, miss_fn=_location_by_ips, prefix="location_by_ip", time=GEOIP_CACHE_TIME)
    if is_single and location_by_ip:
        return location_by_ip[ips[0]]
    else:
        return location_by_ip
Author: pra85, Project: reddit, Lines: 7, Source: geoip.py

Example 6: normalized_hot

def normalized_hot(sr_ids, obey_age_limit=True, ageweight=None):
    timer = g.stats.get_timer("normalized_hot")
    timer.start()

    if not sr_ids:
        return []

    if ageweight and feature.is_enabled("scaled_normalized_hot"):
        tuples_by_srid = get_hot_tuples(sr_ids, ageweight=ageweight)
    else:
        tuples_by_srid = sgm(g.cache, sr_ids, miss_fn=get_hot_tuples,
                             prefix='normalized_hot', time=g.page_cache_time)

    if obey_age_limit:
        cutoff = datetime.now(g.tz) - timedelta(days=g.HOT_PAGE_AGE)
        oldest = epoch_seconds(cutoff)
    else:
        oldest = 0.

    merged = heapq.merge(*tuples_by_srid.values())
    generator = (link_name for ehot, hot, link_name, timestamp in merged
                           if timestamp > oldest)
    ret = list(itertools.islice(generator, MAX_LINKS))
    timer.stop()
    return ret
Author: SovietMan, Project: reddit, Lines: 25, Source: normalized_hot.py

Example 7: _byID

    def _byID(cls, ids, data=False, return_dict=True, extra_props=None, stale=False):
        ids, single = tup(ids, True)
        prefix = thing_prefix(cls.__name__)

        if not all(x <= tdb.MAX_THING_ID for x in ids):
            raise NotFound("huge thing_id in %r" % ids)

        def count_found(ret, still_need):
            cache.stats.cache_report(hits=len(ret), misses=len(still_need), cache_name="sgm.%s" % cls.__name__)

        if not cache.stats:
            count_found = None

        def items_db(ids):
            items = cls._get_item(cls._type_id, ids)
            for i in items.keys():
                items[i] = cls._build(i, items[i])

            return items

        bases = sgm(cache, ids, items_db, prefix, stale=stale, found_fn=count_found)

        # check to see if we found everything we asked for
        for i in ids:
            if i not in bases:
                missing = [i for i in ids if i not in bases]
                raise NotFound, "%s %s" % (cls.__name__, missing)
            if bases[i] and bases[i]._id != i:
                g.log.error("thing.py: Doppleganger on byID: %s got %s for %s" % (cls.__name__, bases[i]._id, i))
                bases[i] = items_db([i]).values()[0]
                bases[i]._cache_myself()

        if data:
            need = []
            for v in bases.itervalues():
                v._asked_for_data = True
                if not v._loaded:
                    need.append(v)
            if need:
                cls._load_multi(need)
        ### The following is really handy for debugging who's forgetting data=True:
        #       else:
        #           for v in bases.itervalues():
        #                if v._id in (1, 2, 123):
        #                    raise ValueError

        # e.g. add the sort prop
        if extra_props:
            for _id, props in extra_props.iteritems():
                for k, v in props.iteritems():
                    bases[_id].__setattr__(k, v, False)

        if single:
            return bases[ids[0]]
        elif return_dict:
            return bases
        else:
            return filter(None, (bases.get(i) for i in ids))
Author: nandhinijie, Project: reddit, Lines: 58, Source: thing.py

Example 8: organization_by_ips

def organization_by_ips(ips):
    ips, is_single = tup(ips, ret_is_single=True)
    organization_by_ip = sgm(g.cache, ips, miss_fn=_organization_by_ips,
                             prefix='organization_by_ip',
                             time=GEOIP_CACHE_TIME)
    if is_single and organization_by_ip:
        return organization_by_ip[ips[0]]
    else:
        return organization_by_ip
Author: APerson241, Project: reddit, Lines: 9, Source: geoip.py

Example 9: _by_name

    def _by_name(cls, names, stale=False, _update = False):
        '''
        Usages: 
        1. Subreddit._by_name('funny') # single sr name
        Searches for a single subreddit. Returns a single Subreddit object or 
        raises NotFound if the subreddit doesn't exist.
        2. Subreddit._by_name(['aww','iama']) # list of sr names
        Searches for a list of subreddits. Returns a dict mapping srnames to 
        Subreddit objects. Items that were not found are omitted from the dict.
        If no items are found, an empty dict is returned.
        '''
        #lower name here so there is only one cache
        names, single = tup(names, True)

        to_fetch = {}
        ret = {}

        for name in names:
            ascii_only = str(name.decode("ascii", errors="ignore"))
            lname = ascii_only.lower()

            if lname in cls._specials:
                ret[name] = cls._specials[lname]
            elif len(lname) > Subreddit.MAX_SRNAME_LENGTH:
                g.log.debug("Subreddit._by_name() ignoring invalid srname (too long): %s", lname)
            else:
                to_fetch[lname] = name

        if to_fetch:
            def _fetch(lnames):
                q = cls._query(lower(cls.c.name) == lnames,
                               cls.c._spam == (True, False),
                               limit = len(lnames),
                               data=True)
                try:
                    srs = list(q)
                except UnicodeEncodeError:
                    print "Error looking up SRs %r" % (lnames,)
                    raise

                return dict((sr.name.lower(), sr._id)
                            for sr in srs)

            srs = {}
            srids = sgm(g.cache, to_fetch.keys(), _fetch, prefix='subreddit.byname', stale=stale)
            if srids:
                srs = cls._byID(srids.values(), data=True, return_dict=False, stale=stale)

            for sr in srs:
                ret[to_fetch[sr.name.lower()]] = sr

        if ret and single:
            return ret.values()[0]
        elif not ret and single:
            raise NotFound, 'Subreddit %s' % name
        else:
            return ret
Author: Chef1991, Project: reddit, Lines: 57, Source: subreddit.py

Example 10: _fast_query

        def _fast_query(cls, thing1s, thing2s, name, data=True):
            """looks up all the relationships between thing1_ids and thing2_ids
            and caches them"""
            prefix = thing_prefix(cls.__name__)

            thing1_dict = dict((t._id, t) for t in tup(thing1s))
            thing2_dict = dict((t._id, t) for t in tup(thing2s))

            thing1_ids = thing1_dict.keys()
            thing2_ids = thing2_dict.keys()

            name = tup(name)

            pairs = set((x, y, n)
                        for x in thing1_ids
                        for y in thing2_ids
                        for n in name)

            def items_db(pairs):
                t1_ids = set()
                t2_ids = set()
                names = set()
                for t1, t2, name in pairs:
                    t1_ids.add(t1)
                    t2_ids.add(t2)
                    names.add(name)

                q = cls._query(cls.c._thing1_id == t1_ids,
                               cls.c._thing2_id == t2_ids,
                               cls.c._name == names,
                               eager_load = True,
                               data = data)

                rel_ids = {}
                for rel in q:
                    #TODO an alternative for multiple
                    #relations with the same keys
                    #l = rel_ids.setdefault((rel._thing1_id, rel._thing2_id), [])
                    #l.append(rel._id)
                    rel_ids[(rel._thing1._id, rel._thing2._id, rel._name)] = rel._id
                
                for p in pairs:
                    if p not in rel_ids:
                        rel_ids[p] = None
                        
                return rel_ids

            res = sgm(cache, pairs, items_db, prefix)
            #convert the keys back into objects
            #we can assume the rels will be in the cache and just call
            #_byID lots
            res_obj = {}
            for k, rid in res.iteritems():
                obj_key = (thing1_dict[k[0]], thing2_dict[k[1]], k[2])
                res_obj[obj_key] = cls._byID(rid, data=data) if rid else None
                
            return res_obj
Author: rajbot, Project: tikical, Lines: 57, Source: thing.py

Example 11: normalized_rising

def normalized_rising(sr_ids):
    if not sr_ids:
        return []

    tuples_by_srid = sgm(g.cache, sr_ids, miss_fn=get_rising_tuples,
                         prefix='normalized_rising', time=g.page_cache_time)

    merged = heapq.merge(*tuples_by_srid.values())

    return [link_name for norm_score, score, link_name in merged]
Author: Bebetz, Project: reddit, Lines: 10, Source: rising.py

Example 12: _fast_query

        def _fast_query(cls, thing1s, thing2s, name, data=True, eager_load=True, thing_data=False):
            """looks up all the relationships between thing1_ids and
               thing2_ids and caches them"""
            prefix = thing_prefix(cls.__name__)

            thing1_dict = dict((t._id, t) for t in tup(thing1s))
            thing2_dict = dict((t._id, t) for t in tup(thing2s))

            thing1_ids = thing1_dict.keys()
            thing2_ids = thing2_dict.keys()

            name = tup(name)

            # permute all of the pairs
            pairs = set((x, y, n) for x in thing1_ids for y in thing2_ids for n in name)

            def lookup_rel_ids(pairs):
                rel_ids = {}

                t1_ids = set()
                t2_ids = set()
                names = set()
                for t1, t2, name in pairs:
                    t1_ids.add(t1)
                    t2_ids.add(t2)
                    names.add(name)

                if t1_ids and t2_ids and names:
                    q = cls._query(cls.c._thing1_id == t1_ids, cls.c._thing2_id == t2_ids, cls.c._name == names)
                else:
                    q = []

                for rel in q:
                    rel_ids[(rel._thing1_id, rel._thing2_id, rel._name)] = rel._id

                for p in pairs:
                    if p not in rel_ids:
                        rel_ids[p] = None

                return rel_ids

            # get the relation ids from the cache or query the db
            res = sgm(cls._cache, pairs, lookup_rel_ids, prefix)

            # get the relation objects
            rel_ids = {rel_id for rel_id in res.itervalues() if rel_id is not None}
            rels = cls._byID_rel(rel_ids, data=data, eager_load=eager_load, thing_data=thing_data)

            res_obj = {}
            for (thing1_id, thing2_id, name), rel_id in res.iteritems():
                pair = (thing1_dict[thing1_id], thing2_dict[thing2_id], name)
                rel = rels[rel_id] if rel_id is not None else None
                res_obj[pair] = rel

            return res_obj
Author: JBTech, Project: reddit, Lines: 55, Source: thing.py

Example 13: get

    def get(cls, sr_ids):
        """Return a dictionary of sr_id -> list of ads for each of sr_ids"""
        # Mangling: Caller convention is to use empty string for FRONT_PAGE
        sr_ids = [(sr_id or cls.FRONT_PAGE) for sr_id in sr_ids]
        adweights = sgm(cls.cache, sr_ids, cls._load_multi,
                        prefix=cls.cache_prefix, stale=True)
        results = {sr_id: cls.from_columns(adweights[sr_id])
                   for sr_id in adweights}
        if cls.FRONT_PAGE in results:
            results[''] = results.pop(cls.FRONT_PAGE)
        return results
Author: caseypatrickdriscoll, Project: reddit, Lines: 11, Source: promo.py

Example 14: _fast_query_all_names

        def _fast_query_all_names(cls, thing1s, thing2s, data=True):
            """looks up all the relationships between thing1_ids and thing2_ids
            and caches them
            """
            prefix = thing_prefix(cls.__name__)

            thing1_dict = dict((t._id, t) for t in thing1s)
            thing2_dict = dict((t._id, t) for t in thing2s)

            thing1_ids = thing1_dict.keys()
            thing2_ids = thing2_dict.keys()

            pairs = set((x, y)
                        for x in thing1_ids
                        for y in thing2_ids)

            def items_db(pairs):
                t1_ids = set()
                t2_ids = set()
                for t1, t2 in pairs:
                    t1_ids.add(t1)
                    t2_ids.add(t2)

                q = cls._query(cls.c._thing1_id == t1_ids,
                               cls.c._thing2_id == t2_ids,
                               eager_load = True,
                               data = data)

                rel_ids = {}
                for rel in q:
                    l = rel_ids.setdefault((rel._thing1_id, rel._thing2_id), [])
                    l.append(rel._id)

                for p in pairs:
                    if p not in rel_ids:
                        rel_ids[p] = []

                return rel_ids

            res = sgm(cache, pairs, items_db, prefix)
            #convert the keys back into objects
            #we can assume the rels will be in the cache and just call
            #_byID lots
            res_obj = {}
            for k, rids in res.iteritems():
                for rid in rids:
                    obj_key = (thing1_dict[k[0]], thing2_dict[k[1]])
                    result = cls._byID(rid, data=data) if rid else None
                    if res_obj.get(obj_key) is None:
                        res_obj[obj_key] = result

            return res_obj
Author: brendanlong, Project: lesswrong, Lines: 52, Source: thing.py

Example 15: location_by_ips

def location_by_ips(ips):
    ips, is_single = tup(ips, ret_is_single=True)
    location_by_ip = sgm(
        cache=g.gencache,
        keys=ips,
        miss_fn=_location_by_ips,
        prefix='geoip:loc_',
        time=GEOIP_CACHE_TIME,
    )
    if is_single and location_by_ip:
        return location_by_ip[ips[0]]
    else:
        return location_by_ip
Author: PlayNetwork, Project: reddit, Lines: 13, Source: geoip.py


Note: The r2.lib.cache.sgm examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright remains with the original authors; consult each project's License before distributing or reusing the code. Do not reproduce without permission.