

Python StrictRedis.hmget Method Code Examples

This article collects typical usage examples of the redis.StrictRedis.hmget method in Python. If you are wondering what StrictRedis.hmget does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples of redis.StrictRedis, the class this method belongs to.


The following presents 13 code examples of StrictRedis.hmget, sorted by popularity by default.
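
Before diving into the collected examples, here is a minimal sketch of the call itself (it assumes a Redis server reachable on localhost:6379; the key and field names are made up for illustration):

from redis import StrictRedis

connection = StrictRedis(host='localhost', port=6379, db=0)
connection.hset('user:1', 'name', 'alice')
connection.hset('user:1', 'email', 'alice@example.com')

# hmget returns the values in the same order as the requested fields;
# fields that do not exist come back as None.
name, email, age = connection.hmget('user:1', ['name', 'email', 'age'])
print(name, email, age)  # b'alice' b'alice@example.com' None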

Example 1: test_request_error

# Required import: from redis import StrictRedis [as alias]
# Or: from redis.StrictRedis import hmget [as alias]
 def test_request_error(self):
     pool = get_pool()
     subject = RedisMetadata(pool, True)
     r1 = Request("md1", int(time()) - 10, 'https://www.knuthellan.com/', domain='knuthellan.com')
     subject.request_error(r1, 404)
     connection = StrictRedis(connection_pool=pool)
     self.assertEqual(b'https://www.knuthellan.com/', connection.hmget('md1', FIELD_URL)[0])
     self.assertEqual(b'd_md1', connection.hmget('md1', FIELD_DOMAIN_FINGERPRINT)[0])
     self.assertEqual(b'404', connection.hmget('md1', FIELD_ERROR)[0])
Developer ID: scrapinghub, Project: frontera, Lines of code: 11, Source file: test_redis.py

Example 2: test_page_crawled

# Required import: from redis import StrictRedis [as alias]
# Or: from redis.StrictRedis import hmget [as alias]
 def test_page_crawled(self):
     pool = get_pool()
     subject = RedisMetadata(pool, True)
     r1 = Request("md1", int(time()) - 10, 'https://www.knuthellan.com/', domain='knuthellan.com')
     subject.page_crawled(r1)
     connection = StrictRedis(connection_pool=pool)
     self.assertEqual(b'200', connection.hmget('md1', FIELD_STATUS_CODE)[0])
Developer ID: scrapinghub, Project: frontera, Lines of code: 9, Source file: test_redis.py

Example 3: Simmetrica

# Required import: from redis import StrictRedis [as alias]
# Or: from redis.StrictRedis import hmget [as alias]
class Simmetrica(object):

    DEFAULT_INCREMENT = 1
    DEFAULT_RESOLUTION = '5min'
    DEFAULT_REDIS_HOST = 'localhost'
    DEFAULT_REDIS_PORT = 6379
    DEFAULT_REDIS_DB = 0

    resolutions = {
        'min': 60,
        '5min': 300,
        '15min': 900,
        'hour': 3600,
        'day': 86400,
        'week': 86400 * 7,
        'month': 86400 * 30,
        'year': 86400 * 365
    }

    def __init__(self, host=None, port=None, db=None):
        self.backend = StrictRedis(
            host=host or self.DEFAULT_REDIS_HOST,
            port=int(port or self.DEFAULT_REDIS_PORT),
            db=db or self.DEFAULT_REDIS_DB
        )

    def push(self, event, increment=DEFAULT_INCREMENT, now=None):
        pipe = self.backend.pipeline()
        for resolution, timestamp in self.get_timestamps_for_push(now):
            key = self.get_event_key(event, resolution)
            pipe.hincrby(key, timestamp, increment)
        return pipe.execute()

    def query(self, event, start, end, resolution=DEFAULT_RESOLUTION):
        key = self.get_event_key(event, resolution)
        timestamps = self.get_timestamps_for_query(
            start, end, self.resolutions[resolution])
        values = self.backend.hmget(key, timestamps)
        for timestamp, value in zip(timestamps, values):
            yield timestamp, value or 0

    def get_timestamps_for_query(self, start, end, resolution):
        return range(self.round_time(start, resolution),
                     self.round_time(end, resolution),
                     resolution)

    def get_timestamps_for_push(self, now):
        now = now or self.get_current_timestamp()
        for resolution, timestamp in self.resolutions.items():
            yield resolution, self.round_time(now, timestamp)

    def round_time(self, time, resolution):
        return int(time - (time % resolution))

    def get_event_key(self, event, resolution):
        return 'simmetrica:{0}:{1}'.format(event, resolution)

    def get_current_timestamp(self):
        return int(time.time())
Developer ID: dwmclary, Project: simmetrica, Lines of code: 61, Source file: simmetrica.py
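
For context, a short usage sketch of the class above (the event name and time window are hypothetical; it assumes a Redis server on the default host and port):

import time

simmetrica = Simmetrica()
simmetrica.push('page_view')  # increments the counter at every resolution
now = int(time.time())
# query() reads the per-bucket counters back with hmget and fills gaps with 0
for timestamp, value in simmetrica.query('page_view', now - 3600, now, resolution='5min'):
    print(timestamp, value)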

Example 4: PECorrelator

# Required import: from redis import StrictRedis [as alias]
# Or: from redis.StrictRedis import hmget [as alias]
class PECorrelator(object):
    def __init__(self):
        self.r = StrictRedis(unix_socket_path=redis_socket, decode_responses=True)

    def get_all_samples(self):
        return self.r.smembers("hashes_sha256")

    def get_all_hashes(self, sha256):
        return [sha256] + list(self.r.hmget(sha256, ["sha1", "md5"]))

    def get_sample_info(self, sha256):
        return self.r.hgetall(sha256)

    def get_timestamps_iso(self, num=-1):
        return [
            (datetime.datetime.fromtimestamp(ts).isoformat(), val)
            for ts, val in self.r.zrevrange("timestamps", 0, num, withscores=True)
        ]

    def get_timestamps(self, num=-1):
        return self.r.zrevrange("timestamps", 0, num, withscores=True)

    def get_samples_timestamp(self, timestamp):
        return self.r.smembers("timestamp:{}".format(timestamp))

    def get_imphashs(self, num=-1):
        return self.r.zrevrange("imphashs", 0, num, withscores=True)

    def get_samples_imphash(self, imphash):
        return self.r.smembers("imphash:{}".format(imphash))

    def get_entrypoints(self, num=-1):
        return self.r.zrevrange("entrypoints", 0, num, withscores=True)

    def get_samples_entrypoint(self, entrypoint):
        return self.r.smembers("entrypoint:{}".format(entrypoint))

    def get_secnumbers(self, num=-1):
        return self.r.zrevrange("secnumbers", 0, num, withscores=True)

    def get_samples_secnumber(self, secnumber):
        return self.r.smembers("secnumber:{}".format(secnumber))

    def get_originalfilenames(self, num=-1):
        return self.r.zrevrange("originalfilenames", 0, num, withscores=True)

    def get_samples_originalfilename(self, originalfilename):
        return self.r.smembers("originalfilename:{}".format(originalfilename))

    def get_sample_secnames(self, sha256):
        return self.r.smembers("{}:secnames".format(sha256))
Developer ID: MISP, Project: misp-workbench, Lines of code: 53, Source file: pecorrelator.py
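
A rough usage sketch (it assumes the module-level redis_socket path used by the class is configured and the database has already been populated):

pec = PECorrelator()
for sha256 in pec.get_all_samples():
    # hmget pulls the sha1 and md5 stored in the per-sample hash
    print(pec.get_all_hashes(sha256))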

Example 5: RedisBroker

# Required import: from redis import StrictRedis [as alias]
# Or: from redis.StrictRedis import hmget [as alias]
class RedisBroker(object):

    def __init__(self, **redis_c):
        self.ip_set_name = redis_c.pop('ip_set_name')
        self._r = StrictRedis(**redis_c)

    def ip_notebook(self, ip):
        (count, status) = self._r.hmget(ip, 'count', 'status')
        print(count, status)
        if not count:
            self._r.hmset(ip, {'count': 1, 'status': 0})
            self._r.sadd(self.ip_set_name, ip)
        else:
            self._r.hincrby(ip, 'count', amount=1)
Developer ID: importcjj, Project: sparta, Lines of code: 16, Source file: redis_broker.py
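
Usage would look roughly like this (the connection parameters and the IP address are illustrative):

broker = RedisBroker(ip_set_name='seen_ips', host='localhost', port=6379, db=0)
broker.ip_notebook('203.0.113.7')  # first sighting: creates the hash and registers the IP
broker.ip_notebook('203.0.113.7')  # later sightings: increments the per-IP counter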

Example 6: test_links_extracted

# Required import: from redis import StrictRedis [as alias]
# Or: from redis.StrictRedis import hmget [as alias]
 def test_links_extracted(self):
     pool = get_pool()
     subject = RedisMetadata(pool, True)
     l1 = Request("l1", int(time()) - 10, 'https://www.knuthellan.com/', domain='knuthellan.com')
     l2 = Request("l2", int(time()) - 10, 'https://www.khellan.com/', domain='khellan.com')
     l3 = Request("l3", int(time()) - 10, 'https://www.hellan.me/', domain='hellan.me')
     links = [l1, l2, l3]
     subject.links_extracted(None, links)
     connection = StrictRedis(connection_pool=pool)
     self.assertEqual(b'https://www.knuthellan.com/', connection.hmget('l1', FIELD_URL)[0])
     self.assertEqual(b'd_l1', connection.hmget('l1', FIELD_DOMAIN_FINGERPRINT)[0])
     self.assertEqual(b'https://www.khellan.com/', connection.hmget("l2", FIELD_URL)[0])
     self.assertEqual(b'd_l2', connection.hmget('l2', FIELD_DOMAIN_FINGERPRINT)[0])
     self.assertEqual(b'https://www.hellan.me/', connection.hmget("l3", FIELD_URL)[0])
     self.assertEqual(b'd_l3', connection.hmget('l3', FIELD_DOMAIN_FINGERPRINT)[0])
Developer ID: scrapinghub, Project: frontera, Lines of code: 17, Source file: test_redis.py

Example 7: test_add_seeds

# Required import: from redis import StrictRedis [as alias]
# Or: from redis.StrictRedis import hmget [as alias]
 def test_add_seeds(self):
     pool = get_pool()
     subject = RedisMetadata(pool, True)
     r1 = Request("md1", int(time()) - 10, 'https://www.knuthellan.com/', domain='knuthellan.com')
     r2 = Request("md2", int(time()) - 10, 'https://www.khellan.com/', domain='khellan.com')
     r3 = Request("md3", int(time()) - 10, 'https://www.hellan.me/', domain='hellan.me')
     seeds = [r1, r2, r3]
     subject.add_seeds(seeds)
     connection = StrictRedis(connection_pool=pool)
     self.assertEqual(b'https://www.knuthellan.com/', connection.hmget('md1', FIELD_URL)[0])
     self.assertEqual(b'd_md1', connection.hmget('md1', FIELD_DOMAIN_FINGERPRINT)[0])
     self.assertEqual(b'https://www.khellan.com/', connection.hmget("md2", FIELD_URL)[0])
     self.assertEqual(b'd_md2', connection.hmget('md2', FIELD_DOMAIN_FINGERPRINT)[0])
     self.assertEqual(b'https://www.hellan.me/', connection.hmget("md3", FIELD_URL)[0])
     self.assertEqual(b'd_md3', connection.hmget('md3', FIELD_DOMAIN_FINGERPRINT)[0])
Developer ID: scrapinghub, Project: frontera, Lines of code: 17, Source file: test_redis.py

Example 8: Router

# Required import: from redis import StrictRedis [as alias]
# Or: from redis.StrictRedis import hmget [as alias]

#......... part of the code omitted here .........

        log.info("init complete")
        self.init_complete.set(True)

    def _parse_collection(self, hostname):
        "return the Nimbus.io collection name from host name"
        offset = -1 * ( len(self.service_domain) + 1 )
        return hostname[:offset].lower()

    def _hosts_for_collection(self, collection):
        "return a list of hosts for this collection"
        cluster_id = self._cluster_for_collection(collection)
        if cluster_id is None:
            return None
        cluster_info = self._cluster_info(cluster_id)
        return cluster_info['hosts']

    def _cluster_for_collection(self, collection, _retries=0):
        "return cluster ID for collection"

        collection_row = self.collection_lookup.get(collection)
        if not collection_row:
            return None
        return collection_row['cluster_id']
            
    def _db_cluster_info(self, cluster_id):
        async_result = self.central_conn_pool.run("""
            select name, hostname, node_number_in_cluster 
            from nimbusio_central.node 
            where cluster_id=%s 
            order by node_number_in_cluster""", 
            [cluster_id, ])

        rows = async_result.get()
    
        info = dict(rows = rows,
                    hosts = [r['hostname'] for r in rows])

        return info

    def _cluster_info(self, cluster_id):
        "return info about a cluster and its hosts"
        if cluster_id in self.known_clusters:
            return self.known_clusters[cluster_id]
        
        info = self._db_cluster_info(cluster_id)
        
        self.known_clusters[cluster_id] = info 
        return info

    def check_availability(self, hosts, dest_port, _resolve_cache=dict()):
        "return set of hosts we think are available" 
        log = logging.getLogger("check_availability")

        available = set()
        if not hosts:
            return available

        addresses = []
        for host in hosts:
            if not host in _resolve_cache:
                _resolve_cache[host] = socket.gethostbyname(host)
            addresses.append(_resolve_cache[host])

        redis_keys = [ REDIS_WEB_MONITOR_HASHKEY_FORMAT % (a, dest_port, )
                       for a in addresses ]

        try:
            redis_values = self.redis.hmget(REDIS_WEB_MONITOR_HASH_NAME,
                                            redis_keys)
        except RedisError as err:
            log.warn("redis error querying availability for %s: %s, %r"
                % ( REDIS_WEB_MONITOR_HASH_NAME, err, redis_keys, ))
            # just consider everything available. it's the best we can do.
            available.update(hosts)
            return available

        unknown = []
        for idx, val in enumerate(redis_values):
            if val is None:
                unknown.append((hosts[idx], redis_keys[idx], ))
                continue
            try:
                status = json.loads(val)
            except Exception as err:
                log.warn("cannot decode %s %s %s %r" % ( 
                    REDIS_WEB_MONITOR_HASH_NAME, hosts[idx], 
                    redis_keys[idx], val, ))
            else:
                if status["reachable"]:
                    available.add(hosts[idx])
            
        if unknown:
            log.warn("no availability info in redis for hkeys: %s %r" % 
                ( REDIS_WEB_MONITOR_HASH_NAME, unknown, ))
            # if every host is unknown, just consider them all available
            if len(unknown) == len(hosts):
                available.update(hosts)

        return available
Developer ID: HackLinux, Project: nimbus.io, Lines of code: 104, Source file: router.py

Example 9: RedisJobStore

# Required import: from redis import StrictRedis [as alias]
# Or: from redis.StrictRedis import hmget [as alias]
class RedisJobStore(BaseJobStore):
    """
    Stores jobs in a Redis database. Any leftover keyword arguments are directly passed to redis's StrictRedis.

    Plugin alias: ``redis``

    :param int db: the database number to store jobs in
    :param str jobs_key: key to store jobs in
    :param str run_times_key: key to store the jobs' run times in
    :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the highest available
    """

    def __init__(self, db=0, jobs_key='apscheduler.jobs', run_times_key='apscheduler.run_times',
                 pickle_protocol=pickle.HIGHEST_PROTOCOL, **connect_args):
        super(RedisJobStore, self).__init__()

        if db is None:
            raise ValueError('The "db" parameter must not be empty')
        if not jobs_key:
            raise ValueError('The "jobs_key" parameter must not be empty')
        if not run_times_key:
            raise ValueError('The "run_times_key" parameter must not be empty')

        self.pickle_protocol = pickle_protocol
        self.jobs_key = jobs_key
        self.run_times_key = run_times_key
        self.redis = StrictRedis(db=int(db), **connect_args)

    def lookup_job(self, job_id):
        job_state = self.redis.hget(self.jobs_key, job_id)
        return self._reconstitute_job(job_state) if job_state else None

    def get_due_jobs(self, now):
        timestamp = datetime_to_utc_timestamp(now)
        job_ids = self.redis.zrangebyscore(self.run_times_key, 0, timestamp)
        if job_ids:
            job_states = self.redis.hmget(self.jobs_key, *job_ids)
            return self._reconstitute_jobs(six.moves.zip(job_ids, job_states))
        return []

    def get_next_run_time(self):
        next_run_time = self.redis.zrange(self.run_times_key, 0, 0, withscores=True)
        if next_run_time:
            return utc_timestamp_to_datetime(next_run_time[0][1])

    def get_all_jobs(self):
        job_states = self.redis.hgetall(self.jobs_key)
        jobs = self._reconstitute_jobs(six.iteritems(job_states))
        paused_sort_key = datetime(9999, 12, 31, tzinfo=utc)
        return sorted(jobs, key=lambda job: job.next_run_time or paused_sort_key)

    def add_job(self, job):
        if self.redis.hexists(self.jobs_key, job.id):
            raise ConflictingIdError(job.id)

        with self.redis.pipeline() as pipe:
            pipe.multi()
            pipe.hset(self.jobs_key, job.id, pickle.dumps(job.__getstate__(), self.pickle_protocol))
            if job.next_run_time:
                pipe.zadd(self.run_times_key, datetime_to_utc_timestamp(job.next_run_time), job.id)
            pipe.execute()

    def update_job(self, job):
        if not self.redis.hexists(self.jobs_key, job.id):
            raise JobLookupError(job.id)

        with self.redis.pipeline() as pipe:
            pipe.hset(self.jobs_key, job.id, pickle.dumps(job.__getstate__(), self.pickle_protocol))
            if job.next_run_time:
                pipe.zadd(self.run_times_key, datetime_to_utc_timestamp(job.next_run_time), job.id)
            else:
                pipe.zrem(self.run_times_key, job.id)
            pipe.execute()

    def remove_job(self, job_id):
        if not self.redis.hexists(self.jobs_key, job_id):
            raise JobLookupError(job_id)

        with self.redis.pipeline() as pipe:
            pipe.hdel(self.jobs_key, job_id)
            pipe.zrem(self.run_times_key, job_id)
            pipe.execute()

    def remove_all_jobs(self):
        with self.redis.pipeline() as pipe:
            pipe.delete(self.jobs_key)
            pipe.delete(self.run_times_key)
            pipe.execute()

    def shutdown(self):
        self.redis.connection_pool.disconnect()

    def _reconstitute_job(self, job_state):
        job_state = pickle.loads(job_state)
        job = Job.__new__(Job)
        job.__setstate__(job_state)
        job._scheduler = self._scheduler
        job._jobstore_alias = self._alias
        return job

#......... part of the code omitted here .........
Developer ID: theguardian, Project: CherryStrap, Lines of code: 103, Source file: redis.py
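
For reference, this job store is normally plugged into an APScheduler scheduler rather than called directly; a minimal sketch (the scheduler type and database number are arbitrary):

from apscheduler.schedulers.background import BackgroundScheduler

scheduler = BackgroundScheduler(jobstores={'default': RedisJobStore(db=1)})
scheduler.start()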

Example 10: __init__

# Required import: from redis import StrictRedis [as alias]
# Or: from redis.StrictRedis import hmget [as alias]
class RedisHelper:
    prefix = "bc:chart:cache"

    def __init__(self, host=None, port=None, prefix=None):
        # Read the Redis connection settings here; the original code takes them from functions in conf.py
        self._host = host
        self._port = int(port)
        self._redis = StrictRedis(host=self._host, port=self._port)

    def gen_key(self, chart_id):
        return "%s:%s" % (self.prefix, chart_id)

    def put(self, chart_id, data, expire=2000):
        key = self.gen_key(chart_id)
        self._redis.set(key, dumps(data))
        self._redis.expire(key, expire)
        return True

    def delete(self, chart_id):
        key = self.gen_key(chart_id)
        self._redis.delete(key)

    def deleteN(self, chart_id):
        key = self.gen_key(chart_id)
        keys = self._redis.keys("%s*" % key)
        for k in keys:
            self._redis.delete(k)

    def get(self, chart_id):
        key = self.gen_key(chart_id)
        data = self._redis.get(key)
        return {} if not data else loads(data)

    def hset(self, key, field, value):
        self._redis.hset(key, field, value)

    def hmget(self, key, fields):
        return self._redis.hmget(key, fields)

    def flush(self):
        keys = self._redis.keys("%s*" % self.prefix)
        pipe = self._redis.pipeline()
        for key in keys:
            pipe.delete(key)
        pipe.execute()

    # the type of value is list
    def list_push(self, key, data):
        return self._redis.rpush(key, dumps(data))
    # pop the head element of the list

    def list_pop(self, key):
        return self._redis.lpop(key)

    # pop all elements of the list
    def list_all_pop(self, key):
        while True:
            if self.list_size(key) == 0:
                self._redis.delete(key)
                break
            res = self._redis.lpop(key)
            if res:
                yield loads(res)

    # the length of list
    def list_size(self, key):
        return self._redis.llen(key)

    @property
    def redis(self):
        return self._redis
Developer ID: Dragonriver1990, Project: python_tools, Lines of code: 73, Source file: cache_redis.py
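
A short usage sketch of the hash helpers (host, port, and key names are placeholders):

helper = RedisHelper(host='localhost', port=6379)
helper.hset('bc:chart:cache:meta', 'owner', 'demo')
helper.hset('bc:chart:cache:meta', 'rows', 42)
# hmget returns the raw bytes stored for each field
print(helper.hmget('bc:chart:cache:meta', ['owner', 'rows']))  # [b'demo', b'42']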

Example 11: expire_reservation

# Required import: from redis import StrictRedis [as alias]
# Or: from redis.StrictRedis import hmget [as alias]
def expire_reservation(event_name):
  cutoff_ts = int(time.time() - 30)
  for i in redis.hscan_iter("events:" + event_name, match="reservations-ts:*"):
    if int(i[1]) < cutoff_ts:
      (_, user) = i[0].split(":")
      qty = int(redis.hget("events:" + event_name, "reservations-user:" + user))
      backout_reservation(user, event_name, qty)  

# Expire reservations
for_event = "Womens Javelin"
create_expired_reservation(for_event)
expiration = time.time() + 20
while True:
  expire_reservation(for_event)
  outstanding = redis.hmget("events:" + for_event, "reservations-user:Fred", "reservations-user:Jim", "reservations-user:Amy")
  available = redis.hget("events:" + for_event, "available")
  print("{}, Available:{}, Reservations:{}".format(for_event, available, outstanding))
  if time.time() > expiration:
    break
  else:
    time.sleep(1)

# Part Four - Posting purchases
def reserve_with_pending(user, event_name, qty):
  p = redis.pipeline()
  try:
    redis.watch("events:" + event_name)
    available = int(redis.hget("events:" + event_name, "available"))
    if available >= qty:
      order_id = generate_order_id()
Developer ID: alvinr, Project: data-modeling, Lines of code: 32, Source file: all.py

Example 12: __init__

# Required import: from redis import StrictRedis [as alias]
# Or: from redis.StrictRedis import hmget [as alias]

#......... part of the code omitted here .........
        if tagging == '__taggings__' or tagging == '__all__':
            rv = {}
        else:
            rv = []

        # searching what elements should be match
        for info, ts in elements:
            info = unpackb(info)
            if tagging == '__taggings__':
                rv[ts] = list(info.keys())
            elif tagging == '__all__':
                rv[ts] = info
            elif tagging in info:
                if ret_whold:
                    rv.append((info, ts))
                else:
                    rv.append((info[tagging], ts))

        return rv

    def inc_coll_cache_set(self, coll, field, value):
        key = self.inc_coll_cache_fmt.format(name=coll.name)
        self.rdb.hset(key, field, packb(value))

    def inc_coll_caches_get(self, coll, *fields):
        """
        :ret: return [] if no data exists. Normal structure is:
                [value1, value2, ..., valueN]
        """
        if not fields:
            return []

        key = self.inc_coll_cache_fmt.format(name=coll.name)
        rv = self.rdb.hmget(key, *fields)
        # print('inc_coll_caches_get - ', rv)
        # print('inc_coll_caches_get After - ', [unpackb(r) for r in rv if r])
        return [unpackb(r) for r in rv if r]

    def inc_coll_caches_del(self, coll, *fields):
        key = self.inc_coll_cache_fmt.format(name=coll.name)
        return self.rdb.hdel(key, *fields)

    def uniq_count_coll_cache_set(self, coll, ts, tagging, values):
        """
        :param values: should be a iterable object contain members
        """
        values = {packb(v) for v in values}
        key_fmt = self.unique_count_coll_cache_fmt
        key = key_fmt.format(name=coll.name, tagging=tagging, ts=ts)
        return self.rdb.sadd(key, *values)

    def uniq_count_coll_cache_get(self, coll, tagging, timestamps, count_only=False):
        key_fmt = self.unique_count_coll_cache_fmt
        rv = []
        for ts in timestamps:
            key = key_fmt.format(name=coll.name, tagging=tagging, ts=ts)
            if count_only:
                count = self.rdb.scard(key)
                rv.append(count)
            else:
                members = self.rdb.smembers(key)
                rv.append({unpackb(m) for m in members})
        return rv

    def uniq_count_coll_cache_pop(self, coll, tagging, timestamps, number):
        """
Developer ID: JasonLai256, Project: plumbca, Lines of code: 70, Source file: backend.py

Example 13: SnapshotConnector

# Required import: from redis import StrictRedis [as alias]
# Or: from redis.StrictRedis import hmget [as alias]
class SnapshotConnector(object):

    def __init__(self):
        self.r = StrictRedis(unix_socket_path=redis_socket, decode_responses=True)

    # ##### Helpers web interface #####

    def get_groups(self, groups=None):
        if not groups:
            grps = sorted(self.r.smembers('groups'))
        else:
            grps = sorted(groups)
        return [(g, self.get_events(self.r.smembers(g))) for g in grps]

    def get_events(self, events=None):
        if events is None:
            eids = sorted(self.r.smembers('events'), key=int, reverse=True)
        else:
            eids = sorted(events, key=int, reverse=True)
        return [self.get_event_digest(eid) for eid in eids]

    def hashes_eids(self, hashes):
        eids = set()
        for h in hashes:
            eids.update(self.r.smembers('{}:eids'.format(h)))
        return eids

    def rebuild_eid_cache(self):
        for sha256 in self.r.smembers('hashes_sha256'):
            sha1, md5 = self.r.hmget(sha256, ['sha1', 'md5'])
            eids = search('{} {} {}'.format(sha256, sha1, md5), 'value')
            if eids:
                all_eids = [e for e, f in eids.most_common()]
                self.r.sadd('{}:eids'.format(sha256), *all_eids)

    # ##### Values functions #####

    def make_hashed_value(self, value):
        '''
            Hash the value to search
        '''
        return SHA256.new(value.strip().lower()).hexdigest()

    def get_value_details(self, hashed_value):
        '''
            Returns all attributes of a value
        '''
        attributes_ids = self.r.smembers('{}:attrs'.format(hashed_value))
        return [self.r.hgetall('attribute:{}'.format(attrid)) for attrid in attributes_ids]

    def get_all_value_digest(self):
        p = self.r.pipeline(False)
        attrs = [self.r.hmget('attribute:{}'.format(attrid), 'event_id', 'value1', 'value2', 'comment')
                 for attrid in self.r.smembers('attributes')]
        p.execute()
        return attrs

    def get_value_digest(self, hashed_value):
        '''
            Returns value1 & 2 and comment, deduplicate
        '''
        attrids = self.r.smembers('{}:attrs'.format(hashed_value))
        digest = [self.r.hmget('attribute:{}'.format(aid), 'value1', 'value2', 'comment') for aid in attrids]
        values = set()
        comments = set()
        for v1, v2, comment in digest:
            values.add(v1)
            if v2:
                values.add(v2)
            if comment:
                comments.add(comment)
        return values, comments

    def get_events_digest_from_value(self, hashed_value):
        '''
            Returns digests of events the value is listed in.
        '''
        return [self.get_event_digest(eid) for eid in self.r.smembers(hashed_value)]

    # ##### Keys functions #####

    def key_values_digests(self, key):
        '''
            Returns value digests of all values in a key
        '''
        return [self.get_value_digest(hashed_value) for hashed_value in self.r.smembers(key)]

    # ##### Event functions #####

    def get_event_digest(self, eid):
        '''
            Returns info and date of the event
        '''
        to_return = {'eid': eid, 'tags': self.r.smembers('event:{}:tags'.format(eid))}
        to_return.update(dict(zip(['info', 'date'], self.r.hmget('event:{}'.format(eid), 'info', 'date'))))
        return to_return

    def merge(self, events):
        '''
            Merge a list of events into one set.
#......... part of the code omitted here .........
Developer ID: FloatingGhost, Project: misp-workbench, Lines of code: 103, Source file: connector.py


Note: The redis.StrictRedis.hmget examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and any redistribution or use should follow the corresponding project's license. Do not reproduce without permission.