

Python StrictRedis.ping Method Code Examples

This article collects typical usage examples of the Python method redis.StrictRedis.ping. If you are unsure how to call StrictRedis.ping or what it is typically used for, the selected code examples below should help. You can also explore further usage examples of redis.StrictRedis.


The following presents 15 code examples of the StrictRedis.ping method, ordered by popularity by default.
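
For orientation before the collected examples: ping() sends the Redis PING command and returns True when the server answers, and redis-py raises redis.exceptions.ConnectionError when the server cannot be reached. A minimal sketch, with placeholder connection parameters:

from redis import StrictRedis
from redis.exceptions import ConnectionError

client = StrictRedis(host='localhost', port=6379, db=0)  # adjust to your deployment
try:
    client.ping()                     # returns True when the server answers PONG
    print('Redis is reachable')
except ConnectionError:
    print('Redis is not reachable')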

Example 1: __init_redis

# Required import: from redis import StrictRedis [as alias]
# Or: from redis.StrictRedis import ping [as alias]
 def __init_redis(self):
     try:
         redis = StrictRedis()
         redis.ping()    # raises an exception if it fails
         self.redis = redis
     except:
         pass
Developer: prototo, Project: AnimeInfoBot, Lines: 9, Source: client.py

Example 2: redis

# Required import: from redis import StrictRedis [as alias]
# Or: from redis.StrictRedis import ping [as alias]
 def redis(self):
     redis_config = self.config.get('redis')
     redis = StrictRedis(
         redis_config.get('host'), redis_config.get('port'), redis_config.get('db')
     )
     redis.ping()
     return redis
Developer: vampirekiss, Project: wechat_message, Lines: 9, Source: provider.py

Example 3: connect

# Required import: from redis import StrictRedis [as alias]
# Or: from redis.StrictRedis import ping [as alias]
	def connect(self, host = REDIS_HOST, port = REDIS_PORT, db = REDIS_DB):
		connection = StrictRedis(host = host, port = port, db = db)
		try:
			connection.ping()
			print('Successfully connected to Redis: host – %s, port – %d, db – %d.' % (host, port, db))
			self.connection = connection
		except:
			print('Connection to Redis failed: host – %s, port – %d, db – %d.' % (host, port, db))
Developer: moigagoo, Project: simtech_redis, Lines: 10, Source: redis_connector.py

Example 4: redis

# Required import: from redis import StrictRedis [as alias]
# Or: from redis.StrictRedis import ping [as alias]
def redis():
    try:
        from redis import StrictRedis
        from redis.exceptions import ConnectionError
    except ImportError:
        pytest.skip('redis library not installed')
    try:
        r = StrictRedis()
        r.ping()
    except ConnectionError:
        pytest.skip('could not connect to redis')
    r.flushall()
    return r
Developer: actmd, Project: flask-kvsession, Lines: 15, Source: conftest.py
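
A test that needs the fixture above simply declares it as an argument; pytest skips the test automatically when the redis library is missing or no server is reachable. A small illustrative test (the key name is made up):

def test_roundtrip(redis):
    # the fixture already called flushall(), so the database starts empty
    redis.set('example-key', b'value')
    assert redis.get('example-key') == b'value'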

Example 5: TestRModels

# Required import: from redis import StrictRedis [as alias]
# Or: from redis.StrictRedis import ping [as alias]
class TestRModels(object):

    def setup_method(self, method):
        self.rc = StrictRedis()
        self.disabled = False
        try:
            self.rc.ping()
        except ConnectionError:
            self.disabled = True
        self.prefix = "copr:test:r_models"

        self.time_now = time.time()

    def teardown_method(self, method):
        if self.disabled:
            return

        keys = self.rc.keys('{}*'.format(self.prefix))
        if keys:
            self.rc.delete(*keys)

    def test_timed_stats_events(self):
        if self.disabled:
            return

        TimedStatEvents.add_event(self.rc, name="foobar", prefix=self.prefix,
                                  timestamp=self.time_now, )

        assert TimedStatEvents.get_count(self.rc, name="foobar", prefix=self.prefix,) == 1
        TimedStatEvents.add_event(self.rc, name="foobar", prefix=self.prefix,
                                  timestamp=self.time_now, count=2)

        assert TimedStatEvents.get_count(self.rc, name="foobar", prefix=self.prefix,) == 3

        TimedStatEvents.add_event(self.rc, name="foobar", prefix=self.prefix,
                                  timestamp=self.time_now - 1000000, count=2)
        TimedStatEvents.add_event(self.rc, name="foobar", prefix=self.prefix,
                                  timestamp=self.time_now - 3000000, count=3)

        assert TimedStatEvents.get_count(self.rc, name="foobar", prefix=self.prefix,) == 3
        assert TimedStatEvents.get_count(self.rc, name="foobar", prefix=self.prefix,
                                         day_min=self.time_now - 2000000) == 5
        assert TimedStatEvents.get_count(self.rc, name="foobar", prefix=self.prefix,
                                         day_min=self.time_now - 5000000) == 8

        TimedStatEvents.trim_before(self.rc, name="foobar",
                                    prefix=self.prefix, threshold_timestamp=self.time_now - 200000)

        assert TimedStatEvents.get_count(self.rc, name="foobar", prefix=self.prefix,
                                         day_min=self.time_now - 5000000) == 3
Developer: 0-T-0, Project: copr, Lines: 52, Source: test_rmodels.py

Example 6: check_running

# Required import: from redis import StrictRedis [as alias]
# Or: from redis.StrictRedis import ping [as alias]
def check_running(name: str):
    socket_path = get_socket_path(name)
    try:
        r = StrictRedis(unix_socket_path=socket_path)
        return r.ping()
    except ConnectionError:
        return False
Developer: CIRCL, Project: potiron, Lines: 9, Source: helpers.py
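
The same check works without the project-specific get_socket_path helper: unix_socket_path tells redis-py to connect over a Unix domain socket instead of TCP. A minimal sketch, with a placeholder socket path:

from redis import StrictRedis
from redis.exceptions import ConnectionError

def is_redis_up(socket_path='/tmp/redis.sock'):
    try:
        return StrictRedis(unix_socket_path=socket_path).ping()
    except ConnectionError:
        return False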

Example 7: RedisCache

# Required import: from redis import StrictRedis [as alias]
# Or: from redis.StrictRedis import ping [as alias]
class RedisCache(object):

    def __init__(self, params={}):
        self._validate(params)

        if not self.server:
            raise Exception('Redis Server Not Defined')

        try:
            log.debug('Connecting to redis at [%s]?[%s]' % (self.server, self.database))
            self.cache = StrictRedis(self.server, port=self.port, db=self.database)
        except ConnectionError as ex:
            raise Exception("Unable to connect to Redis", ex)

    def get(self, key):
        """
        Fetch a given key from the cache. If the key does not exist, return
        default, which itself defaults to None.
        """
        ckey = self._create_key(key)
        log.debug("Getting the cache key [%s]" % ckey)
        return self.cache.get(ckey)

    def ping(self):
        """
        This command is often used to test if the cache is still alive, or to measure latency.
        """
        log.debug("Ping to the cache")
        return self.cache.ping()

    def store(self, key, value, expires=None):
        """
        Set a value in the cache. If timeout is given, that timeout will be
        used for the key; otherwise the default cache timeout will be used.
        """
        ckey = self._create_key(key)
        log.debug("Storing the cache key [%s]" % ckey)
        return self.cache.set(ckey, value, ex=expires)

    def delete(self, key):
        """
        Delete a key from the cache, failing silently.
        """
        ckey = self._create_key(key)
        log.debug("Deleting the cache key [%s]" % ckey)
        return self.cache.delete(ckey)

    def _validate(self, params):
        """
        Initialize all the needed parameters
        """
        self.server = params.get('server', 'localhost')
        self.port = params.get('port', 6379)
        self.database = params.get('database', 2)
        self.key_prefix = params.get('key_prefix', 'domus')

    def _create_key(self, key):
        return "%s.%s" % (self.key_prefix, key)
Developer: Esiravegna, Project: domus, Lines: 60, Source: cache.py
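
A quick usage sketch for the class above, assuming the module-level log logger it relies on is configured; the configuration values and key name are illustrative:

cache = RedisCache({'server': 'localhost', 'port': 6379, 'database': 2, 'key_prefix': 'domus'})
if cache.ping():                                     # confirms the backend is alive
    cache.store('greeting', 'hello', expires=60)     # stored under the key "domus.greeting"
    value = cache.get('greeting')                    # redis-py returns bytes by default
    cache.delete('greeting')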

Example 8: RedisCache

# Required import: from redis import StrictRedis [as alias]
# Or: from redis.StrictRedis import ping [as alias]
class RedisCache(CacheInterface):
    
    _c = None
    _pool = None
    
    
    def __init__(self, conn_pool):
        ''' for single client, init host and port;
            for client pool, init pool'''
        self._c = StrictRedis(connection_pool=conn_pool)
        self._pool = conn_pool

    
    @classmethod
    def alloc(self, node, max_connections=2**16, timeout=20, db=0):
        host = node.split(':')[0]
        port = int(node.split(':')[1])
        pool = BlockingConnectionPool(max_connections=max_connections, 
            timeout=timeout, **{'host': host, 'port': port, 'db': db})
        
        return RedisCache(pool)

    def get_instance(self,):
        ''' for single client, return Redis;
            for client pool, return item in pool'''
        return self._c


    def keys(self,):
        ''' yield all keys in cache'''
        for key in self._c.keys():
            yield key


    def set(self, key, val):
        ''' set val in cache by key'''
        self._c.set(key, val)


    def get(self, key):
        ''' get val in cache by key'''
        return self._c.get(key)


    def ping(self, node):
        ''' ping via this instance's client; node is unused here'''
        return self._c.ping()


    def __del__(self,):
        ''' release connections'''
        self._pool.disconnect()
        del self._pool
Developer: cyue, Project: dcache, Lines: 55, Source: cache_interface.py

Example 9: RedisEngine

# Required import: from redis import StrictRedis [as alias]
# Or: from redis.StrictRedis import ping [as alias]
class RedisEngine(QueueEngine):
	"""docstring for RedisEngine"""
	def __init__(self):
		super(RedisEngine, self).__init__()
		self.r = None
		self.host = 'localhost'
		self.port = 6379

	def connect(self):
		self.r = StrictRedis(self.host, self.port, db=0)
		return self.is_available()

	def is_available(self):
		print('is redis available')
		if self.r is None:
			return False
		return self.r.ping() is not None

	def enqueue(self, queue, msg, timeout=0):
		self.r.rpush(queue, msg)

	def dequeue(self, queue, timeout):
		rsp = self.r.blpop(queue, timeout=0)
		return rsp[1]
Developer: logileifs, Project: gateway-utils, Lines: 26, Source: redisengine.py
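
A usage sketch for the engine above; the queue name and message are placeholders. Note that, as written, dequeue calls blpop with timeout=0, so it blocks until a message arrives regardless of the timeout argument:

engine = RedisEngine()
if engine.connect():                        # True when ping() answers
    engine.enqueue('jobs', 'payload-1')
    msg = engine.dequeue('jobs', timeout=5)  # returns b'payload-1'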

Example 10: Boundary

# Required import: from redis import StrictRedis [as alias]
# Or: from redis.StrictRedis import ping [as alias]

# ......... some code omitted here .........
                        if value:
                            anomaly_breakdown[algorithm] += 1

            # It could have been deleted by the Roomba
            except TypeError:
                exceptions['DeletedByRoomba'] += 1
            except TooShort:
                exceptions['TooShort'] += 1
            except Stale:
                exceptions['Stale'] += 1
            except Boring:
                exceptions['Boring'] += 1
            except:
                exceptions['Other'] += 1
                logger.info("exceptions['Other'] traceback follows:")
                logger.info(traceback.format_exc())

        # Add values to the queue so the parent process can collate
        for key, value in anomaly_breakdown.items():
            self.anomaly_breakdown_q.put((key, value))

        for key, value in exceptions.items():
            self.exceptions_q.put((key, value))

    def run(self):
        """
        Called when the process initializes.
        """
        while 1:
            now = time()

            # Make sure Redis is up
            try:
                self.redis_conn.ping()
            except:
                logger.error('skyline can\'t connect to redis at socket path %s' % settings.REDIS_SOCKET_PATH)
                sleep(10)
                self.redis_conn = StrictRedis(unix_socket_path=settings.REDIS_SOCKET_PATH)
                continue

            # Discover unique metrics
            unique_metrics = list(self.redis_conn.smembers(settings.FULL_NAMESPACE + 'unique_metrics'))

            if len(unique_metrics) == 0:
                logger.info('no metrics in redis. try adding some - see README')
                sleep(10)
                continue

            # Reset boundary_metrics
            boundary_metrics = []

            # Build boundary metrics
            for metric_name in unique_metrics:
                for metric in BOUNDARY_METRICS:
                    CHECK_MATCH_PATTERN = metric[0]
                    check_match_pattern = re.compile(CHECK_MATCH_PATTERN)
                    base_name = metric_name.replace(FULL_NAMESPACE, '', 1)
                    pattern_match = check_match_pattern.match(base_name)
                    if pattern_match:
                        if ENABLE_BOUNDARY_DEBUG:
                            logger.info("debug - boundary metric - pattern MATCHED - " + metric[0] + " | " + base_name)
                        boundary_metrics.append([metric_name, metric[1]])

            if ENABLE_BOUNDARY_DEBUG:
                logger.info("debug - boundary metrics - " + str(boundary_metrics))
Developer: gaos1, Project: skyline, Lines: 69, Source: boundary.py
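
Examples 10 through 13 and 15 all rely on the same keep-alive idiom at the top of their main loop: call ping(), and if it fails, sleep briefly, rebuild the connection and try again. Reduced to a standalone helper (the socket path and retry interval are placeholders):

from time import sleep
from redis import StrictRedis

def wait_for_redis(socket_path='/tmp/redis.sock', retry_seconds=10):
    '''Return a StrictRedis client only once ping() succeeds.'''
    conn = StrictRedis(unix_socket_path=socket_path)
    while True:
        try:
            conn.ping()
            return conn
        except Exception:
            sleep(retry_seconds)                              # back off, then reconnect
            conn = StrictRedis(unix_socket_path=socket_path)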

Example 11: Analyzer

# Required import: from redis import StrictRedis [as alias]
# Or: from redis.StrictRedis import ping [as alias]

# ......... some code omitted here .........
                else:
                    self.anomaly_breakdown[key] += value

            for key, value in exceptions.items():
                if key not in self.exceptions:
                    self.exceptions[key] = value
                else:
                    self.exceptions[key] += value

    def send_mail(self, alert, metric):
        """
        Send an alert email to the appropriate recipient
        """
        msg = MIMEMultipart('alternative')
        msg['Subject'] = '[skyline alert] ' + metric[1]
        msg['From'] = settings.ALERT_SENDER
        msg['To'] = alert[1]
        link = '%s/render/?width=588&height=308&target=%s' % (settings.GRAPHITE_HOST, metric[1])
        body = 'Anomalous value: %s <br> Next alert in: %s seconds <a href="%s"><img src="%s"/></a>' % (metric[0], alert[2], link, link)
        msg.attach(MIMEText(body, 'html'))
        s = SMTP('127.0.0.1')
        s.sendmail(settings.ALERT_SENDER, alert[1], msg.as_string())
        s.quit()

    def run(self):
        """
        Called when the process initializes.
        """
        while 1:
            now = time()

            # Make sure Redis is up
            try:
                self.redis_conn.ping()
            except:
                logger.error('skyline can\'t connect to redis at socket path %s' % settings.REDIS_SOCKET_PATH)
                sleep(10)
                self.redis_conn = StrictRedis(unix_socket_path = settings.REDIS_SOCKET_PATH)
                continue

            # Discover unique metrics
            unique_metrics = list(self.redis_conn.smembers(settings.FULL_NAMESPACE + 'unique_metrics'))

            if len(unique_metrics) == 0:
                logger.info('no metrics in redis. try adding some - see README')
                sleep(10)
                continue

            # Spawn processes
            pids = []
            for i in range(1, settings.ANALYZER_PROCESSES + 1):
                if i > len(unique_metrics):
                    logger.info('WARNING: skyline is set for more cores than needed.')
                    break

                p = Process(target=self.spin_process, args=(i, unique_metrics))
                pids.append(p)
                p.start()

            # Send wait signal to zombie processes
            for p in pids:
                p.join()

            # Send alerts
            #if settings.ENABLE_ALERTS:
            #    for alert in settings.ALERTS:
Developer: scalextremeinc, Project: skyline, Lines: 70, Source: analyzer.py

Example 12: Analyzer

# Required import: from redis import StrictRedis [as alias]
# Or: from redis.StrictRedis import ping [as alias]

# ......... some code omitted here .........
            else:
                now = log_wait_for + 1

        logger.info('starting %s run' % skyline_app)
        if os.path.isfile(skyline_app_loglock):
            logger.error('error - bin/%s.d log management seems to have failed, continuing' % skyline_app)
            try:
                os.remove(skyline_app_loglock)
                logger.info('log lock file removed')
            except OSError:
                logger.error('error - failed to remove %s, continuing' % skyline_app_loglock)
                pass
        else:
            logger.info('bin/%s.d log management done' % skyline_app)

        if not os.path.exists(settings.SKYLINE_TMP_DIR):
            if python_version == 2:
                os.makedirs(settings.SKYLINE_TMP_DIR, 0750)
            if python_version == 3:
                os.makedirs(settings.SKYLINE_TMP_DIR, mode=0o750)

        # Initiate the algorithm timings if Analyzer is configured to send the
        # algorithm_breakdown metrics with ENABLE_ALGORITHM_RUN_METRICS
        algorithm_tmp_file_prefix = settings.SKYLINE_TMP_DIR + '/' + skyline_app + '.'
        algorithms_to_time = []
        if send_algorithm_run_metrics:
            algorithms_to_time = settings.ALGORITHMS

        while 1:
            now = time()

            # Make sure Redis is up
            try:
                self.redis_conn.ping()
            except:
                logger.error('skyline can\'t connect to redis at socket path %s' % settings.REDIS_SOCKET_PATH)
                sleep(10)
                self.redis_conn = StrictRedis(unix_socket_path=settings.REDIS_SOCKET_PATH)
                continue

            # Report app up
            self.redis_conn.setex(skyline_app, 120, now)

            # Discover unique metrics
            unique_metrics = list(self.redis_conn.smembers(settings.FULL_NAMESPACE + 'unique_metrics'))

            if len(unique_metrics) == 0:
                logger.info('no metrics in redis. try adding some - see README')
                sleep(10)
                continue

            # Using count files rather than multiprocessing.Value to enable
            # metrics for algorithm run times, etc
            for algorithm in algorithms_to_time:
                algorithm_count_file = algorithm_tmp_file_prefix + algorithm + '.count'
                algorithm_timings_file = algorithm_tmp_file_prefix + algorithm + '.timings'
                # with open(algorithm_count_file, 'a') as f:
                with open(algorithm_count_file, 'w') as f:
                    pass
                with open(algorithm_timings_file, 'w') as f:
                    pass

            # Spawn processes
            pids = []
            pid_count = 0
            for i in range(1, settings.ANALYZER_PROCESSES + 1):
Developer: blak3r2, Project: skyline, Lines: 70, Source: analyzer_dev.py

Example 13: Roomba

# Required import: from redis import StrictRedis [as alias]
# Or: from redis.StrictRedis import ping [as alias]

# ......... some code omitted here .........
        """
        # Log management to prevent overwriting
        # Allow the bin/<skyline_app>.d to manage the log
        if os.path.isfile(skyline_app_logwait):
            try:
                os_remove(skyline_app_logwait)
            except OSError:
                logger.error('error - failed to remove %s, continuing' % skyline_app_logwait)
                pass

        now = time()
        log_wait_for = now + 5
        while now < log_wait_for:
            if os.path.isfile(skyline_app_loglock):
                sleep(.1)
                now = time()
            else:
                now = log_wait_for + 1

        logger.info('starting %s run' % skyline_app)
        if os.path.isfile(skyline_app_loglock):
            logger.error('error - bin/%s.d log management seems to have failed, continuing' % skyline_app)
            try:
                os_remove(skyline_app_loglock)
                logger.info('log lock file removed')
            except OSError:
                logger.error('error - failed to remove %s, continuing' % skyline_app_loglock)
                pass
        else:
            logger.info('bin/%s.d log management done' % skyline_app)

        logger.info('%s :: started roomba' % skyline_app)

        while 1:
            now = time()

            # Make sure Redis is up
            try:
                self.redis_conn.ping()
            except:
                logger.error(
                    '%s :: roomba can\'t connect to redis at socket path %s' %
                    (skyline_app, settings.REDIS_SOCKET_PATH))
                sleep(10)
                self.redis_conn = StrictRedis(unix_socket_path=settings.REDIS_SOCKET_PATH)
                continue

            # Spawn processes
            pids = []
            for i in range(1, settings.ROOMBA_PROCESSES + 1):
                if not self.skip_mini:
                    logger.info('%s :: starting vacuum process on mini namespace' % skyline_app)
                    p = Process(target=self.vacuum, args=(i, settings.MINI_NAMESPACE, settings.MINI_DURATION + settings.ROOMBA_GRACE_TIME))
                    pids.append(p)
                    p.start()

                logger.info('%s :: starting vacuum process' % skyline_app)
                p = Process(target=self.vacuum, args=(i, settings.FULL_NAMESPACE, settings.FULL_DURATION + settings.ROOMBA_GRACE_TIME))
                pids.append(p)
                p.start()

            # Send wait signal to zombie processes
            # for p in pids:
            #     p.join()
            # deroomba - kill any lingering vacuum processes
            # Changed to manage Roomba processes as edge cases related to I/O
            # wait have been experienced that resulted in Roomba stalling so a
            # ROOMBA_TIMEOUT setting was added and here we use the pattern
            # described by http://stackoverflow.com/users/2073595/dano at
            # http://stackoverflow.com/a/26064238 to monitor and kill any
            # stalled processes rather than using p.join(TIMEOUT) - 20160505
            # @earthgecko ref 1342
            logger.info('%s :: allowing vacuum process/es %s seconds to run' % (
                skyline_app, str(settings.ROOMBA_TIMEOUT)))
            start = time()
            while time() - start <= settings.ROOMBA_TIMEOUT:
                if any(p.is_alive() for p in pids):
                    # Just to avoid hogging the CPU
                    sleep(.1)
                else:
                    # All the processes are done, break now.
                    time_to_run = time() - start
                    logger.info('%s :: vacuum processes completed in %.2f' % (skyline_app, time_to_run))
                    break
            else:
                # We only enter this if we didn't 'break' above.
                logger.info('%s :: timed out, killing all Roomba processes' % (skyline_app))
                for p in pids:
                    p.terminate()
                    p.join()

            # sleeping in the main process is more CPU efficient than sleeping
            # in the vacuum def also roomba is quite CPU intensive so we only
            # what to run roomba once every minute
            process_runtime = time() - now
            roomba_optimum_run_duration = 60
            if process_runtime < roomba_optimum_run_duration:
                sleep_for = (roomba_optimum_run_duration - process_runtime)
                logger.info('%s :: sleeping %.2f for due to low run time' % (skyline_app, sleep_for))
                sleep(sleep_for)
Developer: blak3r2, Project: skyline, Lines: 104, Source: roomba.py
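
The supervision idiom described in the comments above (poll is_alive() against a deadline instead of calling p.join(TIMEOUT), then terminate anything still running) can be reduced to a small helper. A sketch, where processes is a list of multiprocessing.Process objects and the timeout value is illustrative:

from time import sleep, time

def run_with_timeout(processes, timeout=100):
    '''Wait for all processes, terminating any that outlive the timeout.'''
    start = time()
    while time() - start <= timeout:
        if any(p.is_alive() for p in processes):
            sleep(0.1)              # avoid hogging the CPU while polling
        else:
            return True             # everything finished within the deadline
    for p in processes:             # deadline passed: kill and reap stragglers
        p.terminate()
        p.join()
    return False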

Example 14: StrictRedis

# Required import: from redis import StrictRedis [as alias]
# Or: from redis.StrictRedis import ping [as alias]
#
u'''
MODULE DOCSTRING 
'''

# imports std lib

# imports 3rd party libs

# imports sprayer
from itertools import izip

from redis import StrictRedis
s = StrictRedis(port=55511)
#s = StrictRedis(port=33322)
print s.ping()

print 'segundo:', s.execute_command('PING')


def get_masters():
    masters_as_list = s.execute_command('SENTINEL', 'MASTERS')
    masters = {}
    for master_l in masters_as_list:
        #convert list representing a master to a dictionary
        i = iter(master_l)
        master_d = dict(izip(i, i))
        masters[master_d['name']] = master_d
    return masters

print get_masters()
Developer: javierarilos, Project: redis_playground, Lines: 33, Source: execute_commands.py
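
The example above is Python 2 (print statements, itertools.izip). A Python 3 sketch of the same Sentinel query, assuming the raw SENTINEL MASTERS reply still arrives as flat field/value lists as it does above, and using decode_responses=True so the field names come back as str:

from redis import StrictRedis

s = StrictRedis(port=55511, decode_responses=True)  # same port as the example above
print(s.ping())

def get_masters():
    masters = {}
    for master_l in s.execute_command('SENTINEL', 'MASTERS'):
        it = iter(master_l)
        master_d = dict(zip(it, it))   # flat [key, value, key, value, ...] list -> dict
        masters[master_d['name']] = master_d
    return masters

print(get_masters())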

Example 15: Worker

# Required import: from redis import StrictRedis [as alias]
# Or: from redis.StrictRedis import ping [as alias]
class Worker(Process):
    """
    The worker processes chunks from the queue and appends
    the latest datapoints to their respective timesteps in Redis.
    """
    def __init__(self, queue, parent_pid, skip_mini, canary=False):
        super(Worker, self).__init__()
        self.redis_conn = StrictRedis(unix_socket_path=settings.REDIS_SOCKET_PATH)
        self.q = queue
        self.parent_pid = parent_pid
        self.daemon = True
        self.canary = canary
        self.skip_mini = skip_mini

    def check_if_parent_is_alive(self):
        """
        Self explanatory.
        """
        try:
            kill(self.parent_pid, 0)
        except:
            exit(0)

    def in_skip_list(self, metric_name):
        """
        Check if the metric is in SKIP_LIST.
        """
        for to_skip in settings.SKIP_LIST:
            if to_skip in metric_name:
                return True

        return False

    def run(self):
        """
        Called when the process initializes.
        """
        # Log management to prevent overwriting
        # Allow the bin/<skyline_app>.d to manage the log
        if os.path.isfile(skyline_app_logwait):
            try:
                os_remove(skyline_app_logwait)
            except OSError:
                logger.error('error - failed to remove %s, continuing' % skyline_app_logwait)
                pass

        now = time()
        log_wait_for = now + 5
        while now < log_wait_for:
            if os.path.isfile(skyline_app_loglock):
                sleep(.1)
                now = time()
            else:
                now = log_wait_for + 1

        logger.info('starting %s run' % skyline_app)
        if os.path.isfile(skyline_app_loglock):
            logger.error('error - bin/%s.d log management seems to have failed, continuing' % skyline_app)
            try:
                os_remove(skyline_app_loglock)
                logger.info('log lock file removed')
            except OSError:
                logger.error('error - failed to remove %s, continuing' % skyline_app_loglock)
                pass
        else:
            logger.info('bin/%s.d log management done' % skyline_app)

        logger.info('%s :: started worker' % skyline_app)

        FULL_NAMESPACE = settings.FULL_NAMESPACE
        MINI_NAMESPACE = settings.MINI_NAMESPACE
        MAX_RESOLUTION = settings.MAX_RESOLUTION
        full_uniques = '%sunique_metrics' % FULL_NAMESPACE
        mini_uniques = '%sunique_metrics' % MINI_NAMESPACE
        pipe = self.redis_conn.pipeline()

        last_send_to_graphite = time()
        queue_sizes = []

        # python-2.x and python3.x had while 1 and while True differently
        # while 1:
        running = True
        while running:

            # Make sure Redis is up
            try:
                self.redis_conn.ping()
            except:
                logger.error('%s :: can\'t connect to redis at socket path %s' % (skyline_app, settings.REDIS_SOCKET_PATH))
                sleep(10)
                self.redis_conn = StrictRedis(unix_socket_path=settings.REDIS_SOCKET_PATH)
                pipe = self.redis_conn.pipeline()
                continue

            try:
                # Get a chunk from the queue with a 15 second timeout
                chunk = self.q.get(True, 15)
                now = time()

                for metric in chunk:
# ......... some code omitted here .........
Developer: blak3r2, Project: skyline, Lines: 103, Source: worker.py


Note: The redis.StrictRedis.ping method examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by their respective developers, and copyright for the source code remains with the original authors; consult each project's license before distributing or reusing the code, and do not republish without permission.