当前位置: 首页>>代码示例>>Python>>正文


Python settings.REDIS_PORT属性代码示例

本文整理汇总了Python中django.conf.settings.REDIS_PORT属性的典型用法代码示例。如果您正苦于以下问题:Python settings.REDIS_PORT属性的具体用法?Python settings.REDIS_PORT怎么用?Python settings.REDIS_PORT使用的例子?那么恭喜您, 这里精选的属性代码示例或许可以为您提供帮助。您也可以进一步了解该属性所在django.conf.settings的用法示例。


在下文中一共展示了settings.REDIS_PORT属性的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: get

# 需要导入模块: from django.conf import settings [as 别名]
# 或者: from django.conf.settings import REDIS_PORT [as 别名]
def get(self, request, *args, **kwargs):
        """Return weekly work-order execution counts read from the Redis 'WORK' hash.

        Response payload: {'title': ..., 'dataset': [{'time': <weekday>, '执行次数': <count>}, ...]}
        """
        connect = redis.StrictRedis(
            host=settings.REDIS_HOST,
            port=settings.REDIS_PORT,
            db=settings.REDIS_SPACE,
            password=settings.REDIS_PASSWD,
            # BUG FIX: without decode_responses, hgetall() returns bytes keys and
            # TEMP[key] with a str key raised KeyError.
            decode_responses=True
        )
        # NOTE(review): 'Won' looks like a typo for 'Mon', but the stored hash
        # presumably uses the same key string — confirm against the writer side
        # before renaming.
        week_list = ['Won', 'Tue', 'Wed', 'Thur', 'Fri', 'Sat', 'Sun']
        counts = connect.hgetall('WORK')
        work = []
        for key in week_list:
            work.append({
                # BUG FIX: str(key, encoding='utf-8') raised TypeError because
                # key is already a str; use it directly.
                'time': key,
                # .get() so a missing weekday yields 0 instead of KeyError.
                '执行次数': counts.get(key, 0)
            })
        # The 'or {}' in the original was dead code: a dict with keys is truthy.
        return Response(
            {'title': '一周内工单执行', 'dataset': work}, status.HTTP_200_OK
        )
开发者ID:YoLoveLife,项目名称:DevOps,代码行数:20,代码来源:dashboard.py

示例2: invalidate_view_cache_for_tenant_and_cache_key

# 需要导入模块: from django.conf import settings [as 别名]
# 或者: from django.conf.settings import REDIS_PORT [as 别名]
def invalidate_view_cache_for_tenant_and_cache_key(schema_name, cache_key_prefix=None):
    """Invalidate cached views belonging to one tenant.

    Only keys containing ``schema_name`` are dropped; when ``cache_key_prefix``
    is given, a key must also contain that prefix. With no prefix, every cached
    view for the tenant is invalidated. Raises KokuCacheError for backends
    other than Redis or local-memory.
    """
    cache = caches["default"]
    if isinstance(cache, RedisCache):
        # Talk to Redis directly so we can enumerate raw keys.
        cache = Redis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB)
        all_keys = [raw.decode("utf-8") for raw in cache.keys("*")]
    elif isinstance(cache, LocMemCache):
        # LocMem keys look like "prefix:version:key" — keep only the key part.
        raw_keys = locmem._caches.get(settings.TEST_CACHE_LOCATION).keys()
        all_keys = [raw.split(":", 2)[-1] for raw in raw_keys]
    else:
        raise KokuCacheError("Using an unsupported caching backend!")

    if all_keys is None:
        all_keys = []

    if cache_key_prefix:
        doomed = [k for k in all_keys if schema_name in k and cache_key_prefix in k]
    else:
        # No prefix: drop every cached view for this tenant.
        doomed = [k for k in all_keys if schema_name in k]

    for stale in doomed:
        cache.delete(stale)

    LOG.info(
        f"Invalidated request cache for\n\ttenant: {schema_name}\n\tcache_key_prefix: {cache_key_prefix}"
    )
开发者ID:project-koku,项目名称:koku,代码行数:32,代码来源:cache.py

示例3: __init__

# 需要导入模块: from django.conf import settings [as 别名]
# 或者: from django.conf.settings import REDIS_PORT [as 别名]
def __init__(self):
        """Create a pooled Redis client from the Django settings (decoded responses)."""
        pool = redis.ConnectionPool(
            host=settings.REDIS_HOST,
            port=settings.REDIS_PORT,
            decode_responses=True,
            db=settings.MAP_CACHE_DB,
            password=settings.REDIS_PASSWORD,
        )
        self._pool = pool
        self.client = redis.Redis(connection_pool=self._pool)
开发者ID:yandenghong,项目名称:KortURL,代码行数:7,代码来源:client.py

示例4: get_redis_client

# 需要导入模块: from django.conf import settings [as 别名]
# 或者: from django.conf.settings import REDIS_PORT [as 别名]
def get_redis_client() -> redis.StrictRedis:
    """Build a StrictRedis client for db 0 using host/port/password from settings."""
    return redis.StrictRedis(
        host=settings.REDIS_HOST,
        port=settings.REDIS_PORT,
        password=settings.REDIS_PASSWORD,
        db=0,
    )
开发者ID:zulip,项目名称:zulip,代码行数:5,代码来源:redis_utils.py

示例5: __init__

# 需要导入模块: from django.conf import settings [as 别名]
# 或者: from django.conf.settings import REDIS_PORT [as 别名]
def __init__(self, verbose=False):
        """Open a StrictRedis connection (db 0) per settings; `verbose` toggles chatty output."""
        self.verbose = verbose
        self.r = redis.StrictRedis(
            host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=0)
开发者ID:MTG,项目名称:freesound-datasets,代码行数:5,代码来源:redis_store.py

示例6: connect

# 需要导入模块: from django.conf import settings [as 别名]
# 或者: from django.conf.settings import REDIS_PORT [as 别名]
def connect(self):
        """Create the Redis pub/sub dispatcher and register a TCP connection.

        Written in the Twisted inlineCallbacks style: yields the factory's
        deferred so the caller resumes once the Redis service is registered.
        """
        # Removed the unused `cc = lambda *args: protocol.ClientCreator(...)`
        # local — nothing in this method referenced it.
        self.redis_sub = RedisDispatch(settings.REDIS_HOST, settings.REDIS_PORT)
        redis_factory = RedisServiceRegisteringFactory(self)
        reactor.connectTCP(settings.REDIS_HOST, settings.REDIS_PORT, redis_factory)
        yield redis_factory.deferred
开发者ID:canvasnetworks,项目名称:canvas,代码行数:9,代码来源:channels.py

示例7: __init__

# 需要导入模块: from django.conf import settings [as 别名]
# 或者: from django.conf.settings import REDIS_PORT [as 别名]
def __init__(self):
        """Ensure a Kerberos ticket, initialize the FreeIPA API client, and open Redis.

        Side-effect ordering matters: the Kerberos ticket must exist before the
        IPA RPC client connects.
        """
        # Acquire a ticket only if we do not already hold a valid one.
        if self.__kerberos_has_ticket() is False:
            self.__kerberos_init()
        # Bootstrap/finalize the IPA api object only once per process.
        if api.isdone('finalize') is False:
            api.bootstrap_with_global_options(context='api')
            api.finalize()
        api.Backend.rpcclient.connect()
        self.redis = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=settings.REDIS_DB, password=settings.REDIS_PASSWORD)
开发者ID:larrabee,项目名称:freeipa-password-reset,代码行数:10,代码来源:pwdmanager.py

示例8: __init__

# 需要导入模块: from django.conf import settings [as 别名]
# 或者: from django.conf.settings import REDIS_PORT [as 别名]
def __init__(self, thread_id, name, experiment, component_id, max_results, cache_results):
        """Worker thread that submits a workflow topology to Storm via Redis.

        Loads the experiment's workflow graph, topologically sorts it,
        serializes each component, and publishes the assembled message on the
        'workflow' Redis channel, subscribing to this experiment's result
        channel first. (Python 2 code.)
        """
        threading.Thread.__init__(self)
        self.threadID = thread_id
        self.name = name
        self.experiment = experiment
        self.comp_id = component_id
        self.result = {}
        self.max_results = max_results
        self.cache_results = cache_results
        print "Submitting topology to storm. End component", self.comp_id
        exp = Experiment.objects.get(pk=self.experiment)
        graph = exp.workflow.graph_data
        graph_data = {}
        print graph
        # graph_data maps each node to the set of nodes it depends on, parsed
        # from comma-separated "first:second" edge pairs.
        tmp = graph.split(',')
        for elem in tmp:
            first_node = elem.split(":")[0]
            second_node = elem.split(":")[1]
            if second_node in graph_data:
                depend_nodes = graph_data[second_node]
                depend_nodes.add(first_node)
            else:
                graph_data[second_node] = set()
                graph_data[second_node].add(first_node)
        # Flatten into a dependency-respecting execution order.
        topological_graph = toposort_flatten(graph_data)
        print "Graph after topological sort", topological_graph
        message = {
            'exp_id': self.experiment, 'result': self.comp_id,
            'graph': topological_graph, 'components': defaultdict()}

        for data in topological_graph:
            component_id = int(data)
            comp = Component.objects.get(pk=component_id)
            # 'Create Table' components carry their CSV contents inline in the
            # message, as column-name -> list-of-values.
            if comp.operation_type.function_type == 'Create':
                if comp.operation_type.function_arg == 'Table':
                        filename = comp.operation_type.function_subtype_arg
                        input_data = read_csv(filename)
                        message['input'] = {}
                        for elem in list(input_data.columns):
                            message['input'][elem] = list(input_data[elem])
                        message['cols'] = list(input_data.columns)
                        # message['input'] = input_data.to_dict()

            serialized_obj = serializers.serialize('json', [comp.operation_type, ])
            print "Component_id", component_id, " ", comp.operation_type
            message['components'][data] = serialized_obj

        print "Message ", message
        # Subscribe before publishing so results for this experiment are not missed.
        r = redis.StrictRedis(host=settings.REDIS_HOST, port=settings.REDIS_PORT, db=0)
        self.pubsub = r.pubsub(ignore_subscribe_messages=True)
        self.pubsub.subscribe("Exp " + str(self.experiment))
        ret = r.publish('workflow', json.dumps(message))
        print "return", ret
开发者ID:CiscoSystems,项目名称:cognitive,代码行数:55,代码来源:results_storm.py

示例9: handle

# 需要导入模块: from django.conf import settings [as 别名]
# 或者: from django.conf.settings import REDIS_PORT [as 别名]
def handle(self, sample='10000', host='ip-10-203-46-218.ec2.internal', *args, **options):
        """Estimate Redis memory usage per key group on a slave host.

        `sample` is either the number of random keys to sample per db, or "*"
        to scan every key and print only the total. Results are printed for
        both the main and cache databases. (Python 2 code.)
        """
        slave_redis = CanvasRedis(host=host, port=settings.REDIS_PORT, db=settings.REDIS_DB_MAIN)
        slave_cache = CanvasRedis(host=host, port=settings.REDIS_PORT, db=settings.REDIS_DB_CACHE)
        
        if sample != "*":
            sample = int(sample)

        def human(size):
            # Multiply size * 3 to roughly account for the difference in RDB vs in-memory size.
            return "%.1f MB" % (size * 3 / 1000000.0)

        for client in (slave_redis, slave_cache):
            dbsize = client.dbsize()
            if sample == "*":
                print "Summarizing total memory usage for db %s" % client.connection.db
                key_names = client.keys("*")
            else:
                groups = collections.defaultdict(lambda: 0)
                sizes = []
                # scalar extrapolates sampled sizes to the full keyspace.
                scalar = 1.0 * dbsize / sample
                print "Sampling %s random keys (of %s) from db %s" % (sample, dbsize, client.connection.db)
                # Batch RANDOMKEY calls through a pipeline to avoid per-key round trips.
                pipeline = client.pipeline()
                for i in range(sample):
                    pipeline.randomkey()
                key_names = pipeline.execute()

            # Size each key via DEBUG OBJECT, pipelined in chunks;
            # serializedlength + key length approximates the footprint.
            chunksize = 10000
            cursor = 0
            key_sizes = []
            while cursor < len(key_names):
                pipeline = client.pipeline()
                for result in key_names[cursor:cursor+chunksize]:
                    pipeline.execute_command("DEBUG", "OBJECT", result)
                debug_chunk = pipeline.execute()
                for i, result in enumerate(debug_chunk):
                    # DEBUG OBJECT replies are space-separated "field:value" pairs;
                    # prefixing "type:" makes the first token split into a pair too.
                    debug_dict = dict([kv.split(':') for kv in ('type:' + result).split()])
                    key = key_names[cursor + i]
                    keysize = int(debug_dict['serializedlength']) + len(key)
                    key_sizes.append(keysize)
                cursor += chunksize

            if sample == "*":
                print human(sum(key_sizes))
                continue

            # TODO: msg_backlogs look big, figure out how to group these (probably show biggest 25 keys too)
            # Collapse key names into groups by masking the middle segment
            # between ':' or '.' separators.
            for key, keysize in zip(key_names, key_sizes):
                keygroup = re.sub("(?<=[:\.]).+(?=[:\.])", "#", key)
                groups[keygroup] += keysize

            print "== TOP 10 RESULTS =="
            for k in sorted(groups, key=lambda k: -groups[k])[:10]:
                size = groups[k]
                print k, human(size * scalar)
            avg = 1.0 * sum(key_sizes) / len(key_sizes)
            print "Average key size: %s (%s estimated total)" % (avg, human(avg * dbsize))
            print ""
开发者ID:canvasnetworks,项目名称:canvas,代码行数:59,代码来源:profile_redis.py


注:本文中的django.conf.settings.REDIS_PORT属性示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。