

Python cache.CacheHelper Class Code Examples

This article collects typical usage examples of the cache.CacheHelper class in Python. If you are wondering what the CacheHelper class does in practice, or how to use it, the curated examples below may help.


The following presents 15 code examples of the CacheHelper class, sorted by popularity by default.
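Most of the examples share the same access pattern: a shared status or workload object is looked up from the cache by a tag-derived key, and the caller falls back to a default when nothing has been cached yet. The following is a minimal, hypothetical sketch of that pattern, assuming only the class-level helpers (clusterstatus, active_queries, workloads) that appear in the examples; it is not an excerpt from testrunner.

from cache import CacheHelper

def cluster_hosts(cluster_tag):
    # look up the cached ClusterStatus for this cluster tag, if any
    clusterStatus = CacheHelper.clusterstatus(cluster_tag + "_status")
    if clusterStatus is None:
        return []
    return clusterStatus.get_all_hosts()

def active_buckets():
    # workloads() returns every cached workload object; keep the active ones
    return set(w.bucket for w in CacheHelper.workloads() if w.active)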

Example 1: queryRunner

def queryRunner():

    hosts = None
    clusterStatus = CacheHelper.clusterstatus(cfg.CB_CLUSTER_TAG+"_status")

    if clusterStatus:
        hosts = clusterStatus.get_all_hosts()

    # retrieve all active query workloads
    queries = CacheHelper.active_queries()
    for query in queries:

        # async update query workload object
        updateQueryWorkload.apply_async(args=[query])

        count = int(query.qps)
        filters = list(set(query.include_filters) -\
                       set(query.exclude_filters))
        params = generateQueryParams(query.indexed_key,
                                     query.bucket,
                                     filters,
                                     query.limit,
                                     query.startkey,
                                     query.endkey,
                                     query.startkey_docid,
                                     query.endkey_docid)
        multi_query.delay(count,
                          query.ddoc,
                          query.view,
                          params,
                          query.bucket,
                          query.password,
                          hosts = hosts)
Developer: Boggypop, Project: testrunner, Lines: 33, Source: query.py

Example 2: setitup

    def setitup(self):
        # if the user forgets to assign the number of initial nodes for any cluster
        # use 1 node as default
        if len(self._num_initial_nodes) < len(self._clusters_keys_olst):
            diff = len(self._clusters_keys_olst) - len(self._num_initial_nodes)
            for i in range(diff):
                self._num_initial_nodes.append('1')

        for key in self._clusters_keys_olst:
            clusterStatus = None
            if key == 0:
                clusterStatus = CacheHelper.clusterstatus(cfg.CB_CLUSTER_TAG+"_status") or ClusterStatus()
            else:
                clusterStatus = CacheHelper.clusterstatus(cfg.CB_REMOTE_CLUSTER_TAG[key-1]+"_status") or\
                    ClusterStatus(cfg.CB_REMOTE_CLUSTER_TAG[key-1]+"_status")

            clusterStatus.all_available_hosts = ["%s:%s" % (node.ip, node.port) for node in self._clusters_dic[key]]

            self.set_the_cluster_up(self._clusters_dic[key][:int(self._num_initial_nodes[key])])

        time.sleep(20)

        if self._xdcr:
            self._link_create_replications(self._s_master, self._d_master, "cluster1")
            if self._rdirection == "bidirection":
                self._link_create_replications(self._d_master, self._s_master, "cluster0")
Developer: abhinavdangeti, Project: testrunner, Lines: 26, Source: cluster_setup.py

Example 3: report_kv_latency

def report_kv_latency(bucket = "default"):

    if cfg.SERIESLY_IP == '':
        # seriesly not configured
        return

    rabbitHelper = report_kv_latency.rabbitHelper
    clusterStatus = CacheHelper.clusterstatus(cfg.CB_CLUSTER_TAG+"_status") or\
        ClusterStatus()

    host = clusterStatus.get_random_host()
    if host is None: return

    ip, port = host.split(':')

    workloads = CacheHelper.workloads()
    for workload in workloads:
        if workload.active and workload.bucket == bucket:

            # read workload params
            bucket = str(workload.bucket)
            password = str(workload.password)

            # read template from active workload
            template = Template.from_cache(str(workload.template))
            template = template.__dict__
            client.decodeMajgicStrings(template)

            # setup key/val to use for timing
            key = _random_string(12)
            value = json.dumps(template['kv'])
            get_key = key


            # for get op, try to pull from consume_queue
            # so that we can calc impact of dgm
            consume_queue = workload.consume_queue
            if consume_queue is not None:
                keys = rabbitHelper.getJsonMsg(str(consume_queue), requeue = True)
                if len(keys) > 0:
                    get_key = str(keys[0])

            # collect op latency
            set_latency = client.mc_op_latency('set', key, value, ip, port, bucket, password)
            get_latency = client.mc_op_latency('get', get_key, value, ip, port, bucket, password)
            delete_latency = client.mc_op_latency('delete', key, value, ip, port, bucket, password)


            # report to seriesly
            seriesly = Seriesly(cfg.SERIESLY_IP, 3133)
            db='fast'
            seriesly[db].append({'set_latency' : set_latency,
                                 'get_latency' : get_latency,
                                 'delete_latency' : delete_latency})
Developer: jason-hou, Project: testrunner, Lines: 54, Source: workload_manager.py

Example 4: perform_admin_tasks

def perform_admin_tasks(adminMsg, cluster_id=cfg.CB_CLUSTER_TAG + "_status"):
    app.workload_manager.updateClusterStatus()
    clusterStatus = CacheHelper.clusterstatus(cluster_id)
    if clusterStatus is None:
        logger.error("Unable to fetch clusterStatus from cache")
        return

    rest = clusterStatus.node_rest()

    # Add nodes
    servers = adminMsg["rebalance_in"]
    add_nodes(rest, servers, cluster_id)

    # Get all nodes
    allNodes = []
    for node in rest.node_statuses():
        allNodes.append(node.id)

    # Remove nodes
    servers = adminMsg["rebalance_out"]
    toBeEjectedNodes = remove_nodes(rest, servers, adminMsg["involve_orchestrator"], cluster_id)

    # Failover Node
    servers = adminMsg["failover"]
    auto_failover_servers = adminMsg["auto_failover"]
    only_failover = adminMsg["only_failover"]
    add_back_servers = adminMsg["add_back"]
    failoverNodes = failover_nodes(rest, servers, only_failover, adminMsg["involve_orchestrator"], cluster_id)
    autoFailoverNodes = auto_failover_nodes(
        rest, auto_failover_servers, only_failover, adminMsg["involve_orchestrator"], cluster_id
    )

    app.workload_manager.updateClusterStatus()
    clusterStatus = CacheHelper.clusterstatus(cluster_id) or ClusterStatus(cluster_id)
    rest = clusterStatus.node_rest()
    addBackNodes = add_back_nodes(rest, add_back_servers, autoFailoverNodes + failoverNodes)
    toBeEjectedNodes.extend(failoverNodes)
    toBeEjectedNodes.extend(autoFailoverNodes)
    for node in addBackNodes:
        toBeEjectedNodes.remove(node)

    # SoftRestart a node
    servers = adminMsg["soft_restart"]
    restart(servers, cluster_id=cluster_id)

    # HardRestart a node
    servers = adminMsg["hard_restart"]
    restart(servers, type="hard", cluster_id=cluster_id)

    if not only_failover and (len(allNodes) > 0 or len(toBeEjectedNodes) > 0):
        logger.error("Rebalance")
        logger.error(allNodes)
        logger.error(toBeEjectedNodes)
        rest.rebalance(otpNodes=allNodes, ejectedNodes=toBeEjectedNodes)
Developer: Boggypop, Project: testrunner, Lines: 54, Source: rest_client_tasks.py
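Example 4 reads a fixed set of keys from adminMsg. The dictionary below is a hypothetical payload reconstructed from those accesses; the value types (server strings and booleans) are assumptions, not a documented schema.

# hypothetical adminMsg payload, inferred from the keys perform_admin_tasks reads
adminMsg = {
    "rebalance_in": "10.1.2.3:8091",   # nodes to add before rebalancing (assumed format)
    "rebalance_out": "",               # nodes to remove
    "failover": "",                    # nodes to fail over
    "auto_failover": "",               # nodes to fail over automatically
    "only_failover": False,            # when True, skip the final rebalance
    "add_back": "",                    # failed-over nodes to add back
    "soft_restart": "",                # nodes to restart via the service script
    "hard_restart": "",                # nodes to reboot
    "involve_orchestrator": False,     # whether the orchestrator node may be affected
}

With such a message, perform_admin_tasks(adminMsg) adds, removes, fails over, and restarts nodes in that order before issuing a single rebalance.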

Example 5: postcondition_handler

def postcondition_handler():

    workloads = CacheHelper.workloads()
    for workload in workloads:
        if workload.postcondition_handler and workload.active:
            bucket = workload.bucket
            bs = BucketStatus.from_cache(bucket)
            bs.block(bucket)
            status = True

            try:
                postcondition_handler = \
                    getattr(phandler,
                            workload.postcondition_handler)

                status = postcondition_handler(workload)

            except AttributeError:
                logger.error("Postcondition method %s doesn't exist" \
                             % workload.postcondition_handler)
                workload.postcondition = None
                workload.postcondition_handler = None


            if status == True:
                # unblock bucket and deactivate workload
                bs = BucketStatus.from_cache(bucket)
                bs.unblock(bucket)
                workload.active = False
Developer: ashvindersingh, Project: testrunner, Lines: 29, Source: workload_manager.py

Example 6: restart

def restart(servers='', type='soft', cluster_id=cfg.CB_CLUSTER_TAG+"_status"):
    if servers.find('.') != -1 or servers == '':
        servers = servers.split()
    else:
        clusterStatus = CacheHelper.clusterstatus(cluster_id)
        count = int(servers)
        if len(clusterStatus.nodes) >= int(count):
            servers = clusterStatus.get_all_hosts()
        else:
            logger.error("Restart nodes request invalid. # of nodes in cluster is not enough")
            return
        servers = servers[:count]

    for server in servers:
        ip, port = parse_server_arg(server)
        node_ssh, node = create_ssh_conn(ip)
        if type != 'soft':
            logger.error('Hard Restart')
            if cfg.COUCHBASE_OS == "windows":
                cmd = "shutdown -r -t 0"
            else:
                cmd = "reboot"
        else:
            logger.error('Soft Restart')
            if cfg.COUCHBASE_OS == "windows":
                cmd = "net stop couchbaseserver && net start couchbaseserver"
            else:
                cmd = "/etc/init.d/couchbase-server restart"

        logger.error(cmd)
        result = node_ssh.execute_command(cmd, node)
        logger.error(result)
Developer: abhinavdangeti, Project: testrunner, Lines: 32, Source: rest_client_tasks.py

Example 7: restart

def restart(servers="", type="soft", cluster_id=cfg.CB_CLUSTER_TAG + "_status"):
    if servers.find(".") != -1 or servers == "":
        servers = servers.split()
    else:
        clusterStatus = CacheHelper.clusterstatus(cluster_id) or ClusterStatus(cluster_id)
        count = int(servers)
        if len(clusterStatus.nodes) >= int(count):
            servers = clusterStatus.get_all_hosts()
        else:
            logger.error("Restart nodes request invalid. # of nodes in cluster is not enough")
            return
        servers = servers[:count]

    for server in servers:
        ip, port = parse_server_arg(server)
        node_ssh, node = create_ssh_conn(ip)
        if type is not "soft":
            logger.error("Hard Restart")
            cmd = "reboot"
        else:
            logger.error("Soft Restart")
            cmd = "/etc/init.d/couchbase-server restart"

        logger.error(cmd)
        result = node_ssh.execute_command(cmd, node)
        logger.error(result)
Developer: Boggypop, Project: testrunner, Lines: 26, Source: rest_client_tasks.py

Example 8: updateQueryWorkload

def updateQueryWorkload(query):
    workloads = CacheHelper.workloads()

    for workload in workloads:
        if workload.active and workload.bucket == query.bucket:
            key = query.indexed_key
            workload.updateIndexKeys(key)
Developer: Boggypop, Project: testrunner, Lines: 7, Source: query.py

Example 9: queryConsumer

def queryConsumer(queryQueue = "query_default"):

    rabbitHelper = queryConsumer.rabbitHelper
    queryQueueSize = rabbitHelper.qsize(queryQueue)

    # for cli, retrieve the currently active query workload
    # since multi-query is not supported here
    active_query = None
    all_queries = CacheHelper.active_queries()
    if len(all_queries) > 0:
        active_query = all_queries[0]

    if queryQueueSize > 0:

        # setup new query workload from queued message
        queryMsg = rabbitHelper.getJsonMsg(queryQueue)
        logger.error(queryMsg)
        try:
            queryWorkload = QueryWorkload(queryMsg)

            # deactivate old query workload
            if active_query is not None:
                active_query.active = False


            # activate new query workload
            # to be detected in queryRunner task
            queryWorkload.active = True

            if 'rcq' in queryMsg:
                rabbitHelper.putMsg(queryMsg['rcq'], "Started Querying: %s/%s" % \
                    (queryWorkload.ddoc, queryWorkload.view))

        except KeyError:
            logger.info("Invalid query workload message: %s" % queryMsg)
Developer: mschoch, Project: testrunner, Lines: 35, Source: query.py

Example 10: queryRunner

def queryRunner(max_msgs=10):

    rabbitHelper = queryRunner.rabbitHelper

    # check queue with pending http requests
    pending_http_requests = "query_multi_" + cfg.CB_CLUSTER_TAG
    if rabbitHelper.qsize(pending_http_requests) > max_msgs:

        # purge waiting tasks
        rabbitHelper.purge(pending_http_requests)
        query_ops_manager(max_msgs, True)

    else:

        hosts = None
        clusterStatus = CacheHelper.clusterstatus(cfg.CB_CLUSTER_TAG + "_status")

        if clusterStatus:
            hosts = clusterStatus.get_all_hosts()

        # retrieve all active query workloads
        queries = CacheHelper.active_queries()
        for query in queries:

            # async update query workload object
            updateQueryWorkload.apply_async(args=[query])

            count = int(query.qps)
            filters = list(set(query.include_filters) - set(query.exclude_filters))
            params = generateQueryParams(
                query.indexed_key,
                query.bucket,
                filters,
                query.limit,
                query.startkey,
                query.endkey,
                query.startkey_docid,
                query.endkey_docid,
            )
            multi_query.delay(count, query.ddoc, query.view, params, query.bucket, query.password, hosts=hosts)
Developer: DavidAlphaFox, Project: couchbase, Lines: 40, Source: query.py

Example 11: getClusterStat

def getClusterStat(bucket, stat):

    val = 0
    clusterStatus = CacheHelper.clusterstatus(cfg.CB_CLUSTER_TAG+"_status") or\
        ClusterStatus()
    host = clusterStatus.get_random_host()
    stat_checker = phandler.BucketStatChecker(bucket, addr = host)
    stats = stat_checker.get_stats()
    if len(stats) > 0:
        if stat in stats:
            val = stats[stat]

    return val
Developer: ashvindersingh, Project: testrunner, Lines: 13, Source: workload_manager.py

Example 12: get_ep_hostip_from_params

def get_ep_hostip_from_params(params):
    app.workload_manager.updateClusterStatus()
    clusterStatus = CacheHelper.clusterstatus(cfg.CB_CLUSTER_TAG+"_status")
    random_host = None
    try:
        random_host = clusterStatus.get_random_host().split(":")[0]
    except AttributeError:
        logger.error("Can not fetch cluster status information")
        pass

    host = params.get('ip') or random_host or cfg.COUCHBASE_IP
    port = params.get('port') or 11210

    return host, int(port)
Developer: DavidAlphaFox, Project: couchbase, Lines: 14, Source: postcondition_handlers.py

Example 13: queryRunner

def queryRunner():

    # retrieve all active query workloads
    queries = CacheHelper.active_queries()
    for query in queries:

        count = int(query.qps)
        params = {"stale" : "update_after"}
        multi_query.delay(count,
                          query.ddoc,
                          query.view,
                          params,
                          query.bucket,
                          query.password)
Developer: mschoch, Project: testrunner, Lines: 14, Source: query.py
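Unlike Examples 1 and 10, this variant of queryRunner does not look up hosts from the cached cluster status and does not call updateQueryWorkload; it simply issues each active query with a fixed stale=update_after parameter and omits the hosts argument, leaving host selection to multi_query's defaults.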

Example 14: throttle_kv_ops

def throttle_kv_ops(isovercommited=True):

    rabbitHelper = throttle_kv_ops.rabbitHelper

    workloads = CacheHelper.workloads()
    for workload in workloads:
        if workload.active:
            if isovercommited:
                # clear pending task_queue
                rabbitHelper.purge(workload.task_queue)

                # reduce ops by 10%
                workload.ops_per_sec = workload.ops_per_sec*0.90
                logger.error("Cluster Overcommited: reduced ops to (%s)" % workload.ops_per_sec)
Developer: xiejunyi, Project: testrunner, Lines: 14, Source: workload_manager.py

Example 15: queue_op_cycles

def queue_op_cycles(workload):


    # read doc template
    template = Template.from_cache(str(workload.template))
    if template is None:
        logger.error("no doc template imported")
        return

    rabbitHelper = queue_op_cycles.rabbitHelper
    bucket = str(workload.bucket)
    task_queue = workload.task_queue

    active_hosts = None
    clusterStatus = CacheHelper.clusterstatus(cfg.CB_CLUSTER_TAG+"_status")
    if clusterStatus is not None:
        active_hosts = clusterStatus.get_all_hosts()

    # create 20 op cycles
    for i in xrange(20):

        if workload.cc_queues is not None:
            # override template attribute with workload
            template.cc_queues = workload.cc_queues

        if len(workload.indexed_keys) > 0:
            template.indexed_keys = workload.indexed_keys

        # read workload settings
        bucketInfo = {"bucket" : workload.bucket,
                      "password" : workload.password}

        ops_sec = workload.ops_per_sec

        create_count = int(ops_sec *  workload.create_perc/100)
        update_count = int(ops_sec *  workload.update_perc/100)
        get_count = int(ops_sec *  workload.get_perc/100)
        del_count = int(ops_sec *  workload.del_perc/100)
        exp_count = int(ops_sec *  workload.exp_perc/100)
        consume_queue =  workload.consume_queue

        ttl = workload.ttl
        miss_queue = workload.miss_queue
        miss_perc = workload.miss_perc

        generate_pending_tasks(task_queue, template, bucketInfo, create_count,
                               update_count, get_count, del_count, exp_count,
                               consume_queue, ttl, miss_perc, miss_queue, active_hosts)
Developer: ashvindersingh, Project: testrunner, Lines: 48, Source: workload_manager.py


Note: The cache.CacheHelper class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Please refer to the corresponding project's license before distributing or using it; do not reproduce without permission.