

Python data_helper.MemcachedClientHelper Class Code Examples

This article collects typical usage examples of the Python class memcached.helper.data_helper.MemcachedClientHelper. If you are wondering what the MemcachedClientHelper class does, how it is used, or what real-world examples of it look like, the selected examples below should help.


The 15 code examples of the MemcachedClientHelper class shown below are sorted by popularity by default.
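Before the individual examples, here is a minimal sketch of the pattern most of them follow: obtain a client through MemcachedClientHelper, build a payload with create_value, and set keys on a bucket. This is only an illustration assuming the Couchbase testrunner environment used in the examples; load_sample_keys, the server argument, and the "default" bucket name are hypothetical placeholders, and the set(key, expiry, flags, value) call shape is taken from the excerpts themselves.

import uuid

from memcached.helper.data_helper import MemcachedClientHelper


def load_sample_keys(server, bucket_name="default", count=100, value_size=256):
    # proxy_client goes through the moxi proxy; direct_client (used in several
    # examples below) talks to a node's memcached port directly.
    client = MemcachedClientHelper.proxy_client(server, bucket_name)
    value = MemcachedClientHelper.create_value("*", value_size)
    keys = []
    for i in range(count):
        key = "key_{0}_{1}".format(uuid.uuid4(), i)
        client.set(key, 0, 0, value)  # (key, expiry, flags, value)
        keys.append(key)
    client.close()
    return keys

The examples that follow use the same calls inside larger test cases, typically wrapped in cluster setup and verification logic.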

Example 1: system_stats

    def system_stats(self, nodes, pnames, frequency, verbosity=False):
        shells = []
        for node in nodes:
            try:
                bucket = RestConnection(node).get_buckets()[0].name
                MemcachedClientHelper.direct_client(node, bucket)
                shells.append(RemoteMachineShellConnection(node))
            except:
                pass
        d = {"snapshots": []}
        #        "pname":"x","pid":"y","snapshots":[{"time":time,"value":value}]

        start_time = str(self._task["time"])
        while not self._aborted():
            time.sleep(frequency)
            current_time = time.time()
            i = 0
            for shell in shells:
                node = nodes[i]
                unique_id = node.ip+'-'+start_time
                for pname in pnames:
                    obj = RemoteMachineHelper(shell).is_process_running(pname)
                    if obj and obj.pid:
                        value = self._extract_proc_info(shell, obj.pid)
                        value["name"] = pname
                        value["id"] = obj.pid
                        value["unique_id"] = unique_id
                        value["time"] = current_time
                        value["ip"] = node.ip
                        d["snapshots"].append(value)
                i += 1
        self._task["systemstats"] = d["snapshots"]
        print " finished system_stats"
Developer: jchris, Project: testrunner, Lines: 33, Source: stats.py

Example 2: iostats

    def iostats(self, nodes, frequency, verbosity=False):

        shells = []
        for node in nodes:
            try:
                bucket = RestConnection(node).get_buckets()[0].name
                MemcachedClientHelper.direct_client(node, bucket)
                shells.append(RemoteMachineShellConnection(node))
            except:
                pass

        self._task["iostats"] = []

        print "started capturing io stats"

        while not self._aborted():
            time.sleep(frequency)
            print "collecting io_stats"
            for shell in shells:
                kB_read, kB_wrtn = self._extract_io_info(shell)
                if kB_read and kB_wrtn:
                    self._task["iostats"].append({"time": time.time(),
                                                 "ip": shell.ip,
                                                 "read": kB_read,
                                                 "write": kB_wrtn})
        print "finished capturing io stats"
Developer: IrynaMironava, Project: testrunner, Lines: 26, Source: stats.py

Example 3: common_tearDown

    def common_tearDown(servers, testcase):
        log = logger.Logger.get_logger()
        log.info(
            "==============  common_tearDown was started for test #{0} {1} ==============".format(
                testcase.case_number, testcase._testMethodName
            )
        )
        RemoteUtilHelper.common_basic_setup(servers)

        log.info("10 seconds delay to wait for couchbase-server to start")
        time.sleep(10)
        ClusterOperationHelper.wait_for_ns_servers_or_assert(
            servers, testcase, wait_time=AutoFailoverBaseTest.MAX_FAIL_DETECT_TIME * 15, wait_if_warmup=True
        )
        try:
            rest = RestConnection(servers[0])
            buckets = rest.get_buckets()
            for bucket in buckets:
                MemcachedClientHelper.flush_bucket(servers[0], bucket.name)
        except Exception:
            pass
        BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
        ClusterOperationHelper.cleanup_cluster(servers)
        log.info(
            "==============  common_tearDown was finished for test #{0} {1} ==============".format(
                testcase.case_number, testcase._testMethodName
            )
        )
Developer: jason-hou, Project: testrunner, Lines: 28, Source: autofailovertests.py

Example 4: insert_key

 def insert_key(serverInfo, bucket_name, count, size):
     client = MemcachedClientHelper.proxy_client(serverInfo, bucket_name)
     value = MemcachedClientHelper.create_value("*", size)
     for i in range(count * 1000):
         key = "key_" + str(i)
         flag = random.randint(1, 999)
         client.set(key, 0, flag, value)
Developer: IrynaMironava, Project: testrunner, Lines: 7, Source: autocompaction.py

Example 5: set_get_test

    def set_get_test(self, value_size, number_of_items):
        fixed_value = MemcachedClientHelper.create_value("S", value_size)
        specs = [
            ("default", 0),
            ("set-get-bucket-replica-1", 1),
            ("set-get-bucket-replica-2", 2),
            ("set-get-bucket-replica-3", 3),
        ]
        serverInfo = self.master
        rest = RestConnection(serverInfo)
        bucket_ram = int(rest.get_nodes_self().memoryQuota / 4)

        mcport = rest.get_nodes_self().memcached
        for name, replica in specs:
            rest.create_bucket(name, bucket_ram, "sasl", "password", replica, mcport)

        bucket_data = {}
        buckets = RestConnection(serverInfo).get_buckets()
        for bucket in buckets:
            bucket_data[bucket.name] = {}
            ready = BucketOperationHelper.wait_for_memcached(serverInfo, bucket.name)
            self.test.assertTrue(ready, "wait_for_memcached failed")

            client = MemcachedClientHelper.direct_client(serverInfo, bucket.name)
            inserted = []
            rejected = []
            while len(inserted) <= number_of_items and len(rejected) <= number_of_items:
                try:
                    key = str(uuid.uuid4())
                    client.set(key, 0, 0, fixed_value)
                    inserted.append(key)
                except mc_bin_client.MemcachedError:
                    pass

            retry = 0
            remaining_items = []
            remaining_items.extend(inserted)
            msg = "memcachedError : {0} - unable to get a pre-inserted key : {1}"
            while retry < 10 and len(remaining_items) > 0:
                verified_keys = []
                for key in remaining_items:
                    try:
                        flag, keyx, value = client.get(key=key)
                        if not value == fixed_value:
                            self.test.fail("value mismatch for key {0}".format(key))
                        verified_keys.append(key)
                    except mc_bin_client.MemcachedError as error:
                        self.log.error(msg.format(error.status, key))
                    retry += 1
                [remaining_items.remove(x) for x in verified_keys]

            print_count = 0
            for key in remaining_items:
                if print_count > 100:
                    break
                print_count += 1
                self.log.error("unable to verify key : {0}".format(key))
            if remaining_items:
                self.test.fail("unable to verify {0} keys".format(len(remaining_items)))
Developer: DavidAlphaFox, Project: couchbase, Lines: 59, Source: setgettests.py

Example 6: test_checkpointing_with_full_rollback

    def test_checkpointing_with_full_rollback(self):
        bucket = self.src_cluster.get_buckets()[0]
        nodes = self.src_cluster.get_nodes()

        # Stop Persistence on Node A & Node B
        for node in nodes:
            mem_client = MemcachedClientHelper.direct_client(node, bucket)
            mem_client.stop_persistence()

        self.src_cluster.pause_all_replications()

        gen = BlobGenerator("C1-", "C1-", self._value_size, end=self._num_items)
        self.src_cluster.load_all_buckets_from_generator(gen)

        self.src_cluster.resume_all_replications()

        self.sleep(self._checkpoint_interval * 2)

        self.get_and_validate_latest_checkpoint()

        # Perform mutations on the bucket
        self.async_perform_update_delete()

        self.sleep(self._wait_timeout)

        # Kill memcached on Node A so that Node B becomes master
        shell = RemoteMachineShellConnection(self.src_cluster.get_master_node())
        shell.kill_memcached()

        # Start persistence on Node B
        mem_client = MemcachedClientHelper.direct_client(nodes[1], bucket)
        mem_client.start_persistence()

        # Failover Node B
        failover_task = self.src_cluster.async_failover()
        failover_task.result()

        # Wait for Failover & rollback to complete
        self.sleep(self._wait_timeout * 5)

        goxdcr_log = NodeHelper.get_goxdcr_log_dir(self._input.servers[0]) \
                     + '/goxdcr.log*'
        count1 = NodeHelper.check_goxdcr_log(
            nodes[0],
            "Received rollback from DCP stream",
            goxdcr_log)
        self.assertGreater(count1, 0, "full rollback not received from DCP as expected")
        self.log.info("full rollback received from DCP as expected")
        count2 = NodeHelper.check_goxdcr_log(
            nodes[0],
            "Rolled back startSeqno to 0",
            goxdcr_log)
        self.assertGreater(count2, 0, "startSeqno not rolled back to 0 as expected")
        self.log.info("startSeqno rolled back to 0 as expected")

        shell.disconnect()
Developer: arod1987, Project: testrunner, Lines: 56, Source: checkpointXDCR.py

Example 7: common_tearDown

 def common_tearDown(servers, testcase):
     RemoteUtilHelper.common_basic_setup(servers)
     log = logger.Logger.get_logger()
     log.info("10 seconds delay to wait for couchbase-server to start")
     time.sleep(10)
     ClusterOperationHelper.wait_for_ns_servers_or_assert(servers, testcase)
     try:
         MemcachedClientHelper.flush_bucket(servers[0], 'default')
     except Exception:
         pass
     BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
     ClusterOperationHelper.cleanup_cluster(servers)
     ClusterOperationHelper.wait_for_ns_servers_or_assert(servers, testcase)
Developer: jchris, Project: testrunner, Lines: 13, Source: autofailovertests.py

Example 8: _test_backup_add_restore_bucket_with_expiration_key

    def _test_backup_add_restore_bucket_with_expiration_key(self, replica):
        bucket = "default"
        rest = RestConnection(self.master)
        info = rest.get_nodes_self()
        size = int(info.memoryQuota * 2.0 / 3.0)
        rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=info.moxi, replicaNumber=replica)
        BucketOperationHelper.wait_for_memcached(self.master, bucket)
        client = MemcachedClientHelper.direct_client(self.master, bucket)
        expiry = 60
        test_uuid = uuid.uuid4()
        keys = ["key_%s_%d" % (test_uuid, i) for i in range(5000)]
        self.log.info("pushing keys with expiry set to {0}".format(expiry))
        for key in keys:
            try:
                client.set(key, expiry, 0, key)
            except mc_bin_client.MemcachedError as error:
                msg = "unable to push key : {0} to bucket : {1} error : {2}"
                self.log.error(msg.format(key, client.vbucketId, error.status))
                self.fail(msg.format(key, client.vbucketId, error.status))
        client.close()
        self.log.info("inserted {0} keys with expiry set to {1}".format(len(keys), expiry))
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
        self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
        node = RestConnection(self.master).get_nodes_self()

        output, error = self.shell.execute_command(self.perm_command)
        self.shell.log_command_output(output, error)
        backupHelper = BackupHelper(self.master, self)
        backupHelper.backup(bucket, node, self.remote_tmp_folder)

        BucketOperationHelper.delete_bucket_or_assert(self.master, bucket, self)
        rest.create_bucket(bucket, ramQuotaMB=size, proxyPort=info.moxi)
        BucketOperationHelper.wait_for_memcached(self.master, bucket)
        backupHelper.restore(self.remote_tmp_folder)
        time.sleep(60)
        client = MemcachedClientHelper.direct_client(self.master, bucket)
        self.log.info('verifying that all those keys have expired...')
        for key in keys:
            try:
                client.get(key=key)
                msg = "expiry was set to {0} but key: {1} did not expire after waiting for {2}+ seconds"
                self.fail(msg.format(expiry, key, expiry))
            except mc_bin_client.MemcachedError as error:
                self.assertEquals(error.status, 1,
                                  msg="expected error code {0} but saw error code {1}".format(1, error.status))
        client.close()
        self.log.info("verified that those keys inserted with expiry set to {0} have expired".format(expiry))
Developer: jchris, Project: testrunner, Lines: 49, Source: backuptests.py

Example 9: setUp

 def setUp(self):
     self.log = logger.Logger.get_logger()
     self.params = TestInputSingleton.input.test_params
     self.master = TestInputSingleton.input.servers[0]
     rest = RestConnection(self.master)
     rest.init_cluster(self.master.rest_username, self.master.rest_password)
     info = rest.get_nodes_self()
     rest.init_cluster_memoryQuota(self.master.rest_username, self.master.rest_password,
                                   memoryQuota=info.mcdMemoryReserved)
     ClusterOperationHelper.cleanup_cluster([self.master])
     ClusterOperationHelper.wait_for_ns_servers_or_assert([self.master], self)
     self._create_default_bucket()
     self.keys_cleanup = []
     self.onenodemc = MemcachedClientHelper.direct_client(self.master, "default", timeout=600)
     self.onenodemoxi = MemcachedClientHelper.proxy_client(self.master, "default", timeout=600)
Developer: DavidAlphaFox, Project: couchbase, Lines: 15, Source: memcapable.py

Example 10: common_tearDown

 def common_tearDown(servers, testcase):
     for server in servers:
         shell = RemoteMachineShellConnection(server)
         shell.start_membase()
     log = logger.Logger.get_logger()
     log.info("10 seconds delay to wait for membase-server to start")
     time.sleep(10)
     ClusterOperationHelper.wait_for_ns_servers_or_assert(servers, testcase)
     try:
         MemcachedClientHelper.flush_bucket(servers[0], 'default', 11211)
     except Exception:
         pass
     ClusterOperationHelper.cleanup_cluster(servers)
     ClusterOperationHelper.wait_for_ns_servers_or_assert(servers, testcase)
     BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
Developer: IrynaMironava, Project: testrunner, Lines: 15, Source: combotests.py

Example 11: test_time_sync_threshold_setting_rest_call

    def test_time_sync_threshold_setting_rest_call(self):

        self.log.info("starting test_time_sync_threshold_setting_rest_call")

        # bucket is created with lww in base test case using the LWW parameter

        client = MemcachedClientHelper.direct_client(self.servers[0], self.buckets[0])

        rest = RestConnection(self.master)
        self.assertTrue(
            rest.set_cas_drift_threshold(self.buckets[0], 100000, 200000), "Unable to set the CAS drift threshold"
        )
        time.sleep(15)  # take a few seconds for the stats to settle in
        stats = client.stats()

        self.assertTrue(
            int(stats["ep_hlc_drift_ahead_threshold_us"]) == 100000 * 1000,
            "Ahead threshold incorrect. Expected {0} actual {1}".format(
                100000 * 1000, stats["ep_hlc_drift_ahead_threshold_us"]
            ),
        )

        self.assertTrue(
            int(stats["ep_hlc_drift_behind_threshold_us"]) == 200000 * 1000,
            "Ahead threshold incorrect. Expected {0} actual {1}".format(
                200000 * 1000, stats["ep_hlc_drift_behind_threshold_us"]
            ),
        )
Developer: membase, Project: testrunner, Lines: 28, Source: lww_stats.py

Example 12: test_time_sync_threshold_setting

    def test_time_sync_threshold_setting(self):

        self.log.info("starting test_time_sync_threshold_setting")

        # bucket is created with lww in base test case using the LWW parameter

        # get the stats
        client = MemcachedClientHelper.direct_client(self.servers[0], self.buckets[0])
        ahead_threshold = int(client.stats()["ep_hlc_drift_ahead_threshold_us"])
        self.assertTrue(
            ahead_threshold == LWWStatsTests.DEFAULT_THRESHOLD,
            "Ahead threshold mismatch expected: {0} actual {1}".format(
                LWWStatsTests.DEFAULT_THRESHOLD, ahead_threshold
            ),
        )
        # change the setting and verify it is per the new setting - this may or may not be supported

        shell = RemoteMachineShellConnection(self.servers[0])
        output, error = shell.execute_cbepctl(
            self.buckets[0],
            "",
            "set vbucket_param",
            "hlc_drift_ahead_threshold_us ",
            str(LWWStatsTests.DEFAULT_THRESHOLD / 2) + LWWStatsTests.DUMMY_VBUCKET,
        )
        if len(error) > 0:
            self.fail("Failed to set the drift counter threshold, please check the logs.")

        ahead_threshold = int(client.stats()["ep_hlc_drift_ahead_threshold_us"])
        self.assertTrue(
            ahead_threshold == LWWStatsTests.DEFAULT_THRESHOLD / 2,
            "Ahead threshold mismatch expected: {0} actual {1}".format(
                LWWStatsTests.DEFAULT_THRESHOLD / 2, ahead_threshold
            ),
        )
Developer: membase, Project: testrunner, Lines: 35, Source: lww_stats.py

Example 13: _verify_data

    def _verify_data(self, version):
        #verify all the keys
        #let's use vbucketaware
        rest = RestConnection(self.servers[0])
        moxi = MemcachedClientHelper.proxy_client(self.servers[0], self.bucket_name)
        index = 0
        all_verified = True
        keys_failed = []
        for key in self.updated_keys:
            try:
                index += 1
                flag, keyx, value = moxi.get(key=key)
                self.assertTrue(value.endswith(version),
                                msg='values do not match . key value should endwith {0}'.format(version))
            except MemcachedError as error:
                self.log.error(error)
                self.log.error(
                    "memcachedError : {0} - unable to get a pre-inserted key : {0}".format(error.status, key))
                keys_failed.append(key)
                all_verified = False
                #            except :
                #                self.log.error("unknown errors unable to get a pre-inserted key : {0}".format(key))
                #                keys_failed.append(key)
                #                all_verified = False

        self.assertTrue(all_verified,
                        'unable to verify #{0} keys'.format(len(keys_failed)))
Developer: steveyen, Project: testrunner, Lines: 27, Source: replicationtests.py

Example 14: _insert_data

 def _insert_data(self, howmany):
     self.onenodemc = MemcachedClientHelper.proxy_client(self.master, "default")
     items = ["{0}-{1}".format(str(uuid.uuid4()), i) for i in range(0, howmany)]
     for item in items:
         self.onenodemc.set(item, 0, 0, item)
     self.log.info("inserted {0} items".format(howmany))
     self.onenodemc.close()
Developer: paul-guo-, Project: appstack, Lines: 7, Source: warmupcluster.py

Example 15: run

 def run(self):
     client = MemcachedClientHelper.direct_client(server, bucket)
     for i in range(num_items):
         key = "key-{0}".format(i)
         value = "value-{0}".format(str(uuid.uuid4())[:7])
         client.set(key, 0, 0, value, 0)
     log.info("Loaded {0} key".format(num_items))
Developer: IrynaMironava, Project: testrunner, Lines: 7, Source: checkpoint.py


Note: The memcached.helper.data_helper.MemcachedClientHelper class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution and use are subject to each project's license. Do not republish without permission.