

Python RemoteMachineShellConnection.kill_memcached Method Code Examples

This article collects typical usage examples of the Python method lib.remote.remote_util.RemoteMachineShellConnection.kill_memcached. If you are wondering what RemoteMachineShellConnection.kill_memcached does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the containing class, lib.remote.remote_util.RemoteMachineShellConnection.


The sections below present 4 code examples of the RemoteMachineShellConnection.kill_memcached method, sorted by popularity by default.
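All four examples share the same basic call pattern: open a remote shell to the target server, call kill_memcached(), and close the connection. The snippet below is a minimal sketch of that pattern, modeled on Example 2; the helper name kill_memcached_on and its server argument are illustrative only, and server is assumed to be a testrunner server descriptor as used in the examples.

# Minimal usage sketch (assumes `server` is a testrunner server object, as in the examples below)
from lib.remote.remote_util import RemoteMachineShellConnection

def kill_memcached_on(server):
    # Open an SSH shell to the node, kill the memcached process, then clean up the connection
    shell = RemoteMachineShellConnection(server)
    shell.kill_memcached()
    shell.disconnect()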

Example 1: test_partial_rollback

# Required import: from lib.remote.remote_util import RemoteMachineShellConnection [as alias]
# Or: from lib.remote.remote_util.RemoteMachineShellConnection import kill_memcached [as alias]
    def test_partial_rollback(self):
        kv_node = self.get_nodes_from_services_map(service_type="kv", get_all_nodes=True)
        log.info("kv nodes:{0}".format(kv_node))
        for node in kv_node:
            mem_client = MemcachedClientHelper.direct_client(node, self.src_bucket_name)
            mem_client.stop_persistence()
        body = self.create_save_function_body(self.function_name, self.handler_code,
                                              worker_count=3)
        try:
            task = self.cluster.async_load_gen_docs(self.master, self.src_bucket_name, self.gens_load,
                                                    self.buckets[0].kvs[1], 'create', compression=self.sdk_compression)
        except Exception as e:
            log.info("Error while loading data: {0}".format(e))
        self.deploy_function(body, wait_for_bootstrap=False)
        # Kill memcached on Node A
        self.log.info("Killing memcached on {0}".format(kv_node[1]))
        shell = RemoteMachineShellConnection(kv_node[1])
        shell.kill_memcached()

        # Start persistence on Node B
        self.log.info("Starting persistence on {0}".
                      format(kv_node[0]))
        mem_client = MemcachedClientHelper.direct_client(kv_node[0],
                                                         self.src_bucket_name)
        mem_client.start_persistence()
        # Wait for bootstrap to complete
        self.wait_for_bootstrap_to_complete(body['appname'])
        stats_src = RestConnection(self.master).get_bucket_stats(bucket=self.src_bucket_name)
        log.info(stats_src)
        self.verify_eventing_results(self.function_name, stats_src["curr_items"], skip_stats_validation=True)
Author: arod1987, Project: testrunner, Lines: 32, Source: eventing_recovery.py

Example 2: kill_memcached_service

# Required import: from lib.remote.remote_util import RemoteMachineShellConnection [as alias]
# Or: from lib.remote.remote_util.RemoteMachineShellConnection import kill_memcached [as alias]
    def kill_memcached_service(self, server):
        remote_client = RemoteMachineShellConnection(server)
        remote_client.kill_memcached()
        remote_client.disconnect()
Author: membase, Project: testrunner, Lines: 6, Source: eventing_base.py

Example 3: partial_rollback

# Required import: from lib.remote.remote_util import RemoteMachineShellConnection [as alias]
# Or: from lib.remote.remote_util.RemoteMachineShellConnection import kill_memcached [as alias]
    def partial_rollback(self):
        bucket = self._cb_cluster.get_bucket_by_name("default")

        self._cb_cluster.flush_buckets([bucket])

        index = self.create_index(bucket, "default_index")
        self.load_data()
        self.wait_for_indexing_complete()

        # Stop Persistence on Node A & Node B
        mem_client = MemcachedClientHelper.direct_client(self._input.servers[0],
                                                         bucket)
        mem_client.stop_persistence()
        mem_client = MemcachedClientHelper.direct_client(self._input.servers[1],
                                                         bucket)
        mem_client.stop_persistence()

        # Perform mutations on the bucket
        self.async_perform_update_delete(self.upd_del_fields)
        if self._update:
            self.sleep(60, "Waiting for updates to get indexed...")
        self.wait_for_indexing_complete()

        # Run FTS Query to fetch the initial count of mutated items
        query = "{\"query\": \"mutated:>0\"}"
        query = json.loads(query)
        for index in self._cb_cluster.get_indexes():
            hits1, _, _, _ = index.execute_query(query)
            self.log.info("Hits before rollback: %s" % hits1)

        # Fetch count of docs in index and bucket
        before_index_doc_count = index.get_indexed_doc_count()
        before_bucket_doc_count = index.get_src_bucket_doc_count()

        self.log.info("Docs in Bucket : %s, Docs in Index : %s" % (
            before_bucket_doc_count, before_index_doc_count))

        # Kill memcached on Node A so that Node B becomes master
        shell = RemoteMachineShellConnection(self._master)
        shell.kill_memcached()

        # Start persistence on Node B
        mem_client = MemcachedClientHelper.direct_client(self._input.servers[1],
                                                         bucket)
        mem_client.start_persistence()

        # Failover Node B
        failover_task = self._cb_cluster.async_failover(
            node=self._input.servers[1])
        failover_task.result()

        # Wait for Failover & FTS index rollback to complete
        self.sleep(10)

        # Run FTS query to fetch count of mutated items post rollback.
        for index in self._cb_cluster.get_indexes():
            hits2, _, _, _ = index.execute_query(query)
            self.log.info("Hits after rollback: %s" % hits2)

        # Fetch count of docs in index and bucket
        after_index_doc_count = index.get_indexed_doc_count()
        after_bucket_doc_count = index.get_src_bucket_doc_count()

        self.log.info("Docs in Bucket : %s, Docs in Index : %s"
                      % (after_bucket_doc_count, after_index_doc_count))

        # Validation : If there are deletes, validate the #docs in index goes up post rollback
        if self._input.param("delete", False):
            self.assertGreater(after_index_doc_count, before_index_doc_count,
                               "Deletes : Index count after rollback not greater than before rollback")
        else:
            # For Updates, validate that #hits goes down in the query output post rollback
            self.assertGreater(hits1, hits2,
                               "Mutated items before rollback are not more than after rollback")

        # Failover FTS node
        failover_fts_node = self._input.param("failover_fts_node", False)

        if failover_fts_node:
            failover_task = self._cb_cluster.async_failover(
                node=self._input.servers[2])
            failover_task.result()
            self.sleep(10)

            # Run FTS query to fetch count of mutated items post FTS node failover.
            for index in self._cb_cluster.get_indexes():
                hits3, _, _, _ = index.execute_query(query)
                self.log.info(
                    "Hits after rollback and failover of primary FTS node: %s" % hits3)
                self.assertEqual(hits2, hits3,
                                 "Mutated items after FTS node failover are not equal to that after rollback")
Author: (not listed), Project: (not listed), Lines: 93, Source: (not listed)

Example 4: test_ingestion_after_kv_rollback

# Required import: from lib.remote.remote_util import RemoteMachineShellConnection [as alias]
# Or: from lib.remote.remote_util.RemoteMachineShellConnection import kill_memcached [as alias]
    def test_ingestion_after_kv_rollback(self):
        self.setup_for_test()

        # Stop Persistence on Node A & Node B
        self.log.info("Stopping persistence on NodeA & NodeB")
        mem_client = MemcachedClientHelper.direct_client(self.input.servers[0],
                                                         self.cb_bucket_name)
        mem_client.stop_persistence()
        mem_client = MemcachedClientHelper.direct_client(self.input.servers[1],
                                                         self.cb_bucket_name)
        mem_client.stop_persistence()

        # Delete half of the documents in the CB bucket
        self.log.info("Performing Mutations")
        self.perform_doc_ops_in_all_cb_buckets(self.num_items, "delete", 0,
                                               self.num_items / 2)

        # Validate no. of items in CBAS dataset
        if not self.validate_cbas_dataset_items_count(self.cbas_dataset_name,
                                                      self.num_items / 2, 0):
            self.fail(
                "No. of items in CBAS dataset do not match that in the CB bucket")

        # Count no. of items in CB & CBAS Buckets
        items_in_cb_bucket = self.get_item_count(self.master,
                                                 self.cb_bucket_name)
        items_in_cbas_bucket, _ = self.get_num_items_in_cbas_dataset(
            self.cbas_dataset_name)
        self.log.info(
            "Before Rollback --- # docs in CB bucket : %s, # docs in CBAS bucket : %s",
            items_in_cb_bucket, items_in_cbas_bucket)

        if items_in_cb_bucket != items_in_cbas_bucket:
            self.fail(
                "Before Rollback : # Items in CBAS bucket does not match that in the CB bucket")

        # Kill memcached on Node A so that Node B becomes master
        self.log.info("Kill Memcached process on NodeA")
        shell = RemoteMachineShellConnection(self.master)
        shell.kill_memcached()

        # Start persistence on Node B
        self.log.info("Starting persistence on NodeB")
        mem_client = MemcachedClientHelper.direct_client(self.input.servers[1],
                                                         self.cb_bucket_name)
        mem_client.start_persistence()

        # Failover Node B
        self.log.info("Failing over NodeB")
        self.sleep(10)
        failover_task = self._cb_cluster.async_failover(self.input.servers,
                                                        [self.input.servers[1]])
        failover_task.result()

        # Wait for Failover & CBAS rollback to complete
        self.sleep(120)

        # Count no. of items in CB & CBAS Buckets
        items_in_cb_bucket = self.get_item_count(self.master,
                                                 self.cb_bucket_name)
        items_in_cbas_bucket, _ = self.get_num_items_in_cbas_dataset(
            self.cbas_dataset_name)
        self.log.info(
            "After Rollback --- # docs in CB bucket : %s, # docs in CBAS bucket : %s",
            items_in_cb_bucket, items_in_cbas_bucket)

        if items_in_cb_bucket != items_in_cbas_bucket:
            self.fail(
                "After Rollback : # Items in CBAS bucket does not match that in the CB bucket")
Author: arod1987, Project: testrunner, Lines: 71, Source: cbas_bucket_operations.py


Note: The lib.remote.remote_util.RemoteMachineShellConnection.kill_memcached method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution or reuse should follow each project's license. Do not reproduce this article without permission.