

Python Cluster.shutdown Method Code Examples

This article collects typical usage examples of the Python method couchbase.cluster.Cluster.shutdown. If you are wondering what Cluster.shutdown does, how to use it, or what real-world calls look like, the examples selected below should help. You can also explore other usage examples of the class it belongs to, couchbase.cluster.Cluster.


The following presents 13 code examples of the Cluster.shutdown method, sorted by popularity by default.
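
All of the examples share the same lifecycle: construct a Cluster helper, drive asynchronous tasks through it, and call shutdown() once the work is done so the task-manager threads the helper spawned are stopped, usually from a finally block or a tearDown method. A minimal sketch of that pattern (server and timeout here are illustrative placeholders, not taken from any one example):

    from couchbase.cluster import Cluster

    cluster = Cluster()  # starts the helper's task-manager threads
    try:
        # drive asynchronous work through the helper, for example:
        # task = cluster.async_init_node(server)
        # task.result(timeout)
        pass
    finally:
        cluster.shutdown()  # always stop the task-manager threads, even on failure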

Example 1: test_create_delete_similar_views

# Required import: from couchbase.cluster import Cluster [as alias]
# Or: from couchbase.cluster.Cluster import shutdown [as alias]
    def test_create_delete_similar_views(self):
        ddoc_name_prefix = self.input.param("ddoc_name_prefix", "ddoc")
        view_name = self.input.param("view_name", "test_view")
        map_fn = 'function (doc) {if(doc.age !== undefined) { emit(doc.age, doc.name);}}'
        rest = RestConnection(self.servers[0])
        ddocs = [DesignDocument(ddoc_name_prefix + "1", [View(view_name, map_fn,
                                                             dev_view=False)],
                                options={"updateMinChanges":0, "replicaUpdateMinChanges":0}),
                DesignDocument(ddoc_name_prefix + "2", [View(view_name, map_fn,
                                                            dev_view=True)],
                               options={"updateMinChanges":0, "replicaUpdateMinChanges":0})]

        ViewBaseTests._load_docs(self, self.num_docs, "test_")
        for ddoc in ddocs:
            results = self.create_ddoc(rest, 'default', ddoc)

        cluster = Cluster()
        try:
            cluster.delete_view(self.servers[0], ddocs[1].name, ddocs[1].views[0])
        finally:
            cluster.shutdown()

        results_new = rest.query_view(ddocs[0].name, ddocs[0].views[0].name, 'default',
                                  {"stale" : "ok", "full_set" : "true"})
        self.assertEquals(results.get(u'rows', []), results_new.get(u'rows', []),
                          "Results returned previosly %s don't match with current %s" % (
                          results.get(u'rows', []), results_new.get(u'rows', [])))
Author: abhinavdangeti, Project: testrunner, Lines: 29, Source: viewtests.py

Example 2: SwapRebalanceBase

# Required import: from couchbase.cluster import Cluster [as alias]
# Or: from couchbase.cluster.Cluster import shutdown [as alias]
class SwapRebalanceBase(unittest.TestCase):

    @staticmethod
    def common_setup(self):
        self.log = logger.Logger.get_logger()
        self.cluster_run = False
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        serverInfo = self.servers[0]
        rest = RestConnection(serverInfo)
        if len(set([server.ip for server in self.servers])) == 1:
            ip = rest.get_nodes_self().ip
            for server in self.servers:
                server.ip = ip
            self.cluster_run = True
        self.case_number = self.input.param("case_number", 0)
        self.replica = self.input.param("replica", 1)
        self.keys_count = self.input.param("keys-count", 1000)
        self.load_ratio = self.input.param("load-ratio", 1)
        self.ratio_expiry = self.input.param("ratio-expiry", 0.03)
        self.ratio_deletes = self.input.param("ratio-deletes", 0.13)
        self.num_buckets = self.input.param("num-buckets", 1)
        self.failover_factor = self.num_swap = self.input.param("num-swap", 1)
        self.num_initial_servers = self.input.param("num-initial-servers", 3)
        self.fail_orchestrator = self.swap_orchestrator = self.input.param("swap-orchestrator", False)
        self.do_access = self.input.param("do-access", True)
        self.load_started = False
        self.loaders = []
        try:
            # Clear the state from a previous invalid run
            if rest._rebalance_progress_status() == 'running':
                self.log.warning("rebalancing is still running, previous test should be verified")
                stopped = rest.stop_rebalance()
                self.assertTrue(stopped, msg="unable to stop rebalance")
            self.log.info("==============  SwapRebalanceBase setup was started for test #{0} {1}=============="\
                      .format(self.case_number, self._testMethodName))
            SwapRebalanceBase.reset(self)
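            # Cluster() spawns task-manager threads; the except handler below shuts them down on setup failure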
            self.cluster_helper = Cluster()

            # Make sure the test is setup correctly
            min_servers = int(self.num_initial_servers) + int(self.num_swap)
            msg = "minimum {0} nodes required for running swap rebalance"
            self.assertTrue(len(self.servers) >= min_servers, msg=msg.format(min_servers))

            self.log.info('picking server : {0} as the master'.format(serverInfo))
            node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
            info = rest.get_nodes_self()
            rest.init_cluster(username=serverInfo.rest_username, password=serverInfo.rest_password)
            rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
            if self.num_buckets > 10:
                BaseTestCase.change_max_buckets(self, self.num_buckets)
            self.log.info("==============  SwapRebalanceBase setup was finished for test #{0} {1} =============="
                      .format(self.case_number, self._testMethodName))
            SwapRebalanceBase._log_start(self)
        except Exception, e:
            self.cluster_helper.shutdown()
            self.fail(e)
Author: abhinavdangeti, Project: testrunner, Lines: 59, Source: swaprebalance.py

Example 3: wait_for_replication

# Required import: from couchbase.cluster import Cluster [as alias]
# Or: from couchbase.cluster.Cluster import shutdown [as alias]
 def wait_for_replication(servers, cluster_helper=None, timeout=600):
     if cluster_helper is None:
         cluster = Cluster()
     else:
         cluster = cluster_helper
     tasks = []
     rest = RestConnection(servers[0])
     buckets = rest.get_buckets()
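     # for each bucket, wait until every peer's replication tap queue is idle and its backfill has completed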
     for server in servers:
         for bucket in buckets:
             for server_repl in list(set(servers) - set([server])):
                 tasks.append(
                     cluster.async_wait_for_stats(
                         [server],
                         bucket,
                         "tap",
                         "eq_tapq:[email protected]" + server_repl.ip + ":idle",
                         "==",
                         "true",
                     )
                 )
                 tasks.append(
                     cluster.async_wait_for_stats(
                         [server],
                         bucket,
                         "tap",
                         "eq_tapq:[email protected]" + server_repl.ip + ":backfill_completed",
                         "==",
                         "true",
                     )
                 )
     try:
         for task in tasks:
             task.result(timeout)
     finally:
         if cluster_helper is None:
             # stop all newly created task manager threads
             cluster.shutdown()
     return True
Author: ketakigangal, Project: cbsystest, Lines: 41, Source: rebalance_helper.py

Example 4: SwapRebalanceBase

# Required import: from couchbase.cluster import Cluster [as alias]
# Or: from couchbase.cluster.Cluster import shutdown [as alias]
class SwapRebalanceBase(unittest.TestCase):

    @staticmethod
    def common_setup(self):
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        serverInfo = self.servers[0]
        rest = RestConnection(serverInfo)
        self.case_number = self.input.param("case_number", 0)

        # Clear the state from a previous invalid run
        if rest._rebalance_progress_status() == 'running':
            self.log.warning("rebalancing is still running, previous test should be verified")
            stopped = rest.stop_rebalance()
            self.assertTrue(stopped, msg="unable to stop rebalance")
        self.load_started = False
        self.loaders = []
        self.log.info("==============  SwapRebalanceBase setup was started for test #{0} {1}=============="\
                      .format(self.case_number, self._testMethodName))
        SwapRebalanceBase.reset(self)
        self.cluster_helper = Cluster()
        # Initialize test params
        self.replica = self.input.param("replica", 1)
        self.keys_count = self.input.param("keys-count", 100000)
        self.load_ratio = self.input.param("load-ratio", 1)
        self.ratio_expiry = self.input.param("ratio-expiry", 0.03)
        self.ratio_deletes = self.input.param("ratio-deletes", 0.13)
        self.num_buckets = self.input.param("num-buckets", 1)
        self.failover_factor = self.num_swap = self.input.param("num-swap", 1)
        self.num_initial_servers = self.input.param("num-initial-servers", 3)
        self.fail_orchestrator = self.swap_orchestrator = self.input.param("swap-orchestrator", False)
        self.do_access = self.input.param("do-access", True)

        # Make sure the test is setup correctly
        min_servers = int(self.num_initial_servers) + int(self.num_swap)
        msg = "minimum {0} nodes required for running swap rebalance"
        self.assertTrue(len(self.servers) >= min_servers,
            msg=msg.format(min_servers))

        self.log.info('picking server : {0} as the master'.format(serverInfo))
        node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.servers)
        info = rest.get_nodes_self()
        rest.init_cluster(username=serverInfo.rest_username, password=serverInfo.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
        self.log.info("==============  SwapRebalanceBase setup was finished for test #{0} {1} =============="
                      .format(self.case_number, self._testMethodName))
        SwapRebalanceBase._log_start(self)

    @staticmethod
    def common_tearDown(self):
        self.cluster_helper.shutdown()
        test_failed = (hasattr(self, '_resultForDoCleanups') and len(self._resultForDoCleanups.failures or self._resultForDoCleanups.errors)) \
                   or (hasattr(self, '_exc_info') and self._exc_info()[1] is not None)
        if (test_failed and TestInputSingleton.input.param("stop-on-failure", False)) \
                or self.input.param("skip_cleanup", False):
            self.log.warn("CLEANUP WAS SKIPPED")
        else:
            SwapRebalanceBase.reset(self)
            SwapRebalanceBase._log_finish(self)

    @staticmethod
    def reset(self):
        self.log.info("==============  SwapRebalanceBase cleanup was started for test #{0} {1} =============="\
                          .format(self.case_number, self._testMethodName))
        self.log.info("Stopping load in Teardown")
        SwapRebalanceBase.stop_load(self.loaders)
        for server in self.servers:
            rest = RestConnection(server)
            if rest._rebalance_progress_status() == 'running':
                self.log.warning("rebalancing is still running, test should be verified")
                stopped = rest.stop_rebalance()
                self.assertTrue(stopped, msg="unable to stop rebalance")
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
        for server in self.servers:
            ClusterOperationHelper.cleanup_cluster([server])
            if server.data_path:
                rest = RestConnection(server)
                rest.set_data_path(data_path=server.data_path)
        ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
        self.log.info("==============  SwapRebalanceBase cleanup was finished for test #{0} {1} =============="\
                          .format(self.case_number, self._testMethodName))

    @staticmethod
    def _log_start(self):
        try:
            msg = "{0} : {1} started ".format(datetime.datetime.now(), self._testMethodName)
            RestConnection(self.servers[0]).log_client_error(msg)
        except:
            pass

    @staticmethod
    def _log_finish(self):
        try:
            msg = "{0} : {1} finished ".format(datetime.datetime.now(), self._testMethodName)
            RestConnection(self.servers[0]).log_client_error(msg)
        except:
            pass

    @staticmethod
#......... some code omitted .........
Author: Boggypop, Project: testrunner, Lines: 103, Source: swaprebalance.py

Example 5: XDCRBaseTest

# Required import: from couchbase.cluster import Cluster [as alias]
# Or: from couchbase.cluster.Cluster import shutdown [as alias]
class XDCRBaseTest(unittest.TestCase):

    def setUp(self):
        try:
            self._log = logger.Logger.get_logger()
            self._log.info("SetUp process started ...")
            self._input = TestInputSingleton.input
            self._cluster_helper = Cluster()

            self._init_parameters()

            if not self._input.param("skip_cleanup", False):
                self._cleanup_previous_setup()

            self._init_clusters()
            self.setup_extended()
            self._log.info("SetUp process completed...")
        except:
            self._log.error("Error while setting up clusters: %s", sys.exc_info()[0])
            self._cleanup_broken_setup()
            raise

    def tearDown(self):
        self.teardown_extended()
        self._do_cleanup()
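        # stop the Cluster helper's task threads once cleanup is finished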
        self._cluster_helper.shutdown()

    def _cleanup_previous_setup(self):
        self.teardown_extended()
        self._do_cleanup()

    def test_setUp(self):
        pass

    def _init_parameters(self):
        self._log.info("Initializing input parameters started...")

        self._clusters_dic = self._input.clusters  # clusters is declared as a dict in TestInput, which is unordered
        self._clusters_keys_olst = range(len(self._clusters_dic))  # clusters are populated in the dict by testrunner with the ordinal as the key;
                                                                   # OrderedDict cannot be used, to keep compatibility with Python 2.6
        self._cluster_names_dic = {}  # populated in the _init_clusters() method
        self._cluster_counter_temp_int = 0  # TODO: fix the testrunner code to pass the cluster name in params

        self._buckets_arr = ["default"]  #??    
        self._default_bucket_bool = self._input.param("default_bucket", True)
        self._standard_buckets_int = self._input.param("standard_buckets", 0)
        self._sasl_buckets_int = self._input.param("sasl_buckets", 0)
        self._total_buckets_int = self._sasl_buckets_int + self._default_bucket_bool + self._standard_buckets_int

        #self.num_servers = self._input.param("servers", len(self.servers))
        self._num_replicas_int = self._input.param("replicas", 1)
        self._num_items_int = self._input.param("items", 1000)
        self._dgm_run_bool = self._input.param("dgm_run", False)

        self._mem_quota_int = 0 # will be set in subsequent methods

        self.init_parameters_extended()

        self._log.info("Initializing input parameters completed...")


    def _init_clusters(self):
        for key in self._clusters_keys_olst:
            self._set_cluster_names(key)
            self._setup_cluster(self._clusters_dic[key])


    # This method shall be overridden in case there are parameters that need to be initialized.
    def init_parameters_extended(self):
        pass

    # This method shall be overridden in case there are custom steps involved during setup.
    def setup_extended(self):
        pass

    # This method shall be overridden in case there are custom steps involved during teardown.
    def teardown_extended(self):
        pass

    def _do_cleanup(self):
        for key in self._clusters_keys_olst:
            nodes = self._clusters_dic[key]
            BucketOperationHelper.delete_all_buckets_or_assert(nodes, self)
            ClusterOperationHelper.cleanup_cluster(nodes)
            ClusterOperationHelper.wait_for_ns_servers_or_assert(nodes, self)

    def _cleanup_broken_setup(self):
        try:
            self.tearDown()
        except:
            self._log.info("Error while cleaning broken setup.")

    def _set_cluster_names(self, key):
        self._cluster_counter_temp_int += 1
        self._cluster_names_dic[key] = "cluster-{0}".format(self._cluster_counter_temp_int)

    def _setup_cluster(self, nodes):
        self._init_nodes(nodes)
        self._create_buckets(nodes)
        self._config_cluster(nodes)
#......... some code omitted .........
Author: jchris, Project: testrunner, Lines: 103, Source: xdcrbasetests.py

Example 6: BaseTestCase

# Required import: from couchbase.cluster import Cluster [as alias]
# Or: from couchbase.cluster.Cluster import shutdown [as alias]
class BaseTestCase(unittest.TestCase):

    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.cluster = Cluster()
        self.servers = self.input.servers
        self.buckets = {}

        self.default_bucket = self.input.param("default_bucket", True)
        self.standard_buckets = self.input.param("standard_buckets", 0)
        self.sasl_buckets = self.input.param("sasl_buckets", 0)
        self.total_buckets = self.sasl_buckets + self.default_bucket + self.standard_buckets
        self.num_servers = self.input.param("servers", len(self.servers))
        self.num_replicas = self.input.param("replicas", 1)
        self.num_items = self.input.param("items", 1000)
        self.dgm_run = self.input.param("dgm_run", False)

        if not self.input.param("skip_cleanup", False):
            BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
            for server in self.servers:
                ClusterOperationHelper.cleanup_cluster([server])
            ClusterOperationHelper.wait_for_ns_servers_or_assert([self.servers[0]], self)

        self.quota = self._initialize_nodes(self.cluster, self.servers)
        if self.dgm_run:
            self.quota = 256
        self.bucket_size = self._get_bucket_size(self.quota, self.total_buckets)
        if self.default_bucket:
            self.cluster.create_default_bucket(self.servers[0], self.bucket_size, self.num_replicas)
            self.buckets['default'] = {1 : KVStore()}
        self._create_sasl_buckets(self.servers[0], self.sasl_buckets)
        # TODO (Mike): Create Standard buckets

    def tearDown(self):
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
        ClusterOperationHelper.cleanup_cluster(self.servers)
        ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
        self.buckets = {}
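        # stop all task manager threads started by Cluster() in setUp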
        self.cluster.shutdown()

    def _initialize_nodes(self, cluster, servers):
        quota = 0
        init_tasks = []
        for server in servers:
            init_tasks.append(cluster.async_init_node(server))
        for task in init_tasks:
            node_quota = task.result()
            if node_quota < quota or quota == 0:
                quota = node_quota
        return quota

    def _get_bucket_size(self, quota, num_buckets, ratio=2.0/3.0):
        ip = self.servers[0].ip
        for server in self.servers:
            if server.ip == ip:
                return int(ratio / float(self.num_servers) / float(num_buckets) * float(quota))
        return int(ratio / float(num_buckets) * float(quota))

    def _create_sasl_buckets(self, server, num_buckets):
        bucket_tasks = []
        for i in range(num_buckets):
            name = 'bucket' + str(i)
            bucket_tasks.append(self.cluster.async_create_sasl_bucket(server, name,
                                                                      'password',
                                                                      self.bucket_size,
                                                                      self.num_replicas))
            self.buckets[name] = {1 : KVStore()}
        for task in bucket_tasks:
            task.result()

    def _verify_stats_all_buckets(self, servers):
        stats_tasks = []
        for bucket, kv_stores in self.buckets.items():
            items = sum([len(kv_store) for kv_store in kv_stores.values()])
            stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                               'curr_items', '==', items))
            stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                               'vb_active_curr_items', '==', items))

            available_replicas = self.num_replicas
            if len(servers) == self.num_replicas:
                available_replicas = len(servers) - 1
            elif len(servers) <= self.num_replicas:
                available_replicas = len(servers) - 1

            stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                                   'vb_replica_curr_items', '==', items * available_replicas))
            stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                                   'curr_items_tot', '==', items * (available_replicas + 1)))

        for task in stats_tasks:
            task.result(60)


    """Asynchronously applys load generation to all bucekts in the cluster.

    Args:
        server - A server in the cluster. (TestInputServer)
        kv_gen - The generator to use to generate load. (DocumentGenerator)
#......... some code omitted .........
Author: paul-guo-, Project: appstack, Lines: 103, Source: basetestcase.py

Example 7: BaseTestCase

# Required import: from couchbase.cluster import Cluster [as alias]
# Or: from couchbase.cluster.Cluster import shutdown [as alias]
class BaseTestCase(unittest.TestCase):
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        self.buckets = []
        self.master = self.servers[0]
        self.cluster = Cluster()
        self.pre_warmup_stats = {}
        try:
            self.wait_timeout = self.input.param("wait_timeout", 60)
            # number of the case performed by testrunner (incremented each time)
            self.case_number = self.input.param("case_number", 0)
            self.default_bucket = self.input.param("default_bucket", True)
            if self.default_bucket:
                self.default_bucket_name = "default"
            self.standard_buckets = self.input.param("standard_buckets", 0)
            self.sasl_buckets = self.input.param("sasl_buckets", 0)
            self.memcached_buckets = self.input.param("memcached_buckets", 0)
            self.total_buckets = self.sasl_buckets + self.default_bucket + self.standard_buckets
            self.num_servers = self.input.param("servers", len(self.servers))
            # initial number of nodes in the cluster
            self.nodes_init = self.input.param("nodes_init", 1)
            self.nodes_in = self.input.param("nodes_in", 1)
            self.nodes_out = self.input.param("nodes_out", 1)

            self.num_replicas = self.input.param("replicas", 1)
            self.num_items = self.input.param("items", 1000)
            self.value_size = self.input.param("value_size", 512)
            self.dgm_run = self.input.param("dgm_run", False)
            # max items number to verify in ValidateDataTask, None - verify all
            self.max_verify = self.input.param("max_verify", None)
            # we don't change consistent_view on server by default
            self.disabled_consistent_view = self.input.param("disabled_consistent_view", None)
            self.rebalanceIndexWaitingDisabled = self.input.param("rebalanceIndexWaitingDisabled", None)
            self.rebalanceIndexPausingDisabled = self.input.param("rebalanceIndexPausingDisabled", None)
            self.maxParallelIndexers = self.input.param("maxParallelIndexers", None)
            self.maxParallelReplicaIndexers = self.input.param("maxParallelReplicaIndexers", None)
            self.quota_percent = self.input.param("quota_percent", None)
            self.port = None
            if self.input.param("port", None):
                self.port = str(self.input.param("port", None))
            self.log.info(
                "==============  basetestcase setup was started for test #{0} {1}==============".format(
                    self.case_number, self._testMethodName
                )
            )
            # avoid any cluster operations in setup for new upgrade & upgradeXDCR tests
            if str(self.__class__).find("newupgradetests") != -1 or str(self.__class__).find("upgradeXDCR") != -1:
                self.log.info("any cluster operation in setup will be skipped")
                self.log.info(
                    "==============  basetestcase setup was finished for test #{0} {1} ==============".format(
                        self.case_number, self._testMethodName
                    )
                )
                return
            # avoid cleanup if the previous test has already been torn down
            if not self.input.param("skip_cleanup", True) or self.case_number == 1 or self.case_number > 1000:
                if self.case_number > 1000:
                    self.log.warn("teardDown for previous test failed. will retry..")
                    self.case_number -= 1000
                self.tearDown()
                self.cluster = Cluster()

            self.quota = self._initialize_nodes(
                self.cluster,
                self.servers,
                self.disabled_consistent_view,
                self.rebalanceIndexWaitingDisabled,
                self.rebalanceIndexPausingDisabled,
                self.maxParallelIndexers,
                self.maxParallelReplicaIndexers,
                self.port,
            )

            if str(self.__class__).find("rebalanceout.RebalanceOutTests") != -1:
                # rebalance all nodes into the cluster before each test
                self.cluster.rebalance(self.servers[: self.num_servers], self.servers[1 : self.num_servers], [])
            elif self.nodes_init > 1:
                self.cluster.rebalance(self.servers[:1], self.servers[1 : self.nodes_init], [])
            elif str(self.__class__).find("ViewQueryTests") != -1 and not self.input.param("skip_rebalance", False):
                self.cluster.rebalance(self.servers, self.servers[1:], [])
            if self.dgm_run:
                self.quota = 256
            if self.total_buckets > 0:
                self.bucket_size = self._get_bucket_size(self.quota, self.total_buckets)
            if str(self.__class__).find("newupgradetests") == -1:
                self._bucket_creation()
            self.log.info(
                "==============  basetestcase setup was finished for test #{0} {1} ==============".format(
                    self.case_number, self._testMethodName
                )
            )
            self._log_start(self)
        except Exception, e:
            self.cluster.shutdown()
            self.fail(e)
Author: Boggypop, Project: testrunner, Lines: 99, Source: basetestcase.py

Example 8: BaseTestCase

# Required import: from couchbase.cluster import Cluster [as alias]
# Or: from couchbase.cluster.Cluster import shutdown [as alias]
class BaseTestCase(unittest.TestCase):

    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        self.buckets = []
        self.master = self.servers[0]
        self.cluster = Cluster()
        self.wait_timeout = self.input.param("wait_timeout", 60)
        # number of the case performed by testrunner (incremented each time)
        self.case_number = self.input.param("case_number", 0)
        self.default_bucket = self.input.param("default_bucket", True)
        if self.default_bucket:
            self.default_bucket_name = "default"
        self.standard_buckets = self.input.param("standard_buckets", 0)
        self.sasl_buckets = self.input.param("sasl_buckets", 0)
        self.total_buckets = self.sasl_buckets + self.default_bucket + self.standard_buckets
        self.num_servers = self.input.param("servers", len(self.servers))
        self.num_replicas = self.input.param("replicas", 1)
        self.num_items = self.input.param("items", 1000)
        self.dgm_run = self.input.param("dgm_run", False)
        # max number of items to verify in ValidateDataTask; None - verify all
        self.max_verify = self.input.param("max_verify", None)
        #we don't change consistent_view on server by default
        self.disabled_consistent_view = self.input.param("disabled_consistent_view", None)
        self.log.info("==============  basetestcase setup was started for test #{0} {1}=============="\
                      .format(self.case_number, self._testMethodName))
        # avoid cleanup if the previous test has been torn down
        if not self.input.param("skip_cleanup", True) or self.case_number == 1:
            self.tearDown()
            self.cluster = Cluster()
        self.quota = self._initialize_nodes(self.cluster, self.servers, self.disabled_consistent_view)
        if self.dgm_run:
            self.quota = 256
        if self.total_buckets > 0:
            self.bucket_size = self._get_bucket_size(self.quota, self.total_buckets)

        if self.default_bucket:
            self.cluster.create_default_bucket(self.master, self.bucket_size, self.num_replicas)
            self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="",
                                       num_replicas=self.num_replicas, bucket_size=self.bucket_size))

        self._create_sasl_buckets(self.master, self.sasl_buckets)
        self._create_standard_buckets(self.master, self.standard_buckets)
        self.log.info("==============  basetestcase setup was finished for test #{0} {1} =============="\
                      .format(self.case_number, self._testMethodName))
        self._log_start(self)

    def tearDown(self):
        if not self.input.param("skip_cleanup", False):
            try:
                self.log.info("==============  basetestcase cleanup was started for test #{0} {1} =============="\
                          .format(self.case_number, self._testMethodName))
                rest = RestConnection(self.master)
                alerts = rest.get_alerts()
                if alerts is not None and len(alerts) != 0:
                    self.log.warn("Alerts were found: {0}".format(alerts))
                if rest._rebalance_progress_status() == 'running':
                    self.log.warning("rebalancing is still running, test should be verified")
                    stopped = rest.stop_rebalance()
                    self.assertTrue(stopped, msg="unable to stop rebalance")
                BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
                ClusterOperationHelper.cleanup_cluster(self.servers)
                time.sleep(10)
                ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
                self.log.info("==============  basetestcase cleanup was finished for test #{0} {1} =============="\
                          .format(self.case_number, self._testMethodName))
            finally:
                #stop all existing task manager threads
                self.cluster.shutdown()
                self._log_finish(self)

    @staticmethod
    def _log_start(self):
        try:
            msg = "{0} : {1} started ".format(datetime.datetime.now(), self._testMethodName)
            RestConnection(self.servers[0]).log_client_error(msg)
        except:
            pass

    @staticmethod
    def _log_finish(self):
        try:
            msg = "{0} : {1} finished ".format(datetime.datetime.now(), self._testMethodName)
            RestConnection(self.servers[0]).log_client_error(msg)
        except:
            pass

    def _initialize_nodes(self, cluster, servers, disabled_consistent_view=None):
        quota = 0
        init_tasks = []
        for server in servers:
            init_tasks.append(cluster.async_init_node(server, disabled_consistent_view))
        for task in init_tasks:
            node_quota = task.result()
            if node_quota < quota or quota == 0:
                quota = node_quota
        return quota

#......... some code omitted .........
Author: ronniedada, Project: testrunner, Lines: 103, Source: basetestcase.py

Example 9: usage

# Required import: from couchbase.cluster import Cluster [as alias]
# Or: from couchbase.cluster.Cluster import shutdown [as alias]
        for o, a in opts:
            if o == "-h":
                usage()

        input = TestInput.TestInputParser.get_test_input(sys.argv)
        if not input.servers:
            usage("ERROR: no servers specified. Please use the -i parameter.")
    except IndexError:
        usage()
    except getopt.GetoptError, error:
        usage("ERROR: " + str(error))

    docs_per_day = input.param("doc_per_day", 49)
    years = input.param("years", 2)
    bucket_name = input.param("bucket_name", "default")
    bucket_port = input.param("bucket_port", None)
    bucket_sasl_pass = input.param("bucket_sasl_pass", None)
    flag = input.param("flags", 0)

    cluster = Cluster()
    try:
        bucket = initialize_bucket(bucket_name, bucket_port, bucket_sasl_pass)
        loader = DocLoader(input.servers, cluster)
        generators_load = loader.generate_docs(docs_per_day, years)
        loader.load(generators_load, bucket, flag=flag)
    finally:
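        # release the Cluster helper's task threads whether loading succeeded or failed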
        cluster.shutdown()

if __name__ == "__main__":
    main()
Author: arunapiravi, Project: testrunner, Lines: 32, Source: doc_loader.py

Example 10: BaseUITestCase

# Required import: from couchbase.cluster import Cluster [as alias]
# Or: from couchbase.cluster.Cluster import shutdown [as alias]

#......... some code omitted .........
        host = self.machine.ip
        if host in ['localhost', '127.0.0.1']:
            cmd = 'ps -ef|grep selenium-server'
            output = commands.getstatusoutput(cmd)
            if str(output).find('selenium-server-standalone') > -1:
                return True
        else:
            #cmd = "ssh {0}@{1} 'bash -s' < 'tasklist |grep selenium-server'".format(self.input.servers[0].ssh_username,
            #                                                                        host)
            cmd = 'tasklist |grep java'
            o, r = self.shell.execute_command(cmd)
            #cmd = "ssh {0}@{1} 'bash -s' < 'ps -ef|grep selenium-server'"
            if str(o).find('java') > -1:
                return True
        return False

    def setUp(self):
        try:
            self.log = logger.Logger.get_logger()
            self.input = TestInputSingleton.input
            self.servers = self.input.servers
            self.browser = self.input.ui_conf['browser']
            self.replica  = self.input.param("replica", 1)
            self.case_number = self.input.param("case_number", 0)
            self.cluster = Cluster()
            self.machine = self.input.ui_conf['server']
            self.driver = None
            self.shell = RemoteMachineShellConnection(self.machine)
            # avoid cleanup if the previous test has been torn down
            if not self.input.param("skip_cleanup", True) \
                                            or self.case_number == 1:
                self.tearDown()
            self._log_start(self)
            self._kill_old_drivers()
            #thread for selenium server
            if not self._is_selenium_running():
                self.log.info('start selenium')
                self._start_selenium_thread()
                self._wait_for_selenium_is_started()
            self.log.info('start selenium session')
            if self.browser == 'ff':
                self.driver = webdriver.Remote(command_executor='http://{0}:{1}/wd/hub'
                                               .format(self.machine.ip,
                                                       self.machine.port),
                                               desired_capabilities=DesiredCapabilities.FIREFOX)
            elif self.browser == 'chrome':
                self.driver = webdriver.Remote(command_executor='http://{0}:{1}/wd/hub'
                                               .format(self.machine.ip,
                                                       self.machine.port),
                                               desired_capabilities=DesiredCapabilities.CHROME)
            self.log.info('selenium session started')
            self.driver.get("http://{0}:{1}".format(self.servers[0].ip,
                                                    self.servers[0].port))
            self.driver.maximize_window()
        except Exception as ex:
            self.input.test_params["stop-on-failure"] = True
            self.log.error("SETUP WAS FAILED. ALL TESTS WILL BE SKIPPED")
            self.fail(ex)

    @staticmethod
    def _log_start(self):
        try:
            msg = "{0} : {1} started ".format(datetime.datetime.now(),
                                              self._testMethodName)
            RestConnection(self.servers[0]).log_client_error(msg)
        except:
            pass

    @staticmethod
    def _log_finish(self):
        try:
            msg = "{0} : {1} finished ".format(datetime.datetime.now(),
                                               self._testMethodName)
            RestConnection(self.servers[0]).log_client_error(msg)
        except:
            pass

    def tearDown(self):
        try:
            if self.driver:
                path_screen = self.input.ui_conf['screenshots'] or 'logs/screens'
                full_path = '{1}/screen_{0}.png'.format(time.time(), path_screen)
                self.log.info('screenshot is available: %s' % full_path)
                if not os.path.exists(path_screen):
                    os.mkdir(path_screen)
                self.driver.get_screenshot_as_file(os.path.abspath(full_path))
            rest = RestConnection(self.servers[0])
            if rest._rebalance_progress_status() == 'running':
                stopped = rest.stop_rebalance()
                self.assertTrue(stopped, msg="unable to stop rebalance")
            BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
            for server in self.servers:
                ClusterOperationHelper.cleanup_cluster([server])
            ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
            if self.driver:
                self.driver.close()
        finally:
            if self.driver:
                self.shell.disconnect()
            self.cluster.shutdown()
Author: Boggypop, Project: testrunner, Lines: 104, Source: uibasetest.py

Example 11: CheckpointTests

# Required import: from couchbase.cluster import Cluster [as alias]
# Or: from couchbase.cluster.Cluster import shutdown [as alias]
class CheckpointTests(unittest.TestCase):

    def setUp(self):
        self.cluster = Cluster()

        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        self.num_servers = self.input.param("servers", 1)

        master = self.servers[0]
        num_replicas = self.input.param("replicas", 1)
        self.bucket = 'default'

        # Start: Should be in a before class function
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
        for server in self.servers:
            ClusterOperationHelper.cleanup_cluster([server])
        ClusterOperationHelper.wait_for_ns_servers_or_assert([master], self)
        # End: Should be in a before class function

        self.quota = self.cluster.init_node(master)
        self.old_vbuckets = self._get_vbuckets(master)
        ClusterOperationHelper.set_vbuckets(master, 1)
        self.cluster.create_default_bucket(master, self.quota, num_replicas)
        self.cluster.rebalance(self.servers[:self.num_servers],
                               self.servers[1:self.num_servers], [])

    def tearDown(self):
        master = self.servers[0]
        ClusterOperationHelper.set_vbuckets(master, self.old_vbuckets)
        rest = RestConnection(master)
        rest.stop_rebalance()
        self.cluster.rebalance(self.servers[:self.num_servers], [],
                               self.servers[1:self.num_servers])
        self.cluster.bucket_delete(master, self.bucket)
        self.cluster.shutdown()

    def checkpoint_create_items(self):
        param = 'checkpoint'
        stat_key = 'vb_0:open_checkpoint_id'
        num_items = 6000

        master = self._get_server_by_state(self.servers[:self.num_servers], self.bucket, ACTIVE)
        self._set_checkpoint_size(self.servers[:self.num_servers], self.bucket, '5000')
        chk_stats = StatsCommon.get_stats(self.servers[:self.num_servers], self.bucket,
                                          param, stat_key)
        load_thread = self.generate_load(master, self.bucket, num_items)
        load_thread.join()
        tasks = []
        for server, value in chk_stats.items():
            tasks.append(self.cluster.async_wait_for_stats([server], self.bucket, param,
                                                           stat_key, '>', value))
        for task in tasks:
            try:
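                # scale the wait with the item count, with a 30-second floor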
                timeout = 30 if (num_items * .001) < 30 else num_items * .001
                task.result(timeout)
            except TimeoutError:
                self.fail("New checkpoint not created")

    def checkpoint_create_time(self):
        param = 'checkpoint'
        timeout = 60
        stat_key = 'vb_0:open_checkpoint_id'

        master = self._get_server_by_state(self.servers[:self.num_servers], self.bucket, ACTIVE)
        self._set_checkpoint_timeout(self.servers[:self.num_servers], self.bucket, str(timeout))
        chk_stats = StatsCommon.get_stats(self.servers[:self.num_servers], self.bucket,
                                          param, stat_key)
        load_thread = self.generate_load(master, self.bucket, 1)
        load_thread.join()
        log.info("Sleeping for {0} seconds)".format(timeout))
        time.sleep(timeout)
        tasks = []
        for server, value in chk_stats.items():
            tasks.append(self.cluster.async_wait_for_stats([server], self.bucket, param,
                                                           stat_key, '>', value))
        for task in tasks:
            try:
                task.result(60)
            except TimeoutError:
                self.fail("New checkpoint not created")
        self._set_checkpoint_timeout(self.servers[:self.num_servers], self.bucket, str(600))

    def checkpoint_collapse(self):
        param = 'checkpoint'
        chk_size = 5000
        num_items = 25000
        stat_key = 'vb_0:last_closed_checkpoint_id'
        stat_chk_itms = 'vb_0:num_checkpoint_items'

        master = self._get_server_by_state(self.servers[:self.num_servers], self.bucket, ACTIVE)
        slave1 = self._get_server_by_state(self.servers[:self.num_servers], self.bucket, REPLICA1)
        slave2 = self._get_server_by_state(self.servers[:self.num_servers], self.bucket, REPLICA2)
        self._set_checkpoint_size(self.servers[:self.num_servers], self.bucket, str(chk_size))
        m_stats = StatsCommon.get_stats([master], self.bucket, param, stat_key)
        self._stop_replication(slave2, self.bucket)
        load_thread = self.generate_load(master, self.bucket, num_items)
        load_thread.join()

        tasks = []
#......... some code omitted .........
Author: IrynaMironava, Project: testrunner, Lines: 103, Source: checkpoint.py

Example 12: FailoverBaseTest

# Required import: from couchbase.cluster import Cluster [as alias]
# Or: from couchbase.cluster.Cluster import shutdown [as alias]
class FailoverBaseTest(unittest.TestCase):

    @staticmethod
    def setUp(self):
        log = logger.Logger.get_logger()
        self._input = TestInputSingleton.input
        self._keys_count = self._input.param("keys_count", DEFAULT_KEY_COUNT)
        self._num_replicas = self._input.param("replica", DEFAULT_REPLICA)
        self.bidirectional = self._input.param("bidirectional", False)
        self.case_number = self._input.param("case_number", 0)
        self._value_size = self._input.param("value_size", 256)
        self.wait_timeout = self._input.param("wait_timeout", 60)
        self._servers = self._input.servers
        self.master = self._servers[0]
        self._failed_nodes = []
        num_buckets = 0
        self.buckets = []
        self.default_bucket = self._input.param("default_bucket", True)
        if self.default_bucket:
            self.default_bucket_name = "default"
            num_buckets += 1
        self._standard_buckets = self._input.param("standard_buckets", 0)
        self._sasl_buckets = self._input.param("sasl_buckets", 0)
        num_buckets += self._standard_buckets + self._sasl_buckets
        self.dgm_run = self._input.param("dgm_run", True)
        self.log = logger.Logger().get_logger()
        self._cluster_helper = Cluster()
        self.disabled_consistent_view = self._input.param("disabled_consistent_view", None)
        self._quota = self._initialize_nodes(self._cluster_helper, self._servers, self.disabled_consistent_view)
        if self.dgm_run:
            self._quota = 256
        self.bucket_size = int((2.0 / 3.0) / float(num_buckets) * float(self._quota))
        self.gen_create = BlobGenerator('loadOne', 'loadOne_', self._value_size, end=self._keys_count)
        self.add_back_flag = False
        self._cleanup_nodes = []
        log.info("==============  setup was started for test #{0} {1}=============="\
                      .format(self.case_number, self._testMethodName))
        RemoteUtilHelper.common_basic_setup(self._servers)
        BucketOperationHelper.delete_all_buckets_or_assert(self._servers, self)
        for server in self._servers:
            ClusterOperationHelper.cleanup_cluster([server])
        ClusterHelper.wait_for_ns_servers_or_assert(self._servers, self)
        self._setup_cluster()
        self._create_buckets_()
        log.info("==============  setup was finished for test #{0} {1} =============="\
                      .format(self.case_number, self._testMethodName))

    @staticmethod
    def tearDown(self):
        try:
            self._cluster_helper.shutdown()
            log = logger.Logger.get_logger()
            log.info("==============  tearDown was started for test #{0} {1} =============="\
                              .format(self.case_number, self._testMethodName))
            RemoteUtilHelper.common_basic_setup(self._servers)
            log.info("10 seconds delay to wait for membase-server to start")
            time.sleep(10)
            for server in self._cleanup_nodes:
                shell = RemoteMachineShellConnection(server)
                o, r = shell.execute_command("iptables -F")
                shell.log_command_output(o, r)
                o, r = shell.execute_command("/sbin/iptables -A INPUT -p tcp -i eth0 --dport 1000:60000 -j ACCEPT")
                shell.log_command_output(o, r)
                o, r = shell.execute_command("/sbin/iptables -A OUTPUT -p tcp -o eth0 --dport 1000:60000 -j ACCEPT")
                shell.log_command_output(o, r)
                o, r = shell.execute_command("/etc/init.d/couchbase-server start")
                shell.log_command_output(o, r)
                shell.disconnect()
            BucketOperationHelper.delete_all_buckets_or_assert(self._servers, self)
            ClusterOperationHelper.cleanup_cluster(self._servers)
            ClusterHelper.wait_for_ns_servers_or_assert(self._servers, self)
            log.info("==============  tearDown was finished for test #{0} {1} =============="\
                              .format(self.case_number, self._testMethodName))
        finally:
            pass

    def _initialize_nodes(self, cluster, servers, disabled_consistent_view=None):
        quota = 0
        init_tasks = []
        for server in servers:
            init_tasks.append(cluster.async_init_node(server, disabled_consistent_view))
        for task in init_tasks:
            node_quota = task.result()
            if node_quota < quota or quota == 0:
                quota = node_quota
        return quota

    def _setup_cluster(self):
        rest = RestConnection(self.master)
        credentials = self._input.membase_settings
        ClusterOperationHelper.add_all_nodes_or_assert(self.master, self._servers, credentials, self)
        nodes = rest.node_statuses()
        rest.rebalance(otpNodes=[node.id for node in nodes], ejectedNodes=[])
        msg = "rebalance failed after adding these nodes {0}".format(nodes)
        self.assertTrue(rest.monitorRebalance(), msg=msg)

    def _create_buckets_(self):
        if self.default_bucket:
            self._cluster_helper.create_default_bucket(self.master, self.bucket_size, self._num_replicas)
            self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="",
#......... some code omitted .........
Author: mschoch, Project: testrunner, Lines: 103, Source: failovertests.py

Example 13: BaseTestCase

# Required import: from couchbase.cluster import Cluster [as alias]
# Or: from couchbase.cluster.Cluster import shutdown [as alias]
class BaseTestCase(unittest.TestCase):
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        self.buckets = []
        self.master = self.servers[0]
        self.cluster = Cluster()
        self.wait_timeout = self.input.param("wait_timeout", 60)
        # number of the case performed by testrunner (incremented each time)
        self.case_number = self.input.param("case_number", 0)
        self.default_bucket = self.input.param("default_bucket", True)
        if self.default_bucket:
            self.default_bucket_name = "default"
        self.standard_buckets = self.input.param("standard_buckets", 0)
        self.sasl_buckets = self.input.param("sasl_buckets", 0)
        self.total_buckets = self.sasl_buckets + self.default_bucket + self.standard_buckets
        self.num_servers = self.input.param("servers", len(self.servers))
        # initial number of nodes in the cluster
        self.nodes_init = self.input.param("nodes_init", 1)

        self.num_replicas = self.input.param("replicas", 1)
        self.num_items = self.input.param("items", 1000)
        self.dgm_run = self.input.param("dgm_run", False)
        # max number of items to verify in ValidateDataTask; None - verify all
        self.max_verify = self.input.param("max_verify", None)
        #we don't change consistent_view on server by default
        self.disabled_consistent_view = self.input.param("disabled_consistent_view", None)
        self.log.info("==============  basetestcase setup was started for test #{0} {1}=============="\
                      .format(self.case_number, self._testMethodName))
        # avoid cleanup if the previous test has been torn down
        if not self.input.param("skip_cleanup", True) or self.case_number == 1:
            self.tearDown()
            self.cluster = Cluster()
        if str(self.__class__).find('rebalanceout.RebalanceOutTests') != -1:
            #rebalance all nodes into the cluster before each test
            self.cluster.rebalance(self.servers[:self.num_servers], self.servers[1:self.num_servers], [])
        elif self.nodes_init > 1:
            self.cluster.rebalance(self.servers[:1], self.servers[1:self.nodes_init], [])
        self.quota = self._initialize_nodes(self.cluster, self.servers, self.disabled_consistent_view)
        if self.dgm_run:
            self.quota = 256
        if self.total_buckets > 0:
            self.bucket_size = self._get_bucket_size(self.quota, self.total_buckets)

        if self.default_bucket:
            self.cluster.create_default_bucket(self.master, self.bucket_size, self.num_replicas)
            self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="",
                                       num_replicas=self.num_replicas, bucket_size=self.bucket_size))

        self._create_sasl_buckets(self.master, self.sasl_buckets)
        self._create_standard_buckets(self.master, self.standard_buckets)
        self.log.info("==============  basetestcase setup was finished for test #{0} {1} =============="\
                      .format(self.case_number, self._testMethodName))
        self._log_start(self)

    def tearDown(self):
        try:
            if (hasattr(self, '_resultForDoCleanups') and len(self._resultForDoCleanups.failures) > 0 \
                and TestInputSingleton.input.param("stop-on-failure", False))\
                    or self.input.param("skip_cleanup", False):
                self.log.warn("CLEANUP WAS SKIPPED")
            else:
                self.log.info("==============  basetestcase cleanup was started for test #{0} {1} =============="\
                      .format(self.case_number, self._testMethodName))
                rest = RestConnection(self.master)
                alerts = rest.get_alerts()
                if alerts is not None and len(alerts) != 0:
                    self.log.warn("Alerts were found: {0}".format(alerts))
                if rest._rebalance_progress_status() == 'running':
                    self.log.warning("rebalancing is still running, test should be verified")
                    stopped = rest.stop_rebalance()
                    self.assertTrue(stopped, msg="unable to stop rebalance")
                BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
                ClusterOperationHelper.cleanup_cluster(self.servers)
                time.sleep(10)
                ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
                self.log.info("==============  basetestcase cleanup was finished for test #{0} {1} =============="\
                      .format(self.case_number, self._testMethodName))
        finally:
            # stop all existing task manager threads
            self.cluster.shutdown()
            self._log_finish(self)

    @staticmethod
    def _log_start(self):
        try:
            msg = "{0} : {1} started ".format(datetime.datetime.now(), self._testMethodName)
            RestConnection(self.servers[0]).log_client_error(msg)
        except:
            pass

    @staticmethod
    def _log_finish(self):
        try:
            msg = "{0} : {1} finished ".format(datetime.datetime.now(), self._testMethodName)
            RestConnection(self.servers[0]).log_client_error(msg)
        except:
            pass

#......... some code omitted .........
Author: mschoch, Project: testrunner, Lines: 103, Source: basetestcase.py


Note: the couchbase.cluster.Cluster.shutdown method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors, and copyright remains with the original authors. Refer to each project's license before using or redistributing the code; do not repost without permission.