当前位置: 首页>>代码示例>>Python>>正文


Python Cluster.create_default_bucket方法代码示例

本文整理汇总了Python中couchbase.cluster.Cluster.create_default_bucket方法的典型用法代码示例。如果您正苦于以下问题:Python Cluster.create_default_bucket方法的具体用法?Python Cluster.create_default_bucket怎么用?Python Cluster.create_default_bucket使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在couchbase.cluster.Cluster的用法示例。


在下文中一共展示了Cluster.create_default_bucket方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: BaseTestCase

# 需要导入模块: from couchbase.cluster import Cluster [as 别名]
# 或者: from couchbase.cluster.Cluster import create_default_bucket [as 别名]
class BaseTestCase(unittest.TestCase):

    def setUp(self):
        """Per-test fixture: read test parameters from the runner input,
        optionally wipe the cluster, compute the per-bucket RAM quota, and
        create the requested default/SASL buckets.
        """
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.cluster = Cluster()
        self.servers = self.input.servers
        # bucket name -> {key: KVStore} used later for data verification
        # (the key ``1`` presumably identifies a kv-store partition — confirm)
        self.buckets = {}

        self.default_bucket = self.input.param("default_bucket", True)
        self.standard_buckets = self.input.param("standard_buckets", 0)
        self.sasl_buckets = self.input.param("sasl_buckets", 0)
        # default_bucket is a bool; True counts as 1 in this sum.
        self.total_buckets = self.sasl_buckets + self.default_bucket + self.standard_buckets
        self.num_servers = self.input.param("servers", len(self.servers))
        self.num_replicas = self.input.param("replicas", 1)
        self.num_items = self.input.param("items", 1000)
        self.dgm_run = self.input.param("dgm_run", False)

        # Wipe all buckets and eject every node unless the caller opted out.
        if not self.input.param("skip_cleanup", False):
            BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
            for server in self.servers:
                ClusterOperationHelper.cleanup_cluster([server])
            ClusterOperationHelper.wait_for_ns_servers_or_assert([self.servers[0]], self)

        self.quota = self._initialize_nodes(self.cluster, self.servers)
        if self.dgm_run:
            # Force a small quota for dgm runs — presumably so the data set
            # exceeds memory; confirm against the test plan.
            self.quota = 256
        self.bucket_size = self._get_bucket_size(self.quota, self.total_buckets)
        if self.default_bucket:
            self.cluster.create_default_bucket(self.servers[0], self.bucket_size, self.num_replicas)
            self.buckets['default'] = {1 : KVStore()}
        self._create_sasl_buckets(self.servers[0], self.sasl_buckets)
        # TODO (Mike): Create Standard buckets

    def tearDown(self):
        """Delete every bucket, reset all nodes, and stop the cluster's
        task-manager threads. Order matters: buckets must go before the
        nodes are cleaned, and shutdown() must run last.
        """
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
        ClusterOperationHelper.cleanup_cluster(self.servers)
        ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
        self.buckets = {}
        # stop the background task-manager threads owned by Cluster()
        self.cluster.shutdown()

    def _initialize_nodes(self, cluster, servers):
        quota = 0
        init_tasks = []
        for server in servers:
            init_tasks.append(cluster.async_init_node(server))
        for task in init_tasks:
            node_quota = task.result()
            if node_quota < quota or quota == 0:
                quota = node_quota
        return quota

    def _get_bucket_size(self, quota, num_buckets, ratio=2.0/3.0):
        ip = self.servers[0]
        for server in self.servers:
            if server.ip == ip:
                return int(ratio / float(self.num_servers) / float(num_buckets) * float(quota))
        return int(ratio / float(num_buckets) * float(quota))

    def _create_sasl_buckets(self, server, num_buckets):
        """Create ``num_buckets`` SASL buckets named bucket0..bucketN-1 on
        *server*, register a KVStore for each, and block until every
        creation task completes."""
        pending = []
        for idx in range(num_buckets):
            bucket_name = 'bucket' + str(idx)
            task = self.cluster.async_create_sasl_bucket(server, bucket_name,
                                                         'password',
                                                         self.bucket_size,
                                                         self.num_replicas)
            pending.append(task)
            self.buckets[bucket_name] = {1 : KVStore()}
        for task in pending:
            task.result()

    def _verify_stats_all_buckets(self, servers):
        stats_tasks = []
        for bucket, kv_stores in self.buckets.items():
            items = sum([len(kv_store) for kv_store in kv_stores.values()])
            stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                               'curr_items', '==', items))
            stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                               'vb_active_curr_items', '==', items))

            available_replicas = self.num_replicas
            if len(servers) == self.num_replicas:
                available_replicas = len(servers) - 1
            elif len(servers) <= self.num_replicas:
                available_replicas = len(servers) - 1

            stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                                   'vb_replica_curr_items', '==', items * available_replicas))
            stats_tasks.append(self.cluster.async_wait_for_stats(servers, bucket, '',
                                   'curr_items_tot', '==', items * (available_replicas + 1)))

        for task in stats_tasks:
            task.result(60)


    """Asynchronously applys load generation to all bucekts in the cluster.

    Args:
        server - A server in the cluster. (TestInputServer)
        kv_gen - The generator to use to generate load. (DocumentGenerator)
#.........这里部分代码省略.........
开发者ID:paul-guo-,项目名称:appstack,代码行数:103,代码来源:basetestcase.py

示例2: XDCRBaseTest

# 需要导入模块: from couchbase.cluster import Cluster [as 别名]
# 或者: from couchbase.cluster.Cluster import create_default_bucket [as 别名]

#.........这里部分代码省略.........
        self._sasl_buckets_int = self._input.param("sasl_buckets", 0)
        self._total_buckets_int = self._sasl_buckets_int + self._default_bucket_bool + self._standard_buckets_int

        #self.num_servers = self._input.param("servers", len(self.servers))
        self._num_replicas_int = self._input.param("replicas", 1)
        self._num_items_int = self._input.param("items", 1000)
        self._dgm_run_bool = self._input.param("dgm_run", False)

        self._mem_quota_int = 0 # will be set in subsequent methods

        self.init_parameters_extended()

        self._log.info("Initializing input parameters completed...")


    def _init_clusters(self):
        """Name and fully set up each cluster recorded in ``self._clusters_dic``."""
        for cluster_key in self._clusters_keys_olst:
            self._set_cluster_names(cluster_key)
            self._setup_cluster(self._clusters_dic[cluster_key])


    def init_parameters_extended(self):
        """Hook: subclasses override this to initialize extra input parameters."""
        pass

    def setup_extended(self):
        """Hook: subclasses override this to add custom setup steps."""
        pass

    def teardown_extended(self):
        """Hook: subclasses override this to add custom teardown steps."""
        pass

    def _do_cleanup(self):
        """Delete every bucket and reset every node, one cluster at a time."""
        for cluster_key in self._clusters_keys_olst:
            cluster_nodes = self._clusters_dic[cluster_key]
            BucketOperationHelper.delete_all_buckets_or_assert(cluster_nodes, self)
            ClusterOperationHelper.cleanup_cluster(cluster_nodes)
            ClusterOperationHelper.wait_for_ns_servers_or_assert(cluster_nodes, self)

    def _cleanup_broken_setup(self):
        """Best-effort teardown after a failed setup: errors are logged and
        never propagated.

        Fix: narrowed the bare ``except:`` to ``except Exception`` so
        SystemExit/KeyboardInterrupt still abort the run instead of being
        swallowed.
        """
        try:
            self.tearDown()
        except Exception:
            self._log.info("Error while cleaning broken setup.")

    def _set_cluster_names(self, key):
        """Assign the next sequential cluster name ("cluster-N") to *key*."""
        self._cluster_counter_temp_int = self._cluster_counter_temp_int + 1
        self._cluster_names_dic[key] = "cluster-{0}".format(self._cluster_counter_temp_int)

    def _setup_cluster(self, nodes):
        """Bring one cluster up. Order matters: nodes are initialized first,
        buckets are created on the first node, then the remaining nodes are
        rebalanced in."""
        self._init_nodes(nodes)
        self._create_buckets(nodes)
        self._config_cluster(nodes)

    def _init_nodes(self, nodes):
        """Asynchronously initialize *nodes*, keeping ``self._mem_quota_int``
        at the smallest non-zero quota reported by any node."""
        pending = [self._cluster_helper.async_init_node(node) for node in nodes]
        for job in pending:
            reported = job.result()
            if self._mem_quota_int == 0 or reported < self._mem_quota_int:
                self._mem_quota_int = reported

    def _create_buckets(self, nodes):
        """Create the requested default/SASL buckets on the first node of
        *nodes*, sizing each from the cluster-wide memory quota."""
        master_node = nodes[0]
        if self._dgm_run_bool:
            # dgm runs force a small quota — presumably so data exceeds RAM;
            # confirm against the test plan.
            self._mem_quota_int = 256
        bucket_size = self._get_bucket_size(master_node, nodes, self._mem_quota_int, self._total_buckets_int)
        if self._default_bucket_bool:
            self._cluster_helper.create_default_bucket(master_node, bucket_size, self._num_replicas_int)
            #self._buckets_arr['default'] = {1 : KVStore()} # NOTE(review): left disabled by the author; purpose unconfirmed
        self._create_sasl_buckets(master_node, bucket_size, self._sasl_buckets_int)
        # TODO (Mike): Create Standard buckets

    def _create_sasl_buckets(self, master_node, bucket_size, num_buckets):
        """Create ``num_buckets`` SASL buckets (bucket0..bucketN-1) on
        *master_node* and wait for each creation task to finish.

        Fixes to the original (garbled) version:
        - ``self.self.cluster_helper`` -> ``self._cluster_helper`` (typo;
          every sibling method uses ``self._cluster_helper``).
        - ``task.result()`` was called on an unbound name outside any loop;
          wait on each queued task instead.
        - Removed a half-duplicated ``_config_cluster`` fragment that had
          been merged into this method — the real ``_config_cluster`` is
          defined immediately below.
        """
        bucket_tasks = []
        for i in range(num_buckets):
            name = 'bucket' + str(i)
            bucket_tasks.append(self._cluster_helper.async_create_sasl_bucket(master_node, name,
                                                                              'password', bucket_size,
                                                                              self._num_replicas_int))
        for task in bucket_tasks:
            task.result()

    def _config_cluster(self, nodes):
        """Rebalance every node after nodes[0] into the cluster and block
        until the rebalance finishes."""
        rebalance_task = self._cluster_helper.async_rebalance(nodes, nodes[1:], [])
        rebalance_task.result()

    def _get_bucket_size(self, master_node, nodes, mem_quota, num_buckets, ratio = 2.0 / 3.0):
        """Return the per-bucket RAM quota: ``ratio`` of *mem_quota* split
        across *num_buckets*, and additionally across ``len(nodes)`` when
        *master_node* is a member of *nodes*."""
        if any(node.ip == master_node.ip for node in nodes):
            return int(ratio / float(len(nodes)) / float(num_buckets) * float(mem_quota))
        return int(ratio / float(num_buckets) * float(mem_quota))
开发者ID:jchris,项目名称:testrunner,代码行数:104,代码来源:xdcrbasetests.py

示例3: BaseTestCase

# 需要导入模块: from couchbase.cluster import Cluster [as 别名]
# 或者: from couchbase.cluster.Cluster import create_default_bucket [as 别名]
class BaseTestCase(unittest.TestCase):

    def setUp(self):
        """Per-test fixture: read runner parameters, optionally tear the
        previous state down, initialize all nodes, and create the requested
        default/SASL/standard buckets.
        """
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        self.buckets = []
        self.master = self.servers[0]
        self.cluster = Cluster()
        self.wait_timeout = self.input.param("wait_timeout", 60)
        #number of case that is performed from testrunner (incremented each time)
        self.case_number = self.input.param("case_number", 0)
        self.default_bucket = self.input.param("default_bucket", True)
        if self.default_bucket:
            self.default_bucket_name = "default"
        self.standard_buckets = self.input.param("standard_buckets", 0)
        self.sasl_buckets = self.input.param("sasl_buckets", 0)
        # default_bucket is a bool; True counts as 1 in this sum.
        self.total_buckets = self.sasl_buckets + self.default_bucket + self.standard_buckets
        self.num_servers = self.input.param("servers", len(self.servers))
        self.num_replicas = self.input.param("replicas", 1)
        self.num_items = self.input.param("items", 1000)
        self.dgm_run = self.input.param("dgm_run", False)
        #max items number to verify in ValidateDataTask, None - verify all
        self.max_verify = self.input.param("max_verify", None)
        #we don't change consistent_view on server by default
        self.disabled_consistent_view = self.input.param("disabled_consistent_view", None)
        self.log.info("==============  basetestcase setup was started for test #{0} {1}=============="\
                      .format(self.case_number, self._testMethodName))
        # Avoid cleanup unless explicitly requested or this is the first
        # case of a run; tearDown() shuts self.cluster down, so recreate it.
        if not self.input.param("skip_cleanup", True) or self.case_number == 1:
            self.tearDown()
            self.cluster = Cluster()
        self.quota = self._initialize_nodes(self.cluster, self.servers, self.disabled_consistent_view)
        if self.dgm_run:
            # small quota for dgm runs — presumably data-greater-than-memory; confirm
            self.quota = 256
        # NOTE: bucket_size is only defined when at least one bucket is
        # requested; with default_bucket True, total_buckets >= 1 holds.
        if self.total_buckets > 0:
            self.bucket_size = self._get_bucket_size(self.quota, self.total_buckets)

        if self.default_bucket:
            self.cluster.create_default_bucket(self.master, self.bucket_size, self.num_replicas)
            self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="",
                                       num_replicas=self.num_replicas, bucket_size=self.bucket_size))

        self._create_sasl_buckets(self.master, self.sasl_buckets)
        self._create_standard_buckets(self.master, self.standard_buckets)
        self.log.info("==============  basetestcase setup was finished for test #{0} {1} =============="\
                      .format(self.case_number, self._testMethodName))
        self._log_start(self)

    def tearDown(self):
        """Clean the cluster (unless skip_cleanup is set): surface alerts,
        stop any in-flight rebalance, delete buckets, reset nodes, and stop
        the task-manager threads."""
        # NOTE(review): when skip_cleanup is set, the whole body — including
        # the finally-block's cluster.shutdown() — is skipped; confirm that
        # is intended.
        if not self.input.param("skip_cleanup", False):
            try:
                self.log.info("==============  basetestcase cleanup was started for test #{0} {1} =============="\
                          .format(self.case_number, self._testMethodName))
                rest = RestConnection(self.master)
                alerts = rest.get_alerts()
                if alerts is not None and len(alerts) != 0:
                    self.log.warn("Alerts were found: {0}".format(alerts))
                # A still-running rebalance means the test left work behind;
                # stop it so bucket deletion can proceed.
                if rest._rebalance_progress_status() == 'running':
                    self.log.warning("rebalancing is still running, test should be verified")
                    stopped = rest.stop_rebalance()
                    self.assertTrue(stopped, msg="unable to stop rebalance")
                BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
                ClusterOperationHelper.cleanup_cluster(self.servers)
                time.sleep(10)
                ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
                self.log.info("==============  basetestcase cleanup was finished for test #{0} {1} =============="\
                          .format(self.case_number, self._testMethodName))
            finally:
                #stop all existing task manager threads
                self.cluster.shutdown()
                self._log_finish(self)

    @staticmethod
    def _log_start(self):
        try:
            msg = "{0} : {1} started ".format(datetime.datetime.now(), self._testMethodName)
            RestConnection(self.servers[0]).log_client_error(msg)
        except:
            pass

    @staticmethod
    def _log_finish(self):
        try:
            msg = "{0} : {1} finished ".format(datetime.datetime.now(), self._testMethodName)
            RestConnection(self.servers[0]).log_client_error(msg)
        except:
            pass

    def _initialize_nodes(self, cluster, servers, disabled_consistent_view=None):
        quota = 0
        init_tasks = []
        for server in servers:
            init_tasks.append(cluster.async_init_node(server, disabled_consistent_view))
        for task in init_tasks:
            node_quota = task.result()
            if node_quota < quota or quota == 0:
                quota = node_quota
        return quota

#.........这里部分代码省略.........
开发者ID:ronniedada,项目名称:testrunner,代码行数:103,代码来源:basetestcase.py

示例4: CheckpointTests

# 需要导入模块: from couchbase.cluster import Cluster [as 别名]
# 或者: from couchbase.cluster.Cluster import create_default_bucket [as 别名]
class CheckpointTests(unittest.TestCase):

    def setUp(self):
        """Per-test fixture: wipe the cluster, pin the node to a single
        vbucket (the tests below read ``vb_0:*`` stats), create the default
        bucket, and rebalance the configured servers in."""
        self.cluster = Cluster()

        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        self.num_servers = self.input.param("servers", 1)

        master = self.servers[0]
        num_replicas = self.input.param("replicas", 1)
        self.bucket = 'default'

        # Start: Should be in a before class function
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
        for server in self.servers:
            ClusterOperationHelper.cleanup_cluster([server])
        ClusterOperationHelper.wait_for_ns_servers_or_assert([master], self)
        # End: Should be in a before class function

        self.quota = self.cluster.init_node(master)
        # remember the original vbucket count so tearDown can restore it
        self.old_vbuckets = self._get_vbuckets(master)
        ClusterOperationHelper.set_vbuckets(master, 1)
        self.cluster.create_default_bucket(master, self.quota, num_replicas)
        self.cluster.rebalance(self.servers[:self.num_servers],
                               self.servers[1:self.num_servers], [])

    def tearDown(self):
        """Undo setUp: restore the vbucket count, stop any rebalance, eject
        the non-master nodes, drop the bucket, and stop task threads."""
        master = self.servers[0]
        ClusterOperationHelper.set_vbuckets(master, self.old_vbuckets)
        rest = RestConnection(master)
        rest.stop_rebalance()
        # rebalance the non-master nodes OUT (empty add list, eject the rest)
        self.cluster.rebalance(self.servers[:self.num_servers], [],
                               self.servers[1:self.num_servers])
        self.cluster.bucket_delete(master, self.bucket)
        self.cluster.shutdown()

    def checkpoint_create_items(self):
        """Load more items than the configured checkpoint size and verify
        that every node opens a new checkpoint (``vb_0:open_checkpoint_id``
        increases)."""
        param = 'checkpoint'
        stat_key = 'vb_0:open_checkpoint_id'
        num_items = 6000

        master = self._get_server_by_state(self.servers[:self.num_servers], self.bucket, ACTIVE)
        # Shrink the checkpoint so 6000 items are guaranteed to roll it over.
        self._set_checkpoint_size(self.servers[:self.num_servers], self.bucket, '5000')
        chk_stats = StatsCommon.get_stats(self.servers[:self.num_servers], self.bucket,
                                          param, stat_key)
        self.generate_load(master, self.bucket, num_items).join()
        waiters = [self.cluster.async_wait_for_stats([server], self.bucket, param,
                                                     stat_key, '>', value)
                   for server, value in chk_stats.items()]
        # allow ~1ms of wait budget per item, never less than 30s
        timeout = max(30, num_items * .001)
        for waiter in waiters:
            try:
                waiter.result(timeout)
            except TimeoutError:
                self.fail("New checkpoint not created")

    def checkpoint_create_time(self):
        """Verify that a new open checkpoint is created once the checkpoint
        timeout elapses, even with only a single mutation.

        Fix: removed a stray ``)`` from the sleep log message
        ("Sleeping for {0} seconds)").
        """
        param = 'checkpoint'
        timeout = 60
        stat_key = 'vb_0:open_checkpoint_id'

        master = self._get_server_by_state(self.servers[:self.num_servers], self.bucket, ACTIVE)
        self._set_checkpoint_timeout(self.servers[:self.num_servers], self.bucket, str(timeout))
        chk_stats = StatsCommon.get_stats(self.servers[:self.num_servers], self.bucket,
                                          param, stat_key)
        # a single mutation is enough; the rollover should come from time
        load_thread = self.generate_load(master, self.bucket, 1)
        load_thread.join()
        log.info("Sleeping for {0} seconds".format(timeout))
        time.sleep(timeout)
        tasks = []
        for server, value in chk_stats.items():
            tasks.append(self.cluster.async_wait_for_stats([server], self.bucket, param,
                                                           stat_key, '>', value))
        for task in tasks:
            try:
                task.result(60)
            except TimeoutError:
                self.fail("New checkpoint not created")
        # restore the default checkpoint timeout for subsequent tests
        self._set_checkpoint_timeout(self.servers[:self.num_servers], self.bucket, str(600))

    def checkpoint_collapse(self):
        param = 'checkpoint'
        chk_size = 5000
        num_items = 25000
        stat_key = 'vb_0:last_closed_checkpoint_id'
        stat_chk_itms = 'vb_0:num_checkpoint_items'

        master = self._get_server_by_state(self.servers[:self.num_servers], self.bucket, ACTIVE)
        slave1 = self._get_server_by_state(self.servers[:self.num_servers], self.bucket, REPLICA1)
        slave2 = self._get_server_by_state(self.servers[:self.num_servers], self.bucket, REPLICA2)
        self._set_checkpoint_size(self.servers[:self.num_servers], self.bucket, str(chk_size))
        m_stats = StatsCommon.get_stats([master], self.bucket, param, stat_key)
        self._stop_replication(slave2, self.bucket)
        load_thread = self.generate_load(master, self.bucket, num_items)
        load_thread.join()

        tasks = []
#.........这里部分代码省略.........
开发者ID:IrynaMironava,项目名称:testrunner,代码行数:103,代码来源:checkpoint.py

示例5: FailoverBaseTest

# 需要导入模块: from couchbase.cluster import Cluster [as 别名]
# 或者: from couchbase.cluster.Cluster import create_default_bucket [as 别名]
class FailoverBaseTest(unittest.TestCase):

    @staticmethod
    def setUp(self):
        """Per-test fixture: read failover parameters, initialize all nodes,
        build the cluster, and create the requested buckets.

        Fix: under ``dgm_run`` the original assigned ``self.quota = 256``,
        but every other read/write in this class uses ``self._quota`` (set
        just above, consumed by the bucket_size computation below) — so the
        dgm override was silently lost. Assign ``self._quota`` instead.
        """
        log = logger.Logger.get_logger()
        self._input = TestInputSingleton.input
        self._keys_count = self._input.param("keys_count", DEFAULT_KEY_COUNT)
        self._num_replicas = self._input.param("replica", DEFAULT_REPLICA)
        self.bidirectional = self._input.param("bidirectional", False)
        self.case_number = self._input.param("case_number", 0)
        self._value_size = self._input.param("value_size", 256)
        self.wait_timeout = self._input.param("wait_timeout", 60)
        self._servers = self._input.servers
        self.master = self._servers[0]
        self._failed_nodes = []
        num_buckets = 0
        self.buckets = []
        self.default_bucket = self._input.param("default_bucket", True)
        if self.default_bucket:
            self.default_bucket_name = "default"
            num_buckets += 1
        self._standard_buckets = self._input.param("standard_buckets", 0)
        self._sasl_buckets = self._input.param("sasl_buckets", 0)
        num_buckets += self._standard_buckets + self._sasl_buckets
        self.dgm_run = self._input.param("dgm_run", True)
        self.log = logger.Logger().get_logger()
        self._cluster_helper = Cluster()
        self.disabled_consistent_view = self._input.param("disabled_consistent_view", None)
        self._quota = self._initialize_nodes(self._cluster_helper, self._servers, self.disabled_consistent_view)
        if self.dgm_run:
            self._quota = 256  # was ``self.quota`` — see docstring
        self.bucket_size = int((2.0 / 3.0) / float(num_buckets) * float(self._quota))
        self.gen_create = BlobGenerator('loadOne', 'loadOne_', self._value_size, end=self._keys_count)
        self.add_back_flag = False
        self._cleanup_nodes = []
        log.info("==============  setup was started for test #{0} {1}=============="\
                      .format(self.case_number, self._testMethodName))
        RemoteUtilHelper.common_basic_setup(self._servers)
        BucketOperationHelper.delete_all_buckets_or_assert(self._servers, self)
        for server in self._servers:
            ClusterOperationHelper.cleanup_cluster([server])
        ClusterHelper.wait_for_ns_servers_or_assert(self._servers, self)
        self._setup_cluster()
        self._create_buckets_()
        log.info("==============  setup was finished for test #{0} {1} =============="\
                      .format(self.case_number, self._testMethodName))

    @staticmethod
    def tearDown(self):
        """Undo setUp: stop task threads, restore connectivity on nodes that
        were firewalled during failover, restart couchbase-server on them,
        then wipe buckets and reset the cluster."""
        try:
            self._cluster_helper.shutdown()
            log = logger.Logger.get_logger()
            log.info("==============  tearDown was started for test #{0} {1} =============="\
                              .format(self.case_number, self._testMethodName))
            RemoteUtilHelper.common_basic_setup(self._servers)
            log.info("10 seconds delay to wait for membase-server to start")
            time.sleep(10)
            # _cleanup_nodes holds servers whose traffic was blocked by the
            # failover scenario; flush the firewall rules and restart them.
            for server in self._cleanup_nodes:
                shell = RemoteMachineShellConnection(server)
                o, r = shell.execute_command("iptables -F")
                shell.log_command_output(o, r)
                o, r = shell.execute_command("/sbin/iptables -A INPUT -p tcp -i eth0 --dport 1000:60000 -j ACCEPT")
                shell.log_command_output(o, r)
                o, r = shell.execute_command("/sbin/iptables -A OUTPUT -p tcp -o eth0 --dport 1000:60000 -j ACCEPT")
                shell.log_command_output(o, r)
                o, r = shell.execute_command("/etc/init.d/couchbase-server start")
                shell.log_command_output(o, r)
                shell.disconnect()
            BucketOperationHelper.delete_all_buckets_or_assert(self._servers, self)
            ClusterOperationHelper.cleanup_cluster(self._servers)
            ClusterHelper.wait_for_ns_servers_or_assert(self._servers, self)
            log.info("==============  tearDown was finished for test #{0} {1} =============="\
                              .format(self.case_number, self._testMethodName))
        finally:
            # NOTE(review): this finally-block is a no-op; exceptions from
            # the body still propagate — confirm that is intended.
            pass

    def _initialize_nodes(self, cluster, servers, disabled_consistent_view=None):
        """Initialize every server asynchronously and return the smallest
        non-zero memory quota any node reports."""
        jobs = [cluster.async_init_node(srv, disabled_consistent_view)
                for srv in servers]
        smallest = 0
        for job in jobs:
            reported = job.result()
            if smallest == 0 or reported < smallest:
                smallest = reported
        return smallest

    def _setup_cluster(self):
        """Add every configured server to the master's cluster over REST and
        rebalance them in, asserting the rebalance completes."""
        rest = RestConnection(self.master)
        credentials = self._input.membase_settings
        ClusterOperationHelper.add_all_nodes_or_assert(self.master, self._servers, credentials, self)
        nodes = rest.node_statuses()
        rest.rebalance(otpNodes=[node.id for node in nodes], ejectedNodes=[])
        msg = "rebalance failed after adding these nodes {0}".format(nodes)
        self.assertTrue(rest.monitorRebalance(), msg=msg)

    def _create_buckets_(self):
        if self.default_bucket:
            self._cluster_helper.create_default_bucket(self.master, self.bucket_size, self._num_replicas)
            self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="",
#.........这里部分代码省略.........
开发者ID:mschoch,项目名称:testrunner,代码行数:103,代码来源:failovertests.py

示例6: BaseTestCase

# 需要导入模块: from couchbase.cluster import Cluster [as 别名]
# 或者: from couchbase.cluster.Cluster import create_default_bucket [as 别名]
class BaseTestCase(unittest.TestCase):
    def setUp(self):
        """Per-test fixture: read runner parameters, optionally tear the
        previous state down, pre-rebalance nodes in where required, then
        create the requested default/SASL/standard buckets.
        """
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        self.buckets = []
        self.master = self.servers[0]
        self.cluster = Cluster()
        self.wait_timeout = self.input.param("wait_timeout", 60)
        #number of case that is performed from testrunner (incremented each time)
        self.case_number = self.input.param("case_number", 0)
        self.default_bucket = self.input.param("default_bucket", True)
        if self.default_bucket:
            self.default_bucket_name = "default"
        self.standard_buckets = self.input.param("standard_buckets", 0)
        self.sasl_buckets = self.input.param("sasl_buckets", 0)
        # default_bucket is a bool; True counts as 1 in this sum.
        self.total_buckets = self.sasl_buckets + self.default_bucket + self.standard_buckets
        self.num_servers = self.input.param("servers", len(self.servers))
        #initial number of nodes to include in the cluster
        self.nodes_init = self.input.param("nodes_init", 1)

        self.num_replicas = self.input.param("replicas", 1)
        self.num_items = self.input.param("items", 1000)
        self.dgm_run = self.input.param("dgm_run", False)
        #max items number to verify in ValidateDataTask, None - verify all
        self.max_verify = self.input.param("max_verify", None)
        #we don't change consistent_view on server by default
        self.disabled_consistent_view = self.input.param("disabled_consistent_view", None)
        self.log.info("==============  basetestcase setup was started for test #{0} {1}=============="\
                      .format(self.case_number, self._testMethodName))
        # Avoid cleanup unless explicitly requested or this is the first
        # case; tearDown() shuts self.cluster down, so recreate it after.
        if not self.input.param("skip_cleanup", True) or self.case_number == 1:
            self.tearDown()
            self.cluster = Cluster()
        # Rebalance-out tests start from a fully-populated cluster; other
        # tests start from nodes_init members.
        if str(self.__class__).find('rebalanceout.RebalanceOutTests') != -1:
            #rebalance all nodes into the cluster before each test
            self.cluster.rebalance(self.servers[:self.num_servers], self.servers[1:self.num_servers], [])
        elif self.nodes_init > 1:
            self.cluster.rebalance(self.servers[:1], self.servers[1:self.nodes_init], [])
        self.quota = self._initialize_nodes(self.cluster, self.servers, self.disabled_consistent_view)
        if self.dgm_run:
            # small quota for dgm runs — presumably data-greater-than-memory; confirm
            self.quota = 256
        if self.total_buckets > 0:
            self.bucket_size = self._get_bucket_size(self.quota, self.total_buckets)

        if self.default_bucket:
            self.cluster.create_default_bucket(self.master, self.bucket_size, self.num_replicas)
            self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="",
                                       num_replicas=self.num_replicas, bucket_size=self.bucket_size))

        self._create_sasl_buckets(self.master, self.sasl_buckets)
        self._create_standard_buckets(self.master, self.standard_buckets)
        self.log.info("==============  basetestcase setup was finished for test #{0} {1} =============="\
                      .format(self.case_number, self._testMethodName))
        self._log_start(self)

    def tearDown(self):
            """Clean the cluster unless cleanup is skipped (stop-on-failure
            with a recorded failure, or skip_cleanup); always stop the
            task-manager threads and log the finish marker."""
            # NOTE: the whole body is indented one extra level in the
            # original; preserved as-is.
            try:
                if (hasattr(self, '_resultForDoCleanups') and len(self._resultForDoCleanups.failures) > 0 \
                    and TestInputSingleton.input.param("stop-on-failure", False))\
                        or self.input.param("skip_cleanup", False):
                    self.log.warn("CLEANUP WAS SKIPPED")
                else:
                    self.log.info("==============  basetestcase cleanup was started for test #{0} {1} =============="\
                          .format(self.case_number, self._testMethodName))
                    rest = RestConnection(self.master)
                    alerts = rest.get_alerts()
                    if alerts is not None and len(alerts) != 0:
                        self.log.warn("Alerts were found: {0}".format(alerts))
                    # stop any in-flight rebalance so bucket deletion can proceed
                    if rest._rebalance_progress_status() == 'running':
                        self.log.warning("rebalancing is still running, test should be verified")
                        stopped = rest.stop_rebalance()
                        self.assertTrue(stopped, msg="unable to stop rebalance")
                    BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
                    ClusterOperationHelper.cleanup_cluster(self.servers)
                    time.sleep(10)
                    ClusterOperationHelper.wait_for_ns_servers_or_assert(self.servers, self)
                    self.log.info("==============  basetestcase cleanup was finished for test #{0} {1} =============="\
                          .format(self.case_number, self._testMethodName))
            finally:
                #stop all existing task manager threads
                self.cluster.shutdown()
                self._log_finish(self)

    @staticmethod
    def _log_start(self):
        try:
            msg = "{0} : {1} started ".format(datetime.datetime.now(), self._testMethodName)
            RestConnection(self.servers[0]).log_client_error(msg)
        except:
            pass

    @staticmethod
    def _log_finish(self):
        try:
            msg = "{0} : {1} finished ".format(datetime.datetime.now(), self._testMethodName)
            RestConnection(self.servers[0]).log_client_error(msg)
        except:
            pass

#.........这里部分代码省略.........
开发者ID:mschoch,项目名称:testrunner,代码行数:103,代码来源:basetestcase.py


注:本文中的couchbase.cluster.Cluster.create_default_bucket方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。