

Python RestConnection.create_bucket method: code examples

This article collects typical usage examples of the Python method lib.membase.api.rest_client.RestConnection.create_bucket. If you are wondering what RestConnection.create_bucket does, how to call it, or where to find real-world usage, the curated examples below should help. You can also look further into other usage examples of the class it belongs to, lib.membase.api.rest_client.RestConnection.


Three code examples of the RestConnection.create_bucket method are shown below, ordered by popularity by default.
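
Before the full test cases, here is a minimal standalone sketch of the call itself. The host, port and credentials are placeholders, and the snippet assumes RestConnection also accepts a plain connection dict, as some testrunner versions do; the keyword arguments bucket, ramQuotaMB and replicaNumber are the same ones used in the examples below.

    # Minimal sketch with placeholder connection details, not taken from the examples.
    from lib.membase.api.rest_client import RestConnection, RestHelper

    rest = RestConnection({"ip": "127.0.0.1", "port": 8091,
                           "username": "Administrator", "password": "password"})
    rest.create_bucket(bucket='default', ramQuotaMB=256, replicaNumber=1)
    # wait for the bucket's vbucket map, as example 2 does, before loading data
    if not RestHelper(rest).vbucket_map_ready('default', 60):
        raise Exception("bucket 'default' did not become ready in time")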

Example 1: test_xdcr_and_indexing_with_eventing

# Required import: from lib.membase.api.rest_client import RestConnection
# Method shown: RestConnection.create_bucket
 def test_xdcr_and_indexing_with_eventing(self):
     rest_src = RestConnection(self.servers[0])
     rest_dst = RestConnection(self.servers[2])
     self.n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
     self.n1ql_helper = N1QLHelper(shell=self.shell,
                                   max_verify=self.max_verify,
                                   buckets=self.buckets,
                                   item_flag=self.item_flag,
                                   n1ql_port=self.n1ql_port,
                                   full_docs_list=self.full_docs_list,
                                   log=self.log, input=self.input,
                                   master=self.master,
                                   use_rest=True
                                   )
     self.n1ql_helper.create_primary_index(using_gsi=True, server=self.n1ql_node)
     try:
         rest_src.remove_all_replications()
         rest_src.remove_all_remote_clusters()
         rest_src.add_remote_cluster(self.servers[2].ip, self.servers[2].port, self.servers[0].rest_username,
                                     self.servers[0].rest_password, "C2")
         rest_dst.create_bucket(bucket=self.src_bucket_name, ramQuotaMB=100)
         # setup xdcr relationship
         repl_id = rest_src.start_replication('continuous', self.src_bucket_name, "C2")
         if repl_id is not None:
             self.log.info("Replication created successfully")
         self.load(self.gens_load, buckets=self.src_bucket, flag=self.item_flag, verify_data=False,
                   batch_size=self.batch_size)
         body = self.create_save_function_body(self.function_name, HANDLER_CODE.DELETE_BUCKET_OP_ON_DELETE)
         # deploy function
         self.deploy_function(body)
         # Wait for eventing to catch up with all the update mutations and verify results
         self.verify_eventing_results(self.function_name, self.docs_per_day * 2016)
         stats_xdcr_dst = rest_dst.get_bucket_stats(self.src_bucket_name)
         index_bucket_map = self.n1ql_helper.get_index_count_using_primary_index(self.buckets, self.n1ql_node)
         actual_count = index_bucket_map[self.src_bucket_name]
         log.info("No of docs in xdcr destination bucket : {0}".format(stats_xdcr_dst["curr_items"]))
         log.info("No of docs indexed by primary index: {0}".format(actual_count))
         if stats_xdcr_dst["curr_items"] != self.docs_per_day * 2016:
             self.fail("xdcr did not replicate all documents, actual : {0} expected : {1}".format(
                 stats_xdcr_dst["curr_items"], self.docs_per_day * 2016))
         if actual_count != self.docs_per_day * 2016:
             self.fail("Not all the items were indexed, actual : {0} expected : {1}".format(
                 actual_count, self.docs_per_day * 2016))
         # delete all documents
         self.load(self.gens_load, buckets=self.src_bucket, flag=self.item_flag, verify_data=False,
                   batch_size=self.batch_size, op_type='delete')
         # Wait for eventing to catch up with all the delete mutations and verify results
         self.verify_eventing_results(self.function_name, 0, skip_stats_validation=True)
         stats_xdcr_dst = rest_dst.get_bucket_stats(self.src_bucket_name)
         index_bucket_map = self.n1ql_helper.get_index_count_using_primary_index(self.buckets, self.n1ql_node)
         actual_count = index_bucket_map[self.src_bucket_name]
         log.info("No of docs in xdcr destination bucket : {0}".format(stats_xdcr_dst["curr_items"]))
         log.info("No of docs indexed by primary index: {0}".format(actual_count))
         if stats_xdcr_dst["curr_items"] != 0:
             self.fail("xdcr did not replicate all documents, actual : {0} expected : {1}".format(
                 stats_xdcr_dst["curr_items"], 0))
         if actual_count != 0:
             self.fail("Not all the items were indexed, actual : {0} expected : {1}".format(actual_count, 0))
         self.undeploy_and_delete_function(body)
     finally:
         self.n1ql_helper.drop_primary_index(using_gsi=True, server=self.n1ql_node)
         rest_dst.delete_bucket()
Author: membase, Project: testrunner, Lines of code: 64, Source file: eventing_concurrency.py

Example 2: test_cbcollect_with_redaction_enabled_with_xdcr

# Required import: from lib.membase.api.rest_client import RestConnection
# Method shown: RestConnection.create_bucket
    def test_cbcollect_with_redaction_enabled_with_xdcr(self):
        rest_src = RestConnection(self.master)
        rest_src.remove_all_replications()
        rest_src.remove_all_remote_clusters()

        rest_dest = RestConnection(self.servers[1])
        rest_dest_helper = RestHelper(rest_dest)

        try:
            rest_src.remove_all_replications()
            rest_src.remove_all_remote_clusters()
            self.set_redaction_level()
            rest_src.add_remote_cluster(self.servers[1].ip, self.servers[1].port,
                                        self.servers[1].rest_username,
                                        self.servers[1].rest_password, "C2")

            """ at dest cluster """
            self.add_built_in_server_user(node=self.servers[1])
            rest_dest.create_bucket(bucket='default', ramQuotaMB=512)
            bucket_ready = rest_dest_helper.vbucket_map_ready('default')
            if not bucket_ready:
                self.fail("Bucket default at dest not created after 120 seconds.")
            repl_id = rest_src.start_replication('continuous', 'default', "C2")
            if repl_id is not None:
                self.log.info("Replication created successfully")
            gen = BlobGenerator("ent-backup", "ent-backup-", self.value_size, end=self.num_items)
            tasks = self._async_load_all_buckets(self.master, gen, "create", 0)
            for task in tasks:
                task.result()
            self.sleep(10)

            """ enable firewall """
            if self.interrupt_replication:
                RemoteUtilHelper.enable_firewall(self.master, xdcr=True)

            """ start collect logs """
            self.start_logs_collection()
            result = self.monitor_logs_collection()
            """ verify logs """
            try:
                logs_path = result["perNode"]["[email protected]" + str(self.master.ip)]["path"]
            except KeyError:
                logs_path = result["perNode"]["[email protected]"]["path"]
            redactFileName = logs_path.split('/')[-1]
            nonredactFileName = logs_path.split('/')[-1].replace('-redacted', '')
            remotepath = logs_path[0:logs_path.rfind('/')+1]
            self.verify_log_files_exist(remotepath=remotepath,
                                        redactFileName=redactFileName,
                                        nonredactFileName=nonredactFileName)
            self.log.info("Verify on log ns_server.goxdcr.log")
            self.verify_log_redaction(remotepath=remotepath,
                                      redactFileName=redactFileName,
                                      nonredactFileName=nonredactFileName,
                                      logFileName="ns_server.goxdcr.log")
        finally:
            """ clean up xdcr """
            rest_dest.delete_bucket()
            rest_src.remove_all_replications()
            rest_src.remove_all_remote_clusters()
            if self.interrupt_replication:
                shell = RemoteMachineShellConnection(self.master)
                shell.disable_firewall()
                shell.disconnect()
Author: arod1987, Project: testrunner, Lines of code: 65, Source file: log_redaction_tests.py
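
The file-name handling near the end of example 2 is plain string manipulation on the path returned by log collection. A tiny standalone sketch with a made-up path shows what each variable ends up holding:

    # Hypothetical path; only illustrates the string handling from example 2.
    logs_path = "/tmp/cbcollect/collectinfo-2018-redacted.zip"
    redactFileName = logs_path.split('/')[-1]                    # 'collectinfo-2018-redacted.zip'
    nonredactFileName = redactFileName.replace('-redacted', '')  # 'collectinfo-2018.zip'
    remotepath = logs_path[0:logs_path.rfind('/') + 1]           # '/tmp/cbcollect/'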

Example 3: PerfBase

# Required import: from lib.membase.api.rest_client import RestConnection
# Method shown: RestConnection.create_bucket

#......... part of the code omitted here .........

    def _get_bucket_names(self, num_buckets):
        """
        Get a list of bucket names
        """
        if num_buckets > 1:
            buckets = ['bucket-{0}'.format(i) for i in range(num_buckets)]
        else:
            buckets = [self.param('bucket', 'default')]

        return buckets

    def get_bucket_conf(self):
        """ retrieve bucket configurations"""

        num_buckets = max(self.parami('num_buckets', 1),
                          self.parami('xdcr_num_buckets', 1))
        self.buckets = self._get_bucket_names(num_buckets)

    def set_up_buckets(self):
        """Set up data bucket(s)"""

        self.log.info("setting up buckets")

        self.get_bucket_conf()

        for bucket in self.buckets:
            bucket_ram_quota = self.parami('mem_quota', PerfDefaults.mem_quota)
            bucket_ram_quota /= max(self.parami('num_buckets', 1),
                                    self.parami('xdcr_num_buckets', 1))
            replicas = self.parami('replicas', getattr(self, 'replicas', 1))
            index_replicas = self.parami('index_replicas', 1)

            self.rest.create_bucket(bucket=bucket, ramQuotaMB=bucket_ram_quota,
                                    replicaNumber=replicas, authType='sasl',
                                    replica_index=index_replicas)

            status = self.rest_helper.vbucket_map_ready(bucket, 60)
            self.assertTrue(status, msg='vbucket_map not ready .. timed out')
            status = self.rest_helper.bucket_exists(bucket)
            self.assertTrue(status,
                            msg='unable to create {0} bucket'.format(bucket))

    def reconfigure(self):
        """Customize basic Couchbase setup"""
        self.log.info("customizing setup")

        self.set_loglevel()
        self.customize_xdcr_settings()
        self.set_autocompaction()
        self.set_exp_pager_stime()
        self.set_rebalance_options()

    def set_rebalance_options(self):
        # rebalanceMovesBeforeCompaction
        rmbc = self.parami('rebalance_moves_before_compaction', 0)
        if rmbc:
            cmd = 'ns_config:set(rebalance_moves_before_compaction, {0}).'\
                .format(rmbc)
            self.rest.diag_eval(cmd)

    def set_exp_pager_stime(self):
        exp_pager_stime = self.param('exp_pager_stime',
                                     PerfDefaults.exp_pager_stime)
        if exp_pager_stime != PerfDefaults.exp_pager_stime:
            self.set_ep_param('flush_param', 'exp_pager_stime', exp_pager_stime)
Author: (not listed), Project: (not listed), Lines of code: 70, Source file: (not listed)
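
The per-bucket RAM split in set_up_buckets above is easy to check by hand; the numbers in this short sketch are made up and only illustrate the arithmetic:

    # Hypothetical values illustrating the quota split done in set_up_buckets.
    mem_quota = 2048                                # stand-in for parami('mem_quota', ...)
    num_buckets = 4                                 # max(num_buckets, xdcr_num_buckets)
    per_bucket_quota = mem_quota // num_buckets     # 2048 MB over 4 buckets -> 512 MB each
    buckets = ['bucket-{0}'.format(i) for i in range(num_buckets)]
    print(buckets)              # ['bucket-0', 'bucket-1', 'bucket-2', 'bucket-3']
    print(per_bucket_quota)     # 512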


Note: The lib.membase.api.rest_client.RestConnection.create_bucket examples in this article were compiled from open-source projects hosted on GitHub and similar platforms. The code snippets remain under the copyright of their original authors; consult the corresponding project's license before reusing or redistributing them, and do not republish without permission.