当前位置: 首页>>代码示例>>Python>>正文


Python RestConnection.get_buckets方法代码示例

本文整理汇总了Python中membase.api.rest_client.RestConnection.get_buckets方法的典型用法代码示例。如果您正苦于以下问题:Python RestConnection.get_buckets方法的具体用法?Python RestConnection.get_buckets怎么用?Python RestConnection.get_buckets使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在membase.api.rest_client.RestConnection的用法示例。


在下文中一共展示了RestConnection.get_buckets方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: test_max_buckets

# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_buckets [as 别名]
    def test_max_buckets(self):
        """Create as many 100MB buckets as the node's memcached memory
        reserve allows, then verify they all appear in the REST bucket list.

        Fails the test if the listed bucket count differs from the number
        created.
        """
        log = logger.Logger.get_logger()
        serverInfo = self.servers[0]
        log.info('picking server : {0} as the master'.format(serverInfo))
        rest = RestConnection(serverInfo)
        proxyPort = rest.get_nodes_self().moxi
        info = rest.get_nodes_self()
        rest.init_cluster(username=serverInfo.rest_username,
                          password=serverInfo.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
        bucket_ram = 100
        # FIX: floor division — range() requires an int, and plain '/'
        # produces a float on Python 3 when the quota is not a multiple
        # of bucket_ram.
        bucket_count = info.mcdMemoryReserved // bucket_ram

        for i in range(bucket_count):
            bucket_name = 'max_buckets-{0}'.format(uuid.uuid4())
            rest.create_bucket(bucket=bucket_name,
                               ramQuotaMB=bucket_ram,
                               authType='sasl', proxyPort=proxyPort)
            ready = BucketOperationHelper.wait_for_memcached(serverInfo, bucket_name)
            self.assertTrue(ready, "wait_for_memcached failed")

        buckets = []
        try:
            buckets = rest.get_buckets()
        except Exception:
            # The REST endpoint can transiently fail right after heavy
            # bucket creation; retry once after a short pause.
            log.info('15 seconds sleep before calling get_buckets again...')
            time.sleep(15)
            buckets = rest.get_buckets()
        if len(buckets) != bucket_count:
            msg = 'tried to create {0} buckets, only created {1}'.format(bucket_count, len(buckets))
            log.error(msg)
            self.fail(msg=msg)
开发者ID:Boggypop,项目名称:testrunner,代码行数:34,代码来源:createbuckettests.py

示例2: delete_all_buckets_or_assert

# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_buckets [as 别名]
 def delete_all_buckets_or_assert(servers, test_case):
     """Delete every bucket on every server in *servers*.

     If *test_case* is given, fail it when a bucket still exists after
     waiting (up to 200 retries) for its deletion; also dump the bucket's
     "timings" stats (best effort) whenever deletion looks unhealthy.
     """
     log = logger.Logger.get_logger()
     log.info('deleting existing buckets on {0}'.format(servers))
     for serverInfo in servers:
         rest = RestConnection(serverInfo)
         buckets = []
         try:
             buckets = rest.get_buckets()
         # FIX: narrowed from a bare 'except:', which would also swallow
         # KeyboardInterrupt/SystemExit.
         except Exception:
             log.info('15 seconds sleep before calling get_buckets again...')
             time.sleep(15)
             buckets = rest.get_buckets()
         for bucket in buckets:
             status = rest.delete_bucket(bucket.name)
             if not status:
                 try:
                     log.info(StatsCommon.get_stats([serverInfo], bucket.name, "timings"))
                 except Exception:
                     log.error("Unable to get timings for bucket")
             log.info('deleted bucket : {0} from {1}'.format(bucket.name, serverInfo.ip))
             msg = 'bucket "{0}" was not deleted even after waiting for two minutes'.format(bucket.name)
             if test_case:
                 if not BucketOperationHelper.wait_for_bucket_deletion(bucket.name, rest, 200):
                     try:
                         log.info(StatsCommon.get_stats([serverInfo], bucket.name, "timings"))
                     except Exception:
                         log.error("Unable to get timings for bucket")
                     test_case.fail(msg)
开发者ID:mschoch,项目名称:testrunner,代码行数:30,代码来源:bucket_helper.py

示例3: common_setup

# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_buckets [as 别名]
    def common_setup(input, testcase, bucket_ram_ratio=(2.8 / 3.0), replica=0):
        """Wipe the cluster, re-initialize the first server as master, create
        one bucket sized by the ram ratios, and wait for its memcached side.

        The bucket is non-SASL only when the test param "ascii" is "true".
        """
        log = logger.Logger.get_logger()
        servers = input.servers
        BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
        ClusterOperationHelper.cleanup_cluster(servers)
        ClusterHelper.wait_for_ns_servers_or_assert(servers, testcase)
        serverInfo = servers[0]

        log.info('picking server : {0} as the master'.format(serverInfo))
        # When all nodes share one machine, base_bucket_ratio scales the
        # per-bucket share down accordingly.
        node_ram_ratio = BucketOperationHelper.base_bucket_ratio(servers)
        rest = RestConnection(serverInfo)
        info = rest.get_nodes_self()
        rest.init_cluster(username=serverInfo.rest_username,
                          password=serverInfo.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=int(info.mcdMemoryReserved * node_ram_ratio))
        test_params = TestInputSingleton.input.test_params
        use_ascii = "ascii" in test_params and test_params["ascii"].lower() == "true"
        BucketOperationHelper.create_multiple_buckets(serverInfo, replica,
                                                      node_ram_ratio * bucket_ram_ratio,
                                                      howmany=1, sasl=not use_ascii)
        for bucket in rest.get_buckets():
            ready = BucketOperationHelper.wait_for_memcached(serverInfo, bucket.name)
            testcase.assertTrue(ready, "wait_for_memcached failed")
开发者ID:jchris,项目名称:testrunner,代码行数:29,代码来源:rebalancingtests.py

示例4: items_verification

# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_buckets [as 别名]
 def items_verification(test, master):
     """Assert that item totals converge across the cluster for every bucket
     on *master*, failing *test* after the timeout."""
     timeout = 600
     rest = RestConnection(master)
     for bucket in rest.get_buckets():
         matched = RebalanceHelper.wait_till_total_numbers_match(master, bucket.name, timeout_in_seconds=timeout)
         test.assertTrue(matched, "Lost items!!.. failing test in {0} secs".format(timeout))
开发者ID:Boggypop,项目名称:testrunner,代码行数:9,代码来源:swaprebalance.py

示例5: common_tearDown

# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_buckets [as 别名]
    def common_tearDown(servers, testcase):
        """Restart couchbase-server on all nodes, best-effort flush every
        bucket, then delete all buckets and clean the cluster up."""
        log = logger.Logger.get_logger()
        log.info(
            "==============  common_tearDown was started for test #{0} {1} ==============".format(
                testcase.case_number, testcase._testMethodName
            )
        )
        RemoteUtilHelper.common_basic_setup(servers)

        log.info("10 seconds delay to wait for couchbase-server to start")
        time.sleep(10)
        ClusterOperationHelper.wait_for_ns_servers_or_assert(
            servers, testcase, wait_time=AutoFailoverBaseTest.MAX_FAIL_DETECT_TIME * 15, wait_if_warmup=True
        )
        try:
            # FIX: original used self._servers[0], but 'self' does not exist
            # in this function's scope (it takes 'servers'); the NameError
            # was silently swallowed below, so the flush never ran.
            rest = RestConnection(servers[0])
            buckets = rest.get_buckets()
            for bucket in buckets:
                MemcachedClientHelper.flush_bucket(servers[0], bucket.name)
        except Exception:
            # Best-effort flush: failures here must not abort teardown.
            pass
        BucketOperationHelper.delete_all_buckets_or_assert(servers, testcase)
        ClusterOperationHelper.cleanup_cluster(servers)
        log.info(
            "==============  common_tearDown was finished for test #{0} {1} ==============".format(
                testcase.case_number, testcase._testMethodName
            )
        )
开发者ID:jason-hou,项目名称:testrunner,代码行数:30,代码来源:autofailovertests.py

示例6: _cluster_setup

# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_buckets [as 别名]
    def _cluster_setup(self):
        """Initialize the cluster master, rebalance all servers in, create
        either one "default" bucket or several buckets, wait for memcached,
        then load keys-count keys into each bucket."""
        # FIX: 'log' is used at the bottom of this method but was never
        # defined here (the sibling variant of this method does define it).
        log = logger.Logger.get_logger()
        replicas = self.input.param("replicas", 1)
        keys_count = self.input.param("keys-count", 0)
        num_buckets = self.input.param("num-buckets", 1)

        bucket_name = "default"
        master = self.servers[0]
        credentials = self.input.membase_settings
        rest = RestConnection(self.master)
        info = rest.get_nodes_self()
        rest.init_cluster(username=self.master.rest_username,
                          password=self.master.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
        rest.reset_autofailover()
        ClusterOperationHelper.add_and_rebalance(self.servers, True)

        if num_buckets == 1:
            # FIX: floor division keeps ramQuotaMB an int (plain '/' yields
            # a float on Python 3).
            bucket_ram = info.memoryQuota * 2 // 3
            rest.create_bucket(bucket=bucket_name,
                               ramQuotaMB=bucket_ram,
                               replicaNumber=replicas,
                               proxyPort=info.moxi)
        else:
            created = BucketOperationHelper.create_multiple_buckets(self.master, replicas, howmany=num_buckets)
            self.assertTrue(created, "unable to create multiple buckets")

        buckets = rest.get_buckets()
        for bucket in buckets:
            ready = BucketOperationHelper.wait_for_memcached(self.master, bucket.name)
            self.assertTrue(ready, msg="wait_for_memcached failed")

        for bucket in buckets:
            inserted_keys_cnt = self.load_data(self.master, bucket.name, keys_count)
            log.info('inserted {0} keys'.format(inserted_keys_cnt))
开发者ID:arod1987,项目名称:testrunner,代码行数:36,代码来源:autofailovertests.py

示例7: start_access_phase

# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_buckets [as 别名]
 def start_access_phase(self, master):
     """Spawn one daemon mcsoda loader thread per bucket on *master* with a
     mixed set/delete/expire workload, start them all, and return the list
     of {"mcsoda": loader, "thread": thread} dicts so callers can join."""
     loaders = []
     rest = RestConnection(master)
     for bucket in rest.get_buckets():
         loader = dict()
         loader["mcsoda"] = LoadWithMcsoda(
             master,
             # FIX: floor division — the key count must be an int
             # (plain '/' is a float on Python 3).
             self.keys_count // 2,
             bucket=bucket.name,
             password=bucket.saslPassword,
             prefix=str(bucket.name),
             port=8091,
         )
         loader["mcsoda"].cfg["ratio-sets"] = 0.8
         loader["mcsoda"].cfg["ratio-hot"] = 0.2
         loader["mcsoda"].cfg["ratio-creates"] = 0.5
         loader["mcsoda"].cfg["ratio-deletes"] = self.ratio_deletes
         loader["mcsoda"].cfg["ratio-expirations"] = self.ratio_expiry
         loader["mcsoda"].cfg["json"] = 0
         loader["thread"] = Thread(target=loader["mcsoda"].load_data, name="mcloader_" + bucket.name)
         # Daemon threads so a hung loader cannot block interpreter exit.
         loader["thread"].daemon = True
         loaders.append(loader)
     for loader in loaders:
         loader["thread"].start()
     return loaders
开发者ID:jason-hou,项目名称:testrunner,代码行数:27,代码来源:swaprebalance.py

示例8: xdcr_start_replication

# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_buckets [as 别名]
def xdcr_start_replication(src_master, dest_cluster_name, bucketFilter = None):
        """Start continuous XDCR replication from every bucket on
        *src_master* to *dest_cluster_name*; if *bucketFilter* is given,
        only buckets whose names it contains are replicated."""
        rest_conn_src = RestConnection(src_master)
        for bucket in rest_conn_src.get_buckets():
            # Skip buckets excluded by the optional filter.
            if bucketFilter is not None and bucket.name not in bucketFilter:
                continue
            rep_id = rest_conn_src.start_replication("continuous",
                                                     bucket.name, dest_cluster_name)
            logger.error("rep_id: %s" %rep_id)
开发者ID:lichia,项目名称:testrunner,代码行数:9,代码来源:rest_client_tasks.py

示例9: get_vBuckets_info

# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_buckets [as 别名]
    def get_vBuckets_info(master):
        """
        return state and count items for all vbuckets for each node
        format: dict: {u'1node_ip1': {'vb_79': ['replica', '0'], 'vb_78': ['active', '0']..}, u'1node_ip1':....}
        """
        rest = RestConnection(master)
        port = rest.get_nodes_self().memcached
        nodes = rest.node_statuses()
        _nodes_stats= {}
        for node in nodes:
            stat={}
            buckets = []
            _server = {"ip": node.ip, "port": node.port, "username": master.rest_username,
                           "password": master.rest_password}
            try:
                buckets = rest.get_buckets()
                mc = MemcachedClient(node.ip, port)
                stat_hash = mc.stats("hash")
            except Exception:
                if not buckets:
                    log.error("There are not any buckets in {0}:{1} node".format(node.ip, node.port))
                else:
                    log.error("Impossible to get vBucket's information for {0}:{1} node".format(node.ip, node.port))
                    # FIX: the original line was a bare dict subscript
                    # ('_nodes_stats[...]') which raised an uncaught KeyError;
                    # record an empty stats dict for the unreachable node
                    # instead. NOTE(review): the success path keys on the
                    # memcached port while this keys on node.port — confirm
                    # which is intended.
                    _nodes_stats[node.ip + ":" + str(node.port)] = {}
                continue
            mc.close()
            # Hash stat keys look like 'vb_<n>:<field>'; collect the distinct
            # vbucket name prefixes.
            vb_names=[key[:key.index(":")] for key in stat_hash.keys()]

            for name in vb_names:
                stat[name]=[stat_hash[name + ":state"], stat_hash[name+":counted"]]
            _nodes_stats[node.ip+":"+str(port)] = stat
        log.info(_nodes_stats)
        return _nodes_stats
开发者ID:jchris,项目名称:testrunner,代码行数:35,代码来源:rebalance_helper.py

示例10: _cluster_setup

# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_buckets [as 别名]
    def _cluster_setup(self):
        """Initialize the master, add all nodes, create one or more buckets,
        rebalance, load keys-count keys per bucket, and assert the rebalance
        finished."""
        log = logger.Logger.get_logger()

        replicas = self._input.param("replicas", 1)
        keys_count = self._input.param("keys-count", 0)
        num_buckets = self._input.param("num-buckets", 1)

        bucket_name = "default"
        master = self._servers[0]
        credentials = self._input.membase_settings
        rest = RestConnection(master)
        info = rest.get_nodes_self()
        rest.init_cluster(username=master.rest_username,
                          password=master.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
        rest.reset_autofailover()
        ClusterOperationHelper.add_all_nodes_or_assert(master, self._servers, credentials, self)
        # FIX: floor division keeps ramQuotaMB an int (plain '/' yields a
        # float on Python 3).
        bucket_ram = info.memoryQuota * 2 // 3

        if num_buckets == 1:
            rest.create_bucket(bucket=bucket_name,
                               ramQuotaMB=bucket_ram,
                               replicaNumber=replicas,
                               proxyPort=info.moxi)
            ready = BucketOperationHelper.wait_for_memcached(master, bucket_name)
            nodes = rest.node_statuses()
            rest.rebalance(otpNodes=[node.id for node in nodes], ejectedNodes=[])
            buckets = rest.get_buckets()
        else:
            created = BucketOperationHelper.create_multiple_buckets(master, replicas, howmany=num_buckets)
            self.assertTrue(created, "unable to create multiple buckets")
            buckets = rest.get_buckets()
            for bucket in buckets:
                ready = BucketOperationHelper.wait_for_memcached(master, bucket.name)
                self.assertTrue(ready, msg="wait_for_memcached failed")
                nodes = rest.node_statuses()
                rest.rebalance(otpNodes=[node.id for node in nodes], ejectedNodes=[])

        for bucket in buckets:
            inserted_keys_cnt = self.load_data(master, bucket.name, keys_count)
            log.info('inserted {0} keys'.format(inserted_keys_cnt))

        msg = "rebalance failed after adding these nodes {0}".format(nodes)
        self.assertTrue(rest.monitorRebalance(), msg=msg)
        # NOTE(review): 'ready' here is the value from the last branch/loop
        # iteration above — confirm that re-asserting it is intentional.
        self.assertTrue(ready, "wait_for_memcached failed")
开发者ID:IrynaMironava,项目名称:testrunner,代码行数:49,代码来源:autofailovertests.py

示例11: test_max_buckets

# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_buckets [as 别名]
    def test_max_buckets(self):
        """Create the cluster's configured maximum number of buckets, verify
        the count, then assert that creating one more bucket raises
        BucketCreationException and leaves the count unchanged."""
        log = logger.Logger.get_logger()
        serverInfo = self.servers[0]
        log.info('picking server : {0} as the master'.format(serverInfo))
        rest = RestConnection(serverInfo)
        proxyPort = rest.get_nodes_self().moxi
        info = rest.get_nodes_self()
        rest.init_cluster(username=serverInfo.rest_username,
                          password=serverInfo.rest_password)
        rest.init_cluster_memoryQuota(memoryQuota=info.mcdMemoryReserved)
        # NOTE(review): assumes get_internalSettings returns an int here
        # (range() below requires one) — confirm.
        bucket_num = rest.get_internalSettings("maxBucketCount")
        bucket_ram = 100
        testuser = [{'id': 'cbadminbucket', 'name': 'cbadminbucket',
                                                'password': 'password'}]
        rolelist = [{'id': 'cbadminbucket', 'name': 'cbadminbucket',
                                                      'roles': 'admin'}]
        RbacBase().create_user_source(testuser, 'builtin', self.master)
        RbacBase().add_user_role(rolelist, RestConnection(self.master), 'builtin')

        for i in range(bucket_num):
            bucket_name = 'max_buckets-{0}'.format(uuid.uuid4())
            rest.create_bucket(bucket=bucket_name,
                               ramQuotaMB=bucket_ram,
                               authType='sasl', proxyPort=proxyPort)
            ready = BucketOperationHelper.wait_for_memcached(serverInfo, bucket_name)
            self.assertTrue(ready, "wait_for_memcached failed")

        buckets = rest.get_buckets()
        if len(buckets) != bucket_num:
            # FIX: the original formatted with the undefined name
            # 'bucket_count', raising NameError on this failure path.
            msg = 'tried to create {0} buckets, only created {1}'.format(bucket_num, len(buckets))
            self.fail(msg)
        try:
            rest.create_bucket(bucket=bucket_name,
                               ramQuotaMB=bucket_ram,
                               authType='sasl', proxyPort=proxyPort)
            msg = 'bucket creation did not fail even though system was overcommited'
            self.fail(msg)
        except BucketCreationException as ex:
            self.log.info('BucketCreationException was thrown as expected when we try to create {0} buckets'.
                          format(bucket_num + 1))
        buckets = rest.get_buckets()
        if len(buckets) != bucket_num:
            msg = 'tried to create {0} buckets, only created {1}'.format(bucket_num + 1, len(buckets))
            self.fail(msg)
开发者ID:arod1987,项目名称:testrunner,代码行数:48,代码来源:createbuckettests.py

示例12: test_rebalance_in

# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_buckets [as 别名]
 def test_rebalance_in(self):
     """Repeatedly load/mutate data and rebalance nodes in, one batch at a
     time, until every server has joined the cluster; verify the kv-store
     contents and replication after each rebalance.
     """
     log = logger.Logger().get_logger()
     master = self._servers[0]
     # Workload knobs, all overridable via test params.
     num_of_docs = TestInputSingleton.input.param("num_of_docs", 100000)
     # NOTE(review): a default of 100000 for 'replica' looks like a
     # copy-paste of num_of_docs; common_setup below hard-codes replica=1
     # anyway — confirm which value is intended.
     replica = TestInputSingleton.input.param("replica", 100000)
     add_items_count = TestInputSingleton.input.param("num_of_creates", 30000)
     rebalance_in = TestInputSingleton.input.param("rebalance_in", 1)
     size = TestInputSingleton.input.param("item_size", 256)
     # Shared mcsoda-style load parameters; the seed keys this run's docs.
     params = {"sizes": [size], "count": num_of_docs, "seed": str(uuid.uuid4())[:7]}
     RebalanceBaseTest.common_setup(self._input, self, replica=1)
     rest = RestConnection(master)
     buckets = rest.get_buckets()
     bucket_data = {}
     generators = {}
     # One client-side kv store per bucket to track expected contents.
     for bucket in buckets:
         bucket_data[bucket.name] = {"kv_store": ClientKeyValueStore()}
     # Loop until all configured servers are members of the cluster.
     while len(rest.node_statuses()) < len(self._servers):
         for bucket in buckets:
             kv_store = bucket_data[bucket.name]["kv_store"]
             add_items_seed = str(uuid.uuid4())[:7]
             self._add_items(add_items_seed, bucket, add_items_count, kv_store)
             errors = RebalanceDataGenerator.do_verification(kv_store, rest, bucket.name)
             if errors:
                 log.error("verification returned {0} errors".format(len(errors)))
             load_set_ops = {"ops": "set", "bucket": bucket.name}
             load_set_ops.update(params)
             # Delete a fifth of the just-added items, reusing their seed so
             # the delete load targets the same keys.
             load_delete_ops = {
                 "ops": "delete",
                 "bucket": bucket.name,
                 "sizes": [size],
                 "count": add_items_count / 5,
                 "seed": add_items_seed,
             }
             # Background set load running concurrently with the rebalance.
             thread = RebalanceDataGenerator.start_load(
                 rest, bucket.name, RebalanceDataGenerator.create_loading_tasks(load_set_ops), kv_store
             )
             generators["set"] = {"thread": thread}
             # restart three times
             generators["set"]["thread"].start()
             # Background delete load, also concurrent with the rebalance.
             thread = RebalanceDataGenerator.start_load(
                 rest, bucket.name, RebalanceDataGenerator.create_loading_tasks(load_delete_ops), kv_store
             )
             generators["delete"] = {"thread": thread}
             generators["delete"]["thread"].start()
         self.log.info("current nodes : {0}".format([node.id for node in rest.node_statuses()]))
         # Add 'rebalance_in' more nodes and rebalance while the loaders run.
         rebalanced_in, which_servers = RebalanceBaseTest.rebalance_in(self._servers, rebalance_in)
         self.assertTrue(rebalanced_in, msg="unable to add and rebalance more nodes")
         # Verify kv-store contents against the cluster after the rebalance.
         for bucket in buckets:
             kv_store = bucket_data[bucket.name]["kv_store"]
             errors = RebalanceDataGenerator.do_verification(kv_store, rest, bucket.name)
             if errors:
                 log.error("verification returned {0} errors".format(len(errors)))
         # NOTE(review): only the most recent pair of loader threads is
         # joined here — with multiple buckets, earlier threads are
         # overwritten in 'generators' and never joined. Confirm intended.
         generators["set"]["thread"].join()
         generators["delete"]["thread"].join()
         for bucket in buckets:
             kv_store = bucket_data[bucket.name]["kv_store"]
             bucket_data[bucket.name]["items_inserted_count"] = len(kv_store.valid_items())
             RebalanceBaseTest.replication_verification(master, bucket_data, replica, self)
开发者ID:DavidAlphaFox,项目名称:couchbase,代码行数:60,代码来源:rebalancingtests.py

示例13: _parallel_read

# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_buckets [as 别名]
 def _parallel_read(self):
     """Continuously GET every previously inserted key of every bucket until
     self.reader_shutdown is set; used as a background reader thread body."""
     rest = RestConnection(self.master)
     buckets = rest.get_buckets()
     while not self.reader_shutdown:
         for bucket in buckets:
             name = bucket.name.encode("ascii", "ignore")
             mc = MemcachedClientHelper.direct_client(self.master, name)
             try:
                 for key in self.bucket_data[name]["inserted_keys"]:
                     mc.get(key)
             finally:
                 # FIX: the original leaked one memcached connection per
                 # bucket on every pass of the loop; always close it.
                 mc.close()
开发者ID:arod1987,项目名称:testrunner,代码行数:11,代码来源:drainratetests.py

示例14: xdcr_start_replication

# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_buckets [as 别名]
def xdcr_start_replication(src_master, dest_cluster_name, bucket_name, xdcr_params):
    """Start continuous XDCR replication to *dest_cluster_name* for the
    bucket on *src_master* named *bucket_name*, passing *xdcr_params*
    through to the REST call."""
    src_rest = RestConnection(src_master)
    matching = [b for b in src_rest.get_buckets() if b.name == bucket_name]
    for bucket in matching:
        rep_id = src_rest.start_replication("continuous",
                                            bucket.name,
                                            dest_cluster_name,
                                            xdcr_params=xdcr_params)
        logger.error("rep_id: %s" %rep_id)
开发者ID:EricACooper,项目名称:testrunner,代码行数:11,代码来源:rest_client_tasks.py

示例15: create_buckets

# 需要导入模块: from membase.api.rest_client import RestConnection [as 别名]
# 或者: from membase.api.rest_client.RestConnection import get_buckets [as 别名]
 def create_buckets(servers, testcase, howmany=1, replica=1, bucket_ram_ratio=(2.0 / 3.0)):
     """Create *howmany* buckets on the first server, sizing them by the
     node and bucket ram ratios, then wait for each bucket's memcached
     side to come up (failing *testcase* if one does not)."""
     master = servers[0]
     node_ram_ratio = BucketOperationHelper.base_bucket_ratio(servers)
     BucketOperationHelper.create_multiple_buckets(master, replica, node_ram_ratio * bucket_ram_ratio, howmany=howmany)
     for bucket in RestConnection(master).get_buckets():
         ready = BucketOperationHelper.wait_for_memcached(master, bucket.name)
         testcase.assertTrue(ready, "wait_for_memcached failed")
开发者ID:vmx,项目名称:testrunner,代码行数:11,代码来源:xdcr.py


注:本文中的membase.api.rest_client.RestConnection.get_buckets方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。