

Python RestHelper.is_ns_server_running Method Code Examples

This article compiles typical usage examples of the Python method membase.api.rest_client.RestHelper.is_ns_server_running. If you are looking for concrete examples of how to call RestHelper.is_ns_server_running, the hand-picked code examples below should help. You can also explore further usage examples of the class it belongs to, membase.api.rest_client.RestHelper.


The following shows 7 code examples of the RestHelper.is_ns_server_running method, sorted by popularity by default.
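
Before diving into the examples, here is a minimal usage sketch (not one of the seven examples below) of the pattern most of them share: wrap a RestConnection to a cluster node in a RestHelper and call is_ns_server_running to wait until ns_server answers REST requests, typically after a node restart or upgrade. The server object, helper function name, and timeout value are illustrative assumptions, not part of the original examples.

# Minimal usage sketch; `server` is assumed to be a reachable Couchbase/Membase
# node object carrying REST credentials, and the timeout value is illustrative.
from membase.api.rest_client import RestConnection, RestHelper

def wait_for_ns_server(server, timeout_in_seconds=120):
    rest = RestConnection(server)
    helper = RestHelper(rest)
    # is_ns_server_running polls the node's REST API and returns True once
    # ns_server responds within the timeout, False otherwise.
    return helper.is_ns_server_running(timeout_in_seconds=timeout_in_seconds)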

Example 1: cleanup_cluster

# Required import: from membase.api.rest_client import RestHelper [as alias]
# Or: from membase.api.rest_client.RestHelper import is_ns_server_running [as alias]
    def cleanup_cluster(servers, wait_for_rebalance=True, master=None):
        log = logger.Logger.get_logger()
        if master is None:
            master = servers[0]
        rest = RestConnection(master)
        helper = RestHelper(rest)
        helper.is_ns_server_running(timeout_in_seconds=testconstants.NS_SERVER_TIMEOUT)
        nodes = rest.node_statuses()
        master_id = rest.get_nodes_self().id
        for node in list(nodes):  # iterate over a copy because the loop removes items from nodes
            if int(node.port) in xrange(9091, 9991):
                rest.eject_node(node)
                nodes.remove(node)

        if len(nodes) > 1:
            log.info("rebalancing all nodes in order to remove nodes")
            rest.log_client_error("Starting rebalance from test, ejected nodes %s" % \
                                                             [node.id for node in nodes if node.id != master_id])
            removed = helper.remove_nodes(knownNodes=[node.id for node in nodes],
                                          ejectedNodes=[node.id for node in nodes if node.id != master_id],
                                          wait_for_rebalance=wait_for_rebalance)
            success_cleaned = []
            for removed in [node for node in nodes if (node.id != master_id)]:
                removed.rest_password = servers[0].rest_password
                removed.rest_username = servers[0].rest_username
                try:
                    rest = RestConnection(removed)
                except Exception as ex:
                    log.error("can't create rest connection after rebalance out for ejected nodes,\
                        will retry after 10 seconds according to MB-8430: {0} ".format(ex))
                    time.sleep(10)
                    rest = RestConnection(removed)
                start = time.time()
                while time.time() - start < 30:
                    if len(rest.get_pools_info()["pools"]) == 0:
                        success_cleaned.append(removed)
                        break
                    else:
                        time.sleep(0.1)
                if time.time() - start > 10:
                    log.error("'pools' on node {0}:{1} - {2}".format(
                           removed.ip, removed.port, rest.get_pools_info()["pools"]))
            for node in set([node for node in nodes if (node.id != master_id)]) - set(success_cleaned):
                log.error("node {0}:{1} was not cleaned after removing from cluster".format(
                           node.ip, node.port))
                try:
                    rest = RestConnection(node)
                    rest.force_eject_node()
                except Exception as ex:
                    log.error("force_eject_node {0}:{1} failed: {2}".format(node.ip, node.port, ex))
            if len(set([node for node in nodes if (node.id != master_id)])\
                    - set(success_cleaned)) != 0:
                raise Exception("not all ejected nodes were cleaned successfully")

            log.info("removed all the nodes from cluster associated with {0} ? {1}".format(servers[0], \
                    [(node.id, node.port) for node in nodes if (node.id != master_id)]))
Author: EricACooper, Project: testrunner, Lines of code: 58, Source file: cluster_helper.py

Example 2: cleanup_cluster

# Required import: from membase.api.rest_client import RestHelper [as alias]
# Or: from membase.api.rest_client.RestHelper import is_ns_server_running [as alias]
    def cleanup_cluster(servers, wait_for_rebalance=True):
        log = logger.Logger.get_logger()
        rest = RestConnection(servers[0])
        helper = RestHelper(rest)
        helper.is_ns_server_running(timeout_in_seconds=testconstants.NS_SERVER_TIMEOUT)
        nodes = rest.node_statuses()
        master_id = rest.get_nodes_self().id
        if len(nodes) > 1:
            log.info("rebalancing all nodes in order to remove nodes")
            removed = helper.remove_nodes(knownNodes=[node.id for node in nodes],
                                          ejectedNodes=[node.id for node in nodes if node.id != master_id],
                                          wait_for_rebalance=wait_for_rebalance)
            log.info("removed all the nodes from cluster associated with {0} ? {1}".format(servers[0], removed))
Author: couchbaselabs, Project: litmus, Lines of code: 15, Source file: cluster_helper.py

Example 3: test_crash_entire_cluster

# Required import: from membase.api.rest_client import RestHelper [as alias]
# Or: from membase.api.rest_client.RestHelper import is_ns_server_running [as alias]
    def test_crash_entire_cluster(self):

        self.cluster.rebalance(
            [self.master],
            self.servers[1:], [])


        vbucket = 0
        nodeA = self.servers[0]
        n = 10000
        self.load_docs(nodeA, vbucket, n)

        dcp_client = self.dcp_client(nodeA, PRODUCER)
        stream = dcp_client.stream_req(vbucket, 0, 0, 2*n, 0)
        self.load_docs(nodeA, vbucket, n)

        # stop all nodes
        node_range = range(len(self.servers))
        for i in node_range:
            assert self.stop_node(i)
        time.sleep(2)

        # start all nodes in reverse order
        node_range.reverse()
        for i in node_range:
            assert self.start_node(i)

        rest = RestHelper(RestConnection(nodeA))
        assert rest.is_ns_server_running()

        _, _, high_seqno = self.vb_info(nodeA, vbucket)
        dcp_client = self.dcp_client(nodeA, PRODUCER)
        stream = dcp_client.stream_req(vbucket, 0, 0, high_seqno, 0)
        stream.run()
        assert stream.last_by_seqno == high_seqno
Author: DavidAlphaFox, Project: couchbase, Lines of code: 37, Source file: crashtests.py

Example 4: test_stream_after_warmup

# Required import: from membase.api.rest_client import RestHelper [as alias]
# Or: from membase.api.rest_client.RestHelper import is_ns_server_running [as alias]
    def test_stream_after_warmup(self):

        nodeA = self.servers[0]
        bucket = 'standard_bucket'+str(self.standard_buckets-1)
        originalVbInfo = self.all_vb_info(nodeA, bucket = bucket)
        expectedVbSeqno = {}

        # load all buckets
        doc_gen = BlobGenerator(
            'dcpdata', 'dcpdata-', self.value_size, end=self.num_items)
        self._load_all_buckets(self.master, doc_gen, "create", 0)
        self._wait_for_stats_all_buckets([nodeA])

        # store expected vb seqnos
        originalVbInfo = self.all_vb_info(nodeA, bucket = bucket)


        # restart node
        assert self.stop_node(0)
        time.sleep(5)
        assert self.start_node(0)
        rest = RestHelper(RestConnection(nodeA))
        assert rest.is_ns_server_running()
        time.sleep(2)

        # verify original vbInfo can be streamed
        dcp_client = self.dcp_client(nodeA, PRODUCER, auth_user = bucket)
        for vbucket in originalVbInfo:
            vb_uuid, _, high_seqno = originalVbInfo[vbucket]
            stream = dcp_client.stream_req(vbucket, 0, 0, high_seqno, vb_uuid)
            responses = stream.run()
            assert high_seqno == stream.last_by_seqno
Author: abhinavdangeti, Project: testrunner, Lines of code: 34, Source file: multibucket.py

Example 5: test_crash_while_streaming

# Required import: from membase.api.rest_client import RestHelper [as alias]
# Or: from membase.api.rest_client.RestHelper import is_ns_server_running [as alias]
    def test_crash_while_streaming(self):

        vbucket = 0
        nodeA = self.servers[0]
        n = 10000
        self.load_docs(nodeA, vbucket, n)

        dcp_client = self.dcp_client(nodeA, PRODUCER)
        stream = dcp_client.stream_req(vbucket, 0, 0, 2*n, 0)
        self.load_docs(nodeA, vbucket, n)
        assert self.stop_node(0)
        time.sleep(2)
        assert self.start_node(0)
        rest = RestHelper(RestConnection(nodeA))
        assert rest.is_ns_server_running()
        time.sleep(2)

        _, _, high_seqno = self.vb_info(nodeA, vbucket)
        dcp_client = self.dcp_client(nodeA, PRODUCER)
        stream = dcp_client.stream_req(vbucket, 0, 0, high_seqno, 0)
        stream.run()
        assert stream.last_by_seqno == high_seqno
Author: DavidAlphaFox, Project: couchbase, Lines of code: 24, Source file: crashtests.py

Example 6: test_stream_after_n_crashes

# Required import: from membase.api.rest_client import RestHelper [as alias]
# Or: from membase.api.rest_client.RestHelper import is_ns_server_running [as alias]
    def test_stream_after_n_crashes(self):

        crashes = 5
        vbucket = 0

        # load some data
        nodeA = self.servers[0]
        rest = RestHelper(RestConnection(nodeA))
        for i in xrange(crashes):
            self.load_docs(nodeA, vbucket, self.num_items)
            assert self.stop_node(0)
            time.sleep(5)
            assert self.start_node(0)
            assert rest.is_ns_server_running()
            time.sleep(2)

            vb_uuid, _, high_seqno = self.vb_info(nodeA, vbucket)
            dcp_client = self.dcp_client(nodeA, PRODUCER)
            stream = dcp_client.stream_req(
                vbucket, 0, 0,
                high_seqno, vb_uuid)
            stream.run()

            assert stream.last_by_seqno == high_seqno
Author: DavidAlphaFox, Project: couchbase, Lines of code: 26, Source file: crashtests.py

Example 7: NewUpgradeBaseTest

# Required import: from membase.api.rest_client import RestHelper [as alias]
# Or: from membase.api.rest_client.RestHelper import is_ns_server_running [as alias]
class NewUpgradeBaseTest(BaseTestCase):

    def setUp(self):
        super(NewUpgradeBaseTest, self).setUp()
        self.product = self.input.param('product', 'couchbase-server')
        self.initial_version = self.input.param('initial_version', '1.8.1-942-rel')
        self.initial_vbuckets = self.input.param('initial_vbuckets', 64)
        self.rest_settings = self.input.membase_settings
        self.rest = RestConnection(self.master)
        self.rest_helper = RestHelper(self.rest)
        self.sleep_time = 10
        self.data_size = self.input.param('data_size', 1024)
        self.op_types = self.input.param('op_types', 'bucket')
        self.item_flag = self.input.param('item_flag', 4042322160)
        self.expire_time = self.input.param('expire_time', 0)

    def tearDown(self):
        super(NewUpgradeBaseTest, self).tearDown()

    def _install(self, servers):
        params = {}
        params['num_nodes'] = len(servers)
        params['product'] = self.product
        params['version'] = self.initial_version
        params['vbuckets'] = [self.initial_vbuckets]
        InstallerJob().parallel_install(servers, params)
        if self.product in ["couchbase", "couchbase-server", "cb"]:
            success = True
            for server in servers:
                success &= RemoteMachineShellConnection(server).is_couchbase_installed()
                if not success:
                    self.log.info("some nodes were not installed successfully!")
                    sys.exit(1)

    def operations(self, multi_nodes=False):
        self.quota = self._initialize_nodes(self.cluster, self.servers, self.disabled_consistent_view)
        self.buckets = []
        gc.collect()
        if self.total_buckets > 0:
            self.bucket_size = self._get_bucket_size(self.quota, self.total_buckets)

        if self.default_bucket:
            self.cluster.create_default_bucket(self.master, self.bucket_size, self.num_replicas)
            self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="",
                                       num_replicas=self.num_replicas, bucket_size=self.bucket_size))

        self._create_sasl_buckets(self.master, self.sasl_buckets)
        self._create_standard_buckets(self.master, self.standard_buckets)
        if multi_nodes:
            servers_in = [self.servers[i+1] for i in range(self.initial_num_servers-1)]
            self.cluster.rebalance(self.servers[:1], servers_in, [])
        if self.op_types == "data":
            self._load_data_all_buckets("create")
            if multi_nodes:
                self._wait_for_stats_all_buckets(self.servers[:self.initial_num_servers])
            else:
                self._wait_for_stats_all_buckets([self.master])

    def _load_data_all_buckets(self, op_type='create', start=0):
        loaded = False
        count = 0
        gen_load = BlobGenerator('upgrade-', 'upgrade-', self.data_size, start=start, end=self.num_items)
        while not loaded and count < 60:
            try:
                self._load_all_buckets(self.master, gen_load, op_type, self.expire_time, 1,
                                       self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
                loaded = True
            except MemcachedError as error:
                if error.status == 134:
                    loaded = False
                    self.log.error("Memcached error 134, wait for 5 seconds and then try again")
                    count += 1
                    time.sleep(self.sleep_time)

    def _get_build(self, server, version, remote, is_amazon=False):
        info = remote.extract_remote_info()
        builds, changes = BuildQuery().get_all_builds()
        self.log.info("finding build %s for machine %s" % (version, server))
        result = re.search('r', version)

        if result is None:
            appropriate_build = BuildQuery().\
                find_membase_release_build('%s-enterprise' % (self.product), info.deliverable_type,
                                           info.architecture_type, version.strip(), is_amazon=is_amazon)
        else:
            appropriate_build = BuildQuery().\
                find_membase_build(builds, '%s-enterprise' % (self.product), info.deliverable_type,
                                   info.architecture_type, version.strip(), is_amazon=is_amazon)

        return appropriate_build

    def _upgrade(self, upgrade_version, server, remote):
        appropriate_build = self._get_build(server, upgrade_version, remote)
        self.assertTrue(appropriate_build.url, msg="unable to find build {0}".format(upgrade_version))
        remote.download_build(appropriate_build)
        remote.membase_upgrade(appropriate_build, save_upgrade_config=False)
        self.rest_helper.is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
        self.rest.init_cluster_port(self.rest_settings.rest_username, self.rest_settings.rest_password)
        time.sleep(self.sleep_time)

#.........part of the code omitted here.........
Author: mschoch, Project: testrunner, Lines of code: 103, Source file: newupgradebasetest.py


Note: The membase.api.rest_client.RestHelper.is_ns_server_running method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their authors, who retain copyright of the source code; please follow the corresponding project's License when distributing or using it, and do not reproduce without permission.