Python RestConnection.get_data_path Method Code Examples

This article collects typical code examples of the Python method membase.api.rest_client.RestConnection.get_data_path. If you are wondering what RestConnection.get_data_path does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of membase.api.rest_client.RestConnection, the class this method belongs to.


The sections below show 5 code examples of the RestConnection.get_data_path method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
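
Before the full examples, here is a minimal, hypothetical sketch of calling the method directly. The Server namedtuple, host, and credentials below are illustrative placeholders rather than part of the examples that follow; the attribute names assume the server objects used throughout the testrunner framework.

# A minimal usage sketch (hypothetical host and credentials).
from collections import namedtuple

from membase.api.rest_client import RestConnection

# testrunner passes RestConnection a server object exposing ip/port and REST
# credentials; the exact attribute set is assumed here for illustration only.
Server = namedtuple("Server", ["ip", "port", "rest_username", "rest_password"])
server = Server(ip="127.0.0.1", port="8091",
                rest_username="Administrator", rest_password="password")

rest = RestConnection(server)
data_path = rest.get_data_path()  # e.g. "/opt/couchbase/var/lib/couchbase/data"
print("Data path on {0}: {1}".format(server.ip, data_path))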

Example 1: test_index_drop_folder_cleanup

# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import get_data_path [as alias]
 def test_index_drop_folder_cleanup(self):
     index_dist_factor = 1
     servers = self.get_nodes_from_services_map(service_type="index", get_all_nodes=True)
     num_indexes = len(servers)*index_dist_factor
     self.query_definitions = self._create_query_definitions(index_count=num_indexes)
     self.run_multi_operations(buckets=self.buckets, query_definitions=self.query_definitions,
                               create_index=True, drop_index=False)
     for server in servers:
         shell = RemoteMachineShellConnection(server)
         rest = RestConnection(server)
         data_path = rest.get_data_path()
         before_deletion_files = shell.list_files(data_path + "/@2i/")
         log.info("Files on node {0}: {1}".format(server.ip, before_deletion_files))
         self.assertTrue((len(before_deletion_files) > 1), "Index Files not created on node {0}".format(server.ip))
     self.run_multi_operations(buckets=self.buckets, query_definitions=self.query_definitions,
                               create_index=False, drop_index=True)
     self.sleep(20)
     for server in servers:
         shell = RemoteMachineShellConnection(server)
         rest = RestConnection(server)
         data_path = rest.get_data_path()
         after_deletion_files = shell.list_files(data_path + "/@2i/")
         log.info("Files on node {0}: {1}".format(server.ip, after_deletion_files))
         self.assertEqual(len(after_deletion_files), 1,
                     "Index directory not clean after drop Index on node {0}".format(server.ip))
Developer: EricACooper, Project: testrunner, Lines of code: 27, Source: index_load_balancing_2i.py

Example 2: test_check_http_access_log

# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import get_data_path [as alias]
 def test_check_http_access_log(self):
     """
         Test to check http access log
     """
     rest = RestConnection(self.master)
     log_path = rest.get_data_path().replace("data", "logs")
     remote_client = RemoteMachineShellConnection(self.master)
     output = remote_client.read_remote_file(log_path, "http_access.log")
     logic = self.verify_http_acesslog(output, [self.master.ip])
     self.assertTrue(logic, "search string not present in http_access.log")
Developer: pkdevboxy, Project: testrunner, Lines of code: 12, Source: clusterinfoanalysis.py

Example 3: collect_data

# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import get_data_path [as alias]
    def collect_data(self,servers,buckets,userId="Administrator",password="password", data_path = None, perNode = True, getReplica = False, mode = "memory"):
        """
            Method to extract all data information from memory or disk using cbtransfer
            The output is organized like { bucket :{ node { document-key : list of values }}}

            Parameters:

            servers: server information
            buckets: bucket information
            userId: user id of cb server
            password: password of cb server
            data_path: data path on servers, if given we will do cbtransfer on files
            perNode: if set we organize data for each bucket per node basis else we take a union

            Returns:

            If perNode flag is set we return data as follows
              {bucket {node { key: value list}}}
            else
              {bucket {key: value list}}
        """
        completeMap = {}
        for bucket in buckets:
            completeMap[bucket.name] = {}
        headerInfo = None
        for server in servers:
            if mode == "disk" and data_path is None:
                rest = RestConnection(server)
                data_path = rest.get_data_path()
            headerInfo = []
            bucketMap = {}
            if  server.ip == "127.0.0.1":
                headerInfo,bucketMap = self.get_local_data_map_using_cbtransfer(server,buckets, data_path=data_path, userId=userId,password=password, getReplica = getReplica, mode = mode)
            else:
                remote_client = RemoteMachineShellConnection(server)
                headerInfo,bucketMap = remote_client.get_data_map_using_cbtransfer(buckets, data_path=data_path, userId=userId,password=password, getReplica = getReplica, mode = mode)
                remote_client.disconnect()
            for bucket in bucketMap.keys():
                newMap = self.translateDataFromCSVToMap(0,bucketMap[bucket])
                if perNode:
                    completeMap[bucket][server.ip] = newMap
                else:
                    completeMap[bucket].update(newMap)
        return headerInfo,completeMap
Developer: EricACooper, Project: testrunner, Lines of code: 46, Source: data_analysis_helper.py
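
As a usage illustration for the example above, here is a hypothetical call sketch. The collector instance, servers, and buckets are assumed testrunner fixtures and are not part of the original source.

# Hypothetical usage sketch for collect_data (fixture objects assumed).
# `collector` is an instance of the helper class that defines collect_data;
# `servers` are server objects with an `ip` attribute and `buckets` are
# bucket objects with a `name` attribute, as used across testrunner.
header_info, data_map = collector.collect_data(
    servers, buckets,
    userId="Administrator", password="password",
    perNode=True, getReplica=False, mode="memory")

# With perNode=True the result is keyed bucket name -> node ip -> document key -> values.
for bucket_name, per_node in data_map.items():
    for node_ip, docs in per_node.items():
        print("{0} on {1}: {2} documents".format(bucket_name, node_ip, len(docs)))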

Example 4: FailoverTests

# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import get_data_path [as alias]
class FailoverTests(FailoverBaseTest):
    def setUp(self):
        super(FailoverTests, self).setUp(self)

    def tearDown(self):
        super(FailoverTests, self).tearDown(self)

    def test_failover_firewall(self):
        self.common_test_body('firewall')

    def test_failover_normal(self):
        self.common_test_body('normal')

    def test_failover_stop_server(self):
        self.common_test_body('stop_server')

    def test_failover_then_add_back(self):
        self.add_back_flag = True
        self.common_test_body('normal')

    def common_test_body(self, failover_reason):
        """
            Main test body which contains the flow of the basic failover steps
            1. Start operations if programmed into the test case (before/after)
            2. Start view and index building operations
            3. Failover K out of N nodes (failover can be HARDFAILOVER/GRACEFUL)
            4.1 Rebalance the cluster after failover of K nodes
            4.2 Run add-back operation with recoveryType = (full/delta) with rebalance
            5. Verify all expected operations completed by checking stats, replication, views, data correctness
        """
        # Pick the reference node for communication
        # We pick a node in the cluster which will NOT be failed over
        self.filter_list = []
        if self.failoverMaster:
            self.master = self.servers[1]
        self.log.info(" Picking node {0} as reference node for test case".format(self.master.ip))
        self.print_test_params(failover_reason)
        self.rest = RestConnection(self.master)
        self.nodes = self.rest.node_statuses()
        # Set the data path for the cluster
        self.data_path = self.rest.get_data_path()

        # Check if the test case has to be run for 3.0.0
        versions = self.rest.get_nodes_versions()
        self.version_greater_than_2_5 = True
        for version in versions:
            if "3" > version:
                self.version_greater_than_2_5 = False

        # Do not run this test if the graceful category is being used
        if not self.version_greater_than_2_5 and (self.graceful or (self.recoveryType != None)):
            self.log.error("Graceful failover can't be applied to nodes with version less than 3.*")
            self.log.error("Please check configuration parameters: SKIPPING TEST.")
            return

        # Find nodes that will undergo failover
        if self.failoverMaster:
            self.chosen = RebalanceHelper.pick_nodes(self.master, howmany=1, target_node = self.servers[0])
        else:
            self.chosen = RebalanceHelper.pick_nodes(self.master, howmany=self.num_failed_nodes)

        # Perform operations - Create/Update/Delete
        # self.withMutationOps = True => Run Operations in parallel to failover
        # self.withMutationOps = False => Run Operations Before failover
        self.load_initial_data()
        if not self.withMutationOps:
            self.run_mutation_operations()
        # Perform View Creation Tasks and check for completion if required before failover
        if self.withViewsOps:
            self.run_view_creation_operations(self.servers)
            if not self.createIndexesDuringFailover:
                self.query_and_monitor_view_tasks(self.servers)

        # Take a snapshot of the data set used for validation
        record_static_data_set = {}
        prev_vbucket_stats = {}
        prev_failover_stats = {}
        if not self.withMutationOps:
            record_static_data_set = self.get_data_set_all(self.servers, self.buckets, path = None)

        # Capture  vbucket and failover stats if test version >= 2.5.*
        if self.version_greater_than_2_5 and self.upr_check:
            prev_vbucket_stats = self.get_vbucket_seqnos(self.servers, self.buckets)
            prev_failover_stats = self.get_failovers_logs(self.servers, self.buckets)

        # Perform operations related to failover
        if self.withMutationOps or self.withViewsOps or self.compact:
            self.run_failover_operations_with_ops(self.chosen, failover_reason)
        else:
            self.run_failover_operations(self.chosen, failover_reason)

        # Perform add-back operation with rebalance, or only rebalance with verifications
        if not self.gracefulFailoverFail and self.runRebalanceAfterFailover:
            if self.add_back_flag:
                self.run_add_back_operation_and_verify(self.chosen, prev_vbucket_stats, record_static_data_set, prev_failover_stats)
            else:
                self.run_rebalance_after_failover_and_verify(self.chosen, prev_vbucket_stats, record_static_data_set, prev_failover_stats)
        else:
            return

#......... the rest of the code is omitted here .........
Developer: lichia, Project: testrunner, Lines of code: 103, Source: failovertests.py

Example 5: FailoverTests

# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import get_data_path [as alias]
class FailoverTests(FailoverBaseTest):
    def setUp(self):
        super(FailoverTests, self).setUp(self)

    def tearDown(self):
        super(FailoverTests, self).tearDown(self)

    def test_failover_firewall(self):
        self.common_test_body('firewall')

    def test_failover_normal(self):
        self.common_test_body('normal')

    def test_failover_stop_server(self):
        self.common_test_body('stop_server')

    def test_failover_then_add_back(self):
        self.add_back_flag = True
        self.common_test_body('normal')

    def common_test_body(self, failover_reason):
        """
            Main test body which contains the flow of the basic failover steps
            1. Start operations if programmed into the test case (before/after)
            2. Start view and index building operations
            3. Failover K out of N nodes (failover can be HARDFAILOVER/GRACEFUL)
            4.1 Rebalance the cluster after failover of K nodes
            4.2 Run add-back operation with recoveryType = (full/delta) with rebalance
            5. Verify all expected operations completed by checking stats, replication, views, data correctness
        """
        # Pick the reference node for communication
        # We pick a node in the cluster which will NOT be failed over
        self.referenceNode = self.master
        if self.failoverMaster:
            self.referenceNode = self.servers[1]
        self.log.info(" Picking node {0} as reference node for test case".format(self.referenceNode.ip))
        self.print_test_params(failover_reason)
        self.rest = RestConnection(self.referenceNode)
        self.nodes = self.rest.node_statuses()

        # Set the data path for the cluster
        self.data_path = self.rest.get_data_path()

        # Check if the test case has to be run for 3.0.0
        versions = self.rest.get_nodes_versions()
        self.version_greater_than_2_5 = True
        for version in versions:
            if "3" > version:
                self.version_greater_than_2_5 = False

        # Do not run this test if the graceful category is being used
        if not self.version_greater_than_2_5 and (self.graceful or (self.recoveryType != None)):
            self.log.error("Graceful failover can't be applied to nodes with version less than 3.*")
            self.log.error("Please check configuration parameters: SKIPPING TEST.")
            return

        # Find nodes that will undergo failover
        self.chosen = RebalanceHelper.pick_nodes(self.referenceNode, howmany=self.num_failed_nodes)

        # Perform operations - Create/Update/Delete
        # self.withOps = True => Run Operations in parallel to failover
        # self.withOps = False => Run Operations Before failover
        self.ops_tasks = self.run_operation_tasks()

        # Perform View Creation Tasks and check for completion if required before failover
        if self.runViews:
            self.run_view_creation_operations(self.servers)
            if not self.runViewsDuringFailover:
                self.run_view_creation_operations(self.servers)
                self.monitor_view_tasks(self.servers)

        # Take a snapshot of the data set used for validation
        record_static_data_set = self.get_data_set_all(self.servers, self.buckets, path = None)
        prev_vbucket_stats = {}
        prev_failover_stats = {}

        # Capture  vbucket and failover stats if test version >= 2.5.*
        if self.version_greater_than_2_5 and self.upr_check:
            prev_vbucket_stats = self.get_vbucket_seqnos(self.servers, self.buckets)
            prev_failover_stats = self.get_failovers_logs(self.servers, self.buckets)

        # Perform operations related to failover
        self.run_failover_operations(self.chosen, failover_reason)

        # Perform add-back operation with rebalance, or only rebalance with verifications
        if not self.gracefulFailoverFail:
            if self.add_back_flag:
                self.run_add_back_operation_and_verify(self.chosen, prev_vbucket_stats, record_static_data_set, prev_failover_stats)
            else:
                self.run_rebalance_after_failover_and_verify(self.chosen, prev_vbucket_stats, record_static_data_set, prev_failover_stats)

    def run_rebalance_after_failover_and_verify(self, chosen, prev_vbucket_stats, record_static_data_set, prev_failover_stats):
        """ Method to run rebalance after failover and verify """
        # Need a delay > min because MB-7168
        self.sleep(60, "after failover before invoking rebalance...")
        _servers_ = self.filter_servers(self.servers, chosen)
        # Rebalance after Failover operation
        self.rest.rebalance(otpNodes=[node.id for node in self.nodes],
                               ejectedNodes=[node.id for node in chosen])
        if self.during_ops:
#......... the rest of the code is omitted here .........
Developer: ketakigangal, Project: testrunner, Lines of code: 103, Source: failovertests.py


Note: The membase.api.rest_client.RestConnection.get_data_path method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the License of the corresponding project; do not reproduce without permission.