

Python RestConnection.regenerate_cluster_certificate method code examples

This article collects and summarizes typical usage examples of the Python method membase.api.rest_client.RestConnection.regenerate_cluster_certificate. If you are wondering how RestConnection.regenerate_cluster_certificate is used in practice, or are looking for concrete examples of it in real code, the curated examples below should help. You can also explore further usage examples of the enclosing class, membase.api.rest_client.RestConnection.


Seven code examples of the RestConnection.regenerate_cluster_certificate method are shown below, sorted by popularity by default.
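
Before working through the examples, the basic call pattern is worth spelling out: regenerate_cluster_certificate() asks the cluster to discard its current CA and generate a fresh self-signed certificate, after which the new CA can be fetched and written to disk so that clients can trust it (example 2 below does exactly this). The sketch below is illustrative only: the helper name and output path are made up, and the server object is assumed to carry the node's IP and REST credentials the way testrunner's server objects do.

from membase.api.rest_client import RestConnection

def regenerate_and_save_ca(server, out_path='/tmp/new_cluster_ca.pem'):
    # 'server' is assumed to expose the node's IP and REST credentials,
    # as testrunner server objects do.
    rest = RestConnection(server)
    rest.regenerate_cluster_certificate()      # cluster generates a fresh self-signed CA
    new_ca = rest.get_cluster_ceritificate()   # fetch the new CA (method name as spelled in rest_client.py)
    with open(out_path, 'w') as f:
        f.write(new_ca)                        # persist it so SDK connections can be re-validated
    return out_path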

Example 1: test_get_cluster_ca_self_signed

# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import regenerate_cluster_certificate [as alias]
    def test_get_cluster_ca_self_signed(self):
        rest = RestConnection(self.master)
        rest.regenerate_cluster_certificate()
        status, content, header = x509main(self.master)._get_cluster_ca_cert()
        content = json.loads(content)
        self.assertTrue(status, "Issue while Cluster CA Cert")
        self.assertEqual(content['cert']['type'], "generated", "Type of certificate is mismatch")
Author: EricACooper, Project: testrunner, Lines of code: 9, Source file: x509tests.py

Example 2: test_sdk_change_ca_self_signed

# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import regenerate_cluster_certificate [as alias]
    def test_sdk_change_ca_self_signed(self):
        rest = RestConnection(self.master)
        temp_file_name = '/tmp/newcerts/orig_cert.pem'
        x509main(self.master).setup_master()
        x509main().setup_cluster_nodes_ssl(self.servers)
        rest.create_bucket(bucket='default', ramQuotaMB=100)
        result = self._sdk_connection(host_ip=self.master.ip)
        self.assertTrue(result,"Cannot create a security connection with server")
        rest.regenerate_cluster_certificate()

        temp_cert = rest.get_cluster_ceritificate()
        temp_file = open(temp_file_name,'w')
        temp_file.write(temp_cert)
        temp_file.close()

        result = self._sdk_connection(root_ca_path=temp_file_name,host_ip=self.master.ip)
        self.assertTrue(result,"Cannot create a security connection with server")
Author: EricACooper, Project: testrunner, Lines of code: 19, Source file: x509tests.py

Example 3: _reset_original

# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import regenerate_cluster_certificate [as alias]
    def _reset_original(self):
        self.log.info("Reverting to original state - regenerating certificate and removing inbox folder")
        for servers in self.servers:
            rest = RestConnection(servers)
            rest.regenerate_cluster_certificate()
            x509main(servers)._delete_inbox_folder()
Author: EricACooper, Project: testrunner, Lines of code: 8, Source file: x509tests.py

Example 4: test_backward_compatibility

# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import regenerate_cluster_certificate [as alias]
    def test_backward_compatibility(self):
        self.c1_version = self.initial_version
        self.c2_version = self.upgrade_versions[0]
        # install older version on C1
        self._install(self.servers[:self.src_init])
        #install latest version on C2
        self.initial_version = self.c2_version
        self._install(self.servers[self.src_init:])
        self.initial_version = self.c1_version
        self.create_buckets()
        # workaround for MB-15761
        if float(self.initial_version[:2]) < 3.0 and self._demand_encryption:
            rest = RestConnection(self.dest_master)
            rest.set_internalSetting('certUseSha1',"true")
            rest.regenerate_cluster_certificate()
        self._join_all_clusters()

        if float(self.c1_version[:2]) >= 3.0:
            for cluster in self.get_cb_clusters():
                for remote_cluster in cluster.get_remote_clusters():
                    remote_cluster.pause_all_replications()

        self.sleep(60)
        bucket = self.src_cluster.get_bucket_by_name('default')
        self._operations()
        self._load_bucket(bucket, self.src_master, self.gen_create, 'create', exp=0)
        bucket = self.src_cluster.get_bucket_by_name('sasl_bucket_1')
        self._load_bucket(bucket, self.src_master, self.gen_create, 'create', exp=0)
        bucket = self.dest_cluster.get_bucket_by_name('standard_bucket_1')
        gen_create2 = BlobGenerator('loadTwo', 'loadTwo', self._value_size, end=self.num_items)
        self._load_bucket(bucket, self.dest_master, gen_create2, 'create', exp=0)

        if float(self.c1_version[:2]) >= 3.0:
            for cluster in self.get_cb_clusters():
                for remote_cluster in cluster.get_remote_clusters():
                    remote_cluster.resume_all_replications()

        self._wait_for_replication_to_catchup()

        if float(self.c1_version[:2]) > 2.5:
            for remote_cluster in self.src_cluster.get_remote_clusters():
                remote_cluster.modify()
            for remote_cluster in self.dest_cluster.get_remote_clusters():
                remote_cluster.modify()

        self.sleep(30)

        bucket = self.src_cluster.get_bucket_by_name('sasl_bucket_1')
        gen_create3 = BlobGenerator('loadThree', 'loadThree', self._value_size, end=self.num_items)
        self._load_bucket(bucket, self.src_master, gen_create3, 'create', exp=0)
        bucket = self.dest_cluster.get_bucket_by_name('sasl_bucket_1')
        gen_create4 = BlobGenerator('loadFour', 'loadFour', self._value_size, end=self.num_items)
        self._load_bucket(bucket, self.dest_master, gen_create4, 'create', exp=0)
        bucket = self.src_cluster.get_bucket_by_name('default')
        self._load_bucket(bucket, self.src_master, gen_create2, 'create', exp=0)

        self.merge_all_buckets()
        self.sleep(60)
        self._post_upgrade_ops()
        self.sleep(60)
        self.verify_results()
        if float(self.initial_version[:3]) == 3.1 and float(self.upgrade_versions[0][:3]) == 4.1:  # [:3] so minor versions like '3.1' compare correctly
            goxdcr_log = NodeHelper.get_goxdcr_log_dir(self._input.servers[0])\
                     + '/goxdcr.log*'
            for node in self.src_cluster.get_nodes():
                count = NodeHelper.check_goxdcr_log(
                            node,
                            "Failed to repair connections to target cluster",
                            goxdcr_log)
                self.assertEqual(count, 0, "Failed to repair connections to target cluster "
                                        "error message found in " + str(node.ip))
                self.log.info("Failed to repair connections to target cluster "
                                        "error message not found in " + str(node.ip))
Author: EricACooper, Project: testrunner, Lines of code: 75, Source file: upgradeXDCR.py

Example 5: test_settingsCluster

# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import regenerate_cluster_certificate [as alias]

#......... part of the code is omitted here .........
                               "view_fragmentation_threshold:percentage":50,
                               "view_fragmentation_threshold:size":10,
                               "purge_interval":3,
                               "source":"ns_server",
                               "user":"Administrator",
                               'source':source,
                               "user":user,
                               'ip':self.ipAddress,
                               'port':1234}
            rest.set_auto_compaction(dbFragmentThresholdPercentage=50,
                                     viewFragmntThresholdPercentage=50,
                                     dbFragmentThreshold=10,
                                     viewFragmntThreshold=10)

        elif (ops == 'modifyCompactionSettingsTime'):
            expectedResults = {"parallel_db_and_view_compaction":False,
                               "database_fragmentation_threshold:percentage":50,
                               "database_fragmentation_threshold:size":10,
                               "view_fragmentation_threshold:percentage":50,
                               "view_fragmentation_threshold:size":10,
                               "allowed_time_period:abort_outside":True,
                               "allowed_time_period:to_minute":15,
                               "allowed_time_period:from_minute":12,
                               "allowed_time_period:to_hour":1,
                               "allowed_time_period:from_hour":1,
                               "purge_interval":3,
                               "source":"ns_server",
                               "user":"Administrator",
                               'source':source,
                               "user":user,
                               'ip':self.ipAddress,
                               'port':1234,
                               }
            rest.set_auto_compaction(dbFragmentThresholdPercentage=50,
                                     viewFragmntThresholdPercentage=50,
                                     dbFragmentThreshold=10,
                                     viewFragmntThreshold=10,
                                     allowedTimePeriodFromHour=1,
                                     allowedTimePeriodFromMin=12,
                                     allowedTimePeriodToHour=1,
                                     allowedTimePeriodToMin=15,
                                     allowedTimePeriodAbort='true')

        elif (ops == "AddGroup"):
            expectedResults = {'group_name':'add group', 'source':source, 'user':user, 'ip':self.ipAddress, 'port':1234}
            rest.add_zone(expectedResults['group_name'])
            tempStr = rest.get_zone_uri()[expectedResults['group_name']]
            tempStr = (tempStr.split("/"))[4]
            expectedResults['uuid'] = tempStr

        elif (ops == "UpdateGroup"):
            expectedResults = {'group_name':'upGroup', 'source':source, 'user':user, 'ip':self.ipAddress, 'port':1234, 'nodes':[]}
            rest.add_zone(expectedResults['group_name'])
            rest.rename_zone(expectedResults['group_name'], 'update group')
            expectedResults['group_name'] = 'update group'
            tempStr = rest.get_zone_uri()[expectedResults['group_name']]
            tempStr = (tempStr.split("/"))[4]
            expectedResults['uuid'] = tempStr

        elif (ops == "UpdateGroupAddNodes"):
            sourceGroup = "Group 1"
            destGroup = 'destGroup'
            expectedResults = {'group_name':destGroup, 'source':source, 'user':user, 'ip':self.ipAddress, 'port':1234, 'nodes':['ns_1@' + self.master.ip], 'port':1234}
            #rest.add_zone(sourceGroup)
            rest.add_zone(destGroup)
            self.sleep(30)
            rest.shuffle_nodes_in_zones([self.master.ip], sourceGroup, destGroup)
            tempStr = rest.get_zone_uri()[expectedResults['group_name']]
            tempStr = (tempStr.split("/"))[4]
            expectedResults['uuid'] = tempStr

        elif (ops == "DeleteGroup"):
            expectedResults = {'group_name':'delete group', 'source':source, 'user':user, 'ip':self.ipAddress, 'port':1234}
            rest.add_zone(expectedResults['group_name'])
            tempStr = rest.get_zone_uri()[expectedResults['group_name']]
            tempStr = (tempStr.split("/"))[4]
            expectedResults['uuid'] = tempStr
            rest.delete_zone(expectedResults['group_name'])

        elif (ops == "regenCer"):
            expectedResults = {'source':source, 'user':user, 'ip':self.ipAddress, 'port':1234}
            rest.regenerate_cluster_certificate()

        elif (ops == 'renameNode'):
            rest.rename_node(self.master.ip, user, password)
            expectedResults = {"hostname":self.master.ip, "node":"[email protected]" + self.master.ip, "source":source, "user":user, "ip":self.ipAddress, "port":56845}

        try:
            self.checkConfig(self.eventID, self.master, expectedResults)
        finally:
            if (ops == "UpdateGroupAddNodes"):
                sourceGroup = "Group 1"
                destGroup = 'destGroup'
                rest.shuffle_nodes_in_zones([self.master.ip], destGroup, sourceGroup)

            rest = RestConnection(self.master)
            zones = rest.get_zone_names()
            for zone in zones:
                if zone != "Group 1":
                    rest.delete_zone(zone)
Author: pkdevboxy, Project: testrunner, Lines of code: 104, Source file: audittest.py

Example 6: test_backward_compatibility

# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import regenerate_cluster_certificate [as alias]
    def test_backward_compatibility(self):
        if self.bucket_type == "ephemeral" and  float(self.initial_version[:3]) < 5.0:
            self.log.info("Ephemeral buckets not available in version " + str(self.initial_version))
            self.skip_this_version = True
            return
        self.c1_version = self.initial_version
        self.c2_version = self.upgrade_versions[0]
        if self.c1_version[:3] >= self.c2_version[:3]:
            self.log.info("Initial version greater than upgrade version - not supported")
            self.skip_this_version = True
            return
        # install older version on C1
        self._install(self.servers[:self.src_init])
        #install latest version on C2
        self.initial_version = self.c2_version
        self._install(self.servers[self.src_init:])
        self.initial_version = self.c1_version
        self.create_buckets()
        # workaround for MB-15761
        if float(self.initial_version[:2]) < 3.0 and self._demand_encryption:
            rest = RestConnection(self.dest_master)
            rest.set_internalSetting('certUseSha1',"true")
            rest.regenerate_cluster_certificate()
        self._join_all_clusters()

        if float(self.c1_version[:2]) >= 3.0:
            for cluster in self.get_cb_clusters():
                for remote_cluster in cluster.get_remote_clusters():
                    remote_cluster.pause_all_replications()

        self.sleep(60)
        bucket = self.src_cluster.get_bucket_by_name('default')
        self._operations()
        self._load_bucket(bucket, self.src_master, self.gen_create, 'create', exp=0)
        bucket = self.src_cluster.get_bucket_by_name('sasl_bucket_1')
        self._load_bucket(bucket, self.src_master, self.gen_create, 'create', exp=0)
        bucket = self.dest_cluster.get_bucket_by_name('standard_bucket_1')
        gen_create2 = BlobGenerator('loadTwo', 'loadTwo', self._value_size, end=self.num_items)
        self._load_bucket(bucket, self.dest_master, gen_create2, 'create', exp=0)

        if float(self.c1_version[:2]) >= 3.0:
            for cluster in self.get_cb_clusters():
                for remote_cluster in cluster.get_remote_clusters():
                    remote_cluster.resume_all_replications()

        self._wait_for_replication_to_catchup()

        if float(self.c1_version[:2]) > 2.5:
            for remote_cluster in self.src_cluster.get_remote_clusters():
                remote_cluster.modify()
            for remote_cluster in self.dest_cluster.get_remote_clusters():
                remote_cluster.modify()

        self.sleep(30)

        bucket = self.src_cluster.get_bucket_by_name('sasl_bucket_1')
        gen_create3 = BlobGenerator('loadThree', 'loadThree', self._value_size, end=self.num_items)
        self._load_bucket(bucket, self.src_master, gen_create3, 'create', exp=0)
        bucket = self.dest_cluster.get_bucket_by_name('sasl_bucket_1')
        gen_create4 = BlobGenerator('loadFour', 'loadFour', self._value_size, end=self.num_items)
        self._load_bucket(bucket, self.dest_master, gen_create4, 'create', exp=0)
        bucket = self.src_cluster.get_bucket_by_name('default')
        self._load_bucket(bucket, self.src_master, gen_create2, 'create', exp=0)

        self.merge_all_buckets()
        self.sleep(60)
        self._post_upgrade_ops()
        self.sleep(60)
        self.verify_results()
        if float(self.initial_version[:3]) == 3.1 and float(self.upgrade_versions[0][:3]) == 4.1:
            goxdcr_log = NodeHelper.get_goxdcr_log_dir(self._input.servers[0])\
                     + '/goxdcr.log*'
            for node in self.src_cluster.get_nodes():
                count1 =  NodeHelper.check_goxdcr_log(
                            node,
                            "Received error response from memcached in target cluster",
                            goxdcr_log)
                count2 = NodeHelper.check_goxdcr_log(
                            node,
                            "EINVAL",
                            goxdcr_log)
                count3 = NodeHelper.check_goxdcr_log(
                            node,
                            "Failed to repair connections to target cluster",
                            goxdcr_log)
                count4 = NodeHelper.check_goxdcr_log(
                            node,
                            "received error response from setMeta client. Repairing connection. response status=EINVAL",
                            goxdcr_log)
                count5 = NodeHelper.check_goxdcr_log(
                            node,
                            "GOGC in new global setting is 0, which is not a valid value and can only have come from "
                            "upgrade. Changed it to 100 instead.",
                            goxdcr_log)
                if count1 > 0 or count2 > 0:
                    self.assertEqual(count3, 0, "Failed to repair connections to target cluster "
                                        "error message found in " + str(node.ip))
                    self.log.info("Failed to repair connections to target cluster "
                                        "error message not found as expected in " + str(node.ip))
                self.assertEqual(count4, 0, "Disconnect errors found in " + str(node.ip))
#......... part of the code is omitted here .........
Author: arod1987, Project: testrunner, Lines of code: 103, Source file: upgradeXDCR.py

Example 7: test_backward_compatibility

# Required import: from membase.api.rest_client import RestConnection [as alias]
# Or: from membase.api.rest_client.RestConnection import regenerate_cluster_certificate [as alias]
    def test_backward_compatibility(self):
        self.c1_version = self.initial_version
        self.c2_version = self.upgrade_versions[0]
        # install older version on C1
        self._install(self.servers[:self.src_init])
        #install latest version on C2
        self.initial_version = self.c2_version
        self._install(self.servers[self.src_init:])
        self.initial_version = self.c1_version
        self.create_buckets()
        # workaround for MB-15761
        if float(self.initial_version[:2]) < 3.0 and self._demand_encryption:
            rest = RestConnection(self.dest_master)
            rest.set_internalSetting('certUseSha1',"true")
            rest.regenerate_cluster_certificate()
        self._join_all_clusters()

        if float(self.c1_version[:2]) >= 3.0:
            for cluster in self.get_cb_clusters():
                for remote_cluster in cluster.get_remote_clusters():
                    remote_cluster.pause_all_replications()

        self.sleep(60)
        bucket = self.src_cluster.get_bucket_by_name('default')
        self._operations()
        self._load_bucket(bucket, self.src_master, self.gen_create, 'create', exp=0)
        bucket = self.src_cluster.get_bucket_by_name('sasl_bucket_1')
        self._load_bucket(bucket, self.src_master, self.gen_create, 'create', exp=0)
        bucket = self.dest_cluster.get_bucket_by_name('standard_bucket_1')
        gen_create2 = BlobGenerator('loadTwo', 'loadTwo', self._value_size, end=self.num_items)
        self._load_bucket(bucket, self.dest_master, gen_create2, 'create', exp=0)

        if float(self.c1_version[:2]) >= 3.0:
            for cluster in self.get_cb_clusters():
                for remote_cluster in cluster.get_remote_clusters():
                    remote_cluster.resume_all_replications()

        self._wait_for_replication_to_catchup()

        if float(self.c1_version[:2]) > 2.5:
            for remote_cluster in self.src_cluster.get_remote_clusters():
                remote_cluster.modify()
            for remote_cluster in self.dest_cluster.get_remote_clusters():
                remote_cluster.modify()

        self.sleep(30)

        bucket = self.src_cluster.get_bucket_by_name('sasl_bucket_1')
        gen_create3 = BlobGenerator('loadThree', 'loadThree', self._value_size, end=self.num_items)
        self._load_bucket(bucket, self.src_master, gen_create3, 'create', exp=0)
        bucket = self.dest_cluster.get_bucket_by_name('sasl_bucket_1')
        gen_create4 = BlobGenerator('loadFour', 'loadFour', self._value_size, end=self.num_items)
        self._load_bucket(bucket, self.dest_master, gen_create4, 'create', exp=0)
        bucket = self.src_cluster.get_bucket_by_name('default')
        self._load_bucket(bucket, self.src_master, gen_create2, 'create', exp=0)

        self.merge_all_buckets()
        self.sleep(60)
        self._post_upgrade_ops()
        self.sleep(60)
        self.verify_results()
Author: chethanrao, Project: testrunner-archive, Lines of code: 63, Source file: upgradeXDCR.py


Note: The membase.api.rest_client.RestConnection.regenerate_cluster_certificate examples in this article were collected by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets come from open-source projects contributed by their respective authors, and copyright in the source code remains with the original authors; before redistributing or using the code, please consult the license of the corresponding project. Do not republish without permission.