

Python RemoteMachineShellConnection.restore_backupFile method code examples

This article collects typical usage examples of the Python method remote.remote_util.RemoteMachineShellConnection.restore_backupFile, gathered from open-source projects. If you are unsure how restore_backupFile is used in practice, the curated examples below should help; from there you can also explore the other methods of remote.remote_util.RemoteMachineShellConnection.


The following presents 5 code examples of RemoteMachineShellConnection.restore_backupFile, sorted by popularity by default.
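Before diving in, here is a minimal sketch of the pattern all five examples share: back up the cluster with execute_cluster_backup, flush or delete the data, then bring it back with restore_backupFile. The server object, credentials, and bucket name below are placeholder assumptions for illustration; only the RemoteMachineShellConnection calls themselves come from the examples.

from remote.remote_util import RemoteMachineShellConnection

# 'master' stands for the testrunner server object the tests connect to (assumed).
shell = RemoteMachineShellConnection(master)
# REST credentials in "user:password" form (placeholder values).
login_info = "%s:%s" % ("Administrator", "password")
backup_location = "/tmp/backup"

# 1. Back up the cluster to a directory on the remote machine.
shell.execute_cluster_backup(login_info, backup_location, '')

# 2. Flush or delete the buckets, mutate documents, and so on.

# 3. Restore the backed-up files into the named buckets.
shell.restore_backupFile(login_info, backup_location, ["default"])
shell.disconnect()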

Example 1: test_queries_after_backup_with_2i

# Required import: from remote.remote_util import RemoteMachineShellConnection [as alias]
# Or: from remote.remote_util.RemoteMachineShellConnection import restore_backupFile [as alias]
def test_queries_after_backup_with_2i(self):
    index_name = "Automation_backup_index"
    method_name = self.input.param('to_run', 'test_any')
    self.couchbase_login_info = "%s:%s" % (self.input.membase_settings.rest_username,
                                           self.input.membase_settings.rest_password)
    self.backup_location = self.input.param("backup_location", "/tmp/backup")
    self.command_options = self.input.param("command_options", '')
    index_field = self.input.param("index_field", '')
    self.assertTrue(index_field, "Index field should be provided")
    # Create a GSI index on every bucket before taking the backup.
    for bucket in self.buckets:
        self.run_cbq_query(query="CREATE INDEX %s ON %s(%s) USING GSI" % (index_name, bucket.name, ','.join(index_field.split(';'))))
    try:
        shell = RemoteMachineShellConnection(self.master)
        # Run the queries once, back up the cluster, then run them again.
        fn = getattr(self, method_name)
        fn()
        shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, self.command_options)
        fn = getattr(self, method_name)
        fn()
        # Flush the buckets so the restore has to bring the data back.
        for bucket in self.buckets:
            self.cluster.bucket_flush(self.master, bucket=bucket)
        self.sleep(5, 'wait some time before restore')
        shell.restore_backupFile(self.couchbase_login_info, self.backup_location, [bucket.name for bucket in self.buckets])
        # The queries should return the same results after the restore.
        fn = getattr(self, method_name)
        fn()
    finally:
        for bucket in self.buckets:
            self.run_cbq_query(query="DROP INDEX %s.%s" % (bucket.name, index_name))
Developer: lichia, Project: testrunner, Lines: 29, Source file: tuq_cluster_ops.py

Example 2: IBRSpatialTests

# Required import: from remote.remote_util import RemoteMachineShellConnection [as alias]
# Or: from remote.remote_util.RemoteMachineShellConnection import restore_backupFile [as alias]
class IBRSpatialTests(SpatialQueryTests):
    def setUp(self):
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        self.master = self.servers[0]
        self.log = logger.Logger.get_logger()
        self.helper = SpatialHelper(self, "default")
        self.helper.setup_cluster()
        self.cluster = Cluster()
        self.default_bucket = self.input.param("default_bucket", True)
        self.sasl_buckets = self.input.param("sasl_buckets", 0)
        self.standard_buckets = self.input.param("standard_buckets", 0)
        self.memcached_buckets = self.input.param("memcached_buckets", 0)
        self.servers = self.helper.servers
        self.shell = RemoteMachineShellConnection(self.master)
        info = self.shell.extract_remote_info()
        self.os = info.type.lower()
        self.couchbase_login_info = "%s:%s" % (self.input.membase_settings.rest_username,
                                               self.input.membase_settings.rest_password)
        self.backup_location = self.input.param("backup_location", "/tmp/backup")
        self.command_options = self.input.param("command_options", '')

    def tearDown(self):
        self.helper.cleanup_cluster()

    def test_backup_with_spatial_data(self):
        num_docs = self.helper.input.param("num-docs", 5000)
        self.log.info("description : Make limit queries on a simple "
                      "dataset with {0} docs".format(num_docs))
        data_set = SimpleDataSet(self.helper, num_docs)
        data_set.add_limit_queries()
        self._query_test_init(data_set)

        if not self.command_options:
            self.command_options = []
        options = self.command_options + [' -m full']

        # Take a full backup of the cluster.
        self.total_backups = 1
        self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
        time.sleep(2)

        # Delete every bucket so the restore starts from a clean state.
        self.buckets = RestConnection(self.master).get_buckets()
        bucket_names = [bucket.name for bucket in self.buckets]
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
        gc.collect()

        # Recreate the default bucket and restore the backed-up data into it.
        self.helper._create_default_bucket()
        self.shell.restore_backupFile(self.couchbase_login_info, self.backup_location, bucket_names)

        SimpleDataSet(self.helper, num_docs)._create_views()
        self._query_test_init(data_set)
Developer: EricACooper, Project: testrunner, Lines: 55, Source file: ibr.py

Example 3: test_queries_after_backup_restore

# Required import: from remote.remote_util import RemoteMachineShellConnection [as alias]
# Or: from remote.remote_util.RemoteMachineShellConnection import restore_backupFile [as alias]
def test_queries_after_backup_restore(self):
    method_name = self.input.param('to_run', 'test_any')
    self.couchbase_login_info = "%s:%s" % (self.input.membase_settings.rest_username,
                                           self.input.membase_settings.rest_password)
    self.backup_location = self.input.param("backup_location", "/tmp/backup")
    self.command_options = self.input.param("command_options", '')
    shell = RemoteMachineShellConnection(self.master)
    # Run the queries once, back up the cluster, then run them again.
    fn = getattr(self, method_name)
    fn()
    shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, self.command_options)
    fn = getattr(self, method_name)
    fn()
    # Flush the buckets so the restore has to bring the data back.
    for bucket in self.buckets:
        self.cluster.bucket_flush(self.master, bucket=bucket)
    self.sleep(5, 'wait some time before restore')
    shell.restore_backupFile(self.couchbase_login_info, self.backup_location, [bucket.name for bucket in self.buckets])
    # The queries should return the same results after the restore.
    fn = getattr(self, method_name)
    fn()
Developer: lichia, Project: testrunner, Lines: 20, Source file: tuq_cluster_ops.py

Example 4: test_meta_backup

# Required import: from remote.remote_util import RemoteMachineShellConnection [as alias]
# Or: from remote.remote_util.RemoteMachineShellConnection import restore_backupFile [as alias]
    def test_meta_backup(self):
        self.log.info('Starting test_meta_backup')
        self._load_ops(ops='set', mutations=20)

        # Do the backup on the bucket.
        self.shell = RemoteMachineShellConnection(self.master)
        self.buckets = RestConnection(self.master).get_buckets()
        self.couchbase_login_info = "%s:%s" % (self.input.membase_settings.rest_username,
                                               self.input.membase_settings.rest_password)
        self.backup_location = "/tmp/backup"
        self.command_options = self.input.param("command_options", '')
        try:
            self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, self.command_options)

            time.sleep(5)
            self.shell.restore_backupFile(self.couchbase_login_info, self.backup_location, [bucket.name for bucket in self.buckets])
            self.log.info('Done with restore')
        finally:
            # Restored items should keep their CAS metadata, so no conflict resolution is expected.
            self._check_cas(check_conflict_resolution=False)
Developer: bharath-gp, Project: testrunner, Lines: 22, Source file: opschangecas.py

Example 5: RecoveryUseTransferTests

# Required import: from remote.remote_util import RemoteMachineShellConnection [as alias]
# Or: from remote.remote_util.RemoteMachineShellConnection import restore_backupFile [as alias]

#......... some code omitted here .........
        """Recover data with 2.0 couchstore files to a 2.0 online server

        We load a number of items to one node first and then do some mutation on these items.
        Later we use cbtranfer to transfer the couchstore files we have on this
        node to a new node. We verify the data by comparison between the items in KVStore
        and items in the new node."""

        self.load_data()

        kvs_before = {}
        bucket_names = []
        for bucket in self.buckets:
            kvs_before[bucket.name] = bucket.kvs[1]
            bucket_names.append(bucket.name)

        if self.default_bucket:
            self.cluster.create_default_bucket(self.server_recovery, self.bucket_size, self.num_replicas)
            self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="", num_replicas=self.num_replicas, bucket_size=self.bucket_size))
        self._create_sasl_buckets(self.server_recovery, self.sasl_buckets)
        self._create_standard_buckets(self.server_recovery, self.standard_buckets)

        for bucket in self.buckets:
            bucket.kvs[1] = kvs_before[bucket.name]
            transfer_source = "couchstore-files://%s" % (COUCHBASE_DATA_PATH)
            transfer_destination = "http://%[email protected]%s:%s -b %s -B %s -v -v -v" % (self.couchbase_login_info,
                                                                             self.server_recovery.ip,
                                                                             self.server_recovery.port,
                                                                             bucket.name, bucket.name)
            self.shell.execute_cbtransfer(transfer_source, transfer_destination)
        del kvs_before
        time.sleep(self.expire_time + 1)
        shell_server_recovery = RemoteMachineShellConnection(self.server_recovery)
        for bucket in self.buckets:
            shell_server_recovery.execute_cbepctl(bucket, "", "set flush_param", "exp_pager_stime", 5)
        shell_server_recovery.disconnect()
        time.sleep(30)

        self._wait_for_stats_all_buckets([self.server_recovery])
        self._verify_all_buckets(self.server_recovery, 1, self.wait_timeout * 50, self.max_verify, True, 1)
        self._verify_stats_all_buckets([self.server_recovery])

    def recover_to_backupdir(self):
        """Recover data with 2.0 couchstore files to a 2.0 backup diretory

        We load a number of items to a node first and then do some mutataion on these items.
        Later we use cbtransfer to transfer the couchstore files we have on this node to
        a backup directory. We use cbrestore to restore these backup files to the same node
        for verification."""

        self.load_data()

        kvs_before = {}
        bucket_names = []

        self.shell.delete_files(self.backup_location)
        self.shell.create_directory(self.backup_location)

        for bucket in self.buckets:
            kvs_before[bucket.name] = bucket.kvs[1]
            bucket_names.append(bucket.name)
            transfer_source = "-v -v -v couchstore-files://%s" % (COUCHBASE_DATA_PATH)
            transfer_destination = self.backup_location
            self.shell.execute_cbtransfer(transfer_source, transfer_destination)

        self._all_buckets_delete(self.server_origin)
        if self.default_bucket:
            self.cluster.create_default_bucket(self.server_origin, self.bucket_size, self.num_replicas)
            self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="", num_replicas=self.num_replicas, bucket_size=self.bucket_size))
        self._create_sasl_buckets(self.server_origin, self.sasl_buckets)
        self._create_standard_buckets(self.server_origin, self.standard_buckets)

        for bucket in self.buckets:
            bucket.kvs[1] = kvs_before[bucket.name]
        del kvs_before
        self.shell.restore_backupFile(self.couchbase_login_info, self.backup_location, bucket_names)
        time.sleep(self.expire_time + 1)
        for bucket in self.buckets:
            self.shell.execute_cbepctl(bucket, "", "set flush_param", "exp_pager_stime", 5)
        time.sleep(30)

        self._wait_for_stats_all_buckets([self.server_origin])
        self._verify_all_buckets(self.server_origin, 1, self.wait_timeout * 50, self.max_verify, True, 1)
        self._verify_stats_all_buckets([self.server_origin])

    def load_data(self):
        gen_load = BlobGenerator('nosql', 'nosql-', self.value_size, end=self.num_items)
        gen_update = BlobGenerator('nosql', 'nosql-', self.value_size, end=(self.num_items / 2 - 1))
        gen_expire = BlobGenerator('nosql', 'nosql-', self.value_size, start=self.num_items / 2, end=(self.num_items * 3 / 4 - 1))
        gen_delete = BlobGenerator('nosql', 'nosql-', self.value_size, start=self.num_items * 3 / 4, end=self.num_items)
        self._load_all_buckets(self.server_origin, gen_load, "create", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)

        if self.doc_ops is not None:
            if "update" in self.doc_ops:
                self._load_all_buckets(self.server_origin, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
            if "delete" in self.doc_ops:
                self._load_all_buckets(self.server_origin, gen_delete, "delete", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
            if "expire" in self.doc_ops:
                self._load_all_buckets(self.server_origin, gen_expire, "update", self.expire_time, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
        self._wait_for_stats_all_buckets([self.server_origin])
        time.sleep(30)
Developer: Boggypop, Project: testrunner, Lines: 104, Source file: recoveryusetransfer.py


Note: the remote.remote_util.RemoteMachineShellConnection.restore_backupFile examples in this article were collected from open-source projects hosted on GitHub and similar platforms. The snippets were contributed by their original authors, and copyright remains with them; consult each project's License before redistributing or reusing the code. Please do not repost without permission.