本文整理汇总了Python中remote.remote_util.RemoteMachineShellConnection.execute_cbtransfer方法的典型用法代码示例。如果您正苦于以下问题:Python RemoteMachineShellConnection.execute_cbtransfer方法的具体用法?Python RemoteMachineShellConnection.execute_cbtransfer怎么用?Python RemoteMachineShellConnection.execute_cbtransfer使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类remote.remote_util.RemoteMachineShellConnection的用法示例。
在下文中一共展示了RemoteMachineShellConnection.execute_cbtransfer方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_Transfer
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import execute_cbtransfer [as 别名]
def test_Transfer(self):
    """Back up the default bucket with cbtransfer and check the audit log.

    Loads 100 generated items into the cluster, runs cbtransfer from the
    master node's REST endpoint into a local backup directory, then asserts
    that the expected audit event was recorded for the transfer.
    """
    remote_shell = RemoteMachineShellConnection(self.master)
    load_gen = BlobGenerator('testdata', 'testdata-', self.value_size, end=100)
    self._load_all_buckets(self.master, load_gen, "create", 0, 1, 0, True,
                           batch_size=20000, pause_secs=5, timeout_secs=180)
    self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
    transfer_source = "http://" + self.master.ip + ":8091"
    # OS type is queried but the result is currently unused (Windows path
    # handling below is disabled).
    remote_info = remote_shell.extract_remote_info()
    backup_path = '/tmp/backup'
    #if info.type.lower() == "windows":
    #    path = '/cygdrive/c' + path
    remote_shell.delete_files(backup_path)
    remote_shell.execute_command("mkdir " + backup_path)
    options = "-b default " + " -u " + self.master.rest_username + " -p " + self.master.rest_password
    remote_shell.execute_cbtransfer(transfer_source, backup_path, options)
    expectedResults = {"peername": self.master.ip,
                       "sockname": self.master.ip + ":11210",
                       "source": "memcached",
                       "user": "default",
                       'bucket': 'default'}
    self.checkConfig(self.eventID, self.master, expectedResults)
示例2: RecoveryUseTransferTests
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import execute_cbtransfer [as 别名]
class RecoveryUseTransferTests(TransferBaseTest):
def setUp(self):
    """Prepare the origin/recovery server pair and a shell to the origin node."""
    # Starts at 1; tearDown increments it so cleanup is skipped on the
    # very first teardown of a test case.
    self.times_teardown_called = 1
    super(RecoveryUseTransferTests, self).setUp()
    self.server_origin, self.server_recovery = self.servers[0], self.servers[1]
    self.shell = RemoteMachineShellConnection(self.server_origin)
    remote_info = self.shell.extract_remote_info()
    self.os = remote_info.type.lower()
def tearDown(self):
    """Remove backup files and close the shell after the test, then chain up.

    Cleanup is skipped on the first setUp/tearDown cycle; with
    ``skip_cleanup`` set (the default) it additionally runs for any test
    case beyond the first.
    """
    # NOTE(review): assumes input.param is a pure read of the test config —
    # the original fetched it once per branch with identical arguments.
    skip_cleanup = self.input.param("skip_cleanup", True)
    if skip_cleanup:
        should_clean = self.case_number > 1 or self.times_teardown_called > 1
    else:
        should_clean = self.times_teardown_called > 1
    if should_clean:
        if self.os == 'windows':
            # Backup path lives under the cygwin-mapped C: drive on Windows.
            self.shell.delete_files("/cygdrive/c%s" % (self.backup_location))
        else:
            self.shell.delete_files(self.backup_location)
        self.shell.disconnect()
        del self.buckets
    self.times_teardown_called += 1
    super(RecoveryUseTransferTests, self).tearDown()
def recover_to_cbserver(self):
    """Recover data with 2.0 couchstore files to a 2.0 online server.

    Loads items onto the origin node and mutates them, then uses cbtransfer
    to push the origin's couchstore files into freshly created buckets on
    the recovery node.  The data is verified by comparing the items kept in
    KVStore against the items present on the recovery node.
    """
    self.load_data()
    kvs_before = {}
    bucket_names = []
    for bucket in self.buckets:
        kvs_before[bucket.name] = bucket.kvs[1]
        bucket_names.append(bucket.name)
    # Re-create the destination buckets on the recovery node.
    if self.default_bucket:
        self.cluster.create_default_bucket(self.server_recovery, self.bucket_size, self.num_replicas)
        self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="",
                                   num_replicas=self.num_replicas, bucket_size=self.bucket_size))
    self._create_sasl_buckets(self.server_recovery, self.sasl_buckets)
    self._create_standard_buckets(self.server_recovery, self.standard_buckets)
    for bucket in self.buckets:
        # Restore the saved KVStore so verification compares against the
        # pre-transfer data set.
        bucket.kvs[1] = kvs_before[bucket.name]
        transfer_source = "couchstore-files://%s" % (COUCHBASE_DATA_PATH)
        # BUGFIX: the destination format string had been garbled into
        # "http://%[email protected]%s:%s ..." (invalid %-format that cannot
        # consume the five arguments supplied); restored to the
        # credentials@host:port form.
        transfer_destination = "http://%s@%s:%s -b %s -B %s -v -v -v" % (self.couchbase_login_info,
                                                                         self.server_recovery.ip,
                                                                         self.server_recovery.port,
                                                                         bucket.name, bucket.name)
        self.shell.execute_cbtransfer(transfer_source, transfer_destination)
    del kvs_before
    # Wait for loaded items to expire, then force the expiry pager to run
    # soon on the recovery node so expired items are purged before verify.
    time.sleep(self.expire_time + 1)
    shell_server_recovery = RemoteMachineShellConnection(self.server_recovery)
    for bucket in self.buckets:
        shell_server_recovery.execute_cbepctl(bucket, "", "set flush_param", "exp_pager_stime", 5)
    shell_server_recovery.disconnect()
    time.sleep(30)
    self._wait_for_stats_all_buckets([self.server_recovery])
    self._verify_all_buckets(self.server_recovery, 1, self.wait_timeout * 50, self.max_verify, True, 1)
    self._verify_stats_all_buckets([self.server_recovery])
def recover_to_backupdir(self):
"""Recover data with 2.0 couchstore files to a 2.0 backup diretory
We load a number of items to a node first and then do some mutataion on these items.
Later we use cbtransfer to transfer the couchstore files we have on this node to
a backup directory. We use cbrestore to restore these backup files to the same node
for verification."""
self.load_data()
kvs_before = {}
bucket_names = []
self.shell.delete_files(self.backup_location)
self.shell.create_directory(self.backup_location)
for bucket in self.buckets:
kvs_before[bucket.name] = bucket.kvs[1]
bucket_names.append(bucket.name)
transfer_source = "-v -v -v couchstore-files://%s" % (COUCHBASE_DATA_PATH)
transfer_destination = self.backup_location
self.shell.execute_cbtransfer(transfer_source, transfer_destination)
self._all_buckets_delete(self.server_origin)
if self.default_bucket:
self.cluster.create_default_bucket(self.server_origin, self.bucket_size, self.num_replicas)
self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="", num_replicas=self.num_replicas, bucket_size=self.bucket_size))
#.........这里部分代码省略.........