本文整理汇总了Python中remote.remote_util.RemoteMachineShellConnection.remove_directory方法的典型用法代码示例。如果您正苦于以下问题:Python RemoteMachineShellConnection.remove_directory方法的具体用法?Python RemoteMachineShellConnection.remove_directory怎么用?Python RemoteMachineShellConnection.remove_directory使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类remote.remote_util.RemoteMachineShellConnection
的用法示例。
在下文中一共展示了RemoteMachineShellConnection.remove_directory方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: tearDown
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import remove_directory [as 别名]
def tearDown(self):
    """Clean up after a test: remove the temporary backup folder on every
    server in the cluster, closing each shell connection as we go."""
    for node in self.servers:
        self.log.info("delete remote folder @ {0}".format(self.remote_tmp_folder))
        connection = RemoteMachineShellConnection(node)
        connection.remove_directory(self.remote_tmp_folder)
        connection.disconnect()
示例2: download_backups
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import remove_directory [as 别名]
def download_backups(self, backup_location):
    """Download every backup file under *backup_location* on the remote
    server into a freshly created local temp directory, then remove the
    remote directory.

    Parameters:
        backup_location: remote directory path containing the backup files.

    Returns:
        list of dicts ``{'path': <local dir>, 'file': <file name>}`` — one
        entry per downloaded file.
    """
    local_files = []
    cwd = os.getcwd()
    # unique local destination: <cwd>/out/tmp/<uuid>
    local_dir = "{0}/out/tmp/{1}".format(cwd, uuid.uuid4())
    if not os.path.exists(local_dir):
        os.makedirs(local_dir)
    self.log.info("created {0} folder in the local machine...".format(local_dir))
    shell = RemoteMachineShellConnection(self.server)
    try:
        files = shell.list_files(backup_location)
        # 'remote_file' renamed from 'file' to avoid shadowing the builtin
        for remote_file in files:
            self.log.info("downloading remote file {0}".format(remote_file))
            shell.get_file(remote_file['path'], remote_file['file'],
                           "{0}/{1}".format(local_dir, remote_file['file']))
            local_files.append({'path': local_dir, 'file': remote_file['file']})
        # everything is downloaded; the remote copy is no longer needed
        shell.remove_directory(backup_location)
    finally:
        # BUG FIX: the original never closed the shell connection (leak)
        shell.disconnect()
    return local_files
示例3: test_backup_upgrade_restore_default
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import remove_directory [as 别名]
#.........这里部分代码省略.........
self.assertTrue(ready, "wait_for_memcached_failed")
distribution = {10: 0.2, 20: 0.5, 30: 0.25, 40: 0.05}
inserted_keys, rejected_keys = MemcachedClientHelper.load_bucket_and_return_the_keys(servers=[self.master],
name=bucket,
ram_load_ratio=0.5,
value_size_distribution=distribution,
moxi=True,
write_only=True,
delete_ratio=0.1,
number_of_threads=2)
if len(self.servers) > 1:
rest = RestConnection(self.master)
self.assertTrue(RebalanceHelper.wait_for_replication(rest.get_nodes(), timeout=180),
msg="replication did not complete")
ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_queue_size', 0)
self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
ready = RebalanceHelper.wait_for_stats_on_all(self.master, bucket, 'ep_flusher_todo', 0)
self.assertTrue(ready, "wait_for ep_queue_size == 0 failed")
node = RestConnection(self.master).get_nodes_self()
shell = RemoteMachineShellConnection(worker)
o, r = shell.execute_command(perm_comm)
shell.log_command_output(o, r)
shell.disconnect()
#Backup
#BackupHelper(self.master, self).backup(bucket, node, remote_tmp)
shell = RemoteMachineShellConnection(worker)
shell.execute_command("/opt/couchbase/bin/cbbackup http://{0}:{1} {2}".format(
self.master.ip, self.master.port, remote_tmp))
shell.disconnect()
BucketOperationHelper.delete_bucket_or_assert(self.master, bucket, self)
time.sleep(30)
#Upgrade
for server in self.servers:
self.log.info("Upgrading to current version {0}".format(final_version))
remote = RemoteMachineShellConnection(server)
info = remote.extract_remote_info()
new_build = BuildQuery().find_build(builds, product, info.deliverable_type,
info.architecture_type, final_version)
remote.stop_couchbase()
remote.couchbase_uninstall()
remote.download_build(new_build)
remote.install_server(new_build)
rest = RestConnection(server)
RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
rest.init_cluster(server.rest_username, server.rest_password)
rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
remote.disconnect()
time.sleep(30)
#Restore
rest = RestConnection(self.master)
info = rest.get_nodes_self()
size = int(info.memoryQuota * 2.0 / 3.0)
rest.create_bucket(bucket, ramQuotaMB=size)
ready = BucketOperationHelper.wait_for_memcached(server, bucket)
self.assertTrue(ready, "wait_for_memcached_failed")
#BackupHelper(self.master, self).restore(backup_location=remote_tmp, moxi_port=info.moxi)
shell = RemoteMachineShellConnection(worker)
shell.execute_command("/opt/couchbase/bin/cbrestore {2} http://{0}:{1} -b {3}".format(
self.master.ip, self.master.port, remote_tmp, bucket))
shell.disconnect()
time.sleep(60)
keys_exist = BucketOperationHelper.keys_exist_or_assert_in_parallel(inserted_keys, self.master, bucket, self, concurrency=4)
self.assertTrue(keys_exist, msg="unable to verify keys after restore")
time.sleep(30)
BucketOperationHelper.delete_bucket_or_assert(self.master, bucket, self)
rest = RestConnection(self.master)
helper = RestHelper(rest)
nodes = rest.node_statuses()
master_id = rest.get_nodes_self().id
if len(self.servers) > 1:
removed = helper.remove_nodes(knownNodes=[node.id for node in nodes],
ejectedNodes=[node.id for node in nodes if node.id != master_id],
wait_for_rebalance=True )
shell = RemoteMachineShellConnection(worker)
shell.remove_directory(remote_tmp)
shell.disconnect()
self.servers = copy.copy(original_set)
if initial_version == fin:
builds, changes = BuildQuery().get_all_builds(version=initial_version)
for server in self.servers:
remote = RemoteMachineShellConnection(server)
info = remote.extract_remote_info()
self.log.info("Loading version .. {0}".format(initial_version))
older_build = BuildQuery().find_build(builds, product, info.deliverable_type,
info.architecture_type, initial_version)
remote.stop_couchbase()
remote.couchbase_uninstall()
remote.download_build(older_build)
remote.install_server(older_build)
rest = RestConnection(server)
RestHelper(rest).is_ns_server_running(testconstants.NS_SERVER_TIMEOUT)
rest.init_cluster(server.rest_username, server.rest_password)
rest.init_cluster_memoryQuota(memoryQuota=rest.get_nodes_self().mcdMemoryReserved)
remote.disconnect()
示例4: BackupAndRestoreTests
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import remove_directory [as 别名]
class BackupAndRestoreTests(unittest.TestCase):
    """Backup and restore tests against a membase cluster.

    Each test creates a bucket, loads some data, backs the bucket up into a
    unique remote temp folder, recreates the bucket, restores the backup and
    verifies the original keys are present again.
    """

    input = None    # TestInputSingleton.input, assigned in setUp
    servers = None  # list of server specs taken from the test input
    log = None
    membase = None
    shell = None    # RemoteMachineShellConnection to servers[0]

    # simple addnode tests without rebalancing
    # add node to itself
    # add an already added node
    # add node and remove them 10 times serially
    # add node and remove the node in parallel threads later...
    def setUp(self):
        self.log = logger.Logger.get_logger()
        self.input = TestInputSingleton.input
        self.servers = self.input.servers
        self.shell = RemoteMachineShellConnection(self.servers[0])
        self.remote_tmp_folder = None

    # we dont necessarily care about the test case
    def common_setUp(self):
        ClusterOperationHelper.cleanup_cluster(self.servers)
        BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)

    def tearDown(self):
        # BUG FIX: setUp initializes remote_tmp_folder to None, so a test
        # that fails before creating the folder would pass None to
        # remove_directory; only clean up when a folder was actually created.
        if self.remote_tmp_folder is not None:
            self.log.info("delete remote folder @ {0}".format(self.remote_tmp_folder))
            self.shell.remove_directory(self.remote_tmp_folder)
        self.shell.start_membase()

    # add nodes one by one
    def _test_backup_add_restore_bucket_body(self, bucket="default", port_no=11211,
                                             delay_after_data_load=0, startup_flag=True):
        """Shared test body: create *bucket*, load data, back it up to a
        unique /tmp folder, recreate the bucket, restore and verify.

        When *startup_flag* is False the backup and restore run against a
        stopped membase server (it is restarted around bucket operations).
        """
        self.remote_tmp_folder = "/tmp/{0}-{1}".format("mbbackuptestdefaultbucket", uuid.uuid4())
        master = self.servers[0]
        node = RestConnection(master).get_nodes_self()
        BucketOperationHelper.delete_bucket_or_assert(master, bucket, self)
        BucketOperationHelper.create_bucket(serverInfo=master, name=bucket, replica=1,
                                            port=port_no, test_case=self)
        keys = BucketOperationHelper.load_some_data(master, bucket_name=bucket, test=self)
        if not startup_flag:
            self.shell.stop_membase()
        else:
            self.log.info("Sleep {0} seconds after data load".format(delay_after_data_load))
            time.sleep(delay_after_data_load)
        # let's create a unique folder in the remote location
        output, error = self.shell.execute_command("mkdir -p {0}".format(self.remote_tmp_folder))
        self.shell.log_command_output(output, error)
        # now let's back up
        BackupHelper(master, self).backup(bucket, node, self.remote_tmp_folder)
        if not startup_flag:
            self.shell.start_membase()
        BucketOperationHelper.delete_bucket_or_assert(master, bucket, self)
        BucketOperationHelper.create_bucket(serverInfo=master, name=bucket, replica=1,
                                            port=port_no, test_case=self)
        if not startup_flag:
            self.shell.stop_membase()
        BackupHelper(master, self).restore(self.remote_tmp_folder)
        if not startup_flag:
            self.shell.start_membase()
        BucketOperationHelper.verify_data(master.ip, keys, False, False, port_no, self)

    def test_backup_add_restore_default_bucket_started_server(self):
        self.common_setUp()
        self._test_backup_add_restore_bucket_body()

    def test_backup_add_restore_non_default_bucket_started_server(self):
        self.common_setUp()
        self._test_backup_add_restore_bucket_body(bucket="test_bucket", port_no=11220)

    def test_backup_add_restore_default_bucket_non_started_server(self):
        self.common_setUp()
        self._test_backup_add_restore_bucket_body(startup_flag=False)

    def test_backup_add_restore_non_default_bucket_non_started_server(self):
        self.common_setUp()
        self._test_backup_add_restore_bucket_body(bucket="test_bucket", port_no=11220,
                                                  startup_flag=False)

    def test_backup_add_restore_when_ide(self):
        self.common_setUp()
        self._test_backup_add_restore_bucket_body(delay_after_data_load=120)