本文整理汇总了Python中remote.remote_util.RemoteMachineShellConnection.create_directory方法的典型用法代码示例。如果您正苦于以下问题:Python RemoteMachineShellConnection.create_directory方法的具体用法?Python RemoteMachineShellConnection.create_directory怎么用?Python RemoteMachineShellConnection.create_directory使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类remote.remote_util.RemoteMachineShellConnection的用法示例。
在下文中一共展示了RemoteMachineShellConnection.create_directory方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_folderMisMatchCluster
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import create_directory [as 别名]
def test_folderMisMatchCluster(self):
    """Point the audit log at a directory created on only one node, then
    create a bucket on every node and verify the audit events are either
    readable or reported as unreadable per node.

    Fixes: 'orginalPath' -> 'originalPath' (NameError), 'sever' -> 'server'
    (NameError), 'setsetAuditLogPath' -> 'setAuditLogPath', and defines the
    previously-undefined 'source'/'user' locals used in expectedResults.
    """
    auditIns = audit(host=self.master)
    originalPath = auditIns.getAuditLogPath()
    newPath = originalPath + 'testFolderMisMatch'
    shell = RemoteMachineShellConnection(self.servers[0])
    try:
        shell.create_directory(newPath)
        # auditd runs as couchbase; it must own the new directory to write there.
        command = 'chown couchbase:couchbase ' + newPath
        shell.execute_command(command)
    finally:
        shell.disconnect()
    auditIns.setAuditLogPath(newPath)
    # Values audited for config changes made through ns_server's REST API.
    source = 'ns_server'
    user = self.master.rest_username
    for server in self.servers:
        rest = RestConnection(server)
        # Create an Event for Bucket Creation
        expectedResults = {'name': 'TestBucket ' + server.ip, 'ram_quota': 536870912, 'num_replicas': 1,
                           'replica_index': False, 'eviction_policy': 'value_only', 'type': 'membase',
                           'auth_type': 'sasl', "autocompaction": 'false', "purge_interval": "undefined",
                           "flush_enabled": False, "num_threads": 3, "source": source,
                           "user": user, "ip": self.ipAddress, "port": 57457, 'sessionid': ''}
        rest.create_bucket(expectedResults['name'], expectedResults['ram_quota'] / 1048576,
                           expectedResults['auth_type'], 'password', expectedResults['num_replicas'],
                           '11211', 'membase', 0, expectedResults['num_threads'],
                           expectedResults['flush_enabled'], 'valueOnly')
        # Check on Events; best-effort — a node whose audit dir is missing is
        # expected to fail the read, so only log it (narrowed from bare except:).
        try:
            self.checkConfig(self.eventID, self.servers[0], expectedResults)
        except Exception:
            self.log.info("Issue reading the file at Node {0}".format(server.ip))
示例2: createRemoteFolder
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import create_directory [as 别名]
def createRemoteFolder(self, host, newPath):
    """Create *newPath* on *host* and hand its ownership to couchbase:couchbase.

    The shell connection is always closed, even if directory creation fails.
    """
    conn = RemoteMachineShellConnection(host)
    try:
        conn.create_directory(newPath)
        conn.execute_command('chown couchbase:couchbase ' + newPath)
    finally:
        conn.disconnect()
示例3: test_changeLogPath
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import create_directory [as 别名]
def test_changeLogPath(self):
    """Change the audit log path on a running cluster and verify that a new
    audit file appears in the new directory on every node after an audited
    event (autofailover settings update).

    Fix: the original wrapped self.assertTrue(result, ...) in
    'if result is False:' — assertTrue already fails on a falsy value, so the
    guard was redundant and is removed.
    """
    nodes_init = self.input.param("nodes_init", 0)
    auditMaster = audit(host=self.servers[0])
    auditSecNode = audit(host=self.servers[1])
    # Capture original Audit Log Path
    originalPath = auditMaster.getAuditLogPath()
    # Create folders on CB server machines and change permission
    try:
        newPath = auditMaster.getAuditLogPath() + "folder"
        for server in self.servers[:nodes_init]:
            shell = RemoteMachineShellConnection(server)
            try:
                shell.create_directory(newPath)
                # auditd (couchbase user) must own the directory to write to it
                command = 'chown couchbase:couchbase ' + newPath
                shell.execute_command(command)
            finally:
                shell.disconnect()
        source = 'ns_server'
        user = self.master.rest_username
        auditMaster.setAuditLogPath(newPath)
        # Create an event of Updating autofailover settings
        for server in self.servers[:nodes_init]:
            rest = RestConnection(server)
            expectedResults = {'max_nodes': 1, "timeout": 120, 'source': source,
                               "user": user, 'ip': self.ipAddress, 'port': 12345}
            rest.update_autofailover_settings(True, expectedResults['timeout'])
            self.sleep(120, 'Waiting for new audit file to get created')
            # check for audit.log remotely
            shell = RemoteMachineShellConnection(server)
            try:
                result = shell.file_exists(newPath, auditMaster.AUDITLOGFILENAME)
            finally:
                shell.disconnect()
            self.assertTrue(result, 'Issue with file getting create in new directory')
    finally:
        # Always restore the original path so later tests are unaffected.
        auditMaster.setAuditLogPath(originalPath)
示例4: _create_inbox_folder
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import create_directory [as 别名]
def _create_inbox_folder(self, host):
    """Create the x509 certificate 'inbox' folder on *host*.

    Fixes: the original ignored the *host* argument and always connected to
    self.host, and it never disconnected the shell (connection leak).
    """
    shell = RemoteMachineShellConnection(host)
    try:
        final_path = self.install_path + x509main.CHAINFILEPATH
        shell.create_directory(final_path)
    finally:
        shell.disconnect()
示例5: offline_cluster_upgrade_non_default_path
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import create_directory [as 别名]
def offline_cluster_upgrade_non_default_path(self):
    """Offline-upgrade a cluster whose first N nodes use non-default
    data/index paths, optionally wiping data on some nodes, then verify the
    paths survived the upgrade and the buckets/data are in the expected state.

    Fixes: 'servers_with_not_default' and 'old_paths' are now initialised
    before the try block — previously an early exception made the finally
    clause raise NameError, masking the real failure. The bare py2 print
    statement is parenthesised (valid in both py2 and py3).
    """
    servers_with_not_default = []
    old_paths = {}
    try:
        num_nodes_with_not_default = self.input.param('num_nodes_with_not_default', 1)
        prefix_path = ''
        if not self.is_linux:
            prefix_path = "C:"
        # '|' is the test-config-safe separator for '/' in paths
        data_path = prefix_path + self.input.param('data_path', '/tmp/data').replace('|', "/")
        index_path = self.input.param('index_path', data_path).replace('|', "/")
        if not self.is_linux and not index_path.startswith("C:"):
            index_path = prefix_path + index_path
        num_nodes_remove_data = self.input.param('num_nodes_remove_data', 0)
        servers_with_not_default = self.servers[:num_nodes_with_not_default]
        for server in servers_with_not_default:
            # to restore servers paths in finally
            old_paths[server.ip] = [server.data_path, server.index_path]
            server.data_path = data_path
            server.index_path = index_path
            shell = RemoteMachineShellConnection(server)
            #shell.remove_folders([data_path, index_path])
            for path in set([data_path, index_path]):
                shell.create_directory(path)
            shell.disconnect()
        self._install(self.servers[:self.nodes_init])
        self.operations(self.servers[:self.nodes_init])
        if self.ddocs_num and not self.input.param('extra_verification', False):
            self.create_ddocs_and_views()
        for upgrade_version in self.upgrade_versions:
            self.sleep(self.sleep_time,
                       "Pre-setup of old version is done. Wait for upgrade to {0} version".format(upgrade_version))
            for server in self.servers[:self.nodes_init]:
                remote = RemoteMachineShellConnection(server)
                remote.stop_server()
                remote.disconnect()
            self.sleep(self.sleep_time)
            # remove data for nodes with non default data paths
            tmp = min(num_nodes_with_not_default, num_nodes_remove_data)
            self.delete_data(self.servers[:tmp], [data_path + "/*", index_path + "/*"])
            # remove data for nodes with default data paths
            self.delete_data(self.servers[tmp: max(tmp, num_nodes_remove_data)],
                             ["/opt/couchbase/var/lib/couchbase/data/*"])
            upgrade_threads = self._async_update(upgrade_version, self.servers[:self.nodes_init])
            for upgrade_thread in upgrade_threads:
                upgrade_thread.join()
            success_upgrade = True
            while not self.queue.empty():
                success_upgrade &= self.queue.get()
            if not success_upgrade:
                self.fail("Upgrade failed!")
            self.dcp_rebalance_in_offline_upgrade_from_version2_to_version3()
            self.sleep(self.expire_time)
            for server in servers_with_not_default:
                rest = RestConnection(server)
                node = rest.get_nodes_self()
                print(node.storage[0])
                self.assertEqual(node.storage[0].path.lower(), data_path.lower(),
                                 "Server %s. Data path expected:%s, actual %s." % (
                                     server.ip, data_path, node.storage[0].path))
                self.assertEqual(node.storage[0].index_path.lower(), index_path.lower(),
                                 "Server %s. Index path expected: %s, actual: %s." % (
                                     server.ip, index_path, node.storage[0].index_path))
            # NOTE(review): nesting below reconstructed from upstream layout —
            # bucket/data checks run once per upgrade_version iteration.
            if num_nodes_remove_data:
                for bucket in self.buckets:
                    if self.rest_helper.bucket_exists(bucket):
                        raise Exception("bucket: %s still exists" % bucket.name)
                self.buckets = []
            if self.input.param('extra_verification', False):
                self.bucket_size = 100
                self._create_sasl_buckets(self.master, 1)
                self._create_standard_buckets(self.master, 1)
                if self.ddocs_num:
                    self.create_ddocs_and_views()
                    gen_load = BlobGenerator('upgrade', 'upgrade-', self.value_size, end=self.num_items)
                    self._load_all_buckets(self.master, gen_load, "create", self.expire_time, flag=self.item_flag)
            self.verification(self.servers[:self.nodes_init], check_items=not num_nodes_remove_data)
    finally:
        # Restore each server object's original paths for subsequent tests.
        for server in servers_with_not_default:
            server.data_path = old_paths[server.ip][0]
            server.index_path = old_paths[server.ip][1]
示例6: RecoveryUseTransferTests
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import create_directory [as 别名]
class RecoveryUseTransferTests(TransferBaseTest):
def setUp(self):
    """Open a shell to the origin server and record its OS type for cleanup."""
    self.times_teardown_called = 1
    super(RecoveryUseTransferTests, self).setUp()
    self.server_origin = self.servers[0]
    self.server_recovery = self.servers[1]
    self.shell = RemoteMachineShellConnection(self.server_origin)
    remote_info = self.shell.extract_remote_info()
    self.os = remote_info.type.lower()
def tearDown(self):
    """Delete backup files, close the shell and drop bucket refs when the
    test has actually run.

    Fix: the original duplicated the identical cleanup block in both the
    skip_cleanup and non-skip_cleanup branches; only the trigger condition
    differed, so the condition is computed once and the cleanup written once.
    """
    skip_cleanup = self.input.param("skip_cleanup", True)
    if skip_cleanup:
        do_cleanup = self.case_number > 1 or self.times_teardown_called > 1
    else:
        do_cleanup = self.times_teardown_called > 1
    if do_cleanup:
        if self.os == 'windows':
            # on Windows the backup dir lives under the cygwin C: mount
            self.shell.delete_files("/cygdrive/c%s" % (self.backup_location))
        else:
            self.shell.delete_files(self.backup_location)
        self.shell.disconnect()
        del self.buckets
    self.times_teardown_called += 1
    super(RecoveryUseTransferTests, self).tearDown()
def recover_to_cbserver(self):
    """Recover data with 2.0 couchstore files to a 2.0 online server
    We load a number of items to one node first and then do some mutation on these items.
    Later we use cbtranfer to transfer the couchstore files we have on this
    node to a new node. We verify the data by comparison between the items in KVStore
    and items in the new node.

    Fix: the transfer_destination format string was corrupted by the scrape
    ('%[email protected]%s'); restored to '%s@%s' so its five placeholders match
    the five arguments (login info, ip, port, bucket, bucket).
    """
    self.load_data()
    kvs_before = {}
    bucket_names = []
    # Remember each bucket's KVStore before recreating buckets on the target.
    for bucket in self.buckets:
        kvs_before[bucket.name] = bucket.kvs[1]
        bucket_names.append(bucket.name)
    if self.default_bucket:
        self.cluster.create_default_bucket(self.server_recovery, self.bucket_size, self.num_replicas)
        self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="",
                                   num_replicas=self.num_replicas, bucket_size=self.bucket_size))
    self._create_sasl_buckets(self.server_recovery, self.sasl_buckets)
    self._create_standard_buckets(self.server_recovery, self.standard_buckets)
    for bucket in self.buckets:
        # Restore the saved KVStore and push this bucket's files to the target.
        bucket.kvs[1] = kvs_before[bucket.name]
        transfer_source = "couchstore-files://%s" % (COUCHBASE_DATA_PATH)
        transfer_destination = "http://%s@%s:%s -b %s -B %s -v -v -v" % (self.couchbase_login_info,
                                                                         self.server_recovery.ip,
                                                                         self.server_recovery.port,
                                                                         bucket.name, bucket.name)
        self.shell.execute_cbtransfer(transfer_source, transfer_destination)
    del kvs_before
    time.sleep(self.expire_time + 1)
    # Force the expiry pager to run soon so expired items are purged before verify.
    shell_server_recovery = RemoteMachineShellConnection(self.server_recovery)
    for bucket in self.buckets:
        shell_server_recovery.execute_cbepctl(bucket, "", "set flush_param", "exp_pager_stime", 5)
    shell_server_recovery.disconnect()
    time.sleep(30)
    self._wait_for_stats_all_buckets([self.server_recovery])
    self._verify_all_buckets(self.server_recovery, 1, self.wait_timeout * 50, self.max_verify, True, 1)
    self._verify_stats_all_buckets([self.server_recovery])
def recover_to_backupdir(self):
"""Recover data with 2.0 couchstore files to a 2.0 backup diretory
We load a number of items to a node first and then do some mutataion on these items.
Later we use cbtransfer to transfer the couchstore files we have on this node to
a backup directory. We use cbrestore to restore these backup files to the same node
for verification."""
self.load_data()
kvs_before = {}
bucket_names = []
self.shell.delete_files(self.backup_location)
self.shell.create_directory(self.backup_location)
for bucket in self.buckets:
kvs_before[bucket.name] = bucket.kvs[1]
bucket_names.append(bucket.name)
transfer_source = "-v -v -v couchstore-files://%s" % (COUCHBASE_DATA_PATH)
transfer_destination = self.backup_location
self.shell.execute_cbtransfer(transfer_source, transfer_destination)
self._all_buckets_delete(self.server_origin)
if self.default_bucket:
self.cluster.create_default_bucket(self.server_origin, self.bucket_size, self.num_replicas)
self.buckets.append(Bucket(name="default", authType="sasl", saslPassword="", num_replicas=self.num_replicas, bucket_size=self.bucket_size))
#.........这里部分代码省略.........
示例7: QueriesUpgradeTests
# 需要导入模块: from remote.remote_util import RemoteMachineShellConnection [as 别名]
# 或者: from remote.remote_util.RemoteMachineShellConnection import create_directory [as 别名]
class QueriesUpgradeTests(QueryTests, NewUpgradeBaseTest):
def setUp(self):
    """Per-test setup: flush and reload buckets, then prepare the fixtures
    required by the feature selected via the 'feature' test parameter
    (ansi-joins, backfill, xattrs, curl-whitelist, auditing)."""
    super(QueriesUpgradeTests, self).setUp()
    # suite_setUp is dispatched through the same machinery; skip per-test work.
    if self._testMethodName == 'suite_setUp':
        return
    self.log.info("============== QueriesUpgradeTests setup has started ==============")
    # general setup
    self.feature = self.input.param('feature', None)
    self.upgrade_type = self.input.param('upgrade_type', None)
    self._all_buckets_flush()
    self.load(self.gens_load, flag=self.item_flag)
    # Expected per-bucket doc counts and health states for later checks.
    self.bucket_doc_map = {"default": 2016, "standard_bucket0": 2016}
    self.bucket_status_map = {"default": "healthy", "standard_bucket0": "healthy"}
    # feature specific setup
    if self.feature == "ansi-joins":
        # Load the travel-sample bucket and register it in the expected maps.
        self.rest.load_sample("travel-sample")
        self.bucket_doc_map["travel-sample"] = 31591
        self.bucket_status_map["travel-sample"] = "healthy"
    if self.feature == "backfill":
        # Parameters driving the querySettings backfill-directory scenarios.
        self.directory_path = self.input.param("directory_path", "/opt/couchbase/var/lib/couchbase/tmp")
        self.create_directory = self.input.param("create_directory", True)
        self.tmp_size = self.input.param("tmp_size", 5120)
        self.nonint_size = self.input.param("nonint_size", False)
        self.out_of_range_size = self.input.param("out_of_range_size", False)
        self.set_backfill_directory = self.input.param("set_backfill_directory", True)
        self.change_directory = self.input.param("change_directory", False)
        self.reset_settings = self.input.param("reset_settings", False)
        self.curl_url = "http://%s:%s/settings/querySettings" % (self.master.ip, self.master.port)
    if self.feature == "xattrs":
        # Accumulators for extended-attribute test data and document meta ids.
        self.system_xattr_data = []
        self.user_xattr_data = []
        self.meta_ids = []
    if self.feature == "curl-whitelist":
        # NOTE(review): these comparison strings contain no whitespace —
        # presumably matched against whitespace-squashed query error output;
        # confirm against the checker before editing them.
        self.google_error_msg = "Errorevaluatingprojection.-cause:URLendpointisn'twhitelisted" \
                                "https://maps.googleapis.com/maps/api/geocode/json."
        self.jira_error_msg = "Errorevaluatingprojection.-cause:URLendpointisn'twhitelistedhttps://jira.atlassian." \
                              "com/rest/api/latest/issue/JRA-9.PleasemakesuretowhitelisttheURLontheUI."
        # Command line to reach cbq on the master's n1ql port.
        self.cbqpath = '%scbq' % self.path + " -e %s:%s -q -u %s -p %s" \
                       % (self.master.ip, self.n1ql_port, self.rest.username, self.rest.password)
    if self.feature == "auditing":
        # Audit event ids exercised by the auditing upgrade tests.
        self.audit_codes = [28672, 28673, 28674, 28675, 28676, 28677, 28678, 28679, 28680, 28681,
                            28682, 28683, 28684, 28685, 28686, 28687, 28688]
        self.unauditedID = self.input.param("unauditedID", "")
        self.audit_url = "http://%s:%s/settings/audit" % (self.master.ip, self.master.port)
        self.filter = self.input.param("filter", False)
    self.log.info("============== QueriesUpgradeTests setup has completed ==============")
def suite_setUp(self):
    """Run the shared suite-level setup, then emit the progress banners."""
    super(QueriesUpgradeTests, self).suite_setUp()
    banner = "============== QueriesUpgradeTests suite_setup has %s =============="
    self.log.info(banner % "started")
    self.log.info(banner % "completed")
def tearDown(self):
    """Mark all servers for upgrade-aware cleanup, then defer to the parent."""
    banner = "============== QueriesUpgradeTests tearDown has %s =============="
    self.log.info(banner % "started")
    self.upgrade_servers = self.servers
    self.log.info(banner % "completed")
    super(QueriesUpgradeTests, self).tearDown()
def suite_tearDown(self):
    """Emit the progress banners, then run the shared suite-level teardown."""
    banner = "============== QueriesUpgradeTests suite_tearDown has %s =============="
    self.log.info(banner % "started")
    self.log.info(banner % "completed")
    super(QueriesUpgradeTests, self).suite_tearDown()
# old test
# old test
def test_mixed_cluster(self):
    """Upgrade only the second node, rebalance it in, point cbq at it and run
    the parameterised query test against the mixed-version cluster.

    Fix: the final loop abused a conditional expression for its side effect
    ('th._Thread__stop() if ... else None'); rewritten as an explicit if.
    """
    self._kill_all_processes_cbq()
    self.assertTrue(len(self.servers) > 1, 'Test needs more than 1 server')
    method_name = self.input.param('to_run', 'test_all_negative')
    self._install(self.servers[:2])
    self.bucket_size = 100
    self._bucket_creation()
    self.load(self.gens_load, flag=self.item_flag)
    upgrade_threads = self._async_update(self.upgrade_versions[0], [self.servers[1]], None, True)
    for upgrade_thread in upgrade_threads:
        upgrade_thread.join()
    self.cluster.rebalance(self.servers[:1], self.servers[1:2], [])
    self.shell = RemoteMachineShellConnection(self.servers[1])
    self._kill_all_processes_cbq()
    self._start_command_line_query(self.servers[1])
    self.shell.execute_command("ps -aef| grep cbq-engine")
    # Run the selected sub-test against the upgraded node.
    self.master = self.servers[1]
    getattr(self, method_name)()
    # Stop every leftover thread except this one.
    # NOTE(review): _Thread__stop is a private Python 2 API with no py3
    # equivalent; works only on py2 interpreters.
    for th in threading.enumerate():
        if th != threading.current_thread():
            th._Thread__stop()
# old test
def test_upgrade_old(self):
self._kill_all_processes_cbq()
method_name = self.input.param('to_run', 'test_any')
self._install(self.servers[:2])
self.bucket_size = 100
self._bucket_creation()
self.load(self.gens_load, flag=self.item_flag)
self.cluster.rebalance(self.servers[:1], self.servers[1:2], [])
upgrade_threads = self._async_update(self.upgrade_versions[0], self.servers[:2])
for upgrade_thread in upgrade_threads:
upgrade_thread.join()
#.........这里部分代码省略.........