本文整理汇总了Python中ovs.extensions.generic.sshclient.SSHClient.dir_delete方法的典型用法代码示例。如果您正苦于以下问题:Python SSHClient.dir_delete方法的具体用法?Python SSHClient.dir_delete怎么用?Python SSHClient.dir_delete使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类ovs.extensions.generic.sshclient.SSHClient
的用法示例。
在下文中一共展示了SSHClient.dir_delete方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: setUp
# 需要导入模块: from ovs.extensions.generic.sshclient import SSHClient [as 别名]
# 或者: from ovs.extensions.generic.sshclient.SSHClient import dir_delete [as 别名]
def setUp(cls):
    """
    Reset the temporary database and configuration directories on every test node.
    """
    for node in TestArakoonInstaller.nodes:
        ssh_client = SSHClient(node)
        # Wipe both scratch directories first, then recreate them empty
        for directory in ('/tmp/db', '/tmp/cfg'):
            ssh_client.dir_delete(directory)
        for directory in ('/tmp/db', '/tmp/cfg'):
            ssh_client.dir_create(directory)
示例2: delete_volume
# 需要导入模块: from ovs.extensions.generic.sshclient import SSHClient [as 别名]
# 或者: from ovs.extensions.generic.sshclient.SSHClient import dir_delete [as 别名]
def delete_volume(vdisk, vpool, loop_device=None, root_client=None, wait=True):
    """
    Delete a volume
    :param vdisk: Virtual disk to delete
    :param vpool: vPool which hosts the Virtual Disk
    :param loop_device: Loop device where volume is mounted on
    :param root_client: SSHClient object
    :param wait: Wait for the volume to be deleted from model
    :return: None
    :raises RuntimeError: If the disk is still modelled after the timeout expired
    """
    location = GeneralVDisk.get_filesystem_location(vpool=vpool,
                                                    vdisk_name=vdisk.name)
    if root_client is None:
        root_client = SSHClient('127.0.0.1', username='root')
    if loop_device is not None:
        # Unmount and detach the loop device before removing its mount point
        root_client.run('umount /dev/{0}'.format(loop_device))
        root_client.run('losetup -d /dev/{0}'.format(loop_device))
        root_client.dir_delete('/mnt/{0}'.format(loop_device))
    root_client.file_delete(location)
    if wait is True:
        counter = 0
        timeout = 60
        volume_name = os.path.basename(location).replace('-flat.vmdk', '').replace('.raw', '')
        # Poll the model once per second until the vdisk disappears or we time out.
        # (Fixed: original condition was 'while True and counter < timeout' — the
        # 'True and' was redundant and misleading.)
        while counter < timeout:
            time.sleep(1)
            vdisks = GeneralVDisk.get_vdisk_by_name(name=volume_name)
            if vdisks is None:
                break
            counter += 1
        if counter == timeout:
            raise RuntimeError('Disk {0} was not deleted from model after {1} seconds'.format(volume_name, timeout))
示例3: setUp
# 需要导入模块: from ovs.extensions.generic.sshclient import SSHClient [as 别名]
# 或者: from ovs.extensions.generic.sshclient.SSHClient import dir_delete [as 别名]
def setUp(cls):
    """
    Wipe and recreate the Arakoon database and configuration directories on each node.
    """
    for node in TestArakoonInstaller.nodes:
        ovs_client = SSHClient(node)
        admin_client = SSHClient(node, username='root')
        # The database directory requires root privileges
        admin_client.dir_delete('/tmp/db')
        admin_client.dir_create('/tmp/db')
        # The cluster configuration is managed by the regular user
        config_path = TestArakoonInstaller.cluster_config_path
        ovs_client.dir_delete(config_path)
        ovs_client.dir_create(config_path)
示例4: extend_cluster
# 需要导入模块: from ovs.extensions.generic.sshclient import SSHClient [as 别名]
# 或者: from ovs.extensions.generic.sshclient.SSHClient import dir_delete [as 别名]
def extend_cluster(master_ip, new_ip, cluster_name):
"""
Extends a cluster to a given new node
:param cluster_name: Name of the cluster to be extended
:param new_ip: IP address of the node to be added
:param master_ip: IP of one of the already existing nodes
"""
logger.debug('Extending cluster "{0}" from {1} to {2}'.format(cluster_name, master_ip, new_ip))
client = SSHClient(master_ip, username='root')
if not EtcdInstaller._is_healty(cluster_name, client):
raise RuntimeError('Cluster "{0}" unhealthy, aborting extend'.format(cluster_name))
cluster_members = client.run('etcdctl member list').splitlines()
for cluster_member in cluster_members:
if EtcdInstaller.SERVER_URL.format(new_ip) in cluster_member:
logger.info('Node {0} already member of etcd cluster'.format(new_ip))
return
current_cluster = []
for item in client.run('etcdctl member list').splitlines():
info = re.search(EtcdInstaller.MEMBER_REGEX, item).groupdict()
current_cluster.append('{0}={1}'.format(info['name'], info['peer']))
client = SSHClient(new_ip, username='root')
node_name = System.get_my_machine_id(client)
current_cluster.append('{0}={1}'.format(node_name, EtcdInstaller.SERVER_URL.format(new_ip)))
data_dir = EtcdInstaller.DATA_DIR.format(EtcdInstaller.DB_DIR, cluster_name)
wal_dir = EtcdInstaller.WAL_DIR.format(EtcdInstaller.DB_DIR, cluster_name)
abs_paths = [data_dir, wal_dir]
client.dir_delete(abs_paths)
client.dir_create(abs_paths)
client.dir_chmod(abs_paths, 0755, recursive=True)
client.dir_chown(abs_paths, 'ovs', 'ovs', recursive=True)
base_name = 'ovs-etcd'
target_name = 'ovs-etcd-{0}'.format(cluster_name)
EtcdInstaller.stop(cluster_name, client) # Stop a possible proxy service
ServiceManager.add_service(base_name, client,
params={'CLUSTER': cluster_name,
'NODE_ID': node_name,
'DATA_DIR': data_dir,
'WAL_DIR': wal_dir,
'SERVER_URL': EtcdInstaller.SERVER_URL.format(new_ip),
'CLIENT_URL': EtcdInstaller.CLIENT_URL.format(new_ip),
'LOCAL_CLIENT_URL': EtcdInstaller.CLIENT_URL.format('127.0.0.1'),
'INITIAL_CLUSTER': ','.join(current_cluster),
'INITIAL_STATE': 'existing',
'INITIAL_PEERS': ''},
target_name=target_name)
master_client = SSHClient(master_ip, username='root')
master_client.run('etcdctl member add {0} {1}'.format(node_name, EtcdInstaller.SERVER_URL.format(new_ip)))
EtcdInstaller.start(cluster_name, client)
EtcdInstaller.wait_for_cluster(cluster_name, client)
logger.debug('Extending cluster "{0}" from {1} to {2} completed'.format(cluster_name, master_ip, new_ip))
示例5: remove_mds_service
# 需要导入模块: from ovs.extensions.generic.sshclient import SSHClient [as 别名]
# 或者: from ovs.extensions.generic.sshclient.SSHClient import dir_delete [as 别名]
def remove_mds_service(mds_service, vpool, reload_config):
    """
    Removes an MDS service
    :param mds_service: The MDS service to remove
    :param vpool: The vPool for which the MDS service will be removed
    :param reload_config: If True, the volumedriver's updated configuration will be reloaded
    :raises RuntimeError: If the MDS service still serves vdisks, or if the
                          directory cleanup keeps failing after all retries
    """
    if len(mds_service.vdisks_guids) > 0:
        raise RuntimeError('Cannot remove MDSService that is still serving disks')
    storagerouter = mds_service.service.storagerouter
    client = SSHClient(storagerouter)
    mdsservice_type = ServiceTypeList.get_by_name('MetadataServer')
    # Clean up model: remember the on-disk paths before deleting the partition
    # objects, so they can be removed from the filesystem afterwards
    directories_to_clean = []
    for sd_partition in mds_service.storagedriver_partitions:
        directories_to_clean.append(sd_partition.path)
        sd_partition.delete()
    mds_service.delete()
    mds_service.service.delete()
    # Generate new mds_nodes section from the remaining MDS services on this
    # storagerouter for this vPool
    mds_nodes = []
    for service in mdsservice_type.services:
        if service.storagerouter_guid == storagerouter.guid:
            mds_service = service.mds_service
            if mds_service.vpool_guid == vpool.guid:
                # assumes exactly one DB-role partition per remaining service — TODO confirm
                sdp = [sd_partition.path for sd_partition in mds_service.storagedriver_partitions if sd_partition.role == DiskPartition.ROLES.DB]
                mds_nodes.append({'host': service.storagerouter.ip,
                                  'port': service.ports[0],
                                  'db_directory': sdp[0],
                                  'scratch_directory': sdp[0]})
    # Generate the correct section in the Storage Driver's configuration
    storagedriver_config = StorageDriverConfiguration('storagedriver', vpool.name)
    storagedriver_config.load(client)
    storagedriver_config.clean()  # Clean out obsolete values
    storagedriver_config.configure_metadata_server(mds_nodes=mds_nodes)
    storagedriver_config.save(client, reload_config=reload_config)
    # Retry the directory cleanup: the MDS process may still hold the
    # directories until it has fully shut down
    tries = 5
    while tries > 0:
        try:
            root_client = SSHClient(storagerouter, username='root')
            root_client.dir_delete(directories=directories_to_clean,
                                   follow_symlinks=True)
            for dir_name in directories_to_clean:
                logger.debug('Recursively removed {0}'.format(dir_name))
            break
        except Exception:
            time.sleep(5)
            logger.debug('Waiting for the MDS service to go down...')
            tries -= 1
            if tries == 0:
                # Out of retries: re-raise the last cleanup error
                raise
示例6: create_cluster
# 需要导入模块: from ovs.extensions.generic.sshclient import SSHClient [as 别名]
# 或者: from ovs.extensions.generic.sshclient.SSHClient import dir_delete [as 别名]
def create_cluster(cluster_name, ip, server_port=DEFAULT_SERVER_PORT, client_port=DEFAULT_CLIENT_PORT):
    """
    Creates a cluster
    :param cluster_name: Name of the cluster
    :type cluster_name: str
    :param ip: IP address of the first node of the new cluster
    :type ip: str
    :param server_port: Port to be used by server
    :type server_port: int
    :param client_port: Port to be used by client
    :type client_port: int
    :return: None
    """
    EtcdInstaller._logger.debug('Creating cluster "{0}" on {1}'.format(cluster_name, ip))
    client = SSHClient(ip, username='root')
    target_name = 'ovs-etcd-{0}'.format(cluster_name)
    # Idempotency: if the service already exists and is running, nothing to do
    if ServiceManager.has_service(target_name, client) and ServiceManager.get_service_status(target_name, client) is True:
        EtcdInstaller._logger.info('Service {0} already configured and running'.format(target_name))
        return
    node_name = System.get_my_machine_id(client)
    data_dir = EtcdInstaller.DATA_DIR.format(cluster_name)
    wal_dir = EtcdInstaller.WAL_DIR.format(cluster_name)
    abs_paths = [data_dir, wal_dir]
    # Start from empty data/WAL directories, owned by the ovs user
    client.dir_delete(abs_paths)
    client.dir_create(abs_paths)
    client.dir_chmod(abs_paths, 0755, recursive=True)
    client.dir_chown(abs_paths, 'ovs', 'ovs', recursive=True)
    base_name = 'ovs-etcd'
    # A single-node bootstrap: the initial cluster consists of this node only,
    # hence INITIAL_STATE 'new' and self-advertised peer URLs
    ServiceManager.add_service(base_name, client,
                               params={'CLUSTER': cluster_name,
                                       'NODE_ID': node_name,
                                       'DATA_DIR': data_dir,
                                       'WAL_DIR': wal_dir,
                                       'SERVER_URL': EtcdInstaller.SERVER_URL.format(ip, server_port),
                                       'CLIENT_URL': EtcdInstaller.CLIENT_URL.format(ip, client_port),
                                       'LOCAL_CLIENT_URL': EtcdInstaller.CLIENT_URL.format('127.0.0.1', client_port),
                                       'INITIAL_CLUSTER': '{0}={1}'.format(node_name, EtcdInstaller.SERVER_URL.format(ip, server_port)),
                                       'INITIAL_STATE': 'new',
                                       'INITIAL_PEERS': '-initial-advertise-peer-urls {0}'.format(EtcdInstaller.SERVER_URL.format(ip, server_port))},
                               target_name=target_name)
    EtcdInstaller.start(cluster_name, client)
    EtcdInstaller.wait_for_cluster(cluster_name, client, client_port=client_port)
    EtcdInstaller._logger.debug('Creating cluster "{0}" on {1} completed'.format(cluster_name, ip))
示例7: remove_proxy
# 需要导入模块: from ovs.extensions.generic.sshclient import SSHClient [as 别名]
# 或者: from ovs.extensions.generic.sshclient.SSHClient import dir_delete [as 别名]
def remove_proxy(cluster_name, ip):
    """
    Remove a proxy
    :param cluster_name: Name of cluster
    :type cluster_name: str
    :param ip: IP of the node on which to remove the proxy
    :type ip: str
    :return: None
    """
    client = SSHClient(ip, username='root')
    # Stop and unregister the proxy service before touching its directories
    EtcdInstaller.stop(cluster_name=cluster_name, client=client)
    EtcdInstaller.remove(cluster_name=cluster_name, client=client)
    # Clean up the write-ahead-log and data directories of the removed proxy
    client.dir_delete([EtcdInstaller.WAL_DIR.format(cluster_name),
                       EtcdInstaller.DATA_DIR.format(cluster_name)])
示例8: _destroy_node
# 需要导入模块: from ovs.extensions.generic.sshclient import SSHClient [as 别名]
# 或者: from ovs.extensions.generic.sshclient.SSHClient import dir_delete [as 别名]
def _destroy_node(config, node):
    """
    Tear down one cluster node: stop and remove its services, wipe its
    directories and drop its configuration file.
    """
    logger.debug("Destroy node {0} in cluster {1}".format(node.ip, config.cluster_id))
    # Services are managed as root; the configuration file belongs to the ovs user
    regular_client = SSHClient(node.ip)
    admin_client = SSHClient(node.ip, username="root")
    # Stop and remove the cluster services on this node
    ArakoonInstaller.stop(config.cluster_id, client=admin_client)
    ArakoonInstaller.remove(config.cluster_id, client=admin_client)
    # Wipe the node's log, tlog and home directories
    admin_client.dir_delete([node.log_dir, node.tlog_dir, node.home])
    # Finally remove the node's configuration file
    config.delete_config(regular_client)
    logger.debug("Destroy node {0} in cluster {1} completed".format(node.ip, config.cluster_id))
示例9: _destroy_node
# 需要导入模块: from ovs.extensions.generic.sshclient import SSHClient [as 别名]
# 或者: from ovs.extensions.generic.sshclient.SSHClient import dir_delete [as 别名]
def _destroy_node(config, node, delay_unregistration=False):
    """
    Cleans up a single node (remove services, directories and configuration files)
    """
    ArakoonInstaller._logger.debug('Destroy node {0} in cluster {1}'.format(node.ip, config.cluster_id))
    # Stop and remove the cluster services on this node (as root)
    root_client = SSHClient(node.ip, username='root')
    ArakoonInstaller.stop(config.cluster_id, client=root_client)
    ArakoonInstaller.remove(config.cluster_id, client=root_client, delay_unregistration=delay_unregistration)
    # Collect directories to wipe; a set avoids deleting the same path twice
    abs_paths = {node.tlog_dir, node.home}
    # Only filesystem sinks (absolute paths) have a parent directory to remove
    for sink in (node.log_sinks, node.crash_log_sinks):
        if sink.startswith('/'):
            abs_paths.add(os.path.dirname(os.path.abspath(sink)))
    root_client.dir_delete(list(abs_paths))
    ArakoonInstaller._logger.debug('Destroy node {0} in cluster {1} completed'.format(node.ip, config.cluster_id))
示例10: create_cluster
# 需要导入模块: from ovs.extensions.generic.sshclient import SSHClient [as 别名]
# 或者: from ovs.extensions.generic.sshclient.SSHClient import dir_delete [as 别名]
def create_cluster(cluster_name, ip):
    """
    Creates a cluster
    :param cluster_name: Name of the cluster
    :param ip: IP address of the first node of the new cluster
    """
    logger.debug('Creating cluster "{0}" on {1}'.format(cluster_name, ip))
    client = SSHClient(ip, username='root')
    node_name = System.get_my_machine_id(client)
    data_dir = EtcdInstaller.DATA_DIR.format(EtcdInstaller.DB_DIR, cluster_name)
    wal_dir = EtcdInstaller.WAL_DIR.format(EtcdInstaller.DB_DIR, cluster_name)
    abs_paths = [data_dir, wal_dir]
    # Start from empty data/WAL directories, owned by the ovs user
    client.dir_delete(abs_paths)
    client.dir_create(abs_paths)
    client.dir_chmod(abs_paths, 0755, recursive=True)
    client.dir_chown(abs_paths, 'ovs', 'ovs', recursive=True)
    base_name = 'ovs-etcd'
    target_name = 'ovs-etcd-{0}'.format(cluster_name)
    # Single-node bootstrap: the initial cluster consists of this node only,
    # hence INITIAL_STATE 'new' and self-advertised peer URLs
    ServiceManager.add_service(base_name, client,
                               params={'CLUSTER': cluster_name,
                                       'NODE_ID': node_name,
                                       'DATA_DIR': data_dir,
                                       'WAL_DIR': wal_dir,
                                       'SERVER_URL': EtcdInstaller.SERVER_URL.format(ip),
                                       'CLIENT_URL': EtcdInstaller.CLIENT_URL.format(ip),
                                       'LOCAL_CLIENT_URL': EtcdInstaller.CLIENT_URL.format('127.0.0.1'),
                                       'INITIAL_CLUSTER': '{0}={1}'.format(node_name, EtcdInstaller.SERVER_URL.format(ip)),
                                       'INITIAL_STATE': 'new',
                                       'INITIAL_PEERS': '-initial-advertise-peer-urls {0}'.format(EtcdInstaller.SERVER_URL.format(ip))},
                               target_name=target_name)
    EtcdInstaller.start(cluster_name, client)
    EtcdInstaller.wait_for_cluster(cluster_name, client)
    logger.debug('Creating cluster "{0}" on {1} completed'.format(cluster_name, ip))
示例11: extend_cluster
# 需要导入模块: from ovs.extensions.generic.sshclient import SSHClient [as 别名]
# 或者: from ovs.extensions.generic.sshclient.SSHClient import dir_delete [as 别名]
def extend_cluster(master_ip, new_ip, cluster_name, server_port=DEFAULT_SERVER_PORT, client_port=DEFAULT_CLIENT_PORT):
    """
    Extends a cluster to a given new node
    :param master_ip: IP of one of the already existing nodes
    :type master_ip: str
    :param new_ip: IP address of the node to be added
    :type new_ip: str
    :param cluster_name: Name of the cluster to be extended
    :type cluster_name: str
    :param server_port: Port to be used by server
    :type server_port: int
    :param client_port: Port to be used by client
    :type client_port: int
    :raises RuntimeError: If the existing cluster is unhealthy
    """
    EtcdInstaller._logger.debug('Extending cluster "{0}" from {1} to {2}'.format(cluster_name, master_ip, new_ip))
    master_client = SSHClient(master_ip, username='root')
    if not EtcdInstaller._is_healty(cluster_name, master_client, client_port=client_port):
        raise RuntimeError('Cluster "{0}" unhealthy, aborting extend'.format(cluster_name))
    # etcdctl only needs an explicit --peers argument on non-default client ports
    command = ['etcdctl', 'member', 'list']
    new_server_url = EtcdInstaller.SERVER_URL.format(new_ip, server_port)
    if client_port != EtcdInstaller.DEFAULT_CLIENT_PORT:
        command = ['etcdctl', '--peers={0}:{1}'.format(master_ip, client_port), 'member', 'list']
    cluster_members = master_client.run(command).splitlines()
    # Idempotency: nothing to do if the node is already a cluster member
    for cluster_member in cluster_members:
        if new_server_url in cluster_member:
            EtcdInstaller._logger.info('Node {0} already member of etcd cluster'.format(new_ip))
            return
    # Build the 'name=peer_url' list of the current members for INITIAL_CLUSTER
    current_cluster = []
    for item in cluster_members:
        info = re.search(EtcdInstaller.MEMBER_REGEX, item).groupdict()
        current_cluster.append('{0}={1}'.format(info['name'], info['peer']))
    new_client = SSHClient(new_ip, username='root')
    node_name = System.get_my_machine_id(new_client)
    current_cluster.append('{0}={1}'.format(node_name, new_server_url))
    data_dir = EtcdInstaller.DATA_DIR.format(cluster_name)
    wal_dir = EtcdInstaller.WAL_DIR.format(cluster_name)
    abs_paths = [data_dir, wal_dir]
    # Start from empty data/WAL directories, owned by the ovs user
    new_client.dir_delete(abs_paths)
    new_client.dir_create(abs_paths)
    new_client.dir_chmod(abs_paths, 0755, recursive=True)
    new_client.dir_chown(abs_paths, 'ovs', 'ovs', recursive=True)
    base_name = 'ovs-etcd'
    target_name = 'ovs-etcd-{0}'.format(cluster_name)
    EtcdInstaller.stop(cluster_name, new_client)  # Stop a possible proxy service
    # Registration is delayed until the node has actually joined (see below)
    params = ServiceManager.add_service(name=base_name,
                                        client=new_client,
                                        target_name=target_name,
                                        delay_registration=True,
                                        params={'CLUSTER': cluster_name,
                                                'NODE_ID': node_name,
                                                'DATA_DIR': data_dir,
                                                'WAL_DIR': wal_dir,
                                                'SERVER_URL': new_server_url,
                                                'CLIENT_URL': EtcdInstaller.CLIENT_URL.format(new_ip, client_port),
                                                'LOCAL_CLIENT_URL': EtcdInstaller.CLIENT_URL.format('127.0.0.1', client_port),
                                                'INITIAL_CLUSTER': ','.join(current_cluster),
                                                'INITIAL_STATE': 'existing',
                                                'INITIAL_PEERS': ''})
    # Announce the new member on an existing node, then start it locally
    add_command = ['etcdctl', 'member', 'add', node_name, new_server_url]
    if client_port != EtcdInstaller.DEFAULT_CLIENT_PORT:
        add_command = ['etcdctl', '--peers={0}:{1}'.format(master_ip, client_port), 'member', 'add', node_name, new_server_url]
    master_client.run(add_command)
    EtcdInstaller.start(cluster_name, new_client)
    EtcdInstaller.wait_for_cluster(cluster_name, new_client, client_port=client_port)
    ServiceManager.register_service(service_metadata=params, node_name=node_name)
    EtcdInstaller._logger.debug('Extending cluster "{0}" from {1} to {2} completed'.format(cluster_name, master_ip, new_ip))
示例12: migrate
# 需要导入模块: from ovs.extensions.generic.sshclient import SSHClient [as 别名]
# 或者: from ovs.extensions.generic.sshclient.SSHClient import dir_delete [as 别名]
#.........这里部分代码省略.........
for ip in master_ips + extra_ips:
if EtcdInstaller.has_cluster(ip, 'config'):
cluster_ip = ip
break
node_ip = None
path = '/opt/OpenvStorage/config/ovs.json'
if os.path.exists(path):
with open(path) as config_file:
config = json.load(config_file)
node_ip = config['grid']['ip']
if node_ip is not None:
if cluster_ip is None:
EtcdInstaller.create_cluster('config', node_ip)
EtcdConfiguration.initialize()
EtcdConfiguration.initialize_host(host_id)
else:
EtcdInstaller.extend_cluster(cluster_ip, node_ip, 'config')
EtcdConfiguration.initialize_host(host_id)
etcd_migrate = True
if etcd_migrate is True:
# Migrating configuration files
path = '/opt/OpenvStorage/config/ovs.json'
if os.path.exists(path):
with open(path) as config_file:
config = json.load(config_file)
EtcdConfiguration.set('/ovs/framework/cluster_id', config['support']['cid'])
if not EtcdConfiguration.exists('/ovs/framework/install_time'):
EtcdConfiguration.set('/ovs/framework/install_time', config['core']['install_time'])
else:
EtcdConfiguration.set('/ovs/framework/install_time', min(EtcdConfiguration.get('/ovs/framework/install_time'), config['core']['install_time']))
EtcdConfiguration.set('/ovs/framework/registered', config['core']['registered'])
EtcdConfiguration.set('/ovs/framework/plugins/installed', config['plugins'])
EtcdConfiguration.set('/ovs/framework/stores', config['core']['storage'])
EtcdConfiguration.set('/ovs/framework/paths', {'cfgdir': config['core']['cfgdir'],
'basedir': config['core']['basedir'],
'ovsdb': config['core']['ovsdb']})
EtcdConfiguration.set('/ovs/framework/support', {'enablesupport': config['support']['enablesupport'],
'enabled': config['support']['enabled'],
'interval': config['support']['interval']})
EtcdConfiguration.set('/ovs/framework/storagedriver', {'mds_safety': config['storagedriver']['mds']['safety'],
'mds_tlogs': config['storagedriver']['mds']['tlogs'],
'mds_maxload': config['storagedriver']['mds']['maxload']})
EtcdConfiguration.set('/ovs/framework/webapps', {'html_endpoint': config['webapps']['html_endpoint'],
'oauth2': config['webapps']['oauth2']})
EtcdConfiguration.set('/ovs/framework/messagequeue', {'endpoints': [],
'protocol': config['core']['broker']['protocol'],
'user': config['core']['broker']['login'],
'port': config['core']['broker']['port'],
'password': config['core']['broker']['password'],
'queues': config['core']['broker']['queues']})
host_key = '/ovs/framework/hosts/{0}{{0}}'.format(host_id)
EtcdConfiguration.set(host_key.format('/storagedriver'), {'rsp': config['storagedriver']['rsp'],
'vmware_mode': config['storagedriver']['vmware_mode']})
EtcdConfiguration.set(host_key.format('/ports'), config['ports'])
EtcdConfiguration.set(host_key.format('/setupcompleted'), config['core']['setupcompleted'])
EtcdConfiguration.set(host_key.format('/versions'), config['core'].get('versions', {}))
EtcdConfiguration.set(host_key.format('/type'), config['core']['nodetype'])
EtcdConfiguration.set(host_key.format('/ip'), config['grid']['ip'])
path = '{0}/memcacheclient.cfg'.format(EtcdConfiguration.get('/ovs/framework/paths|cfgdir'))
if os.path.exists(path):
config = RawConfigParser()
config.read(path)
nodes = [config.get(node.strip(), 'location').strip()
for node in config.get('main', 'nodes').split(',')]
EtcdConfiguration.set('/ovs/framework/memcache|endpoints', nodes)
os.remove(path)
path = '{0}/rabbitmqclient.cfg'.format(EtcdConfiguration.get('/ovs/framework/paths|cfgdir'))
if os.path.exists(path):
config = RawConfigParser()
config.read(path)
nodes = [config.get(node.strip(), 'location').strip()
for node in config.get('main', 'nodes').split(',')]
EtcdConfiguration.set('/ovs/framework/messagequeue|endpoints', nodes)
os.remove(path)
# Migrate arakoon configuration files
from ovs.extensions.db.arakoon import ArakoonInstaller
reload(ArakoonInstaller)
from ovs.extensions.db.arakoon.ArakoonInstaller import ArakoonInstaller, ArakoonClusterConfig
from ovs.extensions.generic.sshclient import SSHClient
if master_ips is not None:
config_dir = '/opt/OpenvStorage/config/arakoon/'
for ip in master_ips:
client = SSHClient(ip)
if client.dir_exists(config_dir):
for cluster_name in client.dir_list(config_dir):
try:
with open('{0}/{1}/{1}.cfg'.format(config_dir, cluster_name)) as config_file:
EtcdConfiguration.set(ArakoonClusterConfig.ETCD_CONFIG_KEY.format(cluster_name),
config_file.read(),
raw=True)
ArakoonInstaller.deploy_cluster(cluster_name, ip)
except:
logger.exception('Error migrating {0} on {1}'.format(cluster_name, ip))
client.dir_delete(config_dir)
except:
logger.exception('Error migrating to version 4')
working_version = 4
return working_version
示例13: test_basic_logrotate
# 需要导入模块: from ovs.extensions.generic.sshclient import SSHClient [as 别名]
# 或者: from ovs.extensions.generic.sshclient.SSHClient import dir_delete [as 别名]
#.........这里部分代码省略.........
assert_raises(excClass=CalledProcessError,
callableObj=root_client.run,
command='logrotate {0}'.format(custom_logrotate_cfg_file))
##########################################
# Test 1st logrotate configuration entry #
##########################################
root_client.dir_create(directories=custom_logrotate_dir)
root_client.dir_chown(directories=custom_logrotate_dir,
user='ovs',
group='ovs',
recursive=True)
root_client.run(command='touch {0}'.format(custom_logrotate_file1))
root_client.run(command='touch {0}'.format(custom_logrotate_file2))
root_client.file_chmod(filename=custom_logrotate_file1, mode=666)
root_client.file_chmod(filename=custom_logrotate_file2, mode=666)
# Write data to the file less than size for rotation and verify rotation
GeneralVDisk.write_to_volume(location=custom_logrotate_file1,
count=15,
bs='1M',
input_type='zero',
root_client=root_client)
root_client.run('logrotate {0}'.format(custom_logrotate_cfg_file))
assert_equal(first=len(root_client.file_list(directory=custom_logrotate_dir)),
second=2,
msg='More files than expected present in {0}'.format(custom_logrotate_dir))
# Write data to file larger than size in configuration and verify amount of rotations
files_to_delete = []
for counter in range(7):
expected_file = '{0}.{1}.gz'.format(custom_logrotate_file1, counter + 1 if counter < 5 else 5)
GeneralVDisk.write_to_volume(location=custom_logrotate_file1,
count=30,
bs='1M',
input_type='zero',
root_client=root_client)
root_client.run('logrotate {0}'.format(custom_logrotate_cfg_file))
assert_equal(first=len(root_client.file_list(directory=custom_logrotate_dir)),
second=counter + 3 if counter < 5 else 7,
msg='Not the expected amount of files present in {0}'.format(custom_logrotate_dir))
assert_true(expr=root_client.file_exists(filename=expected_file),
msg='Logrotate did not create the expected file {0}'.format(expected_file))
user_info = General.get_owner_group_for_path(path=expected_file,
root_client=root_client)
assert_equal(first='root',
second=user_info['user']['name'],
msg='Expected file to be owned by user "root", but instead its owned by "{0}"'.format(user_info['user']['name']))
assert_equal(first='root',
second=user_info['group']['name'],
msg='Expected file to be owned by group "root", but instead its owned by "{0}"'.format(user_info['group']['name']))
files_to_delete.append(expected_file)
root_client.file_delete(filenames=files_to_delete)
##########################################
# Test 2nd logrotate configuration entry #
##########################################
root_client.file_chown(filenames=custom_logrotate_file2,
user='ovs',
group='ovs')
# Write data to the file less than size for rotation and verify rotation
GeneralVDisk.write_to_volume(location=custom_logrotate_file2,
count=15,
bs='1M',
input_type='zero',
root_client=root_client)
root_client.run('logrotate {0}'.format(custom_logrotate_cfg_file))
assert_equal(first=len(root_client.file_list(directory=custom_logrotate_dir)),
second=2,
msg='More files than expected present in {0}'.format(custom_logrotate_dir))
# Write data to file larger than size in configuration and verify amount of rotations
for counter in range(12):
if counter == 0: # Delaycompress --> file is not compressed during initial cycle
expected_file = '{0}.1'.format(custom_logrotate_file2)
else:
expected_file = '{0}.{1}.gz'.format(custom_logrotate_file2, counter + 1 if counter < 10 else 10)
GeneralVDisk.write_to_volume(location=custom_logrotate_file2,
count=30,
bs='1M',
input_type='zero',
root_client=root_client)
root_client.run('logrotate {0}'.format(custom_logrotate_cfg_file))
assert_equal(first=len(root_client.file_list(directory=custom_logrotate_dir)),
second=counter + 3 if counter < 10 else 12,
msg='Not the expected amount of files present in {0}'.format(custom_logrotate_dir))
assert_true(expr=root_client.file_exists(filename=expected_file),
msg='Logrotate did not create the expected file {0}'.format(expected_file))
user_info = General.get_owner_group_for_path(path=expected_file,
root_client=root_client)
assert_equal(first='ovs',
second=user_info['user']['name'],
msg='Expected file to be owned by user "root", but instead its owned by "{0}"'.format(user_info['user']['name']))
assert_equal(first='ovs',
second=user_info['group']['name'],
msg='Expected file to be owned by group "root", but instead its owned by "{0}"'.format(user_info['group']['name']))
root_client.dir_delete(directories=custom_logrotate_dir)
root_client.file_delete(filenames=custom_logrotate_cfg_file)
示例14: remove_mds_service
# 需要导入模块: from ovs.extensions.generic.sshclient import SSHClient [as 别名]
# 或者: from ovs.extensions.generic.sshclient.SSHClient import dir_delete [as 别名]
def remove_mds_service(mds_service, vpool, reconfigure, allow_offline=False):
    """
    Removes an MDS service
    :param mds_service: The MDS service to remove
    :param vpool: The vPool for which the MDS service will be removed
    :param reconfigure: Indicates whether reconfiguration is required
    :param allow_offline: Indicates whether it's OK that the node for which mds services are cleaned is offline
    :raises RuntimeError: If the MDS service still serves vdisks while allow_offline is False
    """
    if len(mds_service.vdisks_guids) > 0 and allow_offline is False:
        raise RuntimeError('Cannot remove MDSService that is still serving disks')
    mdsservice_type = ServiceTypeList.get_by_name('MetadataServer')
    # Clean up model: remember the on-disk paths before deleting the partition
    # objects, so they can be removed from the filesystem afterwards
    directories_to_clean = []
    for sd_partition in mds_service.storagedriver_partitions:
        directories_to_clean.append(sd_partition.path)
        sd_partition.delete()
    if allow_offline is True:  # Certain vdisks might still be attached to this offline MDS service --> Delete relations
        for junction in mds_service.vdisks:
            junction.delete()
    mds_service.delete()
    mds_service.service.delete()
    storagerouter = mds_service.service.storagerouter
    try:
        client = SSHClient(storagerouter)
        if reconfigure is True:
            # Generate new mds_nodes section from the remaining MDS services
            # on this storagerouter for this vPool
            mds_nodes = []
            for service in mdsservice_type.services:
                if service.storagerouter_guid == storagerouter.guid:
                    mds_service = service.mds_service
                    if mds_service.vpool_guid == vpool.guid:
                        # assumes exactly one DB-role partition per remaining service — TODO confirm
                        sdp = [sd_partition.path for sd_partition in mds_service.storagedriver_partitions if sd_partition.role == DiskPartition.ROLES.DB]
                        mds_nodes.append({'host': service.storagerouter.ip,
                                          'port': service.ports[0],
                                          'db_directory': sdp[0],
                                          'scratch_directory': sdp[0]})
            # Generate the correct section in the Storage Driver's configuration
            storagedriver_config = StorageDriverConfiguration('storagedriver', vpool.name)
            storagedriver_config.load(client)
            storagedriver_config.clean()  # Clean out obsolete values
            storagedriver_config.configure_metadata_server(mds_nodes=mds_nodes)
            storagedriver_config.save(client, reload_config=reconfigure)
        # Retry the directory cleanup: the MDS process may still hold the
        # directories until it has fully shut down
        tries = 5
        while tries > 0:
            try:
                root_client = SSHClient(storagerouter, username='root')
                root_client.dir_delete(directories=directories_to_clean,
                                       follow_symlinks=True)
                for dir_name in directories_to_clean:
                    logger.debug('Recursively removed {0}'.format(dir_name))
                break
            except Exception:
                logger.debug('Waiting for the MDS service to go down...')
                time.sleep(5)
                tries -= 1
                if tries == 0:
                    # Out of retries: re-raise the last cleanup error
                    raise
    except UnableToConnectException:
        # The node is unreachable; only tolerated when explicitly allowed
        if allow_offline is True:
            logger.info('Allowed offline node during mds service removal')
        else:
            raise
示例15: remove_mds_service
# 需要导入模块: from ovs.extensions.generic.sshclient import SSHClient [as 别名]
# 或者: from ovs.extensions.generic.sshclient.SSHClient import dir_delete [as 别名]
def remove_mds_service(mds_service, vpool, reconfigure, allow_offline=False):
    """
    Removes an MDS service
    :param mds_service: The MDS service to remove
    :type mds_service: MDSService
    :param vpool: The vPool for which the MDS service will be removed
    :type vpool: VPool
    :param reconfigure: Indicates whether reconfiguration is required
    :type reconfigure: bool
    :param allow_offline: Indicates whether it's OK that the node for which mds services are cleaned is offline
    :type allow_offline: bool
    :raises RuntimeError: If the MDS service still serves vdisks while allow_offline is False
    """
    if len(mds_service.vdisks_guids) > 0 and allow_offline is False:
        raise RuntimeError("Cannot remove MDSService that is still serving disks")
    mdsservice_type = ServiceTypeList.get_by_name(ServiceType.SERVICE_TYPES.MD_SERVER)
    # Clean up model: remember the on-disk paths before deleting the partition
    # objects, so they can be removed from the filesystem afterwards
    directories_to_clean = []
    for sd_partition in mds_service.storagedriver_partitions:
        directories_to_clean.append(sd_partition.path)
        sd_partition.delete()
    if (
        allow_offline is True
    ):  # Certain vdisks might still be attached to this offline MDS service --> Delete relations
        for junction in mds_service.vdisks:
            junction.delete()
    mds_service.delete()
    mds_service.service.delete()
    storagerouter = mds_service.service.storagerouter
    try:
        client = SSHClient(storagerouter)
        if reconfigure is True:
            # Generate new mds_nodes section from the remaining MDS services
            # on this storagerouter for this vPool
            mds_nodes = []
            for service in mdsservice_type.services:
                if service.storagerouter_guid == storagerouter.guid:
                    mds_service = service.mds_service
                    if mds_service.vpool_guid == vpool.guid:
                        # assumes exactly one DB/MDS-role partition per remaining service — TODO confirm
                        sdp = [
                            sd_partition
                            for sd_partition in mds_service.storagedriver_partitions
                            if sd_partition.role == DiskPartition.ROLES.DB
                            and sd_partition.sub_role == StorageDriverPartition.SUBROLE.MDS
                        ][0]
                        mds_nodes.append(
                            {
                                "host": service.storagerouter.ip,
                                "port": service.ports[0],
                                "db_directory": sdp.path,
                                "scratch_directory": sdp.path,
                            }
                        )
            # Generate the correct section in the Storage Driver's configuration
            storagedriver = [sd for sd in storagerouter.storagedrivers if sd.vpool_guid == vpool.guid][0]
            storagedriver_config = StorageDriverConfiguration(
                "storagedriver", vpool.guid, storagedriver.storagedriver_id
            )
            storagedriver_config.load()
            storagedriver_config.configure_metadata_server(mds_nodes=mds_nodes)
            storagedriver_config.save(client, reload_config=reconfigure)
        # Retry the directory cleanup: the MDS process may still hold the
        # directories until it has fully shut down
        tries = 5
        while tries > 0:
            try:
                root_client = SSHClient(storagerouter, username="root")
                root_client.dir_delete(directories=directories_to_clean, follow_symlinks=True)
                for dir_name in directories_to_clean:
                    MDSServiceController._logger.debug("Recursively removed {0}".format(dir_name))
                break
            except Exception:
                MDSServiceController._logger.debug("Waiting for the MDS service to go down...")
                time.sleep(5)
                tries -= 1
                if tries == 0:
                    # Out of retries: re-raise the last cleanup error
                    raise
    except UnableToConnectException:
        # The node is unreachable; only tolerated when explicitly allowed
        if allow_offline is True:
            MDSServiceController._logger.info("Allowed offline node during mds service removal")
        else:
            raise