This page collects typical usage examples of the Python method ovs.extensions.generic.sshclient.SSHClient.dir_exists. If you are wondering what SSHClient.dir_exists does, how to call it, or simply want to see it used in real code, the curated examples below should help. You can also explore further usage examples of the class it belongs to, ovs.extensions.generic.sshclient.SSHClient.
The 14 code examples of SSHClient.dir_exists shown below are sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
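Before going through the full examples, here is a minimal, hedged sketch of the method in isolation. It assumes a node where the ovs package is importable; the endpoint, credentials and directory below are placeholder values, not taken from the examples.

from ovs.extensions.generic.sshclient import SSHClient

client = SSHClient('127.0.0.1', username='root')  # hypothetical endpoint and credentials
target_dir = '/opt/OpenvStorage/db'  # hypothetical directory to check
if client.dir_exists(target_dir):
    print 'Directory {0} exists'.format(target_dir)
else:
    # dir_create is the companion call used throughout the examples below
    client.dir_create(target_dir)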
Example 1: archive_existing_arakoon_data
# Required import: from ovs.extensions.generic.sshclient import SSHClient [as alias]
# Or: from ovs.extensions.generic.sshclient.SSHClient import dir_exists [as alias]
def archive_existing_arakoon_data(ip, directory, top_dir, cluster_name):
"""
Copy existing arakoon data aside (archive it) when setting up a new arakoon cluster
:param ip: IP of the node on which to check for existing data
:param directory: Directory to check for existence
:param top_dir: Top-level directory under which the archive directory is created
:param cluster_name: Name of the arakoon cluster
:return: None
"""
new_client = SSHClient(ip)
logger.debug('archive - check if {0} exists'.format(directory))
if new_client.dir_exists(directory):
logger.debug('archive - from {0}'.format(directory))
archive_dir = '/'.join([top_dir, 'archive', cluster_name])
if new_client.dir_exists(archive_dir + '/' + os.path.basename(directory)):
logger.debug('archive - from existing archive {0}'.format(archive_dir))
timestamp = time.strftime('%Y%m%d%H%M%S', time.gmtime())
new_archive_dir = archive_dir + '-' + timestamp
new_client.dir_create(new_archive_dir)
new_client.run('mv {0} {1}'.format(archive_dir, new_archive_dir))
logger.debug('archive - to new {0}'.format(new_archive_dir))
logger.debug('create archive dir: {0}'.format(archive_dir))
new_client.dir_create(archive_dir)
logger.debug('archive from {0} to {1}'.format(directory, archive_dir))
if cluster_name == os.path.basename(directory) and new_client.dir_list(directory):
new_client.run('mv {0}/* {1}'.format(directory, archive_dir))
else:
new_client.run('mv {0} {1}'.format(directory, archive_dir))
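A hedged invocation sketch for this helper; the IP, paths and cluster name are hypothetical, and the call assumes archive_existing_arakoon_data is in scope (e.g. imported from the module this snippet was extracted from).

# Archive any leftover data of the 'ovsdb' cluster before re-deploying it (hypothetical values)
archive_existing_arakoon_data(ip='10.100.1.1',
                              directory='/opt/OpenvStorage/db/arakoon/ovsdb',
                              top_dir='/opt/OpenvStorage/db',
                              cluster_name='ovsdb')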
Example 2: clean_leftover_arakoon_data
# Required import: from ovs.extensions.generic.sshclient import SSHClient [as alias]
# Or: from ovs.extensions.generic.sshclient.SSHClient import dir_exists [as alias]
def clean_leftover_arakoon_data(ip, directories):
"""
Delete existing arakoon data, or archive it aside
Directories should be a dict with the absolute path as key and a boolean as value, indicating whether to archive (True) or delete (False)
e.g.: {'/var/log/arakoon/ovsdb': True,                       --> Files under this directory will be archived
       '/opt/OpenvStorage/db/arakoon/ovsdb/tlogs': False}    --> Files under this directory will be deleted
:param ip: IP on which to check for existing data
:type ip: str
:param directories: Directories to archive or delete
:type directories: dict
:return: None
"""
root_client = SSHClient(ip, username='root')
# Verify whether all files to be archived have been released properly
open_file_errors = []
ArakoonInstaller._logger.debug('Cleanup old arakoon - Checking open files')
dirs_with_files = {}
for directory, archive in directories.iteritems():
ArakoonInstaller._logger.debug('Cleaning old arakoon - Checking directory {0}'.format(directory))
if root_client.dir_exists(directory):
ArakoonInstaller._logger.debug('Cleaning old arakoon - Directory {0} exists'.format(directory))
file_names = root_client.file_list(directory, abs_path=True, recursive=True)
if len(file_names) > 0:
ArakoonInstaller._logger.debug('Cleaning old arakoon - Files found in directory {0}'.format(directory))
dirs_with_files[directory] = {'files': file_names,
'archive': archive}
for file_name in file_names:
try:
open_files = root_client.run('lsof {0}'.format(file_name))
if open_files != '':
open_file_errors.append('Open file {0} detected in directory {1}'.format(os.path.basename(file_name), directory))
except CalledProcessError:
continue
if len(open_file_errors) > 0:
raise RuntimeError('\n - ' + '\n - '.join(open_file_errors))
for directory, info in dirs_with_files.iteritems():
if info['archive'] is True:
# Create zipped tar
ArakoonInstaller._logger.debug('Cleanup old arakoon - Start archiving directory {0}'.format(directory))
archive_dir = '{0}/archive'.format(directory)
if not root_client.dir_exists(archive_dir):
ArakoonInstaller._logger.debug('Cleanup old arakoon - Creating archive directory {0}'.format(archive_dir))
root_client.dir_create(archive_dir)
ArakoonInstaller._logger.debug('Cleanup old arakoon - Creating tar file')
tar_name = '{0}/{1}.tgz'.format(archive_dir, int(time.time()))
root_client.run('cd {0}; tar -cz -f {1} --exclude "archive" *'.format(directory, tar_name))
ArakoonInstaller._logger.debug('Cleanup old arakoon - Removing old files from {0}'.format(directory))
root_client.file_delete(info['files'])
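A hedged call sketch using the dict format described in the docstring. The IP and paths are hypothetical, and the snippet assumes the method is reachable as a staticmethod on ArakoonInstaller, as the ArakoonInstaller._logger references above suggest.

# True --> files under the directory are archived, False --> they are deleted (hypothetical paths)
ArakoonInstaller.clean_leftover_arakoon_data('10.100.1.1',
                                             {'/var/log/arakoon/ovsdb': True,
                                              '/opt/OpenvStorage/db/arakoon/ovsdb/tlogs': False})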
Example 3: _deploy
# Required import: from ovs.extensions.generic.sshclient import SSHClient [as alias]
# Or: from ovs.extensions.generic.sshclient.SSHClient import dir_exists [as alias]
def _deploy(config, offline_nodes=None):
"""
Deploys a complete cluster: distributes the configuration files, creates directories and services
"""
ArakoonInstaller._logger.debug('Deploying cluster {0}'.format(config.cluster_id))
if offline_nodes is None:
offline_nodes = []
for node in config.nodes:
if node.ip in offline_nodes:
continue
ArakoonInstaller._logger.debug(' Deploying cluster {0} on {1}'.format(config.cluster_id, node.ip))
root_client = SSHClient(node.ip, username='root')
# Distributes a configuration file to all its nodes
config.write_config()
# Create dirs as root because mountpoint /mnt/cache1 is typically owned by root
abs_paths = [node.log_dir, node.tlog_dir, node.home]
if not root_client.dir_exists(abs_paths):
root_client.dir_create(abs_paths)
root_client.dir_chmod(abs_paths, 0755, recursive=True)
root_client.dir_chown(abs_paths, 'ovs', 'ovs', recursive=True)
# Creates services for/on all nodes in the config
base_name = 'ovs-arakoon'
target_name = 'ovs-arakoon-{0}'.format(config.cluster_id)
ServiceManager.add_service(base_name, root_client,
params={'CLUSTER': config.cluster_id,
'NODE_ID': node.name,
'CONFIG_PATH': ArakoonInstaller.ETCD_CONFIG_PATH.format(config.cluster_id)},
target_name=target_name)
ArakoonInstaller._logger.debug(' Deploying cluster {0} on {1} completed'.format(config.cluster_id, node.ip))
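The directory handling above illustrates a pattern that recurs on this page: dir_exists is given the same list of paths as dir_create, and the created directories are chmod'ed and chown'ed as root. Below is a minimal standalone sketch of that pattern; the node IP and paths are hypothetical, and passing a list to dir_exists mirrors the example above rather than a documented API guarantee.

from ovs.extensions.generic.sshclient import SSHClient

root_client = SSHClient('10.100.1.1', username='root')  # hypothetical node
abs_paths = ['/var/log/arakoon/demo', '/mnt/cache1/arakoon/demo/tlogs', '/mnt/cache1/arakoon/demo/home']
if not root_client.dir_exists(abs_paths):
    # Create missing directories as root, then hand them over to the 'ovs' user
    root_client.dir_create(abs_paths)
    root_client.dir_chmod(abs_paths, 0755, recursive=True)
    root_client.dir_chown(abs_paths, 'ovs', 'ovs', recursive=True)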
Example 4: migrate
# Required import: from ovs.extensions.generic.sshclient import SSHClient [as alias]
# Or: from ovs.extensions.generic.sshclient.SSHClient import dir_exists [as alias]
def migrate(previous_version, master_ips=None, extra_ips=None):
"""
Migrates from any version to any version, running all migrations required
If previous_version is for example 1 and this script is at
version 3, it will execute two steps:
- 1 > 2
- 2 > 3
:param previous_version: The previous version from which to start the migration.
:param master_ips: IP addresses of the MASTER nodes
:param extra_ips: IP addresses of the EXTRA nodes
"""
working_version = previous_version
# Version 1 introduced:
# - Flexible SSD layout
if working_version < 1:
from ovs.extensions.generic.configuration import Configuration
if Configuration.exists('ovs.arakoon'):
Configuration.delete('ovs.arakoon', remove_root=True)
Configuration.set('ovs.core.ovsdb', '/opt/OpenvStorage/db')
working_version = 1
# Version 2 introduced:
# - Registration
if working_version < 2:
import time
from ovs.extensions.generic.configuration import Configuration
if not Configuration.exists('ovs.core.registered'):
Configuration.set('ovs.core.registered', False)
Configuration.set('ovs.core.install_time', time.time())
working_version = 2
# Version 3 introduced:
# - New arakoon clients
if working_version < 3:
from ovs.extensions.db.arakoon import ArakoonInstaller
reload(ArakoonInstaller)
from ovs.extensions.db.arakoon import ArakoonInstaller
from ovs.extensions.generic.sshclient import SSHClient
from ovs.extensions.generic.configuration import Configuration
if master_ips is not None:
for ip in master_ips:
client = SSHClient(ip)
if client.dir_exists(ArakoonInstaller.ArakoonInstaller.ARAKOON_CONFIG_DIR):
for cluster_name in client.dir_list(ArakoonInstaller.ArakoonInstaller.ARAKOON_CONFIG_DIR):
try:
ArakoonInstaller.ArakoonInstaller.deploy_cluster(cluster_name, ip)
except:
pass
if Configuration.exists('ovs.core.storage.persistent'):
Configuration.set('ovs.core.storage.persistent', 'pyrakoon')
working_version = 3
return working_version
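A hedged invocation sketch; the version number and IP addresses are placeholders, and the call assumes migrate is reachable as a plain function or staticmethod in the migrator module this example was extracted from.

# Run all migrations from model version 1 up to the version this migrator implements
new_version = migrate(previous_version=1,
                      master_ips=['10.100.1.1', '10.100.1.2'],  # hypothetical MASTER node IPs
                      extra_ips=[])
print 'Now at version {0}'.format(new_version)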
Example 5: clean_leftover_arakoon_data
# Required import: from ovs.extensions.generic.sshclient import SSHClient [as alias]
# Or: from ovs.extensions.generic.sshclient.SSHClient import dir_exists [as alias]
def clean_leftover_arakoon_data(ip, directories):
"""
Delete existing arakoon data
:param ip: IP on which to check for existing data
:type ip: str
:param directories: Directories to delete
:type directories: list
:return: None
"""
if os.environ.get('RUNNING_UNITTESTS') == 'True':
return
root_client = SSHClient(ip, username='root')
# Verify whether all files to be deleted have been released properly
open_file_errors = []
ArakoonInstaller._logger.debug('Cleanup old arakoon - Checking open files')
dirs_with_files = {}
for directory in directories:
ArakoonInstaller._logger.debug('Cleaning old arakoon - Checking directory {0}'.format(directory))
if root_client.dir_exists(directory):
ArakoonInstaller._logger.debug('Cleaning old arakoon - Directory {0} exists'.format(directory))
file_names = root_client.file_list(directory, abs_path=True, recursive=True)
if len(file_names) > 0:
ArakoonInstaller._logger.debug('Cleaning old arakoon - Files found in directory {0}'.format(directory))
dirs_with_files[directory] = file_names
for file_name in file_names:
try:
open_files = root_client.run(['lsof', file_name])
if open_files != '':
open_file_errors.append('Open file {0} detected in directory {1}'.format(os.path.basename(file_name), directory))
except CalledProcessError:
continue
if len(open_file_errors) > 0:
raise RuntimeError('\n - ' + '\n - '.join(open_file_errors))
for directory, info in dirs_with_files.iteritems():
ArakoonInstaller._logger.debug('Cleanup old arakoon - Removing old files from {0}'.format(directory))
root_client.file_delete(info)
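The later, list-based variant of the same helper can be sketched as follows; the IP and paths are hypothetical, and the call again assumes the method lives on ArakoonInstaller. Note that this variant only deletes, it no longer archives.

ArakoonInstaller.clean_leftover_arakoon_data('10.100.1.1',
                                             ['/var/log/arakoon/ovsdb',
                                              '/opt/OpenvStorage/db/arakoon/ovsdb/tlogs'])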
Example 6: get_owner_group_for_path
# Required import: from ovs.extensions.generic.sshclient import SSHClient [as alias]
# Or: from ovs.extensions.generic.sshclient.SSHClient import dir_exists [as alias]
def get_owner_group_for_path(path, root_client=None):
"""
Retrieve the owner and group name for the specified path
:param path: Path to retrieve information about
:param root_client: SSHClient object
:return: Owner and group information
"""
if root_client is None:
root_client = SSHClient(endpoint='127.0.0.1', username='root')
if not root_client.file_exists(filename=path) and not root_client.dir_exists(directory=path):
raise ValueError('The specified path is neither a file nor a directory')
stat_info = os.stat(path)
uid = stat_info.st_uid
gid = stat_info.st_gid
user = pwd.getpwuid(uid)[0]
group = grp.getgrgid(gid)[0]
return {'user': {'id': uid,
'name': user},
'group': {'id': gid,
'name': group}}
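A hedged usage sketch; the path is a placeholder and the call assumes get_owner_group_for_path is in scope. When root_client is omitted, the function builds a local root SSHClient itself, as shown above.

info = get_owner_group_for_path('/opt/OpenvStorage/db')  # hypothetical local path
print 'Owner: {0} ({1}), group: {2} ({3})'.format(info['user']['name'], info['user']['id'],
                                                  info['group']['name'], info['group']['id'])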
Example 7: check_vpool_cleanup
# Required import: from ovs.extensions.generic.sshclient import SSHClient [as alias]
# Or: from ovs.extensions.generic.sshclient.SSHClient import dir_exists [as alias]
def check_vpool_cleanup(vpool_info, storagerouters=None):
"""
Check if everything related to a vPool has been cleaned up on the storagerouters provided
vpool_info should be a dictionary containing:
- type
- guid
- files
- directories
- name (optional)
- vpool (optional)
If vpool is provided:
- storagerouters need to be provided, because on these Storage Routers, we check whether the vPool has been cleaned up
If name is provided:
- If storagerouters is NOT provided, all Storage Routers will be checked for a correct vPool removal
- If storagerouters is provided, only these Storage Routers will be checked for a correct vPool removal
:param vpool_info: Information about the vPool
:param storagerouters: Storage Routers to check if vPool has been cleaned up
:return: None
"""
for required_param in ['type', 'guid', 'files', 'directories']:
if required_param not in vpool_info:
raise ValueError('Incorrect vpool_info provided')
if 'vpool' in vpool_info and 'name' in vpool_info:
raise ValueError('vpool and name are mutually exclusive')
if 'vpool' not in vpool_info and 'name' not in vpool_info:
raise ValueError('Either vpool or name needs to be provided')
vpool = vpool_info.get('vpool')
vpool_name = vpool_info.get('name')
vpool_guid = vpool_info['guid']
vpool_type = vpool_info['type']
files = vpool_info['files']
directories = vpool_info['directories']
supported_backend_types = GeneralBackend.get_valid_backendtypes()
if vpool_type not in supported_backend_types:
raise ValueError('Unsupported Backend Type provided. Please choose from: {0}'.format(', '.join(supported_backend_types)))
if storagerouters is None:
storagerouters = GeneralStorageRouter.get_storage_routers()
if vpool_name is not None:
assert GeneralVPool.get_vpool_by_name(vpool_name=vpool_name) is None, 'A vPool with name {0} still exists'.format(vpool_name)
# Prepare some fields to check
vpool_name = vpool.name if vpool else vpool_name
vpool_services = ['ovs-dtl_{0}'.format(vpool_name),
'ovs-volumedriver_{0}'.format(vpool_name)]
if vpool_type == 'alba':
vpool_services.append('ovs-albaproxy_{0}'.format(vpool_name))
# Check etcd
if vpool is None:
assert EtcdConfiguration.exists('/ovs/vpools/{0}'.format(vpool_guid), raw=True) is False, 'vPool config still found in etcd'
else:
remaining_sd_ids = set([storagedriver.storagedriver_id for storagedriver in vpool.storagedrivers])
current_sd_ids = set([item for item in EtcdConfiguration.list('/ovs/vpools/{0}/hosts'.format(vpool_guid))])
assert not remaining_sd_ids.difference(current_sd_ids), 'There are more storagedrivers modelled than present in etcd'
assert not current_sd_ids.difference(remaining_sd_ids), 'There are more storagedrivers in etcd than present in model'
# Perform checks on all storagerouters where vpool was removed
for storagerouter in storagerouters:
# Check management center
mgmt_center = GeneralManagementCenter.get_mgmt_center(pmachine=storagerouter.pmachine)
if mgmt_center is not None:
assert GeneralManagementCenter.is_host_configured(pmachine=storagerouter.pmachine) is False, 'Management Center is still configured on Storage Router {0}'.format(storagerouter.ip)
# Check MDS services
mds_services = GeneralService.get_services_by_name(ServiceType.SERVICE_TYPES.MD_SERVER)
assert len([mds_service for mds_service in mds_services if mds_service.storagerouter_guid == storagerouter.guid]) == 0, 'There are still MDS services present for Storage Router {0}'.format(storagerouter.ip)
# Check services
root_client = SSHClient(storagerouter, username='root')
for service in vpool_services:
if ServiceManager.has_service(service, client=root_client):
raise RuntimeError('Service {0} is still configured on Storage Router {1}'.format(service, storagerouter.ip))
# Check KVM vpool
if storagerouter.pmachine.hvtype == 'KVM':
vpool_overview = root_client.run('virsh pool-list --all').splitlines()
vpool_overview.pop(1)
vpool_overview.pop(0)
for vpool_info in vpool_overview:
kvm_vpool_name = vpool_info.split()[0].strip()
if vpool_name == kvm_vpool_name:
raise ValueError('vPool {0} is still defined on Storage Router {1}'.format(vpool_name, storagerouter.ip))
# Check file and directory existence
if storagerouter.guid not in directories:
raise ValueError('Could not find directory information for Storage Router {0}'.format(storagerouter.ip))
if storagerouter.guid not in files:
raise ValueError('Could not find file information for Storage Router {0}'.format(storagerouter.ip))
for directory in directories[storagerouter.guid]:
assert root_client.dir_exists(directory) is False, 'Directory {0} still exists on Storage Router {1}'.format(directory, storagerouter.ip)
for file_name in files[storagerouter.guid]:
assert root_client.file_exists(file_name) is False, 'File {0} still exists on Storage Router {1}'.format(file_name, storagerouter.ip)
# Look for errors in storagedriver log
for error_type in ['error', 'fatal']:
#.........part of the code omitted here.........
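A hedged call sketch showing the expected shape of the vpool_info argument described in the docstring. The guid, name, paths and Storage Router guid keys are placeholders to be replaced with real values, and the call assumes check_vpool_cleanup is in scope; 'files' and 'directories' map Storage Router guids to the paths that must no longer exist on that Storage Router.

check_vpool_cleanup(vpool_info={'type': 'alba',
                                'guid': '11111111-2222-3333-4444-555555555555',  # hypothetical vPool guid
                                'name': 'myvpool01',                             # hypothetical vPool name
                                'files': {'<storagerouter_guid>': ['/mnt/myvpool01/some_file']},
                                'directories': {'<storagerouter_guid>': ['/mnt/myvpool01']}})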
Example 8: validate_vpool_sanity
# Required import: from ovs.extensions.generic.sshclient import SSHClient [as alias]
# Or: from ovs.extensions.generic.sshclient.SSHClient import dir_exists [as alias]
#.........part of the code omitted here.........
'alba_connection_port': None,
'alba_connection_preset': None,
'alba_connection_timeout': 15,
'backend_type': u'{0}'.format(vpool.backend_type.code.upper())})
elif backend_type == 'distributed':
expected_config['backend_connection_manager'].update({'backend_type': u'LOCAL',
'local_connection_path': u'{0}'.format(generic_settings['distributed_mountpoint'])})
assert EtcdConfiguration.exists('/ovs/arakoon/voldrv/config', raw=True), 'Volumedriver arakoon does not exist'
# Do some verifications for all SDs
storage_ip = None
voldrv_config = GeneralArakoon.get_config('voldrv')
all_files = GeneralVPool.get_related_files(vpool=vpool)
all_directories = GeneralVPool.get_related_directories(vpool=vpool)
for storagedriver in vpool.storagedrivers:
storagerouter = storagedriver.storagerouter
root_client = SSHClient(storagerouter, username='root')
assert EtcdConfiguration.exists('/ovs/vpools/{0}/hosts/{1}/config'.format(vpool.guid, storagedriver.storagedriver_id), raw=True), 'vPool config not found in etcd'
current_config_sections = set([item for item in EtcdConfiguration.list('/ovs/vpools/{0}/hosts/{1}/config'.format(vpool.guid, storagedriver.storagedriver_id))])
assert not current_config_sections.difference(set(expected_config.keys())), 'New section appeared in the storage driver config in etcd'
assert not set(expected_config.keys()).difference(current_config_sections), 'Config section expected for storage driver, but not found in etcd'
for key, values in expected_config.iteritems():
current_config = EtcdConfiguration.get('/ovs/vpools/{0}/hosts/{1}/config/{2}'.format(vpool.guid, storagedriver.storagedriver_id, key))
assert set(current_config.keys()).union(set(values.keys())) == set(values.keys()), 'Not all expected keys match for key "{0}" on Storage Driver {1}'.format(key, storagedriver.name)
for sub_key, value in current_config.iteritems():
expected_value = values[sub_key]
if expected_value is None:
continue
assert value == expected_value, 'Key: {0} - Sub key: {1} - Value: {2} - Expected value: {3}'.format(key, sub_key, value, expected_value)
# Check services
if storagerouter.node_type == 'MASTER':
for service_name in vpool_services['all'] + vpool_services['master']:
if service_name == 'ovs-arakoon-voldrv' and GeneralStorageDriver.has_role(storagedriver, 'DB') is False:
continue
if ServiceManager.get_service_status(name=service_name,
client=root_client) is not True:
raise ValueError('Service {0} is not running on node {1}'.format(service_name, storagerouter.ip))
else:
for service_name in vpool_services['all'] + vpool_services['extra']:
if ServiceManager.get_service_status(name=service_name,
client=root_client) is not True:
raise ValueError('Service {0} is not running on node {1}'.format(service_name, storagerouter.ip))
# Check arakoon config
if not voldrv_config.has_section(storagerouter.machine_id):
raise ValueError('Voldrv arakoon cluster does not have section {0}'.format(storagerouter.machine_id))
# Basic SD checks
assert storagedriver.cluster_ip == storagerouter.ip, 'Incorrect cluster IP. Expected: {0} - Actual: {1}'.format(storagerouter.ip, storagedriver.cluster_ip)
assert storagedriver.mountpoint == '/mnt/{0}'.format(vpool.name), 'Incorrect mountpoint. Expected: {0} - Actual: {1}'.format(mountpoint, storagedriver.mountpoint)
if storage_ip is not None:
assert storagedriver.storage_ip == storage_ip, 'Incorrect storage IP. Expected: {0} - Actual: {1}'.format(storage_ip, storagedriver.storage_ip)
storage_ip = storagedriver.storage_ip
# Check required directories and files
if storagerouter.guid not in all_directories:
raise ValueError('Could not find directory information for Storage Router {0}'.format(storagerouter.ip))
if storagerouter.guid not in all_files:
raise ValueError('Could not find file information for Storage Router {0}'.format(storagerouter.ip))
for directory in all_directories[storagerouter.guid]:
if root_client.dir_exists(directory) is False:
raise ValueError('Directory {0} does not exist on Storage Router {1}'.format(directory, storagerouter.ip))
for file_name in all_files[storagerouter.guid]:
if root_client.file_exists(file_name) is False:
raise ValueError('File {0} does not exist on Storage Router {1}'.format(file_name, storagerouter.ip))
for partition in storagedriver.partitions:
if partition.role in sd_partitions and partition.sub_role in sd_partitions[partition.role]:
sd_partitions[partition.role].remove(partition.sub_role)
elif partition.role in sd_partitions and partition.sub_role is None:
sd_partitions[partition.role].remove('None')
# Verify vPool writeable
if storagerouter.pmachine.hvtype == 'VMWARE':
GeneralVPool.mount_vpool(vpool=vpool,
root_client=root_client)
vdisk = GeneralVDisk.create_volume(size=10,
vpool=vpool,
root_client=root_client)
GeneralVDisk.write_to_volume(vdisk=vdisk,
vpool=vpool,
root_client=root_client,
count=10,
bs='1M',
input_type='random')
GeneralVDisk.delete_volume(vdisk=vdisk,
vpool=vpool,
root_client=root_client)
for role, sub_roles in sd_partitions.iteritems():
for sub_role in sub_roles:
raise ValueError('Not a single Storage Driver found with partition role {0} and sub-role {1}'.format(role, sub_role))
Example 9: migrate
# Required import: from ovs.extensions.generic.sshclient import SSHClient [as alias]
# Or: from ovs.extensions.generic.sshclient.SSHClient import dir_exists [as alias]
#.........part of the code omitted here.........
for directory in dirs:
ports_dir = '/'.join([root, directory, 'ports'])
if not rem.os.path.exists(ports_dir):
continue
for sub_root, sub_dirs, _ in rem.os.walk(ports_dir):
if sub_root != ports_dir:
continue
for sub_directory in sub_dirs:
state_file = '/'.join([sub_root, sub_directory, 'state'])
if rem.os.path.exists(state_file):
if 'ACTIVE' in client.run('cat {0}'.format(state_file)):
rdma_capable = True
storagerouter.rdma_capable = rdma_capable
change = True
if change is True:
storagerouter.save()
working_version = 5
# Version 6 introduced:
# - Distributed scrubbing
if working_version < 6:
from ovs.dal.hybrids.diskpartition import DiskPartition
from ovs.dal.lists.storagedriverlist import StorageDriverList
from ovs.extensions.generic.sshclient import SSHClient
for storage_driver in StorageDriverList.get_storagedrivers():
root_client = SSHClient(storage_driver.storagerouter, username='root')
for partition in storage_driver.partitions:
if partition.role == DiskPartition.ROLES.SCRUB:
old_path = partition.path
partition.sub_role = None
partition.save()
partition.invalidate_dynamics(['folder', 'path'])
if root_client.dir_exists(partition.path):
continue # New directory already exists
if '_mds_' in old_path:
if root_client.dir_exists(old_path):
root_client.symlink({partition.path: old_path})
if not root_client.dir_exists(partition.path):
root_client.dir_create(partition.path)
root_client.dir_chmod(partition.path, 0777)
working_version = 6
# Version 7 introduced:
# - vPool status
if working_version < 7:
from ovs.dal.hybrids import vpool
reload(vpool)
from ovs.dal.hybrids.vpool import VPool
from ovs.dal.lists.vpoollist import VPoolList
for _vpool in VPoolList.get_vpools():
vpool = VPool(_vpool.guid)
if hasattr(vpool, 'status') and vpool.status is None:
vpool.status = VPool.STATUSES.RUNNING
vpool.save()
working_version = 7
# Version 10 introduced:
# - Reverse indexes are stored in persistent store
# - Store more non-changing metadata on disk iso using a dynamic property
if working_version < 10:
from ovs.dal.helpers import HybridRunner, Descriptor
from ovs.dal.datalist import DataList
from ovs.extensions.storage.persistentfactory import PersistentFactory
Example 10: migrate
# Required import: from ovs.extensions.generic.sshclient import SSHClient [as alias]
# Or: from ovs.extensions.generic.sshclient.SSHClient import dir_exists [as alias]
def migrate(previous_version, master_ips=None, extra_ips=None):
"""
Migrates from any version to any version, running all migrations required
If previous_version is for example 1 and this script is at
version 3, it will execute two steps:
- 1 > 2
- 2 > 3
:param previous_version: The previous version from which to start the migration.
:param master_ips: IP addresses of the MASTER nodes
:param extra_ips: IP addresses of the EXTRA nodes
"""
working_version = previous_version
# Version 1 introduced:
# - Flexible SSD layout
if working_version < 1:
try:
from ovs.extensions.generic.configuration import Configuration
if Configuration.exists('ovs.arakoon'):
Configuration.delete('ovs.arakoon', remove_root=True)
Configuration.set('ovs.core.ovsdb', '/opt/OpenvStorage/db')
except:
logger.exception('Error migrating to version 1')
working_version = 1
# Version 2 introduced:
# - Registration
if working_version < 2:
try:
import time
from ovs.extensions.generic.configuration import Configuration
if not Configuration.exists('ovs.core.registered'):
Configuration.set('ovs.core.registered', False)
Configuration.set('ovs.core.install_time', time.time())
except:
logger.exception('Error migrating to version 2')
working_version = 2
# Version 3 introduced:
# - New arakoon clients
if working_version < 3:
try:
from ovs.extensions.db.arakoon import ArakoonInstaller
reload(ArakoonInstaller)
from ovs.extensions.db.arakoon.ArakoonInstaller import ArakoonInstaller
from ovs.extensions.generic.sshclient import SSHClient
from ovs.extensions.generic.configuration import Configuration
if master_ips is not None:
for ip in master_ips:
client = SSHClient(ip)
if client.dir_exists(ArakoonInstaller.ARAKOON_CONFIG_DIR):
for cluster_name in client.dir_list(ArakoonInstaller.ARAKOON_CONFIG_DIR):
try:
ArakoonInstaller.deploy_cluster(cluster_name, ip)
except:
pass
if Configuration.exists('ovs.core.storage.persistent'):
Configuration.set('ovs.core.storage.persistent', 'pyrakoon')
except:
logger.exception('Error migrating to version 3')
working_version = 3
# Version 4 introduced:
# - Etcd
if working_version < 4:
try:
import os
import json
from ConfigParser import RawConfigParser
from ovs.extensions.db.etcd import installer
reload(installer)
from ovs.extensions.db.etcd.installer import EtcdInstaller
from ovs.extensions.db.etcd.configuration import EtcdConfiguration
from ovs.extensions.generic.system import System
host_id = System.get_my_machine_id()
etcd_migrate = False
if EtcdInstaller.has_cluster('127.0.0.1', 'config'):
etcd_migrate = True
else:
if master_ips is not None and extra_ips is not None:
cluster_ip = None
for ip in master_ips + extra_ips:
if EtcdInstaller.has_cluster(ip, 'config'):
cluster_ip = ip
break
node_ip = None
path = '/opt/OpenvStorage/config/ovs.json'
if os.path.exists(path):
with open(path) as config_file:
config = json.load(config_file)
node_ip = config['grid']['ip']
if node_ip is not None:
if cluster_ip is None:
EtcdInstaller.create_cluster('config', node_ip)
EtcdConfiguration.initialize()
EtcdConfiguration.initialize_host(host_id)
#.........part of the code omitted here.........
Example 11: check_license_headers_test
# Required import: from ovs.extensions.generic.sshclient import SSHClient [as alias]
# Or: from ovs.extensions.generic.sshclient.SSHClient import dir_exists [as alias]
def check_license_headers_test():
"""
Check license headers
"""
license_header = re.compile('Copyright 201[4-9] iNuron NV')
license_to_check = ['',
'Licensed under the Apache License, Version 2.0 (the "License");',
'you may not use this file except in compliance with the License.',
'You may obtain a copy of the License at',
'',
' http://www.apache.org/licenses/LICENSE-2.0',
'',
'Unless required by applicable law or agreed to in writing, software',
'distributed under the License is distributed on an "AS IS" BASIS,',
'WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.',
'See the License for the specific language governing permissions and',
'limitations under the License.']
exclude_dirs = ['/opt/OpenvStorage/config/templates/cinder-unit-tests/',
'/opt/OpenvStorage/config/templates/cinder-volume-driver/',
'/opt/OpenvStorage/webapps/frontend/css/',
'/opt/OpenvStorage/webapps/frontend/lib/',
'/opt/OpenvStorage/ovs/extensions/db/arakoon/arakoon/arakoon/',
'/opt/OpenvStorage/ovs/extensions/db/arakoon/pyrakoon/pyrakoon/']
include_dirs = ['/opt/OpenvStorage/webapps/frontend/lib/ovs/']
exclude_files = ['/opt/OpenvStorage/ovs/extensions/generic/fakesleep.py']
include_files = ['/opt/OpenvStorage/webapps/frontend/css/ovs.css']
extension_comments_map = {'.py': ['#'],
'.js': ['//'],
'.html': ['<!--', '-->'],
'.css': ['/*', '*', '*/']}
storagerouters = GeneralStorageRouter.get_storage_routers()
files_with_diff_licenses = {}
for storagerouter in storagerouters:
root_client = SSHClient(storagerouter, username='root')
files_with_diff_licenses[storagerouter.guid] = []
for root_folder in ['/opt/OpenvStorage', '/opt/asd-manager']:
if not root_client.dir_exists(root_folder):
raise ValueError('Root folder {0} does not exist'.format(root_folder))
unfiltered_files = root_client.file_list(directory=root_folder,
abs_path=True,
recursive=True)
filtered_files = General.filter_files(files=unfiltered_files,
extensions=extension_comments_map.keys(),
exclude_dirs=exclude_dirs,
include_dirs=include_dirs,
exclude_files=exclude_files,
include_files=include_files)
for file_name in filtered_files:
# Read file
with open(file_name, 'r') as utf_file:
data = utf_file.read().decode("utf-8-sig").encode("utf-8")
lines_to_check = data.splitlines()
# Check relevant comment type for current file
comments = []
for extension, cmts in extension_comments_map.iteritems():
if file_name.endswith(extension):
comments = cmts
break
if len(comments) == 0:
raise ValueError('Something must have gone wrong filtering the files, because file {0} does not have a correct extension'.format(file_name))
# Search license header
index = 0
lic_header_found = False
for index, line in enumerate(lines_to_check):
for comment in comments:
line = line.replace(comment, '', 1)
if re.match(license_header, line.strip()):
lic_header_found = True
break
# License header not found, continuing
if lic_header_found is False:
files_with_diff_licenses[storagerouter.guid].append(file_name)
continue
# License header found, checking rest of license
index += 1
for license_line in license_to_check:
line_to_check = lines_to_check[index]
for comment in comments:
line_to_check = line_to_check.replace(comment, '', 1)
if license_line.strip() == line_to_check.strip():
index += 1
continue
files_with_diff_licenses[storagerouter.guid].append(file_name)
break
for storagerouter in storagerouters:
assert len(files_with_diff_licenses[storagerouter.guid]) == 0, 'Following files were found with different licenses:\n - {0}'.format('\n - '.join(files_with_diff_licenses[storagerouter.guid]))
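The core of the header detection, stripping a file type's comment markers and then matching the copyright line, can be isolated in a small sketch; the sample line is made up.

import re

license_header = re.compile('Copyright 201[4-9] iNuron NV')
comments = ['#']  # comment markers for a .py file
line = '# Copyright 2016 iNuron NV'  # hypothetical first header line of a source file
for comment in comments:
    line = line.replace(comment, '', 1)
print re.match(license_header, line.strip()) is not None  # True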
Example 12: check_vpool_cleanup
# Required import: from ovs.extensions.generic.sshclient import SSHClient [as alias]
# Or: from ovs.extensions.generic.sshclient.SSHClient import dir_exists [as alias]
#.........part of the code omitted here.........
storagerouters = GeneralStorageRouter.get_storage_routers()
if vpool_name is not None:
assert (
GeneralVPool.get_vpool_by_name(vpool_name=vpool_name) is None
), "A vPool with name {0} still exists".format(vpool_name)
# Prepare some fields to check
vpool_name = vpool.name if vpool else vpool_name
vpool_services = ["ovs-dtl_{0}".format(vpool_name), "ovs-volumedriver_{0}".format(vpool_name)]
if vpool_type == "alba":
vpool_services.append("ovs-albaproxy_{0}".format(vpool_name))
# Check configuration
if vpool is None:
assert (
Configuration.exists("/ovs/vpools/{0}".format(vpool_guid), raw=True) is False
), "vPool config still found in etcd"
else:
remaining_sd_ids = set([storagedriver.storagedriver_id for storagedriver in vpool.storagedrivers])
current_sd_ids = set([item for item in Configuration.list("/ovs/vpools/{0}/hosts".format(vpool_guid))])
assert not remaining_sd_ids.difference(
current_sd_ids
), "There are more storagedrivers modelled than present in etcd"
assert not current_sd_ids.difference(
remaining_sd_ids
), "There are more storagedrivers in etcd than present in model"
# Perform checks on all storagerouters where vpool was removed
for storagerouter in storagerouters:
# Check MDS services
mds_services = GeneralService.get_services_by_name(ServiceType.SERVICE_TYPES.MD_SERVER)
assert (
len(
[
mds_service
for mds_service in mds_services
if mds_service.storagerouter_guid == storagerouter.guid
]
)
== 0
), "There are still MDS services present for Storage Router {0}".format(storagerouter.ip)
# Check services
root_client = SSHClient(storagerouter, username="root")
for service in vpool_services:
if ServiceManager.has_service(service, client=root_client):
raise RuntimeError(
"Service {0} is still configured on Storage Router {1}".format(service, storagerouter.ip)
)
# Check KVM vpool
if GeneralHypervisor.get_hypervisor_type() == "KVM":
vpool_overview = root_client.run(["virsh", "pool-list", "--all"]).splitlines()
vpool_overview.pop(1)
vpool_overview.pop(0)
for vpool_info in vpool_overview:
kvm_vpool_name = vpool_info.split()[0].strip()
if vpool_name == kvm_vpool_name:
raise ValueError(
"vPool {0} is still defined on Storage Router {1}".format(vpool_name, storagerouter.ip)
)
# Check file and directory existence
if storagerouter.guid not in directories:
raise ValueError("Could not find directory information for Storage Router {0}".format(storagerouter.ip))
if storagerouter.guid not in files:
raise ValueError("Could not find file information for Storage Router {0}".format(storagerouter.ip))
for directory in directories[storagerouter.guid]:
assert (
root_client.dir_exists(directory) is False
), "Directory {0} still exists on Storage Router {1}".format(directory, storagerouter.ip)
for file_name in files[storagerouter.guid]:
assert (
root_client.file_exists(file_name) is False
), "File {0} still exists on Storage Router {1}".format(file_name, storagerouter.ip)
# Look for errors in storagedriver log
for error_type in ["error", "fatal"]:
cmd = "cat -vet /var/log/ovs/volumedriver/{0}.log | tail -1000 | grep ' {1} '; echo true > /dev/null".format(
vpool_name, error_type
)
errors = []
for line in root_client.run(cmd, allow_insecure=True).splitlines():
if "HierarchicalArakoon" in line:
continue
errors.append(line)
if len(errors) > 0:
if error_type == "error":
print "Volumedriver log file contains errors on Storage Router {0}\n - {1}".format(
storagerouter.ip, "\n - ".join(errors)
)
else:
raise RuntimeError(
"Fatal errors found in volumedriver log file on Storage Router {0}\n - {1}".format(
storagerouter.ip, "\n - ".join(errors)
)
)
Example 13: validate_vpool_sanity
# Required import: from ovs.extensions.generic.sshclient import SSHClient [as alias]
# Or: from ovs.extensions.generic.sshclient.SSHClient import dir_exists [as alias]
#.........part of the code omitted here.........
# @todo: replace next lines with implementation defined in: http://jira.openvstorage.com/browse/OVS-4577
# current_config_sections = set([item for item in Configuration.list('/ovs/vpools/{0}/hosts/{1}/config'.format(vpool.guid, storagedriver.storagedriver_id))])
# assert not current_config_sections.difference(set(expected_config.keys())), 'New section appeared in the storage driver config in configuration'
# assert not set(expected_config.keys()).difference(current_config_sections), 'Config section expected for storage driver, but not found in configuration'
#
# for key, values in expected_config.iteritems():
# current_config = Configuration.get('/ovs/vpools/{0}/hosts/{1}/config/{2}'.format(vpool.guid, storagedriver.storagedriver_id, key))
# assert set(current_config.keys()).union(set(values.keys())) == set(values.keys()), 'Not all expected keys match for key "{0}" on Storage Driver {1}'.format(key, storagedriver.name)
#
# for sub_key, value in current_config.iteritems():
# expected_value = values[sub_key]
# if expected_value is None:
# continue
# assert value == expected_value, 'Key: {0} - Sub key: {1} - Value: {2} - Expected value: {3}'.format(key, sub_key, value, expected_value)
# Check services
if storagerouter.node_type == "MASTER":
for service_name in vpool_services["all"] + vpool_services["master"]:
if (
service_name == "ovs-arakoon-voldrv"
and GeneralStorageDriver.has_role(storagedriver, "DB") is False
):
continue
exitcode, output = ServiceManager.get_service_status(name=service_name, client=root_client)
if exitcode is not True:
raise ValueError(
"Service {0} is not running on node {1} - {2}".format(
service_name, storagerouter.ip, output
)
)
else:
for service_name in vpool_services["all"] + vpool_services["extra"]:
exitcode, output = ServiceManager.get_service_status(name=service_name, client=root_client)
if exitcode is not True:
raise ValueError(
"Service {0} is not running on node {1} - {2}".format(
service_name, storagerouter.ip, output
)
)
# Check arakoon config
if not voldrv_config.has_section(storagerouter.machine_id):
raise ValueError("Voldrv arakoon cluster does not have section {0}".format(storagerouter.machine_id))
# Basic SD checks
assert (
storagedriver.cluster_ip == storagerouter.ip
), "Incorrect cluster IP. Expected: {0} - Actual: {1}".format(storagerouter.ip, storagedriver.cluster_ip)
assert storagedriver.mountpoint == "/mnt/{0}".format(
vpool.name
), "Incorrect mountpoint. Expected: {0} - Actual: {1}".format(mountpoint, storagedriver.mountpoint)
if storage_ip is not None:
assert (
storagedriver.storage_ip == storage_ip
), "Incorrect storage IP. Expected: {0} - Actual: {1}".format(storage_ip, storagedriver.storage_ip)
storage_ip = storagedriver.storage_ip
# Check required directories and files
if storagerouter.guid not in all_directories:
raise ValueError("Could not find directory information for Storage Router {0}".format(storagerouter.ip))
if storagerouter.guid not in all_files:
raise ValueError("Could not find file information for Storage Router {0}".format(storagerouter.ip))
for directory in all_directories[storagerouter.guid]:
if root_client.dir_exists(directory) is False:
raise ValueError(
"Directory {0} does not exist on Storage Router {1}".format(directory, storagerouter.ip)
)
for file_name in all_files[storagerouter.guid]:
if root_client.file_exists(file_name) is False:
raise ValueError(
"File {0} does not exist on Storage Router {1}".format(file_name, storagerouter.ip)
)
# @TODO: check roles and sub_roles for all storagedrivers and not just once
for partition in storagedriver.partitions:
if partition.role in sd_partitions and partition.sub_role in sd_partitions[partition.role]:
sd_partitions[partition.role].remove(partition.sub_role)
elif (
partition.role in sd_partitions
and partition.sub_role is None
and len(sd_partitions[partition.role])
):
sd_partitions[partition.role].remove("None")
# Verify vPool writeable
if GeneralHypervisor.get_hypervisor_type() == "VMWARE":
GeneralVPool.mount_vpool(vpool=vpool, root_client=root_client)
vdisk = GeneralVDisk.create_volume(size=10, vpool=vpool, root_client=root_client)
GeneralVDisk.write_to_volume(
vdisk=vdisk, vpool=vpool, root_client=root_client, count=10, bs="1M", input_type="random"
)
GeneralVDisk.delete_volume(vdisk=vdisk, vpool=vpool, root_client=root_client)
for role, sub_roles in sd_partitions.iteritems():
for sub_role in sub_roles:
raise ValueError(
"Not a single Storage Driver found with partition role {0} and sub-role {1}".format(role, sub_role)
)
Example 14: check_license_headers_test
# Required import: from ovs.extensions.generic.sshclient import SSHClient [as alias]
# Or: from ovs.extensions.generic.sshclient.SSHClient import dir_exists [as alias]
def check_license_headers_test():
"""
Check license headers
"""
license_header = re.compile('Copyright \(C\) 201[4-9] iNuron NV')
license_to_check = ["",
" This file is part of Open vStorage Open Source Edition (OSE),",
" as available from",
"",
" http://www.openvstorage.org and",
" http://www.openvstorage.com.",
"",
" This file is free software; you can redistribute it and/or modify it",
" under the terms of the GNU Affero General Public License v3 (GNU AGPLv3)",
" as published by the Free Software Foundation, in version 3 as it comes",
" in the LICENSE.txt file of the Open vStorage OSE distribution.",
"",
" Open vStorage is distributed in the hope that it will be useful,",
" but WITHOUT ANY WARRANTY of any kind."]
exclude_dirs = ['/opt/OpenvStorage/config/templates/cinder-unit-tests/',
'/opt/OpenvStorage/config/templates/cinder-volume-driver/',
'/opt/OpenvStorage/webapps/frontend/css/',
'/opt/OpenvStorage/webapps/frontend/lib/',
'/opt/OpenvStorage/ovs/extensions/db/arakoon/arakoon/arakoon/',
'/opt/OpenvStorage/ovs/extensions/db/arakoon/pyrakoon/pyrakoon/',
'/opt/asd-manager/source/tools/pyrakoon/pyrakoon/']
include_dirs = ['/opt/OpenvStorage/webapps/frontend/lib/ovs/']
exclude_files = ['/opt/OpenvStorage/ovs/extensions/generic/fakesleep.py']
include_files = ['/opt/OpenvStorage/webapps/frontend/css/ovs.css']
extension_comments_map = {'.py': ['#'],
'.sh': ['#'],
'.js': ['//'],
'.html': ['<!--', '-->'],
'.css': ['/*', '*', '*/']}
storagerouters = GeneralStorageRouter.get_storage_routers()
files_with_diff_licenses = {}
for storagerouter in storagerouters:
root_client = SSHClient(storagerouter, username='root')
files_with_diff_licenses[storagerouter.guid] = []
for root_folder in ['/opt/OpenvStorage', '/opt/asd-manager']:
if not root_client.dir_exists(root_folder):
raise ValueError('Root folder {0} does not exist'.format(root_folder))
unfiltered_files = root_client.file_list(directory=root_folder,
abs_path=True,
recursive=True)
filtered_files = General.filter_files(files=unfiltered_files,
extensions=extension_comments_map.keys(),
exclude_dirs=exclude_dirs,
include_dirs=include_dirs,
exclude_files=exclude_files,
include_files=include_files)
for file_name in filtered_files:
# Read file
with open(file_name, 'r') as utf_file:
data = utf_file.read().decode("utf-8-sig").encode("utf-8")
lines_to_check = data.splitlines()
# Check relevant comment type for current file
comments = []
for extension, cmts in extension_comments_map.iteritems():
if file_name.endswith(extension):
comments = cmts
break
if len(comments) == 0:
raise ValueError('Something must have gone wrong filtering the files, because file {0} does not have a correct extension'.format(file_name))
# Search license header
index = 0
lic_header_found = False
for index, line in enumerate(lines_to_check):
for comment in comments:
line = line.replace(comment, '', 1)
if re.match(license_header, line.strip()):
lic_header_found = True
break
# License header not found, continuing
if lic_header_found is False:
files_with_diff_licenses[storagerouter.guid].append(file_name)
continue
# License header found, checking rest of license
index += 1
for license_line in license_to_check:
line_to_check = lines_to_check[index]
for comment in comments:
line_to_check = line_to_check.replace(comment, '', 1)
if license_line.strip() == line_to_check.strip():
index += 1
else:
files_with_diff_licenses[storagerouter.guid].append(file_name)
break
for storagerouter in storagerouters:
assert len(files_with_diff_licenses[storagerouter.guid]) == 0, 'Following files were found with different licenses:\n - {0}'.format('\n - '.join(files_with_diff_licenses[storagerouter.guid]))