本文整理汇总了Python中ovs.extensions.generic.sshclient.SSHClient.dir_chown方法的典型用法代码示例。如果您正苦于以下问题:Python SSHClient.dir_chown方法的具体用法?Python SSHClient.dir_chown怎么用?Python SSHClient.dir_chown使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类ovs.extensions.generic.sshclient.SSHClient的用法示例。
在下文中一共展示了SSHClient.dir_chown方法的13个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _deploy
# 需要导入模块: from ovs.extensions.generic.sshclient import SSHClient [as 别名]
# 或者: from ovs.extensions.generic.sshclient.SSHClient import dir_chown [as 别名]
def _deploy(config):
"""
Deploys a complete cluster: Distributing the configuration files, creating directories and services
"""
logger.debug("Deploying cluster {0}".format(config.cluster_id))
for node in config.nodes:
logger.debug(" Deploying cluster {0} on {1}".format(config.cluster_id, node.ip))
ovs_client = SSHClient(node.ip)
root_client = SSHClient(node.ip, username="root")
# Distributes a configuration file to all its nodes
config.write_config(ovs_client)
# Create dirs as root because mountpoint /mnt/cache1 is typically owned by root
abs_paths = [node.log_dir, node.tlog_dir, node.home]
root_client.dir_create(abs_paths)
root_client.dir_chmod(abs_paths, 0755, recursive=True)
root_client.dir_chown(abs_paths, "ovs", "ovs", recursive=True)
# Creates services for/on all nodes in the config
base_name = "ovs-arakoon"
target_name = "ovs-arakoon-{0}".format(config.cluster_id)
ServiceManager.prepare_template(base_name, target_name, ovs_client)
ServiceManager.add_service(target_name, root_client, params={"CLUSTER": config.cluster_id})
logger.debug(" Deploying cluster {0} on {1} completed".format(config.cluster_id, node.ip))
示例2: _deploy
# 需要导入模块: from ovs.extensions.generic.sshclient import SSHClient [as 别名]
# 或者: from ovs.extensions.generic.sshclient.SSHClient import dir_chown [as 别名]
def _deploy(config, offline_nodes=None):
    """
    Deploys a complete cluster: Distributing the configuration files, creating directories and services

    :param config: Cluster configuration object (provides cluster_id and nodes)
    :param offline_nodes: Optional list of IPs of nodes that are offline and must be skipped
    """
    ArakoonInstaller._logger.debug('Deploying cluster {0}'.format(config.cluster_id))
    if offline_nodes is None:
        offline_nodes = []
    for node in config.nodes:
        # Offline nodes cannot be reached, so they are skipped entirely
        if node.ip in offline_nodes:
            continue
        ArakoonInstaller._logger.debug(' Deploying cluster {0} on {1}'.format(config.cluster_id, node.ip))
        root_client = SSHClient(node.ip, username='root')
        # Distributes a configuration file to all its nodes
        config.write_config()
        # Create dirs as root because mountpoint /mnt/cache1 is typically owned by root
        abs_paths = [node.log_dir, node.tlog_dir, node.home]
        # NOTE(review): indentation was lost in this snippet; reconstruction assumes only
        # dir_create is guarded by dir_exists while chmod/chown always run (matching the
        # unconditional chmod/chown in the sibling _deploy variants) — confirm against upstream
        if not root_client.dir_exists(abs_paths):
            root_client.dir_create(abs_paths)
        root_client.dir_chmod(abs_paths, 0755, recursive=True)
        root_client.dir_chown(abs_paths, 'ovs', 'ovs', recursive=True)
        # Creates services for/on all nodes in the config
        base_name = 'ovs-arakoon'
        target_name = 'ovs-arakoon-{0}'.format(config.cluster_id)
        ServiceManager.add_service(base_name, root_client,
                                   params={'CLUSTER': config.cluster_id,
                                           'NODE_ID': node.name,
                                           'CONFIG_PATH': ArakoonInstaller.ETCD_CONFIG_PATH.format(config.cluster_id)},
                                   target_name=target_name)
        ArakoonInstaller._logger.debug(' Deploying cluster {0} on {1} completed'.format(config.cluster_id, node.ip))
示例3: extend_cluster
# 需要导入模块: from ovs.extensions.generic.sshclient import SSHClient [as 别名]
# 或者: from ovs.extensions.generic.sshclient.SSHClient import dir_chown [as 别名]
def extend_cluster(master_ip, new_ip, cluster_name):
"""
Extends a cluster to a given new node
:param cluster_name: Name of the cluster to be extended
:param new_ip: IP address of the node to be added
:param master_ip: IP of one of the already existing nodes
"""
logger.debug('Extending cluster "{0}" from {1} to {2}'.format(cluster_name, master_ip, new_ip))
client = SSHClient(master_ip, username='root')
if not EtcdInstaller._is_healty(cluster_name, client):
raise RuntimeError('Cluster "{0}" unhealthy, aborting extend'.format(cluster_name))
cluster_members = client.run('etcdctl member list').splitlines()
for cluster_member in cluster_members:
if EtcdInstaller.SERVER_URL.format(new_ip) in cluster_member:
logger.info('Node {0} already member of etcd cluster'.format(new_ip))
return
current_cluster = []
for item in client.run('etcdctl member list').splitlines():
info = re.search(EtcdInstaller.MEMBER_REGEX, item).groupdict()
current_cluster.append('{0}={1}'.format(info['name'], info['peer']))
client = SSHClient(new_ip, username='root')
node_name = System.get_my_machine_id(client)
current_cluster.append('{0}={1}'.format(node_name, EtcdInstaller.SERVER_URL.format(new_ip)))
data_dir = EtcdInstaller.DATA_DIR.format(EtcdInstaller.DB_DIR, cluster_name)
wal_dir = EtcdInstaller.WAL_DIR.format(EtcdInstaller.DB_DIR, cluster_name)
abs_paths = [data_dir, wal_dir]
client.dir_delete(abs_paths)
client.dir_create(abs_paths)
client.dir_chmod(abs_paths, 0755, recursive=True)
client.dir_chown(abs_paths, 'ovs', 'ovs', recursive=True)
base_name = 'ovs-etcd'
target_name = 'ovs-etcd-{0}'.format(cluster_name)
EtcdInstaller.stop(cluster_name, client) # Stop a possible proxy service
ServiceManager.add_service(base_name, client,
params={'CLUSTER': cluster_name,
'NODE_ID': node_name,
'DATA_DIR': data_dir,
'WAL_DIR': wal_dir,
'SERVER_URL': EtcdInstaller.SERVER_URL.format(new_ip),
'CLIENT_URL': EtcdInstaller.CLIENT_URL.format(new_ip),
'LOCAL_CLIENT_URL': EtcdInstaller.CLIENT_URL.format('127.0.0.1'),
'INITIAL_CLUSTER': ','.join(current_cluster),
'INITIAL_STATE': 'existing',
'INITIAL_PEERS': ''},
target_name=target_name)
master_client = SSHClient(master_ip, username='root')
master_client.run('etcdctl member add {0} {1}'.format(node_name, EtcdInstaller.SERVER_URL.format(new_ip)))
EtcdInstaller.start(cluster_name, client)
EtcdInstaller.wait_for_cluster(cluster_name, client)
logger.debug('Extending cluster "{0}" from {1} to {2} completed'.format(cluster_name, master_ip, new_ip))
示例4: _deploy
# 需要导入模块: from ovs.extensions.generic.sshclient import SSHClient [as 别名]
# 或者: from ovs.extensions.generic.sshclient.SSHClient import dir_chown [as 别名]
def _deploy(config, filesystem, offline_nodes=None, plugins=None, delay_service_registration=False):
"""
Deploys a complete cluster: Distributing the configuration files, creating directories and services
"""
if os.environ.get('RUNNING_UNITTESTS') == 'True':
if filesystem is True:
raise NotImplementedError('At this moment, there is no support for unit-testing filesystem backend Arakoon clusters')
ArakoonInstaller._logger.debug('Deploying cluster {0}'.format(config.cluster_id))
if offline_nodes is None:
offline_nodes = []
service_metadata = {}
for node in config.nodes:
if node.ip in offline_nodes:
continue
ArakoonInstaller._logger.debug(' Deploying cluster {0} on {1}'.format(config.cluster_id, node.ip))
root_client = SSHClient(node.ip, username='root')
# Distributes a configuration file to all its nodes
config.write_config(node.ip)
# Create dirs as root because mountpoint /mnt/cache1 is typically owned by root
abs_paths = {node.tlog_dir, node.home} # That's a set
if node.log_sinks.startswith('/'):
abs_paths.add(os.path.dirname(os.path.abspath(node.log_sinks)))
if node.crash_log_sinks.startswith('/'):
abs_paths.add(os.path.dirname(os.path.abspath(node.crash_log_sinks)))
abs_paths = list(abs_paths)
root_client.dir_create(abs_paths)
root_client.dir_chmod(abs_paths, 0755, recursive=True)
root_client.dir_chown(abs_paths, 'ovs', 'ovs', recursive=True)
# Creates services for/on all nodes in the config
if config.filesystem is True:
config_path = config.config_path
else:
config_path = Configuration.get_configuration_path(config.config_path)
extra_version_cmd = ''
if plugins is not None:
extra_version_cmd = ';'.join(plugins)
metadata = ServiceManager.add_service(name='ovs-arakoon',
client=root_client,
params={'CLUSTER': config.cluster_id,
'NODE_ID': node.name,
'CONFIG_PATH': config_path,
'EXTRA_VERSION_CMD': extra_version_cmd},
target_name='ovs-arakoon-{0}'.format(config.cluster_id),
startup_dependency=('ovs-watcher-config' if filesystem is False else None),
delay_registration=delay_service_registration)
service_metadata[node.ip] = metadata
ArakoonInstaller._logger.debug(' Deploying cluster {0} on {1} completed'.format(config.cluster_id, node.ip))
return service_metadata
示例5: create_cluster
# 需要导入模块: from ovs.extensions.generic.sshclient import SSHClient [as 别名]
# 或者: from ovs.extensions.generic.sshclient.SSHClient import dir_chown [as 别名]
def create_cluster(cluster_name, ip, server_port=DEFAULT_SERVER_PORT, client_port=DEFAULT_CLIENT_PORT):
"""
Creates a cluster
:param cluster_name: Name of the cluster
:type cluster_name: str
:param ip: IP address of the first node of the new cluster
:type ip: str
:param server_port: Port to be used by server
:type server_port: int
:param client_port: Port to be used by client
:type client_port: int
:return: None
"""
EtcdInstaller._logger.debug('Creating cluster "{0}" on {1}'.format(cluster_name, ip))
client = SSHClient(ip, username='root')
target_name = 'ovs-etcd-{0}'.format(cluster_name)
if ServiceManager.has_service(target_name, client) and ServiceManager.get_service_status(target_name, client) is True:
EtcdInstaller._logger.info('Service {0} already configured and running'.format(target_name))
return
node_name = System.get_my_machine_id(client)
data_dir = EtcdInstaller.DATA_DIR.format(cluster_name)
wal_dir = EtcdInstaller.WAL_DIR.format(cluster_name)
abs_paths = [data_dir, wal_dir]
client.dir_delete(abs_paths)
client.dir_create(abs_paths)
client.dir_chmod(abs_paths, 0755, recursive=True)
client.dir_chown(abs_paths, 'ovs', 'ovs', recursive=True)
base_name = 'ovs-etcd'
ServiceManager.add_service(base_name, client,
params={'CLUSTER': cluster_name,
'NODE_ID': node_name,
'DATA_DIR': data_dir,
'WAL_DIR': wal_dir,
'SERVER_URL': EtcdInstaller.SERVER_URL.format(ip, server_port),
'CLIENT_URL': EtcdInstaller.CLIENT_URL.format(ip, client_port),
'LOCAL_CLIENT_URL': EtcdInstaller.CLIENT_URL.format('127.0.0.1', client_port),
'INITIAL_CLUSTER': '{0}={1}'.format(node_name, EtcdInstaller.SERVER_URL.format(ip, server_port)),
'INITIAL_STATE': 'new',
'INITIAL_PEERS': '-initial-advertise-peer-urls {0}'.format(EtcdInstaller.SERVER_URL.format(ip, server_port))},
target_name=target_name)
EtcdInstaller.start(cluster_name, client)
EtcdInstaller.wait_for_cluster(cluster_name, client, client_port=client_port)
EtcdInstaller._logger.debug('Creating cluster "{0}" on {1} completed'.format(cluster_name, ip))
示例6: ovs_3671_validate_archiving_of_existing_arakoon_data_on_create_test
# 需要导入模块: from ovs.extensions.generic.sshclient import SSHClient [as 别名]
# 或者: from ovs.extensions.generic.sshclient.SSHClient import dir_chown [as 别名]
def ovs_3671_validate_archiving_of_existing_arakoon_data_on_create_test():
"""
Validate arakoon archiving on extending a cluster with already existing data
"""
first_sr = GeneralStorageRouter.get_storage_routers()[0]
cluster_name = 'OVS_3671-single-node-cluster'
cluster_basedir = '/var/tmp'
root_client = SSHClient(first_sr, username='root')
for directory in ['/'.join([cluster_basedir, 'arakoon']), '/var/log/arakoon']:
root_client.dir_create(os.path.dirname(directory))
root_client.dir_chmod(os.path.dirname(directory), 0755, recursive=True)
root_client.dir_chown(os.path.dirname(directory), 'ovs', 'ovs', recursive=True)
files_to_create = ['/'.join([cluster_basedir, 'arakoon', cluster_name, 'db', 'one.db']),
'/'.join([cluster_basedir, 'arakoon', cluster_name, 'tlogs', 'one.tlog'])]
client = SSHClient(first_sr, username='ovs')
for filename in files_to_create:
client.dir_create(os.path.dirname(filename))
client.dir_chmod(os.path.dirname(filename), 0755, recursive=True)
client.dir_chown(os.path.dirname(filename), 'ovs', 'ovs', recursive=True)
client.file_create(files_to_create)
for filename in files_to_create:
assert client.file_exists(filename) is True, 'File {0} not present'.format(filename)
TestArakoon.logger.info('===================================================')
TestArakoon.logger.info('setup and validate single node cluster')
create_info = ArakoonInstaller.create_cluster(cluster_name, ServiceType.ARAKOON_CLUSTER_TYPES.FWK, first_sr.ip,
cluster_basedir, filesystem=False)
TestArakoon.logger.info('create_info: \n{0}'.format(create_info))
ArakoonInstaller.start_cluster(cluster_name, first_sr.ip, False)
ArakoonInstaller.claim_cluster(cluster_name, first_sr, False, metadata=create_info['metadata'])
TestArakoon.validate_arakoon_config_files([first_sr], cluster_name)
TestArakoon.verify_arakoon_structure(root_client, cluster_name, True, True)
for filename in files_to_create:
assert client.file_exists(filename) is False, 'File {0} is missing'.format(filename)
TestArakoon.logger.info('===================================================')
TestArakoon.logger.info('remove cluster')
ArakoonInstaller.delete_cluster(cluster_name, first_sr.ip, False)
for filename in files_to_create:
assert client.file_exists(filename) is False, 'File {0} is missing'.format(filename)
TestArakoon.verify_arakoon_structure(root_client, cluster_name, False, False)
示例7: _deploy
# 需要导入模块: from ovs.extensions.generic.sshclient import SSHClient [as 别名]
# 或者: from ovs.extensions.generic.sshclient.SSHClient import dir_chown [as 别名]
def _deploy(config, filesystem, offline_nodes=None):
"""
Deploys a complete cluster: Distributing the configuration files, creating directories and services
"""
if os.environ.get('RUNNING_UNITTESTS') == 'True':
if filesystem is True:
raise NotImplementedError('At this moment, there is no support for unittesting filesystem backend Arakoon clusters')
ArakoonInstaller._logger.debug('Deploying cluster {0}'.format(config.cluster_id))
if offline_nodes is None:
offline_nodes = []
for node in config.nodes:
if node.ip in offline_nodes:
continue
ArakoonInstaller._logger.debug(' Deploying cluster {0} on {1}'.format(config.cluster_id, node.ip))
root_client = SSHClient(node.ip, username='root')
# Distributes a configuration file to all its nodes
config.write_config(node.ip)
# Create dirs as root because mountpoint /mnt/cache1 is typically owned by root
abs_paths = {node.tlog_dir, node.home} # That's a set
if node.log_sinks.startswith('/'):
abs_paths.add(os.path.dirname(os.path.abspath(node.log_sinks)))
if node.crash_log_sinks.startswith('/'):
abs_paths.add(os.path.dirname(os.path.abspath(node.crash_log_sinks)))
abs_paths = list(abs_paths)
root_client.dir_create(abs_paths)
root_client.dir_chmod(abs_paths, 0755, recursive=True)
root_client.dir_chown(abs_paths, 'ovs', 'ovs', recursive=True)
# Creates services for/on all nodes in the config
if config.filesystem is True:
config_path = config.config_path
else:
config_path = Configuration.get_configuration_path(config.config_path)
base_name = 'ovs-arakoon'
target_name = 'ovs-arakoon-{0}'.format(config.cluster_id)
ServiceManager.add_service(base_name, root_client,
params={'CLUSTER': config.cluster_id,
'NODE_ID': node.name,
'CONFIG_PATH': config_path,
'STARTUP_DEPENDENCY': 'started ovs-watcher-config' if filesystem is False else '(local-filesystems and started networking)'},
target_name=target_name)
ArakoonInstaller._logger.debug(' Deploying cluster {0} on {1} completed'.format(config.cluster_id, node.ip))
示例8: create_cluster
# 需要导入模块: from ovs.extensions.generic.sshclient import SSHClient [as 别名]
# 或者: from ovs.extensions.generic.sshclient.SSHClient import dir_chown [as 别名]
def create_cluster(cluster_name, ip):
"""
Creates a cluster
:param base_dir: Base directory that should contain the data
:param ip: IP address of the first node of the new cluster
:param cluster_name: Name of the cluster
"""
logger.debug('Creating cluster "{0}" on {1}'.format(cluster_name, ip))
client = SSHClient(ip, username='root')
node_name = System.get_my_machine_id(client)
data_dir = EtcdInstaller.DATA_DIR.format(EtcdInstaller.DB_DIR, cluster_name)
wal_dir = EtcdInstaller.WAL_DIR.format(EtcdInstaller.DB_DIR, cluster_name)
abs_paths = [data_dir, wal_dir]
client.dir_delete(abs_paths)
client.dir_create(abs_paths)
client.dir_chmod(abs_paths, 0755, recursive=True)
client.dir_chown(abs_paths, 'ovs', 'ovs', recursive=True)
base_name = 'ovs-etcd'
target_name = 'ovs-etcd-{0}'.format(cluster_name)
ServiceManager.add_service(base_name, client,
params={'CLUSTER': cluster_name,
'NODE_ID': node_name,
'DATA_DIR': data_dir,
'WAL_DIR': wal_dir,
'SERVER_URL': EtcdInstaller.SERVER_URL.format(ip),
'CLIENT_URL': EtcdInstaller.CLIENT_URL.format(ip),
'LOCAL_CLIENT_URL': EtcdInstaller.CLIENT_URL.format('127.0.0.1'),
'INITIAL_CLUSTER': '{0}={1}'.format(node_name, EtcdInstaller.SERVER_URL.format(ip)),
'INITIAL_STATE': 'new',
'INITIAL_PEERS': '-initial-advertise-peer-urls {0}'.format(EtcdInstaller.SERVER_URL.format(ip))},
target_name=target_name)
EtcdInstaller.start(cluster_name, client)
EtcdInstaller.wait_for_cluster(cluster_name, client)
logger.debug('Creating cluster "{0}" on {1} completed'.format(cluster_name, ip))
示例9: extend_cluster
# 需要导入模块: from ovs.extensions.generic.sshclient import SSHClient [as 别名]
# 或者: from ovs.extensions.generic.sshclient.SSHClient import dir_chown [as 别名]
def extend_cluster(master_ip, new_ip, cluster_name, server_port=DEFAULT_SERVER_PORT, client_port=DEFAULT_CLIENT_PORT):
"""
Extends a cluster to a given new node
:param master_ip: IP of one of the already existing nodes
:type master_ip: str
:param new_ip: IP address of the node to be added
:type new_ip: str
:param cluster_name: Name of the cluster to be extended
:type cluster_name: str
:param server_port: Port to be used by server
:type server_port: int
:param client_port: Port to be used by client
:type client_port: int
"""
EtcdInstaller._logger.debug('Extending cluster "{0}" from {1} to {2}'.format(cluster_name, master_ip, new_ip))
master_client = SSHClient(master_ip, username='root')
if not EtcdInstaller._is_healty(cluster_name, master_client, client_port=client_port):
raise RuntimeError('Cluster "{0}" unhealthy, aborting extend'.format(cluster_name))
command = ['etcdctl', 'member', 'list']
new_server_url = EtcdInstaller.SERVER_URL.format(new_ip, server_port)
if client_port != EtcdInstaller.DEFAULT_CLIENT_PORT:
command = ['etcdctl', '--peers={0}:{1}'.format(master_ip, client_port), 'member', 'list']
cluster_members = master_client.run(command).splitlines()
for cluster_member in cluster_members:
if new_server_url in cluster_member:
EtcdInstaller._logger.info('Node {0} already member of etcd cluster'.format(new_ip))
return
current_cluster = []
for item in cluster_members:
info = re.search(EtcdInstaller.MEMBER_REGEX, item).groupdict()
current_cluster.append('{0}={1}'.format(info['name'], info['peer']))
new_client = SSHClient(new_ip, username='root')
node_name = System.get_my_machine_id(new_client)
current_cluster.append('{0}={1}'.format(node_name, new_server_url))
data_dir = EtcdInstaller.DATA_DIR.format(cluster_name)
wal_dir = EtcdInstaller.WAL_DIR.format(cluster_name)
abs_paths = [data_dir, wal_dir]
new_client.dir_delete(abs_paths)
new_client.dir_create(abs_paths)
new_client.dir_chmod(abs_paths, 0755, recursive=True)
new_client.dir_chown(abs_paths, 'ovs', 'ovs', recursive=True)
base_name = 'ovs-etcd'
target_name = 'ovs-etcd-{0}'.format(cluster_name)
EtcdInstaller.stop(cluster_name, new_client) # Stop a possible proxy service
params = ServiceManager.add_service(name=base_name,
client=new_client,
target_name=target_name,
delay_registration=True,
params={'CLUSTER': cluster_name,
'NODE_ID': node_name,
'DATA_DIR': data_dir,
'WAL_DIR': wal_dir,
'SERVER_URL': new_server_url,
'CLIENT_URL': EtcdInstaller.CLIENT_URL.format(new_ip, client_port),
'LOCAL_CLIENT_URL': EtcdInstaller.CLIENT_URL.format('127.0.0.1', client_port),
'INITIAL_CLUSTER': ','.join(current_cluster),
'INITIAL_STATE': 'existing',
'INITIAL_PEERS': ''})
add_command = ['etcdctl', 'member', 'add', node_name, new_server_url]
if client_port != EtcdInstaller.DEFAULT_CLIENT_PORT:
add_command = ['etcdctl', '--peers={0}:{1}'.format(master_ip, client_port), 'member', 'add', node_name, new_server_url]
master_client.run(add_command)
EtcdInstaller.start(cluster_name, new_client)
EtcdInstaller.wait_for_cluster(cluster_name, new_client, client_port=client_port)
ServiceManager.register_service(service_metadata=params, node_name=node_name)
EtcdInstaller._logger.debug('Extending cluster "{0}" from {1} to {2} completed'.format(cluster_name, master_ip, new_ip))
示例10: migrate
# 需要导入模块: from ovs.extensions.generic.sshclient import SSHClient [as 别名]
# 或者: from ovs.extensions.generic.sshclient.SSHClient import dir_chown [as 别名]
#.........这里部分代码省略.........
stat_dir = directory
while not rem.os.path.exists(stat_dir) and stat_dir != '/':
stat_dir = stat_dir.rsplit('/', 1)[0]
if not stat_dir:
stat_dir = '/'
inode = rem.os.stat(stat_dir).st_dev
if partition.inode == inode:
if role not in partition.roles:
partition.roles.append(role)
partition.save()
number = 0
migrated = False
for sd_partition in storagedriver.partitions:
if sd_partition.role == role and sd_partition.sub_role == subrole:
if sd_partition.mds_service == mds_service:
migrated = True
break
if sd_partition.partition_guid == partition.guid:
number = max(sd_partition.number, number)
if migrated is False:
sd_partition = StorageDriverPartition()
sd_partition.role = role
sd_partition.sub_role = subrole
sd_partition.partition = partition
sd_partition.storagedriver = storagedriver
sd_partition.mds_service = mds_service
sd_partition.size = None
sd_partition.number = number + 1
sd_partition.save()
client = SSHClient(storagedriver.storagerouter, username='root')
path = sd_partition.path.rsplit('/', 1)[0]
if path:
client.dir_create(path)
client.dir_chown(path, 'ovs', 'ovs')
client.dir_create(directory)
client.dir_chown(directory, 'ovs', 'ovs')
client.symlink({sd_partition.path: directory})
for storagedriver in StorageDriverList.get_storagedrivers():
migrated_objects = {}
for disk in storagedriver.storagerouter.disks:
for partition in disk.partitions:
# Process all mountpoints that are unique and don't have a specified size
for key, (role, sr_info) in {'mountpoint_md': (DiskPartition.ROLES.DB, {'metadata_{0}': StorageDriverPartition.SUBROLE.MD,
'tlogs_{0}': StorageDriverPartition.SUBROLE.TLOG}),
'mountpoint_fragmentcache': (DiskPartition.ROLES.WRITE, {'fcache_{0}': StorageDriverPartition.SUBROLE.FCACHE}),
'mountpoint_foc': (DiskPartition.ROLES.WRITE, {'fd_{0}': StorageDriverPartition.SUBROLE.FD,
'dtl_{0}': StorageDriverPartition.SUBROLE.DTL}),
'mountpoint_dtl': (DiskPartition.ROLES.WRITE, {'fd_{0}': StorageDriverPartition.SUBROLE.FD,
'dtl_{0}': StorageDriverPartition.SUBROLE.DTL}),
'mountpoint_readcaches': (DiskPartition.ROLES.READ, {'': None}),
'mountpoint_writecaches': (DiskPartition.ROLES.WRITE, {'sco_{0}': StorageDriverPartition.SUBROLE.SCO})}.iteritems():
if key in storagedriver._data:
is_list = isinstance(storagedriver._data[key], list)
entries = storagedriver._data[key][:] if is_list is True else [storagedriver._data[key]]
for entry in entries:
if not entry:
if is_list:
storagedriver._data[key].remove(entry)
if len(storagedriver._data[key]) == 0:
del storagedriver._data[key]
else:
del storagedriver._data[key]
else:
with remote(storagedriver.storagerouter.ip, [os], username='root') as rem:
inode = rem.os.stat(entry).st_dev
if partition.inode == inode:
示例11: ovs_3671_validate_archiving_of_existing_arakoon_data_on_create_and_extend_test
# 需要导入模块: from ovs.extensions.generic.sshclient import SSHClient [as 别名]
# 或者: from ovs.extensions.generic.sshclient.SSHClient import dir_chown [as 别名]
def ovs_3671_validate_archiving_of_existing_arakoon_data_on_create_and_extend_test():
"""
Validate arakoon archiving when creating and extending an arakoon cluster
"""
storagerouters = GeneralStorageRouter.get_storage_routers()
storagerouters.sort(key=lambda k: k.ip)
if len(storagerouters) < 2:
TestArakoon.logger.info('Environment has only {0} node(s)'.format(len(storagerouters)))
return
cluster_name = 'OVS_3671-multi-node-cluster'
cluster_basedir = '/var/tmp'
archived_files = []
files_to_create = []
for index, sr in enumerate(storagerouters):
root_client = SSHClient(sr, username='root')
for directory in ['/'.join([cluster_basedir, 'arakoon']), '/var/log/arakoon']:
root_client.dir_create(os.path.dirname(directory))
root_client.dir_chmod(os.path.dirname(directory), 0755, recursive=True)
root_client.dir_chown(os.path.dirname(directory), 'ovs', 'ovs', recursive=True)
files_to_create = ['/'.join([cluster_basedir, 'arakoon', cluster_name, 'db', 'one.db']),
'/'.join([cluster_basedir, 'arakoon', cluster_name, 'tlogs', 'one.tlog'])]
client = SSHClient(sr, username='ovs')
for filename in files_to_create:
client.dir_create(os.path.dirname(filename))
client.dir_chmod(os.path.dirname(filename), 0755, recursive=True)
client.dir_chown(os.path.dirname(filename), 'ovs', 'ovs', recursive=True)
client.file_create(files_to_create)
for filename in files_to_create:
assert client.file_exists(filename) is True, 'File {0} not present'.format(filename)
archived_files = ['/'.join(['/var/log/arakoon', cluster_name, 'archive', 'one.log'])]
TestArakoon.logger.info('===================================================')
TestArakoon.logger.info('setup and validate single node cluster')
if index == 0:
create_info = ArakoonInstaller.create_cluster(cluster_name, ServiceType.ARAKOON_CLUSTER_TYPES.FWK,
sr.ip, cluster_basedir, filesystem=False)
TestArakoon.logger.info('create_info: \n{0}'.format(create_info))
ArakoonInstaller.start_cluster(cluster_name, sr.ip, False)
ArakoonInstaller.claim_cluster(cluster_name, sr, False, metadata=create_info['metadata'])
else:
ArakoonInstaller.extend_cluster(storagerouters[0].ip, sr.ip, cluster_name, cluster_basedir)
TestArakoon.validate_arakoon_config_files(storagerouters[:index + 1], cluster_name)
TestArakoon.verify_arakoon_structure(root_client, cluster_name, True, True)
TestArakoon.check_archived_directory(client, archived_files)
for filename in files_to_create:
assert client.file_exists(filename) is False, 'File {0} is missing'.format(filename)
TestArakoon.logger.info('===================================================')
TestArakoon.logger.info('remove cluster')
ArakoonInstaller.delete_cluster(cluster_name, storagerouters[0].ip, False)
for sr in storagerouters:
client = SSHClient(sr, username='ovs')
TestArakoon.check_archived_directory(client, archived_files)
for filename in files_to_create:
assert client.file_exists(filename) is False, 'File {0} is missing'.format(filename)
TestArakoon.verify_arakoon_structure(client, cluster_name, False, False)
示例12: test_basic_logrotate
# 需要导入模块: from ovs.extensions.generic.sshclient import SSHClient [as 别名]
# 或者: from ovs.extensions.generic.sshclient.SSHClient import dir_chown [as 别名]
def test_basic_logrotate():
"""
Verify current openvstorage logrotate configuration
Apply the openvstorage logrotate on custom logfile and see if it rotates as predicted
Update ownership of custom file and verify logrotate raises issue
"""
storagerouters = GeneralStorageRouter.get_storage_routers()
logrotate_content = """{0} {{
rotate 5
size 20M
compress
copytruncate
notifempty
}}
{1} {{
su ovs ovs
rotate 10
size 19M
compress
delaycompress
notifempty
create 666 ovs ovs
postrotate
/usr/bin/pkill -SIGUSR1 arakoon
endscript
}}"""
if len(storagerouters) == 0:
raise ValueError('No Storage Routers found in the model')
logrotate_include_dir = '/etc/logrotate.d'
logrotate_cfg_file = '/etc/logrotate.conf'
logrotate_cron_file = '/etc/cron.daily/logrotate'
logrotate_ovs_file = '{0}/openvstorage-logs'.format(logrotate_include_dir)
expected_logrotate_content = logrotate_content.format('/var/log/ovs/*.log', '/var/log/arakoon/*/*.log')
# Verify basic logrotate configurations
for storagerouter in storagerouters:
root_client = SSHClient(endpoint=storagerouter, username='root')
assert_true(expr=root_client.file_exists(filename=logrotate_cfg_file),
msg='Logrotate config {0} does not exist on Storage Router {1}'.format(logrotate_cfg_file, storagerouter.name))
assert_true(expr=root_client.file_exists(filename=logrotate_ovs_file),
msg='Logrotate file {0} does not exist on Storage Router {1}'.format(logrotate_ovs_file, storagerouter.name))
assert_true(expr=root_client.file_exists(filename=logrotate_cron_file),
msg='Logrotate file {0} does not exist on Storage Router {1}'.format(logrotate_cron_file, storagerouter.name))
assert_true(expr='include {0}'.format(logrotate_include_dir) in root_client.file_read(filename=logrotate_cfg_file).splitlines(),
msg='Logrotate on Storage Router {0} does not include {1}'.format(storagerouter.name, logrotate_include_dir))
assert_true(expr='/usr/sbin/logrotate /etc/logrotate.conf' in root_client.file_read(filename=logrotate_cron_file).splitlines(),
msg='Logrotate will not be executed on Storage Router {0}'.format(storagerouter.name))
actual_file_contents = root_client.file_read(filename=logrotate_ovs_file).rstrip('\n')
assert_equal(first=expected_logrotate_content,
second=actual_file_contents,
msg='Logrotate contents does not match expected contents on Storage Router {0}'.format(storagerouter.name))
# Create custom logrotate file for testing purposes
custom_logrotate_cfg_file = '/opt/OpenvStorage/ci/logrotate-conf'
custom_logrotate_dir = '/opt/OpenvStorage/ci/logrotate'
custom_logrotate_file1 = '{0}/logrotate_test_file1.log'.format(custom_logrotate_dir)
custom_logrotate_file2 = '{0}/logrotate_test_file2.log'.format(custom_logrotate_dir)
custom_logrotate_content = logrotate_content.format(custom_logrotate_file1, custom_logrotate_file2)
local_sr = GeneralStorageRouter.get_local_storagerouter()
root_client = SSHClient(endpoint=local_sr, username='root')
root_client.file_write(filename=custom_logrotate_cfg_file, contents=custom_logrotate_content)
# No logfile present --> logrotate should fail
assert_raises(excClass=CalledProcessError,
callableObj=root_client.run,
command='logrotate {0}'.format(custom_logrotate_cfg_file))
##########################################
# Test 1st logrotate configuration entry #
##########################################
root_client.dir_create(directories=custom_logrotate_dir)
root_client.dir_chown(directories=custom_logrotate_dir,
user='ovs',
group='ovs',
recursive=True)
root_client.run(command='touch {0}'.format(custom_logrotate_file1))
root_client.run(command='touch {0}'.format(custom_logrotate_file2))
root_client.file_chmod(filename=custom_logrotate_file1, mode=666)
root_client.file_chmod(filename=custom_logrotate_file2, mode=666)
# Write data to the file less than size for rotation and verify rotation
GeneralVDisk.write_to_volume(location=custom_logrotate_file1,
count=15,
bs='1M',
input_type='zero',
root_client=root_client)
root_client.run('logrotate {0}'.format(custom_logrotate_cfg_file))
assert_equal(first=len(root_client.file_list(directory=custom_logrotate_dir)),
second=2,
msg='More files than expected present in {0}'.format(custom_logrotate_dir))
# Write data to file larger than size in configuration and verify amount of rotations
files_to_delete = []
for counter in range(7):
expected_file = '{0}.{1}.gz'.format(custom_logrotate_file1, counter + 1 if counter < 5 else 5)
GeneralVDisk.write_to_volume(location=custom_logrotate_file1,
count=30,
bs='1M',
#.........这里部分代码省略.........
示例13: migrate
# 需要导入模块: from ovs.extensions.generic.sshclient import SSHClient [as 别名]
# 或者: from ovs.extensions.generic.sshclient.SSHClient import dir_chown [as 别名]
#.........这里部分代码省略.........
stat_dir = directory
while not remote.os.path.exists(stat_dir) and stat_dir != '/':
stat_dir = stat_dir.rsplit('/', 1)[0]
if not stat_dir:
stat_dir = '/'
inode = remote.os.stat(stat_dir).st_dev
if partition.inode == inode:
if role not in partition.roles:
partition.roles.append(role)
partition.save()
number = 0
migrated = False
for sd_partition in storagedriver.partitions:
if sd_partition.role == role and sd_partition.sub_role == subrole:
if sd_partition.mds_service == mds_service:
migrated = True
break
if sd_partition.partition_guid == partition.guid:
number = max(sd_partition.number, number)
if migrated is False:
sd_partition = StorageDriverPartition()
sd_partition.role = role
sd_partition.sub_role = subrole
sd_partition.partition = partition
sd_partition.storagedriver = storagedriver
sd_partition.mds_service = mds_service
sd_partition.size = None
sd_partition.number = number + 1
sd_partition.save()
client = SSHClient(storagedriver.storagerouter, username='root')
path = sd_partition.path.rsplit('/', 1)[0]
if path:
client.dir_create(path)
client.dir_chown(path, 'ovs', 'ovs')
client.dir_create(directory)
client.dir_chown(directory, 'ovs', 'ovs')
client.symlink({sd_partition.path: directory})
for storagedriver in StorageDriverList.get_storagedrivers():
migrated_objects = {}
for disk in storagedriver.storagerouter.disks:
for partition in disk.partitions:
# Process all mountpoints that are unique and don't have a specified size
for key, (role, sr_info) in {'mountpoint_md': (DiskPartition.ROLES.DB, {'metadata_{0}': StorageDriverPartition.SUBROLE.MD,
'tlogs_{0}': StorageDriverPartition.SUBROLE.TLOG}),
'mountpoint_fragmentcache': (DiskPartition.ROLES.WRITE, {'fcache_{0}': StorageDriverPartition.SUBROLE.FCACHE}),
'mountpoint_foc': (DiskPartition.ROLES.WRITE, {'fd_{0}': StorageDriverPartition.SUBROLE.FD,
'dtl_{0}': StorageDriverPartition.SUBROLE.DTL}),
'mountpoint_dtl': (DiskPartition.ROLES.WRITE, {'fd_{0}': StorageDriverPartition.SUBROLE.FD,
'dtl_{0}': StorageDriverPartition.SUBROLE.DTL}),
'mountpoint_readcaches': (DiskPartition.ROLES.READ, {'': None}),
'mountpoint_writecaches': (DiskPartition.ROLES.WRITE, {'sco_{0}': StorageDriverPartition.SUBROLE.SCO})}.iteritems():
if key in storagedriver._data:
is_list = isinstance(storagedriver._data[key], list)
entries = storagedriver._data[key][:] if is_list is True else [storagedriver._data[key]]
for entry in entries:
if not entry:
if is_list:
storagedriver._data[key].remove(entry)
if len(storagedriver._data[key]) == 0:
del storagedriver._data[key]
else:
del storagedriver._data[key]
else:
with Remote(storagedriver.storagerouter.ip, [os], username='root') as remote:
inode = remote.os.stat(entry).st_dev
if partition.inode == inode: