本文整理汇总了Python中ovs.extensions.services.service.ServiceManager.add_service方法的典型用法代码示例。如果您正苦于以下问题:Python ServiceManager.add_service方法的具体用法?Python ServiceManager.add_service怎么用?Python ServiceManager.add_service使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类ovs.extensions.services.service.ServiceManager
的用法示例。
在下文中一共展示了ServiceManager.add_service方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: add_services
# 需要导入模块: from ovs.extensions.services.service import ServiceManager [as 别名]
# 或者: from ovs.extensions.services.service.ServiceManager import add_service [as 别名]
def add_services(client, node_type, logger):
    """
    Add the services required by the OVS cluster
    :param client: Client on which to add the services
    :type client: ovs.extensions.generic.sshclient.SSHClient
    :param node_type: Type of node ('master' or 'extra')
    :type node_type: str
    :param logger: Logger object used for logging
    :type logger: ovs.log.log_handler.LogHandler
    :return: None
    """
    Toolbox.log(logger=logger, messages='Adding services')
    worker_queue = System.get_my_machine_id(client=client)
    if node_type == 'master':
        worker_queue += ',ovs_masters'
    # Services every node type runs
    wanted = {'workers': {'WORKER_QUEUE': worker_queue},
              'watcher-framework': {}}
    if node_type == 'master':
        # Master nodes additionally run the shared framework services
        wanted['memcached'] = {'MEMCACHE_NODE_IP': client.ip, 'WORKER_QUEUE': worker_queue}
        wanted['rabbitmq-server'] = {'MEMCACHE_NODE_IP': client.ip, 'WORKER_QUEUE': worker_queue}
        wanted['scheduled-tasks'] = {}
        wanted['webapp-api'] = {}
        wanted['volumerouter-consumer'] = {}
    for name in wanted:
        # Only add services that are not configured on the node yet
        if not ServiceManager.has_service(name, client):
            Toolbox.log(logger=logger, messages='Adding service {0}'.format(name))
            ServiceManager.add_service(name=name, params=wanted[name], client=client)
示例2: _deploy
# 需要导入模块: from ovs.extensions.services.service import ServiceManager [as 别名]
# 或者: from ovs.extensions.services.service.ServiceManager import add_service [as 别名]
def _deploy(config, offline_nodes=None):
"""
Deploys a complete cluster: Distributing the configuration files, creating directories and services
"""
ArakoonInstaller._logger.debug('Deploying cluster {0}'.format(config.cluster_id))
if offline_nodes is None:
offline_nodes = []
for node in config.nodes:
if node.ip in offline_nodes:
continue
ArakoonInstaller._logger.debug(' Deploying cluster {0} on {1}'.format(config.cluster_id, node.ip))
root_client = SSHClient(node.ip, username='root')
# Distributes a configuration file to all its nodes
config.write_config()
# Create dirs as root because mountpoint /mnt/cache1 is typically owned by root
abs_paths = [node.log_dir, node.tlog_dir, node.home]
if not root_client.dir_exists(abs_paths):
root_client.dir_create(abs_paths)
root_client.dir_chmod(abs_paths, 0755, recursive=True)
root_client.dir_chown(abs_paths, 'ovs', 'ovs', recursive=True)
# Creates services for/on all nodes in the config
base_name = 'ovs-arakoon'
target_name = 'ovs-arakoon-{0}'.format(config.cluster_id)
ServiceManager.add_service(base_name, root_client,
params={'CLUSTER': config.cluster_id,
'NODE_ID': node.name,
'CONFIG_PATH': ArakoonInstaller.ETCD_CONFIG_PATH.format(config.cluster_id)},
target_name=target_name)
ArakoonInstaller._logger.debug(' Deploying cluster {0} on {1} completed'.format(config.cluster_id, node.ip))
示例3: _deploy
# 需要导入模块: from ovs.extensions.services.service import ServiceManager [as 别名]
# 或者: from ovs.extensions.services.service.ServiceManager import add_service [as 别名]
def _deploy(config):
"""
Deploys a complete cluster: Distributing the configuration files, creating directories and services
"""
logger.debug("Deploying cluster {0}".format(config.cluster_id))
for node in config.nodes:
logger.debug(" Deploying cluster {0} on {1}".format(config.cluster_id, node.ip))
ovs_client = SSHClient(node.ip)
root_client = SSHClient(node.ip, username="root")
# Distributes a configuration file to all its nodes
config.write_config(ovs_client)
# Create dirs as root because mountpoint /mnt/cache1 is typically owned by root
abs_paths = [node.log_dir, node.tlog_dir, node.home]
root_client.dir_create(abs_paths)
root_client.dir_chmod(abs_paths, 0755, recursive=True)
root_client.dir_chown(abs_paths, "ovs", "ovs", recursive=True)
# Creates services for/on all nodes in the config
base_name = "ovs-arakoon"
target_name = "ovs-arakoon-{0}".format(config.cluster_id)
ServiceManager.prepare_template(base_name, target_name, ovs_client)
ServiceManager.add_service(target_name, root_client, params={"CLUSTER": config.cluster_id})
logger.debug(" Deploying cluster {0} on {1} completed".format(config.cluster_id, node.ip))
示例4: _setup_proxy
# 需要导入模块: from ovs.extensions.services.service import ServiceManager [as 别名]
# 或者: from ovs.extensions.services.service.ServiceManager import add_service [as 别名]
def _setup_proxy(initial_cluster, slave_client, cluster_name, force=False):
base_name = 'ovs-etcd-proxy'
target_name = 'ovs-etcd-{0}'.format(cluster_name)
if force is False and ServiceManager.has_service(target_name, slave_client) and \
ServiceManager.get_service_status(target_name, slave_client) is True:
logger.info('Service {0} already configured and running'.format(target_name))
return
EtcdInstaller.stop(cluster_name, slave_client)
data_dir = EtcdInstaller.DATA_DIR.format(EtcdInstaller.DB_DIR, cluster_name)
wal_dir = EtcdInstaller.WAL_DIR.format(EtcdInstaller.DB_DIR, cluster_name)
abs_paths = [data_dir, wal_dir]
slave_client.dir_delete(abs_paths)
slave_client.dir_create(data_dir)
slave_client.dir_chmod(data_dir, 0755, recursive=True)
slave_client.dir_chown(data_dir, 'ovs', 'ovs', recursive=True)
ServiceManager.add_service(base_name, slave_client,
params={'CLUSTER': cluster_name,
'DATA_DIR': data_dir,
'LOCAL_CLIENT_URL': EtcdInstaller.CLIENT_URL.format('127.0.0.1'),
'INITIAL_CLUSTER': initial_cluster},
target_name=target_name)
EtcdInstaller.start(cluster_name, slave_client)
EtcdInstaller.wait_for_cluster(cluster_name, slave_client)
示例5: extend_cluster
# 需要导入模块: from ovs.extensions.services.service import ServiceManager [as 别名]
# 或者: from ovs.extensions.services.service.ServiceManager import add_service [as 别名]
def extend_cluster(master_ip, new_ip, cluster_name):
"""
Extends a cluster to a given new node
:param cluster_name: Name of the cluster to be extended
:param new_ip: IP address of the node to be added
:param master_ip: IP of one of the already existing nodes
"""
logger.debug('Extending cluster "{0}" from {1} to {2}'.format(cluster_name, master_ip, new_ip))
client = SSHClient(master_ip, username='root')
if not EtcdInstaller._is_healty(cluster_name, client):
raise RuntimeError('Cluster "{0}" unhealthy, aborting extend'.format(cluster_name))
cluster_members = client.run('etcdctl member list').splitlines()
for cluster_member in cluster_members:
if EtcdInstaller.SERVER_URL.format(new_ip) in cluster_member:
logger.info('Node {0} already member of etcd cluster'.format(new_ip))
return
current_cluster = []
for item in client.run('etcdctl member list').splitlines():
info = re.search(EtcdInstaller.MEMBER_REGEX, item).groupdict()
current_cluster.append('{0}={1}'.format(info['name'], info['peer']))
client = SSHClient(new_ip, username='root')
node_name = System.get_my_machine_id(client)
current_cluster.append('{0}={1}'.format(node_name, EtcdInstaller.SERVER_URL.format(new_ip)))
data_dir = EtcdInstaller.DATA_DIR.format(EtcdInstaller.DB_DIR, cluster_name)
wal_dir = EtcdInstaller.WAL_DIR.format(EtcdInstaller.DB_DIR, cluster_name)
abs_paths = [data_dir, wal_dir]
client.dir_delete(abs_paths)
client.dir_create(abs_paths)
client.dir_chmod(abs_paths, 0755, recursive=True)
client.dir_chown(abs_paths, 'ovs', 'ovs', recursive=True)
base_name = 'ovs-etcd'
target_name = 'ovs-etcd-{0}'.format(cluster_name)
EtcdInstaller.stop(cluster_name, client) # Stop a possible proxy service
ServiceManager.add_service(base_name, client,
params={'CLUSTER': cluster_name,
'NODE_ID': node_name,
'DATA_DIR': data_dir,
'WAL_DIR': wal_dir,
'SERVER_URL': EtcdInstaller.SERVER_URL.format(new_ip),
'CLIENT_URL': EtcdInstaller.CLIENT_URL.format(new_ip),
'LOCAL_CLIENT_URL': EtcdInstaller.CLIENT_URL.format('127.0.0.1'),
'INITIAL_CLUSTER': ','.join(current_cluster),
'INITIAL_STATE': 'existing',
'INITIAL_PEERS': ''},
target_name=target_name)
master_client = SSHClient(master_ip, username='root')
master_client.run('etcdctl member add {0} {1}'.format(node_name, EtcdInstaller.SERVER_URL.format(new_ip)))
EtcdInstaller.start(cluster_name, client)
EtcdInstaller.wait_for_cluster(cluster_name, client)
logger.debug('Extending cluster "{0}" from {1} to {2} completed'.format(cluster_name, master_ip, new_ip))
示例6: _enable_openstack_events_consumer
# 需要导入模块: from ovs.extensions.services.service import ServiceManager [as 别名]
# 或者: from ovs.extensions.services.service.ServiceManager import add_service [as 别名]
def _enable_openstack_events_consumer(self):
    """
    Enable service ovs-openstack-events-consumer
    """
    # Imported locally, as in the original, to avoid a module-level dependency
    from ovs.extensions.services.service import ServiceManager
    consumer = 'ovs-openstack-events-consumer'
    if not ServiceManager.has_service(consumer, self.client):
        # Add, enable and start the service only when it is not present yet
        ServiceManager.add_service(consumer, self.client)
        ServiceManager.enable_service(consumer, self.client)
        ServiceManager.start_service(consumer, self.client)
示例7: _roll_out_dtl_services
# 需要导入模块: from ovs.extensions.services.service import ServiceManager [as 别名]
# 或者: from ovs.extensions.services.service.ServiceManager import add_service [as 别名]
def _roll_out_dtl_services(vpool, storagerouters):
    """
    Deploy and start the DTL service on all storagerouters
    :param vpool: vPool for which a DTL service is rolled out
    :param storagerouters: StorageRouters to deploy and start a DTL service on
    :return: None
    """
    dtl_name = 'dtl_{0}'.format(vpool.name)
    for storagerouter in storagerouters.values():
        root_client = SSHClient(storagerouter, 'root')
        ServiceManager.add_service(name=dtl_name, client=root_client)
        ServiceManager.start_service(name=dtl_name, client=root_client)
示例8: create_cluster
# 需要导入模块: from ovs.extensions.services.service import ServiceManager [as 别名]
# 或者: from ovs.extensions.services.service.ServiceManager import add_service [as 别名]
def create_cluster(cluster_name, ip, server_port=DEFAULT_SERVER_PORT, client_port=DEFAULT_CLIENT_PORT):
"""
Creates a cluster
:param cluster_name: Name of the cluster
:type cluster_name: str
:param ip: IP address of the first node of the new cluster
:type ip: str
:param server_port: Port to be used by server
:type server_port: int
:param client_port: Port to be used by client
:type client_port: int
:return: None
"""
EtcdInstaller._logger.debug('Creating cluster "{0}" on {1}'.format(cluster_name, ip))
client = SSHClient(ip, username='root')
target_name = 'ovs-etcd-{0}'.format(cluster_name)
if ServiceManager.has_service(target_name, client) and ServiceManager.get_service_status(target_name, client) is True:
EtcdInstaller._logger.info('Service {0} already configured and running'.format(target_name))
return
node_name = System.get_my_machine_id(client)
data_dir = EtcdInstaller.DATA_DIR.format(cluster_name)
wal_dir = EtcdInstaller.WAL_DIR.format(cluster_name)
abs_paths = [data_dir, wal_dir]
client.dir_delete(abs_paths)
client.dir_create(abs_paths)
client.dir_chmod(abs_paths, 0755, recursive=True)
client.dir_chown(abs_paths, 'ovs', 'ovs', recursive=True)
base_name = 'ovs-etcd'
ServiceManager.add_service(base_name, client,
params={'CLUSTER': cluster_name,
'NODE_ID': node_name,
'DATA_DIR': data_dir,
'WAL_DIR': wal_dir,
'SERVER_URL': EtcdInstaller.SERVER_URL.format(ip, server_port),
'CLIENT_URL': EtcdInstaller.CLIENT_URL.format(ip, client_port),
'LOCAL_CLIENT_URL': EtcdInstaller.CLIENT_URL.format('127.0.0.1', client_port),
'INITIAL_CLUSTER': '{0}={1}'.format(node_name, EtcdInstaller.SERVER_URL.format(ip, server_port)),
'INITIAL_STATE': 'new',
'INITIAL_PEERS': '-initial-advertise-peer-urls {0}'.format(EtcdInstaller.SERVER_URL.format(ip, server_port))},
target_name=target_name)
EtcdInstaller.start(cluster_name, client)
EtcdInstaller.wait_for_cluster(cluster_name, client, client_port=client_port)
EtcdInstaller._logger.debug('Creating cluster "{0}" on {1} completed'.format(cluster_name, ip))
示例9: _deploy
# 需要导入模块: from ovs.extensions.services.service import ServiceManager [as 别名]
# 或者: from ovs.extensions.services.service.ServiceManager import add_service [as 别名]
def _deploy(config, filesystem, offline_nodes=None):
"""
Deploys a complete cluster: Distributing the configuration files, creating directories and services
"""
if os.environ.get('RUNNING_UNITTESTS') == 'True':
if filesystem is True:
raise NotImplementedError('At this moment, there is no support for unittesting filesystem backend Arakoon clusters')
ArakoonInstaller._logger.debug('Deploying cluster {0}'.format(config.cluster_id))
if offline_nodes is None:
offline_nodes = []
for node in config.nodes:
if node.ip in offline_nodes:
continue
ArakoonInstaller._logger.debug(' Deploying cluster {0} on {1}'.format(config.cluster_id, node.ip))
root_client = SSHClient(node.ip, username='root')
# Distributes a configuration file to all its nodes
config.write_config(node.ip)
# Create dirs as root because mountpoint /mnt/cache1 is typically owned by root
abs_paths = {node.tlog_dir, node.home} # That's a set
if node.log_sinks.startswith('/'):
abs_paths.add(os.path.dirname(os.path.abspath(node.log_sinks)))
if node.crash_log_sinks.startswith('/'):
abs_paths.add(os.path.dirname(os.path.abspath(node.crash_log_sinks)))
abs_paths = list(abs_paths)
root_client.dir_create(abs_paths)
root_client.dir_chmod(abs_paths, 0755, recursive=True)
root_client.dir_chown(abs_paths, 'ovs', 'ovs', recursive=True)
# Creates services for/on all nodes in the config
if config.filesystem is True:
config_path = config.config_path
else:
config_path = Configuration.get_configuration_path(config.config_path)
base_name = 'ovs-arakoon'
target_name = 'ovs-arakoon-{0}'.format(config.cluster_id)
ServiceManager.add_service(base_name, root_client,
params={'CLUSTER': config.cluster_id,
'NODE_ID': node.name,
'CONFIG_PATH': config_path,
'STARTUP_DEPENDENCY': 'started ovs-watcher-config' if filesystem is False else '(local-filesystems and started networking)'},
target_name=target_name)
ArakoonInstaller._logger.debug(' Deploying cluster {0} on {1} completed'.format(config.cluster_id, node.ip))
示例10: _setup_proxy
# 需要导入模块: from ovs.extensions.services.service import ServiceManager [as 别名]
# 或者: from ovs.extensions.services.service.ServiceManager import add_service [as 别名]
def _setup_proxy(initial_cluster, slave_client, cluster_name):
base_name = 'ovs-etcd-proxy'
target_name = 'ovs-etcd-{0}'.format(cluster_name)
EtcdInstaller.stop(cluster_name, slave_client)
data_dir = EtcdInstaller.DATA_DIR.format(EtcdInstaller.DB_DIR, cluster_name)
wal_dir = EtcdInstaller.WAL_DIR.format(EtcdInstaller.DB_DIR, cluster_name)
abs_paths = [data_dir, wal_dir]
slave_client.dir_delete(abs_paths)
slave_client.dir_create(data_dir)
slave_client.dir_chmod(data_dir, 0755, recursive=True)
slave_client.dir_chown(data_dir, 'ovs', 'ovs', recursive=True)
ServiceManager.add_service(base_name, slave_client,
params={'CLUSTER': cluster_name,
'DATA_DIR': data_dir,
'LOCAL_CLIENT_URL': EtcdInstaller.CLIENT_URL.format('127.0.0.1'),
'INITIAL_CLUSTER': initial_cluster},
target_name=target_name)
EtcdInstaller.start(cluster_name, slave_client)
示例11: _deploy
# 需要导入模块: from ovs.extensions.services.service import ServiceManager [as 别名]
# 或者: from ovs.extensions.services.service.ServiceManager import add_service [as 别名]
def _deploy(config, filesystem, offline_nodes=None, plugins=None, delay_service_registration=False):
"""
Deploys a complete cluster: Distributing the configuration files, creating directories and services
"""
if os.environ.get('RUNNING_UNITTESTS') == 'True':
if filesystem is True:
raise NotImplementedError('At this moment, there is no support for unit-testing filesystem backend Arakoon clusters')
ArakoonInstaller._logger.debug('Deploying cluster {0}'.format(config.cluster_id))
if offline_nodes is None:
offline_nodes = []
service_metadata = {}
for node in config.nodes:
if node.ip in offline_nodes:
continue
ArakoonInstaller._logger.debug(' Deploying cluster {0} on {1}'.format(config.cluster_id, node.ip))
root_client = SSHClient(node.ip, username='root')
# Distributes a configuration file to all its nodes
config.write_config(node.ip)
# Create dirs as root because mountpoint /mnt/cache1 is typically owned by root
abs_paths = {node.tlog_dir, node.home} # That's a set
if node.log_sinks.startswith('/'):
abs_paths.add(os.path.dirname(os.path.abspath(node.log_sinks)))
if node.crash_log_sinks.startswith('/'):
abs_paths.add(os.path.dirname(os.path.abspath(node.crash_log_sinks)))
abs_paths = list(abs_paths)
root_client.dir_create(abs_paths)
root_client.dir_chmod(abs_paths, 0755, recursive=True)
root_client.dir_chown(abs_paths, 'ovs', 'ovs', recursive=True)
# Creates services for/on all nodes in the config
if config.filesystem is True:
config_path = config.config_path
else:
config_path = Configuration.get_configuration_path(config.config_path)
extra_version_cmd = ''
if plugins is not None:
extra_version_cmd = ';'.join(plugins)
metadata = ServiceManager.add_service(name='ovs-arakoon',
client=root_client,
params={'CLUSTER': config.cluster_id,
'NODE_ID': node.name,
'CONFIG_PATH': config_path,
'EXTRA_VERSION_CMD': extra_version_cmd},
target_name='ovs-arakoon-{0}'.format(config.cluster_id),
startup_dependency=('ovs-watcher-config' if filesystem is False else None),
delay_registration=delay_service_registration)
service_metadata[node.ip] = metadata
ArakoonInstaller._logger.debug(' Deploying cluster {0} on {1} completed'.format(config.cluster_id, node.ip))
return service_metadata
示例12: create_cluster
# 需要导入模块: from ovs.extensions.services.service import ServiceManager [as 别名]
# 或者: from ovs.extensions.services.service.ServiceManager import add_service [as 别名]
def create_cluster(cluster_name, ip):
"""
Creates a cluster
:param base_dir: Base directory that should contain the data
:param ip: IP address of the first node of the new cluster
:param cluster_name: Name of the cluster
"""
logger.debug('Creating cluster "{0}" on {1}'.format(cluster_name, ip))
client = SSHClient(ip, username='root')
node_name = System.get_my_machine_id(client)
data_dir = EtcdInstaller.DATA_DIR.format(EtcdInstaller.DB_DIR, cluster_name)
wal_dir = EtcdInstaller.WAL_DIR.format(EtcdInstaller.DB_DIR, cluster_name)
abs_paths = [data_dir, wal_dir]
client.dir_delete(abs_paths)
client.dir_create(abs_paths)
client.dir_chmod(abs_paths, 0755, recursive=True)
client.dir_chown(abs_paths, 'ovs', 'ovs', recursive=True)
base_name = 'ovs-etcd'
target_name = 'ovs-etcd-{0}'.format(cluster_name)
ServiceManager.add_service(base_name, client,
params={'CLUSTER': cluster_name,
'NODE_ID': node_name,
'DATA_DIR': data_dir,
'WAL_DIR': wal_dir,
'SERVER_URL': EtcdInstaller.SERVER_URL.format(ip),
'CLIENT_URL': EtcdInstaller.CLIENT_URL.format(ip),
'LOCAL_CLIENT_URL': EtcdInstaller.CLIENT_URL.format('127.0.0.1'),
'INITIAL_CLUSTER': '{0}={1}'.format(node_name, EtcdInstaller.SERVER_URL.format(ip)),
'INITIAL_STATE': 'new',
'INITIAL_PEERS': '-initial-advertise-peer-urls {0}'.format(EtcdInstaller.SERVER_URL.format(ip))},
target_name=target_name)
EtcdInstaller.start(cluster_name, client)
EtcdInstaller.wait_for_cluster(cluster_name, client)
logger.debug('Creating cluster "{0}" on {1} completed'.format(cluster_name, ip))
示例13: demote_node
# 需要导入模块: from ovs.extensions.services.service import ServiceManager [as 别名]
# 或者: from ovs.extensions.services.service.ServiceManager import add_service [as 别名]
#.........这里部分代码省略.........
if arakoon_metadata['internal'] is True:
Toolbox.log(logger=NodeTypeController._logger, messages='Restarting master node services')
remaining_nodes = ip_client_map.keys()[:]
if cluster_ip in remaining_nodes:
remaining_nodes.remove(cluster_ip)
PersistentFactory.store = None
VolatileFactory.store = None
for service in storagerouter.services:
if service.name == 'arakoon-ovsdb':
service.delete()
target_client = None
if storagerouter in offline_nodes:
if unconfigure_rabbitmq is True:
Toolbox.log(logger=NodeTypeController._logger, messages='Removing/unconfiguring offline RabbitMQ node')
client = ip_client_map[master_ip]
try:
client.run(['rabbitmqctl', 'forget_cluster_node', '[email protected]{0}'.format(storagerouter.name)])
except Exception as ex:
Toolbox.log(logger=NodeTypeController._logger, messages=['\nFailed to forget RabbitMQ cluster node', ex], loglevel='exception')
else:
target_client = ip_client_map[cluster_ip]
if unconfigure_rabbitmq is True:
Toolbox.log(logger=NodeTypeController._logger, messages='Removing/unconfiguring RabbitMQ')
try:
if ServiceManager.has_service('rabbitmq-server', client=target_client):
Toolbox.change_service_state(target_client, 'rabbitmq-server', 'stop', NodeTypeController._logger)
target_client.run(['rabbitmq-server', '-detached'])
time.sleep(5)
target_client.run(['rabbitmqctl', 'stop_app'])
time.sleep(5)
target_client.run(['rabbitmqctl', 'reset'])
time.sleep(5)
target_client.run(['rabbitmqctl', 'stop'])
time.sleep(5)
target_client.file_unlink("/var/lib/rabbitmq/.erlang.cookie")
Toolbox.change_service_state(target_client, 'rabbitmq-server', 'stop', NodeTypeController._logger) # To be sure
except Exception as ex:
Toolbox.log(logger=NodeTypeController._logger, messages=['\nFailed to remove/unconfigure RabbitMQ', ex], loglevel='exception')
Toolbox.log(logger=NodeTypeController._logger, messages='Stopping services')
services = ['memcached', 'rabbitmq-server']
if unconfigure_rabbitmq is False:
services.remove('rabbitmq-server')
if unconfigure_memcached is False:
services.remove('memcached')
for service in services:
if ServiceManager.has_service(service, client=target_client):
Toolbox.log(logger=NodeTypeController._logger, messages='Stopping service {0}'.format(service))
try:
Toolbox.change_service_state(target_client, service, 'stop', NodeTypeController._logger)
except Exception as ex:
Toolbox.log(logger=NodeTypeController._logger, messages=['\nFailed to stop service'.format(service), ex], loglevel='exception')
Toolbox.log(logger=NodeTypeController._logger, messages='Removing services')
services = ['scheduled-tasks', 'webapp-api', 'volumerouter-consumer']
for service in services:
if ServiceManager.has_service(service, client=target_client):
Toolbox.log(logger=NodeTypeController._logger, messages='Removing service {0}'.format(service))
try:
Toolbox.change_service_state(target_client, service, 'stop', NodeTypeController._logger)
ServiceManager.remove_service(service, client=target_client)
except Exception as ex:
Toolbox.log(logger=NodeTypeController._logger, messages=['\nFailed to remove service'.format(service), ex], loglevel='exception')
if ServiceManager.has_service('workers', client=target_client):
ServiceManager.add_service(name='workers',
client=target_client,
params={'WORKER_QUEUE': '{0}'.format(unique_id)})
try:
NodeTypeController._configure_amqp_to_volumedriver()
except Exception as ex:
Toolbox.log(logger=NodeTypeController._logger, messages=['\nFailed to configure AMQP to Storage Driver', ex], loglevel='exception')
Toolbox.log(logger=NodeTypeController._logger, messages='Restarting services')
NodeTypeController.restart_framework_and_memcache_services(clients=ip_client_map, logger=NodeTypeController._logger, offline_node_ips=offline_node_ips)
if Toolbox.run_hooks(component='nodetype',
sub_component='demote',
logger=NodeTypeController._logger,
cluster_ip=cluster_ip,
master_ip=master_ip,
offline_node_ips=offline_node_ips):
Toolbox.log(logger=NodeTypeController._logger, messages='Restarting services')
NodeTypeController.restart_framework_and_memcache_services(clients=ip_client_map, logger=NodeTypeController._logger, offline_node_ips=offline_node_ips)
if storagerouter not in offline_nodes:
target_client = ip_client_map[cluster_ip]
node_name, _ = target_client.get_hostname()
if NodeTypeController.avahi_installed(client=target_client, logger=NodeTypeController._logger) is True:
NodeTypeController.configure_avahi(client=target_client, node_name=node_name, node_type='extra', logger=NodeTypeController._logger)
Configuration.set('/ovs/framework/hosts/{0}/type'.format(storagerouter.machine_id), 'EXTRA')
if target_client is not None and target_client.file_exists('/tmp/ovs_rollback'):
target_client.file_write('/tmp/ovs_rollback', 'rollback')
Toolbox.log(logger=NodeTypeController._logger, messages='Demote complete', title=True)
示例14: configure_host
# 需要导入模块: from ovs.extensions.services.service import ServiceManager [as 别名]
# 或者: from ovs.extensions.services.service.ServiceManager import add_service [as 别名]
#.........这里部分代码省略.........
nova_volume_file = '{0}/virt/libvirt/volume.py'.format(nova_base_path)
else:
nova_volume_file = '{0}/nova/virt/libvirt/volume.py'.format(self._driver_location)
if self._is_devstack is True:
nova_driver_file = '{0}/virt/libvirt/driver.py'.format(nova_base_path)
else:
nova_driver_file = '{0}/nova/virt/libvirt/driver.py'.format(self._driver_location)
self._logger.info(' Patching file {0}'.format(nova_volume_file))
file_contents = self.client.file_read(nova_volume_file)
if 'class LibvirtFileVolumeDriver(LibvirtBaseVolumeDriver):' not in file_contents:
file_contents += '''
class LibvirtFileVolumeDriver(LibvirtBaseVolumeDriver):
def __init__(self, connection):
super(LibvirtFileVolumeDriver,
self).__init__(connection, is_block_dev=False)
def get_config(self, connection_info, disk_info):
conf = super(LibvirtFileVolumeDriver,
self).get_config(connection_info, disk_info)
conf.source_type = 'file'
conf.source_path = connection_info['data']['device_path']
return conf
'''
self.client.file_write(nova_volume_file, file_contents)
self._logger.info(' Patching file {0}'.format(nova_driver_file))
file_contents = self.client.file_read(nova_driver_file)
if self._stack_version in ('liberty', 'mitaka'):
check_line = 'local=nova.virt.libvirt.volume.volume.LibvirtVolumeDriver'
new_line = 'file=nova.virt.libvirt.volume.volume.LibvirtFileVolumeDriver'
else:
check_line = 'local=nova.virt.libvirt.volume.LibvirtVolumeDriver'
new_line = 'file=nova.virt.libvirt.volume.LibvirtFileVolumeDriver'
if new_line not in file_contents:
for line in file_contents.splitlines():
if check_line in line:
stripped_line = line.rstrip()
whitespaces = len(stripped_line) - len(stripped_line.lstrip())
new_line = "{0}'{1}',\n".format(' ' * whitespaces, new_line)
fc = file_contents[:file_contents.index(line)] + new_line + file_contents[file_contents.index(line):]
self.client.file_write(nova_driver_file, "".join(fc))
break
if os.path.exists(cinder_brick_initiator_file):
# fix brick/upload to glance
self._logger.info(' Patching file {0}'.format(cinder_brick_initiator_file))
if self._stack_version in ('liberty', 'mitaka', 'newton'):
self.client.run("""sed -i 's/elif protocol == LOCAL:/elif protocol in [LOCAL, "FILE"]:/g' {0}""".format(cinder_brick_initiator_file))
else:
self.client.run("""sed -i 's/elif protocol == "LOCAL":/elif protocol in ["LOCAL", "FILE"]:/g' {0}""".format(cinder_brick_initiator_file))
# 4. Configure messaging driver
self._logger.info(' - Configure messaging driver')
nova_messaging_driver = 'nova.openstack.common.notifier.rpc_notifier' if self._stack_version == 'juno' else 'messaging'
cinder_messaging_driver = 'cinder.openstack.common.notifier.rpc_notifier' if self._stack_version == 'juno' else 'messaging'
with remote(ip, [RawConfigParser, open], 'root') as rem:
for config_file, driver in {self._NOVA_CONF: nova_messaging_driver,
self._CINDER_CONF: cinder_messaging_driver}.iteritems():
changed = False
cfg = rem.RawConfigParser()
cfg.read([config_file])
if cfg.has_option("DEFAULT", "notification_driver"):
if cfg.get("DEFAULT", "notification_driver") != driver:
changed = True
cfg.set("DEFAULT", "notification_driver", driver)
else:
changed = True
cfg.set("DEFAULT", "notification_driver", driver)
if cfg.has_option("DEFAULT", "notification_topics"):
notification_topics = cfg.get("DEFAULT", "notification_topics").split(",")
if "notifications" not in notification_topics:
notification_topics.append("notifications")
changed = True
cfg.set("DEFAULT", "notification_topics", ",".join(notification_topics))
else:
changed = True
cfg.set("DEFAULT", "notification_topics", "notifications")
if config_file == self._NOVA_CONF:
for param, value in {'notify_on_any_change': 'True',
'notify_on_state_change': 'vm_and_task_state'}.iteritems():
if not cfg.has_option("DEFAULT", param):
changed = True
cfg.set("DEFAULT", param, value)
if changed is True:
with rem.open(config_file, "w") as fp:
cfg.write(fp)
# 5. Enable events consumer
self._logger.info(' - Enabling events consumer service')
service_name = 'openstack-events-consumer'
if not ServiceManager.has_service(service_name, self.client):
ServiceManager.add_service(service_name, self.client)
ServiceManager.enable_service(service_name, self.client)
ServiceManager.start_service(service_name, self.client)
示例15: extend_cluster
# 需要导入模块: from ovs.extensions.services.service import ServiceManager [as 别名]
# 或者: from ovs.extensions.services.service.ServiceManager import add_service [as 别名]
def extend_cluster(master_ip, new_ip, cluster_name, server_port=DEFAULT_SERVER_PORT, client_port=DEFAULT_CLIENT_PORT):
"""
Extends a cluster to a given new node
:param master_ip: IP of one of the already existing nodes
:type master_ip: str
:param new_ip: IP address of the node to be added
:type new_ip: str
:param cluster_name: Name of the cluster to be extended
:type cluster_name: str
:param server_port: Port to be used by server
:type server_port: int
:param client_port: Port to be used by client
:type client_port: int
"""
EtcdInstaller._logger.debug('Extending cluster "{0}" from {1} to {2}'.format(cluster_name, master_ip, new_ip))
master_client = SSHClient(master_ip, username='root')
if not EtcdInstaller._is_healty(cluster_name, master_client, client_port=client_port):
raise RuntimeError('Cluster "{0}" unhealthy, aborting extend'.format(cluster_name))
command = ['etcdctl', 'member', 'list']
new_server_url = EtcdInstaller.SERVER_URL.format(new_ip, server_port)
if client_port != EtcdInstaller.DEFAULT_CLIENT_PORT:
command = ['etcdctl', '--peers={0}:{1}'.format(master_ip, client_port), 'member', 'list']
cluster_members = master_client.run(command).splitlines()
for cluster_member in cluster_members:
if new_server_url in cluster_member:
EtcdInstaller._logger.info('Node {0} already member of etcd cluster'.format(new_ip))
return
current_cluster = []
for item in cluster_members:
info = re.search(EtcdInstaller.MEMBER_REGEX, item).groupdict()
current_cluster.append('{0}={1}'.format(info['name'], info['peer']))
new_client = SSHClient(new_ip, username='root')
node_name = System.get_my_machine_id(new_client)
current_cluster.append('{0}={1}'.format(node_name, new_server_url))
data_dir = EtcdInstaller.DATA_DIR.format(cluster_name)
wal_dir = EtcdInstaller.WAL_DIR.format(cluster_name)
abs_paths = [data_dir, wal_dir]
new_client.dir_delete(abs_paths)
new_client.dir_create(abs_paths)
new_client.dir_chmod(abs_paths, 0755, recursive=True)
new_client.dir_chown(abs_paths, 'ovs', 'ovs', recursive=True)
base_name = 'ovs-etcd'
target_name = 'ovs-etcd-{0}'.format(cluster_name)
EtcdInstaller.stop(cluster_name, new_client) # Stop a possible proxy service
params = ServiceManager.add_service(name=base_name,
client=new_client,
target_name=target_name,
delay_registration=True,
params={'CLUSTER': cluster_name,
'NODE_ID': node_name,
'DATA_DIR': data_dir,
'WAL_DIR': wal_dir,
'SERVER_URL': new_server_url,
'CLIENT_URL': EtcdInstaller.CLIENT_URL.format(new_ip, client_port),
'LOCAL_CLIENT_URL': EtcdInstaller.CLIENT_URL.format('127.0.0.1', client_port),
'INITIAL_CLUSTER': ','.join(current_cluster),
'INITIAL_STATE': 'existing',
'INITIAL_PEERS': ''})
add_command = ['etcdctl', 'member', 'add', node_name, new_server_url]
if client_port != EtcdInstaller.DEFAULT_CLIENT_PORT:
add_command = ['etcdctl', '--peers={0}:{1}'.format(master_ip, client_port), 'member', 'add', node_name, new_server_url]
master_client.run(add_command)
EtcdInstaller.start(cluster_name, new_client)
EtcdInstaller.wait_for_cluster(cluster_name, new_client, client_port=client_port)
ServiceManager.register_service(service_metadata=params, node_name=node_name)
EtcdInstaller._logger.debug('Extending cluster "{0}" from {1} to {2} completed'.format(cluster_name, master_ip, new_ip))