本文整理汇总了Python中ovs.extensions.services.service.ServiceManager.remove_service方法的典型用法代码示例。如果您正苦于以下问题:Python ServiceManager.remove_service方法的具体用法?Python ServiceManager.remove_service怎么用?Python ServiceManager.remove_service使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类ovs.extensions.services.service.ServiceManager
的用法示例。
在下文中一共展示了ServiceManager.remove_service方法的13个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: remove_services
# 需要导入模块: from ovs.extensions.services.service import ServiceManager [as 别名]
# 或者: from ovs.extensions.services.service.ServiceManager import remove_service [as 别名]
def remove_services(client, node_type, logger):
    """
    Remove (or stop, for externally relevant ones) every service managed by OVS on a node.
    :param client: Client on which to remove the services
    :type client: ovs.extensions.generic.sshclient.SSHClient
    :param node_type: Type of node, can be 'master' or 'extra'
    :type node_type: str
    :param logger: Logger object used for logging
    :type logger: ovs.log.log_handler.LogHandler
    :return: None
    """
    Toolbox.log(logger=logger, messages="Removing services")
    # These two are only stopped, never removed
    stop_only = ["rabbitmq-server", "memcached"]
    service_names = ["workers", "support-agent", "watcher-framework"]
    if node_type == "master":
        service_names.extend(["scheduled-tasks", "webapp-api", "volumerouter-consumer"])
        # Only touch rabbitmq/memcached when OVS manages them internally
        if Toolbox.is_service_internally_managed(service="rabbitmq") is True:
            service_names.append("rabbitmq-server")
        if Toolbox.is_service_internally_managed(service="memcached") is True:
            service_names.append("memcached")
    for service_name in service_names:
        if ServiceManager.has_service(service_name, client=client):
            action = "Stopping" if service_name in stop_only else "Removing"
            Toolbox.log(
                logger=logger,
                messages="{0} service {1}".format(action, service_name),
            )
            ServiceManager.stop_service(service_name, client=client)
            if service_name not in stop_only:
                ServiceManager.remove_service(service_name, client=client)
示例2: on_demote
# 需要导入模块: from ovs.extensions.services.service import ServiceManager [as 别名]
# 或者: from ovs.extensions.services.service.ServiceManager import remove_service [as 别名]
def on_demote(cluster_ip, master_ip, offline_node_ips=None):
"""
Handles the demote for the StorageDrivers
:param cluster_ip: IP of the node to demote
:param master_ip: IP of the master node
:param offline_node_ips: IPs of nodes which are offline
"""
if offline_node_ips is None:
offline_node_ips = []
client = SSHClient(cluster_ip, username='root') if cluster_ip not in offline_node_ips else None
servicetype = ServiceTypeList.get_by_name('Arakoon')
current_service = None
remaining_ips = []
for service in servicetype.services:
if service.name == 'arakoon-voldrv':
if service.storagerouter.ip == cluster_ip:
current_service = service
elif service.storagerouter.ip not in offline_node_ips:
remaining_ips.append(service.storagerouter.ip)
if current_service is not None:
print '* Shrink StorageDriver cluster'
ArakoonInstaller.shrink_cluster(master_ip, cluster_ip, 'voldrv', offline_node_ips)
if client is not None and ServiceManager.has_service(current_service.name, client=client) is True:
ServiceManager.stop_service(current_service.name, client=client)
ServiceManager.remove_service(current_service.name, client=client)
ArakoonInstaller.restart_cluster_remove('voldrv', remaining_ips)
current_service.delete()
StorageDriverController._configure_arakoon_to_volumedriver(offline_node_ips)
示例3: on_demote
# 需要导入模块: from ovs.extensions.services.service import ServiceManager [as 别名]
# 或者: from ovs.extensions.services.service.ServiceManager import remove_service [as 别名]
def on_demote(cluster_ip, master_ip):
"""
Handles the demote for the StorageDrivers
:param cluster_ip: IP of the node to demote
:param master_ip: IP of the master node
"""
client = SSHClient(cluster_ip, username='root')
servicetype = ServiceTypeList.get_by_name('Arakoon')
current_service = None
remaining_ips = []
for service in servicetype.services:
if service.name == 'arakoon-voldrv':
if service.storagerouter.ip == cluster_ip:
current_service = service
else:
remaining_ips.append(service.storagerouter.ip)
if current_service is not None:
print '* Shrink StorageDriver cluster'
ArakoonInstaller.shrink_cluster(master_ip, cluster_ip, 'voldrv')
if ServiceManager.has_service(current_service.name, client=client) is True:
ServiceManager.stop_service(current_service.name, client=client)
ServiceManager.remove_service(current_service.name, client=client)
ArakoonInstaller.restart_cluster_remove('voldrv', remaining_ips)
current_service.delete()
for storagerouter in StorageRouterList.get_storagerouters():
ArakoonInstaller.deploy_to_slave(master_ip, storagerouter.ip, 'voldrv')
StorageDriverController._configure_arakoon_to_volumedriver()
示例4: remove
# 需要导入模块: from ovs.extensions.services.service import ServiceManager [as 别名]
# 或者: from ovs.extensions.services.service.ServiceManager import remove_service [as 别名]
def remove(cluster_name, client):
    """
    Removes an etcd service
    :param client: Client on which to remove the service
    :param cluster_name: The name of the cluster service to remove
    """
    service_name = 'etcd-{0}'.format(cluster_name)
    if ServiceManager.has_service(service_name, client=client) is True:
        ServiceManager.remove_service(service_name, client=client)
示例5: remove
# 需要导入模块: from ovs.extensions.services.service import ServiceManager [as 别名]
# 或者: from ovs.extensions.services.service.ServiceManager import remove_service [as 别名]
def remove(cluster_name, client):
    """
    Removes an arakoon service
    :param cluster_name: The name of the cluster service to remove
    :type cluster_name: str
    :param client: Client on which to remove the service
    :type client: SSHClient
    :return: None
    """
    service_name = 'arakoon-{0}'.format(cluster_name)
    if ServiceManager.has_service(service_name, client=client) is True:
        ServiceManager.remove_service(service_name, client=client)
示例6: remove
# 需要导入模块: from ovs.extensions.services.service import ServiceManager [as 别名]
# 或者: from ovs.extensions.services.service.ServiceManager import remove_service [as 别名]
def remove(cluster_name, client, delay_unregistration=False):
    """
    Removes an arakoon service
    :param cluster_name: The name of the cluster service to remove
    :type cluster_name: str
    :param client: Client on which to remove the service
    :type client: SSHClient
    :param delay_unregistration: Un-register the service right away or not
    :type delay_unregistration: bool
    :return: None
    """
    service_name = ArakoonInstaller.get_service_name_for_cluster(cluster_name=cluster_name)
    # Nothing to do when the service is not present on this node
    if ServiceManager.has_service(name=service_name, client=client) is not True:
        return
    ServiceManager.remove_service(name=service_name, client=client, delay_unregistration=delay_unregistration)
示例7: on_remove
# 需要导入模块: from ovs.extensions.services.service import ServiceManager [as 别名]
# 或者: from ovs.extensions.services.service.ServiceManager import remove_service [as 别名]
def on_remove(cluster_ip, complete_removal):
    """
    Handles the StorageDriver removal part of a node
    :param cluster_ip: IP of the node which is being removed from the cluster
    :type cluster_ip: str
    :param complete_removal: Unused for StorageDriver, used for AlbaController
    :type complete_removal: bool
    :return: None
    """
    _ = complete_removal
    watcher_service = 'watcher-volumedriver'
    try:
        root_client = SSHClient(endpoint=cluster_ip, username='root')
        if ServiceManager.has_service(name=watcher_service, client=root_client):
            ServiceManager.stop_service(name=watcher_service, client=root_client)
            ServiceManager.remove_service(name=watcher_service, client=root_client)
    except UnableToConnectException:
        # Node is already unreachable - nothing to clean up remotely
        pass
示例8: on_demote
# 需要导入模块: from ovs.extensions.services.service import ServiceManager [as 别名]
# 或者: from ovs.extensions.services.service.ServiceManager import remove_service [as 别名]
def on_demote(cluster_ip, master_ip, offline_node_ips=None):
    """
    Handles the demote for the StorageDrivers
    :param cluster_ip: IP of the node to demote
    :type cluster_ip: str
    :param master_ip: IP of the master node
    :type master_ip: str
    :param offline_node_ips: IPs of nodes which are offline
    :type offline_node_ips: list
    :return: None
    """
    _ = master_ip
    if offline_node_ips is None:
        offline_node_ips = []
    # Only build a client when the demoted node is still reachable
    client = None
    if cluster_ip not in offline_node_ips:
        client = SSHClient(cluster_ip, username='root')
    demoted_service = None
    surviving_ips = []
    for svc in ServiceTypeList.get_by_name(ServiceType.SERVICE_TYPES.ARAKOON).services:
        # Externally managed arakoon cluster services do not have a storage router
        if svc.name != 'arakoon-voldrv' or svc.is_internal is not True:
            continue
        if svc.storagerouter.ip == cluster_ip:
            demoted_service = svc
        elif svc.storagerouter.ip not in offline_node_ips:
            surviving_ips.append(svc.storagerouter.ip)
    if demoted_service is None:
        return
    if len(surviving_ips) == 0:
        raise RuntimeError('Could not find any remaining arakoon nodes for the voldrv cluster')
    StorageDriverController._logger.debug('* Shrink StorageDriver cluster')
    cluster_name = str(Configuration.get('/ovs/framework/arakoon_clusters|voldrv'))
    ArakoonInstaller.shrink_cluster(deleted_node_ip=cluster_ip,
                                    remaining_node_ip=surviving_ips[0],
                                    cluster_name=cluster_name,
                                    offline_nodes=offline_node_ips)
    # Stop/remove the local arakoon service only when the node is online and has it
    if client is not None and ServiceManager.has_service(demoted_service.name, client=client) is True:
        ServiceManager.stop_service(demoted_service.name, client=client)
        ServiceManager.remove_service(demoted_service.name, client=client)
    ArakoonInstaller.restart_cluster_remove(cluster_name, surviving_ips, filesystem=False)
    demoted_service.delete()
    StorageDriverController._configure_arakoon_to_volumedriver(cluster_name=cluster_name)
示例9: unconfigure_host
# 需要导入模块: from ovs.extensions.services.service import ServiceManager [as 别名]
# 或者: from ovs.extensions.services.service.ServiceManager import remove_service [as 别名]
def unconfigure_host(self, ip):
    """
    Remove the OVS volumedriver integration from an OpenStack/DevStack host.

    Deletes the installed driver code, removes users from the 'ovs' group,
    reverts the patched Nova/Cinder source files, strips the OVS messaging
    configuration from nova.conf/cinder.conf and removes the events-consumer
    service.
    :param ip: IP address of the host being unconfigured (used for the remote
               config-file edits in step 4)
    :return: None
    """
    # Bail out early when there is nothing to unconfigure on this host
    if self._is_devstack is False and self._is_openstack is False or self._cinder_installed is False or self._nova_installed is False:
        self._logger.warning('Unconfigure host: No OpenStack nor DevStack installation detected or Cinder and Nova plugins are not installed')
        return
    # 1. Remove driver code
    self._logger.info('*** Unconfiguring host with IP {0} ***'.format(ip))
    self._logger.info(' Removing driver code')
    if self._is_devstack is True:
        self.client.file_delete(self._devstack_driver)
    else:
        self.client.file_delete('{0}/cinder/volume/drivers/openvstorage.py'.format(self._driver_location))
    # 2. Removing users from group
    self._logger.info(' Removing users from group ovs')
    for user in ['libvirt-qemu', 'stack'] if self._is_devstack is True else self._openstack_users:
        self.client.run('deluser {0} ovs'.format(user))
    # 3. Revert patches
    self._logger.info(' Reverting patches')
    nova_base_path = self._get_base_path('nova')
    cinder_base_path = self._get_base_path('cinder')
    if self._is_devstack is True:
        nova_volume_file = '{0}/virt/libvirt/volume.py'.format(nova_base_path)
        nova_driver_file = '{0}/virt/libvirt/driver.py'.format(nova_base_path)
        cinder_brick_initiator_file = '{0}/brick/initiator/connector.py'.format(cinder_base_path)
    else:
        nova_volume_file = '{0}/nova/virt/libvirt/volume.py'.format(self._driver_location)
        nova_driver_file = '{0}/nova/virt/libvirt/driver.py'.format(self._driver_location)
        cinder_brick_initiator_file = '{0}/cinder/brick/initiator/connector.py'.format(self._driver_location)
    self._logger.info(' Reverting patched file: {0}'.format(nova_volume_file))
    # Drop the injected LibvirtFileVolumeDriver class: skip every line from
    # its 'class' header up to (but not including) the next 'class' statement.
    new_contents = []
    skip_class = False
    for line in self.client.file_read(nova_volume_file).splitlines():
        if line.startswith('class LibvirtFileVolumeDriver(LibvirtBaseVolumeDriver):'):
            skip_class = True
            continue
        if line.startswith('class'):
            skip_class = False
        if skip_class is False:
            new_contents.append(line)
    # NOTE(review): lines are joined without '\n' - presumably client.file_write
    # or a later step restores line structure; confirm the rewritten file is valid
    self.client.file_write(nova_volume_file, "".join(new_contents))
    self._logger.info(' Reverting patched file: {0}'.format(nova_driver_file))
    # Remove the volume-driver registration line that was patched into driver.py
    new_contents = []
    for line in self.client.file_read(nova_driver_file).splitlines():
        stripped_line = line.strip()
        if stripped_line.startswith("'file=nova.virt.libvirt.volume.LibvirtFileVolumeDriver'"):
            continue
        new_contents.append(line)
    self.client.file_write(nova_driver_file, "".join(new_contents))
    if os.path.exists(cinder_brick_initiator_file):
        self._logger.info(' Reverting patched file: {0}'.format(cinder_brick_initiator_file))
        # Restore the original LOCAL-only protocol check in the brick connector
        self.client.run("""sed -i 's/elif protocol in ["LOCAL", "FILE"]:/elif protocol == "LOCAL":/g' {0}""".format(cinder_brick_initiator_file))
    # 4. Unconfigure messaging driver
    self._logger.info(' Unconfiguring messaging driver')
    nova_messaging_driver = 'nova.openstack.common.notifier.rpc_notifier' if self._stack_version == 'juno' else 'messaging'
    cinder_messaging_driver = 'cinder.openstack.common.notifier.rpc_notifier' if self._stack_version == 'juno' else 'messaging'
    with remote(ip, [RawConfigParser, open], 'root') as rem:
        for config_file, driver in {self._NOVA_CONF: nova_messaging_driver,
                                    self._CINDER_CONF: cinder_messaging_driver}.iteritems():
            # NOTE(review): 'driver' is unused here - the notification_driver
            # option is removed below regardless of its configured value
            cfg = rem.RawConfigParser()
            cfg.read([config_file])
            if cfg.has_option("DEFAULT", "notification_driver"):
                cfg.remove_option("DEFAULT", "notification_driver")
            if cfg.has_option("DEFAULT", "notification_topics"):
                notification_topics = cfg.get("DEFAULT", "notification_topics").split(",")
                if "notifications" in notification_topics:
                    notification_topics.remove("notifications")
                    cfg.set("DEFAULT", "notification_topics", ",".join(notification_topics))
            if config_file == self._NOVA_CONF:
                # 'value' is unused: only the presence of the option matters
                for param, value in {'notify_on_any_change': 'True',
                                     'notify_on_state_change': 'vm_and_task_state'}.iteritems():
                    if cfg.has_option("DEFAULT", param):
                        cfg.remove_option("DEFAULT", param)
            with rem.open(config_file, "w") as fp:
                cfg.write(fp)
    # 5. Disable events consumer
    self._logger.info(' Disabling events consumer')
    service_name = 'ovs-openstack-events-consumer'
    if ServiceManager.has_service(service_name, self.client):
        ServiceManager.stop_service(service_name, self.client)
        ServiceManager.disable_service(service_name, self.client)
        ServiceManager.remove_service(service_name, self.client)
示例10: remove_node
# 需要导入模块: from ovs.extensions.services.service import ServiceManager [as 别名]
# 或者: from ovs.extensions.services.service.ServiceManager import remove_service [as 别名]
#.........这里部分代码省略.........
Toolbox.log(
logger=NodeRemovalController._logger,
messages=" Removing vPools from node".format(storage_router_to_remove.ip),
)
storage_routers_offline_guids = [
sr.guid for sr in storage_routers_offline if sr.guid != storage_router_to_remove.guid
]
for storage_driver in storage_router_to_remove.storagedrivers:
Toolbox.log(
logger=NodeRemovalController._logger,
messages=" Removing vPool {0} from node".format(storage_driver.vpool.name),
)
StorageRouterController.remove_storagedriver(
storagedriver_guid=storage_driver.guid, offline_storage_router_guids=storage_routers_offline_guids
)
# Demote if MASTER
if storage_router_to_remove.node_type == "MASTER":
NodeTypeController.demote_node(
cluster_ip=storage_router_to_remove.ip,
master_ip=master_ip,
ip_client_map=ip_client_map,
unique_id=storage_router_to_remove.machine_id,
unconfigure_memcached=internal_memcached,
unconfigure_rabbitmq=internal_rabbit_mq,
offline_nodes=storage_routers_offline,
)
# Stop / remove services
Toolbox.log(logger=NodeRemovalController._logger, messages="Stopping and removing services")
config_store = Configuration.get_store()
if storage_router_to_remove_online is True:
client = SSHClient(endpoint=storage_router_to_remove, username="root")
NodeRemovalController.remove_services(
client=client,
node_type=storage_router_to_remove.node_type.lower(),
logger=NodeRemovalController._logger,
)
service = "watcher-config"
if ServiceManager.has_service(service, client=client):
Toolbox.log(logger=NodeRemovalController._logger, messages="Removing service {0}".format(service))
ServiceManager.stop_service(service, client=client)
ServiceManager.remove_service(service, client=client)
if config_store == "etcd":
from ovs.extensions.db.etcd.installer import EtcdInstaller
if Configuration.get(key="/ovs/framework/external_config") is None:
Toolbox.log(logger=NodeRemovalController._logger, messages=" Removing Etcd cluster")
try:
EtcdInstaller.stop("config", client)
EtcdInstaller.remove("config", client)
except Exception as ex:
Toolbox.log(
logger=NodeRemovalController._logger,
messages=["\nFailed to unconfigure Etcd", ex],
loglevel="exception",
)
Toolbox.log(logger=NodeRemovalController._logger, messages="Removing Etcd proxy")
EtcdInstaller.remove_proxy("config", client.ip)
Toolbox.run_hooks(
component="noderemoval",
sub_component="remove",
logger=NodeRemovalController._logger,
示例11: demote_node
# 需要导入模块: from ovs.extensions.services.service import ServiceManager [as 别名]
# 或者: from ovs.extensions.services.service.ServiceManager import remove_service [as 别名]
#.........这里部分代码省略.........
if arakoon_metadata['internal'] is True:
Toolbox.log(logger=NodeTypeController._logger, messages='Restarting master node services')
remaining_nodes = ip_client_map.keys()[:]
if cluster_ip in remaining_nodes:
remaining_nodes.remove(cluster_ip)
PersistentFactory.store = None
VolatileFactory.store = None
for service in storagerouter.services:
if service.name == 'arakoon-ovsdb':
service.delete()
target_client = None
if storagerouter in offline_nodes:
if unconfigure_rabbitmq is True:
Toolbox.log(logger=NodeTypeController._logger, messages='Removing/unconfiguring offline RabbitMQ node')
client = ip_client_map[master_ip]
try:
client.run(['rabbitmqctl', 'forget_cluster_node', '[email protected]{0}'.format(storagerouter.name)])
except Exception as ex:
Toolbox.log(logger=NodeTypeController._logger, messages=['\nFailed to forget RabbitMQ cluster node', ex], loglevel='exception')
else:
target_client = ip_client_map[cluster_ip]
if unconfigure_rabbitmq is True:
Toolbox.log(logger=NodeTypeController._logger, messages='Removing/unconfiguring RabbitMQ')
try:
if ServiceManager.has_service('rabbitmq-server', client=target_client):
Toolbox.change_service_state(target_client, 'rabbitmq-server', 'stop', NodeTypeController._logger)
target_client.run(['rabbitmq-server', '-detached'])
time.sleep(5)
target_client.run(['rabbitmqctl', 'stop_app'])
time.sleep(5)
target_client.run(['rabbitmqctl', 'reset'])
time.sleep(5)
target_client.run(['rabbitmqctl', 'stop'])
time.sleep(5)
target_client.file_unlink("/var/lib/rabbitmq/.erlang.cookie")
Toolbox.change_service_state(target_client, 'rabbitmq-server', 'stop', NodeTypeController._logger) # To be sure
except Exception as ex:
Toolbox.log(logger=NodeTypeController._logger, messages=['\nFailed to remove/unconfigure RabbitMQ', ex], loglevel='exception')
Toolbox.log(logger=NodeTypeController._logger, messages='Stopping services')
services = ['memcached', 'rabbitmq-server']
if unconfigure_rabbitmq is False:
services.remove('rabbitmq-server')
if unconfigure_memcached is False:
services.remove('memcached')
for service in services:
if ServiceManager.has_service(service, client=target_client):
Toolbox.log(logger=NodeTypeController._logger, messages='Stopping service {0}'.format(service))
try:
Toolbox.change_service_state(target_client, service, 'stop', NodeTypeController._logger)
except Exception as ex:
Toolbox.log(logger=NodeTypeController._logger, messages=['\nFailed to stop service'.format(service), ex], loglevel='exception')
Toolbox.log(logger=NodeTypeController._logger, messages='Removing services')
services = ['scheduled-tasks', 'webapp-api', 'volumerouter-consumer']
for service in services:
if ServiceManager.has_service(service, client=target_client):
Toolbox.log(logger=NodeTypeController._logger, messages='Removing service {0}'.format(service))
try:
Toolbox.change_service_state(target_client, service, 'stop', NodeTypeController._logger)
ServiceManager.remove_service(service, client=target_client)
except Exception as ex:
Toolbox.log(logger=NodeTypeController._logger, messages=['\nFailed to remove service'.format(service), ex], loglevel='exception')
if ServiceManager.has_service('workers', client=target_client):
ServiceManager.add_service(name='workers',
client=target_client,
params={'WORKER_QUEUE': '{0}'.format(unique_id)})
try:
NodeTypeController._configure_amqp_to_volumedriver()
except Exception as ex:
Toolbox.log(logger=NodeTypeController._logger, messages=['\nFailed to configure AMQP to Storage Driver', ex], loglevel='exception')
Toolbox.log(logger=NodeTypeController._logger, messages='Restarting services')
NodeTypeController.restart_framework_and_memcache_services(clients=ip_client_map, logger=NodeTypeController._logger, offline_node_ips=offline_node_ips)
if Toolbox.run_hooks(component='nodetype',
sub_component='demote',
logger=NodeTypeController._logger,
cluster_ip=cluster_ip,
master_ip=master_ip,
offline_node_ips=offline_node_ips):
Toolbox.log(logger=NodeTypeController._logger, messages='Restarting services')
NodeTypeController.restart_framework_and_memcache_services(clients=ip_client_map, logger=NodeTypeController._logger, offline_node_ips=offline_node_ips)
if storagerouter not in offline_nodes:
target_client = ip_client_map[cluster_ip]
node_name, _ = target_client.get_hostname()
if NodeTypeController.avahi_installed(client=target_client, logger=NodeTypeController._logger) is True:
NodeTypeController.configure_avahi(client=target_client, node_name=node_name, node_type='extra', logger=NodeTypeController._logger)
Configuration.set('/ovs/framework/hosts/{0}/type'.format(storagerouter.machine_id), 'EXTRA')
if target_client is not None and target_client.file_exists('/tmp/ovs_rollback'):
target_client.file_write('/tmp/ovs_rollback', 'rollback')
Toolbox.log(logger=NodeTypeController._logger, messages='Demote complete', title=True)
示例12: execute_scrub_work
# 需要导入模块: from ovs.extensions.services.service import ServiceManager [as 别名]
# 或者: from ovs.extensions.services.service.ServiceManager import remove_service [as 别名]
def execute_scrub_work(queue, vpool, scrub_info, error_messages):
"""
Executes scrub work for a given vDisk queue and vPool, based on scrub_info
:param queue: a Queue with vDisk guids that need to be scrubbed (they should only be member of a single vPool)
:type queue: Queue
:param vpool: the vPool object of the vDisks
:type vpool: VPool
:param scrub_info: A dict containing scrub information: `scrub_path` with the path where to scrub and `storage_router` with the StorageRouter
that needs to do the work
:type scrub_info: dict
:param error_messages: A list of error messages to be filled
:type error_messages: list
:return: a list of error messages
:rtype: list
"""
def _verify_mds_config(current_vdisk):
current_vdisk.invalidate_dynamics('info')
vdisk_configs = current_vdisk.info['metadata_backend_config']
if len(vdisk_configs) == 0:
raise RuntimeError('Could not load MDS configuration')
return vdisk_configs
client = None
lock_time = 5 * 60
storagerouter = scrub_info['storage_router']
scrub_directory = '{0}/scrub_work_{1}_{2}'.format(scrub_info['scrub_path'], vpool.name, storagerouter.name)
scrub_config_key = 'ovs/vpools/{0}/proxies/scrub/scrub_config_{1}'.format(vpool.guid, storagerouter.guid)
backend_config_key = 'ovs/vpools/{0}/proxies/scrub/backend_config_{1}'.format(vpool.guid, storagerouter.guid)
alba_proxy_service = 'ovs-albaproxy_{0}_{1}_scrub'.format(vpool.name, storagerouter.name)
# Deploy a proxy
try:
with file_mutex(name='ovs_albaproxy_scrub', wait=lock_time):
ScheduledTaskController._logger.info('Scrubber - vPool {0} - StorageRouter {1} - Deploying ALBA proxy {2}'.format(vpool.name, storagerouter.name, alba_proxy_service))
client = SSHClient(storagerouter, 'root')
client.dir_create(scrub_directory)
client.dir_chmod(scrub_directory, 0777) # Celery task executed by 'ovs' user and should be able to write in it
if ServiceManager.has_service(name=alba_proxy_service, client=client) is True and ServiceManager.get_service_status(name=alba_proxy_service, client=client) is True:
ScheduledTaskController._logger.info('Scrubber - vPool {0} - StorageRouter {1} - Re-using existing proxy service {2}'.format(vpool.name, storagerouter.name, alba_proxy_service))
scrub_config = Configuration.get(scrub_config_key)
else:
machine_id = System.get_my_machine_id(client)
port_range = Configuration.get('/ovs/framework/hosts/{0}/ports|storagedriver'.format(machine_id))
port = System.get_free_ports(selected_range=port_range, nr=1, client=client)[0]
# Scrub config
# {u'albamgr_cfg_url': u'arakoon://config/ovs/vpools/71e2f717-f270-4a41-bbb0-d4c8c084d43e/proxies/64759516-3471-4321-b912-fb424568fc5b/config/abm?ini=%2Fopt%2FOpenvStorage%2Fconfig%2Farakoon_cacc.ini',
# u'fragment_cache': [u'none'],
# u'ips': [u'127.0.0.1'],
# u'log_level': u'info',
# u'manifest_cache_size': 17179869184,
# u'port': 0,
# u'transport': u'tcp'}
# Backend config
# {u'alba_connection_host': u'10.100.193.155',
# u'alba_connection_port': 26204,
# u'alba_connection_preset': u'preset',
# u'alba_connection_timeout': 15,
# u'alba_connection_transport': u'TCP',
# u'backend_interface_retries_on_error': 5,
# u'backend_interface_retry_backoff_multiplier': 2.0,
# u'backend_interface_retry_interval_secs': 1,
# u'backend_type': u'ALBA'}
scrub_config = Configuration.get('ovs/vpools/{0}/proxies/scrub/generic_scrub'.format(vpool.guid))
scrub_config['port'] = port
scrub_config['transport'] = 'tcp'
Configuration.set(scrub_config_key, json.dumps(scrub_config, indent=4), raw=True)
params = {'VPOOL_NAME': vpool.name,
'LOG_SINK': LogHandler.get_sink_path('alba_proxy'),
'CONFIG_PATH': Configuration.get_configuration_path(scrub_config_key)}
ServiceManager.add_service(name='ovs-albaproxy', params=params, client=client, target_name=alba_proxy_service)
ServiceManager.start_service(name=alba_proxy_service, client=client)
ScheduledTaskController._logger.info('Scrubber - vPool {0} - StorageRouter {1} - Deployed ALBA proxy {2}'.format(vpool.name, storagerouter.name, alba_proxy_service))
backend_config = Configuration.get('ovs/vpools/{0}/hosts/{1}/config'.format(vpool.guid, vpool.storagedrivers[0].storagedriver_id))['backend_connection_manager']
backend_config['alba_connection_host'] = '127.0.0.1'
backend_config['alba_connection_port'] = scrub_config['port']
Configuration.set(backend_config_key, json.dumps({"backend_connection_manager": backend_config}, indent=4), raw=True)
except Exception:
message = 'Scrubber - vPool {0} - StorageRouter {1} - An error occurred deploying ALBA proxy {2}'.format(vpool.name, storagerouter.name, alba_proxy_service)
error_messages.append(message)
ScheduledTaskController._logger.exception(message)
if client is not None and ServiceManager.has_service(name=alba_proxy_service, client=client) is True:
if ServiceManager.get_service_status(name=alba_proxy_service, client=client) is True:
ServiceManager.stop_service(name=alba_proxy_service, client=client)
ServiceManager.remove_service(name=alba_proxy_service, client=client)
if Configuration.exists(scrub_config_key):
Configuration.delete(scrub_config_key)
try:
# Empty the queue with vDisks to scrub
with remote(storagerouter.ip, [VDisk]) as rem:
while True:
vdisk = None
vdisk_guid = queue.get(False)
try:
# Check MDS master is local. Trigger MDS handover if necessary
vdisk = rem.VDisk(vdisk_guid)
#.........这里部分代码省略.........
示例13: remove
# 需要导入模块: from ovs.extensions.services.service import ServiceManager [as 别名]
# 或者: from ovs.extensions.services.service.ServiceManager import remove_service [as 别名]
def remove(cluster_name, client):
    """
    Removes an arakoon service
    """
    arakoon_service = "arakoon-{0}".format(cluster_name)
    if ServiceManager.has_service(arakoon_service, client=client) is True:
        ServiceManager.remove_service(arakoon_service, client=client)