This article collects typical usage examples of the Python method eventlet.greenpool.GreenPool.spawn_n. If you have been wondering what GreenPool.spawn_n does, how to use it, or want to see it in real code, the curated examples below should help. You can also explore further usage examples of the containing class, eventlet.greenpool.GreenPool.
Below are 11 code examples of GreenPool.spawn_n, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
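Before the examples, here is a minimal, self-contained sketch of the pattern they all share (the function and names are ours, purely illustrative): GreenPool(size=N) caps how many coroutines run at once, spawn_n schedules a callable on the pool without returning a GreenThread (unlike spawn, its return value and exceptions are discarded), and waitall() blocks until every spawned task has finished.

from eventlet.greenpool import GreenPool

def greet(name):
    # stand-in for real green-friendly work (network I/O, etc.)
    print('hello,', name)

pool = GreenPool(size=10)        # at most 10 coroutines in flight
for name in ('alpha', 'beta', 'gamma'):
    pool.spawn_n(greet, name)    # fire-and-forget; no GreenThread returned
pool.waitall()                   # block until all spawned tasks complete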
Example 1: tests
# Required import: from eventlet.greenpool import GreenPool [as alias]
# Or: from eventlet.greenpool.GreenPool import spawn_n [as alias]
def tests(status, test):
    pool = GreenPool(size=500)
    for host, s in status['servers'].iteritems():
        for t in test:
            if t.name in s:
                pool.spawn_n(t.test, host, s)
    pool.waitall()
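A detail worth knowing about the size=500 argument above: when the pool is full, spawn_n blocks the calling coroutine until a slot frees up, so a bounded pool also throttles the producer loop. A small sketch of that behaviour (timings illustrative):

import eventlet
from eventlet.greenpool import GreenPool

pool = GreenPool(size=2)        # only two tasks in flight at once

def work(n):
    eventlet.sleep(0.1)         # cooperative sleep yields to other greenlets
    print('done', n)

for n in range(5):
    pool.spawn_n(work, n)       # blocks here whenever both slots are busy
pool.waitall()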
Example 2: run_once
# Required import: from eventlet.greenpool import GreenPool [as alias]
# Or: from eventlet.greenpool.GreenPool import spawn_n [as alias]
def run_once(self, *args, **kwargs):
    """
    Executes a single pass, looking for objects to expire.

    :param args: Extra args to fulfill the Daemon interface; this daemon
                 has no additional args.
    :param kwargs: Extra keyword args to fulfill the Daemon interface; this
                   daemon accepts processes and process keyword args.
                   These will override the values from the config file if
                   provided.
    """
    processes, process = self.get_process_values(kwargs)
    pool = GreenPool(self.concurrency)
    containers_to_delete = []
    self.report_first_time = self.report_last_time = time()
    self.report_objects = 0
    try:
        self.logger.debug(_('Run begin'))
        containers, objects = \
            self.swift.get_account_info(self.expiring_objects_account)
        self.logger.info(_('Pass beginning; %s possible containers; %s '
                           'possible objects') % (containers, objects))
        for c in self.swift.iter_containers(self.expiring_objects_account):
            container = c['name']
            timestamp = int(container)
            if timestamp > int(time()):
                break
            containers_to_delete.append(container)
            for o in self.swift.iter_objects(self.expiring_objects_account,
                                             container):
                obj = o['name'].encode('utf8')
                if processes > 0:
                    obj_process = int(
                        hashlib.md5('%s/%s' % (container, obj)).
                        hexdigest(), 16)
                    if obj_process % processes != process:
                        continue
                timestamp, actual_obj = obj.split('-', 1)
                timestamp = int(timestamp)
                if timestamp > int(time()):
                    break
                pool.spawn_n(
                    self.delete_object, actual_obj, timestamp,
                    container, obj)
        pool.waitall()
        for container in containers_to_delete:
            try:
                self.swift.delete_container(
                    self.expiring_objects_account,
                    container,
                    acceptable_statuses=(2, HTTP_NOT_FOUND, HTTP_CONFLICT))
            except (Exception, Timeout) as err:
                self.logger.exception(
                    _('Exception while deleting container %s %s') %
                    (container, str(err)))
        self.logger.debug(_('Run end'))
        self.report(final=True)
    except (Exception, Timeout):
        self.logger.exception(_('Unhandled exception'))
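Example 2 pairs spawn_n with a hash-based sharding scheme: several expirer processes can run in parallel, and each one only handles the objects whose MD5 hash falls in its bucket. A stripped-down version of that check (the function name is ours, not Swift's):

import hashlib

def is_mine(key, processes, process):
    # Stable work partitioning: hash the task key to a big integer and
    # take it modulo the process count; each key maps to exactly one
    # worker, with no coordination needed between workers.
    if processes <= 0:
        return True          # sharding disabled: handle everything
    digest = hashlib.md5(key.encode('utf8')).hexdigest()
    return int(digest, 16) % processes == process

# Worker 0 of 3 skips any key for which is_mine(key, 3, 0) is False.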
Example 3: run_once
# Required import: from eventlet.greenpool import GreenPool [as alias]
# Or: from eventlet.greenpool.GreenPool import spawn_n [as alias]
def run_once(self, *args, **kwargs):
    processes, process = self.get_process_values(kwargs)
    pool = GreenPool(self.concurrency)
    self.report_first_time = self.report_last_time = time()
    self.report_objects = 0
    self.report_containers = 0
    containers_to_delete = []
    try:
        self.logger.debug(_('Run begin'))
        containers, objects = \
            self.swift.get_account_info(self.sample_account)
        self.logger.info(_('Pass beginning; %s possible containers; %s '
                           'possible objects') % (containers, objects))
        for c in self.swift.iter_containers(self.sample_account):
            container = c['name']
            try:
                timestamp, account = container.split('_', 1)
                timestamp = float(timestamp)
            except ValueError:
                self.logger.debug('ValueError: %s, '
                                  'need more than 1 value to unpack' %
                                  container)
            else:
                if processes > 0:
                    obj_proc = int(hashlib.md5(container).hexdigest(), 16)
                    if obj_proc % processes != process:
                        continue
                n = (float(time()) // self.sample_rate) * self.sample_rate
                if timestamp <= n:
                    containers_to_delete.append(container)
                    pool.spawn_n(self.aggregate_container, container)
        pool.waitall()
        for container in containers_to_delete:
            try:
                self.logger.debug('delete container: %s' % container)
                self.swift.delete_container(self.sample_account, container,
                                            acceptable_statuses=(
                                                2, HTTP_NOT_FOUND,
                                                HTTP_CONFLICT))
            except (Exception, Timeout) as err:
                self.logger.exception(
                    _('Exception while deleting container %s %s') %
                    (container, str(err)))
        tenants_to_fillup = list()
        for c in self.swift.iter_containers(self.aggregate_account):
            tenant_id = c['name']
            if processes > 0:
                c_proc = int(hashlib.md5(tenant_id).hexdigest(), 16)
                if c_proc % processes != process:
                    continue
            tenants_to_fillup.append(tenant_id)
        # fill up lost usage data
        self.fillup_lossed_usage_data(tenants_to_fillup)
        self.logger.debug(_('Run end'))
        self.report(final=True)
    except (Exception, Timeout):
        self.logger.exception(_('Unhandled exception'))
Example 4: run_once
# Required import: from eventlet.greenpool import GreenPool [as alias]
# Or: from eventlet.greenpool.GreenPool import spawn_n [as alias]
def run_once(self, *args, **kwargs):
    """
    Executes a single pass, looking for objects to expire.

    :param args: Extra args to fulfill the Daemon interface; this daemon
                 has no additional args.
    :param kwargs: Extra keyword args to fulfill the Daemon interface; this
                   daemon accepts processes and process keyword args.
                   These will override the values from the config file if
                   provided.
    """
    self.get_process_values(kwargs)
    pool = GreenPool(self.concurrency)
    containers_to_delete = set([])
    self.report_first_time = self.report_last_time = time()
    self.report_objects = 0
    try:
        self.logger.debug('Run begin')
        containers, objects = \
            self.swift.get_account_info(self.expiring_objects_account)
        self.logger.info(_('Pass beginning; '
                           '%(containers)s possible containers; '
                           '%(objects)s possible objects') % {
                               'containers': containers, 'objects': objects})
        for container, obj in self.iter_cont_objs_to_expire():
            containers_to_delete.add(container)
            if not obj:
                continue
            timestamp, actual_obj = obj.split('-', 1)
            timestamp = int(timestamp)
            if timestamp > int(time()):
                break
            pool.spawn_n(
                self.delete_object, actual_obj, timestamp,
                container, obj)
        pool.waitall()
        for container in containers_to_delete:
            try:
                self.swift.delete_container(
                    self.expiring_objects_account,
                    container,
                    acceptable_statuses=(2, HTTP_NOT_FOUND, HTTP_CONFLICT))
            except (Exception, Timeout) as err:
                self.logger.exception(
                    _('Exception while deleting container %(container)s '
                      '%(err)s') % {'container': container,
                                    'err': str(err)})
        self.logger.debug('Run end')
        self.report(final=True)
    except (Exception, Timeout):
        self.logger.exception(_('Unhandled exception'))
Example 5: run_once
# Required import: from eventlet.greenpool import GreenPool [as alias]
# Or: from eventlet.greenpool.GreenPool import spawn_n [as alias]
def run_once(self, *args, **kwargs):
    """
    Executes a single pass, looking for objects to expire.

    :param args: Extra args to fulfill the Daemon interface; this daemon
                 has no additional args.
    :param kwargs: Extra keyword args to fulfill the Daemon interface; this
                   daemon accepts processes and process keyword args.
                   These will override the values from the config file if
                   provided.
    """
    processes, process = self.get_process_values(kwargs)
    pool = GreenPool(self.concurrency)
    self.report_first_time = self.report_last_time = time()
    self.report_objects = 0
    try:
        self.logger.debug(_('Run begin'))
        for o in self.swift.iter_objects(self.restoring_object_account,
                                         self.todo_container):
            obj = o['name'].encode('utf8')
            if processes > 0:
                obj_process = int(
                    hashlib.md5('%s/%s' % (self.todo_container, obj)).
                    hexdigest(), 16)
                if obj_process % processes != process:
                    continue
            pool.spawn_n(self.start_object_restoring, obj)
        pool.waitall()
        for o in self.swift.iter_objects(self.restoring_object_account,
                                         self.restoring_container):
            obj = o['name'].encode('utf8')
            if processes > 0:
                obj_process = int(
                    hashlib.md5('%s/%s' % (self.restoring_container, obj)).
                    hexdigest(), 16)
                if obj_process % processes != process:
                    continue
            pool.spawn_n(self.check_object_restored, obj)
        pool.waitall()
        self.logger.debug(_('Run end'))
        self.report(final=True)
    except (Exception, Timeout):
        report_exception(self.logger, _('Unhandled exception'), self.client)
Example 6: discovery
# Required import: from eventlet.greenpool import GreenPool [as alias]
# Or: from eventlet.greenpool.GreenPool import spawn_n [as alias]
def discovery(status, test):
    pool = GreenPool(size=500)
    for d in settings.discovery:
        servers = d().get_servers()  # [('ip', 'host')]
        for server in servers:
            ip = server[0]
            host = server[1]
            if host in settings.exclude:
                continue
            if host not in status["servers"]:  # do discovery
                status["servers"][host] = {}
                logging.info("performing discovery on %r", server)
                for t in test:
                    pool.spawn_n(t.discover, ip, status["servers"][host])
                status["servers"][host]["ip"] = ip
    pool.waitall()
Example 7: VolumeManager
# Required import: from eventlet.greenpool import GreenPool [as alias]
# Or: from eventlet.greenpool.GreenPool import spawn_n [as alias]
class VolumeManager(manager.SchedulerDependentManager):
    """Manages attachable block storage devices."""

    RPC_API_VERSION = '1.11'

    def __init__(self, volume_driver=None, service_name=None,
                 *args, **kwargs):
        """Load the driver from the one specified in args, or from flags."""
        # update_service_capabilities needs service_name to be volume
        super(VolumeManager, self).__init__(service_name='volume',
                                            *args, **kwargs)
        self.configuration = Configuration(volume_manager_opts,
                                           config_group=service_name)
        self._tp = GreenPool()

        if not volume_driver:
            # Get from configuration, which will get the default
            # if it's not using the multi backend
            volume_driver = self.configuration.volume_driver
        if volume_driver in MAPPING:
            LOG.warn(_("Driver path %s is deprecated, update your "
                       "configuration to the new path."), volume_driver)
            volume_driver = MAPPING[volume_driver]
        if volume_driver == 'cinder.volume.drivers.lvm.ThinLVMVolumeDriver':
            # Deprecated in Havana
            # Not handled in MAPPING because it requires setting a conf option
            LOG.warn(_("ThinLVMVolumeDriver is deprecated, please configure "
                       "LVMISCSIDriver and lvm_type=thin. Continuing with "
                       "those settings."))
            volume_driver = 'cinder.volume.drivers.lvm.LVMISCSIDriver'
            self.configuration.lvm_type = 'thin'
        self.driver = importutils.import_object(
            volume_driver,
            configuration=self.configuration,
            db=self.db)

    def _add_to_threadpool(self, func, *args, **kwargs):
        self._tp.spawn_n(func, *args, **kwargs)

    def init_host(self):
        """Do any initialization that needs to be run if this is a
           standalone service.
        """
        ctxt = context.get_admin_context()
        LOG.info(_("Starting volume driver %(driver_name)s (%(version)s)") %
                 {'driver_name': self.driver.__class__.__name__,
                  'version': self.driver.get_version()})
        try:
            self.driver.do_setup(ctxt)
            self.driver.check_for_setup_error()
        except Exception as ex:
            LOG.error(_("Error encountered during "
                        "initialization of driver: %(name)s") %
                      {'name': self.driver.__class__.__name__})
            LOG.exception(ex)
            # we don't want to continue since we failed
            # to initialize the driver correctly.
            return

        volumes = self.db.volume_get_all_by_host(ctxt, self.host)
        LOG.debug(_("Re-exporting %s volumes"), len(volumes))
        try:
            for volume in volumes:
                if volume['status'] in ['available', 'in-use']:
                    self.driver.ensure_export(ctxt, volume)
                elif volume['status'] == 'downloading':
                    LOG.info(_("volume %s stuck in a downloading state"),
                             volume['id'])
                    self.driver.clear_download(ctxt, volume)
                    self.db.volume_update(ctxt,
                                          volume['id'],
                                          {'status': 'error'})
                else:
                    LOG.info(_("volume %s: skipping export"), volume['id'])
        except Exception as ex:
            LOG.error(_("Error encountered during "
                        "re-exporting phase of driver initialization: "
                        " %(name)s") %
                      {'name': self.driver.__class__.__name__})
            LOG.exception(ex)
            return

        # at this point the driver is considered initialized.
        self.driver.set_initialized()

        LOG.debug(_('Resuming any in progress delete operations'))
        for volume in volumes:
            if volume['status'] == 'deleting':
                LOG.info(_('Resuming delete on volume: %s') % volume['id'])
                if CONF.volume_service_inithost_offload:
                    # Offload all the pending volume delete operations to the
                    # threadpool to prevent the main volume service thread
                    # from being blocked.
                    # Note: pass the callable and its arguments separately so
                    # delete_volume runs inside the pool instead of here.
                    self._add_to_threadpool(self.delete_volume, ctxt,
                                            volume['id'])
                else:
                    # By default, delete volumes sequentially
                    self.delete_volume(ctxt, volume['id'])
#.........part of the code omitted here.........
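Examples 7, 10 and 11 all funnel spawn_n through a tiny _add_to_threadpool helper so that slow deletes do not block the service's main loop. The crucial point is that the callable and its arguments must be handed over separately; writing _add_to_threadpool(self.delete_volume(ctxt, id)) would execute the delete synchronously and pass only its return value to the pool. A minimal sketch (the Service class and delete_volume function are hypothetical stand-ins):

from eventlet.greenpool import GreenPool

class Service(object):
    """Hypothetical stand-in for the Cinder managers above."""

    def __init__(self):
        self._tp = GreenPool()

    def _add_to_threadpool(self, func, *args, **kwargs):
        # Defer func(*args, **kwargs) to the pool; the caller returns
        # immediately instead of waiting for the work to finish.
        self._tp.spawn_n(func, *args, **kwargs)

def delete_volume(ctxt, volume_id):
    print('deleting', volume_id, 'for', ctxt)

svc = Service()
svc._add_to_threadpool(delete_volume, 'admin-ctxt', 'vol-1')  # right
# svc._add_to_threadpool(delete_volume('admin-ctxt', 'vol-1'))  # wrong:
#     runs synchronously here and hands spawn_n the return value (None)
svc._tp.waitall()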
Example 8: run_once
# Required import: from eventlet.greenpool import GreenPool [as alias]
# Or: from eventlet.greenpool.GreenPool import spawn_n [as alias]
def run_once(self, *args, **kwargs):
    """
    Executes a single pass, looking for objects to expire.

    :param args: Extra args to fulfill the Daemon interface; this daemon
                 has no additional args.
    :param kwargs: Extra keyword args to fulfill the Daemon interface; this
                   daemon accepts processes and process keyword args.
                   These will override the values from the config file if
                   provided.
    """
    # This if-clause will be removed when general task queue feature is
    # implemented.
    if not self.dequeue_from_legacy:
        self.logger.info('This node is not configured to dequeue tasks '
                         'from the legacy queue. This node will '
                         'not process any expiration tasks. At least '
                         'one node in your cluster must be configured '
                         'with dequeue_from_legacy == true.')
        return

    self.get_process_values(kwargs)
    pool = GreenPool(self.concurrency)
    self.report_first_time = self.report_last_time = time()
    self.report_objects = 0
    try:
        self.logger.debug('Run begin')
        task_account_container_list_to_delete = list()
        for task_account, my_index, divisor in \
                self.iter_task_accounts_to_expire():
            container_count, obj_count = \
                self.swift.get_account_info(task_account)

            # the task account is skipped if there are no task containers
            if not container_count:
                continue
            self.logger.info(_(
                'Pass beginning for task account %(account)s; '
                '%(container_count)s possible containers; '
                '%(obj_count)s possible objects') % {
                    'account': task_account,
                    'container_count': container_count,
                    'obj_count': obj_count})

            task_account_container_list = \
                [(task_account, task_container) for task_container in
                 self.iter_task_containers_to_expire(task_account)]

            task_account_container_list_to_delete.extend(
                task_account_container_list)

            # delete_task_iter is a generator that yields a dict of
            # task_account, task_container, task_object, delete_timestamp,
            # target_path, used to delete the actual object and pop the
            # task from the queue.
            delete_task_iter = \
                self.round_robin_order(self.iter_task_to_expire(
                    task_account_container_list, my_index, divisor))

            for delete_task in delete_task_iter:
                pool.spawn_n(self.delete_object, **delete_task)

        pool.waitall()
        for task_account, task_container in \
                task_account_container_list_to_delete:
            try:
                self.swift.delete_container(
                    task_account, task_container,
                    acceptable_statuses=(2, HTTP_NOT_FOUND, HTTP_CONFLICT))
            except (Exception, Timeout) as err:
                self.logger.exception(
                    _('Exception while deleting container %(account)s '
                      '%(container)s %(err)s') % {
                          'account': task_account,
                          'container': task_container, 'err': str(err)})
        self.logger.debug('Run end')
        self.report(final=True)
    except (Exception, Timeout):
        self.logger.exception(_('Unhandled exception'))
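Example 8 also shows that spawn_n forwards keyword arguments unchanged, so a task expressed as a dict can be dispatched with ** unpacking. A compact illustration (the worker function is hypothetical; its fields mirror the comment in the example, and the values are made up):

from eventlet.greenpool import GreenPool

def delete_object(task_account, task_container, task_object,
                  delete_timestamp, target_path):
    # hypothetical worker; Example 8 spawns one of these per task dict
    print('expiring %s at %s' % (target_path, delete_timestamp))

pool = GreenPool(2)
tasks = [
    {'task_account': '.expiring_objects', 'task_container': '12345',
     'task_object': '12345-a/c/o', 'delete_timestamp': 12345,
     'target_path': 'a/c/o'},
]
for task in tasks:
    pool.spawn_n(delete_object, **task)  # kwargs pass straight through
pool.waitall()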
Example 9: CinderBackupProxy
# Required import: from eventlet.greenpool import GreenPool [as alias]
# Or: from eventlet.greenpool.GreenPool import spawn_n [as alias]
class CinderBackupProxy(manager.SchedulerDependentManager):
    """Manages attachable block storage devices."""

    RPC_API_VERSION = '1.18'
    target = messaging.Target(version=RPC_API_VERSION)

    VOLUME_NAME_MAX_LEN = 255
    VOLUME_UUID_MAX_LEN = 36
    BACKUP_NAME_MAX_LEN = 255
    BACKUP_UUID_MAX_LEN = 36

    def __init__(self, service_name=None, *args, **kwargs):
        """Load the driver specified in args, or from flags."""
        # update_service_capabilities needs service_name to be volume
        super(CinderBackupProxy, self).__init__(service_name='backup',
                                                *args, **kwargs)
        self.configuration = Configuration(volume_backup_opts,
                                           config_group=service_name)
        self._tp = GreenPool()
        self.volume_api = volume.API()
        self._last_info_volume_state_heal = 0
        self._change_since_time = None
        self.volumes_mapping_cache = {'backups': {}}
        self.init_flag = False
        self.backup_cache = []
        self.tenant_id = self._get_tenant_id()
        self.adminCinderClient = self._get_cascaded_cinder_client()

    def _init_volume_mapping_cache(self, context):
        try:
            backups = self.db.backup_get_all(context)
            for backup in backups:
                backup_id = backup['id']
                status = backup['status']
                try:
                    cascaded_backup_id = \
                        self._get_cascaded_backup_id(backup_id)
                except Exception:
                    continue
                if cascaded_backup_id == '' or status == 'error':
                    continue
                self.volumes_mapping_cache['backups'][backup_id] = \
                    cascaded_backup_id
            LOG.info(_("cascade info: init volume mapping cache is %s"),
                     self.volumes_mapping_cache)
        except Exception as ex:
            LOG.error(_("Failed to init volumes mapping cache"))
            LOG.exception(ex)

    def _gen_ccding_backup_name(self, backup_id):
        return "backup" + "@" + backup_id

    def _get_cinder_cascaded_admin_client(self):
        try:
            kwargs = {'username': cfg.CONF.cinder_username,
                      'password': cfg.CONF.admin_password,
                      'tenant_name': CONF.cinder_tenant_name,
                      'auth_url': cfg.CONF.keystone_auth_url,
                      'insecure': True}
            keystoneclient = kc.Client(**kwargs)
            cinderclient = cinder_client.Client(
                username=cfg.CONF.cinder_username,
                auth_url=cfg.CONF.keystone_auth_url,
                insecure=True)
            cinderclient.client.auth_token = \
                keystoneclient.auth_ref.auth_token
            diction = {'project_id': cfg.CONF.cinder_tenant_id}
            cinderclient.client.management_url = \
                cfg.CONF.cascaded_cinder_url % diction
            return cinderclient
        except keystone_exception.Unauthorized:
            with excutils.save_and_reraise_exception():
                LOG.error(_('Token unauthorized failed for keystoneclient '
                            'constructed when get cascaded admin client'))
        except cinder_exception.Unauthorized:
            with excutils.save_and_reraise_exception():
                LOG.error(_('Token unauthorized failed for cascaded '
                            'cinderClient constructed'))
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_('Failed to get cinder python client.'))

    def _add_to_threadpool(self, func, *args, **kwargs):
        self._tp.spawn_n(func, *args, **kwargs)

    @property
    def initialized(self):
        return self.init_flag

    def init_host(self):
        ctxt = context.get_admin_context()
        self._init_volume_mapping_cache(ctxt)
        LOG.info(_("Cleaning up incomplete backup operations."))
        # TODO(smulcahy) implement full resume of backup and restore
#.........part of the code omitted here.........
Example 10: VolumeManager
# Required import: from eventlet.greenpool import GreenPool [as alias]
# Or: from eventlet.greenpool.GreenPool import spawn_n [as alias]
class VolumeManager(manager.SchedulerDependentManager):
    """Manages attachable block storage devices."""

    RPC_API_VERSION = "1.12"

    def __init__(self, volume_driver=None, service_name=None, *args, **kwargs):
        """Load the driver from the one specified in args, or from flags."""
        # update_service_capabilities needs service_name to be volume
        super(VolumeManager, self).__init__(service_name="volume",
                                            *args, **kwargs)
        self.configuration = Configuration(volume_manager_opts,
                                           config_group=service_name)
        self._tp = GreenPool()
        self.stats = {}

        if not volume_driver:
            # Get from configuration, which will get the default
            # if it's not using the multi backend
            volume_driver = self.configuration.volume_driver
        if volume_driver in MAPPING:
            LOG.warn(_("Driver path %s is deprecated, update your "
                       "configuration to the new path."), volume_driver)
            volume_driver = MAPPING[volume_driver]
        if volume_driver == "cinder.volume.drivers.lvm.ThinLVMVolumeDriver":
            # Deprecated in Havana
            # Not handled in MAPPING because it requires setting a conf option
            LOG.warn(_("ThinLVMVolumeDriver is deprecated, please configure "
                       "LVMISCSIDriver and lvm_type=thin. Continuing with "
                       "those settings."))
            volume_driver = "cinder.volume.drivers.lvm.LVMISCSIDriver"
            self.configuration.lvm_type = "thin"
        self.driver = importutils.import_object(
            volume_driver,
            configuration=self.configuration,
            db=self.db)

    def _add_to_threadpool(self, func, *args, **kwargs):
        self._tp.spawn_n(func, *args, **kwargs)

    def init_host(self):
        """Do any initialization that needs to be run if this is a
           standalone service.
        """
        ctxt = context.get_admin_context()
        LOG.info(_("Starting volume driver %(driver_name)s (%(version)s)") %
                 {"driver_name": self.driver.__class__.__name__,
                  "version": self.driver.get_version()})
        try:
            self.driver.do_setup(ctxt)
            self.driver.check_for_setup_error()
        except Exception as ex:
            LOG.error(_("Error encountered during "
                        "initialization of driver: %(name)s") %
                      {"name": self.driver.__class__.__name__})
            LOG.exception(ex)
            # we don't want to continue since we failed
            # to initialize the driver correctly.
            return

        volumes = self.db.volume_get_all_by_host(ctxt, self.host)
        LOG.debug(_("Re-exporting %s volumes"), len(volumes))
        try:
            sum = 0
            self.stats.update({"allocated_capacity_gb": sum})
            for volume in volumes:
                if volume["status"] in ["available", "in-use"]:
                    # calculate allocated capacity for driver
                    sum += volume["size"]
                    self.stats["allocated_capacity_gb"] = sum
                    self.driver.ensure_export(ctxt, volume)
                elif volume["status"] == "downloading":
                    LOG.info(_("volume %s stuck in a downloading state"),
                             volume["id"])
                    self.driver.clear_download(ctxt, volume)
                    self.db.volume_update(ctxt, volume["id"],
                                          {"status": "error"})
                else:
                    LOG.info(_("volume %s: skipping export"), volume["id"])
        except Exception as ex:
            LOG.error(_("Error encountered during "
                        "re-exporting phase of driver initialization: "
                        " %(name)s") %
                      {"name": self.driver.__class__.__name__})
            LOG.exception(ex)
            return

        # at this point the driver is considered initialized.
        self.driver.set_initialized()

        LOG.debug(_("Resuming any in progress delete operations"))
        for volume in volumes:
            if volume["status"] == "deleting":
                LOG.info(_("Resuming delete on volume: %s") % volume["id"])
                if CONF.volume_service_inithost_offload:
                    # Offload all the pending volume delete operations to the
                    # threadpool to prevent the main volume service thread
                    # from being blocked.
                    # Note: pass the callable and its arguments separately so
                    # delete_volume runs inside the pool instead of here.
                    self._add_to_threadpool(self.delete_volume, ctxt,
                                            volume["id"])
                else:
                    # By default, delete volumes sequentially
#.........part of the code omitted here.........
Example 11: CinderProxy
# Required import: from eventlet.greenpool import GreenPool [as alias]
# Or: from eventlet.greenpool.GreenPool import spawn_n [as alias]
#.........part of the code omitted here.........
        except Exception:
            glance._reraise_translated_image_exception(image_id)

        if not self.image_service._is_image_available(context, image_meta):
            raise exception.ImageNotFound(image_id=image_id)

        locations = getattr(image_meta, 'locations', None)
        LOG.debug(_("Cascade info: image glance get_image_cascaded,"
                    "locations:%s"), locations)
        LOG.debug(_("Cascade info: image glance get_image_cascaded,"
                    "cascaded_glance_url:%s"), cascaded_glance_url)

        cascaded_image_id = None
        for loc in locations:
            image_url = loc.get('url')
            LOG.debug(_("Cascade info: image glance get_image_cascaded,"
                        "image_url:%s"), image_url)
            if cascaded_glance_url in image_url:
                (cascaded_image_id, glance_netloc, use_ssl) = \
                    glance._parse_image_ref(image_url)
                LOG.debug(_("Cascade info : Result :image glance "
                            "get_image_cascaded,%s") % cascaded_image_id)
                break

        if cascaded_image_id is None:
            raise exception.CinderException(
                _("Cascade exception: Cascaded image for image %s not exist ")
                % image_id)

        return cascaded_image_id

    def _add_to_threadpool(self, func, *args, **kwargs):
        self._tp.spawn_n(func, *args, **kwargs)

    def init_host(self):
        """Do any initialization that needs to be run if this is a
           standalone service.
        """
        ctxt = context.get_admin_context()

        volumes = self.db.volume_get_all_by_host(ctxt, self.host)
        LOG.debug(_("Re-exporting %s volumes"), len(volumes))

        LOG.debug(_('Resuming any in progress delete operations'))
        for volume in volumes:
            if volume['status'] == 'deleting':
                LOG.info(_('Resuming delete on volume: %s') % volume['id'])
                if CONF.volume_service_inithost_offload:
                    # Offload all the pending volume delete operations to the
                    # threadpool to prevent the main volume service thread
                    # from being blocked.
                    # Note: pass the callable and its arguments separately so
                    # delete_volume runs inside the pool instead of here.
                    self._add_to_threadpool(self.delete_volume, ctxt,
                                            volume['id'])
                else:
                    # By default, delete volumes sequentially
                    self.delete_volume(ctxt, volume['id'])

        # collect and publish service capabilities
        self.publish_service_capabilities(ctxt)

    def create_volume(self, context, volume_id, request_spec=None,
                      filter_properties=None, allow_reschedule=True,
                      snapshot_id=None, image_id=None, source_volid=None):
        """Creates and exports the volume."""