This article collects typical code examples showing how the lockutils.lock method from Python's oslo_concurrency package is used. If you have been wondering what exactly Python's lockutils.lock does, how to call it, or what real-world usage looks like, the curated examples below may help. You can also explore further usage examples for the containing module, oslo_concurrency.lockutils.
The following shows 15 code examples of the lockutils.lock method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
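Before the examples, here is a minimal sketch (not taken from the examples below) of the two forms of lockutils.lock that recur throughout this page: an in-process, semaphore-backed lock, and an inter-process, file-backed lock requested with external=True. The lock name 'my-resource' and the lock_path value are illustrative assumptions; for external locks, oslo.concurrency needs a lock_path, either passed directly as shown or set through the oslo_concurrency.lock_path configuration option.

from oslo_concurrency import lockutils

# In-process lock: a semaphore shared by the threads of this process.
with lockutils.lock('my-resource'):
    print('only one thread at a time runs this block')

# Inter-process lock: backed by a lock file, so it also serializes
# separate processes. The lock_path shown here is an illustrative choice.
with lockutils.lock('my-resource', external=True, lock_path='/tmp/locks'):
    print('only one process at a time runs this block')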
Example 1: _on_capsule_deleted
# Required import: from oslo_concurrency import lockutils [as alias]
# Or: from oslo_concurrency.lockutils import lock [as alias]
def _on_capsule_deleted(self, capsule_uuid):
    try:
        # NOTE(ndesh): We need to lock here to avoid race condition
        #              with the deletion code for CNI DEL so that
        #              we delete the registry entry exactly once
        with lockutils.lock(capsule_uuid, external=True):
            if self.registry[capsule_uuid]['vif_unplugged']:
                LOG.debug("Remove capsule %(capsule)s from registry",
                          {'capsule': capsule_uuid})
                del self.registry[capsule_uuid]
            else:
                LOG.debug("Received delete for capsule %(capsule)s",
                          {'capsule': capsule_uuid})
                capsule_dict = self.registry[capsule_uuid]
                capsule_dict['del_received'] = True
                self.registry[capsule_uuid] = capsule_dict
    except KeyError:
        # This means someone else removed it. It's odd but safe to ignore.
        LOG.debug('Capsule %s entry already removed from registry while '
                  'handling DELETED event. Ignoring.', capsule_uuid)
        pass
Example 2: on_deleted
# Required import: from oslo_concurrency import lockutils [as alias]
# Or: from oslo_concurrency.lockutils import lock [as alias]
def on_deleted(self, pod):
    pod_name = utils.get_pod_unique_name(pod)
    try:
        if pod_name in self.registry:
            # NOTE(ndesh): We need to lock here to avoid race condition
            #              with the deletion code for CNI DEL so that
            #              we delete the registry entry exactly once
            with lockutils.lock(pod_name, external=True):
                if self.registry[pod_name]['vif_unplugged']:
                    del self.registry[pod_name]
                else:
                    pod_dict = self.registry[pod_name]
                    pod_dict['del_received'] = True
                    self.registry[pod_name] = pod_dict
    except KeyError:
        # This means someone else removed it. It's odd but safe to ignore.
        LOG.debug('Pod %s entry already removed from registry while '
                  'handling DELETED event. Ignoring.', pod_name)
        pass
Example 3: _close_vterm_local
# Required import: from oslo_concurrency import lockutils [as alias]
# Or: from oslo_concurrency.lockutils import lock [as alias]
def _close_vterm_local(adapter, lpar_uuid):
    """Forces the close of the terminal on a local system.

    Will check for a VNC server as well in case it was started via that
    mechanism.

    :param adapter: The adapter to talk over the API.
    :param lpar_uuid: partition uuid
    """
    lpar_id = _get_lpar_id(adapter, lpar_uuid)
    _run_proc(['rmvterm', '--id', lpar_id])

    # Stop the port.
    with lock.lock('powervm_vnc_term'):
        vnc_port = _VNC_UUID_TO_LOCAL_PORT.get(lpar_uuid, 0)
        if vnc_port in _VNC_LOCAL_PORT_TO_REPEATER:
            _VNC_LOCAL_PORT_TO_REPEATER[vnc_port].stop()
Example 4: get_all
# Required import: from oslo_concurrency import lockutils [as alias]
# Or: from oslo_concurrency.lockutils import lock [as alias]
def get_all(self):
    """Return the list of loaded modules.

    :return: name of every loaded module.
    """
    policy.authorize(pecan.request.context, 'rating:list_modules', {})
    modules_list = []
    lock = lockutils.lock('rating-modules')
    with lock:
        for module in self.extensions:
            infos = module.obj.module_info.copy()
            infos['module_id'] = infos.pop('name')
            modules_list.append(rating_models.CloudkittyModule(**infos))
    return rating_models.CloudkittyModuleCollection(
        modules=modules_list)
Example 5: get_one
# Required import: from oslo_concurrency import lockutils [as alias]
# Or: from oslo_concurrency.lockutils import lock [as alias]
def get_one(self, module_id):
    """Return a module.

    :return: CloudKittyModule
    """
    policy.authorize(pecan.request.context, 'rating:get_module', {})
    try:
        lock = lockutils.lock('rating-modules')
        with lock:
            module = self.extensions[module_id]
    except KeyError:
        pecan.abort(404, 'Module not found.')
    infos = module.obj.module_info.copy()
    infos['module_id'] = infos.pop('name')
    return rating_models.CloudkittyModule(**infos)
Example 6: expose_modules
# Required import: from oslo_concurrency import lockutils [as alias]
# Or: from oslo_concurrency.lockutils import lock [as alias]
def expose_modules(self):
    """Load rating modules to expose API controllers."""
    lock = lockutils.lock('rating-modules')
    with lock:
        for ext in self.extensions:
            # FIXME(sheeprine): we should notify two modules with same name
            name = ext.name
            if not ext.obj.config_controller:
                ext.obj.config_controller = UnconfigurableController
            # Update extension reference
            setattr(self, name, ext.obj.config_controller())
            if name in self._loaded_modules:
                self._loaded_modules.remove(name)
        # Clear removed modules
        for module in self._loaded_modules:
            delattr(self, module)
        self._loaded_modules = self.extensions.names()
Example 7: power_on
# Required import: from oslo_concurrency import lockutils [as alias]
# Or: from oslo_concurrency.lockutils import lock [as alias]
def power_on(adapter, instance, opts=None):
    """Powers on a VM.

    :param adapter: A pypowervm.adapter.Adapter.
    :param instance: The nova instance to power on.
    :param opts: (Optional) Additional parameters to the pypowervm power_on
                 method. See that method's docstring for details.
    :return: True if the instance was powered on. False if it was not in a
             startable state.
    :raises: InstancePowerOnFailure
    """
    # Synchronize power-on and power-off ops on a given instance
    with lockutils.lock('power_%s' % instance.uuid):
        entry = get_instance_wrapper(adapter, instance)
        # Get the current state and see if we can start the VM
        if entry.state in POWERVM_STARTABLE_STATE:
            # Now start the lpar
            power.power_on(entry, None, add_parms=opts)
            return True

    return False
Example 8: lock_files
# Required import: from oslo_concurrency import lockutils [as alias]
# Or: from oslo_concurrency.lockutils import lock [as alias]
def lock_files(handles_dir, out_queue):
    with lockutils.lock('external', 'test-', external=True):
        # Open some files we can use for locking
        handles = []
        for n in range(50):
            path = os.path.join(handles_dir, ('file-%s' % n))
            handles.append(open(path, 'w'))

        # Loop over all the handles and try locking the file
        # without blocking, keep a count of how many files we
        # were able to lock and then unlock. If the lock fails
        # we get an IOError and bail out with bad exit code
        count = 0
        for handle in handles:
            try:
                lock_file(handle)
                count += 1
                unlock_file(handle)
            except IOError:
                os._exit(2)
            finally:
                handle.close()
        return out_queue.put(count)
Example 9: test_contextlock_unlocks
# Required import: from oslo_concurrency import lockutils [as alias]
# Or: from oslo_concurrency.lockutils import lock [as alias]
def test_contextlock_unlocks(self):
    self.config(lock_path=tempfile.mkdtemp(), group='oslo_concurrency')

    with lockutils.lock("test") as sem:
        self.assertIsInstance(sem, threading.Semaphore)

        with lockutils.lock("test2", external=True) as lock:
            self.assertTrue(lock.exists())

        # NOTE(flaper87): Lock should be free
        with lockutils.lock("test2", external=True) as lock:
            self.assertTrue(lock.exists())

    # NOTE(flaper87): Lock should be free
    # but semaphore should already exist.
    with lockutils.lock("test") as sem2:
        self.assertEqual(sem, sem2)
Example 10: cancel_migration
# Required import: from oslo_concurrency import lockutils [as alias]
# Or: from oslo_concurrency.lockutils import lock [as alias]
def cancel_migration(self, ctxt, migration_id, force):
    migration = self._get_migration(ctxt, migration_id)
    if len(migration.executions) != 1:
        raise exception.InvalidMigrationState(
            "Migration '%s' has an improper number of task "
            "executions: %d" % (migration_id, len(migration.executions)))
    execution = migration.executions[0]
    if execution.status not in constants.ACTIVE_EXECUTION_STATUSES:
        raise exception.InvalidMigrationState(
            "Migration '%s' is not currently running" % migration_id)
    if execution.status == constants.EXECUTION_STATUS_CANCELLING and (
            not force):
        raise exception.InvalidMigrationState(
            "Migration '%s' is already being cancelled. Please use the "
            "force option if you'd like to force-cancel it." % migration_id)

    with lockutils.lock(
            constants.EXECUTION_LOCK_NAME_FORMAT % execution.id,
            external=True):
        self._cancel_tasks_execution(ctxt, execution, force=force)

    self._check_delete_reservation_for_transfer(migration)
Example 11: __call__
# Required import: from oslo_concurrency import lockutils [as alias]
# Or: from oslo_concurrency.lockutils import lock [as alias]
def __call__(cls, *args, **kwargs):
    with lockutils.lock('singleton_lock', semaphores=cls._semaphores):
        if cls not in cls._instances:
            cls._instances[cls] = super(
                Singleton, cls).__call__(*args, **kwargs)
        return cls._instances[cls]
Example 12: _update_vif_status
# Required import: from oslo_concurrency import lockutils [as alias]
# Or: from oslo_concurrency.lockutils import lock [as alias]
def _update_vif_status(self, capsule_uuid, ifname):
    with lockutils.lock(capsule_uuid, external=True):
        capsule_dict = self.registry.get(capsule_uuid)
        if capsule_dict:
            capsule_dict = self.registry[capsule_uuid]
            capsule_dict['vifs'][ifname]['active'] = True
            self.registry[capsule_uuid] = capsule_dict
Example 13: add
# Required import: from oslo_concurrency import lockutils [as alias]
# Or: from oslo_concurrency.lockutils import lock [as alias]
def add(self, params):
    vifs = self._do_work(params, b_base.connect)

    capsule_uuid = self._get_capsule_uuid(params)

    # NOTE(dulek): Saving containerid to be able to distinguish old DEL
    #              requests that we should ignore. We need a lock to
    #              prevent race conditions and replace whole object in the
    #              dict for multiprocessing.Manager to notice that.
    with lockutils.lock(capsule_uuid, external=True):
        self.registry[capsule_uuid] = {
            'containerid': params.CNI_CONTAINERID,
            'vif_unplugged': False,
            'del_received': False,
            'vifs': {ifname: {'active': vif.active, 'id': vif.id}
                     for ifname, vif in vifs.items()},
        }
        LOG.debug('Saved containerid = %s for capsule %s',
                  params.CNI_CONTAINERID, capsule_uuid)

    # Wait for VIFs to become active.
    timeout = CONF.cni_daemon.vif_active_timeout

    def any_vif_inactive(vifs):
        """Return True if there is at least one VIF that's not ACTIVE."""
        return any(not vif['active'] for vif in vifs.values())

    # Wait for timeout sec, 1 sec between tries, retry when even one
    # vif is not active.
    @retrying.retry(stop_max_delay=timeout * 1000, wait_fixed=RETRY_DELAY,
                    retry_on_result=any_vif_inactive)
    def wait_for_active(capsule_uuid):
        return self.registry[capsule_uuid]['vifs']

    result = wait_for_active(capsule_uuid)

    for vif in result.values():
        if not vif['active']:
            LOG.error("Timed out waiting for vifs to become active")
            raise exception.ResourceNotReady(resource=capsule_uuid)

    return vifs[consts.DEFAULT_IFNAME]
Example 14: __call__
# Required import: from oslo_concurrency import lockutils [as alias]
# Or: from oslo_concurrency.lockutils import lock [as alias]
def __call__(self, event, *args, **kwargs):
    group = self._group_by(event)
    with lockutils.lock(group):
        try:
            queue = self._queues[group]
            # NOTE(dulek): We don't want to risk injecting an outdated
            #              state if events for that resource are in queue.
            if kwargs.get('injected', False):
                return
        except KeyError:
            queue = py_queue.Queue(self._queue_depth)
            self._queues[group] = queue
            thread = self._thread_group.add_thread(self._run, group, queue)
            thread.link(self._done, group)
        queue.put((event, args, kwargs))
Example 15: on_done
# Required import: from oslo_concurrency import lockutils [as alias]
# Or: from oslo_concurrency.lockutils import lock [as alias]
def on_done(self, pod, vifs):
    pod_name = utils.get_pod_unique_name(pod)
    vif_dict = {
        ifname: vif.obj_to_primitive() for
        ifname, vif in vifs.items()
    }
    # NOTE(dulek): We need a lock when modifying shared self.registry dict
    #              to prevent race conditions with other processes/threads.
    with lockutils.lock(pod_name, external=True):
        if (pod_name not in self.registry or
                self.registry[pod_name]['pod']['metadata']['uid']
                != pod['metadata']['uid']):
            self.registry[pod_name] = {'pod': pod, 'vifs': vif_dict,
                                       'containerid': None,
                                       'vif_unplugged': False,
                                       'del_received': False}
        else:
            # NOTE(dulek): Only update vif if its status changed, we don't
            #              need to care about other changes now.
            old_vifs = {
                ifname:
                base.VersionedObject.obj_from_primitive(vif_obj) for
                ifname, vif_obj in (
                    self.registry[pod_name]['vifs'].items())
            }
            for iface in vifs:
                if old_vifs[iface].active != vifs[iface].active:
                    pod_dict = self.registry[pod_name]
                    pod_dict['vifs'] = vif_dict
                    self.registry[pod_name] = pod_dict