This article collects typical usage examples of the Python function neutron.openstack.common.lockutils.lock. If you have been wondering how lock is used in practice and what it looks like in real code, the curated examples here should help.
Below are 15 code examples of the lock function, ordered by popularity.
Example 1: delete_flowrule
def delete_flowrule(self, context, id):
'''
Called by a DELETE to /flowrules/id.
Three methods are called here:
delete_flowrule_precommit -> mechanism driver method (it should perform the delete calls on the "flowmods" DB table, which holds the flowmods to be instantiated)
delete_flowrule -> calls delete_flowrule in db_base_plugin_v2.py (it deletes the flowrule from the "flowrules" table, plus some checks)
delete_flowrule_postcommit -> mechanism driver method to sync all flowmods with ODL
'''
# The id is the unique identifier that can be used to delete
# the row entry of your database.
LOG.debug(_("delete_flowrule with id "+str(id)))
#no return value required
session = context.session
with contextlib.nested(lockutils.lock('db-access'),
session.begin(subtransactions=True)):
flowrule = self.get_flowrule(context, id)
mech_context = driver_context.FlowruleContext(self,
context,
flowrule)
self.mechanism_manager.delete_flowrule_precommit(mech_context)
try: #check if the flowrule exists in the DB
flowrule_db = (session.query(models_v2.Flowrule).
enable_eagerloads(False).
filter_by(id=id).with_lockmode('update').one())
except sa_exc.NoResultFound:
LOG.debug(_("The flowrule '%s' does not exist anymore"), id)
return
flowrule = self._make_flowrule_dict(flowrule_db)
super(Ml2Plugin, self).delete_flowrule(context, id)
#TBD move out of the transaction
self.mechanism_manager.delete_flowrule_postcommit(mech_context)
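The pattern above, reused in several later examples, wraps the process-wide 'db-access' lock around a DB subtransaction via contextlib.nested. Since contextlib.nested was removed in Python 3, here is a minimal sketch of the same idiom with plain nested with statements; the plugin and method names are illustrative, not the exact ones from the example:

from neutron.openstack.common import lockutils

def delete_under_db_lock(plugin, context, flowrule_id):
    # Take the Neutron-wide 'db-access' semaphore, then open the
    # subtransaction; equivalent to the contextlib.nested form above.
    session = context.session
    with lockutils.lock('db-access'):
        with session.begin(subtransactions=True):
            # precommit calls and DB work go here, as in the example
            plugin.delete_flowrule(context, flowrule_id)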
Example 2: _txn_from_context
def _txn_from_context(self, context, tag="<unset>"):
"""Context manager: opens a DB transaction against the given context.
If required, this also takes the Neutron-wide db-access semaphore.
:return: context manager for use with with:.
"""
session = context.session
conn_url = str(session.connection().engine.url).lower()
if (conn_url.startswith("mysql:") or
conn_url.startswith("mysql+mysqldb:")):
# Neutron is using the mysqldb driver for accessing the database.
# This has a known incompatibility with eventlet that leads to
# deadlock. Take the neutron-wide db-access lock as a workaround.
# See https://bugs.launchpad.net/oslo.db/+bug/1350149 for a
# description of the issue.
LOG.debug("Waiting for db-access lock tag=%s...", tag)
try:
with lockutils.lock('db-access'):
LOG.debug("...acquired db-access lock tag=%s", tag)
with context.session.begin(subtransactions=True) as txn:
yield txn
finally:
LOG.debug("Released db-access lock tag=%s", tag)
else:
# Liberty or later uses an eventlet-safe mysql library. (Or, we're
# not using mysql at all.)
LOG.debug("Not using mysqldb driver, skipping db-access lock")
with context.session.begin(subtransactions=True) as txn:
yield txn
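Because this function yields, in its source it is presumably wrapped with @contextlib.contextmanager (the decorator is not shown in the snippet). A hedged usage sketch with a hypothetical caller and data:

def update_subnet_tags(self, context, subnet_db, tags):
    # Hypothetical caller: all DB work runs inside one transaction, and
    # under the db-access lock only when the mysqldb driver is in use.
    with self._txn_from_context(context, tag="update-subnet-tags"):
        subnet_db.tags = tags
        context.session.add(subnet_db)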
Example 3: update_port_status
def update_port_status(self, context, port_id, status, host=None):
"""
Returns port_id (non-truncated uuid) if the port exists.
Otherwise returns None.
"""
updated = False
session = context.session
# REVISIT: Serialize this operation with a semaphore to
# prevent deadlock waiting to acquire a DB lock held by
# another thread in the same process, leading to 'lock wait
# timeout' errors.
with contextlib.nested(lockutils.lock('db-access'),
session.begin(subtransactions=True)):
port = db.get_port(session, port_id)
if not port:
LOG.warning(_("Port %(port)s updated up by agent not found"),
{'port': port_id})
return None
if port.status != status:
original_port = self._make_port_dict(port)
port.status = status
updated_port = self._make_port_dict(port)
network = self.get_network(context,
original_port['network_id'])
mech_context = driver_context.PortContext(
self, context, updated_port, network, port.port_binding,
original_port=original_port)
self.mechanism_manager.update_port_precommit(mech_context)
updated = True
if updated:
self.mechanism_manager.update_port_postcommit(mech_context)
return port['id']
Example 4: update_port_status
def update_port_status(self, context, port_id, status):
updated = False
session = context.session
# REVISIT: Serialize this operation with a semaphore to prevent
# undesired eventlet yields leading to 'lock wait timeout' errors
with contextlib.nested(lockutils.lock('db-access'),
session.begin(subtransactions=True)):
port = db.get_port(session, port_id)
if not port:
LOG.warning(_("Port %(port)s updated up by agent not found"),
{'port': port_id})
return False
if port.status != status:
original_port = self._make_port_dict(port)
port.status = status
updated_port = self._make_port_dict(port)
network = self.get_network(context,
original_port['network_id'])
mech_context = driver_context.PortContext(
self, context, updated_port, network,
original_port=original_port)
self.mechanism_manager.update_port_precommit(mech_context)
updated = True
if updated:
self.mechanism_manager.update_port_postcommit(mech_context)
return True
Example 5: _set
def _set(self, key, value, ttl=0, not_exists=False):
with lockutils.lock(key):
# NOTE(flaper87): This is needed just in `set`
# calls, hence it's not in `_set_unlocked`
if not_exists and self._exists_unlocked(key):
return False
self._set_unlocked(key, value, ttl)
return True
Example 6: _incr_append
def _incr_append(self, key, other):
with lockutils.lock(key):
timeout, value = self._get_unlocked(key)
if value is None:
return None
ttl = timeutils.utcnow_ts() - timeout
new_value = value + other
self._set_unlocked(key, new_value, ttl)
return new_value
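Examples 5 and 6 name the lock after the cache key itself, so writers to the same key are serialized while operations on different keys proceed in parallel. A minimal standalone sketch of that per-key idiom; the _cache dict and helper name are illustrative:

from neutron.openstack.common import lockutils

_cache = {}

def set_if_absent(key, value):
    # Only one caller at a time can run this block for a given key;
    # callers working on other keys are not blocked.
    with lockutils.lock(key):
        if key in _cache:
            return False
        _cache[key] = value
        return True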
Example 7: _apply
def _apply(self):
lock_name = "iptables"
if self.namespace:
lock_name += "-" + self.namespace
try:
with lockutils.lock(lock_name, utils.SYNCHRONIZED_PREFIX, True):
LOG.debug('Got semaphore / lock "%s"', lock_name)
return self._apply_synchronized()
finally:
LOG.debug('Semaphore / lock released "%s"', lock_name)
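Here the third positional argument to lockutils.lock is external=True, so the lock is backed by a file (named with the given prefix) and serializes separate agent processes, not just greenthreads in one process. A hedged sketch of the same call, using a literal 'neutron-' prefix as a stand-in for utils.SYNCHRONIZED_PREFIX:

from neutron.openstack.common import lockutils

def apply_iptables(namespace=None):
    lock_name = "iptables" + ("-" + namespace if namespace else "")
    # external=True -> file-based lock shared across processes
    with lockutils.lock(lock_name, 'neutron-', True):
        pass  # run the actual synchronized apply here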
Example 8: update_port
def update_port(self, context, id, port):
attrs = port['port']
need_port_update_notify = False
session = context.session
# REVISIT: Serialize this operation with a semaphore to
# prevent deadlock waiting to acquire a DB lock held by
# another thread in the same process, leading to 'lock wait
# timeout' errors.
with contextlib.nested(lockutils.lock('db-access'),
session.begin(subtransactions=True)):
port_db, binding = db.get_locked_port_and_binding(session, id)
if not port_db:
raise exc.PortNotFound(port_id=id)
original_port = self._make_port_dict(port_db)
updated_port = super(Ml2Plugin, self).update_port(context, id,
port)
if addr_pair.ADDRESS_PAIRS in port['port']:
need_port_update_notify |= (
self.update_address_pairs_on_port(context, id, port,
original_port,
updated_port))
need_port_update_notify |= self.update_security_group_on_port(
context, id, port, original_port, updated_port)
network = self.get_network(context, original_port['network_id'])
need_port_update_notify |= self._update_extra_dhcp_opts_on_port(
context, id, port, updated_port)
mech_context = driver_context.PortContext(
self, context, updated_port, network, binding,
original_port=original_port)
need_port_update_notify |= self._process_port_binding(
mech_context, attrs)
self.mechanism_manager.update_port_precommit(mech_context)
# TODO(apech) - handle errors raised by update_port, potentially
# by re-calling update_port with the previous attributes. For
# now the error is propagated to the caller, which is expected to
# either undo/retry the operation or delete the resource.
self.mechanism_manager.update_port_postcommit(mech_context)
need_port_update_notify |= self.is_security_group_member_updated(
context, original_port, updated_port)
if original_port['admin_state_up'] != updated_port['admin_state_up']:
need_port_update_notify = True
bound_port = self._bind_port_if_needed(
mech_context,
allow_notify=True,
need_notify=need_port_update_notify)
return bound_port._port
Example 9: instance
def instance(cls, l3_agent):
"""Creates instance (singleton) of service.
Do not directly call this for the base class. Instead, it should be
called by a child class, that represents a specific service type.
This ensures that only one instance is created for all agents of a
specific service type.
"""
if not cls._instance:
with lockutils.lock('instance'):
if not cls._instance:
cls._instance = cls(l3_agent)
return cls._instance
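This is the classic double-checked locking idiom: the unlocked check keeps the common path cheap, the 'instance' lock serializes first-time creation, and the second check under the lock prevents two greenthreads from both constructing the singleton. A self-contained sketch with illustrative names:

from neutron.openstack.common import lockutils

class Service(object):
    _instance = None

    @classmethod
    def instance(cls, agent):
        if not cls._instance:                  # fast path, no lock taken
            with lockutils.lock('instance'):   # serialize first creation
                if not cls._instance:          # re-check under the lock
                    cls._instance = cls(agent)
        return cls._instance

    def __init__(self, agent):
        self.agent = agent

agent = object()  # stand-in for the real l3_agent
assert Service.instance(agent) is Service.instance(agent)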
Example 10: get_client
def get_client(context, admin=False):
if admin or (context.is_admin and not context.auth_token):
with lockutils.lock('neutron_admin_auth_token_lock'):
orig_token = AdminTokenStore.get().admin_auth_token
client = _get_client(orig_token, admin=True)
return ClientWrapper(client)
# We got a user token that we can use as-is
if context.auth_token:
token = context.auth_token
return _get_client(token=token)
# We did not get a user token and we should not be using
# an admin token so log an error
raise neutron_client_exc.Unauthorized()
Example 11: delete_port
def delete_port(self, context, id, l3_port_check=True):
LOG.debug(_("Deleting port %s"), id)
l3plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
if l3plugin and l3_port_check:
l3plugin.prevent_l3_port_deletion(context, id)
session = context.session
# REVISIT: Serialize this operation with a semaphore to prevent
# undesired eventlet yields leading to 'lock wait timeout' errors
with contextlib.nested(lockutils.lock('db-access'),
session.begin(subtransactions=True)):
try:
port_db = (session.query(models_v2.Port).
enable_eagerloads(False).
filter_by(id=id).with_lockmode('update').one())
except sa_exc.NoResultFound:
# the port existed when l3plugin.prevent_l3_port_deletion
# was called but now is already gone
LOG.debug(_("The port '%s' was deleted"), id)
return
port = self._make_port_dict(port_db)
network = self.get_network(context, port['network_id'])
mech_context = driver_context.PortContext(self, context, port,
network)
self.mechanism_manager.delete_port_precommit(mech_context)
self._delete_port_security_group_bindings(context, id)
LOG.debug(_("Calling base delete_port"))
if l3plugin:
router_ids = l3plugin.disassociate_floatingips(
context, id, do_notify=False)
super(Ml2Plugin, self).delete_port(context, id)
# now that we've left db transaction, we are safe to notify
if l3plugin:
l3plugin.notify_routers_updated(context, router_ids)
try:
self.mechanism_manager.delete_port_postcommit(mech_context)
except ml2_exc.MechanismDriverError:
# TODO(apech) - One or more mechanism driver failed to
# delete the port. Ideally we'd notify the caller of the
# fact that an error occurred.
LOG.error(_("mechanism_manager.delete_port_postcommit failed"))
self.notify_security_groups_member_updated(context, port)
Example 12: delete_port
def delete_port(self, context, id, l3_port_check=True):
LOG.debug(_("Deleting port %s"), id)
l3plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
if l3plugin and l3_port_check:
l3plugin.prevent_l3_port_deletion(context, id)
session = context.session
# REVISIT: Serialize this operation with a semaphore to
# prevent deadlock waiting to acquire a DB lock held by
# another thread in the same process, leading to 'lock wait
# timeout' errors.
with contextlib.nested(lockutils.lock('db-access'),
session.begin(subtransactions=True)):
port_db, binding = db.get_locked_port_and_binding(session, id)
if not port_db:
# the port existed when l3plugin.prevent_l3_port_deletion
# was called but now is already gone
LOG.debug(_("The port '%s' was deleted"), id)
return
port = self._make_port_dict(port_db)
network = self.get_network(context, port['network_id'])
mech_context = driver_context.PortContext(self, context, port,
network, binding)
self.mechanism_manager.delete_port_precommit(mech_context)
self._delete_port_security_group_bindings(context, id)
LOG.debug(_("Calling base delete_port"))
if l3plugin:
router_ids = l3plugin.disassociate_floatingips(
context, id, do_notify=False)
super(Ml2Plugin, self).delete_port(context, id)
# now that we've left db transaction, we are safe to notify
if l3plugin:
l3plugin.notify_routers_updated(context, router_ids)
try:
self.mechanism_manager.delete_port_postcommit(mech_context)
except ml2_exc.MechanismDriverError:
# TODO(apech) - One or more mechanism driver failed to
# delete the port. Ideally we'd notify the caller of the
# fact that an error occurred.
LOG.error(_("mechanism_manager.delete_port_postcommit failed"))
self.notify_security_groups_member_updated(context, port)
Example 13: _run_commands
def _run_commands(self, port, commands):
num_tries = 0
max_tries = 1 + self._config.auth_failure_retries
sleep_time = self._config.auth_failure_retry_interval
while True:
num_tries += 1
try:
# we must lock during switch communication here because we run
# the save commands in a separate greenthread.
with lockutils.lock('CiscoDriver-%s' % (port.switch_host),
lock_file_prefix='neutron-'):
return self._run_commands_inner(port, commands)
except CiscoException as err:
if (num_tries == max_tries or not self._retryable_error(err)):
raise
LOG.warning("Received retryable failure: %s" % err)
time.sleep(sleep_time)
Example 14: delete_listener
def delete_listener(self, context, id):
with contextlib.nested(lockutils.lock('db-access'),
context.session.begin(subtransactions=True)):
listener_db_entry = self._get_resource(context, models.Listener, id)
#if listener_db_entry.admin_state_up:
# filters = {'loadbalancer_id': [listener_db_entry.loadbalancer_id],
# 'admin_state_up': [True]}
# all_filters = {'loadbalancer_id': [listener_db_entry.loadbalancer_id]}
# all_listeners = self._get_resources(context,
# models.Listener,
# filters=all_filters)
# if len(all_listeners)>1:
# up_listeners = self._get_resources(context,
# models.Listener,
# filters=filters)
# if len(up_listeners)<=1:
# raise loadbalancerv2.OneListenerAdminStateUpAtLeast(
# lb_id=listener_db_entry.loadbalancer_id)
context.session.delete(listener_db_entry)
Example 15: __init__
def __init__(self, name, lock_file_prefix=None):
self.mgr = lockutils.lock(name, lock_file_prefix, True)
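Only __init__ of this wrapper class appears in the example; storing the context manager on self.mgr suggests the wrapper itself is entered and exited elsewhere. A hedged sketch of how such a wrapper might be completed and used (the class name and the __enter__/__exit__ delegation are assumptions, not part of the original code):

from neutron.openstack.common import lockutils

class LockWrapper(object):
    def __init__(self, name, lock_file_prefix=None):
        # Third argument True -> external, file-backed lock
        self.mgr = lockutils.lock(name, lock_file_prefix, True)

    def __enter__(self):
        return self.mgr.__enter__()

    def __exit__(self, exc_type, exc_val, exc_tb):
        return self.mgr.__exit__(exc_type, exc_val, exc_tb)

with LockWrapper('my-resource', lock_file_prefix='neutron-'):
    pass  # critical section protected across processes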