This page collects typical usage examples of the Python function neutron.plugins.ml2.db.get_locked_port_and_binding. If you are unsure what get_locked_port_and_binding does or how to call it, the curated code samples below should help.
The following 11 code examples of get_locked_port_and_binding are shown, ordered by popularity by default.
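Before the examples, here is the call pattern they all share: get_locked_port_and_binding(session, port_id) takes a database session and a port ID, and returns a (port, binding) pair, with both values being None when the port no longer exists (see Example 1). The helper below is a minimal illustrative sketch, not part of Neutron; the function and helper names around the call are assumptions, and the transaction handling simply mirrors the examples that follow.

from neutron.plugins.ml2 import db as ml2_db

def get_port_and_binding_or_none(session, port_id):
    # Illustrative helper (not part of Neutron): look up the port row and
    # its binding under a row lock, inside a transaction on the session.
    with session.begin(subtransactions=True):
        port_db, binding = ml2_db.get_locked_port_and_binding(session,
                                                              port_id)
        if port_db is None:
            # The port was deleted concurrently; callers typically bail out.
            return None, None
        return port_db, binding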
Example 1: test_get_locked_port_and_binding_result_not_found
def test_get_locked_port_and_binding_result_not_found(self):
    port_id = uuidutils.generate_uuid()
    port, binding = ml2_db.get_locked_port_and_binding(self.ctx.session,
                                                       port_id)
    self.assertIsNone(port)
    self.assertIsNone(binding)
Example 2: _sync_base
def _sync_base(self):
    ctx = context.get_admin_context()
    # Sync Networks
    for network in self.core_plugin.get_networks(ctx):
        mech_context = driver_context.NetworkContext(self.core_plugin, ctx,
                                                     network)
        try:
            self.driver.create_network_postcommit(mech_context)
        except Exception:
            LOG.warn(_LW("Create network postcommit failed for "
                         "network %s"), network['id'])
    # Sync Subnets
    for subnet in self.core_plugin.get_subnets(ctx):
        mech_context = driver_context.SubnetContext(self.core_plugin, ctx,
                                                    subnet)
        try:
            self.driver.create_subnet_postcommit(mech_context)
        except Exception:
            LOG.warn(_LW("Create subnet postcommit failed for"
                         " subnet %s"), subnet['id'])
    # Sync Ports (compute/gateway/dhcp)
    for port in self.core_plugin.get_ports(ctx):
        _, binding = l2_db.get_locked_port_and_binding(ctx.session,
                                                       port['id'])
        network = self.core_plugin.get_network(ctx, port['network_id'])
        mech_context = driver_context.PortContext(self.core_plugin, ctx,
                                                  port, network, binding,
                                                  [])
        try:
            self.driver.create_port_postcommit(mech_context)
        except Exception:
            LOG.warn(_LW("Create port postcommit failed for"
                         " port %s"), port['id'])
Example 3: _sync_ports
def _sync_ports(self, ctx):
    LOG.debug("OVN-NB Sync ports started")
    lports = self.ovn_api.get_all_logical_ports_ids()
    for port in self.core_plugin.get_ports(ctx):
        _, binding = l2_db.get_locked_port_and_binding(ctx.session,
                                                       port["id"])
        network = self.core_plugin.get_network(ctx, port["network_id"])
        port_context = driver_context.PortContext(self.core_plugin, ctx,
                                                  port, network, binding, [])
        try:
            if self.mode == SYNC_MODE_REPAIR:
                self.driver.create_port_postcommit(port_context)
            res = lports.pop(port_context.current["id"], None)
            if self.mode == SYNC_MODE_LOG:
                if res is None:
                    LOG.warn(_LW("Port found in Neutron but not in OVN "
                                 "DB, port_id=%s"),
                             port_context.current["id"])
        except RuntimeError:
            LOG.warn(_LW("Create port postcommit failed for"
                         " port %s"), port["id"])
    # Only delete logical port if it was previously created by neutron
    with self.ovn_api.transaction() as txn:
        for lport, ext_ids in lports.items():
            if ovn_const.OVN_PORT_NAME_EXT_ID_KEY in ext_ids:
                if self.mode == SYNC_MODE_REPAIR:
                    txn.add(self.ovn_api.delete_lport(lport))
                if self.mode == SYNC_MODE_LOG:
                    LOG.warn(_LW("Port found in OVN but not in Neutron, "
                                 "port_name=%s"),
                             ext_ids[ovn_const.OVN_PORT_NAME_EXT_ID_KEY])
    LOG.debug("OVN-NB Sync ports finished")
Example 4: test_get_locked_port_and_binding
def test_get_locked_port_and_binding(self):
    network_id = "foo-network-id"
    port_id = "foo-port-id"
    host = "fake_host"
    vif_type = portbindings.VIF_TYPE_UNBOUND
    self._setup_neutron_network(network_id)
    self._setup_neutron_port(network_id, port_id)
    self._setup_neutron_portbinding(port_id, vif_type, host)
    port, binding = ml2_db.get_locked_port_and_binding(self.ctx.session,
                                                       port_id)
    self.assertEqual(port_id, port.id)
    self.assertEqual(port_id, binding.port_id)
Example 5: update_port
def update_port(self, context, id, port):
    attrs = port['port']
    need_port_update_notify = False

    session = context.session

    # REVISIT: Serialize this operation with a semaphore to
    # prevent deadlock waiting to acquire a DB lock held by
    # another thread in the same process, leading to 'lock wait
    # timeout' errors.
    with contextlib.nested(lockutils.lock('db-access'),
                           session.begin(subtransactions=True)):
        port_db, binding = db.get_locked_port_and_binding(session, id)
        if not port_db:
            raise exc.PortNotFound(port_id=id)
        original_port = self._make_port_dict(port_db)
        updated_port = super(Ml2Plugin, self).update_port(context, id,
                                                          port)
        if addr_pair.ADDRESS_PAIRS in port['port']:
            need_port_update_notify |= (
                self.update_address_pairs_on_port(context, id, port,
                                                  original_port,
                                                  updated_port))
        need_port_update_notify |= self.update_security_group_on_port(
            context, id, port, original_port, updated_port)
        network = self.get_network(context, original_port['network_id'])
        need_port_update_notify |= self._update_extra_dhcp_opts_on_port(
            context, id, port, updated_port)
        mech_context = driver_context.PortContext(
            self, context, updated_port, network, binding,
            original_port=original_port)
        need_port_update_notify |= self._process_port_binding(
            mech_context, attrs)
        self.mechanism_manager.update_port_precommit(mech_context)

    # TODO(apech) - handle errors raised by update_port, potentially
    # by re-calling update_port with the previous attributes. For
    # now the error is propagated to the caller, which is expected to
    # either undo/retry the operation or delete the resource.
    self.mechanism_manager.update_port_postcommit(mech_context)

    need_port_update_notify |= self.is_security_group_member_updated(
        context, original_port, updated_port)
    if original_port['admin_state_up'] != updated_port['admin_state_up']:
        need_port_update_notify = True

    bound_port = self._bind_port_if_needed(
        mech_context,
        allow_notify=True,
        need_notify=need_port_update_notify)
    return bound_port._port
Example 6: _sync_base
def _sync_base(self):
    ctx = context.get_admin_context()
    # Sync Networks
    # Unroll to avoid unwanted additions during sync
    networks = [x for x in self.core_plugin.get_networks(ctx)]
    for network in networks:
        if (network['name'].startswith(
                constants.HOST_SNAT_NETWORK_PREFIX) or
                constants.APIC_SYNC_NETWORK == network['name']):
            continue
        mech_context = driver_context.NetworkContext(
            self.core_plugin, ctx, network)
        try:
            self.driver.create_network_postcommit(mech_context)
        except aexc.ReservedSynchronizationName as e:
            LOG.debug(e.message)
        except Exception as e:
            LOG.exception(e)
    # Sync Subnets
    subnets = [x for x in self.core_plugin.get_subnets(ctx)]
    for subnet in subnets:
        if constants.HOST_SNAT_POOL in subnet['name']:
            continue
        network = self.core_plugin.get_network(
            ctx, subnet['network_id'])
        mech_context = driver_context.SubnetContext(self.core_plugin, ctx,
                                                    subnet, network)
        try:
            self.driver.create_subnet_postcommit(mech_context)
        except Exception as e:
            LOG.exception(e)
    # Sync Ports (compute/gateway/dhcp)
    ports = [x for x in self.core_plugin.get_ports(ctx)]
    for port in ports:
        if constants.HOST_SNAT_POOL_PORT in port['name']:
            continue
        _, binding = l2_db.get_locked_port_and_binding(ctx.session,
                                                       port['id'])
        levels = l2_db.get_binding_levels(ctx.session, port['id'],
                                          binding.host)
        network = self.core_plugin.get_network(ctx, port['network_id'])
        mech_context = driver_context.PortContext(self.core_plugin, ctx,
                                                  port, network, binding,
                                                  levels)
        try:
            self.driver.create_port_postcommit(mech_context)
        except Exception as e:
            LOG.exception(e)
Example 7: _sync_base
def _sync_base(self):
    ctx = context.get_admin_context()
    # Sync Networks
    # Unroll to avoid unwanted additions during sync
    networks = [x for x in self.core_plugin.get_networks(ctx)]
    for network in networks:
        if constants.APIC_SYNC_NETWORK == network['name']:
            continue
        mech_context = driver_context.NetworkContext(
            self.core_plugin, ctx, network)
        try:
            self.driver.create_network_postcommit(mech_context)
        except aexc.ReservedSynchronizationName as e:
            LOG.debug(e.message)
        except Exception as e:
            LOG.warning(_LW("Create network postcommit failed for "
                            "network %(net_id)s: %(message)s"),
                        {'net_id': network['id'], 'message': e.message})
    # Sync Subnets
    subnets = [x for x in self.core_plugin.get_subnets(ctx)]
    for subnet in subnets:
        mech_context = driver_context.SubnetContext(self.core_plugin, ctx,
                                                    subnet)
        try:
            self.driver.create_subnet_postcommit(mech_context)
        except Exception as e:
            LOG.warning(_LW("Create subnet postcommit failed for "
                            "subnet %(sub_id)s: %(message)s"),
                        {'sub_id': subnet['id'], 'message': e.message})
    # Sync Ports (compute/gateway/dhcp)
    ports = [x for x in self.core_plugin.get_ports(ctx)]
    for port in ports:
        binding = l2_db.get_locked_port_and_binding(ctx.session,
                                                    port['id'])[1]
        levels = l2_db.get_binding_levels(ctx.session, port['id'],
                                          binding.host)
        network = self.core_plugin.get_network(ctx, port['network_id'])
        mech_context = driver_context.PortContext(self.core_plugin, ctx,
                                                  port, network, binding,
                                                  levels)
        try:
            self.driver.create_port_postcommit(mech_context)
        except Exception as e:
            LOG.warning(_LW("Create port postcommit failed for "
                            "port %(port_id)s: %(message)s"),
                        {'port_id': port['id'], 'message': e.message})
Example 8: delete_port
def delete_port(self, context, id, l3_port_check=True):
    LOG.debug(_("Deleting port %s"), id)
    l3plugin = manager.NeutronManager.get_service_plugins().get(
        service_constants.L3_ROUTER_NAT)
    if l3plugin and l3_port_check:
        l3plugin.prevent_l3_port_deletion(context, id)

    session = context.session
    # REVISIT: Serialize this operation with a semaphore to
    # prevent deadlock waiting to acquire a DB lock held by
    # another thread in the same process, leading to 'lock wait
    # timeout' errors.
    with contextlib.nested(lockutils.lock('db-access'),
                           session.begin(subtransactions=True)):
        port_db, binding = db.get_locked_port_and_binding(session, id)
        if not port_db:
            # the port existed when l3plugin.prevent_l3_port_deletion
            # was called but now is already gone
            LOG.debug(_("The port '%s' was deleted"), id)
            return
        port = self._make_port_dict(port_db)

        network = self.get_network(context, port['network_id'])
        mech_context = driver_context.PortContext(self, context, port,
                                                  network, binding)
        self.mechanism_manager.delete_port_precommit(mech_context)
        self._delete_port_security_group_bindings(context, id)
        LOG.debug(_("Calling base delete_port"))
        if l3plugin:
            router_ids = l3plugin.disassociate_floatingips(
                context, id, do_notify=False)
        super(Ml2Plugin, self).delete_port(context, id)

    # now that we've left db transaction, we are safe to notify
    if l3plugin:
        l3plugin.notify_routers_updated(context, router_ids)

    try:
        self.mechanism_manager.delete_port_postcommit(mech_context)
    except ml2_exc.MechanismDriverError:
        # TODO(apech) - One or more mechanism driver failed to
        # delete the port. Ideally we'd notify the caller of the
        # fact that an error occurred.
        LOG.error(_("mechanism_manager.delete_port_postcommit failed"))
    self.notify_security_groups_member_updated(context, port)
Example 9: test_return_on_concurrent_delete_and_binding
def test_return_on_concurrent_delete_and_binding(self):
    # create a port and delete it so we have an expired mechanism context
    with self.port() as port:
        plugin = manager.NeutronManager.get_plugin()
        binding = ml2_db.get_locked_port_and_binding(self.context.session,
                                                     port['port']['id'])[1]
        binding['host'] = 'test'
        mech_context = driver_context.PortContext(
            plugin, self.context, port['port'],
            plugin.get_network(self.context, port['port']['network_id']),
            binding)
    with contextlib.nested(
        mock.patch('neutron.plugins.ml2.plugin.'
                   'db.get_locked_port_and_binding',
                   return_value=(None, None)),
        mock.patch('neutron.plugins.ml2.plugin.Ml2Plugin._make_port_dict')
    ) as (glpab_mock, mpd_mock):
        plugin._bind_port_if_needed(mech_context)
        # called during deletion to get port
        self.assertTrue(glpab_mock.mock_calls)
        # should have returned before calling _make_port_dict
        self.assertFalse(mpd_mock.mock_calls)
Example 10: _commit_port_binding
def _commit_port_binding(self, plugin_context, port_id, orig_binding,
                         new_context):
    session = plugin_context.session
    new_binding = new_context._binding

    # After we've attempted to bind the port, we begin a
    # transaction, get the current port state, and decide whether
    # to commit the binding results.
    #
    # REVISIT: Serialize this operation with a semaphore to
    # prevent deadlock waiting to acquire a DB lock held by
    # another thread in the same process, leading to 'lock wait
    # timeout' errors.
    with contextlib.nested(lockutils.lock('db-access'),
                           session.begin(subtransactions=True)):
        # Get the current port state and build a new PortContext
        # reflecting this state as original state for subsequent
        # mechanism driver update_port_*commit() calls.
        port_db, cur_binding = db.get_locked_port_and_binding(session,
                                                              port_id)
        if not port_db:
            # The port has been deleted concurrently.
            return
        oport = self._make_port_dict(port_db)
        port = self._make_port_dict(port_db)
        network = self.get_network(plugin_context, port['network_id'])
        cur_context = driver_context.PortContext(
            self, plugin_context, port, network, cur_binding,
            original_port=oport)

        # Commit our binding results only if port has not been
        # successfully bound concurrently by another thread or
        # process and no binding inputs have been changed.
        commit = ((cur_binding.vif_type in
                   [portbindings.VIF_TYPE_UNBOUND,
                    portbindings.VIF_TYPE_BINDING_FAILED]) and
                  orig_binding.host == cur_binding.host and
                  orig_binding.vnic_type == cur_binding.vnic_type and
                  orig_binding.profile == cur_binding.profile)

        if commit:
            # Update the port's binding state with our binding
            # results.
            cur_binding.vif_type = new_binding.vif_type
            cur_binding.vif_details = new_binding.vif_details
            cur_binding.driver = new_binding.driver
            cur_binding.segment = new_binding.segment

            # REVISIT(rkukura): The binding:profile attribute is
            # supposed to be input-only, but the Mellanox driver
            # currently modifies it while binding. Remove this
            # code when the Mellanox driver has been updated to
            # use binding:vif_details instead.
            if cur_binding.profile != new_binding.profile:
                cur_binding.profile = new_binding.profile

            # Update PortContext's port dictionary to reflect the
            # updated binding state.
            self._update_port_dict_binding(port, cur_binding)

            # Update the port status if requested by the bound driver.
            if new_binding.segment and new_context._new_port_status:
                port_db.status = new_context._new_port_status
                port['status'] = new_context._new_port_status

            # Call the mechanism driver precommit methods, commit
            # the results, and call the postcommit methods.
            self.mechanism_manager.update_port_precommit(cur_context)

    if commit:
        self.mechanism_manager.update_port_postcommit(cur_context)

    # Continue, using the port state as of the transaction that
    # just finished, whether that transaction committed new
    # results or discovered concurrent port state changes.
    return (cur_context, commit)
Example 11: update_port
def update_port(self, context, id, port):
    do_commit = False
    attrs = port['port']
    need_port_update_notify = False

    LOG.info('Attempting port update %s: %s' % (id, port))

    session = context.session
    # REVISIT: Serialize this operation with a semaphore to
    # prevent deadlock waiting to acquire a DB lock held by
    # another thread in the same process, leading to 'lock wait
    # timeout' errors.
    with contextlib.nested(lockutils.lock('db-access'),
                           session.begin(subtransactions=True)):
        port_db, binding = ml2_db.get_locked_port_and_binding(session, id)
        if not port_db:
            raise exc.PortNotFound(port_id=id)
        original_port = self._make_port_dict(port_db)

        # Process extension data
        self._find_port_dict_extensions(
            original_port, None, session=session)

        updated_port = super(plugin.Ml2Plugin, self).update_port(
            context, id, port)

        # Process extension data
        port_ext = self._update_port_ext(
            original_port, updated_port, port, session=session)
        switchports = self._update_switchports(
            updated_port, port, session=session)
        self._find_port_dict_extensions(
            updated_port, None, port_ext=port_ext,
            switchports=switchports, session=session)

        # We only want to commit on a state change
        if original_port["commit"] != updated_port["commit"]:
            do_commit = True

        # If we are transitioning to active, validate
        if not original_port["commit"] and updated_port["commit"]:
            self._validate_port_can_commit(
                updated_port, None, session=session)

        if addr_pair.ADDRESS_PAIRS in port['port']:
            need_port_update_notify |= (
                self.update_address_pairs_on_port(context, id, port,
                                                  original_port,
                                                  updated_port))
        need_port_update_notify |= self.update_security_group_on_port(
            context, id, port, original_port, updated_port)
        network = self.get_network(context, original_port['network_id'])
        need_port_update_notify |= self._update_extra_dhcp_opts_on_port(
            context, id, port, updated_port)
        mech_context = driver_context.PortContext(
            self, context, updated_port, network, binding,
            original_port=original_port)
        need_port_update_notify |= self._process_port_binding(
            mech_context, attrs)
        self.mechanism_manager.update_port_precommit(mech_context)

    # TODO(apech) - handle errors raised by update_port, potentially
    # by re-calling update_port with the previous attributes. For
    # now the error is propagated to the caller, which is expected to
    # either undo/retry the operation or delete the resource.
    if do_commit:
        self.mechanism_manager.update_port_postcommit(mech_context)

    need_port_update_notify |= self.is_security_group_member_updated(
        context, original_port, updated_port)
    if original_port['admin_state_up'] != updated_port['admin_state_up']:
        need_port_update_notify = True

    if need_port_update_notify:
        self._notify_port_updated(mech_context)

    bound_port = self._bind_port_if_needed(
        mech_context,
        allow_notify=True,
        need_notify=need_port_update_notify)
    return bound_port._port