This article collects typical usage examples of the Python function neutron.db.provisioning_blocks.provisioning_complete. If you are wondering what provisioning_complete does and how to call it, the curated examples below should help.
Shown below are 15 code examples of provisioning_complete, ordered by popularity.
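Before the individual examples, the pattern they all share is worth stating once: a component places a provisioning block on an object with add_provisioning_component, and provisioning_complete removes that component's block; only when the last block is gone is a PROVISIONING_COMPLETE event published for the object. The sketch below illustrates that flow. It is a minimal sketch, not code from the examples: it assumes an existing admin context (ctx) and port UUID (port_id), and the import paths are assumptions that vary between Neutron releases.

# Minimal sketch of the provisioning-block flow. Assumes `ctx` is an admin
# context and `port_id` is the UUID of an existing port. Import paths are
# assumptions (neutron.callbacks vs. neutron_lib.callbacks by release).
from neutron.callbacks import registry
from neutron.callbacks import resources
from neutron.db import provisioning_blocks as pb


def _provisioned(resource, event, trigger, **kwargs):
    # Called only once every registered component has reported completion.
    print("port %s is fully provisioned" % kwargs['object_id'])


registry.subscribe(_provisioned, resources.PORT, pb.PROVISIONING_COMPLETE)

# Two components block the port; the event fires only after both finish.
pb.add_provisioning_component(ctx, port_id, resources.PORT, 'entity1')
pb.add_provisioning_component(ctx, port_id, resources.PORT, 'entity2')

pb.provisioning_complete(ctx, port_id, resources.PORT, 'entity1')  # no event yet
pb.provisioning_complete(ctx, port_id, resources.PORT, 'entity2')  # event fires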
Example 1: test_adding_component_idempotent
def test_adding_component_idempotent(self):
    for i in range(5):
        pb.add_provisioning_component(self.ctx, self.port.id,
                                      resources.PORT, 'entity1')
    pb.provisioning_complete(self.ctx, self.port.id,
                             resources.PORT, 'entity1')
    self.assertTrue(self.provisioned.called)
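Note that the self.provisioned referenced by these tests is a mock callback subscribed in the test class's setUp, which this listing does not include. Based on how the callback is asserted in Example 5, that fixture presumably looks roughly like the sketch below; it is an assumption, not the verbatim fixture.

# Presumed test fixture (not part of this listing): subscribe a mock so the
# tests can assert whether PROVISIONING_COMPLETE was published for ports.
self.provisioned = mock.Mock()
registry.subscribe(self.provisioned, resources.PORT,
                   pb.PROVISIONING_COMPLETE)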
Example 2: update_device_up
def update_device_up(self, rpc_context, **kwargs):
    """Device is up on agent."""
    agent_id = kwargs.get('agent_id')
    device = kwargs.get('device')
    host = kwargs.get('host')
    LOG.debug("Device %(device)s up at agent %(agent_id)s",
              {'device': device, 'agent_id': agent_id})
    plugin = manager.NeutronManager.get_plugin()
    port_id = plugin._device_to_port_id(rpc_context, device)
    port = plugin.port_bound_to_host(rpc_context, port_id, host)
    if host and not port:
        LOG.debug("Device %(device)s not bound to the"
                  " agent host %(host)s",
                  {'device': device, 'host': host})
        return
    if port and port['device_owner'] == n_const.DEVICE_OWNER_DVR_INTERFACE:
        # NOTE(kevinbenton): we have to special case DVR ports because of
        # the special multi-binding status update logic they have that
        # depends on the host
        plugin.update_port_status(rpc_context, port_id,
                                  n_const.PORT_STATUS_ACTIVE, host)
    else:
        # _device_to_port_id may have returned a truncated UUID if the
        # agent did not provide a full one (e.g. Linux Bridge case). We
        # need to look up the full one before calling provisioning_complete
        if not port:
            port = ml2_db.get_port(rpc_context.session, port_id)
            if not port:
                # port doesn't exist, no need to add a provisioning block
                return
        provisioning_blocks.provisioning_complete(
            rpc_context, port['id'], resources.PORT,
            provisioning_blocks.L2_AGENT_ENTITY)
Example 3: set_port_status_up
def set_port_status_up(self, port_id):
    # Port provisioning is complete now that OVN has reported
    # that the port is up.
    LOG.info(_LI("OVN reports status up for port: %s"), port_id)
    provisioning_blocks.provisioning_complete(
        n_context.get_admin_context(), port_id, resources.PORT,
        provisioning_blocks.L2_AGENT_ENTITY)
Example 4: _pull_missed_statuses
def _pull_missed_statuses(self):
    LOG.debug("starting to pull pending statuses...")
    plugin = directory.get_plugin()
    filter = {"status": [n_const.PORT_STATUS_DOWN],
              "vif_type": ["unbound"]}
    ports = plugin.get_ports(context.get_admin_context(), filter)
    if not ports:
        LOG.debug("no down ports found, done")
        return

    port_fetch_url = utils.get_odl_url(self.PORT_PATH)
    client = odl_client.OpenDaylightRestClient.create_client(
        url=port_fetch_url)

    for port in ports:
        port_id = port["id"]
        response = client.get(port_id)
        if response.status_code != 200:
            LOG.warning("Non-200 response code %s", str(response))
            continue
        odl_status = response.json()['port'][0]['status']
        if odl_status == n_const.PORT_STATUS_ACTIVE:
            # for now we only support transition from DOWN->ACTIVE
            # See https://bugs.launchpad.net/networking-odl/+bug/1686023
            provisioning_blocks.provisioning_complete(
                context.get_admin_context(),
                port_id, resources.PORT,
                provisioning_blocks.L2_AGENT_ENTITY)
    LOG.debug("done pulling pending statuses")
Example 5: test_provisioning_of_correct_item
def test_provisioning_of_correct_item(self):
    port2 = self._make_port()
    pb.add_provisioning_component(self.ctx, self.port.id, resources.PORT,
                                  'entity1')
    pb.provisioning_complete(self.ctx, port2.id,
                             resources.PORT, 'entity1')
    self.provisioned.assert_called_once_with(
        resources.PORT, pb.PROVISIONING_COMPLETE, mock.ANY,
        context=self.ctx, object_id=port2.id)
Example 6: set_port_status_up
def set_port_status_up(self, port_id):
    # Port provisioning is complete now that OVN has reported that the
    # port is up. Any provisioning block (possibly added during port
    # creation or when OVN reports that the port is down) must be removed.
    LOG.info(_LI("OVN reports status up for port: %s"), port_id)
    provisioning_blocks.provisioning_complete(
        n_context.get_admin_context(),
        port_id,
        resources.PORT,
        provisioning_blocks.L2_AGENT_ENTITY)
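Examples 2, 3, and 6 only report completion; the block they clear is installed earlier, typically when the port is created or goes down, as the comment in Example 6 notes. Below is a minimal sketch of that counterpart call, assuming the ML2-style pattern in which the plugin blocks the port until the L2 agent reports it up; the surrounding variable names are illustrative.

# Sketch of the counterpart call that installs the block which
# provisioning_complete later removes. `context` and `port` are assumed to
# come from the plugin's port create/update path.
provisioning_blocks.add_provisioning_component(
    context, port['id'], resources.PORT,
    provisioning_blocks.L2_AGENT_ENTITY)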
Example 7: test_not_provisioned_until_final_component_complete
def test_not_provisioned_until_final_component_complete(self):
    pb.add_provisioning_component(self.ctx, self.port.id, resources.PORT,
                                  'entity1')
    pb.add_provisioning_component(self.ctx, self.port.id, resources.PORT,
                                  'entity2')
    pb.provisioning_complete(self.ctx, self.port.id, resources.PORT,
                             'entity1')
    self.assertFalse(self.provisioned.called)
    pb.provisioning_complete(self.ctx, self.port.id, resources.PORT,
                             'entity2')
    self.assertTrue(self.provisioned.called)
Example 8: test_adding_component_for_new_resource_type
def test_adding_component_for_new_resource_type(self):
    provisioned = mock.Mock()
    registry.subscribe(provisioned, 'NETWORK', pb.PROVISIONING_COMPLETE)
    net = self._make_net()
    # expect failure because the model was not registered for the type
    with testtools.ExpectedException(RuntimeError):
        pb.add_provisioning_component(self.ctx, net.id, 'NETWORK', 'ent')
    pb.add_model_for_resource('NETWORK', models_v2.Network)
    pb.add_provisioning_component(self.ctx, net.id, 'NETWORK', 'ent')
    pb.provisioning_complete(self.ctx, net.id, 'NETWORK', 'ent')
    self.assertTrue(provisioned.called)
Example 9: test_is_object_blocked
def test_is_object_blocked(self):
    pb.add_provisioning_component(self.ctx, self.port.id, resources.PORT,
                                  'e1')
    self.assertTrue(pb.is_object_blocked(self.ctx, self.port.id,
                                         resources.PORT))
    self.assertFalse(pb.is_object_blocked(self.ctx, 'xyz',
                                          resources.PORT))
    pb.provisioning_complete(self.ctx, self.port.id,
                             resources.PORT, 'e1')
    self.assertFalse(pb.is_object_blocked(self.ctx, self.port.id,
                                          resources.PORT))
Example 10: test_remove_provisioning_component
def test_remove_provisioning_component(self):
    pb.add_provisioning_component(self.ctx, self.port.id, resources.PORT,
                                  'e1')
    pb.add_provisioning_component(self.ctx, self.port.id, resources.PORT,
                                  'e2')
    self.assertTrue(pb.remove_provisioning_component(
        self.ctx, self.port.id, resources.PORT, 'e1'))
    self.assertFalse(self.provisioned.called)
    pb.provisioning_complete(self.ctx, self.port.id,
                             resources.PORT, 'other')
    self.assertFalse(self.provisioned.called)
    pb.provisioning_complete(self.ctx, self.port.id,
                             resources.PORT, 'e2')
    self.assertTrue(self.provisioned.called)
Example 11: update_device_up
def update_device_up(self, rpc_context, **kwargs):
    """Device is up on agent."""
    agent_id = kwargs.get('agent_id')
    device = kwargs.get('device')
    host = kwargs.get('host')
    LOG.debug("Device %(device)s up at agent %(agent_id)s",
              {'device': device, 'agent_id': agent_id})
    plugin = manager.NeutronManager.get_plugin()
    port_id = plugin._device_to_port_id(rpc_context, device)
    port = plugin.port_bound_to_host(rpc_context, port_id, host)
    if host and not port:
        LOG.debug("Device %(device)s not bound to the"
                  " agent host %(host)s",
                  {'device': device, 'host': host})
        # this might mean that a VM is in the process of live migration
        # and vif was plugged on the destination compute node;
        # need to notify nova explicitly
        try:
            port = plugin._get_port(rpc_context, port_id)
        except exceptions.PortNotFound:
            LOG.debug("Port %s not found, will not notify nova.", port_id)
        else:
            if port.device_owner.startswith(
                    n_const.DEVICE_OWNER_COMPUTE_PREFIX):
                plugin.nova_notifier.notify_port_active_direct(port)
        return
    if port and port['device_owner'] == n_const.DEVICE_OWNER_DVR_INTERFACE:
        # NOTE(kevinbenton): we have to special case DVR ports because of
        # the special multi-binding status update logic they have that
        # depends on the host
        plugin.update_port_status(rpc_context, port_id,
                                  n_const.PORT_STATUS_ACTIVE, host)
    else:
        # _device_to_port_id may have returned a truncated UUID if the
        # agent did not provide a full one (e.g. Linux Bridge case). We
        # need to look up the full one before calling provisioning_complete
        if not port:
            port = ml2_db.get_port(rpc_context.session, port_id)
            if not port:
                # port doesn't exist, no need to add a provisioning block
                return
        provisioning_blocks.provisioning_complete(
            rpc_context, port['id'], resources.PORT,
            provisioning_blocks.L2_AGENT_ENTITY)
Example 12: update_port_status_to_active
def update_port_status_to_active(self, port, rpc_context, port_id, host):
    plugin = directory.get_plugin()
    if port and port['device_owner'] == n_const.DEVICE_OWNER_DVR_INTERFACE:
        # NOTE(kevinbenton): we have to special case DVR ports because of
        # the special multi-binding status update logic they have that
        # depends on the host
        plugin.update_port_status(rpc_context, port_id,
                                  n_const.PORT_STATUS_ACTIVE, host)
    else:
        # _device_to_port_id may have returned a truncated UUID if the
        # agent did not provide a full one (e.g. Linux Bridge case). We
        # need to look up the full one before calling provisioning_complete
        if not port:
            port = ml2_db.get_port(rpc_context, port_id)
            if not port:
                # port doesn't exist, no need to add a provisioning block
                return
        provisioning_blocks.provisioning_complete(
            rpc_context, port['id'], resources.PORT,
            provisioning_blocks.L2_AGENT_ENTITY)
Example 13: _process_websocket_recv
def _process_websocket_recv(self, payload, reconnect):
    # Callback for websocket notification
    LOG.debug("Websocket notification for port status update")
    for event in odl_ws_client.EventDataParser.get_item(payload):
        operation, path, data = event.get_fields()
        if ((operation in [event.OPERATION_UPDATE,
                           event.OPERATION_CREATE])):
            port_id = event.extract_field(path, "neutron:uuid")
            port_id = str(port_id).strip("'")
            status_field = data.get('status')
            if status_field is not None:
                status = status_field.get('content')
                LOG.debug("Update port for port id %s %s", port_id, status)
                # for now we only support transition from DOWN->ACTIVE
                # https://bugs.launchpad.net/networking-odl/+bug/1686023
                if status == n_const.PORT_STATUS_ACTIVE:
                    provisioning_blocks.provisioning_complete(
                        context.get_admin_context(),
                        port_id, resources.PORT,
                        provisioning_blocks.L2_AGENT_ENTITY)
        if operation == event.OPERATION_DELETE:
            LOG.debug("PortStatus: Ignoring delete operation")
Example 14: test_not_provisioned_when_wrong_component_reports
def test_not_provisioned_when_wrong_component_reports(self):
    pb.add_provisioning_component(self.ctx, self.port.id, resources.PORT,
                                  'entity1')
    pb.provisioning_complete(self.ctx, self.port.id,
                             resources.PORT, 'entity2')
    self.assertFalse(self.provisioned.called)
Example 15: test_provisioned_after_component_finishes
def test_provisioned_after_component_finishes(self):
    pb.add_provisioning_component(self.ctx, self.port.id, resources.PORT,
                                  'entity')
    pb.provisioning_complete(self.ctx, self.port.id, resources.PORT,
                             'entity')
    self.assertTrue(self.provisioned.called)