本文整理汇总了Python中neutron.context.get_admin_context函数的典型用法代码示例。如果您正苦于以下问题:Python get_admin_context函数的具体用法?Python get_admin_context怎么用?Python get_admin_context使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了get_admin_context函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _setup_core_resources
def _setup_core_resources(self):
    """Create a test network and subnet through the core plugin.

    Stores the created resources on ``self._network``, ``self._subnet``
    and ``self._subnet_id`` for use by the rest of the test.
    """
    plugin = neutron.manager.NeutronManager.get_plugin()
    network_body = {
        'network': {
            'tenant_id': self._tenant_id,
            'name': 'test net',
            'admin_state_up': True,
            'shared': False,
        }
    }
    self._network = plugin.create_network(
        q_context.get_admin_context(), network_body)
    subnet_body = {
        'subnet': {
            'network_id': self._network['id'],
            'name': 'test subnet',
            'cidr': '192.168.1.0/24',
            'ip_version': 4,
            'gateway_ip': '192.168.1.1',
            # Let the plugin choose pools/DNS/routes defaults.
            'allocation_pools': attributes.ATTR_NOT_SPECIFIED,
            'dns_nameservers': attributes.ATTR_NOT_SPECIFIED,
            'host_routes': attributes.ATTR_NOT_SPECIFIED,
            'enable_dhcp': True,
        }
    }
    self._subnet = plugin.create_subnet(
        q_context.get_admin_context(), subnet_body)
    self._subnet_id = self._subnet['id']
示例2: test_create_delete_l3_policy_with_routers
def test_create_delete_l3_policy_with_routers(self):
    """An L3 policy created with routers maps each router back to it,
    exposes them via the show API, and can be deleted cleanly.
    """
    with self.router() as router1, self.router() as router2:
        router_ids = [router1['router']['id'], router2['router']['id']]
        l3p = self.create_l3_policy(routers=router_ids)
        l3p_id = l3p['l3_policy']['id']
        # Every router in the policy must resolve back to the policy id.
        for rid in router_ids:
            mapped_id = self._gbp_plugin.get_l3p_id_from_router_id(
                nctx.get_admin_context(), rid)
            self.assertEqual(l3p_id, mapped_id)
        self.assertEqual(sorted(router_ids),
                         sorted(l3p['l3_policy']['routers']))
        show_req = self.new_show_request('l3_policies', l3p_id,
                                         fmt=self.fmt)
        shown = self.deserialize(self.fmt,
                                 show_req.get_response(self.ext_api))
        self.assertEqual(sorted(router_ids),
                         sorted(shown['l3_policy']['routers']))
        delete_req = self.new_delete_request('l3_policies', l3p_id)
        delete_res = delete_req.get_response(self.ext_api)
        self.assertEqual(webob.exc.HTTPNoContent.code,
                         delete_res.status_int)
示例3: _gen_port
def _gen_port(self):
    """Create a test network and a port on it; store the port on self."""
    network_body = {
        "network": {
            "name": "pecannet",
            "tenant_id": "tenid",
            "shared": False,
            "admin_state_up": True,
            "status": "ACTIVE",
        }
    }
    net_id = self.plugin.create_network(
        context.get_admin_context(), network_body)["id"]
    port_body = {
        "port": {
            "tenant_id": "tenid",
            "network_id": net_id,
            # No explicit fixed IPs: let the plugin allocate.
            "fixed_ips": n_const.ATTR_NOT_SPECIFIED,
            "mac_address": "00:11:22:33:44:55",
            "admin_state_up": True,
            "device_id": "FF",
            "device_owner": "pecan",
            "name": "pecan",
        }
    }
    self.port = self.plugin.create_port(context.get_admin_context(),
                                        port_body)
示例4: test_delete_listener
def test_delete_listener(self):
    """Deleting a listener notifies the agent with PENDING_DELETE and
    removes the listener row while the loadbalancer stays ACTIVE.
    """
    with self.loadbalancer(no_delete=True) as lb:
        lb_id = lb['loadbalancer']['id']
        self._update_status(models.LoadBalancer, constants.ACTIVE, lb_id)
        with self.listener(loadbalancer_id=lb_id,
                           no_delete=True) as listener:
            listener_id = listener['listener']['id']
            self._update_status(models.LoadBalancer, constants.ACTIVE,
                                lb_id)
            admin_ctx = context.get_admin_context()
            self.plugin_instance.delete_listener(admin_ctx, listener_id)
            # Inspect the first agent-API call: (context, listener, host).
            first_call_args = (
                self.mock_api.delete_listener.call_args_list[0][0])
            _, called_listener, called_host = first_call_args
            self.assertEqual(listener_id, called_listener.id)
            self.assertEqual('host', called_host)
            self.assertEqual(constants.PENDING_DELETE,
                             called_listener.provisioning_status)
            admin_ctx = context.get_admin_context()
            lb_db = self.plugin_instance.db.get_loadbalancer(admin_ctx,
                                                             lb_id)
            self.assertEqual(constants.ACTIVE,
                             lb_db.provisioning_status)
            self.assertRaises(
                loadbalancerv2.EntityNotFound,
                self.plugin_instance.db.get_listener, admin_ctx,
                listener_id)
示例5: _get_profile_id
def _get_profile_id(cls, p_type, resource, name):
    """Return the id of the uniquely-named profile owned by the L3
    service tenant, or None when it is missing or ambiguous.

    :param p_type: 'net_profile' to search network profiles, anything
                   else searches policy profiles
    :param resource: human-readable resource label used in error logs
    :param name: profile name to look up
    """
    try:
        tenant_id = manager.NeutronManager.get_service_plugins()[
            constants.L3_ROUTER_NAT].l3_tenant_id()
    except AttributeError:
        # L3 plugin not loaded or lacks l3_tenant_id.
        return
    if tenant_id is None:
        return
    core_plugin = manager.NeutronManager.get_plugin()
    # Pick the lookup function once instead of duplicating the call.
    if p_type == 'net_profile':
        fetch_profiles = core_plugin.get_network_profiles
    else:
        fetch_profiles = core_plugin.get_policy_profiles
    profiles = fetch_profiles(
        n_context.get_admin_context(),
        {'tenant_id': [tenant_id], 'name': [name]},
        ['id'])
    if len(profiles) == 1:
        return profiles[0]['id']
    elif len(profiles) > 1:
        # Profile must have a unique name.
        LOG.error(_LE('The %(resource)s %(name)s does not have unique '
                      'name. Please refer to admin guide and create one.'),
                  {'resource': resource, 'name': name})
    else:
        # Profile has not been created.
        LOG.error(_LE('There is no %(resource)s %(name)s. Please refer to '
                      'admin guide and create one.'),
                  {'resource': resource, 'name': name})
示例6: test_update_member_with_vip
def test_update_member_with_vip(self):
    """Updating a member of a pool that has a vip triggers two workflow
    BaseCreate REST calls and leaves the member in ACTIVE status.
    """
    with self.subnet() as subnet:
        with self.pool(provider='radware') as p:
            with self.member(pool_id=p['pool']['id']) as member:
                with self.vip(pool=p, subnet=subnet):
                    self.plugin_instance.update_member(
                        context.get_admin_context(),
                        member['member']['id'], member
                    )
                    calls = [
                        mock.call(
                            'POST', '/api/workflow/' + p['pool']['id'] +
                            '/action/BaseCreate',
                            mock.ANY, driver.TEMPLATE_HEADER
                        ),
                        mock.call(
                            'POST', '/api/workflow/' + p['pool']['id'] +
                            '/action/BaseCreate',
                            mock.ANY, driver.TEMPLATE_HEADER
                        )
                    ]
                    self.driver_rest_call_mock.assert_has_calls(
                        calls, any_order=True)
                    # Fetch the member once; the original issued the same
                    # get_member call twice and discarded the first result.
                    updated_member = self.plugin_instance.get_member(
                        context.get_admin_context(),
                        member['member']['id']
                    )
                    self.assertEqual(updated_member['status'],
                                     constants.ACTIVE)
示例7: test_listener_deployed
def test_listener_deployed(self):
    """The loadbalancer_deployed callback flips both the loadbalancer
    and its listener from PENDING_* to ACTIVE provisioning status.
    """
    with self.loadbalancer(no_delete=True) as loadbalancer:
        lb_id = loadbalancer['loadbalancer']['id']
        self.plugin_instance.db.update_loadbalancer_provisioning_status(
            context.get_admin_context(), lb_id)
        with self.listener(loadbalancer_id=lb_id) as listener:
            listener_id = listener['listener']['id']
            ctx = context.get_admin_context()
            lb_db = self.plugin_instance.db.get_loadbalancer(ctx, lb_id)
            self.assertEqual('PENDING_UPDATE', lb_db.provisioning_status)
            listener_db = self.plugin_instance.db.get_listener(
                ctx, listener_id)
            self.assertEqual('PENDING_CREATE',
                             listener_db.provisioning_status)
            self.callbacks.loadbalancer_deployed(ctx, lb_id)
            # Re-read both rows after the callback.
            lb_db = self.plugin_instance.db.get_loadbalancer(ctx, lb_id)
            self.assertEqual('ACTIVE', lb_db.provisioning_status)
            listener_db = self.plugin_instance.db.get_listener(
                ctx, listener_id)
            self.assertEqual('ACTIVE', listener_db.provisioning_status)
示例8: _enforce_device_owner_not_router_intf_or_device_id
def _enforce_device_owner_not_router_intf_or_device_id(self, context, device_owner, device_id, tenant_id):
    """Prevent tenants from replacing the device id of router ports with
    a router uuid belonging to another tenant.
    """
    if device_owner not in constants.ROUTER_INTERFACE_OWNERS:
        return
    # Admins are trusted; an empty device_id cannot reference a router.
    if context.is_admin or not device_id:
        return
    # Check that device_id does not match another tenant's router.
    if hasattr(self, "get_router"):
        try:
            router = self.get_router(ctx.get_admin_context(), device_id)
        except l3.RouterNotFound:
            return
    else:
        l3plugin = manager.NeutronManager.get_service_plugins().get(
            service_constants.L3_ROUTER_NAT)
        if not l3plugin:
            # raise as extension doesn't support L3 anyways.
            raise n_exc.DeviceIDNotOwnedByTenant(device_id=device_id)
        try:
            router = l3plugin.get_router(ctx.get_admin_context(),
                                         device_id)
        except l3.RouterNotFound:
            return
    if tenant_id != router["tenant_id"]:
        raise n_exc.DeviceIDNotOwnedByTenant(device_id=device_id)
示例9: test_get_pip
def test_get_pip(self):
    """Call _get_pip twice and verify that a Port is created once."""
    existing_port = {'fixed_ips': [{'subnet_id': '10.10.10.10',
                                    'ip_address': '11.11.11.11'}]}
    expected_port_data = {
        'tenant_id': 'tenant_id',
        'name': 'port_name',
        'network_id': 'network_id',
        'mac_address': attributes.ATTR_NOT_SPECIFIED,
        'admin_state_up': False,
        'device_id': '',
        'device_owner': 'neutron:' + constants.LOADBALANCER,
        'fixed_ips': [{'subnet_id': '10.10.10.10'}]
    }
    core = self.plugin_instance._core_plugin
    core.get_ports = mock.Mock(return_value=[])
    core.create_port = mock.Mock(return_value=existing_port)
    rad_driver = self.plugin_instance.drivers['radware']
    # First call: no matching port exists, so one must be created.
    rad_driver._get_pip(context.get_admin_context(),
                        'tenant_id', 'port_name',
                        'network_id', '10.10.10.10')
    core.get_ports.assert_called_once_with(
        mock.ANY, filters={'name': ['port_name']})
    core.create_port.assert_called_once_with(
        mock.ANY, {'port': expected_port_data})
    core.create_port.reset_mock()
    core.get_ports.reset_mock()
    core.get_ports.return_value = [existing_port]
    # Second call: the port is found, so no new port is created.
    rad_driver._get_pip(context.get_admin_context(),
                        'tenant_id', 'port_name',
                        'network_id', '10.10.10.10')
    core.get_ports.assert_called_once_with(
        mock.ANY, filters={'name': ['port_name']})
    self.assertFalse(core.create_port.called)
示例10: test_delete_vip
def test_delete_vip(self):
    """Create then delete a vip; verify the workflow DELETE REST call
    and that the vip is gone afterwards.
    """
    with self.subnet() as subnet:
        with self.pool(provider='radware',
                       do_delete=False,
                       subnet_id=subnet['subnet']['id']) as pool:
            pool_id = pool['pool']['id']
            vip_data = {
                'name': 'vip1',
                'subnet_id': subnet['subnet']['id'],
                'pool_id': pool_id,
                'description': '',
                'protocol_port': 80,
                'protocol': 'HTTP',
                'connection_limit': -1,
                'admin_state_up': True,
                'status': constants.PENDING_CREATE,
                'tenant_id': self._tenant_id,
                'session_persistence': ''
            }
            vip = self.plugin_instance.create_vip(
                context.get_admin_context(), {'vip': vip_data})
            self.plugin_instance.delete_vip(
                context.get_admin_context(), vip['id'])
            expected_calls = [
                mock.call('DELETE', '/api/workflow/' + pool_id,
                          None, None)
            ]
            self.driver_rest_call_mock.assert_has_calls(
                expected_calls, any_order=True)
            self.assertRaises(loadbalancer.VipNotFound,
                              self.plugin_instance.get_vip,
                              context.get_admin_context(), vip['id'])
示例11: test_create_hm_with_vip
def test_create_hm_with_vip(self):
    """Associating a health monitor with a pool that has a vip fires
    two workflow BaseCreate REST calls and ends in ACTIVE status.
    """
    with self.subnet() as subnet:
        with self.health_monitor() as hm:
            with self.pool(provider='radware',
                           subnet_id=subnet['subnet']['id']) as pool:
                with self.vip(pool=pool, subnet=subnet):
                    pool_id = pool['pool']['id']
                    self.plugin_instance.create_pool_health_monitor(
                        context.get_admin_context(), hm, pool_id)
                    # Test REST calls: the same BaseCreate action is
                    # expected twice.
                    base_create_call = mock.call(
                        'POST', '/api/workflow/' + pool_id +
                        '/action/BaseCreate',
                        mock.ANY, driver.TEMPLATE_HEADER
                    )
                    self.driver_rest_call_mock.assert_has_calls(
                        [base_create_call, base_create_call],
                        any_order=True)
                    phm = self.plugin_instance.get_pool_health_monitor(
                        context.get_admin_context(),
                        hm['health_monitor']['id'], pool_id)
                    self.assertEqual(phm['status'], constants.ACTIVE)
示例12: test_delete_healthmonitor
def test_delete_healthmonitor(self):
    """Deleting a health monitor via the API removes its DB record."""
    admin_ctx = context.get_admin_context()
    self._fake_router_edge_mapping()
    with contextlib.nested(
        self.subnet(),
        self.pool(),
        self.health_monitor(no_delete=True)
    ) as (subnet, pool, health_mon):
        self._set_net_external(subnet['subnet']['network_id'])
        with self.vip(
            router_id=self.router_id, pool=pool,
            subnet=subnet):
            self.plugin.create_pool_health_monitor(
                context.get_admin_context(),
                health_mon, pool['pool']['id']
            )
            hm_id = health_mon['health_monitor']['id']
            req = self.new_delete_request('health_monitors', hm_id)
            res = req.get_response(self.ext_api)
            self.assertEqual(res.status_int, 204)
            # The row must be gone from the database.
            remaining = (admin_ctx.session.query(ldb.HealthMonitor).
                         filter_by(id=hm_id).first())
            self.assertIsNone(remaining)
示例13: reschedule_resources_from_down_agents
def reschedule_resources_from_down_agents(self, agent_type,
                                          get_down_bindings,
                                          agent_id_attr,
                                          resource_id_attr,
                                          resource_name,
                                          reschedule_resource,
                                          rescheduling_failed):
    """Reschedule resources from down neutron agents
    if admin state is up.
    """
    agent_dead_limit = self.agent_dead_limit_seconds()
    self.wait_down_agents(agent_type, agent_dead_limit)
    context = ncontext.get_admin_context()
    try:
        agents_back_online = set()
        for binding in get_down_bindings(context, agent_dead_limit):
            agent_id = getattr(binding, agent_id_attr)
            resource_id = getattr(binding, resource_id_attr)
            if agent_id in agents_back_online:
                continue
            # we need new context to make sure we use different DB
            # transaction - otherwise we may fetch same agent record
            # each time due to REPEATABLE_READ isolation level
            context = ncontext.get_admin_context()
            agent = self._get_agent(context, agent_id)
            if agent.is_active:
                agents_back_online.add(agent_id)
                continue
            LOG.warning(_LW(
                "Rescheduling %(resource_name)s %(resource)s from agent "
                "%(agent)s because the agent did not report to the server "
                "in the last %(dead_time)s seconds."),
                {'resource_name': resource_name,
                 'resource': resource_id,
                 'agent': agent_id,
                 'dead_time': agent_dead_limit})
            try:
                reschedule_resource(context, resource_id)
            except (rescheduling_failed, oslo_messaging.RemoteError):
                # Catch individual rescheduling errors here
                # so one broken one doesn't stop the iteration.
                LOG.exception(_LE("Failed to reschedule %(resource_name)s "
                                  "%(resource)s"),
                              {'resource_name': resource_name,
                               'resource': resource_id})
    except Exception:
        # we want to be thorough and catch whatever is raised
        # to avoid loop abortion
        LOG.exception(_LE("Exception encountered during %(resource_name)s "
                          "rescheduling."),
                      {'resource_name': resource_name})
示例14: reschedule_routers_from_down_agents
def reschedule_routers_from_down_agents(self):
    """Reschedule routers from down l3 agents if admin state is up."""
    # How long an agent may go without a heartbeat before it is
    # considered dead.
    agent_dead_limit = self.agent_dead_limit_seconds()
    # Give agents a chance to report in before acting on stale state.
    self.wait_down_agents('L3', agent_dead_limit)
    cutoff = self.get_cutoff_time(agent_dead_limit)
    context = n_ctx.get_admin_context()
    try:
        # Router/agent bindings whose agent missed the heartbeat cutoff
        # while still administratively up.  HA routers are excluded
        # (ha == false OR ha is NULL via the outer join) — presumably
        # because HA failover handles them separately; confirm against
        # the HA scheduler code.
        down_bindings = (
            context.session.query(RouterL3AgentBinding).
            join(agents_db.Agent).
            filter(agents_db.Agent.heartbeat_timestamp < cutoff,
                   agents_db.Agent.admin_state_up).
            outerjoin(l3_attrs_db.RouterExtraAttributes,
                      l3_attrs_db.RouterExtraAttributes.router_id ==
                      RouterL3AgentBinding.router_id).
            filter(sa.or_(l3_attrs_db.RouterExtraAttributes.ha ==
                          sql.false(),
                          l3_attrs_db.RouterExtraAttributes.ha ==
                          sql.null())))
        # Agents observed alive during this pass; their remaining
        # bindings are skipped without another DB lookup.
        agents_back_online = set()
        for binding in down_bindings:
            if binding.l3_agent_id in agents_back_online:
                continue
            else:
                # we need new context to make sure we use different DB
                # transaction - otherwise we may fetch same agent record
                # each time due to REPEATABLE_READ isolation level
                context = n_ctx.get_admin_context()
                agent = self._get_agent(context, binding.l3_agent_id)
                if agent.is_active:
                    agents_back_online.add(binding.l3_agent_id)
                    continue
            LOG.warning(_LW(
                "Rescheduling router %(router)s from agent %(agent)s "
                "because the agent did not report to the server in "
                "the last %(dead_time)s seconds."),
                {'router': binding.router_id,
                 'agent': binding.l3_agent_id,
                 'dead_time': agent_dead_limit})
            try:
                self.reschedule_router(context, binding.router_id)
            except (l3agentscheduler.RouterReschedulingFailed,
                    oslo_messaging.RemoteError):
                # Catch individual router rescheduling errors here
                # so one broken one doesn't stop the iteration.
                LOG.exception(_LE("Failed to reschedule router %s"),
                              binding.router_id)
    except Exception:
        # we want to be thorough and catch whatever is raised
        # to avoid loop abortion
        LOG.exception(_LE("Exception encountered during router "
                          "rescheduling."))
示例15: test_member_crud
def test_member_crud(self):
    """Exercise create/update/delete of pool members end to end,
    checking the expected workflow apply call after each operation and
    that the loadbalancer finishes in ACTIVE provisioning status.
    """
    with self.subnet(cidr='10.0.0.0/24') as s:
        with self.loadbalancer(
            subnet=s,
            vip_address=WF_APPLY_PARAMS['parameters']['vip_address']
        ) as lb:
            lb_id = lb['loadbalancer']['id']
            with self.listener(loadbalancer_id=lb_id) as l:
                listener_id = l['listener']['id']
                with self.pool(
                    protocol=lb_con.PROTOCOL_HTTP,
                    listener_id=listener_id) as p:
                    pool_id = p['pool']['id']
                    # First member is created while the workflow is
                    # still "missing"; its REST traffic is discarded.
                    with self.member(
                        no_delete=True, address='10.0.1.10',
                        pool_id=pool_id, subnet=s) as m1:
                        member1_id = m1['member']['id']
                        self.driver_rest_call_mock.reset_mock()
                        # Flip the shared fake REST function's state so
                        # the workflow now exists for later calls.
                        rest_call_function_mock.__dict__.update(
                            {'WORKFLOW_MISSING': False})
                        with self.member(
                            no_delete=True, pool_id=pool_id,
                            subnet=s, address='10.0.1.20') as m2:
                            member2_id = m2['member']['id']
                            # Member create should trigger an apply.
                            self.compare_apply_call()
                            self.driver_rest_call_mock.reset_mock()
                            # Update member1's weight through the plugin
                            # and verify the driver saw the change.
                            m = self.plugin_instance.db.get_pool_member(
                                context.get_admin_context(),
                                m1['member']['id']).to_dict(pool=False)
                            m['weight'] = 2
                            self.plugin_instance.update_pool_member(
                                context.get_admin_context(),
                                m1['member']['id'], p['pool']['id'],
                                {'member': m})
                            self.update_member(pool_id, id=member1_id,
                                               weight=2)
                            self.compare_apply_call()
                            self.driver_rest_call_mock.reset_mock()
                            # Delete member2 and expect another apply.
                            self.plugin_instance.delete_pool_member(
                                context.get_admin_context(),
                                member2_id, pool_id)
                            self.delete_member(member2_id, pool_id)
                            self.compare_apply_call()
                            # The loadbalancer must settle in ACTIVE.
                            lb = self.plugin_instance.db.get_loadbalancer(
                                context.get_admin_context(),
                                lb_id).to_dict(listener=False)
                            self.assertEqual('ACTIVE',
                                             lb['provisioning_status'])