

Python context.get_admin_context Function Code Examples

This article collects typical usage examples of the Python function neutron_lib.context.get_admin_context. If you are wondering what get_admin_context does, how to call it, or what it looks like in real code, the curated examples below should help.


Fifteen code examples of the get_admin_context function are shown below, sorted by popularity.
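Before the individual examples, here is a minimal, self-contained sketch of the typical call pattern. It is only an illustration: it assumes a configured Neutron environment with neutron_lib installed, and the plugin lookup through neutron_lib.plugins.directory and the 'DOWN' status filter are illustrative details, not taken from any specific project below.

from neutron_lib import context
from neutron_lib.plugins import directory

# Build an administrative request context: it is not scoped to a single
# project and is flagged as admin, so plugin and DB calls made with it can
# see all resources.
admin_ctx = context.get_admin_context()
assert admin_ctx.is_admin

# Plugins take the context as the first argument of their API/DB methods,
# for example listing ports regardless of owner (requires a loaded core plugin).
plugin = directory.get_plugin()
if plugin is not None:
    down_ports = plugin.get_ports(admin_ctx, filters={'status': ['DOWN']})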

Example 1: test_create_delete_l3_policy_with_routers

 def test_create_delete_l3_policy_with_routers(self):
     with self.router() as router1:
         with self.router() as router2:
             routers = [router1['router']['id'], router2['router']['id']]
             l3p = self.create_l3_policy(routers=routers)
             l3p_id = l3p['l3_policy']['id']
             test_l3p_id = self._gbp_plugin.get_l3p_id_from_router_id(
                 nctx.get_admin_context(),
                 router1['router']['id'])
             self.assertEqual(l3p_id, test_l3p_id)
             test_l3p_id = self._gbp_plugin.get_l3p_id_from_router_id(
                 nctx.get_admin_context(),
                 router2['router']['id'])
             self.assertEqual(l3p_id, test_l3p_id)
             self.assertEqual(sorted(routers),
                              sorted(l3p['l3_policy']['routers']))
             req = self.new_show_request('l3_policies', l3p_id,
                                         fmt=self.fmt)
             res = self.deserialize(self.fmt,
                                    req.get_response(self.ext_api))
             self.assertEqual(sorted(routers),
                              sorted(res['l3_policy']['routers']))
             req = self.new_delete_request('l3_policies', l3p_id)
             res = req.get_response(self.ext_api)
             self.assertEqual(webob.exc.HTTPNoContent.code, res.status_int)
Developer: openstack, Project: group-based-policy, Lines: 25, Source: test_group_policy_mapping_db.py

Example 2: _pull_missed_statuses

    def _pull_missed_statuses(self):
        LOG.debug("starting to pull pending statuses...")
        plugin = directory.get_plugin()
        filter = {"status": [n_const.PORT_STATUS_DOWN],
                  "vif_type": ["unbound"]}
        ports = plugin.get_ports(context.get_admin_context(), filter)

        if not ports:
            LOG.debug("no down ports found, done")
            return

        port_fetch_url = utils.get_odl_url(self.PORT_PATH)
        client = odl_client.OpenDaylightRestClient.create_client(
            url=port_fetch_url)

        for port in ports:
            port_id = port["id"]
            response = client.get(port_id)
            if response.status_code != 200:
                LOG.warning("Non-200 response code %s", str(response))
                continue
            odl_status = response.json()['port'][0]['status']
            if odl_status == n_const.PORT_STATUS_ACTIVE:
                # for now we only support transition from DOWN->ACTIVE
                # See https://bugs.launchpad.net/networking-odl/+bug/1686023
                provisioning_blocks.provisioning_complete(
                    context.get_admin_context(),
                    port_id, resources.PORT,
                    provisioning_blocks.L2_AGENT_ENTITY)
        LOG.debug("done pulling pending statuses")
Developer: openstack, Project: networking-odl, Lines: 30, Source: port_status_update.py

Example 3: test_delete_listener

 def test_delete_listener(self):
     with self.loadbalancer(no_delete=True) as loadbalancer:
         lb_id = loadbalancer['loadbalancer']['id']
         self._update_status(models.LoadBalancer, constants.ACTIVE, lb_id)
         with self.listener(loadbalancer_id=lb_id,
                            no_delete=True) as listener:
             listener_id = listener['listener']['id']
             self._update_status(models.LoadBalancer, constants.ACTIVE,
                                 lb_id)
             ctx = context.get_admin_context()
             self.plugin_instance.delete_listener(
                 ctx, listener['listener']['id'])
             calls = self.mock_api.delete_listener.call_args_list
             _, called_listener, called_host = calls[0][0]
             self.assertEqual(listener_id, called_listener.id)
             self.assertEqual('host', called_host)
             self.assertEqual(constants.PENDING_DELETE,
                              called_listener.provisioning_status)
             ctx = context.get_admin_context()
             lb = self.plugin_instance.db.get_loadbalancer(ctx, lb_id)
             self.assertEqual(constants.ACTIVE,
                              lb.provisioning_status)
             self.assertRaises(
                 loadbalancerv2.EntityNotFound,
                 self.plugin_instance.db.get_listener, ctx, listener_id)
Developer: openstack, Project: neutron-lbaas, Lines: 25, Source: test_agent_driver_base.py

Example 4: _extend_port_resource_request

    def _extend_port_resource_request(port_res, port_db):
        """Add resource request to a port."""
        port_res['resource_request'] = None
        qos_policy = policy_object.QosPolicy.get_port_policy(
            context.get_admin_context(), port_res['id'])
        # Note(lajoskatona): QosPolicyPortBinding is not ready for some
        # reasons, so let's try and fetch the QoS policy directly if there is a
        # qos_policy_id in port_res.
        if (not qos_policy and 'qos_policy_id' in port_res and
                port_res['qos_policy_id']):
            qos_policy = policy_object.QosPolicy.get_policy_obj(
                context.get_admin_context(), port_res['qos_policy_id']
            )

        # Note(lajoskatona): handle the case when the port inherits qos-policy
        # from the network.
        if not qos_policy:
            net = network_object.Network.get_object(
                context.get_admin_context(), id=port_res['network_id'])
            if net and net.qos_policy_id:
                qos_policy = policy_object.QosPolicy.get_network_policy(
                    context.get_admin_context(), net.id)

        if not qos_policy:
            return port_res

        resources = {}
        rule_direction_class = {
            nl_constants.INGRESS_DIRECTION:
                pl_constants.CLASS_NET_BW_INGRESS_KBPS,
            nl_constants.EGRESS_DIRECTION:
                pl_constants.CLASS_NET_BW_EGRESS_KBPS
        }
        for rule in qos_policy.rules:
            if rule.rule_type == qos_consts.RULE_TYPE_MINIMUM_BANDWIDTH:
                resources[rule_direction_class[rule.direction]] = rule.min_kbps
        if not resources:
            return port_res

        vnic_trait = pl_utils.vnic_type_trait(
            port_res[portbindings.VNIC_TYPE])

        # TODO(lajoskatona): Change to handle all segments when any traits
        # support will be available. See Placement spec:
        # https://review.openstack.org/565730
        first_segment = network_object.NetworkSegment.get_objects(
            context.get_admin_context(),
            network_id=port_res['network_id'])[0]

        if not first_segment or not first_segment.physical_network:
            return port_res
        physnet_trait = pl_utils.physnet_trait(
            first_segment.physical_network)

        resource_request = {
            'required': [physnet_trait, vnic_trait],
            'resources': resources
        }
        port_res['resource_request'] = resource_request
        return port_res
Developer: igordcard, Project: neutron, Lines: 60, Source: qos_plugin.py

Example 5: reschedule_resources_from_down_agents

    def reschedule_resources_from_down_agents(self, agent_type,
                                              get_down_bindings,
                                              agent_id_attr,
                                              resource_id_attr,
                                              resource_name,
                                              reschedule_resource,
                                              rescheduling_failed):
        """Reschedule resources from down neutron agents
        if admin state is up.
        """
        agent_dead_limit = self.agent_dead_limit_seconds()
        self.wait_down_agents(agent_type, agent_dead_limit)

        context = ncontext.get_admin_context()
        try:
            down_bindings = get_down_bindings(context, agent_dead_limit)

            agents_back_online = set()
            for binding in down_bindings:
                binding_agent_id = getattr(binding, agent_id_attr)
                binding_resource_id = getattr(binding, resource_id_attr)
                if binding_agent_id in agents_back_online:
                    continue
                else:
                    # we need new context to make sure we use different DB
                    # transaction - otherwise we may fetch same agent record
                    # each time due to REPEATABLE_READ isolation level
                    context = ncontext.get_admin_context()
                    agent = self._get_agent(context, binding_agent_id)
                    if agent.is_active:
                        agents_back_online.add(binding_agent_id)
                        continue

                LOG.warning(
                    "Rescheduling %(resource_name)s %(resource)s from agent "
                    "%(agent)s because the agent did not report to the server "
                    "in the last %(dead_time)s seconds.",
                    {'resource_name': resource_name,
                     'resource': binding_resource_id,
                     'agent': binding_agent_id,
                     'dead_time': agent_dead_limit})
                try:
                    reschedule_resource(context, binding_resource_id)
                except (rescheduling_failed, oslo_messaging.RemoteError):
                    # Catch individual rescheduling errors here
                    # so one broken one doesn't stop the iteration.
                    LOG.exception("Failed to reschedule %(resource_name)s "
                                  "%(resource)s",
                                  {'resource_name': resource_name,
                                   'resource': binding_resource_id})
        except Exception:
            # we want to be thorough and catch whatever is raised
            # to avoid loop abortion
            LOG.exception("Exception encountered during %(resource_name)s "
                          "rescheduling.",
                          {'resource_name': resource_name})
Developer: openstack, Project: neutron, Lines: 56, Source: agentschedulers_db.py

Example 6: _gen_port

 def _gen_port(self):
     network_id = self.plugin.create_network(context.get_admin_context(), {
         'network':
         {'name': 'pecannet', 'tenant_id': 'tenid', 'shared': False,
          'admin_state_up': True, 'status': 'ACTIVE'}})['id']
     self.port = self.plugin.create_port(context.get_admin_context(), {
         'port':
         {'tenant_id': 'tenid', 'network_id': network_id,
          'fixed_ips': n_const.ATTR_NOT_SPECIFIED,
          'mac_address': '00:11:22:33:44:55',
          'admin_state_up': True, 'device_id': 'FF',
          'device_owner': 'pecan', 'name': 'pecan'}})
Developer: eayunstack, Project: neutron, Lines: 12, Source: test_controllers.py

Example 7: test_member_crud

    def test_member_crud(self):
        with self.subnet(cidr='10.0.0.0/24') as s:
            with self.loadbalancer(subnet=s) as lb:
                lb_id = lb['loadbalancer']['id']
                with self.listener(loadbalancer_id=lb_id) as l:
                    listener_id = l['listener']['id']
                    with self.pool(
                        protocol=lb_con.PROTOCOL_HTTP,
                        listener_id=listener_id) as p:
                        pool_id = p['pool']['id']
                        with self.member(
                            no_delete=True, address='10.0.1.10',
                            pool_id=pool_id, subnet=s) as m1:
                            member1_id = m1['member']['id']

                            self.driver_rest_call_mock.reset_mock()
                            rest_call_function_mock.__dict__.update(
                                {'WORKFLOW_MISSING': False})

                            with self.member(
                                no_delete=True, pool_id=pool_id,
                                subnet=s, address='10.0.1.20') as m2:
                                member2_id = m2['member']['id']
                                self.compare_apply_call()

                                self.driver_rest_call_mock.reset_mock()
                                m = self.plugin_instance.db.get_pool_member(
                                    context.get_admin_context(),
                                    m1['member']['id']).to_dict(pool=False)

                                m['weight'] = 2
                                self.plugin_instance.update_pool_member(
                                    context.get_admin_context(),
                                    m1['member']['id'], p['pool']['id'],
                                    {'member': m})
                                self.update_member(pool_id, id=member1_id,
                                                   weight=2)
                                self.compare_apply_call()

                                self.driver_rest_call_mock.reset_mock()

                                self.plugin_instance.delete_pool_member(
                                    context.get_admin_context(),
                                    member2_id, pool_id)
                                self.delete_member(member2_id, pool_id)
                                self.compare_apply_call()

                                lb = self.plugin_instance.db.get_loadbalancer(
                                    context.get_admin_context(),
                                    lb_id).to_dict(listener=False)
                                self.assertEqual('ACTIVE',
                                             lb['provisioning_status'])
Developer: openstack, Project: neutron-lbaas, Lines: 52, Source: test_v2_plugin_driver.py

Example 8: test_create_health_monitor

 def test_create_health_monitor(self):
     with self.loadbalancer(no_delete=True) as loadbalancer:
         lb_id = loadbalancer['loadbalancer']['id']
         self._update_status(models.LoadBalancer, constants.ACTIVE, lb_id)
         with self.listener(loadbalancer_id=lb_id,
                            no_delete=True) as listener:
             listener_id = listener['listener']['id']
             self._update_status(models.LoadBalancer, constants.ACTIVE,
                                 lb_id)
             with self.pool(listener_id=listener_id, loadbalancer_id=lb_id,
                            no_delete=True) as pool:
                 pool_id = pool['pool']['id']
                 self._update_status(models.LoadBalancer, constants.ACTIVE,
                                     lb_id)
                 with self.healthmonitor(pool_id=pool_id,
                                         no_delete=True) as monitor:
                     hm_id = monitor['healthmonitor']['id']
                     calls = (
                         self.mock_api.create_healthmonitor.call_args_list)
                     _, called_hm, called_host = calls[0][0]
                     self.assertEqual(hm_id, called_hm.id)
                     self.assertEqual('host', called_host)
                     self.assertEqual(constants.PENDING_CREATE,
                                      called_hm.provisioning_status)
                     ctx = context.get_admin_context()
                     lb = self.plugin_instance.db.get_loadbalancer(
                         ctx, lb_id)
                     self.assertEqual(constants.PENDING_UPDATE,
                                      lb.provisioning_status)
Developer: openstack, Project: neutron-lbaas, Lines: 29, Source: test_agent_driver_base.py

Example 9: test_create_member

 def test_create_member(self):
     with self.loadbalancer(no_delete=True) as loadbalancer:
         lb_id = loadbalancer['loadbalancer']['id']
         self._update_status(models.LoadBalancer, constants.ACTIVE, lb_id)
         with self.listener(loadbalancer_id=lb_id,
                            no_delete=True) as listener:
             listener_id = listener['listener']['id']
             self._update_status(models.LoadBalancer, constants.ACTIVE,
                                 lb_id)
             with self.pool(listener_id=listener_id, loadbalancer_id=lb_id,
                            no_delete=True) as pool:
                 pool_id = pool['pool']['id']
                 self._update_status(models.LoadBalancer, constants.ACTIVE,
                                     lb_id)
                 with self.subnet(cidr='11.0.0.0/24') as subnet:
                     with self.member(pool_id=pool_id, subnet=subnet,
                                      no_delete=True) as member:
                         member_id = member['member']['id']
                         calls = self.mock_api.create_member.call_args_list
                         _, called_member, called_host = calls[0][0]
                         self.assertEqual(member_id, called_member.id)
                         self.assertEqual('host', called_host)
                         self.assertEqual(constants.PENDING_CREATE,
                                          called_member.provisioning_status)
                         ctx = context.get_admin_context()
                         lb = self.plugin_instance.db.get_loadbalancer(
                             ctx, lb_id)
                         self.assertEqual(constants.PENDING_UPDATE,
                                          lb.provisioning_status)
Developer: openstack, Project: neutron-lbaas, Lines: 29, Source: test_agent_driver_base.py

Example 10: test_update_pool

 def test_update_pool(self):
     ctx = context.get_admin_context()
     with self.loadbalancer(no_delete=True) as loadbalancer:
         lb_id = loadbalancer['loadbalancer']['id']
         self._update_status(models.LoadBalancer, constants.ACTIVE, lb_id)
         with self.listener(loadbalancer_id=lb_id,
                            no_delete=True) as listener:
             listener_id = listener['listener']['id']
             self._update_status(models.LoadBalancer, constants.ACTIVE,
                                 lb_id)
             with self.pool(loadbalancer_id=lb_id, listener_id=listener_id,
                            no_delete=True) as pool:
                 pool_id = pool['pool']['id']
                 old_name = pool['pool']['name']
                 self._update_status(models.LoadBalancer, constants.ACTIVE,
                                     lb_id)
                 new_name = 'new_name'
                 pool['pool']['name'] = new_name
                 self.plugin_instance.update_pool(ctx, pool_id, pool)
                 calls = self.mock_api.update_pool.call_args_list
                 (_, old_called_pool,
                  new_called_pool, called_host) = calls[0][0]
                 self.assertEqual(pool_id, new_called_pool.id)
                 self.assertEqual(pool_id, old_called_pool.id)
                 self.assertEqual(old_name, old_called_pool.name)
                 self.assertEqual(new_name, new_called_pool.name)
                 self.assertEqual(constants.PENDING_UPDATE,
                                  new_called_pool.provisioning_status)
                 lb = self.plugin_instance.db.get_loadbalancer(ctx, lb_id)
                 self.assertEqual(constants.PENDING_UPDATE,
                                  lb.provisioning_status)
                 self.assertEqual('host', called_host)
Developer: openstack, Project: neutron-lbaas, Lines: 32, Source: test_agent_driver_base.py

Example 11: tag_default_ports

def tag_default_ports(resource, event, trigger, **kwargs):
    nsxlib = v3_utils.get_connected_nsxlib()
    admin_cxt = neutron_context.get_admin_context()
    filters = v3_utils.get_plugin_filters(admin_cxt)

    # the plugin creation below will create the NS group and update the default
    # OS section to have the correct applied to group
    with v3_utils.NsxV3PluginWrapper() as _plugin:
        neutron_ports = _plugin.get_ports(admin_cxt, filters=filters)
        for port in neutron_ports:
            neutron_id = port['id']
            # get the network nsx id from the mapping table
            nsx_id = plugin_utils.get_port_nsx_id(admin_cxt.session,
                                                  neutron_id)
            if not nsx_id:
                continue
            device_owner = port['device_owner']
            if (device_owner == l3_db.DEVICE_OWNER_ROUTER_INTF or
                device_owner == const.DEVICE_OWNER_DHCP):
                continue
            ps = _plugin._get_port_security_binding(admin_cxt,
                                                    neutron_id)
            if not ps:
                continue
            try:
                nsx_port = nsxlib.logical_port.get(nsx_id)
            except nsx_exc.ResourceNotFound:
                continue
            tags_update = nsx_port['tags']
            tags_update += [{'scope': security.PORT_SG_SCOPE,
                             'tag': plugin.NSX_V3_DEFAULT_SECTION}]
            nsxlib.logical_port.update(nsx_id, None,
                                       tags_update=tags_update)
Developer: openstack, Project: vmware-nsx, Lines: 33, Source: ports.py

Example 12: setUp

 def setUp(self):
     super(TestDriverController, self).setUp()
     self.setup_coreplugin(DB_PLUGIN_KLASS)
     self.fake_l3 = mock.Mock()
     self.dc = driver_controller.DriverController(self.fake_l3)
     self.fake_l3.l3_driver_controller = self.dc
     self.ctx = context.get_admin_context()
Developer: AradhanaSingh, Project: neutron, Lines: 7, Source: test_driver_controller.py

Example 13: setUp

    def setUp(self):
        flowclassifier_plugin = (
            test_flowclassifier_db.DB_FLOWCLASSIFIER_PLUGIN_CLASS)

        service_plugins = {
            flowclassifier.FLOW_CLASSIFIER_EXT: flowclassifier_plugin
        }
        fdb.FlowClassifierDbPlugin.supported_extension_aliases = [
            flowclassifier.FLOW_CLASSIFIER_EXT]
        fdb.FlowClassifierDbPlugin.path_prefix = (
            flowclassifier.FLOW_CLASSIFIER_PREFIX
        )
        super(OVSFlowClassifierDriverTestCase, self).setUp(
            ext_mgr=None,
            plugin=None,
            service_plugins=service_plugins
        )
        self.flowclassifier_plugin = importutils.import_object(
            flowclassifier_plugin)
        ext_mgr = api_ext.PluginAwareExtensionManager(
            test_flowclassifier_db.extensions_path,
            {
                flowclassifier.FLOW_CLASSIFIER_EXT: self.flowclassifier_plugin
            }
        )
        app = config.load_paste_app('extensions_test_app')
        self.ext_api = api_ext.ExtensionMiddleware(app, ext_mgr=ext_mgr)
        self.ctx = context.get_admin_context()
        self.driver = driver.OVSFlowClassifierDriver()
        self.driver.initialize()
Developer: openstack, Project: networking-sfc, Lines: 30, Source: test_driver.py

Example 14: setUp

 def setUp(self):
     plugin = 'neutron.tests.unit.extensions.test_agent.TestAgentPlugin'
     # for these tests we need to enable overlapping ips
     cfg.CONF.set_default('allow_overlapping_ips', True)
     ext_mgr = AgentTestExtensionManager()
     super(AgentDBTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr)
     self.adminContext = context.get_admin_context()
Developer: openstack, Project: neutron, Lines: 7, Source: test_agent.py

Example 15: _ext_extend_subnet_dict

 def _ext_extend_subnet_dict(result, subnetdb):
     ctx = n_context.get_admin_context()
     # get the core plugin as this is a static method with no 'self'
     plugin = directory.get_plugin()
     with db_api.CONTEXT_WRITER.using(ctx):
         plugin._extension_manager.extend_subnet_dict(
             ctx.session, subnetdb, result)
Developer: openstack, Project: vmware-nsx, Lines: 7, Source: plugin.py


Note: The neutron_lib.context.get_admin_context examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Refer to each project's license before distributing or reusing the code, and do not republish this article without permission.