本文整理汇总了Python中neutron._i18n._LW函数的典型用法代码示例。如果您正苦于以下问题:Python _LW函数的具体用法?Python _LW怎么用?Python _LW使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了_LW函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: send_events
def send_events(self, batched_events):
    """Send a batch of server external events to nova and log the outcome.

    :param batched_events: list of event dicts to submit to nova's
        server_external_events API.
    """
    LOG.debug("Sending events: %s", batched_events)
    try:
        response = self.nclient.server_external_events.create(
            batched_events)
    except nova_exceptions.NotFound:
        # Nova does not know about (at least one of) the instances.
        LOG.warning(_LW("Nova returned NotFound for event: %s"),
                    batched_events)
        return
    except Exception:
        LOG.exception(_LE("Failed to notify nova on events: %s"),
                      batched_events)
        return
    if not isinstance(response, list):
        LOG.error(_LE("Error response returned from nova: %s"),
                  response)
        return
    response_error = False
    for event in response:
        try:
            code = event['code']
        except KeyError:
            # Malformed event entry; report once after the loop.
            response_error = True
            continue
        if code == 200:
            LOG.info(_LI("Nova event response: %s"), event)
        else:
            LOG.warning(_LW("Nova event: %s returned with failed "
                            "status"), event)
    if response_error:
        LOG.error(_LE("Error response returned from nova: %s"),
                  response)
示例2: treat_device
def treat_device(self, device, pci_slot, admin_state_up, spoofcheck=True):
    """Configure spoofcheck and admin state on an SR-IOV device.

    After applying the state, notifies the plugin that the device is
    up or down. Does nothing if the device is unknown to the agent.
    """
    if not self.eswitch_mgr.device_exists(device, pci_slot):
        LOG.info(_LI("No device with MAC %s defined on agent."), device)
        return
    try:
        self.eswitch_mgr.set_device_spoofcheck(device, pci_slot,
                                               spoofcheck)
    except Exception:
        # Best effort: a spoofcheck failure does not block state change.
        LOG.warning(_LW("Failed to set spoofcheck for device %s"),
                    device)
    LOG.info(_LI("Device %(device)s spoofcheck %(spoofcheck)s"),
             {"device": device, "spoofcheck": spoofcheck})
    try:
        self.eswitch_mgr.set_device_state(device, pci_slot,
                                          admin_state_up)
    except exc.IpCommandOperationNotSupportedError:
        # Device can't change state; still report port status below.
        LOG.warning(_LW("Device %s does not support state change"),
                    device)
    except exc.SriovNicError:
        LOG.warning(_LW("Failed to set device %s state"), device)
        return
    # Update the plugin about the resulting port status.
    if admin_state_up:
        self.plugin_rpc.update_device_up(self.context, device,
                                         self.agent_id, cfg.CONF.host)
    else:
        self.plugin_rpc.update_device_down(self.context, device,
                                           self.agent_id, cfg.CONF.host)
示例3: init_firewall
def init_firewall(self, defer_refresh_firewall=False,
                  integration_bridge=None):
    """Load and instantiate the configured security-group firewall driver.

    :param defer_refresh_firewall: when True, port filters are not
        refreshed as soon as a rule or membership notification arrives.
    :param integration_bridge: forwarded to drivers whose __init__
        accepts it; drivers that don't are constructed without it.
    """
    firewall_driver = cfg.CONF.SECURITYGROUP.firewall_driver or 'noop'
    LOG.debug("Init firewall settings (driver=%s)", firewall_driver)
    if not _is_valid_driver_combination():
        # LOG.warn is a deprecated alias; use LOG.warning like the rest
        # of the codebase.
        LOG.warning(_LW("Driver configuration doesn't match "
                        "with enable_security_group"))
    firewall_class = firewall.load_firewall_driver_class(firewall_driver)
    try:
        self.firewall = firewall_class(
            integration_bridge=integration_bridge)
    except TypeError as e:
        # Older drivers may not accept the integration_bridge kwarg;
        # fall back to a no-argument construction.
        LOG.warning(_LW("Firewall driver %(fw_driver)s doesn't accept "
                        "integration_bridge parameter in __init__(): "
                        "%(err)s"),
                    {'fw_driver': firewall_driver,
                     'err': e})
        self.firewall = firewall_class()
    # The following flag will be set to true if port filter must not be
    # applied as soon as a rule or membership notification is received
    self.defer_refresh_firewall = defer_refresh_firewall
    # Stores devices for which firewall should be refreshed when
    # deferred refresh is enabled.
    self.devices_to_refilter = set()
    # Flag raised when a global refresh is needed
    self.global_refresh_firewall = False
    self._use_enhanced_rpc = None
示例4: bind_port
def bind_port(self, context):
    """Attempt to bind the port on one of this driver's live agents."""
    port_id = context.current['id']
    network_id = context.network.current['id']
    LOG.debug("Attempting to bind port %(port)s on "
              "network %(network)s",
              {'port': port_id, 'network': network_id})
    vnic_type = context.current.get(portbindings.VNIC_TYPE,
                                    portbindings.VNIC_NORMAL)
    if vnic_type not in self.supported_vnic_types:
        LOG.debug("Refusing to bind due to unsupported vnic_type: %s",
                  vnic_type)
        return
    agents = context.host_agents(self.agent_type)
    if not agents:
        LOG.warning(_LW("Port %(pid)s on network %(network)s not bound, "
                        "no agent registered on host %(host)s"),
                    {'pid': port_id,
                     'network': network_id,
                     'host': context.host})
    for agent in agents:
        LOG.debug("Checking agent: %s", agent)
        if not agent['alive']:
            LOG.warning(_LW("Refusing to bind port %(pid)s to dead agent: "
                            "%(agent)s"),
                        {'pid': port_id, 'agent': agent})
            continue
        # First segment the agent accepts wins.
        for segment in context.segments_to_bind:
            if self.try_to_bind_segment_for_agent(context, segment,
                                                  agent):
                LOG.debug("Bound using segment: %s", segment)
                return
示例5: remove_networks_from_down_agents
def remove_networks_from_down_agents(self):
    """Remove networks from down DHCP agents if admin state is up.

    Reschedule them if configured so.
    """
    agent_dead_limit = self.agent_dead_limit_seconds()
    self.wait_down_agents("DHCP", agent_dead_limit)
    cutoff = self.get_cutoff_time(agent_dead_limit)
    context = ncontext.get_admin_context()
    try:
        # Bindings whose agent missed its heartbeat deadline while
        # still administratively up.
        down_bindings = (
            context.session.query(NetworkDhcpAgentBinding)
            .join(agents_db.Agent)
            .filter(agents_db.Agent.heartbeat_timestamp < cutoff, agents_db.Agent.admin_state_up)
        )
        dhcp_notifier = self.agent_notifiers.get(constants.AGENT_TYPE_DHCP)
        dead_bindings = [b for b in self._filter_bindings(context, down_bindings)]
        agents = self.get_agents_db(context, {"agent_type": [constants.AGENT_TYPE_DHCP]})
        active_agents = [agent for agent in agents if self.is_eligible_agent(context, True, agent)]
        # With no live agents there is nowhere to reschedule to, so
        # leave the existing bindings untouched.
        if not active_agents:
            LOG.warning(_LW("No DHCP agents available, " "skipping rescheduling"))
            return
        for binding in dead_bindings:
            LOG.warning(
                _LW(
                    "Removing network %(network)s from agent "
                    "%(agent)s because the agent did not report "
                    "to the server in the last %(dead_time)s "
                    "seconds."
                ),
                {"network": binding.network_id, "agent": binding.dhcp_agent_id, "dead_time": agent_dead_limit},
            )
            # save binding object to avoid ObjectDeletedError
            # in case binding is concurrently deleted from the DB
            saved_binding = {"net": binding.network_id, "agent": binding.dhcp_agent_id}
            try:
                # do not notify agent if it considered dead
                # so when it is restarted it won't see network delete
                # notifications on its queue
                self.remove_network_from_dhcp_agent(
                    context, binding.dhcp_agent_id, binding.network_id, notify=False
                )
            except dhcpagentscheduler.NetworkNotHostedByDhcpAgent:
                # measures against concurrent operation
                LOG.debug("Network %(net)s already removed from DHCP " "agent %(agent)s", saved_binding)
                # still continue and allow concurrent scheduling attempt
            except Exception:
                # Log and keep going so one bad binding does not abort
                # the whole cleanup pass.
                LOG.exception(
                    _LE("Unexpected exception occurred while " "removing network %(net)s from agent " "%(agent)s"),
                    saved_binding,
                )
            if cfg.CONF.network_auto_schedule:
                self._schedule_network(context, saved_binding["net"], dhcp_notifier)
    except Exception:
        # we want to be thorough and catch whatever is raised
        # to avoid loop abortion
        LOG.exception(_LE("Exception encountered during network " "rescheduling"))
示例6: _get_candidates
def _get_candidates(self, plugin, context, sync_router):
    """Return L3 agents where a router could be scheduled."""
    with context.session.begin(subtransactions=True):
        # allow one router is hosted by just
        # one enabled l3 agent hosting since active is just a
        # timing problem. Non-active l3 agent can return to
        # active any time
        current_l3_agents = plugin.get_l3_agents_hosting_routers(
            context, [sync_router["id"]], admin_state_up=True)
        is_router_distributed = sync_router.get("distributed", False)
        if current_l3_agents and not is_router_distributed:
            LOG.debug(
                "Router %(router_id)s has already been hosted " "by L3 agent %(agent_id)s",
                {"router_id": sync_router["id"], "agent_id": current_l3_agents[0]["id"]},
            )
            return []
        active_l3_agents = plugin.get_l3_agents(context, active=True)
        if not active_l3_agents:
            # LOG.warn is a deprecated alias; use LOG.warning for
            # consistency with the rest of the codebase.
            LOG.warning(_LW("No active L3 agents"))
            return []
        # Only agents not already hosting the router are candidates.
        potential_candidates = list(set(active_l3_agents) - set(current_l3_agents))
        new_l3agents = []
        if potential_candidates:
            new_l3agents = plugin.get_l3_agent_candidates(context, sync_router, potential_candidates)
        if not new_l3agents:
            LOG.warning(_LW("No L3 agents can host the router %s"), sync_router["id"])
        return new_l3agents
示例7: _load_all_extensions_from_path
def _load_all_extensions_from_path(self, path):
    """Load every extension module found directly under *path*.

    Files are visited in sorted order so the load order is predictable
    across a cluster of load-balanced Neutron servers.
    """
    for fname in sorted(os.listdir(path)):
        try:
            LOG.debug('Loading extension file: %s', fname)
            mod_name, file_ext = os.path.splitext(os.path.split(fname)[-1])
            ext_path = os.path.join(path, fname)
            # Skip non-Python files and private modules.
            if file_ext.lower() != '.py' or mod_name.startswith('_'):
                continue
            mod = imp.load_source(mod_name, ext_path)
            # The extension class is expected to be named after its
            # module with the first letter capitalized.
            ext_name = mod_name[0].upper() + mod_name[1:]
            new_ext_class = getattr(mod, ext_name, None)
            if not new_ext_class:
                LOG.warning(_LW('Did not find expected name '
                                '"%(ext_name)s" in %(file)s'),
                            {'ext_name': ext_name,
                             'file': ext_path})
                continue
            self.add_extension(new_ext_class())
        except Exception as exception:
            LOG.warning(_LW("Extension file %(f)s wasn't loaded due to "
                            "%(exception)s"),
                        {'f': fname, 'exception': exception})
示例8: _get_candidates
def _get_candidates(self, plugin, context, sync_router):
    """Return L3 agents where a router could be scheduled."""
    with context.session.begin(subtransactions=True):
        # A router already hosted by an enabled agent is left alone:
        # non-active is only a timing problem and the agent may become
        # active again at any time.
        hosting_agents = plugin.get_l3_agents_hosting_routers(
            context, [sync_router['id']], admin_state_up=True)
        if hosting_agents:
            LOG.debug('Router %(router_id)s has already been hosted '
                      'by L3 agent %(agent_id)s',
                      {'router_id': sync_router['id'],
                       'agent_id': hosting_agents[0]['id']})
            return []
        live_agents = plugin.get_l3_agents(context, active=True)
        if not live_agents:
            LOG.warning(_LW('No active L3 agents'))
            return []
        eligible = plugin.get_l3_agent_candidates(context, sync_router,
                                                  live_agents)
        if not eligible:
            LOG.warning(_LW('No L3 agents can host the router %s'),
                        sync_router['id'])
        return eligible
示例9: plug_new
def plug_new(self, network_id, port_id, device_name, mac_address,
             bridge=None, namespace=None, prefix=None, mtu=None):
    """Plug in the interface.

    Creates the device (veth pair or internal OVS port), attaches it to
    the OVS bridge, sets its MAC, moves it into *namespace* if given,
    applies the MTU and brings the device(s) up.

    :param network_id: part of the driver interface; not used here.
    :param port_id: neutron port id recorded on the OVS port.
    :param bridge: target bridge; defaults to the configured
        ovs_integration_bridge.
    :param mtu: optional MTU; a warning is logged when absent.
    """
    if not bridge:
        bridge = self.conf.ovs_integration_bridge
    self.check_bridge_exists(bridge)
    ip = ip_lib.IPWrapper()
    tap_name = self._get_tap_name(device_name, prefix)
    if self.conf.ovs_use_veth:
        # Create ns_dev in a namespace if one is configured.
        root_dev, ns_dev = ip.add_veth(tap_name,
                                       device_name,
                                       namespace2=namespace)
        root_dev.disable_ipv6()
    else:
        # With an internal OVS port, the device itself is the ns end.
        ns_dev = ip.device(device_name)
    internal = not self.conf.ovs_use_veth
    self._ovs_add_port(bridge, tap_name, port_id, mac_address,
                       internal=internal)
    for i in range(9):
        # workaround for the OVS shy port syndrome. ports sometimes
        # hide for a bit right after they are first created.
        # see bug/1618987
        try:
            ns_dev.link.set_address(mac_address)
            break
        except RuntimeError as e:
            LOG.warning(_LW("Got error trying to set mac, retrying: %s"),
                        str(e))
            time.sleep(1)
    else:
        # didn't break, we give it one last shot without catching
        ns_dev.link.set_address(mac_address)
    # Add an interface created by ovs to the namespace.
    if not self.conf.ovs_use_veth and namespace:
        namespace_obj = ip.ensure_namespace(namespace)
        namespace_obj.add_device_to_namespace(ns_dev)
    # NOTE(ihrachys): the order here is significant: we must set MTU after
    # the device is moved into a namespace, otherwise OVS bridge does not
    # allow to set MTU that is higher than the least of all device MTUs on
    # the bridge
    if mtu:
        ns_dev.link.set_mtu(mtu)
        if self.conf.ovs_use_veth:
            root_dev.link.set_mtu(mtu)
    else:
        LOG.warning(_LW("No MTU configured for port %s"), port_id)
    ns_dev.link.set_up()
    if self.conf.ovs_use_veth:
        root_dev.link.set_up()
示例10: reschedule_routers_from_down_agents
def reschedule_routers_from_down_agents(self):
    """Reschedule routers from down l3 agents if admin state is up.

    HA routers are excluded from the query (they fail over on their
    own), and routers bound to DVR agents on compute nodes are skipped.
    """
    agent_dead_limit = self.agent_dead_limit_seconds()
    self.wait_down_agents('L3', agent_dead_limit)
    cutoff = self.get_cutoff_time(agent_dead_limit)
    context = n_ctx.get_admin_context()
    # Bindings to agents that missed the heartbeat deadline while
    # still administratively up.
    down_bindings = (
        context.session.query(RouterL3AgentBinding).
        join(agents_db.Agent).
        filter(agents_db.Agent.heartbeat_timestamp < cutoff,
               agents_db.Agent.admin_state_up).
        outerjoin(l3_attrs_db.RouterExtraAttributes,
                  l3_attrs_db.RouterExtraAttributes.router_id ==
                  RouterL3AgentBinding.router_id).
        filter(sa.or_(l3_attrs_db.RouterExtraAttributes.ha == sql.false(),
                      l3_attrs_db.RouterExtraAttributes.ha == sql.null())))
    try:
        # Remember agents seen alive again so we query each only once.
        agents_back_online = set()
        for binding in down_bindings:
            if binding.l3_agent_id in agents_back_online:
                continue
            else:
                agent = self._get_agent(context, binding.l3_agent_id)
                if agent.is_active:
                    agents_back_online.add(binding.l3_agent_id)
                    continue
            agent_mode = self._get_agent_mode(binding.l3_agent)
            if agent_mode == constants.L3_AGENT_MODE_DVR:
                # rescheduling from l3 dvr agent on compute node doesn't
                # make sense. Router will be removed from that agent once
                # there are no dvr serviceable ports on that compute node
                # (LOG.warn is deprecated; switched to LOG.warning.)
                LOG.warning(_LW('L3 DVR agent on node %(host)s is down. '
                                'Not rescheduling from agent in \'dvr\' '
                                'mode.'), {'host': binding.l3_agent.host})
                continue
            LOG.warning(_LW(
                "Rescheduling router %(router)s from agent %(agent)s "
                "because the agent did not report to the server in "
                "the last %(dead_time)s seconds."),
                {'router': binding.router_id,
                 'agent': binding.l3_agent_id,
                 'dead_time': agent_dead_limit})
            try:
                self.reschedule_router(context, binding.router_id)
            except (l3agentscheduler.RouterReschedulingFailed,
                    oslo_messaging.RemoteError):
                # Catch individual router rescheduling errors here
                # so one broken one doesn't stop the iteration.
                LOG.exception(_LE("Failed to reschedule router %s"),
                              binding.router_id)
    except Exception:
        # we want to be thorough and catch whatever is raised
        # to avoid loop abortion
        LOG.exception(_LE("Exception encountered during router "
                          "rescheduling."))
示例11: _destroy_namespace_and_port
def _destroy_namespace_and_port(self):
    """Best-effort teardown of the DHCP interface and its namespace."""
    try:
        self.device_manager.destroy(self.network, self.interface_name)
    except RuntimeError:
        # Keep going: the namespace should still be removed.
        LOG.warning(_LW("Failed trying to delete interface: %s"),
                    self.interface_name)
    ip_wrapper = ip_lib.IPWrapper(namespace=self.network.namespace)
    try:
        ip_wrapper.netns.delete(self.network.namespace)
    except RuntimeError:
        LOG.warning(_LW("Failed trying to delete namespace: %s"),
                    self.network.namespace)
示例12: get_vif_port_set
def get_vif_port_set(self):
    """Return the set of neutron port ids attached to this bridge.

    Ports with an unassigned or invalid ofport are logged and skipped.
    """
    edge_ports = set()
    results = self.get_ports_attributes(
        "Interface", columns=["name", "external_ids", "ofport"],
        if_exists=True)
    for result in results:
        if result["ofport"] == UNASSIGNED_OFPORT:
            # LOG.warn is a deprecated alias; use LOG.warning.
            LOG.warning(_LW("Found not yet ready openvswitch port: %s"),
                        result["name"])
        elif result["ofport"] == INVALID_OFPORT:
            LOG.warning(_LW("Found failed openvswitch port: %s"),
                        result["name"])
        elif "attached-mac" in result["external_ids"]:
            port_id = self.portid_from_external_ids(result["external_ids"])
            if port_id:
                edge_ports.add(port_id)
    return edge_ports
示例13: vxlan_mcast_supported
def vxlan_mcast_supported(self):
    """Return True when VXLAN multicast mode prerequisites are met.

    Requires a configured vxlan_group and 'ip link add type vxlan'
    support for the 'proxy' argument.
    """
    if not cfg.CONF.VXLAN.vxlan_group:
        # Fixed typo in the log message: "muticast" -> "multicast".
        LOG.warning(
            _LW("VXLAN multicast group(s) must be provided in " "vxlan_group option to enable VXLAN MCAST mode")
        )
        return False
    if not ip_lib.iproute_arg_supported(["ip", "link", "add", "type", "vxlan"], "proxy"):
        LOG.warning(
            _LW('Option "%(option)s" must be supported by command ' '"%(command)s" to enable %(mode)s mode'),
            {"option": "proxy", "command": "ip link add type vxlan", "mode": "VXLAN MCAST"},
        )
        return False
    return True
示例14: _delete_network
def _delete_network(self, plugin_context, network_id, clean_session=True):
    """Delete a network, tolerating one that is already gone."""
    try:
        self._delete_resource(self._core_plugin, plugin_context,
                              'network', network_id,
                              clean_session=clean_session)
    except n_exc.NetworkNotFound:
        # Another worker may have removed it concurrently; not an error.
        LOG.warning(_LW('Network %s already deleted'), network_id)
示例15: vxlan_mcast_supported
def vxlan_mcast_supported(self):
    """Return True when VXLAN multicast mode prerequisites are met.

    Requires a configured vxlan_group and 'ip link add type vxlan'
    support for the 'proxy' argument.
    """
    if not cfg.CONF.VXLAN.vxlan_group:
        # Fixed typo in the log message: "muticast" -> "multicast".
        LOG.warning(_LW('VXLAN multicast group(s) must be provided in '
                        'vxlan_group option to enable VXLAN MCAST mode'))
        return False
    if not ip_lib.iproute_arg_supported(
            ['ip', 'link', 'add', 'type', 'vxlan'],
            'proxy'):
        LOG.warning(_LW('Option "%(option)s" must be supported by command '
                        '"%(command)s" to enable %(mode)s mode'),
                    {'option': 'proxy',
                     'command': 'ip link add type vxlan',
                     'mode': 'VXLAN MCAST'})
        return False
    return True