This article collects typical usage examples of the _LE function from senlin.common.i18n in Python. If you have been wondering what exactly _LE does, how to call it, or what it looks like in real code, the hand-picked examples below should help.
The article presents 15 code examples of _LE, sorted by popularity by default.
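Before reading the examples, it helps to know where _LE comes from. The sketch below shows how senlin.common.i18n typically wires it up through oslo.i18n; this follows the common OpenStack pattern of that era rather than being a verbatim copy of the module, so treat the exact contents as an assumption.

# A minimal sketch of how senlin.common.i18n typically defines _ and _LE,
# following the standard oslo.i18n pattern; the actual module may differ.
import oslo_i18n

_translators = oslo_i18n.TranslatorFactory(domain='senlin')

# The primary translation function, conventionally imported as _
_ = _translators.primary

# Marker functions for translatable log messages at each level;
# _LE is the one used for LOG.error()/LOG.exception() messages.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error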
Example 1: pre_op
def pre_op(self, cluster_id, action):
"""Callback function when cluster membership is about to change.
:param cluster_id: ID of the target cluster.
:param action: The action that triggers this policy check.
:returns: ``None``.
"""
if action.action == consts.CLUSTER_SCALE_IN:
expand = False
# use action input directly if available
count = action.inputs.get('count', None)
if not count:
# check if policy decisions available
pd = action.data.get('deletion', None)
count = pd.get('count', 1) if pd else 1
else:
# this is an action that inflates the cluster
expand = True
count = action.inputs.get('count', None)
if not count:
# check if policy decisions available
pd = action.data.get('creation', None)
count = pd.get('count', 1) if pd else 1
cluster = cluster_mod.Cluster.load(action.context, cluster_id)
kc = self._keystone(cluster)
regions_good = kc.validate_regions(self.regions.keys())
if len(regions_good) == 0:
action.data['status'] = base.CHECK_ERROR
action.data['reason'] = _('No region is found usable.')
LOG.error(_LE('No region is found usable.'))
return
regions = {}
for r in self.regions.items():
if r[0] in regions_good:
regions[r[0]] = r[1]
current_dist = cluster.get_region_distribution(regions_good)
result = self._create_plan(current_dist, regions, count, expand)
if not result:
action.data['status'] = base.CHECK_ERROR
action.data['reason'] = _('There is no feasible plan to '
'handle all nodes.')
LOG.error(_LE('There is no feasible plan to handle all nodes.'))
return
if expand:
if 'creation' not in action.data:
action.data['creation'] = {}
action.data['creation']['count'] = count
action.data['creation']['regions'] = result
else:
if 'deletion' not in action.data:
action.data['deletion'] = {}
action.data['deletion']['count'] = count
action.data['deletion']['regions'] = result
Example 2: __call__
def __call__(self, request):
"""WSGI method that controls (de)serialization and method dispatch."""
action_args = self.get_action_args(request.environ)
action = action_args.pop('action', None)
try:
deserialized_request = self.dispatch(self.deserializer,
action, request)
action_args.update(deserialized_request)
LOG.debug(('Calling %(controller)s : %(action)s'),
{'controller': self.controller, 'action': action})
action_result = self.dispatch(self.controller, action,
request, **action_args)
except TypeError as err:
LOG.error(_LE('Exception handling resource: %s') % err)
msg = _('The server could not comply with the request since '
'it is either malformed or otherwise incorrect.')
err = webob.exc.HTTPBadRequest(msg)
http_exc = translate_exception(err, request.best_match_language())
# NOTE(luisg): We disguise HTTP exceptions, otherwise they will be
# treated by wsgi as responses ready to be sent back and they
# won't make it into the pipeline app that serializes errors
raise exception.HTTPExceptionDisguise(http_exc)
except webob.exc.HTTPException as err:
if not isinstance(err, webob.exc.HTTPError):
# Some HTTPException are actually not errors, they are
# responses ready to be sent back to the users, so we don't
# create error log, but disguise and translate them to meet
# openstacksdk's need.
http_exc = translate_exception(err,
request.best_match_language())
raise exception.HTTPExceptionDisguise(http_exc)
if isinstance(err, webob.exc.HTTPServerError):
LOG.error(
_LE("Returning %(code)s to user: %(explanation)s"),
{'code': err.code, 'explanation': err.explanation})
http_exc = translate_exception(err, request.best_match_language())
raise exception.HTTPExceptionDisguise(http_exc)
except exception.SenlinException as err:
raise translate_exception(err, request.best_match_language())
except Exception as err:
log_exception(err, sys.exc_info())
raise translate_exception(err, request.best_match_language())
serializer = self.serializer or serializers.JSONResponseSerializer()
try:
response = webob.Response(request=request)
self.dispatch(serializer, action, response, action_result)
return response
# return unserializable result (typically an exception)
except Exception:
return action_result
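Example 2 shows both calling styles that recur throughout this page: eager interpolation, where the _LE string is expanded with % before it reaches the logger, and deferred interpolation, where the format string and the parameter dict are passed to LOG.error separately. Below is a minimal sketch of the deferred style, assuming a standard oslo.log logger; the function and message names are illustrative only.

# A minimal sketch of the deferred-interpolation logging style used in
# several examples on this page; names and the message text are illustrative.
from oslo_log import log as logging

from senlin.common.i18n import _LE

LOG = logging.getLogger(__name__)


def report_bad_region(region_name):
    # The logger substitutes the parameters only when the record is emitted,
    # and the translatable format string stays intact for catalog lookup.
    LOG.error(_LE('Region %(region)s failed validation.'),
              {'region': region_name})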
Example 3: pre_op
def pre_op(self, cluster_id, action):
"""Callback function when cluster membership is about to change.
:param cluster_id: ID of the target cluster.
:param action: The action that triggers this policy check.
"""
count = action.inputs.get('count', None)
if action.action == consts.CLUSTER_SCALE_IN:
expand = False
if not count:
pd = action.data.get('deletion', None)
count = pd.get('count', 1) if pd else 1
else:
expand = True
if not count:
pd = action.data.get('creation', None)
count = pd.get('count', 1) if pd else 1
cluster = cluster_mod.Cluster.load(action.context, cluster_id)
nc = self._nova(cluster)
zones_good = nc.validate_azs(self.zones.keys())
if len(zones_good) == 0:
action.data['status'] = base.CHECK_ERROR
action.data['reason'] = _('No availability zone found available.')
LOG.error(_LE('No availability zone found available.'))
return
zones = {}
for z, w in self.zones.items():
if z in zones_good:
zones[z] = w
current = cluster.get_zone_distribution(action.context, zones.keys())
result = self._create_plan(current, zones, count, expand)
if not result:
action.data['status'] = base.CHECK_ERROR
action.data['reason'] = _('There is no feasible plan to '
'handle all nodes.')
LOG.error(_LE('There is no feasible plan to handle all nodes.'))
return
if expand:
if 'creation' not in action.data:
action.data['creation'] = {}
action.data['creation']['count'] = count
action.data['creation']['zones'] = result
else:
if 'deletion' not in action.data:
action.data['deletion'] = {}
action.data['deletion']['count'] = count
action.data['deletion']['zones'] = result
Example 4: member_add
def member_add(self, node, lb_id, pool_id, port, subnet):
"""Add a member to Neutron lbaas pool.
:param node: A node object to be added to the specified pool.
:param lb_id: The ID of the loadbalancer.
:param pool_id: The ID of the pool for receiving the node.
:param port: The port for the new LB member to be created.
:param subnet: The subnet to be used by the new LB member.
:returns: The ID of the new LB member or None if errors occurred.
"""
addresses = self._get_node_address(node, version=4)
if not addresses:
LOG.error(_LE('Node (%(n)s) does not have valid IPv4 address.'),
{'n': node.id})
return None
try:
subnet_obj = self.nc().subnet_get(subnet)
net_id = subnet_obj.network_id
net = self.nc().network_get(net_id)
except exception.InternalError as ex:
resource = 'subnet' if subnet in ex.message else 'network'
msg = _LE('Failed in getting %(resource)s: %(msg)s.'
) % {'resource': resource, 'msg': six.text_type(ex)}
LOG.exception(msg)
event.warning(oslo_context.get_current(), self,
resource.upper()+'_GET', 'ERROR', msg)
return None
net_name = net.name
if net_name not in addresses:
LOG.error(_LE('Node is not in subnet %(subnet)s'),
{'subnet': subnet})
return None
address = addresses[net_name]
try:
member = self.nc().pool_member_create(pool_id, address, port,
subnet_obj.id)
except exception.InternalError as ex:
msg = _LE('Failed in creating lb pool member: %s.'
) % six.text_type(ex)
LOG.exception(msg)
event.warning(oslo_context.get_current(), self,
'POOL_MEMBER_CREATE', 'ERROR', msg)
return None
res = self._wait_for_lb_ready(lb_id)
if res is False:
LOG.error(_LE('Failed in creating pool member (%s).') % member.id)
return None
return member.id
Example 5: pre_op
def pre_op(self, cluster_id, action):
"""Callback function when cluster membership is about to change.
:param cluster_id: ID of the target cluster.
:param action: The action that triggers this policy check.
:returns: ``None``.
"""
count = self._get_count(cluster_id, action)
if count == 0:
return
expand = True
if count < 0:
expand = False
count = -count
cluster = cluster_mod.Cluster.load(action.context, cluster_id)
kc = self._keystone(cluster)
regions_good = kc.validate_regions(self.regions.keys())
if len(regions_good) == 0:
action.data['status'] = base.CHECK_ERROR
action.data['reason'] = _('No region is found usable.')
LOG.error(_LE('No region is found usable.'))
return
regions = {}
for r in self.regions.items():
if r[0] in regions_good:
regions[r[0]] = r[1]
current_dist = cluster.get_region_distribution(regions_good)
result = self._create_plan(current_dist, regions, count, expand)
if not result:
action.data['status'] = base.CHECK_ERROR
action.data['reason'] = _('There is no feasible plan to '
'handle all nodes.')
LOG.error(_LE('There is no feasible plan to handle all nodes.'))
return
if expand:
if 'creation' not in action.data:
action.data['creation'] = {}
action.data['creation']['count'] = count
action.data['creation']['regions'] = result
else:
if 'deletion' not in action.data:
action.data['deletion'] = {}
action.data['deletion']['count'] = count
action.data['deletion']['regions'] = result
Example 6: attach
def attach(self, cluster):
"""Routine to be invoked when policy is to be attached to a cluster.
:param cluster: The target cluster to be attached to.
:returns: When the operation was successful, returns a tuple (True,
message); otherwise, return a tuple (False, error).
"""
data = {}
nv_client = self.nova(cluster)
placement_group = self.properties.get(self.PLACEMENT_GROUP)
group_name = placement_group.get('group_name', None)
if group_name is None:
profile = profile_base.Profile.load(
oslo_context.get_current(), cluster.profile_id)
if 'scheduler_hints' in profile.spec:
hints = profile.spec['scheduler_hints']
group_name = hints.get('group', None)
if group_name is not None:
# to add into nova driver
try:
server_group = nv_client.get_server_group(group_name)
except exception.InternalError as ex:
msg = 'Failed in searching server_group'
LOG.exception(_LE('%(msg)s: %(ex)s') % {
'msg': msg, 'ex': six.text_type(ex)})
return False, msg
data['group_id'] = server_group.id
data['inherited_group'] = True
if data.get('group_id') is None:
# to add into nova driver
rule = placement_group.get('placement_rule', 'anti-affinity')
try:
server_group = nv_client.create_server_group(rule)
except exception.InternalError as ex:
msg = 'Failed in creating server_group'
LOG.exception(_LE('%(msg)s: %(ex)s') % {
'msg': msg, 'ex': six.text_type(ex)})
return False, msg
data['group_id'] = server_group.id
data['inherited_group'] = False
policy_data = self._build_policy_data(data)
return True, policy_data
Example 7: detach
def detach(self, cluster):
"""Routine to be called when the policy is detached from a cluster.
:param cluster: The cluster from which the policy is to be detached.
:returns: When the operation was successful, returns a tuple of
(True, data) where the data contains references to the
resources created; otherwise returns a tuple of (False,
error) where the error contains an error message.
"""
reason = _('Servergroup resource deletion succeeded.')
ctx = context.get_admin_context()
binding = cpo.ClusterPolicy.get(ctx, cluster.id, self.id)
if not binding or not binding.data:
return True, reason
policy_data = self._extract_policy_data(binding.data)
if not policy_data:
return True, reason
group_id = policy_data.get('servergroup_id', None)
inherited_group = policy_data.get('inherited_group', False)
if group_id and not inherited_group:
try:
self.nova(cluster).delete_server_group(group_id)
except Exception as ex:
msg = _('Failed in deleting servergroup.')
LOG.exception(_LE('%(msg)s: %(ex)s') % {
'msg': msg, 'ex': six.text_type(ex)})
return False, msg
return True, reason
Example 8: ActionProc
def ActionProc(context, action_id):
'''Action process.'''
# Step 1: materialize the action object
action = Action.load(context, action_id=action_id)
if action is None:
LOG.error(_LE('Action "%s" could not be found.'), action_id)
return False
# TODO(Anyone): Remove context usage in event module
EVENT.info(action.context, action, action.action, 'START')
reason = 'Action completed'
success = True
try:
# Step 2: execute the action
result, reason = action.execute()
except Exception as ex:
# We catch exception here to make sure the following logics are
# executed.
result = action.RES_ERROR
reason = six.text_type(ex)
LOG.exception(_('Unexpected exception occurred during action '
'%(action)s (%(id)s) execution: %(reason)s'),
{'action': action.action, 'id': action.id,
'reason': reason})
success = False
finally:
# NOTE: locks on action is eventually released here by status update
action.set_status(result, reason)
return success
Example 9: node_lock_acquire
def node_lock_acquire(node_id, action_id, forced=False):
'''Try to lock the specified node.
:param forced: set to True to cancel the current action that
owns the lock, if any.
'''
# Step 1: try lock the node - if the returned owner_id is the
# action id, it was a success
owner = db_api.node_lock_acquire(node_id, action_id)
if action_id == owner:
return True
# Step 2: retry using global configuration options
retries = cfg.CONF.lock_retry_times
retry_interval = cfg.CONF.lock_retry_interval
while retries > 0:
scheduler.sleep(retry_interval)
owner = db_api.node_lock_acquire(node_id, action_id)
if action_id == owner:
return True
retries = retries - 1
# Step 3: Last resort is 'forced locking', only needed when retry failed
if forced:
owner = db_api.node_lock_steal(node_id, action_id)
return action_id == owner
LOG.error(_LE('Node is already locked by action %(old)s, '
'action %(new)s failed grabbing the lock') % {
'old': owner, 'new': action_id})
return False
Example 10: detach
def detach(self, cluster):
"""Routine to be called when the policy is detached from a cluster.
:param cluster: The cluster from which the policy is to be detached.
:returns: When the operation was successful, returns a tuple of
(True, data) where the data contains references to the
resources created; otherwise returns a tuple of (False,
error) where the err contains a error message.
"""
reason = _('Server group resources deletion succeeded')
cp = cluster_policy.ClusterPolicy.load(oslo_context.get_current(),
cluster.id, self.id)
if cp is None or cp.data is None:
return True, reason
policy_data = self._extract_policy_data(cp.data)
if policy_data is None:
return True, reason
group_id = policy_data.get('group_id', None)
inherited_group = policy_data.get('inherited_group', False)
if group_id and not inherited_group:
try:
# to add into nova driver
self.nova(cluster).delete_server_group(group_id)
except exception.InternalError as ex:
msg = 'Failed in deleting server_group'
LOG.exception(_LE('%(msg)s: %(ex)s') % {
'msg': msg, 'ex': six.text_type(ex)})
return False, msg
return True, reason
Example 11: hup
def hup(*args):
# Shuts down the server(s), but allows running requests to complete
self.LOG.error(_LE('SIGHUP received'))
signal.signal(signal.SIGHUP, signal.SIG_IGN)
os.killpg(0, signal.SIGHUP)
signal.signal(signal.SIGHUP, hup)
Example 12: execute
def execute(self, **kwargs):
'''Wrapper of action execution.
This is mainly a wrapper that executes an action with cluster lock
acquired.
:return: A tuple (res, reason) that indicates whether the execution
was a success and why if it wasn't a success.
'''
try:
cluster = cluster_mod.Cluster.load(self.context, self.target)
except exception.NotFound:
reason = _('Cluster %(id)s not found') % {'id': self.target}
LOG.error(_LE(reason))
return self.RES_ERROR, reason
# Try to lock cluster before do real operation
forced = True if self.action == self.CLUSTER_DELETE else False
res = senlin_lock.cluster_lock_acquire(cluster.id, self.id,
senlin_lock.CLUSTER_SCOPE,
forced)
if not res:
return self.RES_ERROR, _('Failed locking cluster')
try:
res, reason = self._execute(cluster)
finally:
senlin_lock.cluster_lock_release(cluster.id, self.id,
senlin_lock.CLUSTER_SCOPE)
return res, reason
Example 13: kill_children
def kill_children(self, *args):
"""Kills the entire process group."""
LOG.error(_LE('SIGTERM received'))
signal.signal(signal.SIGTERM, signal.SIG_IGN)
signal.signal(signal.SIGINT, signal.SIG_IGN)
self.running = False
os.killpg(0, signal.SIGTERM)
Example 14: cluster_lock_acquire
def cluster_lock_acquire(context, cluster_id, action_id, engine=None,
scope=CLUSTER_SCOPE, forced=False):
"""Try to lock the specified cluster.
:param cluster_id: ID of the cluster to be locked.
:param action_id: ID of the action which wants to lock the cluster.
:param engine: ID of the engine which wants to lock the cluster.
:param scope: scope of lock, could be cluster wide lock, or node-wide
lock.
:param forced: set to True to cancel current action that owns the lock,
if any.
:returns: True if lock is acquired, or False otherwise.
"""
# Step 1: try lock the cluster - if the returned owner_id is the
# action id, it was a success
owners = db_api.cluster_lock_acquire(cluster_id, action_id, scope)
if action_id in owners:
return True
# Step 2: retry using global configuration options
retries = cfg.CONF.lock_retry_times
retry_interval = cfg.CONF.lock_retry_interval
while retries > 0:
scheduler.sleep(retry_interval)
LOG.debug('Acquire lock for cluster %s again' % cluster_id)
owners = db_api.cluster_lock_acquire(cluster_id, action_id, scope)
if action_id in owners:
return True
retries = retries - 1
# Step 3: Last resort is 'forced locking', only needed when retry failed
if forced:
owners = db_api.cluster_lock_steal(cluster_id, action_id)
return action_id in owners
# Will reach here only because scope == CLUSTER_SCOPE
action = db_api.action_get(context, owners[0])
if (action and action.owner and action.owner != engine and
is_engine_dead(context, action.owner)):
LOG.info(_LI('The cluster %(c)s is locked by dead action %(a)s, '
'try to steal the lock.'), {
'c': cluster_id,
'a': owners[0]
})
reason = _('Engine died when executing this action.')
db_api.action_mark_failed(context, action.id, time.time(),
reason=reason)
owners = db_api.cluster_lock_steal(cluster_id, action_id)
return action_id in owners
LOG.error(_LE('Cluster is already locked by action %(old)s, '
'action %(new)s failed grabbing the lock'),
{'old': str(owners), 'new': action_id})
return False
Example 15: node_lock_acquire
def node_lock_acquire(context, node_id, action_id, engine=None,
forced=False):
"""Try to lock the specified node.
:param context: the context used for DB operations;
:param node_id: ID of the node to be locked.
:param action_id: ID of the action that attempts to lock the node.
:param engine: ID of the engine that attempts to lock the node.
:param forced: set to True to cancel current action that owns the lock,
if any.
:returns: True if lock is acquired, or False otherwise.
"""
# Step 1: try lock the node - if the returned owner_id is the
# action id, it was a success
owner = db_api.node_lock_acquire(node_id, action_id)
if action_id == owner:
return True
# Step 2: retry using global configuration options
retries = cfg.CONF.lock_retry_times
retry_interval = cfg.CONF.lock_retry_interval
while retries > 0:
scheduler.sleep(retry_interval)
LOG.debug('Acquire lock for node %s again' % node_id)
owner = db_api.node_lock_acquire(node_id, action_id)
if action_id == owner:
return True
retries = retries - 1
# Step 3: Last resort is 'forced locking', only needed when retry failed
if forced:
owner = db_api.node_lock_steal(node_id, action_id)
return action_id == owner
# Check whether this node is locked by a dead engine
action = db_api.action_get(context, owner)
if (action and action.owner and action.owner != engine and
is_engine_dead(context, action.owner)):
LOG.info(_LI('The node %(n)s is locked by dead action %(a)s, '
'try to steal the lock.'), {
'n': node_id,
'a': owner
})
reason = _('Engine died when executing this action.')
db_api.action_mark_failed(context, action.id, time.time(),
reason=reason)
db_api.node_lock_steal(node_id, action_id)
return True
LOG.error(_LE('Node is already locked by action %(old)s, '
'action %(new)s failed grabbing the lock'),
{'old': owner, 'new': action_id})
return False