This article collects typical code examples showing how the _ function from Python's senlin.common.i18n module is used. If you have been wondering what exactly the _ function does, how to call it, or what real-world usage looks like, the hand-picked examples below should help.
The sections that follow present 15 code examples of the _ function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
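
Before reading the examples, it helps to know what _ actually is: in Senlin, as in other OpenStack projects, it is the primary translation marker exposed by senlin.common.i18n on top of oslo.i18n. User-facing message strings are wrapped in _() so they can be collected for translation, and then formatted with normal % substitution. The sketch below shows the typical wiring, assuming the standard oslo_i18n pattern; the exact contents of senlin/common/i18n.py may differ slightly between Senlin releases.

# senlin/common/i18n.py (simplified sketch, following the common
# OpenStack oslo.i18n pattern; details may vary between releases)
import oslo_i18n

_translators = oslo_i18n.TranslatorFactory(domain='senlin')

# The primary translation marker, used as _('...') throughout the code base.
_ = _translators.primary

# Log-level markers such as _LW (seen in Example 10) come from the same factory.
_LW = _translators.log_warning

# Typical usage in another module (illustrative only):
#     from senlin.common.i18n import _
#     msg = _('Node %(node)s not found.') % {'node': node_id}
#     raise SomeException(msg)
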
Example 1: parse_exception
def parse_exception(ex):
    '''Parse exception code and yield useful information.

    :param ex: the exception to be parsed.
    '''
    if isinstance(ex, exc.HttpException):
        record = jsonutils.loads(ex.details)
    elif isinstance(ex, reqexc.RequestException):
        # Exceptions that are not captured by SDK
        code = ex.message[1].errno
        record = {
            'error': {
                'code': code,
                'message': ex.message[0],
            }
        }
    else:
        print(_('Unknown exception: %s') % ex)
        return

    try:
        code = record['error'].get('code', None)
        if code is None:
            code = record['code']
            record['error']['code'] = code
    except KeyError as err:
        print(_('Malformed exception record, missing field "%s"') % err)
        print(_('Original error record: %s') % record)
        return

    if code in _EXCEPTION_MAP:
        inst = _EXCEPTION_MAP.get(code)
        return inst(record)
    else:
        return HTTPException(record)

Example 2: dependency_add
def dependency_add(context, depended, dependent):
    if isinstance(depended, list) and isinstance(dependent, list):
        raise exception.NotSupport(
            _('Multiple dependencies between lists not supported'))

    with session_for_write() as session:
        if isinstance(depended, list):  # e.g. D depends on A,B,C
            for d in depended:
                r = models.ActionDependency(depended=d, dependent=dependent)
                session.add(r)

            query = session.query(models.Action).filter_by(id=dependent)
            query.update({'status': consts.ACTION_WAITING,
                          'status_reason': _('Waiting for depended actions.')},
                         synchronize_session='fetch')
            return

        # Only dependent can be a list now, convert it to a list if it
        # is not a list
        if not isinstance(dependent, list):  # e.g. B,C,D depend on A
            dependents = [dependent]
        else:
            dependents = dependent

        for d in dependents:
            r = models.ActionDependency(depended=depended, dependent=d)
            session.add(r)

        q = session.query(models.Action).filter(
            models.Action.id.in_(dependents))
        q.update({'status': consts.ACTION_WAITING,
                  'status_reason': _('Waiting for depended actions.')},
                 synchronize_session='fetch')

Example 3: _sanitize_policy
def _sanitize_policy(self, data):
    """Validate dict body of policy attach or update.

    :param dict data: A dictionary containing the properties of the policy
        to be attached/updated including the policy ID.
    :returns: A sanitized dict containing the policy properties.
    :raises: :class:`~webob.exception.HTTPBadRequest` if the policy dict
        contains invalid property values.
    """
    if not isinstance(data, dict):
        msg = _("The data provided is not a map.")
        raise exc.HTTPBadRequest(msg)

    if consts.CP_POLICY_ID not in data:
        msg = _("The 'policy_id' field is missing in the request.")
        raise exc.HTTPBadRequest(msg)

    if consts.CP_ENABLED in data:
        enabled = data.get(consts.CP_ENABLED)
        try:
            enabled = utils.parse_bool_param(consts.CP_ENABLED, enabled)
        except senlin_exc.InvalidParameter as ex:
            raise exc.HTTPBadRequest(six.text_type(ex))
        data[consts.CP_ENABLED] = enabled

    return data

Example 4: test_cluster_add_nodes_node_already_owned
def test_cluster_add_nodes_node_already_owned(self, notify):
    c1 = self.eng.cluster_create(self.ctx, 'c-1', 0, self.profile['id'])
    cid1 = c1['id']
    c2 = self.eng.cluster_create(self.ctx, 'c-2', 0, self.profile['id'])
    cid2 = c2['id']
    nodes1 = self._prepare_nodes(self.ctx, count=1, cluster_id=cid1)
    nodes2 = self._prepare_nodes(self.ctx, count=1, cluster_id=cid2)

    ex = self.assertRaises(rpc.ExpectedException,
                           self.eng.cluster_add_nodes,
                           self.ctx, cid1, nodes1)

    # adding from the same cluster is not allowed
    self.assertEqual(exception.NodeNotOrphan, ex.exc_info[0])
    msg = _("Nodes %s owned by other cluster, need to delete them from "
            "those clusters first.") % nodes1
    self.assertEqual(msg, six.text_type(ex.exc_info[1]))

    ex = self.assertRaises(rpc.ExpectedException,
                           self.eng.cluster_add_nodes,
                           self.ctx, cid1, nodes2)

    # adding from a different cluster is not allowed either
    self.assertEqual(exception.NodeNotOrphan, ex.exc_info[0])
    msg = _("Nodes %s owned by other cluster, need to delete them from "
            "those clusters first.") % nodes2
    self.assertEqual(msg, six.text_type(ex.exc_info[1]))

Example 5: do_rebuild
def do_rebuild(self, obj):
    if not obj.physical_id:
        return False

    self.server_id = obj.physical_id

    try:
        server = self.nova(obj).server_get(self.server_id)
    except Exception as ex:
        LOG.exception(_('Failed at getting server: %s'),
                      six.text_type(ex))
        return False

    if server is None or server.image is None:
        return False

    image_id = server.image['id']
    admin_pass = self.properties.get(self.ADMIN_PASS)

    try:
        self.nova(obj).server_rebuild(self.server_id, image_id,
                                      self.properties.get(self.NAME),
                                      admin_pass)
        self.nova(obj).wait_for_server(self.server_id, 'ACTIVE')
    except Exception as ex:
        LOG.exception(_('Failed at rebuilding server: %s'),
                      six.text_type(ex))
        return False

    return True

Example 6: truncate_desired
def truncate_desired(cluster, desired, min_size, max_size):
    '''Do truncation of desired capacity for non-strict cases.'''

    if min_size is not None and desired < min_size:
        desired = min_size
        LOG.debug(_("Truncating shrinkage to specified min_size (%s).")
                  % desired)

    if min_size is None and desired < cluster.min_size:
        desired = cluster.min_size
        LOG.debug(_("Truncating shrinkage to cluster's min_size (%s).")
                  % desired)

    if max_size is not None and max_size > 0 and desired > max_size:
        desired = max_size
        LOG.debug(_("Truncating growth to specified max_size (%s).")
                  % desired)

    if (max_size is None and desired > cluster.max_size and
            cluster.max_size > 0):
        desired = cluster.max_size
        LOG.debug(_("Truncating growth to cluster's max_size (%s).")
                  % desired)

    return desired

Example 7: _wait_for_dependents
def _wait_for_dependents(self):
    self.get_status()
    reason = ''
    while self.status != self.READY:
        if self.status == self.FAILED:
            reason = _('%(action)s [%(id)s] failed due to dependent '
                       'action failure') % {'action': self.action,
                                            'id': self.id}
            LOG.debug(reason)
            return self.RES_ERROR, reason

        if self.is_cancelled():
            # If a cancel request comes in during this period, cancel the
            # cluster operation immediately, then release the cluster
            # lock and return.
            reason = _('%(action)s %(id)s cancelled') % {
                'action': self.action, 'id': self.id}
            LOG.debug(reason)
            return self.RES_CANCEL, reason

        if self.is_timeout():
            # Action timed out, return
            reason = _('%(action)s %(id)s timeout') % {
                'action': self.action, 'id': self.id}
            LOG.debug(reason)
            return self.RES_TIMEOUT, reason

        # Continue waiting (with reschedule)
        scheduler.reschedule(self, 1)
        self.get_status()

    return self.RES_OK, 'All dependents ended with success'

Example 8: do_create
def do_create(self):
    """Handler for the NODE_CREATE action.

    :returns: A tuple containing the result and the corresponding reason.
    """
    if self.node.cluster_id and self.cause == base.CAUSE_RPC:
        # If node is created with target cluster specified,
        # check cluster size constraint
        cluster = cm.Cluster.load(self.context, self.node.cluster_id)
        result = scaleutils.check_size_params(
            cluster, cluster.desired_capacity + 1, None, None, True)
        if result:
            return self.RES_ERROR, result

    res = self.node.do_create(self.context)
    if res:
        if self.node.cluster_id and self.cause == base.CAUSE_RPC:
            # Update cluster desired_capacity if node creation succeeded
            cluster.desired_capacity += 1
            cluster.store(self.context)
            cluster.add_node(self.node)
        return self.RES_OK, _('Node created successfully.')
    else:
        return self.RES_ERROR, _('Node creation failed.')

Example 9: node_update
def node_update(context, node_id, values):
    '''Update a node with new property values.

    :param node_id: ID of the node to be updated.
    :param values: A dictionary of values to be updated on the node.
    :raises NotFound: The specified node does not exist in the database.
    '''
    session = _session(context)
    session.begin()

    node = session.query(models.Node).get(node_id)
    if not node:
        session.rollback()
        raise exception.NotFound(
            _('Attempt to update a node with id "%s" that does '
              'not exist failed.') % node_id)

    node.update(values)
    node.save(session)
    if 'status' in values and node.cluster_id is not None:
        cluster = session.query(models.Cluster).get(node.cluster_id)
        if cluster is not None:
            if values['status'] == 'ERROR':
                cluster.status = 'WARNING'
            if 'status_reason' in values:
                cluster.status_reason = _('Node %(node)s: %(reason)s') % {
                    'node': node.name, 'reason': values['status_reason']}
            cluster.save(session)
    session.commit()

Example 10: pre_op
def pre_op(self, cluster_id, action, policy_data):
    nodes = db_api.node_get_all_by_cluster(action.context, cluster_id)
    current_size = len(nodes)

    if self.adjustment_type == self.EXACT_CAPACITY:
        count = self.adjustment_number - current_size
    elif self.adjustment_type == self.CHANGE_IN_CAPACITY:
        count = self.adjustment_number
    elif self.adjustment_type == self.CHANGE_IN_PERCENTAGE:
        count = int((self.adjustment_number * current_size) / 100.0)
        if count < self.adjustment_min_step:
            count = self.adjustment_min_step

    if current_size + count > self.max_size:
        policy_data.status = base.CHECK_ERROR
        policy_data.reason = _('Attempted scaling exceeds maximum size')
    elif current_size + count < self.min_size:
        policy_data.status = base.CHECK_ERROR
        policy_data.reason = _('Attempted scaling exceeds minimum size')
    else:
        policy_data.status = base.CHECK_OK
        policy_data.reason = _('Scaling request validated')

    pd = {'count': count}
    if action.action == consts.CLUSTER_SCALE_OUT:
        if count < 0:
            LOG.warning(_LW('Requesting a scale out operation but scaling '
                            'policy generates a negative count.'))
        policy_data['creation'] = pd
    elif action.action == consts.CLUSTER_SCALE_IN:
        if count > 0:
            LOG.warning(_LW('Requesting a scale in operation but scaling '
                            'policy generates a positive count.'))
        policy_data['deletion'] = pd

    return policy_data

Example 11: do_delete
def do_delete(self):
    """Handler for the NODE_DELETE action.

    :returns: A tuple containing the result and the corresponding reason.
    """
    if self.node.cluster_id and self.cause == base.CAUSE_RPC:
        # If node belongs to a cluster, check size constraint
        # before deleting it
        cluster = cluster_mod.Cluster.load(self.context,
                                           self.node.cluster_id)
        result = scaleutils.check_size_params(cluster,
                                              cluster.desired_capacity - 1,
                                              None, None, True)
        if result:
            return self.RES_ERROR, result

    res = self.node.do_delete(self.context)
    if res:
        if self.node.cluster_id and self.cause == base.CAUSE_RPC:
            # Update cluster desired_capacity if node deletion succeeded
            cluster.desired_capacity -= 1
            cluster.store(self.context)
            cluster.remove_node(self.node.id)
        return self.RES_OK, _('Node deleted successfully.')
    else:
        return self.RES_ERROR, _('Node deletion failed.')

Example 12: action
def action(self, req, node_id, body=None):
    '''Perform specified action on a node.'''
    body = body or {}
    if len(body) == 0:
        raise exc.HTTPBadRequest(_('No action specified'))

    if len(body) > 1:
        raise exc.HTTPBadRequest(_('Multiple actions specified'))

    this_action = list(body.keys())[0]
    if this_action not in self.SUPPORTED_ACTIONS:
        msg = _('Unrecognized action "%s" specified') % this_action
        raise exc.HTTPBadRequest(msg)

    if this_action == self.NODE_JOIN:
        cluster_id = body.get(this_action).get('cluster_id')
        if cluster_id is None:
            raise exc.HTTPBadRequest(_('No cluster specified'))
        res = self.rpc_client.node_join(req.context, node_id, cluster_id)
    elif this_action == self.NODE_LEAVE:
        res = self.rpc_client.node_leave(req.context, node_id)
    else:
        raise exc.HTTPInternalServerError(_('Unexpected action "%s"') %
                                          this_action)

    return res

Example 13: do_recover
def do_recover(self, context, **options):
    """Recover a node.

    This function is supposed to be invoked from a NODE_RECOVER action.
    """
    if not self.physical_id:
        return False

    self.set_status(context, self.RECOVERING,
                    reason=_('Recover in progress'))

    try:
        physical_id = profile_base.Profile.recover_object(context, self,
                                                          **options)
    except exception.ResourceStatusError as ex:
        self._handle_exception(context, 'recover', self.ERROR, ex)
        return False

    if not physical_id:
        self.set_status(context, self.ERROR, reason=_('Recover failed'))
        return False

    self.set_status(context, self.ACTIVE, reason=_('Recover succeeded'))
    if self.physical_id != physical_id:
        self.physical_id = physical_id
        self.store(context)

    return True

Example 14: test_generate_url
def test_generate_url(self, mock_service_get, mock_endpoint_get,
                      mock_init):
    mock_init.return_value = None
    mock_service_get.return_value = {
        'id': 'SENLIN_SERVICE_ID'
    }
    mock_endpoint_get.return_value = {
        'url': 'HTTP://HOST_IP:PORT/V1/$(tenant_id)s'
    }

    kwargs = {
        'id': 'WEBHOOK_ID',
        'name': 'test-webhook',
        'user': 'test-user',
        'project': 'test-project',
        'domain': 'test-domain',
        'created_time': timeutils.utcnow(),
        'deleted_time': None,
        'credential': self.credential,
        'params': self.params
    }

    webhook = webhook_mod.Webhook('test-obj-id', 'test-obj-type',
                                  'test-action', **kwargs)
    key = 'test-key'
    res1, res2 = webhook.generate_url(key)

    expected_url = _('HTTP://HOST_IP:PORT/V1/%(tenant_id)s/webhooks/'
                     '%(webhook_id)s/trigger?key=%(key)s'
                     ) % {'tenant_id': 'test-project',
                          'webhook_id': webhook.id,
                          'key': six.text_type(key)}
    self.assertEqual(expected_url, res1)
    self.assertEqual(key, res2)

    # Senlin service not found
    mock_service_get.return_value = None
    ex = self.assertRaises(exception.ResourceNotFound,
                           webhook.generate_url, key)
    resource = _('service:type=clustering,name=senlin')
    msg = _('The resource (%(resource)s) could not be found.'
            ) % {'resource': resource}
    self.assertEqual(msg, six.text_type(ex))

    # Senlin endpoint not found
    mock_service_get.return_value = {
        'id': 'SENLIN_SERVICE_ID'
    }
    service_id = mock_service_get.return_value['id']
    mock_endpoint_get.return_value = None
    ex = self.assertRaises(exception.ResourceNotFound,
                           webhook.generate_url, key)
    resource = _('endpoint: service=%(service)s,region='
                 '%(region)s,visibility=%(interface)s'
                 ) % {'service': service_id,
                      'region': None,
                      'interface': 'public'}
    msg = _('The resource (%(resource)s) could not be found.'
            ) % {'resource': resource}
    self.assertEqual(msg, six.text_type(ex))

Example 15: detach_policy
def detach_policy(self, ctx, policy_id):
    """Detach policy object from the cluster.

    Note this method MUST be called with the cluster locked.

    :param ctx: A context for DB operation.
    :param policy_id: ID of the policy object.
    :returns: A tuple containing a boolean result and a reason string.
    """
    # Check if the policy is attached to this cluster
    found = None
    for existing in self.policies:
        if existing.id == policy_id:
            found = existing
            break

    if found is None:
        return False, _('Policy not attached.')

    policy = policy_base.Policy.load(ctx, policy_id)
    res, reason = policy.detach(self)
    if not res:
        return res, reason

    db_api.cluster_policy_detach(ctx, self.id, policy_id)
    self.rt['policies'].remove(found)
    return True, _('Policy detached.')