本文整理汇总了Python中sahara.i18n._函数的典型用法代码示例。如果您正苦于以下问题:Python _函数的具体用法?Python _怎么用?Python _使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了_函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: domain_for_proxy
def domain_for_proxy():
    """Return the Keystone domain used for proxy users, or None.

    If proxy-user domains are disabled in configuration, None is returned.
    Otherwise the configured domain is looked up (once, then cached in the
    module-level PROXY_DOMAIN) and returned.

    :returns: A Keystone Domain object or None.
    :raises ConfigurationError: If the domain is requested but not specified.
    :raises NotFoundException: If the domain name is specified but cannot be
                               found.
    """
    global PROXY_DOMAIN

    if CONF.use_domain_for_proxy_users is False:
        return None
    if CONF.proxy_user_domain_name is None:
        raise ex.ConfigurationError(_('Proxy domain requested but not '
                                      'specified.'))
    admin = k.client_for_admin()
    if not PROXY_DOMAIN:
        matches = admin.domains.list(name=CONF.proxy_user_domain_name)
        if not matches:
            raise ex.NotFoundException(value=CONF.proxy_user_domain_name,
                                       message=_('Failed to find domain %s'))
        if len(matches) > 1:
            # Keystone domain names are globally unique, so more than one
            # match indicates something is seriously wrong.
            raise ex.NotFoundException(value=CONF.proxy_user_domain_name,
                                       message=_('Unexpected results found '
                                                 'when searching for domain '
                                                 '%s'))
        PROXY_DOMAIN = matches[0]
    return PROXY_DOMAIN
示例2: validate_number_of_datanodes
def validate_number_of_datanodes(cluster, scaled_groups, default_configs):
    """Ensure the DATANODE count is not below the dfs.replication value.

    :param cluster: cluster object whose node groups are inspected.
    :param scaled_groups: mapping of node-group id to its new (scaled) count;
        falsy values are treated as an empty mapping.
    :param default_configs: iterable of config objects used to find the
        default dfs.replication value.
    :raises InvalidComponentCountException: if the effective number of
        DATANODE instances is less than dfs.replication.
    """
    scaled_groups = scaled_groups or {}

    # Plugin default first; the cluster-level HDFS config overrides it.
    dfs_replication = 0
    for cfg in default_configs:
        if cfg.name == "dfs.replication":
            dfs_replication = cfg.default_value
    conf = cluster.cluster_configs
    if "HDFS" in conf and "dfs.replication" in conf["HDFS"]:
        dfs_replication = conf["HDFS"]["dfs.replication"]

    dn_count = sum(
        scaled_groups.get(ng.id, ng.count)
        for ng in cluster.node_groups
        if "DATANODE" in ng.node_processes)

    if dn_count < int(dfs_replication):
        raise ex.InvalidComponentCountException(
            "datanode",
            _("%s or more") % dfs_replication,
            dn_count,
            _("Number of %(dn)s instances should not be less " "than %(replication)s")
            % {"dn": "DATANODE", "replication": "dfs.replication"},
        )
示例3: wait_ambari_requests
def wait_ambari_requests(self, requests, cluster_name):
    """Block until every given Ambari request reaches COMPLETED state.

    Polls the Ambari API every 5 seconds.  Requests in IN_PROGRESS or
    PENDING state are re-checked; any request in another non-COMPLETED
    state is treated as failed.

    :param requests: iterable of Ambari request ids to wait for.
    :param cluster_name: name of the Ambari cluster being polled.
    :raises HadoopProvisionError: if any request ends in a failed state.
    """
    pending = set(requests)
    failed = []
    while pending:
        done, in_progress = set(), set()
        for req_id in pending:
            info = self.get_request_info(cluster_name, req_id)
            state = info.get("request_status")
            if state == 'COMPLETED':
                done.add(req_id)
            elif state in ['IN_PROGRESS', 'PENDING']:
                in_progress.add(req_id)
            else:
                failed.append(info)
        if failed:
            msg = _("Some Ambari request(s) "
                    "not in COMPLETED state: %(description)s.")
            descrs = []
            for req in failed:
                descr = _(
                    "request %(id)d: %(name)s - in status %(status)s")
                descrs.append(descr %
                              {'id': req.get("id"),
                               'name': req.get("request_context"),
                               'status': req.get("request_status")})
            raise p_exc.HadoopProvisionError(msg % {'description': descrs})
        pending = in_progress
        context.sleep(5)
        LOG.debug("Waiting for %d ambari request(s) to be completed",
                  len(in_progress))
    LOG.debug("All ambari requests have been completed")
示例4: _install_services
def _install_services(self, cluster_name, ambari_info):
    """Ask Ambari to install all services currently in INIT state.

    Issues the install PUT request, then waits for the resulting async
    Ambari request to finish before finalizing the Ambari state.

    :raises HadoopProvisionError: if the install request is rejected or
        the async install request does not complete successfully.
    """
    LOG.info(_LI('Installing required Hadoop services ...'))

    ambari_address = ambari_info.get_address()
    install_url = ('http://{0}/api/v1/clusters/{'
                   '1}/services?ServiceInfo/state=INIT'.format(
                       ambari_address, cluster_name))
    body = ('{"RequestInfo" : { "context" : "Install all services" },'
            '"Body" : {"ServiceInfo": {"state" : "INSTALLED"}}}')

    result = self._put(install_url, ambari_info, data=body)
    if result.status_code == 202:
        # 202 means Ambari accepted the request and runs it asynchronously.
        request_id = json.loads(result.text)['Requests']['id']
        request_uri = self._get_async_request_uri(
            ambari_info, cluster_name, request_id)
        if self._wait_for_async_request(request_uri, ambari_info):
            LOG.info(_LI("Install of Hadoop stack successful."))
            self._finalize_ambari_state(ambari_info)
        else:
            LOG.critical(_LC('Install command failed.'))
            raise ex.HadoopProvisionError(
                _('Installation of Hadoop stack failed.'))
    elif result.status_code != 200:
        LOG.error(
            _LE('Install command failed. {0}').format(result.text))
        raise ex.HadoopProvisionError(
            _('Installation of Hadoop stack failed.'))
示例5: _check_storm
def _check_storm(cluster):
    """Validate Storm process counts and inter-process dependencies.

    :raises InvalidComponentCountException: if more than one DRPC server,
        Nimbus or Storm UI server is configured.
    :raises RequiredServiceMissingException: if a process is present
        without a process it depends on.
    """
    dr_count = utils.get_instances_count(cluster, common.DRPC_SERVER)
    ni_count = utils.get_instances_count(cluster, common.NIMBUS)
    su_count = utils.get_instances_count(cluster, common.STORM_UI_SERVER)
    sv_count = utils.get_instances_count(cluster, common.SUPERVISOR)

    # At most one instance of each management process is allowed.
    for process, count in ((common.DRPC_SERVER, dr_count),
                           (common.NIMBUS, ni_count),
                           (common.STORM_UI_SERVER, su_count)):
        if count > 1:
            raise ex.InvalidComponentCountException(
                process, _("0 or 1"), count)

    # DRPC server and Nimbus require each other.
    if dr_count == 0 and ni_count == 1:
        raise ex.RequiredServiceMissingException(
            common.DRPC_SERVER, required_by=common.NIMBUS)
    if dr_count == 1 and ni_count == 0:
        raise ex.RequiredServiceMissingException(
            common.NIMBUS, required_by=common.DRPC_SERVER)
    # Storm UI needs both DRPC server and Nimbus.
    if su_count == 1 and (dr_count == 0 or ni_count == 0):
        raise ex.RequiredServiceMissingException(
            common.NIMBUS, required_by=common.STORM_UI_SERVER)
    # DRPC server and supervisors require each other.
    if dr_count == 1 and sv_count == 0:
        raise ex.RequiredServiceMissingException(
            common.SUPERVISOR, required_by=common.DRPC_SERVER)
    if sv_count > 0 and dr_count == 0:
        raise ex.RequiredServiceMissingException(
            common.DRPC_SERVER, required_by=common.SUPERVISOR)
示例6: render
def render(res=None, resp_type=None, status=None, **kwargs):
    """Serialize *res* and wrap it into a flask Response.

    :param res: response payload; a dict is merged with **kwargs.
    :param resp_type: requested content type; only JSON is supported.
    :param status: explicit HTTP status code, overrides the one stored on
        the flask request (default 200).
    """
    if not res:
        res = {}
    if type(res) is dict:
        res.update(kwargs)
    elif kwargs:
        # can't merge kwargs into the non-dict res
        abort_and_log(500,
                      _("Non-dict and non-empty kwargs passed to render"))

    # Priority: explicit argument, then request attribute, then 200.
    status_code = status or getattr(flask.request, 'status_code', None) or 200
    resp_type = (resp_type
                 or getattr(flask.request, 'resp_type', RT_JSON)
                 or RT_JSON)

    serializer = None
    if "application/json" in resp_type:
        resp_type = RT_JSON
        serializer = wsgi.JSONDictSerializer()
    else:
        abort_and_log(400, _("Content type '%s' isn't supported") % resp_type)

    return flask.Response(response=serializer.serialize(res),
                          status=status_code,
                          mimetype=str(resp_type))
示例7: validate
def validate(self, cluster):
    """Validate process counts for the cluster topology.

    Requires exactly one namenode; at most one secondarynamenode,
    jobtracker, oozie and hiveserver; and a jobtracker whenever
    tasktracker, oozie or hive are present.

    :raises InvalidComponentCountException: on a bad process count.
    :raises RequiredServiceMissingException: on a missing dependency.
    """
    def _count(process):
        # Total instance count across all node groups running `process`.
        return sum(ng.count for ng in utils.get_node_groups(cluster, process))

    nn_count = _count("namenode")
    if nn_count != 1:
        raise ex.InvalidComponentCountException("namenode", 1, nn_count)

    snn_count = _count("secondarynamenode")
    if snn_count > 1:
        raise ex.InvalidComponentCountException("secondarynamenode",
                                                _("0 or 1"), snn_count)

    jt_count = _count("jobtracker")
    if jt_count > 1:
        raise ex.InvalidComponentCountException("jobtracker", _("0 or 1"),
                                                jt_count)

    oozie_count = _count("oozie")
    if oozie_count > 1:
        raise ex.InvalidComponentCountException("oozie", _("0 or 1"),
                                                oozie_count)

    hive_count = _count("hiveserver")
    if jt_count == 0:
        if _count("tasktracker") > 0:
            raise ex.RequiredServiceMissingException(
                "jobtracker", required_by="tasktracker")
        if oozie_count > 0:
            raise ex.RequiredServiceMissingException(
                "jobtracker", required_by="oozie")
        if hive_count > 0:
            raise ex.RequiredServiceMissingException(
                "jobtracker", required_by="hive")
    if hive_count > 1:
        raise ex.InvalidComponentCountException("hive", _("0 or 1"),
                                                hive_count)
示例8: __call__
def __call__(self, req):
    """Ensure that tenants in the URL and the token are equal.

    Handles the incoming request by comparing the tenant id from the
    headers with the tenant id embedded in the URL path.  The request is
    passed downstream on success and rejected on mismatch.

    :raises HTTPServiceUnavailable: if no tenant id is in the headers.
    :raises HTTPNotFound: if the path cannot be split into its parts.
    :raises HTTPUnauthorized: if the two tenant ids differ.
    """
    token_tenant = req.environ.get("HTTP_X_TENANT_ID")
    if not token_tenant:
        LOG.warning("Can't get tenant_id from env")
        raise ex.HTTPServiceUnavailable()

    path = req.environ['PATH_INFO']
    if path != '/':
        try:
            version, maybe_tenant, rest = (
                strutils.split_path(path, 2, 3, True))
        except ValueError:
            LOG.warning("Incorrect path: {path}".format(path=path))
            raise ex.HTTPNotFound(_("Incorrect path"))

        # Only paths that actually carry a tenant uuid are checked.
        if (uuidutils.is_uuid_like(maybe_tenant)
                and token_tenant != maybe_tenant):
            LOG.debug("Unauthorized: token tenant != requested tenant")
            raise ex.HTTPUnauthorized(
                _('Token tenant != requested tenant'))
    return self.application
示例9: node_group_template_update
def node_group_template_update(context, values, ignore_default=False):
    """Update a node group template inside a single DB transaction.

    :param values: mapping of fields to update; must contain the 'id'.
    :param ignore_default: allow updating a default template when True.
    :raises NotFoundException: if no template with the given id exists.
    :raises UpdateFailedException: if the template is a default one or is
        referenced by an existing cluster.
    :raises DBDuplicateEntry: on a uniqueness violation.
    """
    session = get_session()
    try:
        with session.begin():
            ngt_id = values['id']
            ngt = _node_group_template_get(context, session, ngt_id)
            if not ngt:
                raise ex.NotFoundException(
                    ngt_id, _("NodeGroupTemplate id '%s' not found"))
            if not ignore_default and ngt.is_default:
                raise ex.UpdateFailedException(
                    ngt_id,
                    _("NodeGroupTemplate id '%s' can not be updated. "
                      "It is a default template.")
                )
            # Refuse the update while any existing cluster still uses a
            # cluster template that references this node group template.
            for relation in ngt.templates_relations:
                if len(relation.cluster_template.clusters) > 0:
                    raise ex.UpdateFailedException(
                        ngt_id,
                        _("NodeGroupTemplate id '%s' can not be updated. "
                          "It is referenced by an existing cluster.")
                    )
            ngt.update(values)
    except db_exc.DBDuplicateEntry as e:
        raise ex.DBDuplicateEntry(
            _("Duplicate entry for NodeGroupTemplate: %s") % e.columns)
    return ngt
示例10: cluster_create
def cluster_create(context, values):
    """Create a cluster and its node groups in one transaction.

    :param values: cluster fields; the optional 'node_groups' entry is a
        list of node-group field mappings.
    :returns: the freshly created cluster, re-read from the DB.
    :raises DBDuplicateEntry: on a uniqueness violation for the cluster
        or any of its node groups.
    """
    values = values.copy()
    node_groups = values.pop("node_groups", [])

    cluster = m.Cluster()
    cluster.update(values)

    session = get_session()
    with session.begin():
        try:
            cluster.save(session=session)
        except db_exc.DBDuplicateEntry as e:
            raise ex.DBDuplicateEntry(
                _("Duplicate entry for Cluster: %s") % e.columns)
        try:
            for ng_values in node_groups:
                node_group = m.NodeGroup()
                node_group.update({"cluster_id": cluster.id})
                node_group.update(ng_values)
                node_group.save(session=session)
        except db_exc.DBDuplicateEntry as e:
            raise ex.DBDuplicateEntry(
                _("Duplicate entry for NodeGroup: %s") % e.columns)
    return cluster_get(context, cluster.id)
示例11: cluster_template_create
def cluster_template_create(context, values):
    """Create a cluster template and its node-group relations.

    :param values: template fields; the optional 'node_groups' entry is a
        list of node-group field mappings.
    :returns: the freshly created cluster template, re-read from the DB.
    :raises DBDuplicateEntry: on a uniqueness violation for the template
        or any of its node-group relations.
    """
    values = values.copy()
    # Tolerate a missing 'node_groups' key as well as an explicit None,
    # for consistency with cluster_create() which uses pop's default.
    node_groups = values.pop("node_groups", None) or []
    cluster_template = m.ClusterTemplate()
    cluster_template.update(values)
    session = get_session()
    with session.begin():
        try:
            cluster_template.save(session=session)
        except db_exc.DBDuplicateEntry as e:
            raise ex.DBDuplicateEntry(
                _("Duplicate entry for ClusterTemplate: %s") % e.columns)
        try:
            for ng in node_groups:
                node_group = m.TemplatesRelation()
                node_group.update({"cluster_template_id": cluster_template.id})
                node_group.update(ng)
                node_group.save(session=session)
        except db_exc.DBDuplicateEntry as e:
            raise ex.DBDuplicateEntry(
                _("Duplicate entry for TemplatesRelation: %s") % e.columns)
    return cluster_template_get(context, cluster_template.id)
示例12: validate_number_of_datanodes
def validate_number_of_datanodes(cluster, scaled_groups, default_configs):
    """Check that the DATANODE count satisfies dfs.replication.

    The effective replication factor is the plugin default from
    *default_configs*, overridden by the cluster's HDFS configuration.
    Node groups listed in *scaled_groups* contribute their new counts.

    :raises InvalidComponentCountException: if there are fewer DATANODE
        instances than the replication factor requires.
    """
    scaled_groups = scaled_groups or {}

    dfs_replication = 0
    for cfg in default_configs:
        if cfg.name == 'dfs.replication':
            dfs_replication = cfg.default_value
    conf = cluster.cluster_configs
    if 'HDFS' in conf and 'dfs.replication' in conf['HDFS']:
        dfs_replication = conf['HDFS']['dfs.replication']

    dn_count = sum(
        scaled_groups.get(ng.id, ng.count)
        for ng in cluster.node_groups
        if 'DATANODE' in ng.node_processes)

    if dn_count < int(dfs_replication):
        raise ex.InvalidComponentCountException(
            'datanode', _('%s or more') % dfs_replication, dn_count,
            _('Number of %(dn)s instances should not be less '
              'than %(replication)s')
            % {'dn': 'DATANODE', 'replication': 'dfs.replication'})
示例13: _await_cldb
def _await_cldb(self, cluster_context, instances=None, timeout=600):
    """Wait until CLDB reports 'ok' and the given instances have joined.

    Polls the CLDB node list command on the CLDB instance until the
    status becomes 'ok' (then verifies, after enough retries, that every
    instance's management IP is registered) or *timeout* seconds elapse.

    :raises HadoopProvisionError: if a node never connects to CLDB or
        CLDB does not start within the timeout.
    """
    instances = instances or cluster_context.get_instances()
    cldb_node = cluster_context.get_instance(mfs.CLDB)
    start_time = timeutils.utcnow()
    retry_count = 0
    with cldb_node.remote() as remote:
        LOG.debug("Waiting {count} seconds for CLDB initialization".format(
            count=timeout))
        while timeutils.delta_seconds(start_time,
                                      timeutils.utcnow()) < timeout:
            code, out = remote.execute_command(NODE_LIST_CMD,
                                               raise_when_error=False)
            resp = json.loads(out)
            if str(resp['status']).lower() == 'ok':
                ips = [node['ip'] for node in resp['data']]
                retry_count += 1
                # Only report a missing node after enough retries, to give
                # slow nodes a chance to register.
                for instance in instances:
                    if (instance.management_ip not in ips
                            and retry_count > DEFAULT_RETRY_COUNT):
                        raise ex.HadoopProvisionError(_(
                            "Node failed to connect to CLDB: %s") %
                            instance.management_ip)
                break
            else:
                context.sleep(DELAY)
        else:
            # The while condition went false: timeout without 'ok' status.
            raise ex.HadoopProvisionError(_("CLDB failed to start"))
示例14: proxy_user_delete
def proxy_user_delete(username=None, user_id=None):
    """Delete a user from the proxy domain.

    :param username: The name of the user to delete.
    :param user_id: The id of the user to delete; if provided this
        overrides the username.
    :raises NotFoundException: If there is an error locating the user in
        the proxy domain.
    """
    admin = k.client_for_admin()
    if not user_id:
        # Resolve the username to an id within the proxy domain.
        domain = domain_for_proxy()
        matches = b.execute_with_retries(
            admin.users.list, domain=domain.id, name=username)
        if not matches:
            raise ex.NotFoundException(
                value=username,
                message_template=_('Failed to find user %s'))
        if len(matches) > 1:
            raise ex.NotFoundException(
                value=username,
                message_template=_('Unexpected results found when searching '
                                   'for user %s'))
        user_id = matches[0].id
    b.execute_with_retries(admin.users.delete, user_id)
    LOG.debug('Deleted proxy user id {user_id}'.format(user_id=user_id))
示例15: generate_key_pair
def generate_key_pair(key_length=2048):
    """Create an RSA key pair with the specified number of bits in the key.

    :param key_length: key size in bits, passed to ssh-keygen's -b option;
        None lets ssh-keygen use its default size.
    :returns: tuple of (private key, public key) file contents as strings.
    :raises SystemError: if ssh-keygen did not produce the expected files.
    """
    with tempfiles.tempdir() as tmpdir:
        keyfile = os.path.join(tmpdir, 'tempkey')
        args = [
            'ssh-keygen',
            '-q',  # quiet
            '-N', '',  # w/o passphrase
            '-t', 'rsa',  # create key of rsa type
            '-f', keyfile,  # filename of the key file
            '-C', 'Generated-by-Sahara'  # key comment
        ]
        if key_length is not None:
            # Stringify explicitly; argv elements should not be ints.
            args.extend(['-b', str(key_length)])
        processutils.execute(*args)
        if not os.path.exists(keyfile):
            raise ex.SystemError(_("Private key file hasn't been created"))
        # Use context managers so the file handles are closed before the
        # temporary directory is removed (the original leaked both handles).
        with open(keyfile) as private:
            private_key = private.read()
        public_key_path = keyfile + '.pub'
        if not os.path.exists(public_key_path):
            raise ex.SystemError(_("Public key file hasn't been created"))
        with open(public_key_path) as public:
            public_key = public.read()
        return private_key, public_key