This article collects typical usage examples of the Python function sahara.plugins.utils.get_config_value_or_default. If you are wondering what get_config_value_or_default does, how to call it, or what real-world code that uses it looks like, the hand-picked examples below should help.
The following presents 15 code examples of the get_config_value_or_default function, sorted by popularity by default.
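Before the examples, here is a minimal sketch of the two calling conventions they use: the positional form (applicable target, config name, cluster), common throughout the Spark plugin, and the keyword form (cluster=..., config=...) used when a plugin-defined Config object is already at hand (see Examples 13-15). The cluster object and the enable_kerberos config below are assumed to come from the surrounding plugin code; this is an illustration, not part of any example.

from sahara.plugins import utils


def read_spark_master_ports(cluster):
    # Positional form: applicable target ("Spark"), config name, cluster.
    master_port = utils.get_config_value_or_default("Spark", "Master port",
                                                    cluster)
    webui_port = utils.get_config_value_or_default("Spark",
                                                   "Master webui port",
                                                   cluster)
    return int(master_port), int(webui_port)


def read_config_object(cluster, enable_kerberos):
    # Keyword form: pass a plugin-defined Config object directly.
    return utils.get_config_value_or_default(cluster=cluster,
                                             config=enable_kerberos)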
Example 1: get_open_ports
def get_open_ports(self, node_group):
    cluster = node_group.cluster
    ports_map = {
        'namenode': [8020, 50070, 50470],
        'datanode': [50010, 1004, 50075, 1006, 50020],
        'master': [
            int(utils.get_config_value_or_default("Spark", "Master port",
                                                  cluster)),
            int(utils.get_config_value_or_default("Spark",
                                                  "Master webui port",
                                                  cluster)),
        ],
        'slave': [
            int(utils.get_config_value_or_default("Spark",
                                                  "Worker webui port",
                                                  cluster))
        ],
        'zeppelin': [int(utils.get_config_value_or_default("Zeppelin",
                                                           "Web UI port",
                                                           cluster))]
    }

    ports = []
    for process in node_group.node_processes:
        if process in ports_map:
            ports.extend(ports_map[process])

    return ports
Example 2: _validate_existing_ng_scaling
def _validate_existing_ng_scaling(self, cluster, existing):
    scalable_processes = self._get_scalable_processes()
    dn_to_delete = 0
    for ng in cluster.node_groups:
        if ng.id in existing:
            if ng.count > existing[ng.id] and ("datanode" in
                                               ng.node_processes):
                dn_to_delete += ng.count - existing[ng.id]
            if not set(ng.node_processes).issubset(scalable_processes):
                raise ex.NodeGroupCannotBeScaled(
                    ng.name, _("Spark plugin cannot scale nodegroup"
                               " with processes: %s") %
                    ' '.join(ng.node_processes))

    dn_amount = len(utils.get_instances(cluster, "datanode"))
    rep_factor = utils.get_config_value_or_default('HDFS',
                                                   "dfs.replication",
                                                   cluster)

    if dn_to_delete > 0 and dn_amount - dn_to_delete < rep_factor:
        raise ex.ClusterCannotBeScaled(
            cluster.name, _("Spark plugin cannot shrink cluster because "
                            "there would be not enough nodes for HDFS "
                            "replicas (replication factor is %s)") %
            rep_factor)
Example 3: generate_spark_executor_classpath
def generate_spark_executor_classpath(cluster):
    cp = utils.get_config_value_or_default("Spark",
                                           "Executor extra classpath",
                                           cluster)
    if cp:
        return "spark.executor.extraClassPath " + cp
    return "\n"
Example 4: validate
def validate(self, cluster):
    if cluster.hadoop_version == "1.0.0":
        raise exceptions.DeprecatedException(
            _("Support for Spark version 1.0.0 is now deprecated and will"
              " be removed in the 2016.1 release."))

    nn_count = sum([ng.count for ng
                    in utils.get_node_groups(cluster, "namenode")])
    if nn_count != 1:
        raise ex.InvalidComponentCountException("namenode", 1, nn_count)

    dn_count = sum([ng.count for ng
                    in utils.get_node_groups(cluster, "datanode")])
    if dn_count < 1:
        raise ex.InvalidComponentCountException("datanode", _("1 or more"),
                                                dn_count)

    rep_factor = utils.get_config_value_or_default("HDFS", "dfs.replication",
                                                   cluster)
    if dn_count < rep_factor:
        raise ex.InvalidComponentCountException(
            "datanode",
            _("%s or more") % rep_factor,
            dn_count,
            _("Number of %(dn)s instances should not be less "
              "than %(replication)s")
            % {"dn": "datanode", "replication": "dfs.replication"})

    # validate Spark Master Node and Spark Slaves
    sm_count = sum([ng.count for ng
                    in utils.get_node_groups(cluster, "master")])
    if sm_count != 1:
        raise ex.RequiredServiceMissingException("Spark master")

    sl_count = sum([ng.count for ng
                    in utils.get_node_groups(cluster, "slave")])
    if sl_count < 1:
        raise ex.InvalidComponentCountException("Spark slave", _("1 or more"),
                                                sl_count)
Example 5: _set_cluster_info
def _set_cluster_info(self, cluster):
    nn = utils.get_instance(cluster, "namenode")
    sp_master = utils.get_instance(cluster, "master")
    info = {}

    if nn:
        address = utils.get_config_value_or_default("HDFS",
                                                    "dfs.http.address",
                                                    cluster)
        port = address[address.rfind(":") + 1:]
        info["HDFS"] = {"Web UI": "http://%s:%s" % (nn.management_ip, port)}
        info["HDFS"]["NameNode"] = "hdfs://%s:8020" % nn.hostname()

    if sp_master:
        port = utils.get_config_value_or_default("Spark",
                                                 "Master webui port",
                                                 cluster)
        if port is not None:
            info["Spark"] = {
                "Web UI": "http://%s:%s" % (sp_master.management_ip, port)}

    ctx = context.ctx()
    conductor.cluster_update(ctx, cluster, {"info": info})
Example 6: __init__
def __init__(self, cluster):
    super(EdpSparkEngine, self).__init__(cluster)

    self.master = u.get_instance(cluster, "SPARK_YARN_HISTORY_SERVER")

    self.plugin_params["spark-user"] = "sudo -u spark "
    self.plugin_params["spark-submit"] = "spark-submit"
    self.plugin_params["deploy-mode"] = "cluster"
    self.plugin_params["master"] = "yarn-cluster"

    driver_cp = u.get_config_value_or_default("Spark",
                                              "Executor extra classpath",
                                              self.cluster)
    self.plugin_params["driver-class-path"] = driver_cp
Example 7: generate_spark_env_configs
def generate_spark_env_configs(cluster):
    configs = []

    # master configuration
    sp_master = utils.get_instance(cluster, "master")
    configs.append('SPARK_MASTER_IP=' + sp_master.hostname())

    # point to the hadoop conf dir so that Spark can read things
    # like the swift configuration without having to copy core-site
    # to /opt/spark/conf
    configs.append('HADOOP_CONF_DIR=' + HADOOP_CONF_DIR)

    masterport = utils.get_config_value_or_default("Spark",
                                                   "Master port",
                                                   cluster)
    if masterport and masterport != _get_spark_opt_default("Master port"):
        configs.append('SPARK_MASTER_PORT=' + str(masterport))

    masterwebport = utils.get_config_value_or_default("Spark",
                                                      "Master webui port",
                                                      cluster)
    if (masterwebport and
            masterwebport != _get_spark_opt_default("Master webui port")):
        configs.append('SPARK_MASTER_WEBUI_PORT=' + str(masterwebport))

    # configuration for workers
    workercores = utils.get_config_value_or_default("Spark",
                                                    "Worker cores",
                                                    cluster)
    if workercores and workercores != _get_spark_opt_default("Worker cores"):
        configs.append('SPARK_WORKER_CORES=' + str(workercores))

    workermemory = utils.get_config_value_or_default("Spark",
                                                     "Worker memory",
                                                     cluster)
    if (workermemory and
            workermemory != _get_spark_opt_default("Worker memory")):
        configs.append('SPARK_WORKER_MEMORY=' + str(workermemory))

    workerport = utils.get_config_value_or_default("Spark",
                                                   "Worker port",
                                                   cluster)
    if workerport and workerport != _get_spark_opt_default("Worker port"):
        configs.append('SPARK_WORKER_PORT=' + str(workerport))

    workerwebport = utils.get_config_value_or_default("Spark",
                                                      "Worker webui port",
                                                      cluster)
    if (workerwebport and
            workerwebport != _get_spark_opt_default("Worker webui port")):
        configs.append('SPARK_WORKER_WEBUI_PORT=' + str(workerwebport))

    workerinstances = utils.get_config_value_or_default("Spark",
                                                        "Worker instances",
                                                        cluster)
    if (workerinstances and
            workerinstances != _get_spark_opt_default("Worker instances")):
        configs.append('SPARK_WORKER_INSTANCES=' + str(workerinstances))

    return '\n'.join(configs)
Example 8: generate_job_cleanup_config
def generate_job_cleanup_config(cluster):
    args = {
        'minimum_cleanup_megabytes': utils.get_config_value_or_default(
            "Spark", "Minimum cleanup megabytes", cluster),
        'minimum_cleanup_seconds': utils.get_config_value_or_default(
            "Spark", "Minimum cleanup seconds", cluster),
        'maximum_cleanup_seconds': utils.get_config_value_or_default(
            "Spark", "Maximum cleanup seconds", cluster)
    }
    job_conf = {'valid': (args['maximum_cleanup_seconds'] > 0 and
                          (args['minimum_cleanup_megabytes'] > 0
                           and args['minimum_cleanup_seconds'] > 0))}
    if job_conf['valid']:
        job_conf['cron'] = f.get_file_text(
            'plugins/spark/resources/spark-cleanup.cron'),
        job_cleanup_script = f.get_file_text(
            'plugins/spark/resources/tmp-cleanup.sh.template')
        job_conf['script'] = job_cleanup_script.format(**args)
    return job_conf
Example 9: generate_zeppelin_setup_script
def generate_zeppelin_setup_script(sp_master):
    script_lines = ["#!/bin/bash -x"]

    # position here. Let's make port configurable
    script_lines.append(
        "echo 'export MASTER=spark://{0}:{1}' "
        ">> /opt/incubator-zeppelin/conf/zeppelin-env.sh".format(
            sp_master['instance_name'],
            utils.get_config_value_or_default(
                "Spark", "Master port", sp_master.node_group.cluster)))
    script_lines.append("echo 'export SPARK_HOME=/opt/spark' >> "
                        "/opt/incubator-zeppelin/conf/zeppelin-env.sh")
    script_lines.append("echo 'export PYTHONPATH=$SPARK_HOME/python:"
                        "$SPARK_HOME/python/lib/py4j-0.8.2.1-src.zip:"
                        "$PYTHONPATH' >> "
                        "/opt/incubator-zeppelin/conf/zeppelin-env.sh")
    script_lines.append("sed -i 's|<value>8080</value>|<value>{0}</value>|g'"
                        " /opt/incubator-zeppelin/conf/zeppelin-site.xml"
                        .format(utils.get_config_value_or_default(
                            "Zeppelin",
                            "Web UI port",
                            sp_master.node_group.cluster)))
    return "\n".join(script_lines)
Example 10: __init__
def __init__(self, cluster):
    super(EdpSparkEngine, self).__init__(cluster)

    self.master = u.get_instance(cluster, "CLOUDERA_MANAGER")

    self.plugin_params["spark-user"] = "sudo -u spark "
    self.plugin_params["spark-submit"] = "spark-submit"
    self.plugin_params["deploy-mode"] = "cluster"
    self.plugin_params["master"] = "yarn-cluster"

    driver_cp = u.get_config_value_or_default(
        "Spark", "Executor extra classpath", self.cluster)
    if driver_cp:
        driver_cp = " --driver-class-path " + driver_cp
    self.plugin_params["driver-class-path"] = driver_cp
Example 11: _set_cluster_info
def _set_cluster_info(self, cluster):
    nn = utils.get_instance(cluster, "namenode")
    sp_master = utils.get_instance(cluster, "master")
    info = {}

    if nn:
        address = utils.get_config_value_or_default(
            'HDFS', 'dfs.http.address', cluster)
        port = address[address.rfind(':') + 1:]
        info['HDFS'] = {
            'Web UI': 'http://%s:%s' % (nn.management_ip, port)
        }
        info['HDFS']['NameNode'] = 'hdfs://%s:8020' % nn.hostname()

    if sp_master:
        port = utils.get_config_value_or_default(
            'Spark', 'Master webui port', cluster)
        if port is not None:
            info['Spark'] = {
                'Web UI': 'http://%s:%s' % (sp_master.management_ip, port)
            }

    ctx = context.ctx()
    conductor.cluster_update(ctx, cluster, {'info': info})
Example 12: decommission_sl
def decommission_sl(master, inst_to_be_deleted, survived_inst):
    if survived_inst is not None:
        slavenames = []
        for slave in survived_inst:
            slavenames.append(slave.hostname())
        slaves_content = c_helper.generate_spark_slaves_configs(slavenames)
    else:
        slaves_content = "\n"

    cluster = master.cluster
    sp_home = utils.get_config_value_or_default("Spark", "Spark home",
                                                cluster)
    r_master = remote.get_remote(master)
    run.stop_spark(r_master, sp_home)

    # write new slave file to master
    files = {os.path.join(sp_home, 'conf/slaves'): slaves_content}
    r_master.write_files_to(files)

    # write new slaves file to each survived slave as well
    for i in survived_inst:
        with remote.get_remote(i) as r:
            r.write_files_to(files)

    run.start_spark_master(r_master, sp_home)
Example 13: is_kerberos_security_enabled
def is_kerberos_security_enabled(cluster):
    return pl_utils.get_config_value_or_default(
        cluster=cluster, config=enable_kerberos)
Example 14: get_admin_password
def get_admin_password(cluster):
    # TODO(vgridnev): support in follow-up improved secret storage for
    # configs
    return pl_utils.get_config_value_or_default(
        cluster=cluster, config=admin_password)
Example 15: get_policy_url
def get_policy_url(cluster):
    return pl_utils.get_config_value_or_default(
        cluster=cluster, config=policy_url)