This page collects typical usage examples of the Python function sahara.utils.remote.get_remote. If you are unsure what get_remote does, how to call it, or what real code that uses it looks like, the curated examples below should help.
A total of 15 code examples of get_remote are shown, ordered by popularity.
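Across all of the examples the pattern is the same: obtain a remote handle for a cluster instance with remote.get_remote(instance), ideally as a context manager so the underlying connection is cleaned up, and then call methods such as execute_command, write_file_to or write_files_to on it. The following minimal sketch only illustrates that pattern; the instance argument, the file path, its contents and the command are placeholders, not part of any example below.

from sahara.utils import remote

def push_config_and_run(instance):
    # Minimal sketch of the common get_remote pattern (placeholder path,
    # contents and command; only the remote API calls mirror the examples).
    with remote.get_remote(instance) as r:
        # push a configuration file to the node
        r.write_file_to('/tmp/example.conf', 'key=value\n')
        # run a shell command and capture its exit code and output
        ret_code, stdout = r.execute_command('echo placeholder-command')
    return ret_code, stdout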
Example 1: _push_configs_to_existing_node
def _push_configs_to_existing_node(self, cluster, extra, instance):
    node_processes = instance.node_group.node_processes
    need_update_hadoop = (c_helper.is_data_locality_enabled(cluster) or
                          'namenode' in node_processes)
    need_update_spark = ('master' in node_processes or
                         'slave' in node_processes)

    if need_update_spark:
        ng_extra = extra[instance.node_group.id]
        sp_home = self._spark_home(cluster)
        files = {
            os.path.join(sp_home,
                         'conf/spark-env.sh'): ng_extra['sp_master'],
            os.path.join(sp_home, 'conf/slaves'): ng_extra['sp_slaves'],
            os.path.join(
                sp_home,
                'conf/spark-defaults.conf'): ng_extra['sp_defaults']
        }
        r = remote.get_remote(instance)
        r.write_files_to(files)
        self._push_cleanup_job(r, cluster, extra, instance)
    if need_update_hadoop:
        with remote.get_remote(instance) as r:
            self._write_topology_data(r, cluster, extra)
            self._push_master_configs(r, cluster, extra, instance)
Example 2: start_cluster
def start_cluster(self, cluster):
    nn_instance = utils.get_instance(cluster, "namenode")
    sm_instance = utils.get_instance(cluster, "master")
    dn_instances = utils.get_instances(cluster, "datanode")

    # Start the name node
    with remote.get_remote(nn_instance) as r:
        run.format_namenode(r)
        run.start_processes(r, "namenode")

    # start the data nodes
    self._start_slave_datanode_processes(dn_instances)

    LOG.info("Hadoop services in cluster %s have been started" %
             cluster.name)

    with remote.get_remote(nn_instance) as r:
        r.execute_command("sudo -u hdfs hdfs dfs -mkdir -p /user/$USER/")
        r.execute_command(("sudo -u hdfs hdfs dfs -chown $USER "
                           "/user/$USER/"))

    # start spark nodes
    if sm_instance:
        with remote.get_remote(sm_instance) as r:
            run.start_spark_master(r, self._spark_home(cluster))
            LOG.info("Spark service at '%s' has been started",
                     sm_instance.hostname())

    LOG.info('Cluster %s has been started successfully' % cluster.name)
    self._set_cluster_info(cluster)
Example 3: scale_cluster
def scale_cluster(self, cluster, instances):
    self._setup_instances(cluster, instances)

    run.refresh_nodes(remote.get_remote(vu.get_namenode(cluster)), "dfsadmin")
    jt = vu.get_jobtracker(cluster)
    if jt:
        run.refresh_nodes(remote.get_remote(jt), "mradmin")

    self._start_tt_dn_processes(instances)
Example 4: decommission_tt
def decommission_tt(jt, inst_to_be_deleted, survived_inst):
    with remote.get_remote(jt) as r:
        r.write_file_to('/etc/hadoop/tt.excl',
                        utils.generate_fqdn_host_names(
                            inst_to_be_deleted))
        # ask the JobTracker to re-read the include/exclude lists
        run.refresh_nodes(remote.get_remote(jt), "mradmin")
        context.sleep(3)
        r.write_files_to({'/etc/hadoop/tt.incl':
                          utils.generate_fqdn_host_names(survived_inst),
                          '/etc/hadoop/tt.excl': "",
                          })
Example 5: scale_cluster
def scale_cluster(self, cluster, instances):
    master = utils.get_instance(cluster, "master")
    r_master = remote.get_remote(master)

    run.stop_spark(r_master, self._spark_home(cluster))

    self._setup_instances(cluster, instances)
    nn = utils.get_instance(cluster, "namenode")
    run.refresh_nodes(remote.get_remote(nn), "dfsadmin")
    self._start_slave_datanode_processes(instances)

    run.start_spark_master(r_master, self._spark_home(cluster))
    LOG.info(_LI("Spark master service at '%s' has been restarted"),
             master.hostname())
Example 6: scale_cluster
def scale_cluster(self, cluster, instances):
    master = utils.get_instance(cluster, "master")
    r_master = remote.get_remote(master)

    run.stop_spark(r_master, self._spark_home(cluster))

    self._setup_instances(cluster, instances)
    nn = utils.get_instance(cluster, "namenode")
    run.refresh_nodes(remote.get_remote(nn), "dfsadmin")
    dn_instances = [instance for instance in instances
                    if "datanode" in instance.node_group.node_processes]
    self._start_datanode_processes(dn_instances)

    run.start_spark_master(r_master, self._spark_home(cluster))
    LOG.info(_LI("Spark master service has been restarted"))
Example 7: start_cluster
def start_cluster(self, cluster):
    nn_instance = vu.get_namenode(cluster)
    with remote.get_remote(nn_instance) as r:
        run.format_namenode(r)
        run.start_processes(r, "namenode")

    for snn in vu.get_secondarynamenodes(cluster):
        run.start_processes(remote.get_remote(snn), "secondarynamenode")

    jt_instance = vu.get_jobtracker(cluster)
    if jt_instance:
        run.start_processes(remote.get_remote(jt_instance), "jobtracker")

    self._start_tt_dn_processes(utils.get_instances(cluster))

    self._await_datanodes(cluster)

    LOG.info(_LI("Hadoop services in cluster %s have been started"),
             cluster.name)

    oozie = vu.get_oozie(cluster)
    if oozie:
        with remote.get_remote(oozie) as r:
            if c_helper.is_mysql_enable(cluster):
                run.mysql_start(r, oozie)
            run.oozie_create_db(r)
            run.oozie_share_lib(r, nn_instance.hostname())
            run.start_oozie(r)
            LOG.info(_LI("Oozie service at '%s' has been started"),
                     nn_instance.hostname())

    hive_server = vu.get_hiveserver(cluster)
    if hive_server:
        with remote.get_remote(hive_server) as r:
            run.hive_create_warehouse_dir(r)
            run.hive_copy_shared_conf(
                r, edp.get_hive_shared_conf_path('hadoop'))

            if c_helper.is_mysql_enable(cluster):
                if not oozie or hive_server.hostname() != oozie.hostname():
                    run.mysql_start(r, hive_server)
                run.hive_create_db(r)
                run.hive_metastore_start(r)
                LOG.info(_LI("Hive Metastore server at %s has been "
                             "started"),
                         hive_server.hostname())

    LOG.info(_LI('Cluster %s has been started successfully'), cluster.name)
    self._set_cluster_info(cluster)
Example 8: start_cluster
def start_cluster(self, cluster):
    nn_instance = utils.get_instance(cluster, "namenode")
    dn_instances = utils.get_instances(cluster, "datanode")
    zep_instance = utils.get_instance(cluster, "zeppelin")

    # Start the name node
    self._start_namenode(nn_instance)

    # start the data nodes
    self._start_datanode_processes(dn_instances)
    LOG.info(_LI("Hadoop services have been started"))

    with remote.get_remote(nn_instance) as r:
        r.execute_command("sudo -u hdfs hdfs dfs -mkdir -p /user/$USER/")
        r.execute_command("sudo -u hdfs hdfs dfs -chown $USER "
                          "/user/$USER/")

    # start spark nodes
    self.start_spark(cluster)

    # start zeppelin, if necessary
    if zep_instance:
        self._start_zeppelin(zep_instance)

    LOG.info(_LI('Cluster has been started successfully'))
    self._set_cluster_info(cluster)
Example 9: _execute_remote_job
def _execute_remote_job(self, master, wf_dir, cmd):
    # Run the job command detached in the background and capture its pid
    # via the trailing "& echo $!".
    with remote.get_remote(master) as r:
        ret, stdout = r.execute_command(
            "cd %s; %s > /dev/null 2>&1 & echo $!"
            % (wf_dir, cmd))

    return ret, stdout
Example 10: _exec_cmd_on_remote_instance
def _exec_cmd_on_remote_instance(self, master, cmd):
    if master is not None:
        with remote.get_remote(master) as r:
            ret, stdout = r.execute_command("%s > /dev/null 2>&1 & echo $!"
                                            % cmd)

            return ret, stdout
Example 11: _upload_wrapper_xml
def _upload_wrapper_xml(self, where, job_dir, job_configs):
    xml_name = 'spark.xml'
    proxy_configs = job_configs.get('proxy_configs')
    configs = {}
    cfgs = job_configs.get('configs', {})
    if proxy_configs:
        configs[sw.HADOOP_SWIFT_USERNAME] = proxy_configs.get(
            'proxy_username')
        configs[sw.HADOOP_SWIFT_PASSWORD] = key_manager.get_secret(
            proxy_configs.get('proxy_password'))
        configs[sw.HADOOP_SWIFT_TRUST_ID] = proxy_configs.get(
            'proxy_trust_id')
        configs[sw.HADOOP_SWIFT_DOMAIN_NAME] = CONF.proxy_user_domain_name
    else:
        targets = [sw.HADOOP_SWIFT_USERNAME]
        configs = {k: cfgs[k] for k in targets if k in cfgs}
        if sw.HADOOP_SWIFT_PASSWORD in cfgs:
            configs[sw.HADOOP_SWIFT_PASSWORD] = (
                key_manager.get_secret(cfgs[sw.HADOOP_SWIFT_PASSWORD])
            )

    for s3_cfg_key in s3_common.S3_DS_CONFIGS:
        if s3_cfg_key in cfgs:
            if s3_cfg_key == s3_common.S3_SECRET_KEY_CONFIG:
                configs[s3_cfg_key] = (
                    key_manager.get_secret(cfgs[s3_cfg_key])
                )
            else:
                configs[s3_cfg_key] = cfgs[s3_cfg_key]

    content = xmlutils.create_hadoop_xml(configs)
    with remote.get_remote(where) as r:
        dst = os.path.join(job_dir, xml_name)
        r.write_file_to(dst, content)
    return xml_name
Example 12: _upload_job_files_to_hdfs
def _upload_job_files_to_hdfs(self, where, job_dir, job, configs,
                              proxy_configs=None):
    mains = job.mains or []
    libs = job.libs or []
    builtin_libs = edp.get_builtin_binaries(job, configs)
    uploaded_paths = []
    hdfs_user = self.get_hdfs_user()
    job_dir_suffix = "lib" if job.type != edp.JOB_TYPE_SHELL else ""
    lib_dir = os.path.join(job_dir, job_dir_suffix)

    with remote.get_remote(where) as r:
        for main in mains:
            raw_data = dispatch.get_raw_binary(main, proxy_configs)
            h.put_file_to_hdfs(r, raw_data, main.name, job_dir, hdfs_user)
            uploaded_paths.append(job_dir + "/" + main.name)
        if len(libs) and job_dir_suffix:
            # HDFS 2.2.0 fails to put file if the lib dir does not exist
            self.create_hdfs_dir(r, lib_dir)
        for lib in libs:
            raw_data = dispatch.get_raw_binary(lib, proxy_configs)
            h.put_file_to_hdfs(r, raw_data, lib.name, lib_dir, hdfs_user)
            uploaded_paths.append(lib_dir + "/" + lib.name)
        for lib in builtin_libs:
            h.put_file_to_hdfs(r, lib["raw"], lib["name"], lib_dir,
                               hdfs_user)
            uploaded_paths.append(lib_dir + "/" + lib["name"])
    return uploaded_paths
Example 13: _push_configs_to_new_node
def _push_configs_to_new_node(self, cluster, extra, instance):
    ng_extra = extra[instance.node_group.id]

    files_supervisor = {
        '/etc/supervisor/supervisord.conf': ng_extra['slave_sv_conf']
    }
    files_storm = {
        '/usr/local/storm/conf/storm.yaml': ng_extra['st_instances']
    }
    files_zk = {
        '/opt/zookeeper/zookeeper/conf/zoo.cfg': ng_extra['zk_conf']
    }
    files_supervisor_master = {
        '/etc/supervisor/supervisord.conf': ng_extra['master_sv_conf']
    }

    with remote.get_remote(instance) as r:
        node_processes = instance.node_group.node_processes
        r.write_files_to(files_storm, run_as_root=True)
        if 'zookeeper' in node_processes:
            self._push_zk_configs(r, files_zk)
        if 'nimbus' in node_processes:
            self._push_supervisor_configs(r, files_supervisor_master)
        if 'supervisor' in node_processes:
            self._push_supervisor_configs(r, files_supervisor)
Example 14: _get_job_status_from_remote
def _get_job_status_from_remote(self, job_execution, retries=3):
    topology_name, inst_id = self._get_instance_if_running(
        job_execution)
    if topology_name is None or inst_id is None:
        return edp.JOB_STATUSES_TERMINATED

    topology_name = self._get_topology_name(job_execution)
    master = plugin_utils.get_instance(self.cluster, "nimbus")

    cmd = (
        "%(storm)s -c nimbus.host=%(host)s "
        "list | grep %(topology_name)s | awk '{print $2}'") % (
        {
            "storm": "/usr/local/storm/bin/storm",
            "host": master.hostname(),
            "topology_name": topology_name
        })

    for i in range(retries):
        with remote.get_remote(master) as r:
            ret, stdout = r.execute_command("%s " % (cmd))

        # if the reported status is ACTIVE, the topology is still running
        if stdout.strip() == "ACTIVE":
            return {"status": edp.JOB_STATUS_RUNNING}
        else:
            if i == retries - 1:
                return {"status": edp.JOB_STATUS_KILLED}
            context.sleep(10)
Example 15: _upload_job_files_to_hdfs
def _upload_job_files_to_hdfs(self, where, job_dir, job, configs,
                              proxy_configs=None):
    mains = list(job.mains) if job.mains else []
    libs = list(job.libs) if job.libs else []
    builtin_libs = edp.get_builtin_binaries(job, configs)
    uploaded_paths = []
    hdfs_user = self.get_hdfs_user()
    job_dir_suffix = 'lib' if job.type != edp.JOB_TYPE_SHELL else ''
    lib_dir = os.path.join(job_dir, job_dir_suffix)

    with remote.get_remote(where) as r:
        job_binaries = mains + libs
        self._prepare_job_binaries(job_binaries, r)

        # upload mains
        uploaded_paths.extend(self._upload_job_binaries(r, mains,
                                                        proxy_configs,
                                                        hdfs_user,
                                                        job_dir))
        # upload libs
        if len(libs) and job_dir_suffix:
            # HDFS 2.2.0 fails to put file if the lib dir does not exist
            self.create_hdfs_dir(r, lib_dir)
        uploaded_paths.extend(self._upload_job_binaries(r, libs,
                                                        proxy_configs,
                                                        hdfs_user,
                                                        lib_dir))
        # upload builtin_libs
        for lib in builtin_libs:
            h.put_file_to_hdfs(r, lib['raw'], lib['name'], lib_dir,
                               hdfs_user)
            uploaded_paths.append(lib_dir + '/' + lib['name'])

    return uploaded_paths