本文整理汇总了Python中savanna.utils.remote.get_remote函数的典型用法代码示例。如果您正苦于以下问题:Python get_remote函数的具体用法?Python get_remote怎么用?Python get_remote使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了get_remote函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: start_cluster
def start_cluster(self, cluster):
    """Start HDFS and MapReduce services on the cluster.

    Formats the namenode, starts the HDFS daemons (namenode, optional
    secondary namenodes, datanodes) and, when a jobtracker is present,
    the MapReduce daemons (jobtracker, tasktrackers).  Finally records
    the cluster info via _set_cluster_info.
    """
    nn_instance = utils.get_namenode(cluster)
    datanodes = utils.get_datanodes(cluster)
    jt_instance = utils.get_jobtracker(cluster)
    tasktrackers = utils.get_tasktrackers(cluster)

    with remote.get_remote(nn_instance) as r:
        run.format_namenode(r)
        run.start_process(r, "namenode")

    snns = utils.get_secondarynamenodes(cluster)
    if snns:
        for snn in snns:
            run.start_process(remote.get_remote(snn), "secondarynamenode")
    for dn in datanodes:
        run.start_process(remote.get_remote(dn), "datanode")
    LOG.info("HDFS service at '%s' has been started",
             nn_instance.hostname)

    if jt_instance:
        run.start_process(remote.get_remote(jt_instance), "jobtracker")
        for tt in tasktrackers:
            run.start_process(remote.get_remote(tt), "tasktracker")
        LOG.info("MapReduce service at '%s' has been started",
                 jt_instance.hostname)

    # Fix: use lazy %-style logging args (consistent with the LOG calls
    # above) instead of eager string interpolation.
    LOG.info('Cluster %s has been started successfully', cluster.name)
    self._set_cluster_info(cluster)
示例2: decommission_dn
def decommission_dn(nn, inst_to_be_deleted, survived_inst):
    """Gracefully decommission datanodes through the HDFS exclude file.

    Writes the doomed hosts into dn.excl, refreshes the namenode and
    polls the dfsadmin report until every excluded node reports
    'Decommissioned', then rewrites dn.incl with only the survivors.
    Raises Exception if decommissioning does not finish in time.
    """
    with remote.get_remote(nn) as r:
        r.write_file_to("/etc/hadoop/dn.excl",
                        utils.generate_fqdn_host_names(inst_to_be_deleted))
        run.refresh_nodes(remote.get_remote(nn), "dfsadmin")
        context.sleep(3)

        attempts_left = 100
        while attempts_left:
            cmd = r.execute_command(
                "sudo su -c 'hadoop dfsadmin -report' hadoop")
            all_found = True
            datanodes_info = parse_dfs_report(cmd[1])

            # Every instance slated for removal must appear as
            # 'Decommissioned' before the include file may be rewritten.
            for doomed in inst_to_be_deleted:
                for dn in datanodes_info:
                    if (dn["Name"].startswith(doomed.internal_ip)) and (
                            dn["Decommission Status"] != "Decommissioned"):
                        all_found = False
                        break

            if all_found:
                r.write_files_to(
                    {"/etc/hadoop/dn.incl":
                        utils.generate_fqdn_host_names(survived_inst),
                     "/etc/hadoop/dn.excl": ""}
                )
                break
            context.sleep(3)
            attempts_left -= 1

        if not attempts_left:
            raise Exception("Cannot finish decommission")
示例3: decommission_tt
def decommission_tt(jt, inst_to_be_deleted, survived_inst):
    """Remove tasktrackers from the cluster via the MapReduce exclude file.

    Writes the doomed hosts into tt.excl, refreshes the jobtracker, and
    then rewrites tt.incl with the surviving hosts while clearing the
    exclude file.
    """
    excluded_hosts = utils.generate_fqdn_host_names(inst_to_be_deleted)
    with remote.get_remote(jt) as r:
        r.write_file_to("/etc/hadoop/tt.excl", excluded_hosts)
        run.refresh_nodes(remote.get_remote(jt), "mradmin")
        context.sleep(3)
        r.write_files_to(
            {"/etc/hadoop/tt.incl":
                utils.generate_fqdn_host_names(survived_inst),
             "/etc/hadoop/tt.excl": ""}
        )
示例4: scale_cluster
def scale_cluster(self, cluster, instances):
    """Set up newly added instances and plug them into running services."""
    self._setup_instances(cluster, instances)

    namenode = utils.get_namenode(cluster)
    run.refresh_nodes(remote.get_remote(namenode), "dfsadmin")

    jobtracker = utils.get_jobtracker(cluster)
    if jobtracker:
        run.refresh_nodes(remote.get_remote(jobtracker), "mradmin")

    self._start_tt_dn_processes(instances)
示例5: _push_configs_to_nodes
def _push_configs_to_nodes(self, cluster, instances=None):
    """Distribute generated Hadoop/Oozie configuration to cluster nodes.

    Pushes the per-node-group config files and runs the init script on
    each target instance, then regenerates the datanode/tasktracker
    include files on the master nodes and oozie-site on the Oozie host.
    Defaults to all cluster instances when *instances* is None.
    """
    extra = self._extract_configs_to_extra(cluster)
    if instances is None:
        instances = utils.get_instances(cluster)

    for instance in instances:
        ng_extra = extra[instance.node_group.id]
        files = {
            '/etc/hadoop/core-site.xml': ng_extra['xml']['core-site'],
            '/etc/hadoop/mapred-site.xml': ng_extra['xml']['mapred-site'],
            '/etc/hadoop/hdfs-site.xml': ng_extra['xml']['hdfs-site'],
            '/tmp/savanna-hadoop-init.sh': ng_extra['setup_script'],
        }
        with remote.get_remote(instance) as r:
            # TODO(aignatov): sudo chown is wrong solution. But it works.
            r.execute_command(
                'sudo chown -R $USER:$USER /etc/hadoop'
            )
            r.execute_command(
                'sudo chown -R $USER:$USER /opt/oozie/conf'
            )
            r.write_files_to(files)
            r.execute_command(
                'sudo chmod 0500 /tmp/savanna-hadoop-init.sh'
            )
            r.execute_command(
                'sudo /tmp/savanna-hadoop-init.sh '
                '>> /tmp/savanna-hadoop-init.log 2>&1')

    nn = utils.get_namenode(cluster)
    with remote.get_remote(nn) as r:
        r.write_file_to(
            '/etc/hadoop/dn.incl',
            utils.generate_fqdn_host_names(utils.get_datanodes(cluster)))

    jt = utils.get_jobtracker(cluster)
    if jt:
        with remote.get_remote(jt) as r:
            r.write_file_to(
                '/etc/hadoop/tt.incl',
                utils.generate_fqdn_host_names(
                    utils.get_tasktrackers(cluster)))

    oozie = utils.get_oozie(cluster)
    if oozie:
        with remote.get_remote(oozie) as r:
            r.write_file_to(
                '/opt/oozie/conf/oozie-site.xml',
                extra[oozie.node_group.id]['xml']['oozie-site'])
示例6: _configure_instances
def _configure_instances(cluster):
    """Configure active instances.

    * generate /etc/hosts
    * setup passwordless login
    * etc.
    """
    hosts = _generate_etc_hosts(cluster)
    for node_group in cluster.node_groups:
        for instance in node_group.instances:
            with remote.get_remote(instance) as r:
                r.write_file_to('etc-hosts', hosts)
                r.execute_command('sudo mv etc-hosts /etc/hosts')

                # Wait (up to 10 failed attempts, 1s apart) for the
                # user's id_rsa key to be generated on the node.
                for _attempt in range(10):
                    code, _ = r.execute_command('ls .ssh/id_rsa',
                                                raise_when_error=False)
                    if not code:
                        break
                    context.sleep(1)
                else:
                    raise RuntimeError("Error getting user private key")

                r.execute_command('sudo chown $USER:$USER .ssh/id_rsa')
                r.execute_command('chmod 400 .ssh/id_rsa')
示例7: _check_if_up
def _check_if_up(instance):
    """Return True once *instance* is ACTIVE, networked and reachable.

    A positive result is cached on the instance object (``_is_up``), so
    subsequent calls short-circuit.  Raises RuntimeError for a server in
    ERROR status.
    """
    if hasattr(instance, '_is_up'):
        return True

    server = nova.get_instance_info(instance)
    if server.status == 'ERROR':
        # TODO(slukjanov): replace with specific error
        raise RuntimeError("node %s has error status" % server.name)
    if server.status != 'ACTIVE' or not server.networks:
        return False
    if not networks.init_instances_ips(instance, server):
        return False

    try:
        exit_code, _ = remote.get_remote(instance).execute_command("hostname")
    except Exception as ex:
        LOG.debug("Can't login to node %s (%s), reason %s",
                  server.name, instance.management_ip, ex)
        return False
    if exit_code:
        return False

    instance._is_up = True
    return True
示例8: create_workflow_dir
def create_workflow_dir(where, job, hdfs_user):
    """Create a unique per-job workflow directory in the user's HDFS home.

    Returns the constructed directory path.
    """
    job_dir = _add_postfix('/user/%s/' % hdfs_user)
    job_dir += '%s/%s' % (job.name, six.text_type(uuid.uuid4()))
    with remote.get_remote(where) as r:
        h.create_dir(r, job_dir, hdfs_user)
    return job_dir
示例9: create_workflow_dir
def create_workflow_dir(where, job):
    """Create a unique workflow directory under /user/hadoop in HDFS.

    Returns the constructed directory path.
    """
    base_dir = _add_postfix('/user/hadoop/')
    workflow_dir = base_dir + '%s/%s' % (job.name,
                                         uuidutils.generate_uuid())
    with remote.get_remote(where) as r:
        h.create_dir(r, workflow_dir)
    return workflow_dir
示例10: scale_cluster
def scale_cluster(self, cluster, instances):
    """Push configs and keys to new instances and start their daemons."""
    self._push_configs_to_nodes(cluster, instances=instances)
    self._write_hadoop_user_keys(cluster.private_key, instances)

    run.refresh_nodes(remote.get_remote(utils.get_namenode(cluster)),
                      "dfsadmin")
    jt = utils.get_jobtracker(cluster)
    if jt:
        run.refresh_nodes(remote.get_remote(jt), "mradmin")

    for instance in instances:
        processes = instance.node_group.node_processes
        with remote.get_remote(instance) as r:
            if "datanode" in processes:
                run.start_process(r, "datanode")
            if "tasktracker" in processes:
                run.start_process(r, "tasktracker")
示例11: upload_job_file
def upload_job_file(where, job_dir, job_origin, job):
    """Fetch the job's main binary and upload it into HDFS.

    Jar jobs get their binary placed under a ``lib`` subdirectory.
    Returns the full HDFS path of the uploaded file.
    """
    main_binary = conductor.job_binary_get_raw_data(context.ctx(),
                                                    job_origin.url)
    if job.type == 'Jar':
        job_dir += '/lib'

    res_name = main_res_names[job.type]
    with remote.get_remote(where) as r:
        h.put_file_to_hdfs(r, main_binary, res_name, job_dir)
    return "%s/%s" % (job_dir, res_name)
示例12: start_cluster
def start_cluster(self, cluster):
    """Start all configured services on the cluster.

    Starts HDFS (namenode, optional secondary namenodes, datanodes),
    MapReduce (jobtracker, tasktrackers) when present, and optionally
    Oozie and the Hive metastore, then records the cluster info.
    """
    nn_instance = utils.get_namenode(cluster)
    datanodes = utils.get_datanodes(cluster)
    jt_instance = utils.get_jobtracker(cluster)
    tasktrackers = utils.get_tasktrackers(cluster)
    oozie = utils.get_oozie(cluster)
    hive_server = utils.get_hiveserver(cluster)

    with remote.get_remote(nn_instance) as r:
        run.format_namenode(r)
        run.start_process(r, "namenode")

    snns = utils.get_secondarynamenodes(cluster)
    if snns:
        for snn in snns:
            run.start_process(remote.get_remote(snn), "secondarynamenode")
    for dn in datanodes:
        run.start_process(remote.get_remote(dn), "datanode")
    LOG.info("HDFS service at '%s' has been started",
             nn_instance.hostname)

    if jt_instance:
        run.start_process(remote.get_remote(jt_instance), "jobtracker")
        for tt in tasktrackers:
            run.start_process(remote.get_remote(tt), "tasktracker")
        LOG.info("MapReduce service at '%s' has been started",
                 jt_instance.hostname)

    if oozie:
        with remote.get_remote(oozie) as r:
            if c_helper.is_mysql_enable(cluster):
                run.mysql_start(r, oozie)
                run.oozie_create_db(r)
            run.oozie_share_lib(r, nn_instance.hostname)
            run.start_oozie(r)
            # Fix: log the Oozie host, not the namenode host (the
            # original logged nn_instance.hostname here).
            LOG.info("Oozie service at '%s' has been started",
                     oozie.hostname)

    if hive_server:
        with remote.get_remote(nn_instance) as r:
            run.hive_create_warehouse_dir(r)
        if c_helper.is_mysql_enable(cluster):
            with remote.get_remote(hive_server) as h:
                # MySQL is already running if Oozie shares this host.
                if not oozie or hive_server.hostname != oozie.hostname:
                    run.mysql_start(h, hive_server)
                run.hive_create_db(h)
                run.hive_metastore_start(h)
                LOG.info("Hive Metastore server at %s has been started",
                         hive_server.hostname)

    # Fix: lazy %-style logging args for consistency with the calls above.
    LOG.info('Cluster %s has been started successfully', cluster.name)
    self._set_cluster_info(cluster)
示例13: _push_configs_to_nodes
def _push_configs_to_nodes(self, cluster, instances=None):
    """Write per-node-group Hadoop configs and run the init script.

    Afterwards regenerates the datanode/tasktracker include files on
    the master nodes.  Defaults to all cluster instances when
    *instances* is None.
    """
    if instances is None:
        instances = utils.get_instances(cluster)

    for instance in instances:
        ng_extra = instance.node_group.extra
        files = {
            '/etc/hadoop/core-site.xml': ng_extra['xml']['core-site'],
            '/etc/hadoop/mapred-site.xml': ng_extra['xml']['mapred-site'],
            '/etc/hadoop/hdfs-site.xml': ng_extra['xml']['hdfs-site'],
            '/tmp/savanna-hadoop-init.sh': ng_extra['setup_script'],
        }
        with remote.get_remote(instance) as r:
            r.execute_command(
                'sudo chown -R $USER:$USER /etc/hadoop'
            )
            r.write_files_to(files)
            r.execute_command(
                'sudo chmod 0500 /tmp/savanna-hadoop-init.sh'
            )
            r.execute_command(
                'sudo /tmp/savanna-hadoop-init.sh '
                '>> /tmp/savanna-hadoop-init.log 2>&1')

    nn = utils.get_namenode(cluster)
    with remote.get_remote(nn) as r:
        r.write_file_to(
            '/etc/hadoop/dn.incl',
            utils.generate_fqdn_host_names(utils.get_datanodes(cluster)))

    jt = utils.get_jobtracker(cluster)
    if jt:
        with remote.get_remote(jt) as r:
            r.write_file_to(
                '/etc/hadoop/tt.incl',
                utils.generate_fqdn_host_names(
                    utils.get_tasktrackers(cluster)))
示例14: _push_configs_to_existing_node
def _push_configs_to_existing_node(self, cluster, extra, instance):
    """Re-push master-related configuration to an already-running node.

    The node is skipped entirely unless it hosts one of the master
    processes or data locality is enabled for the cluster.
    """
    node_processes = instance.node_group.node_processes
    master_processes = ('namenode', 'jobtracker', 'oozie', 'hiveserver')
    need_update = (c_helper.is_data_locality_enabled(cluster) or
                   any(p in node_processes for p in master_processes))
    if not need_update:
        return

    with remote.get_remote(instance) as r:
        self._write_topology_data(r, cluster, extra)
        self._push_master_configs(r, cluster, extra, instance)
示例15: _mount_volume
def _mount_volume(instance, device_path, mount_point):
    """Format *device_path* as ext4 and mount it at *mount_point*.

    Raises RuntimeError if any step fails.  The commands run
    sequentially and we stop at the first failure — the original
    collected all exit codes and checked them only at the end, which
    meant a failed mkdir still led to formatting and mounting the
    device anyway.
    """
    commands = (
        'sudo mkdir -p %s' % mount_point,
        'sudo mkfs.ext4 %s' % device_path,
        'sudo mount %s %s' % (device_path, mount_point),
    )
    with remote.get_remote(instance) as r:
        for command in commands:
            code, _ = r.execute_command(command)
            if code:
                raise RuntimeError("Error mounting volume to instance %s" %
                                   instance.instance_id)