本文整理汇总了Python中savanna.context.sleep函数的典型用法代码示例。如果您正苦于以下问题:Python sleep函数的具体用法?Python sleep怎么用?Python sleep使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了sleep函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _configure_instances
def _configure_instances(cluster):
    """Prepare every active instance of the cluster for use.

    * generate /etc/hosts
    * setup passwordless login
    * etc.
    """
    etc_hosts = _generate_etc_hosts(cluster)
    for group in cluster.node_groups:
        for inst in group.instances:
            with remote.get_remote(inst) as conn:
                conn.write_file_to('etc-hosts', etc_hosts)
                conn.execute_command('sudo mv etc-hosts /etc/hosts')

                # Poll until cloud-init has finished generating the
                # ssh key pair; give up after ~10 seconds.
                attempts = 10
                while attempts > 0:
                    ret_code, _ = conn.execute_command(
                        'ls .ssh/id_rsa', raise_when_error=False)
                    if not ret_code:
                        break
                    attempts -= 1
                    context.sleep(1)
                else:
                    raise RuntimeError("Error getting user private key")

                conn.execute_command('sudo chown $USER:$USER .ssh/id_rsa')
                conn.execute_command('chmod 400 .ssh/id_rsa')
示例2: _await_networks
def _await_networks(cluster, instances):
    """Block until every instance has an IP and is reachable over ssh."""
    if not instances:
        return

    assigned = set()
    while len(assigned) != len(instances):
        # Bail out silently if the cluster was deleted meanwhile.
        if not g.check_cluster_exists(instances[0].node_group.cluster):
            return
        for inst in instances:
            if inst.id in assigned:
                continue
            if networks.init_instances_ips(inst):
                assigned.add(inst.id)
        context.sleep(1)

    LOG.info("Cluster '%s': all instances have IPs assigned" % cluster.id)

    # Re-read the cluster so we operate on fresh instance objects.
    ctx = context.ctx()
    cluster = conductor.cluster_get(ctx, instances[0].node_group.cluster)
    instances = _get_instances(cluster, assigned)

    # Wait for ssh on all instances in parallel.
    with context.ThreadGroup() as tg:
        for inst in instances:
            tg.spawn("wait-for-ssh-%s" % inst.instance_name,
                     _wait_until_accessible, inst)

    LOG.info("Cluster '%s': all instances are accessible" % cluster.id)
示例3: _await_networks
def _await_networks(instances):
    """Wait until all given instances have IPs and answer to ssh checks."""
    if not instances:
        return

    with_ips = set()
    while len(with_ips) != len(instances):
        # Stop waiting if the cluster disappeared.
        if not _check_cluster_exists(instances[0].node_group.cluster):
            return
        for inst in instances:
            if inst.id not in with_ips and networks.init_instances_ips(inst):
                with_ips.add(inst.id)
        context.sleep(1)

    # Refresh instance objects from the conductor before the ssh phase.
    ctx = context.ctx()
    cluster = conductor.cluster_get(ctx, instances[0].node_group.cluster)
    instances = get_instances(cluster, with_ips)

    reachable = set()
    while len(reachable) != len(instances):
        if not _check_cluster_exists(instances[0].node_group.cluster):
            return
        for inst in instances:
            if inst.id not in reachable and _check_if_accessible(inst):
                reachable.add(inst.id)
        context.sleep(1)
示例4: _wait_for_host_registrations
def _wait_for_host_registrations(self, num_hosts, ambari_info):
    """Poll the Ambari REST API until num_hosts agents have registered."""
    LOG.info(
        'Waiting for all Ambari agents to register with server ...')
    url = 'http://{0}/api/v1/hosts'.format(ambari_info.get_address())
    result = None
    json_result = None
    #TODO(jspeidel): timeout
    while True:
        # Done once we have a successful response listing enough hosts.
        if result is not None and len(json_result['items']) >= num_hosts:
            break
        context.sleep(5)
        try:
            result = requests.get(url, auth=(ambari_info.user,
                                             ambari_info.password))
            json_result = json.loads(result.text)
            # TODO(jspeidel): just for debug
            LOG.info('Registered Hosts: {0} of {1}'.format(
                len(json_result['items']), num_hosts))
            for host in json_result['items']:
                LOG.debug('Registered Host: {0}'.format(
                    host['Hosts']['host_name']))
        except requests.ConnectionError:
            #TODO(jspeidel): max wait time
            LOG.info('Waiting to connect to ambari server ...')
示例5: decommission_dn
def decommission_dn(nn, inst_to_be_deleted, survived_inst):
    """Decommission datanodes and wait until HDFS reports them done."""
    with remote.get_remote(nn) as r:
        r.write_file_to("/etc/hadoop/dn.excl",
                        utils.generate_fqdn_host_names(inst_to_be_deleted))
        run.refresh_nodes(remote.get_remote(nn), "dfsadmin")
        context.sleep(3)

        # Poll the dfsadmin report; retry up to 100 times, 3s apart.
        attempts = 100
        while attempts:
            cmd = r.execute_command(
                "sudo su -c 'hadoop dfsadmin -report' hadoop")
            report = parse_dfs_report(cmd[1])

            done = True
            for inst in inst_to_be_deleted:
                for dn in report:
                    if (dn["Name"].startswith(inst.internal_ip)) and (
                            dn["Decommission Status"] != "Decommissioned"):
                        done = False
                        break

            if done:
                # All nodes decommissioned: rewrite include list and
                # clear the exclude list.
                r.write_files_to({
                    "/etc/hadoop/dn.incl":
                        utils.generate_fqdn_host_names(survived_inst),
                    "/etc/hadoop/dn.excl": "",
                })
                break

            context.sleep(3)
            attempts -= 1

        if not attempts:
            raise Exception("Cannot finish decommission")
示例6: decommission_tt
def decommission_tt(jt, inst_to_be_deleted, survived_inst):
    """Exclude removed task trackers, refresh mradmin, and reset the lists."""
    with remote.get_remote(jt) as r:
        excluded = utils.generate_fqdn_host_names(inst_to_be_deleted)
        r.write_file_to("/etc/hadoop/tt.excl", excluded)
        run.refresh_nodes(remote.get_remote(jt), "mradmin")
        context.sleep(3)
        r.write_files_to({
            "/etc/hadoop/tt.incl":
                utils.generate_fqdn_host_names(survived_inst),
            "/etc/hadoop/tt.excl": "",
        })
示例7: _await_instances
def _await_instances(cluster):
    """Await all instances are in Active status and available."""
    while True:
        # Check every instance each pass (no short-circuit, matching the
        # original behavior of visiting all instances per iteration).
        statuses = [_check_if_up(inst)
                    for ng in cluster.node_groups
                    for inst in ng.instances]
        context.sleep(1)
        if all(statuses):
            return
示例8: _await_attach_volume
def _await_attach_volume(instance, device_path):
    """Wait (up to ~10s) until device_path appears on the instance."""
    attempts = 10
    while attempts > 0:
        if device_path in _get_device_paths(instance):
            return
        context.sleep(1)
        attempts -= 1
    raise RuntimeError("Error attach volume to instance %s" %
                       instance.instance_name)
示例9: decommission_tt
def decommission_tt(jt, inst_to_be_deleted, survived_inst):
    """Exclude removed task trackers from the job tracker, then reset lists."""
    with jt.remote as r:
        excluded = utils.generate_fqdn_host_names(inst_to_be_deleted)
        r.write_file_to('/etc/hadoop/tt.excl', excluded)
        run.refresh_nodes(jt.remote, "mradmin")
        context.sleep(3)
        included = utils.generate_fqdn_host_names(survived_inst)
        r.write_files_to({'/etc/hadoop/tt.incl': included,
                          '/etc/hadoop/tt.excl': "",
                          })
示例10: _await_attach_volumes
def _await_attach_volumes(instance, count_volumes):
    """Poll every 2s (up to ~10s) for count_volumes unmounted devices."""
    remaining = 10
    poll_interval = 2
    while remaining > 0:
        if len(_get_unmounted_devices(instance)) == count_volumes:
            return
        remaining -= poll_interval
        context.sleep(poll_interval)
    raise RuntimeError("Error attach volume to instance %s" %
                       instance.instance_name)
示例11: _await_active
def _await_active(instances):
    """Await all instances are in Active status and available."""
    if not instances:
        return

    active = set()
    while len(active) != len(instances):
        # Give up quietly if the owning cluster no longer exists.
        if not _check_cluster_exists(instances[0].node_group.cluster):
            return
        for inst in instances:
            if inst.id not in active and _check_if_active(inst):
                active.add(inst.id)
        context.sleep(1)
示例12: _create_attach_volume
def _create_attach_volume(instance, size, device_path, display_name=None,
                          volume_type=None):
    """Create a cinder volume, wait for it, then attach it via nova."""
    volume = cinder.client().volumes.create(size=size,
                                            display_name=display_name,
                                            volume_type=volume_type)
    instance.volumes.append(volume.id)

    # Poll until the volume leaves its transitional state.
    while volume.status != 'available':
        volume = cinder.get_volume(volume.id)
        status = volume.status
        if status == 'error':
            raise RuntimeError("Volume %s has error status" % volume.id)
        context.sleep(1)

    nova.client().volumes.create_server_volume(instance.instance_id,
                                               volume.id, device_path)
示例13: _wait_until_accessible
def _wait_until_accessible(instance):
    """Poll until ssh to *instance* works and authorized_keys exists.

    Retries every 5 seconds; gives up only when the owning cluster
    no longer exists.

    :param instance: instance object providing remote() and node_group
    """
    while True:
        try:
            # check if ssh is accessible and cloud-init
            # script is finished generating authorized_keys
            # (stdout is unused, so bind it to `_` instead of a dead local)
            exit_code, _ = instance.remote().execute_command(
                "ls .ssh/authorized_keys", raise_when_error=False)
            if exit_code == 0:
                LOG.debug("Instance %s is accessible" % instance.instance_name)
                return
        except Exception as ex:
            LOG.debug("Can't login to node %s (%s), reason %s",
                      instance.instance_name, instance.management_ip, ex)

        context.sleep(5)

        if not g.check_cluster_exists(instance.node_group.cluster):
            return
示例14: _wait_for_async_request
def _wait_for_async_request(self, request_url, auth):
    """Poll an Ambari async request URL until all of its tasks finish.

    Returns True when every task is COMPLETED, False as soon as any
    task reports FAILED or ABORTED.
    """
    finished = False
    while not finished:
        response = requests.get(request_url, auth=auth)
        LOG.debug(
            'async request ' + request_url + ' response:\n' + response.text)
        payload = json.loads(response.text)

        finished = True
        for item in payload['items']:
            task_status = item['Tasks']['status']
            if task_status == 'FAILED' or task_status == 'ABORTED':
                return False
            if task_status != 'COMPLETED':
                finished = False

        context.sleep(5)
    return finished
示例15: run_in_subprocess
def run_in_subprocess(proc, func, args=(), kwargs=None):
    """Send *func* and its arguments to *proc* over pickle and await result.

    :param proc: subprocess whose stdin/stdout carry pickled payloads
    :param func: picklable callable to execute in the subprocess
    :param args: positional arguments for func
    :param kwargs: keyword arguments for func (defaults to an empty dict)
    :returns: the 'output' value sent back by the subprocess
    :raises SubprocessException: if the subprocess reports an exception
    """
    # NOTE: the signature previously used a mutable default (kwargs={}),
    # a classic Python pitfall: the dict is shared across calls. Use a
    # None sentinel instead; callers omitting kwargs see identical behavior.
    if kwargs is None:
        kwargs = {}
    try:
        pickle.dump(func, proc.stdin)
        pickle.dump(args, proc.stdin)
        pickle.dump(kwargs, proc.stdin)
        proc.stdin.flush()

        result = pickle.load(proc.stdout)
        if 'exception' in result:
            raise SubprocessException(result['exception'])
        return result['output']
    finally:
        # NOTE(dmitryme): in openstack/common/processutils.py it
        # is suggested to sleep a little between calls to multiprocessing.
        # That should allow it make some necessary cleanup
        context.sleep(0)