本文整理汇总了Python中sahara.context.sleep函数的典型用法代码示例。如果您正苦于以下问题:Python sleep函数的具体用法?Python sleep怎么用?Python sleep使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了sleep函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: wait_ambari_requests
def wait_ambari_requests(self, requests, cluster_name):
    """Block until every Ambari request finishes, polling every 5 seconds.

    :param requests: iterable of Ambari request ids to track
    :param cluster_name: name of the Ambari cluster the requests belong to
    :raises p_exc.HadoopProvisionError: if any request ends in a state
        other than COMPLETED/IN_PROGRESS/PENDING
    """
    requests = set(requests)
    failed = []
    while len(requests) > 0:
        completed, not_completed = set(), set()
        for req_id in requests:
            request = self.get_request_info(cluster_name, req_id)
            status = request.get("request_status")
            if status == 'COMPLETED':
                completed.add(req_id)
            elif status in ['IN_PROGRESS', 'PENDING']:
                not_completed.add(req_id)
            else:
                # Any other state (e.g. FAILED, ABORTED) is terminal.
                failed.append(request)
        if failed:
            msg = _("Some Ambari request(s) "
                    "not in COMPLETED state: %(description)s.")
            descrs = []
            for req in failed:
                descr = _(
                    "request %(id)d: %(name)s - in status %(status)s")
                descrs.append(descr %
                              {'id': req.get("id"),
                               'name': req.get("request_context"),
                               'status': req.get("request_status")})
            raise p_exc.HadoopProvisionError(msg % {'description': descrs})
        # Only the still-running requests need another polling round.
        requests = not_completed
        context.sleep(5)
        LOG.debug("Waiting for %d ambari request(s) to be completed",
                  len(not_completed))
    LOG.debug("All ambari requests have been completed")
示例2: wait_till_active
def wait_till_active(self):
    """Poll the Heat stack once per second until it leaves an
    *_IN_PROGRESS state, then verify it finished successfully.

    :raises ex.HeatStackException: if the final stack status is not
        CREATE_COMPLETE or UPDATE_COMPLETE
    """
    while self.heat_stack.stack_status in ("CREATE_IN_PROGRESS",
                                           "UPDATE_IN_PROGRESS"):
        context.sleep(1)
        # Refresh the cached stack object from Heat.
        self.heat_stack.get()
    if self.heat_stack.stack_status not in ("CREATE_COMPLETE",
                                            "UPDATE_COMPLETE"):
        raise ex.HeatStackException(self.heat_stack.stack_status)
示例3: start_cluster
def start_cluster(cluster):
    """Create and start the Ambari cluster from its blueprint.

    Builds a cluster-template mapping each instance to its own host
    group, submits it to the Ambari server, and polls the resulting
    request every 5 seconds until it completes.

    :param cluster: Sahara cluster object to start
    :raises p_exc.HadoopProvisionError: if the Ambari request ends in a
        state other than COMPLETED/IN_PROGRESS/PENDING
    """
    cl_tmpl = {
        "blueprint": cluster.name,
        "default_password": uuidutils.generate_uuid(),
        "host_groups": []
    }
    for ng in cluster.node_groups:
        for instance in ng.instances:
            # One host group per instance, keyed by instance name.
            cl_tmpl["host_groups"].append({
                "name": instance.instance_name,
                "hosts": [{"fqdn": instance.fqdn()}]
            })
    ambari = plugin_utils.get_instance(cluster, p_common.AMBARI_SERVER)
    password = cluster.extra["ambari_password"]
    with ambari_client.AmbariClient(ambari, password=password) as client:
        req_id = client.create_cluster(cluster.name, cl_tmpl)["id"]
        while True:
            status = client.check_request_status(cluster.name, req_id)
            LOG.debug("Task %s in %s state. Completed %.1f%%" % (
                status["request_context"], status["request_status"],
                status["progress_percent"]))
            if status["request_status"] == "COMPLETED":
                return
            if status["request_status"] in ["IN_PROGRESS", "PENDING"]:
                context.sleep(5)
            else:
                raise p_exc.HadoopProvisionError(
                    _("Ambari request in %s state") % status["request_status"])
示例4: get
def get(self, relpath=None, params=None):
    """Invoke the GET method on a resource, retrying on timeouts.

    Retries only when the underlying error message contains "timed out";
    any other socket/URL error is re-raised immediately. Sleeps
    ``self.retry_sleep`` seconds before each retry.

    :param relpath: Optional. A relative path to this resource's path.
    :param params: Key-value data.
    :return: A dictionary of the JSON result.
    :raises ex.CMApiException: if all retries were exhausted by timeouts
    """
    for retry in six.moves.xrange(self.retries + 1):
        if retry:
            # Back off before every attempt after the first one.
            context.sleep(self.retry_sleep)
        try:
            return self.invoke("GET", relpath, params)
        except (socket.error, urllib.error.URLError) as e:
            if "timed out" in six.text_type(e).lower():
                if retry < self.retries:
                    LOG.warning(_LW("Timeout issuing GET request for "
                                    "{path}. Will retry").format(
                        path=self._join_uri(relpath)))
                else:
                    LOG.warning(_LW("Timeout issuing GET request for "
                                    "{path}. No retries left").format(
                        path=self._join_uri(relpath)))
            else:
                raise e
    else:
        # Loop ran out of attempts without returning: every try timed out.
        raise ex.CMApiException(_("Get retry max time reached."))
示例5: _wait_all_processes_removed
def _wait_all_processes_removed(cluster, instance):
    """Poll Ambari every 5 seconds until the host has no HDP processes.

    :param cluster: cluster the instance belongs to
    :param instance: instance whose processes are being removed
    """
    with _get_ambari_client(cluster) as client:
        while True:
            hdp_processes = client.list_host_processes(cluster.name, instance)
            if not hdp_processes:
                return
            context.sleep(5)
示例6: _start_cloudera_manager
def _start_cloudera_manager(cluster):
    """Start the Cloudera Manager services and wait for the API port.

    Starts the CM database and manager daemon over SSH, then probes the
    manager's API port with telnet every 2 seconds for up to 5 minutes.

    :param cluster: cluster whose manager node should be started
    :raises ex.HadoopProvisionError: if the port never opens in time
    """
    manager = pu.get_manager(cluster)
    with manager.remote() as r:
        cmd.start_cloudera_db(r)
        cmd.start_manager(r)
    timeout = 300
    LOG.debug("Waiting %(timeout)s seconds for Manager to start : " % {
        'timeout': timeout})
    s_time = timeutils.utcnow()
    while timeutils.delta_seconds(s_time, timeutils.utcnow()) < timeout:
        try:
            # A successful TCP connect means the manager is listening.
            conn = telnetlib.Telnet(manager.management_ip, CM_API_PORT)
            conn.close()
            break
        except IOError:
            context.sleep(2)
    else:
        # while/else: reached only when the deadline expired without break.
        message = _("Cloudera Manager failed to start in %(timeout)s minutes "
                    "on node '%(node)s' of cluster '%(cluster)s'") % {
            'timeout': timeout / 60,
            'node': manager.management_ip,
            'cluster': cluster.name}
        raise ex.HadoopProvisionError(message)
    LOG.info(_LI("Cloudera Manager has been started"))
示例7: wait_for_host_registrations
def wait_for_host_registrations(self, num_hosts, ambari_info):
    """Wait until ``num_hosts`` agents have registered with Ambari.

    Polls the Ambari hosts API every 5 seconds; connection failures are
    logged and retried indefinitely.

    :param num_hosts: number of agent registrations to wait for
    :param ambari_info: object providing the Ambari server address
    """
    LOG.info(
        _LI('Waiting for all Ambari agents to register with server ...'))
    url = 'http://{0}/api/v1/hosts'.format(ambari_info.get_address())
    result = None
    json_result = None
    # TODO(jspeidel): timeout
    while result is None or len(json_result['items']) < num_hosts:
        context.sleep(5)
        try:
            result = self._get(url, ambari_info)
            json_result = json.loads(result.text)
            LOG.info(_LI('Registered Hosts: %(current_number)s of '
                         '%(final_number)s'),
                     {'current_number': len(json_result['items']),
                      'final_number': num_hosts})
            for hosts in json_result['items']:
                LOG.debug('Registered Host: {0}'.format(
                    hosts['Hosts']['host_name']))
        except Exception:
            # TODO(jspeidel): max wait time
            LOG.info(_LI('Waiting to connect to ambari server ...'))
示例8: execute_with_retries
def execute_with_retries(method, *args, **kwargs):
    """Call ``method`` and retry on configured transient HTTP errors.

    Errors whose status code is in ``ERRORS_TO_RETRY`` are retried up to
    ``CONF.retries.retries_number`` extra times, sleeping at least
    ``CONF.retries.retry_after`` seconds (or the error's own
    ``retry_after`` hint, whichever is larger). Any other error is
    re-raised immediately.

    :param method: callable to invoke
    :return: whatever ``method`` returns on success
    :raises ex.MaxRetriesExceeded: if every attempt failed transiently
    """
    attempts = CONF.retries.retries_number + 1
    while attempts > 0:
        try:
            return method(*args, **kwargs)
        except Exception as e:
            # Different client libraries expose the HTTP code differently.
            error_code = getattr(e, 'http_status', None) or getattr(
                e, 'status_code', None) or getattr(e, 'code', None)
            if error_code in ERRORS_TO_RETRY:
                LOG.warning(_LW('Occasional error occurred during "{method}" '
                                'execution: {error_msg} ({error_code}). '
                                'Operation will be retried.').format(
                    method=method.__name__,
                    error_msg=e,
                    error_code=error_code))
                attempts -= 1
                retry_after = getattr(e, 'retry_after', 0)
                context.sleep(max(retry_after, CONF.retries.retry_after))
            else:
                LOG.debug('Permanent error occurred during "{method}" '
                          'execution: {error_msg}.'.format(
                              method=method.__name__, error_msg=e))
                raise e
    else:
        raise ex.MaxRetriesExceeded(attempts, method.__name__)
示例9: _await_cldb
def _await_cldb(self, cluster_context, instances=None, timeout=600):
    """Wait until the MapR CLDB is up and all instances registered.

    Repeatedly runs the node-list command on the CLDB node until it
    reports status 'ok'; after enough retries, any instance still
    missing from the reported IPs is treated as a failure.

    :param cluster_context: context providing instance lookup
    :param instances: instances to verify; defaults to all instances
    :param timeout: maximum seconds to wait for CLDB initialization
    :raises ex.HadoopProvisionError: if CLDB never starts in time or a
        node fails to connect to it
    """
    instances = instances or cluster_context.get_instances()
    cldb_node = cluster_context.get_instance(mfs.CLDB)
    start_time = timeutils.utcnow()
    retry_count = 0
    with cldb_node.remote() as r:
        LOG.debug("Waiting {count} seconds for CLDB initialization".format(
            count=timeout))
        while timeutils.delta_seconds(start_time,
                                      timeutils.utcnow()) < timeout:
            ec, out = r.execute_command(NODE_LIST_CMD,
                                        raise_when_error=False)
            resp = json.loads(out)
            status = resp['status']
            if str(status).lower() == 'ok':
                ips = [n['ip'] for n in resp['data']]
                retry_count += 1
                for i in instances:
                    # Tolerate missing nodes for the first few polls;
                    # after DEFAULT_RETRY_COUNT rounds it is a failure.
                    if (i.management_ip not in ips
                            and retry_count > DEFAULT_RETRY_COUNT):
                        raise ex.HadoopProvisionError(_(
                            "Node failed to connect to CLDB: %s") %
                            i.management_ip)
                break
            else:
                context.sleep(DELAY)
        else:
            # while/else: deadline expired without ever seeing 'ok'.
            raise ex.HadoopProvisionError(_("CLDB failed to start"))
示例10: _await_networks
def _await_networks(self, cluster, instances):
    """Wait for every instance to get an IP and become SSH-accessible.

    First polls until all instances have their IPs initialized (bailing
    out early if the cluster was deleted meanwhile), then spawns one
    green thread per instance to wait for SSH access.

    :param cluster: cluster being provisioned
    :param instances: instances to wait for; no-op if empty
    """
    if not instances:
        return
    ips_assigned = set()
    while len(ips_assigned) != len(instances):
        # Stop waiting if the cluster disappeared (e.g. was deleted).
        if not g.check_cluster_exists(cluster):
            return
        for instance in instances:
            if instance.id not in ips_assigned:
                if networks.init_instances_ips(instance):
                    ips_assigned.add(instance.id)
        context.sleep(1)
    LOG.info(
        _LI("Cluster '%s': all instances have IPs assigned"), cluster.id)
    # Re-read the cluster so the instance objects carry the fresh IPs.
    cluster = conductor.cluster_get(context.ctx(), cluster)
    instances = g.get_instances(cluster, ips_assigned)
    with context.ThreadGroup() as tg:
        for instance in instances:
            tg.spawn("wait-for-ssh-%s" % instance.instance_name,
                     self._wait_until_accessible, instance)
    LOG.info(_LI("Cluster '%s': all instances are accessible"), cluster.id)
示例11: _get_job_status_from_remote
def _get_job_status_from_remote(self, job_execution, retries=3):
    """Query the Storm nimbus node for the topology's current status.

    Greps the ``storm list`` output for the topology; retries up to
    ``retries`` times (10 s apart) before declaring the job killed.

    :param job_execution: job execution whose topology is checked
    :param retries: number of attempts before giving up
    :return: terminated-statuses dict if the topology is gone, otherwise
        a ``{"status": ...}`` dict (RUNNING or KILLED)
    """
    topology_name, inst_id = self._get_instance_if_running(
        job_execution)
    if topology_name is None or inst_id is None:
        return edp.JOB_STATUSES_TERMINATED
    topology_name = self._get_topology_name(job_execution)
    master = plugin_utils.get_instance(self.cluster, "nimbus")
    cmd = (
        "%(storm)s -c nimbus.host=%(host)s "
        "list | grep %(topology_name)s | awk '{print $2}'") % (
        {
            "storm": "/usr/local/storm/bin/storm",
            "host": master.hostname(),
            "topology_name": topology_name
        })
    for i in range(retries):
        with remote.get_remote(master) as r:
            ret, stdout = r.execute_command("%s " % (cmd))
            # If the status is ACTIVE is there, it's still running
            if stdout.strip() == "ACTIVE":
                return {"status": edp.JOB_STATUS_RUNNING}
            else:
                if i == retries - 1:
                    return {"status": edp.JOB_STATUS_KILLED}
                context.sleep(10)
示例12: wait
def wait(self, timeout=None):
    """Wait for command to finish.

    Polls the command every 5 seconds (or less, near the deadline)
    until it is no longer active or the timeout elapses.

    :param timeout: (Optional) Max amount of time (in seconds) to wait.
        Wait forever by default.
    :return: The final ApiCommand object, containing the last known state.
        The command may still be running in case of timeout.
    """
    if self.id == ApiCommand.SYNCHRONOUS_COMMAND_ID:
        # Synchronous commands finished before this object was created.
        return self
    SLEEP_SEC = 5
    if timeout is None:
        deadline = None
    else:
        deadline = time.time() + timeout
    while True:
        cmd = self.fetch()
        if not cmd.active:
            return cmd
        if deadline is not None:
            now = time.time()
            if deadline < now:
                return cmd
            else:
                # Never sleep past the deadline.
                context.sleep(min(SLEEP_SEC, deadline - now))
        else:
            context.sleep(SLEEP_SEC)
示例13: _detach_volume
def _detach_volume(instance, volume_id):
    """Detach a Cinder volume from an instance and wait for completion.

    Issues the detach via Nova, then polls the volume status every 2
    seconds until it becomes 'available' or 'error', giving up (with a
    warning, not an exception) after ``CONF.detach_volume_timeout``.

    :param instance: instance the volume is attached to
    :param volume_id: id of the volume to detach
    """
    volume = cinder.get_volume(volume_id)
    try:
        LOG.debug("Detaching volume %s from instance %s" % (
            volume_id, instance.instance_name))
        nova.client().volumes.delete_server_volume(instance.instance_id,
                                                   volume_id)
    except Exception:
        LOG.exception(_LE("Can't detach volume %s"), volume.id)

    detach_timeout = CONF.detach_volume_timeout
    LOG.debug("Waiting %d seconds to detach %s volume" % (detach_timeout,
                                                          volume_id))
    s_time = tu.utcnow()
    while tu.delta_seconds(s_time, tu.utcnow()) < detach_timeout:
        volume = cinder.get_volume(volume_id)
        if volume.status not in ['available', 'error']:
            context.sleep(2)
        else:
            LOG.debug("Volume %s has been detached" % volume_id)
            return
    else:
        # while/else: timeout expired without the volume detaching.
        LOG.warn(_LW("Can't detach volume %(volume)s. "
                     "Current status of volume: %(status)s"),
                 {'volume': volume_id, 'status': volume.status})
示例14: wait_stack_completion
def wait_stack_completion(stack, is_update=False, last_updated_time=None):
    """Poll a Heat stack once per second until it finishes.

    :param stack: heatclient stack object to wait on
    :param is_update: whether this waits for an update (vs. a create)
    :param last_updated_time: previous update timestamp, used to detect
        that a new update has actually been applied
    :raises ex.HeatStackException: if the final status is not COMPLETE
    """
    base.execute_with_retries(stack.get)
    while not _verify_completion(stack, is_update, last_updated_time):
        context.sleep(1)
        base.execute_with_retries(stack.get)
    if stack.status != 'COMPLETE':
        raise ex.HeatStackException(stack.stack_status_reason)
示例15: _make_checks
def _make_checks(self, instance_info, sleep=True):
    """Assert the context carries the expected current instance info.

    :param instance_info: expected value of ``ctx.current_instance_info``
    :param sleep: when True, sleep 2 s first (e.g. to let another green
        thread run before checking)
    """
    ctx = context.ctx()
    if sleep:
        context.sleep(2)
    current_instance_info = ctx.current_instance_info
    self.assertEqual(instance_info, current_instance_info)