This article collects typical usage examples of the Cobalt.Proxy.ComponentProxy.get_child_data method in Python. If you have been wondering how exactly to use ComponentProxy.get_child_data, or what it is good for, the hand-picked example below may help. You can also explore further usage examples of its enclosing class, Cobalt.Proxy.ComponentProxy.
One code example of the ComponentProxy.get_child_data method is shown below; examples are sorted by popularity by default. You can upvote the examples you like or find useful, and your feedback helps the site recommend better Python code examples.
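Before the full example, here is a minimal sketch of the call pattern in isolation. It assumes a running Cobalt component registered as "system_script_forker" and an already-known child id; the import path for ComponentLookupError and the shape of the returned record (a dict with a 'stderr' key) are inferred from the example below, not from separate API documentation.

from Cobalt.Proxy import ComponentProxy
from Cobalt.Exceptions import ComponentLookupError

def fetch_child_output(child_id):
    """Sketch: read the output record of a forker child by id."""
    try:
        forker = ComponentProxy("system_script_forker")
    except ComponentLookupError:
        return None  # component unreachable; nothing to collect
    # The example below reads child_output['stderr'], so the returned
    # record is assumed to be a dict holding the child's captured output.
    return forker.get_child_data(child_id)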
Example 1: check_done_cleaning
# Required import: from Cobalt.Proxy import ComponentProxy [as alias]
# Alternatively: from Cobalt.Proxy.ComponentProxy import get_child_data [as alias]
def check_done_cleaning(self):
"""Check to see if the processes we are using to clean up nodes
post-run are done. If they are, release the nodes back for general
consumption. If the cleanup fails for some reason, then mark the node
down and release it.
"""
if not self.cleaning_processes:
#nothing to clean up, so nothing to check
return
for cleaning_process in self.cleaning_processes:
#if we can't reach the forker, we've lost all the cleanup scripts.
#don't try and recover, just assume all nodes that were being
#cleaned are down. --PMR
if cleaning_process['retry']:
continue #skip this one for now; it will be tried again if the component comes back up.
jobid = cleaning_process['jobid']
user = cleaning_process['user']
try:
forker = ComponentProxy("system_script_forker")
exit_status = forker.child_completed(cleaning_process['cleaning_id'])
forker.child_cleanup([cleaning_process['cleaning_id']])
except ComponentLookupError:
self.logger.error("Job %s/%s: Error contacting forker "
"component. Running child processes are "
"unrecoverable." % (jobid, user))
return
if exit_status is not None:
#we're done, this node is now free to be scheduled again.
self.running_nodes.discard(cleaning_process["host"])
cleaning_process["completed"] = True
self.cleaning_host_count[jobid] -= 1
else:
#still running; check whether the epilogue timeout has been exceeded
if (time.time() - cleaning_process["start_time"] >
float(get_cluster_system_config("epilogue_timeout", 60.0))):
cleaning_process["completed"] = True
try:
forker = ComponentProxy("system_script_forker")
forker.signal(cleaning_process['cleaning_id'], "SIGINT")
child_output = forker.get_child_data(cleaning_process['cleaning_id'])
forker.child_cleanup([cleaning_process['cleaning_id']])
#mark as dirty and arrange to mark down.
self.down_nodes.add(cleaning_process['host'])
self.running_nodes.discard(cleaning_process['host']) #host is a plain hostname string, matching its use above
self.logger.error("Job %s/%s: epilogue timed out on host %s, marking hosts down",
user, jobid, cleaning_process['host'])
self.logger.error("Job %s/%s: stderr from epilogue on host %s: [%s]",
user, jobid,
cleaning_process['host'],
child_output['stderr'].strip())
self.cleaning_host_count[jobid] -= 1
except ComponentLookupError:
self.logger.error("Job %s/%s: Error contacting forker "
"component. Running child processes are "
"unrecoverable." % (jobid, user))
if self.cleaning_host_count[jobid] == 0:
self.del_process_groups(jobid)
#clean up other cleanup-monitoring stuff
self.logger.info("Job %s/%s: job finished on %s",
user, jobid, Cobalt.Util.merge_nodelist(self.locations_by_jobid[jobid]))
del self.locations_by_jobid[jobid]
del self.jobid_to_user[jobid]
self.cleaning_processes = [cleaning_process for cleaning_process in self.cleaning_processes
if not cleaning_process["completed"]]
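
The timeout branch above boils down to a small, reusable pattern: obtain one proxy to the forker, interrupt the child, collect its output with get_child_data, then drop the forker's record. The sketch below restates that pattern under the same assumptions as the example (a component registered as "system_script_forker", and a child record exposing a 'stderr' key); the helper name and the logger argument are illustrative, not part of Cobalt.

from Cobalt.Proxy import ComponentProxy
from Cobalt.Exceptions import ComponentLookupError

def terminate_and_collect(cleaning_id, logger):
    """Sketch: interrupt a timed-out epilogue child and gather its output,
    mirroring the timeout branch of check_done_cleaning above."""
    try:
        forker = ComponentProxy("system_script_forker")
        forker.signal(cleaning_id, "SIGINT")           # interrupt the epilogue script
        child_output = forker.get_child_data(cleaning_id)
        forker.child_cleanup([cleaning_id])            # drop the forker's record of the child
        return child_output.get('stderr', '').strip()  # 'stderr' key assumed, as in the example
    except ComponentLookupError:
        logger.error("forker component unreachable; child %s is unrecoverable", cleaning_id)
        return None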