本文整理汇总了Python中pilot.PilotComputeService.cancel方法的典型用法代码示例。如果您正苦于以下问题:Python PilotComputeService.cancel方法的具体用法?Python PilotComputeService.cancel怎么用?Python PilotComputeService.cancel使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类pilot.PilotComputeService
的用法示例。
在下文中一共展示了PilotComputeService.cancel方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: __init__
# 需要导入模块: from pilot import PilotComputeService [as 别名]
# 或者: from pilot.PilotComputeService import cancel [as 别名]
class simple:
    """Minimal convenience wrapper around the Pilot-API.

    Starts a single pilot, submits a batch of identical /bin/echo compute
    units, and tears everything down again.

    Constructor parameters:
        no_jobs   -- number of compute units submitted by startCU()
        pilot     -- dict describing the pilot; must contain "service_url",
                     may contain "number_of_processes", "working_directory",
                     "queue" and "walltime"
        COORD_url -- redis coordination URL; a built-in default is used
                     when omitted
    """

    def __init__(self, no_jobs, pilot, COORD_url=None):
        self.no_jobs = no_jobs
        self.pilot = pilot
        # Fall back to the default redis coordination server when the
        # caller did not supply one.  (was: '== None'; identity test is
        # the correct comparison for None)
        if COORD_url is None:
            self.COORD = "redis://[email protected]:6379"
        else:
            self.COORD = COORD_url

    def check(self):
        # Diagnostic helper: show the coordination URL in use.
        print('Checkup time')
        print(self.COORD)

    def startpilot(self):
        """Create the PilotComputeService and start one pilot."""
        print('Start pilot service')
        self.pilot_compute_service = PilotComputeService(self.COORD)
        # "service_url" is mandatory; the remaining keys are optional and
        # forwarded only when the caller supplied them.  (was: four
        # copy-pasted dict.has_key() checks -- has_key is deprecated and
        # removed in Python 3)
        self.pilot_compute_description = {
            "service_url": self.pilot["service_url"]
        }
        for key in ("number_of_processes", "working_directory", "queue", "walltime"):
            if key in self.pilot:
                self.pilot_compute_description[key] = self.pilot[key]
        self.pilotjob = self.pilot_compute_service.create_pilot(
            pilot_compute_description=self.pilot_compute_description)
        print('Pilot successfully started')

    def startCU(self):
        """Submit self.no_jobs identical compute units and wait for all of
        them to complete."""
        print('Starting Compute Unit submissions')
        self.compute_data_service = ComputeDataService()
        self.compute_data_service.add_pilot_compute_service(self.pilot_compute_service)
        for i in range(self.no_jobs):
            print('Submitting job %s on %s' % (i + 1, self.pilot["service_url"]))
            self.compute_unit_description = {
                "executable": "/bin/echo",
                "arguments": ["$MYOUTPUT"],
                "environment": {'MYOUTPUT': '"Hello from Simple API"'},
                "number_of_processes": 1,
                "output": "stdout.txt",
                "error": "stderr.txt"
            }
            self.compute_unit = self.compute_data_service.submit_compute_unit(self.compute_unit_description)
        print('All Compute Units Submitted. Waiting for completion')
        self.compute_data_service.wait()
        print('All CU executions completed')

    def terminate(self):
        """Cancel all compute units, then shut the pilot service down."""
        print('Terminating pilot')
        self.compute_data_service.cancel()
        self.pilot_compute_service.cancel()
示例2: range
# 需要导入模块: from pilot import PilotComputeService [as 别名]
# 或者: from pilot.PilotComputeService import cancel [as 别名]
# NOTE(review): this fragment depends on names defined earlier in the full
# script (NUMBER_JOBS, RUN_ID, compute_data_service, pilot_compute_service).
sys.stdout.flush()

# Submit NUMBER_JOBS identical compute units running the matrix benchmark.
for i in range(NUMBER_JOBS):
    compute_unit_description = {
        "executable": "/home/jweichel/runmatrix.sh",
        "arguments": [60, 10, RUN_ID],
        "number_of_processes": 1,
        "spmd_variation": "single",
        # BUGFIX: the original line was syntactically invalid
        # ('"output": "stdout.txt", RUN_ID+"out.txt"').  stdout is captured
        # in stdout.txt; the RUN_ID-named copy presumably belonged to the
        # commented-out output_data staging below -- TODO confirm against
        # the original script.
        "output": "stdout.txt",
        "error": "stderr.txt",
        #"output_data":[{ output_du.get_url(): ['std*', RUN_ID+"out.txt"]} ]
    }
    compute_unit = compute_data_service.submit_compute_unit(compute_unit_description)

print("Finished setup of PSS and PDS. Waiting for scheduling of PD")
sys.stdout.flush()
# BUGFIX: the original called compute_service.wait(), a name that does not
# exist in this fragment; the ComputeDataService is what the units were
# submitted to and is what must be waited on.
compute_data_service.wait()

# export the output files to local directory.
#print ("Returning output files")
#sys.stdout.flush()
#output_du.export(os.getcwd()+"/bigjob")

print("Terminate Pilot Compute/Data Services")
sys.stdout.flush()
compute_data_service.cancel()
#pilot_data_service.cancel()
pilot_compute_service.cancel()
示例3: async_re_job
# 需要导入模块: from pilot import PilotComputeService [as 别名]
# 或者: from pilot.PilotComputeService import cancel [as 别名]
#.........这里部分代码省略.........
time.sleep(1)
self.updateStatus()
self.print_status()
self.launchJobs()
self.updateStatus()
self.print_status()
time.sleep(cycle_time)
self.updateStatus()
self.print_status()
self.doExchanges()
self.updateStatus()
self.print_status()
self.waitJob()
self.cleanJob()
def waitJob(self):
    """Block until every submitted compute unit has completed.

    Simply waits on the ComputeDataService (self.cds).  The cancellation
    of not-yet-running subjobs below was disabled at some point and is
    kept for reference.
    """
    # cancel all not-running submitted subjobs
    # for k in range(self.nreplicas):
    #     if self.status[k]['running_status'] == "R":
    #         if self.cus[k].get_state() != "Running":
    #             self.cus[k].cancel()
    #             self.status[k]['running_status'] = "W"
    # update status
    # self.updateStatus()
    # self.print_status()
    # wait until running jobs complete
    self.cds.wait()
def cleanJob(self):
    """Tear down the pilot framework: cancel the ComputeDataService
    (self.cds) and then the pilot job service (self.pj)."""
    self.cds.cancel()
    self.pj.cancel()
def launch_pilotjob(self):
    """Build the pilot-compute description from the keywords parsed out of
    command.inp, start the pilot, register it with the compute-data
    service, and keep a handle to the (single) running pilot."""
    kw = self.keywords.get
    pilot_desc = {
        "service_url": kw("RESOURCE_URL"),
        "number_of_processes": kw("TOTAL_CORES"),
        "working_directory": self.bj_working_dir,
        "queue": kw("QUEUE"),
        "processes_per_node": self.ppn,
        "project": kw("PROJECT"),
        "walltime": int(kw("WALL_TIME")),
    }
    # SGE wayness is optional; forward it only when configured.
    wayness = kw("SGE_WAYNESS")
    if wayness is not None:
        pilot_desc["spmd_variation"] = wayness
    # Create the pilot job with the description above and expose it
    # through the compute-data service.
    self.pj.create_pilot(pilot_compute_description=pilot_desc)
    self.cds.add_pilot_compute_service(self.pj)
    self.pilotcompute = self.pj.list_pilots()[0]
def _write_status(self):
    """Pickle the current state of the RE job and write it to BASENAME.stat.

    Uses the module-level _open helper to obtain the file handle.
    """
    status_file = "%s.stat" % self.basename
    f = _open(status_file, "w")
    try:
        pickle.dump(self.status, f)
    finally:
        # BUGFIX: close the handle even when pickle.dump raises; the
        # original leaked the file descriptor on error.
        f.close()
示例4: __init__
# 需要导入模块: from pilot import PilotComputeService [as 别名]
# 或者: from pilot.PilotComputeService import cancel [as 别名]
class simple:
    """Single-pilot convenience wrapper that additionally records timing
    probes: pilot setup time, CU submission time, CU wait time and total
    CU time.

    Constructor parameters:
        no_jobs   -- number of compute units submitted by startCU()
        pilot     -- pilot description dict; "service_url" and
                     "number_of_processes" are mandatory, while
                     "working_directory", "queue" and "walltime" are
                     optional
        COORD_url -- redis coordination URL; a built-in default is used
                     when omitted
    """

    def __init__(self, no_jobs, pilot, COORD_url=None):
        self.no_jobs = no_jobs
        self.pilot = pilot
        # Fall back to the default redis coordination server when the
        # caller did not supply one.  (was: '== None')
        if COORD_url is None:
            self.COORD = "redis://[email protected]:6379"
        else:
            self.COORD = COORD_url
        # Timing accumulators used to analyse responsiveness.
        self.pilot_setup_time = 0
        self.total_cu_time = 0
        self.cu_sub_time = 0
        self.cu_wait_time = 0

    def startpilot(self):
        """Start the (single) pilot and record how long setup took."""
        # API currently supports single pilot applications
        print('Start pilot service')
        p1 = time.time()
        self.pilot_compute_service = PilotComputeService(self.COORD)
        # Mandatory: service_url and number_of_processes.
        self.pilot_compute_description = {
            "service_url": self.pilot["service_url"],
            "number_of_processes": self.pilot["number_of_processes"]
        }
        # Optional keys; backend defaults apply when absent.  (was: three
        # dict.has_key() checks -- has_key is deprecated and removed in
        # Python 3)
        for key in ("working_directory", "queue", "walltime"):
            if key in self.pilot:
                self.pilot_compute_description[key] = self.pilot[key]
        self.pilotjob = self.pilot_compute_service.create_pilot(
            pilot_compute_description=self.pilot_compute_description)
        p2 = time.time()
        self.pilot_setup_time = p2 - p1
        print('Pilot successfully started')

    def startCU(self):
        """Submit self.no_jobs sleep jobs, wait for completion, and record
        submission/wait/total timings."""
        print('Starting Compute Unit submissions')
        # p1..p5 -- probes for time calculations
        p1 = time.time()
        self.compute_data_service = ComputeDataService()
        self.compute_data_service.add_pilot_compute_service(self.pilot_compute_service)
        for i in range(self.no_jobs):
            print('Submitting job %s on %s' % (i + 1, self.pilot["service_url"]))
            p2 = time.time()
            self.compute_unit_description = {
                "executable": "/bin/sleep 4",
                #"arguments" : ["$MYOUTPUT"],
                #"environment" : {'MYOUTPUT':'"Hello from Simple API"'},
                "number_of_processes": 1,
                "output": "stdout.txt",
                "error": "stderr.txt"
            }
            self.compute_unit = self.compute_data_service.submit_compute_unit(self.compute_unit_description)
            p3 = time.time()
            self.cu_sub_time = self.cu_sub_time + (p3 - p2)
        print('All Compute Units Submitted. Waiting for completion')
        p4 = time.time()
        self.compute_data_service.wait()
        p5 = time.time()
        self.total_cu_time = p5 - p1
        self.cu_wait_time = p5 - p4
        print('All CU executions completed')

    def terminate(self):
        """Cancel all CUs and the pilot, then display the timing report."""
        print('Terminating pilot')
        self.compute_data_service.cancel()
        self.pilot_compute_service.cancel()
        print('No of jobs : ', self.no_jobs)
        print('Pilot setup time : ', self.pilot_setup_time)
        print('CU submission time : ', self.cu_sub_time)
        print('CU wait time : ', self.cu_wait_time)
        print('Total CU time : ', self.total_cu_time)
示例5: DareManager
# 需要导入模块: from pilot import PilotComputeService [as 别名]
# 或者: from pilot.PilotComputeService import cancel [as 别名]
#.........这里部分代码省略.........
while(1):
darelogger.info(" Checking to start step %s " % step_id)
if self.check_to_start_step(step_id):
self.run_step(step_id)
break
else:
darelogger.info(" Cannot start this step %s sleeping..." % step_id)
time.sleep(10)
def run_step(self, step_id):
    """Run a single workflow step.

    Submits the step's input/output data units and compute units to the
    ComputeDataService, then polls all units every 5 seconds until every
    one has reached a terminal state, updating the web status tracker
    before and after.
    """
    #self.step_run_lock.acquire()
    # job started -- update status
    this_su = self.workflow.step_units_repo[step_id].UnitInfo
    self.updater.update_status(this_su['dare_web_id'], "%s in step %s" % ('Running', this_su['name']))
    darelogger.info(" Started running %s " % step_id)

    jobs = []
    job_start_times = {}
    job_states = {}
    NUMBER_JOBS = len(self.workflow.step_units_repo[step_id].UnitInfo['compute_units'])
    for cu_id in self.workflow.step_units_repo[step_id].UnitInfo['compute_units']:
        compute_unit_desc = self.workflow.compute_units_repo[cu_id]
        input_dus = compute_unit_desc.pop('input_data_units')
        output_dus = compute_unit_desc.pop('output_data_units')

        # Stage the step's data units through the compute-data service.
        input_data_units = []
        for du_id in input_dus:
            input_data_units.append(self.compute_data_service.submit_data_unit(self.workflow.data_units_repo[du_id]))
        output_data_units = []
        for du_id in output_dus:
            output_data_units.append(self.compute_data_service.submit_data_unit(self.workflow.data_units_repo[du_id]))

        compute_unit_desc["input_data"] = [du.get_url() for du in input_data_units]
        compute_unit_desc["output_data"] = [{du.get_url(): ['std*']} for du in output_data_units]
        compute_unit = self.compute_data_service.submit_compute_unit(compute_unit_desc)
        darelogger.info("Compute Unit: Description: \n%s" % (str(self.workflow.compute_units_repo[cu_id])))
        jobs.append(compute_unit)
        job_start_times[compute_unit] = time.time()
        job_states[compute_unit] = compute_unit.get_state()

    darelogger.debug("************************ All Jobs submitted ************************")

    # Poll until every unit reports a terminal state (done/failed/canceled).
    while 1:
        finish_counter = 0
        result_map = {}
        for i in range(0, NUMBER_JOBS):
            old_state = job_states[jobs[i]]
            state = jobs[i].get_state()
            # BUGFIX: the original guard read
            #     if state in result_map == False:
            # which Python chains into
            #     (state in result_map) and (result_map == False)
            # and therefore never fires.  The .get() default below already
            # handles a state's first occurrence, so the dead branch was
            # removed.
            result_map[state] = result_map.get(state, 0) + 1
            #print "counter: " + str(i) + " job: " + str(jobs[i]) + " state: " + state
            if old_state != state:
                darelogger.debug("Job " + str(jobs[i]) + " changed from: " + old_state + " to " + state)
            if old_state != state and self.has_finished(state) == True:
                darelogger.info("%s step Job: " % (self.workflow.step_units_repo[step_id].UnitInfo['name']) + str(jobs[i]) + " Runtime: " + str(time.time() - job_start_times[jobs[i]]) + " s.")
            if self.has_finished(state) == True:
                finish_counter = finish_counter + 1
            job_states[jobs[i]] = state
        darelogger.debug("Current states: " + str(result_map))
        time.sleep(5)
        if finish_counter == NUMBER_JOBS:
            break

    self.workflow.step_units_repo[step_id].set_status(StepUnitStates.Done)

    #self.compute_data_service.wait()
    darelogger.debug(" Compute jobs for step %s complete" % step_id)
    #runtime = time.time()-starttime
    # all jobs done -- update status
    self.updater.update_status(this_su['dare_web_id'], "%s is Done" % this_su['name'])
    #self.step_run_lock.release()
def has_finished(self, state):
    """Return True when *state* denotes a terminal compute-unit state.

    The comparison is case-insensitive; terminal states are "done",
    "failed" and "canceled".
    """
    return state.lower() in ("done", "failed", "canceled")
def quit(self, message=None):
    """Best-effort shutdown: stop all step threads and cancel the pilot
    services.

    Never raises from the service teardown -- failures there are ignored
    so shutdown always completes.
    """
    if message:
        darelogger.debug(message)
    darelogger.debug("Terminating steps")
    for step, thread in self.step_threads.items():
        darelogger.debug("Stoppping step %s" % step)
        # NOTE(review): Thread._Thread__stop is a private CPython 2 API
        # (removed in Python 3); a cooperative stop flag would be safer.
        thread._Thread__stop()
    darelogger.debug("Terminating Pilot Compute/Data Service")
    try:
        self.compute_data_service.cancel()
        self.pilot_data_service.cancel()
        self.pilot_compute_service.cancel()
    except Exception:
        # BUGFIX: narrowed the bare 'except:' which also swallowed
        # SystemExit/KeyboardInterrupt; teardown stays best-effort.
        pass