This page collects and summarizes typical usage examples of the Python class pilot.PilotComputeService. If you are unsure what PilotComputeService does or how to use it, the curated class examples below may help.
Below are 15 code examples of the PilotComputeService class, sorted by popularity by default.
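Before the individual examples, here is a minimal end-to-end sketch of the lifecycle that almost all of the snippets below share: create a PilotComputeService against a coordination (Redis) URL, start a pilot, submit compute units through a ComputeDataService, wait for them, and clean up. The Redis URL and the fork://localhost resource are placeholder assumptions, not taken from any single example.

import os
from pilot import PilotComputeService, ComputeDataService

COORDINATION_URL = "redis://localhost:6379"  # assumed local Redis server

# 1. Start the pilot service and one pilot on the local machine.
pilot_compute_service = PilotComputeService(coordination_url=COORDINATION_URL)
pilot_compute_service.create_pilot(pilot_compute_description={
    "service_url": "fork://localhost",
    "number_of_processes": 2,
    "working_directory": os.path.join(os.getcwd(), "agent"),
})

# 2. Route compute units to the pilot via a ComputeDataService.
compute_data_service = ComputeDataService()
compute_data_service.add_pilot_compute_service(pilot_compute_service)
compute_data_service.submit_compute_unit({
    "executable": "/bin/echo",
    "arguments": ["Hello from the Pilot-API"],
    "number_of_processes": 1,
    "output": "stdout.txt",
    "error": "stderr.txt",
})
compute_data_service.wait()    # block until all compute units finish

# 3. Release the compute units and the pilot.
compute_data_service.cancel()
pilot_compute_service.cancel()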
Example 1: submit_pilot_by_description
def submit_pilot_by_description(self, coordination_url="redis://localhost/", pilot_compute_description={}):
    pilot_compute_service = PilotComputeService(coordination_url=coordination_url)
    pilot_compute = pilot_compute_service.create_pilot(pilot_compute_description=pilot_compute_description)
    pilot_url = pilot_compute.get_url()
    self.pilots.append(pilot_url)
    print("Started Pilot: %s" % pilot_url)
    self.__persist()
Example 2: start_run_pilot
def start_run_pilot(pilot_id, coordination_url=COORD_URL):
    pilot = DareBigJobPilot.objects.get(id=pilot_id)
    # Use the coordination_url argument (the original passed the global
    # COORD_URL here, leaving the parameter unused).
    pilot_compute_service = PilotComputeService(coordination_url=coordination_url)
    print(pilot.get_pilot_info())
    pilot_compute = pilot_compute_service.create_pilot(pilot_compute_description=pilot.get_pilot_info())
    pilot.pilot_url = pilot_compute.get_url()
    pilot.status = "Submitted"
    pilot.save()
    print("Started Pilot: %s (id: %s)" % (pilot.pilot_url, pilot.id))
Example 3: start_pilotcompute
def start_pilotcompute(pilot_compute_description=None):
    pilot_compute_service = PilotComputeService(coordination_url=COORDINATION_URL)
    if pilot_compute_description is None:
        pilot_compute_description = {
            "service_url": 'fork://localhost',
            "number_of_processes": 2,
            "working_directory": os.getcwd() + "/work/",
        }
    pilotcompute = pilot_compute_service.create_pilot(pilot_compute_description=pilot_compute_description)
    return pilotcompute
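The returned handle can then be used like the pilots in the other examples; a hypothetical caller might simply report its URL (get_url() is the same accessor used in Examples 1 and 2):

pilotcompute = start_pilotcompute()              # assumes a reachable COORDINATION_URL
print("Pilot URL: %s" % pilotcompute.get_url())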
Example 4: __init__
class simple:
    def __init__(self, no_jobs, pilot, COORD_url=None):
        self.no_jobs = no_jobs
        self.pilot = pilot
        if COORD_url is None:
            self.COORD = "redis://[email protected]:6379"
        else:
            self.COORD = COORD_url

    def check(self):
        print('Checkup time')
        print(self.COORD)

    def startpilot(self):
        print('Start pilot service')
        self.pilot_compute_service = PilotComputeService(self.COORD)
        self.pilot_compute_description = {
            "service_url": self.pilot["service_url"]
        }
        # Copy the optional keys over only if the caller supplied them.
        for key in ("number_of_processes", "working_directory", "queue", "walltime"):
            if key in self.pilot:
                self.pilot_compute_description[key] = self.pilot[key]
        self.pilotjob = self.pilot_compute_service.create_pilot(pilot_compute_description=self.pilot_compute_description)
        print('Pilot successfully started')

    def startCU(self):
        print('Starting Compute Unit submissions')
        self.compute_data_service = ComputeDataService()
        self.compute_data_service.add_pilot_compute_service(self.pilot_compute_service)
        for i in range(self.no_jobs):
            print('Submitting job %s on %s' % (i + 1, self.pilot["service_url"]))
            self.compute_unit_description = {
                "executable": "/bin/echo",
                "arguments": ["$MYOUTPUT"],
                "environment": {'MYOUTPUT': '"Hello from Simple API"'},
                "number_of_processes": 1,
                "output": "stdout.txt",
                "error": "stderr.txt"
            }
            self.compute_unit = self.compute_data_service.submit_compute_unit(self.compute_unit_description)
        print('All Compute Units Submitted. Waiting for completion')
        self.compute_data_service.wait()
        print('All CU executions completed')

    def terminate(self):
        print('Terminating pilot')
        self.compute_data_service.cancel()
        self.pilot_compute_service.cancel()
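A hypothetical driver for the simple class above might look like this; the fork://localhost resource and the job count are illustrative assumptions, and a reachable Redis server is required:

# Hypothetical usage of the `simple` class above.
pilot_desc = {
    "service_url": "fork://localhost",   # run jobs on the local machine
    "number_of_processes": 2,
}
s = simple(no_jobs=4, pilot=pilot_desc)
s.startpilot()   # start the PilotComputeService and one pilot
s.startCU()      # submit the compute units and wait for completion
s.terminate()    # cancel the compute-data service and the pilot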
Example 5: setupJob
def setupJob(self):
    """
    If RE_SETUP='yes' creates and populates subdirectories, one for each
    replica, called r0, r1, ..., rN in the working directory. Otherwise
    reads saved state from the ENGINE_BASENAME.stat file.

    To populate each directory calls _buildInpFile(k) to prepare the MD
    engine input file for replica k. Also creates soft links to the working
    directory for the accessory files specified in ENGINE_INPUT_EXTFILES.
    """
    # pilotjob: Initialize PilotJob at the given COORDINATION_URL (CU)
    self.pj = PilotComputeService(self.keywords.get('COORDINATION_URL'))
    # pilotjob: Initialize the PilotJob Data service (DU)
    self.cds = ComputeDataService()
    # pilotjob: Launch the PilotJob at the given COORDINATION_URL
    self.launch_pilotjob()
    if (self.keywords.get('RE_SETUP') is not None and
            self.keywords.get('RE_SETUP').lower() == 'yes'):
        # create replica directories r0, r1, ...
        for k in range(self.nreplicas):
            repl_dir = 'r%d' % k
            if os.path.exists(repl_dir):
                self._exit('Replica directories already exist. Either turn off '
                           'RE_SETUP or remove the directories.')
            else:
                os.mkdir(repl_dir)
        # create links for external files
        if self.extfiles is not None:
            for fname in self.extfiles:
                for k in range(self.nreplicas):
                    self._linkReplicaFile(fname, fname, k)
        # create status table
        self.status = [{'stateid_current': k, 'running_status': 'W',
                        'cycle_current': 1} for k in range(self.nreplicas)]
        # save status tables
        self._write_status()
        # create input files no. 1
        for k in range(self.nreplicas):
            self._buildInpFile(k)
        self.updateStatus()
    else:
        self._read_status()
        self.updateStatus(restart=True)
    # if self.remote:
    #     self._setup_remote_workdir()
    self.print_status()
    # at this point all replicas should be in the wait state
    for k in range(self.nreplicas):
        if self.status[k]['running_status'] != 'W':
            self._exit('Internal error after restart. Not all jobs are in wait '
                       'state.')
Example 6: startpilot
def startpilot(self):
    print('Start pilot service')
    self.pilot_compute_service = PilotComputeService(self.COORD)
    self.pilot_compute_description = {
        "service_url": self.pilot["service_url"]
    }
    # Copy the optional keys over only if the caller supplied them.
    for key in ("number_of_processes", "working_directory", "queue", "walltime"):
        if key in self.pilot:
            self.pilot_compute_description[key] = self.pilot[key]
    self.pilotjob = self.pilot_compute_service.create_pilot(pilot_compute_description=self.pilot_compute_description)
    print('Pilot successfully started')
Example 7: start
def start(self):
    from pilot import PilotComputeService, PilotDataService, ComputeDataService, State

    darelogger.info("Create Compute Engine service ")
    self.pilot_compute_service = PilotComputeService(coordination_url=COORDINATION_URL)
    self.pilot_data_service = PilotDataService()
    for compute_pilot, desc in self.workflow.compute_pilot_repo.items():
        self.compute_pilot_service_repo.append(self.pilot_compute_service.create_pilot(pilot_compute_description=desc))
    # for data_pilot, desc in self.workflow.data_pilot_repo.items():
    #     self.data_pilot_service_repo.append(self.pilot_data_service.create_pilot(pilot_data_description=desc))
    self.compute_data_service = ComputeDataService()
    self.compute_data_service.add_pilot_compute_service(self.pilot_compute_service)
    # self.compute_data_service.add_pilot_data_service(self.pilot_data_service)
    self.step_thread = {}

    ### run the steps
    self.step_start_lock = threading.RLock()
    self.step_run_lock = threading.RLock()
    for step_id in self.workflow.step_units_repo.keys():
        darelogger.info(" Submitted step %s " % step_id)
        self.step_start_lock.acquire()
        self.start_thread_step_id = step_id
        self.step_start_lock.release()
        self.step_thread[step_id] = threading.Thread(target=self.start_step)
        self.step_thread[step_id].start()

    while True:
        count_step = [v.is_alive() for k, v in self.step_thread.items()]
        darelogger.info('count_step %s' % count_step)
        if True not in count_step and len(count_step) > 0:
            break
        time.sleep(10)

    darelogger.info(" All Steps Done processing")
    self.cancel()
Example 8: startpilot
def startpilot(self):
    # The API currently supports single-pilot applications.
    print('Start pilot service')
    p1 = time.time()
    self.pilot_compute_service = PilotComputeService(self.COORD)
    # Mandatory keys: service_url and number_of_processes of the Pilot.
    self.pilot_compute_description = {
        "service_url": self.pilot["service_url"],
        "number_of_processes": self.pilot["number_of_processes"]
    }
    # Check for the optional working_directory, queue and walltime keys;
    # defaults are used if they are not given.
    for key in ("working_directory", "queue", "walltime"):
        if key in self.pilot:
            self.pilot_compute_description[key] = self.pilot[key]
    self.pilotjob = self.pilot_compute_service.create_pilot(pilot_compute_description=self.pilot_compute_description)
    p2 = time.time()
    self.pilot_setup_time = p2 - p1
    print('Pilot successfully started')
示例9: print
import os
import sys
import time
from pilot import PilotComputeService, State
# PilotDataService, ComputeDataService, State

RUN_ID = "BJ-"
NUMBER_JOBS = 2
COORDINATION_URL = "redis://[email protected]:6379"

if __name__ == "__main__":
    print("Start")
    sys.stdout.flush()
    if len(sys.argv) > 1:
        RUN_ID = sys.argv[1]

    # create the pilot job service
    pilot_compute_service = PilotComputeService(COORDINATION_URL)
    pilot_compute_description = {
        "service_url": 'pbs+ssh://trestles.sdsc.edu',
        "number_of_processes": 1,
        "queue": "shared",
        "project": "TG-MCB090174",
        "working_directory": "/home/jweichel/agent",
        "walltime": 90
        # 'affinity_datacenter_label': "trestles",
        # 'affinity_machine_label': "mymachine"
    }
    # initiate a pilot job
    pilotjob = pilot_compute_service.create_pilot(pilot_compute_description)
Example 10: async_re_job
# ......... (some code omitted here) .........
self.verbose = True
else:
self.verbose = False
def _linkReplicaFile(self, link_filename, real_filename, repl):
"""
Link the file at real_filename to the name at link_filename in the
directory belonging to the given replica. If a file is already linked
to this name (e.g. from a previous cycle), remove it first.
"""
os.chdir("r%d" % repl)
# Check that the file to be linked actually exists.
# TODO: This is not robust to absolute path specifications.
real_filename = "../%s" % real_filename
if not os.path.exists(real_filename):
self._exit("No such file: %s" % real_filename)
# Make/re-make the symlink.
if os.path.exists(link_filename):
os.remove(link_filename)
os.symlink(real_filename, link_filename)
os.chdir("..")
def setupJob(self):
"""
If RE_SETUP='yes' creates and populates subdirectories, one for each
replica called r0, r1, ..., rN in the working directory. Otherwise
reads saved state from the ENGINE_BASENAME.stat file.
To populate each directory calls _buildInpFile(k) to prepare the MD
engine input file for replica k. Also creates soft links to the working
directory for the accessory files specified in ENGINE_INPUT_EXTFILES.
"""
# pilotjob: Initialize PilotJob at given COORDINATION_URL (CU)
self.pj = PilotComputeService(self.keywords.get("COORDINATION_URL"))
# pilotjob: Initialize PilotJob Data service (DU)
self.cds = ComputeDataService()
# pilotjob: Launch the PilotJob at the given COORDINATION_URL
self.launch_pilotjob()
if self.keywords.get("RE_SETUP") is not None and self.keywords.get("RE_SETUP").lower() == "yes":
# create replicas directories r1, r2, etc.
for k in range(self.nreplicas):
repl_dir = "r%d" % k
if os.path.exists(repl_dir):
_exit("Replica directories already exist. Either turn off " "RE_SETUP or remove the directories.")
else:
os.mkdir("r%d" % k)
# create links for external files
if self.extfiles is not None:
for file in self.extfiles:
for k in range(self.nreplicas):
self._linkReplicaFile(file, file, k)
# create status table
self.status = [
{"stateid_current": k, "running_status": "W", "cycle_current": 1} for k in range(self.nreplicas)
]
# save status tables
self._write_status()
# create input files no. 1
for k in range(self.nreplicas):
self._buildInpFile(k)
self.updateStatus()
else:
self._read_status()
self.updateStatus(restart=True)
Example 11: PilotComputeService
# base_dir = "../data1"
# url_list = os.listdir(base_dir)
# make absolute paths
# absolute_url_list = [os.path.join(base_dir, i) for i in url_list]

data_unit_description = {
    "file_urls": [os.path.join(os.getcwd(), "test.txt")],
    "affinity_datacenter_label": "us-east-1",
    "affinity_machine_label": ""
}

# submit the data unit to a pilot store (`pd` is the pilot-data handle
# created in the omitted part of this example)
input_data_unit = pd.submit_data_unit(data_unit_description)
input_data_unit.wait()
logger.info("Data Unit URL: " + input_data_unit.get_url())

pilot_compute_service = PilotComputeService(coordination_url=COORDINATION_URL)
pilot_compute_description_amazon_west = {
    "service_url": 'ec2+ssh://aws.amazon.com',
    "number_of_processes": 1,
    'affinity_datacenter_label': "us-google",
    'affinity_machine_label': "",
    # cloud-specific attributes
    # "vm_id": "ami-d7f742be",
    "vm_id": "ami-5c3b1b19",
    "vm_ssh_username": "ubuntu",
    "vm_ssh_keyname": "MyKey",
    "vm_ssh_keyfile": "/Users/luckow/.ssh/id_rsa",
    "vm_type": "t1.micro",
    "region": "us-west-1",
    "access_key_id": "AKIAJPGNDJRYIG5LIEUA",
Example 12: PilotComputeService
""" Example application demonstrating how to submit a jobs with PilotJob. """
import os
import time
import sys
from pilot import PilotComputeService, ComputeDataService, State
### This is the number of jobs you want to run
NUMBER_JOBS=4
COORDINATION_URL = "redis://[email protected]:6379"
if __name__ == "__main__":
start_time=time.time()
pilot_compute_service = PilotComputeService(COORDINATION_URL)
pilot_compute_description=[]
pilot_compute_description.append({ "service_url": "ssh://localhost",
"number_of_processes": 12,
"allocation": "TG-MCB090174",
"queue": "normal",
"processes_per_node":12,
"working_directory": os.getcwd()+"/agent",
"walltime":10,
})
for pcd in pilot_compute_description:
pilot_compute_service.create_pilot(pilot_compute_description=pcd)
compute_data_service = ComputeDataService()
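The listing cuts Example 12 off right after the ComputeDataService is created. A typical continuation, sketched here from the patterns in the other examples rather than taken from the original script (/bin/date is an illustrative executable):

    # Sketch of a typical continuation (not part of the original excerpt).
    compute_data_service.add_pilot_compute_service(pilot_compute_service)
    for i in range(NUMBER_JOBS):
        compute_data_service.submit_compute_unit({
            "executable": "/bin/date",
            "number_of_processes": 1,
            "output": "stdout.txt",
            "error": "stderr.txt",
        })
    compute_data_service.wait()   # block until all jobs finish
    compute_data_service.cancel()
    pilot_compute_service.cancel()
    print("Total runtime: %.1f s" % (time.time() - start_time))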
Example 13: __init__
class simple:
    # Input values from the user: number of jobs, pilot description,
    # Redis server coordination URL.
    def __init__(self, no_jobs, pilot, COORD_url=None):
        self.no_jobs = no_jobs
        self.pilot = pilot
        if COORD_url is None:
            self.COORD = "redis://[email protected]:6379"
        else:
            self.COORD = COORD_url
        # Timing variables used to analyse timing behaviour.
        self.pilot_setup_time = 0
        self.total_cu_time = 0
        self.cu_sub_time = 0
        self.cu_wait_time = 0

    def startpilot(self):
        # The API currently supports single-pilot applications.
        print('Start pilot service')
        p1 = time.time()
        self.pilot_compute_service = PilotComputeService(self.COORD)
        # Mandatory keys: service_url and number_of_processes of the Pilot.
        self.pilot_compute_description = {
            "service_url": self.pilot["service_url"],
            "number_of_processes": self.pilot["number_of_processes"]
        }
        # Check for the optional working_directory, queue and walltime keys;
        # defaults are used if they are not given.
        for key in ("working_directory", "queue", "walltime"):
            if key in self.pilot:
                self.pilot_compute_description[key] = self.pilot[key]
        self.pilotjob = self.pilot_compute_service.create_pilot(pilot_compute_description=self.pilot_compute_description)
        p2 = time.time()
        self.pilot_setup_time = p2 - p1
        print('Pilot successfully started')

    def startCU(self):
        print('Starting Compute Unit submissions')
        # p1..p5 are probes for the time calculations.
        p1 = time.time()
        self.compute_data_service = ComputeDataService()
        self.compute_data_service.add_pilot_compute_service(self.pilot_compute_service)
        for i in range(self.no_jobs):
            print('Submitting job %s on %s' % (i + 1, self.pilot["service_url"]))
            p2 = time.time()
            self.compute_unit_description = {
                "executable": "/bin/sleep",
                "arguments": ["4"],   # was "/bin/sleep 4" in one string, which the adaptor may not split
                # "environment": {'MYOUTPUT': '"Hello from Simple API"'},
                "number_of_processes": 1,
                "output": "stdout.txt",
                "error": "stderr.txt"
            }
            self.compute_unit = self.compute_data_service.submit_compute_unit(self.compute_unit_description)
            p3 = time.time()
            self.cu_sub_time = self.cu_sub_time + (p3 - p2)
        print('All Compute Units Submitted. Waiting for completion')
        p4 = time.time()
        self.compute_data_service.wait()
        p5 = time.time()
        self.total_cu_time = p5 - p1
        self.cu_wait_time = p5 - p4
        print('All CU executions completed')

    def terminate(self):
        # Terminate all CUs and the pilot, then display the timing results.
        print('Terminating pilot')
        self.compute_data_service.cancel()
        self.pilot_compute_service.cancel()
        print('No of jobs         : ', self.no_jobs)
        print('Pilot setup time   : ', self.pilot_setup_time)
        print('CU submission time : ', self.cu_sub_time)
        print('CU wait time       : ', self.cu_wait_time)
        print('Total CU time      : ', self.total_cu_time)
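As with Example 4, a hypothetical driver calls the three methods in order; terminate() prints the timing summary, and the probes can also be read back directly afterwards (fork://localhost is an illustrative resource):

pilot_desc = {"service_url": "fork://localhost", "number_of_processes": 2}
s = simple(no_jobs=8, pilot=pilot_desc)
s.startpilot()
s.startCU()
s.terminate()   # prints job count and the collected timings
print(s.pilot_setup_time, s.cu_sub_time, s.cu_wait_time, s.total_cu_time)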
Example 14: PilotComputeService
# Create a Data Unit description
# base_dir = "../data1"
# url_list = os.listdir(base_dir)
# make absolute paths
# absolute_url_list = [os.path.join(base_dir, i) for i in url_list]

data_unit_description = {
    "file_urls": [os.path.join(os.getcwd(), "test.txt")],
    "affinity_datacenter_label": "us-east-1",
    "affinity_machine_label": ""
}

# submit the data unit to a pilot store (`pd` is the pilot-data handle
# created in the omitted part of this example)
input_data_unit = pd.submit_data_unit(data_unit_description)
input_data_unit.wait()
logger.info("Data Unit URL: " + input_data_unit.get_url())

pilot_compute_service = PilotComputeService(coordination_url=COORDINATION_URL)
pilot_compute_description_euca_india = {
    "service_url": 'euca+ssh://149.165.146.135:8773/services/Eucalyptus',
    # "service_url": 'fork://localhost',
    "number_of_processes": 1,
    'affinity_datacenter_label': "us-east",
    'affinity_machine_label': "",
    # 'working_directory': os.getcwd(),
    # cloud-specific attributes
    "vm_id": "emi-36913A82",
    "vm_ssh_username": "root",
    "vm_ssh_keyname": "luckow",
    "vm_ssh_keyfile": "/Users/luckow/.ssh/eucakey-india",
    "vm_type": "c1.xlarge",
    "access_key_id": "8MCXRAMXMHDYKWNKXZ8WF",
Example 15: DareManager
class DareManager(object):
    """DARE manager:
    - reads the different configuration files
    - submits compute/data units for the various steps
    """

    def __init__(self, conffile="/path/to/conf/file"):
        """Constructor."""
        self.dare_conffile = conffile
        self.workflow = PrepareWorkFlow(self.dare_conffile)
        self.updater = Updater(self.workflow.update_site_db, self.workflow.dare_web_id)
        self.dare_id = "dare-" + str(uuid.uuid1())
        self.data_pilot_service_repo = []
        self.step_threads = {}
        try:
            self.start()
        except KeyboardInterrupt:
            self.quit(message='KeyboardInterrupt')
    def start(self):
        darelogger.info("Creating Compute Engine service ")
        self.pilot_compute_service = PilotComputeService(coordination_url=COORDINATION_URL)
        self.pilot_data_service = PilotDataService(coordination_url=COORDINATION_URL)
        for compute_pilot, desc in self.workflow.compute_pilot_repo.items():
            self.pilot_compute_service.create_pilot(pilot_compute_description=desc)
        for data_pilot, desc in self.workflow.data_pilot_repo.items():
            self.data_pilot_service_repo.append(self.pilot_data_service.create_pilot(pilot_data_description=desc))
        self.compute_data_service = ComputeDataServiceDecentral()
        self.compute_data_service.add_pilot_compute_service(self.pilot_compute_service)
        self.compute_data_service.add_pilot_data_service(self.pilot_data_service)

        ### run the steps
        self.step_start_lock = threading.RLock()
        self.step_run_lock = threading.RLock()
        for step_id in self.workflow.step_units_repo.keys():
            darelogger.info(" Submitted step %s " % step_id)
            self.step_start_lock.acquire()
            self.start_thread_step_id = step_id
            self.step_start_lock.release()
            self.step_threads[step_id] = threading.Thread(target=self.start_step)
            self.step_threads[step_id].start()

        while True:
            count_step = [v.is_alive() for k, v in self.step_threads.items()]
            darelogger.info('count_step %s' % count_step)
            if True not in count_step and len(count_step) > 0:
                break
            time.sleep(10)

        darelogger.info(" All Steps Done processing")
        self.quit(message='quit gracefully')
    def check_to_start_step(self, step_id):
        flags = []
        darelogger.info(self.workflow.step_units_repo[step_id].UnitInfo['start_after_steps'])
        if self.workflow.step_units_repo[step_id].get_status() == StepUnitStates.New:
            for dep_step_id in self.workflow.step_units_repo[step_id].UnitInfo['start_after_steps']:
                if self.workflow.step_units_repo[dep_step_id].get_status() != StepUnitStates.Done:
                    flags.append(False)
                    darelogger.info(self.workflow.step_units_repo[dep_step_id].get_status())
        return False not in flags

    def start_step(self):
        self.step_start_lock.acquire()
        step_id = self.start_thread_step_id
        self.step_start_lock.release()
        while True:
            darelogger.info(" Checking to start step %s " % step_id)
            if self.check_to_start_step(step_id):
                self.run_step(step_id)
                break
            else:
                darelogger.info(" Cannot start step %s yet, sleeping..." % step_id)
                time.sleep(10)
    def run_step(self, step_id):
        # self.step_run_lock.acquire()
        # job started; update its status
        this_su = self.workflow.step_units_repo[step_id].UnitInfo
        self.updater.update_status(this_su['dare_web_id'], "%s in step %s" % ('Running', this_su['name']))
        darelogger.info(" Started running %s " % step_id)

        jobs = []
        job_start_times = {}
        job_states = {}
        NUMBER_JOBS = len(self.workflow.step_units_repo[step_id].UnitInfo['compute_units'])
        for cu_id in self.workflow.step_units_repo[step_id].UnitInfo['compute_units']:
            compute_unit_desc = self.workflow.compute_units_repo[cu_id]
            input_dus = compute_unit_desc.pop('input_data_units')
            output_dus = compute_unit_desc.pop('output_data_units')
            input_data_units = []
            for du_id in input_dus:
                input_data_units.append(self.compute_data_service.submit_data_unit(self.workflow.data_units_repo[du_id]))
# ......... (remainder of this method omitted) .........