本文整理汇总了Python中Cobalt.Proxy.ComponentProxy.run_jobs方法的典型用法代码示例。如果您正苦于以下问题:Python ComponentProxy.run_jobs方法的具体用法?Python ComponentProxy.run_jobs怎么用?Python ComponentProxy.run_jobs使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Cobalt.Proxy.ComponentProxy
的用法示例。
在下文中一共展示了ComponentProxy.run_jobs方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _start_job
# 需要导入模块: from Cobalt.Proxy import ComponentProxy [as 别名]
# 或者: from Cobalt.Proxy.ComponentProxy import run_jobs [as 别名]
def _start_job(self, job, partition_list):
    """Hand *job* off to the queue manager for launch on *partition_list*.

    On a ComponentLookupError the failure is logged and the job is left
    alone (a later scheduling pass will retry); on success the launch time
    is recorded in ``self.started_jobs`` keyed by jobid.
    """
    queue_manager = ComponentProxy(self.COMP_QUEUE_MANAGER)
    job_spec = [{'tag': "job", 'jobid': job.jobid}]
    try:
        self.logger.info("trying to start job %d on partition %r" % (job.jobid, partition_list))
        queue_manager.run_jobs(job_spec, partition_list)
    except ComponentLookupError:
        # Queue manager is unreachable; give up on this attempt.
        self.logger.error("failed to connect to queue manager")
        return
    # Remember when we launched it so we do not try to start it twice.
    self.started_jobs[job.jobid] = self.get_current_time()
示例2: _start_job
# 需要导入模块: from Cobalt.Proxy import ComponentProxy [as 别名]
# 或者: from Cobalt.Proxy.ComponentProxy import run_jobs [as 别名]
def _start_job(self, job, partition_list, resid=None):
    """Get the queue manager to start a job."""
    queue_manager = ComponentProxy("queue-manager")
    job_spec = [{'tag': "job", 'jobid': job.jobid}]
    try:
        self.logger.info("trying to start job %d on partition %r" % (job.jobid, partition_list))
        # Third positional argument (user) is deliberately None here;
        # resid ties the run back to its reservation, when given.
        queue_manager.run_jobs(job_spec, partition_list, None, resid)
    except ComponentLookupError:
        # Queue manager is unreachable; give up on this attempt.
        self.logger.error("failed to connect to queue manager")
        return
    # Record the launch time so this job is not started a second time.
    self.started_jobs[job.jobid] = self.get_current_time()
示例3: eval
# 需要导入模块: from Cobalt.Proxy import ComponentProxy [as 别名]
# 或者: from Cobalt.Proxy.ComponentProxy import run_jobs [as 别名]
    # Fragment of a CLI option dispatch (the opening try/if is outside this
    # view).  Translates a JobDeleteError fault raised over XML-RPC back
    # into a readable error message for the user.
    except xmlrpclib.Fault, flt:
        if flt.faultCode == JobDeleteError.fault_code:
            # NOTE(review): eval() on a fault string from the server is a
            # security smell -- acceptable only because the server is trusted.
            args = eval(flt.faultString)
            exc = JobDeleteError(*args)
            print >>sys.stderr, "Job %s: ERROR - %s" % (exc.jobid, exc.message)
            raise SystemExit, 1
        else:
            # Not a job-delete fault; let it propagate.
            raise
elif opts['run']:
    # --run: launch the selected job(s) on the named partition.
    location = opts['run']
    part_list = ComponentProxy("system").get_partitions([{'name': location}])
    if len(part_list) != 1:
        print "Error: cannot find partition named '%s'" % location
        raise SystemExit, 1
    try:
        # location may be a colon-separated list of partition names.
        response = cqm.run_jobs(spec, location.split(':'), whoami)
    except xmlrpclib.Fault, flt:
        if flt.faultCode == JobRunError.fault_code:
            # Same trusted-server eval() pattern as the delete path above.
            args = eval(flt.faultString)
            exc = JobRunError(*args)
            print >>sys.stderr, "Job %s: ERROR - %s" % (exc.jobid, exc.message)
            raise SystemExit, 1
        else:
            raise
elif opts['addq']:
    # --addq: create new queues, refusing duplicates by name.
    existing_queues = get_queues(cqm)
    if [qname for qname in args if qname in
            [q.get('name') for q in existing_queues]]:
        print 'queue already exists'
        response = ''
    elif len(args) < 1:
示例4: test_something
# 需要导入模块: from Cobalt.Proxy import ComponentProxy [as 别名]
# 或者: from Cobalt.Proxy.ComponentProxy import run_jobs [as 别名]
def test_something(self):
    """End-to-end smoke test: create a queue, register partitions with the
    simulator, submit a job, place it, run it, and poll until it finishes.

    Truncated in this view -- the tail of the polling loop is omitted.
    """
    logging.basicConfig()
    try:
        cqm = ComponentProxy("queue-manager")
    except ComponentLookupError:
        # assert on a non-empty string always fails, with a readable message
        assert not "failed to connect to queue manager"
    # add a queue
    queues = cqm.add_queues([{"tag": "queue", "name": "default"}])
    assert len(queues) == 1
    # try adding a job to a queue that doesn't exist
    try:
        jobs = cqm.add_jobs([{"tag": "job", "queue": "jonx"}])
    except xmlrpclib.Fault:
        # trying to add a job to a queue that doesn't exist results in an xmlrpc Fault
        pass
    else:
        assert not "Adding job to non-existent queue should raise xmlrpclib.Fault"
    # get the list of available partitions and add them to the pool of managed partitions
    try:
        simulator = ComponentProxy("system")
    except ComponentLookupError:
        assert not "failed to connect to simulator"
    for part_name in self.system._partitions:
        partitions = simulator.add_partitions([{"tag": "partition", "name": part_name, "queue": "default"}])
        assert len(partitions) == 1
        # partitions must be functional and scheduled before jobs can land on them
        partitions = simulator.set_partitions(
            [{"tag": "partition", "name": part_name}], {"functional": True, "scheduled": True}
        )
        assert len(partitions) == 1
    partitions = simulator.get_partitions([{"name": "*", "size": "*", "queue": "*"}])
    assert len(partitions) > 0
    # now run a real job
    #
    # 1. add the job to the default queue
    # 2. obtain a partition for it to run on
    # 3. start running it on that paritition
    # 4. check that it started running
    # 5. sleep for a bit, and then check that it's still running
    # 6. sleep some more and then check to see if it actually finished running
    nodes = partitions[0]["size"]
    jobs = cqm.add_jobs(
        [
            {
                "queue": "default",
                "mode": "co",
                "command": "/bin/ls",
                "outputdir": os.getcwd(),
                "walltime": 4,
                "nodes": nodes,
                "procs": nodes,
                "args": [],
                "user": "nobody",
                "jobid": "*",
            }
        ]
    )
    assert len(jobs) == 1
    job = jobs[0]
    jobid = job["jobid"]
    # ask the scheduler component where this job should run
    job_location_args = [
        {
            "jobid": jobid,
            "nodes": job["nodes"],
            "queue": job["queue"],
            "utility_score": 1,
            "threshold": 1,
            "walltime": job["walltime"],
            "attrs": {},
        }
    ]
    locations = simulator.find_job_location(job_location_args, [])
    assert locations.has_key(jobid)
    location = locations[jobid]
    cqm.run_jobs([{"jobid": jobid}], location)
    # is_active=True filters to jobs that are actually running
    r = cqm.get_jobs([{"jobid": jobid, "state": "*", "is_active": True}])
    if not r:
        assert not "the job didn't start"
    time.sleep(20)
    r = cqm.get_jobs([{"jobid": jobid, "state": "*", "is_active": True}])
    if len(r) != 1:
        assert not "the job has stopped running prematurely"
    # poll until the job leaves the active state (240 s timeout)
    start_time = time.time()
    while True:
        r = cqm.get_jobs([{"jobid": jobid, "state": "*", "is_active": True}])
        if r:
            if time.time() - start_time > 240:
#......... remainder of this method omitted in this excerpt .........