This article collects typical usage examples of the Python method Cobalt.Proxy.ComponentProxy.find_job_location. If you are wondering exactly what ComponentProxy.find_job_location does or how to use it, the hand-picked code example below may help. You can also read more about the class it belongs to, Cobalt.Proxy.ComponentProxy.
One code example of the ComponentProxy.find_job_location method is shown below; examples are ordered by popularity by default. You can upvote the examples you like or find useful, and your votes help the system recommend better Python code examples.
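Before the full example, here is a minimal sketch of the basic call shape, based only on how Example 1 below uses the method; the "system" component name, the query fields, and the empty second argument all mirror that example and are not a complete description of the interface.

from Cobalt.Proxy import ComponentProxy

system = ComponentProxy("system")
# one query dict per job; the return value is a dict keyed by jobid
locations = system.find_job_location(
    [{"jobid": 1, "nodes": 32, "queue": "default", "utility_score": 1,
      "threshold": 1, "walltime": 4, "attrs": {}}],
    [],  # second argument left empty here, exactly as in Example 1 below
)
print locations.get(1)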
Example 1: test_something
# Required import: from Cobalt.Proxy import ComponentProxy [as alias]
# Or: from Cobalt.Proxy.ComponentProxy import find_job_location [as alias]
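# The example also uses these standard-library modules, plus Cobalt's
# ComponentLookupError (assumed here to come from Cobalt.Exceptions):
import logging
import os
import time
import xmlrpclib

from Cobalt.Exceptions import ComponentLookupError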
def test_something(self):
    logging.basicConfig()
    try:
        cqm = ComponentProxy("queue-manager")
    except ComponentLookupError:
        assert not "failed to connect to queue manager"
    # add a queue
    queues = cqm.add_queues([{"tag": "queue", "name": "default"}])
    assert len(queues) == 1
    # try adding a job to a queue that doesn't exist
    try:
        jobs = cqm.add_jobs([{"tag": "job", "queue": "jonx"}])
    except xmlrpclib.Fault:
        # trying to add a job to a queue that doesn't exist results in an xmlrpc Fault
        pass
    else:
        assert not "adding a job to a non-existent queue should raise xmlrpclib.Fault"
    # get the list of available partitions and add them to the pool of managed partitions
    try:
        simulator = ComponentProxy("system")
    except ComponentLookupError:
        assert not "failed to connect to simulator"
    for part_name in self.system._partitions:
        partitions = simulator.add_partitions([{"tag": "partition", "name": part_name, "queue": "default"}])
        assert len(partitions) == 1
        partitions = simulator.set_partitions(
            [{"tag": "partition", "name": part_name}], {"functional": True, "scheduled": True}
        )
        assert len(partitions) == 1
    partitions = simulator.get_partitions([{"name": "*", "size": "*", "queue": "*"}])
    assert len(partitions) > 0
    # now run a real job
    #
    # 1. add the job to the default queue
    # 2. obtain a partition for it to run on
    # 3. start running it on that partition
    # 4. check that it started running
    # 5. sleep for a bit, and then check that it's still running
    # 6. sleep some more and then check to see if it actually finished running
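    # use the size of the first partition returned above to size the test job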
nodes = partitions[0]["size"]
jobs = cqm.add_jobs(
[
{
"queue": "default",
"mode": "co",
"command": "/bin/ls",
"outputdir": os.getcwd(),
"walltime": 4,
"nodes": nodes,
"procs": nodes,
"args": [],
"user": "nobody",
"jobid": "*",
}
]
)
assert len(jobs) == 1
job = jobs[0]
jobid = job["jobid"]
    job_location_args = [
        {
            "jobid": jobid,
            "nodes": job["nodes"],
            "queue": job["queue"],
            "utility_score": 1,
            "threshold": 1,
            "walltime": job["walltime"],
            "attrs": {},
        }
    ]
    locations = simulator.find_job_location(job_location_args, [])
    assert jobid in locations
    location = locations[jobid]
    cqm.run_jobs([{"jobid": jobid}], location)
    r = cqm.get_jobs([{"jobid": jobid, "state": "*", "is_active": True}])
    if not r:
        assert not "the job didn't start"
    time.sleep(20)
    r = cqm.get_jobs([{"jobid": jobid, "state": "*", "is_active": True}])
    if len(r) != 1:
        assert not "the job has stopped running prematurely"
    start_time = time.time()
    while True:
        r = cqm.get_jobs([{"jobid": jobid, "state": "*", "is_active": True}])
        if r:
            if time.time() - start_time > 240:
#......... remainder of this code omitted .........