本文整理汇总了Python中toil.job.Job.addChild方法的典型用法代码示例。如果您正苦于以下问题:Python Job.addChild方法的具体用法?Python Job.addChild怎么用?Python Job.addChild使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类toil.job.Job
的用法示例。
在下文中一共展示了Job.addChild方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: testConcurrencyWithDisk
# 需要导入模块: from toil.job import Job [as 别名]
# 或者: from toil.job.Job import addChild [as 别名]
def testConcurrencyWithDisk(self):
    """
    Verify that the batch system honours disk requests: two child jobs whose
    combined disk demand exceeds the physically available space must never
    run at the same time.
    """
    work_dir = self._createTempDir('testFiles')
    opts = Job.Runner.getDefaultOptions(self._getTestJobStorePath())
    opts.workDir = work_dir
    from toil import physicalDisk
    disk_total = physicalDisk('', toilWorkflowDir=opts.workDir)
    opts.batchSystem = self.batchSystemName
    counter_file = os.path.join(work_dir, 'counter')
    resetCounters(counter_file)
    current, peak = getCounters(counter_file)
    assert current == 0 and peak == 0
    parent = Job()
    # The two children ask for 50% of disk and 50% of disk + 500 bytes
    # respectively; together they oversubscribe the disk, so the batch
    # system must serialise them.
    half_disk = disk_total / 2
    for disk_request in (half_disk, half_disk + 500):
        parent.addChild(Job.wrapFn(measureConcurrency, counter_file,
                                   self.sleepTime, cores=1, memory='1M',
                                   disk=disk_request))
    Job.Runner.startToil(parent, opts)
    _, peak = getCounters(counter_file)
    self.assertEqual(peak, 1)
示例2: testPromisedRequirementStatic
# 需要导入模块: from toil.job import Job [as 别名]
# 或者: from toil.job.Job import addChild [as 别名]
def testPromisedRequirementStatic(self):
    """
    Asserts that promised core resources are allocated properly when the
    follow-on jobs of a static DAG take their core and memory requirements
    from promises fulfilled by earlier jobs.
    """
    for cores_per_job in self.allocatedCores:
        work_dir = self._createTempDir('testFiles')
        counter_file = self.getCounterPath(work_dir)
        parent = Job()
        # These two jobs produce the values the follow-ons' promised
        # requirements are derived from.
        core_source = Job.wrapFn(getOne, cores=0.1, memory='32M', disk='1M')
        memory_source = Job.wrapFn(getThirtyTwoMb, cores=0.1, memory='32M',
                                   disk='1M')
        parent.addChild(core_source)
        parent.addChild(memory_source)
        for _ in range(self.cpuCount):
            follow_on = Job.wrapFn(
                batchSystemTest.measureConcurrency, counter_file,
                cores=PromisedRequirement(lambda x: x * cores_per_job,
                                          core_source.rv()),
                memory=PromisedRequirement(memory_source.rv()),
                disk='1M')
            parent.addFollowOn(follow_on)
        Job.Runner.startToil(parent, self.getOptions(work_dir))
        _, peak = batchSystemTest.getCounters(counter_file)
        # With cores_per_job cores per follow-on, only cpuCount/cores_per_job
        # of them can ever run concurrently.
        self.assertEqual(peak, self.cpuCount / cores_per_job)
示例3: testNestedResourcesDoNotBlock
# 需要导入模块: from toil.job import Job [as 别名]
# 或者: from toil.job.Job import addChild [as 别名]
def testNestedResourcesDoNotBlock(self):
    """
    Resources are requested in the order Memory > Cpu > Disk.
    Test that unavailability of cpus for one scheduled job does not block
    another job that is able to run.
    """
    work_dir = self._createTempDir('testFiles')
    opts = Job.Runner.getDefaultOptions(self._getTestJobStorePath())
    opts.workDir = work_dir
    opts.maxCores = 4
    from toil import physicalMemory
    total_memory = physicalMemory()
    opts.batchSystem = self.batchSystemName
    out_file = os.path.join(work_dir, 'counter')
    open(out_file, 'w').close()

    def make_job(write_val, sleep_time, cores, memory):
        # All test jobs run the same auxiliary fn and log to the same file.
        return Job.wrapFn(_resourceBlockTestAuxFn, outFile=out_file,
                          sleepTime=sleep_time, writeVal=write_val,
                          cores=cores, memory=memory, disk='1M')

    root = Job()
    blocker = make_job('b', 30, cores=2, memory='1M')
    first_job = make_job('fJ', 5, cores=1, memory='1M')
    second_job = make_job('sJ', 10, cores=1, memory='1M')
    # Should block off 50% of memory while waiting for its 3 cores.
    first_job_child = make_job('fJC', 0, cores=3,
                               memory=int(total_memory / 2))
    # These two shouldn't be able to run before the blocker finishes: only
    # (50% of memory - 1M) remains while first_job_child holds its 50%.
    second_job_child = make_job('sJC', 5, cores=2,
                                memory=int(total_memory / 1.5))
    second_job_grandchild = make_job('sJGC', 5, cores=2,
                                     memory=int(total_memory / 1.5))

    root.addChild(blocker)
    root.addChild(first_job)
    root.addChild(second_job)
    first_job.addChild(first_job_child)
    second_job.addChild(second_job_child)
    second_job_child.addChild(second_job_grandchild)
    """
    The tree is:
                root
               /  |  \
              b  fJ   sJ
                  |    |
                 fJC  sJC
                       |
                      sJGC
    But the order of execution should be
        root > b, fJ, sJ > sJC > sJGC > fJC
    since fJC cannot run until the blocker finishes, while sJC and sJGC can.
    If resource acquisition is written properly, fJC — although scheduled
    before sJC and sJGC — must not block them, and should only run after
    they finish.
    """
    Job.Runner.startToil(root, opts)
    with open(out_file) as handle:
        trace = handle.read()
    # b, fJ and sJ are scheduled simultaneously, so their relative order is
    # non-deterministic; accept every permutation as a valid prefix.
    valid_prefixes = tuple(''.join(p)
                           for p in itertools.permutations(['b', 'fJ', 'sJ']))
    assert trace.startswith(valid_prefixes)
    assert trace.endswith('sJCsJGCfJC')