本文整理汇总了Python中toil.job.Job类的典型用法代码示例。如果您正苦于以下问题:Python Job类的具体用法?Python Job怎么用?Python Job使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Job类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: makeWorkflow
def makeWorkflow():
    """Build the test workflow: a root Job hosting three
    TestServiceSerialization services, plus a child job function that
    consumes the services' promised return values and the global outFile.

    Returns the root job, ready to be handed to a runner.
    """
    root = Job()
    # Collect the promise (rv) of each service as it is attached.
    serviceRvs = [root.addService(TestServiceSerialization(name))
                  for name in ("woot1", "woot2", "woot3")]
    root.addChildFn(fnTest, serviceRvs, outFile)
    return root
示例2: __init__
def __init__(self, tree, event, sleepTime, startTime, cpu):
    """Create the job, forwarding the cpu requirement to Job.__init__
    and stashing every argument on the instance for later use."""
    Job.__init__(self, cpu=cpu)
    # Keep all constructor arguments available as attributes.
    self.tree, self.event = tree, event
    self.sleepTime, self.startTime = sleepTime, startTime
    self.cpu = cpu
示例3: main
def main():
    """Restarts a toil workflow.

    Parses the basic toil options, forces restart mode, then cleans the
    job store and re-enters the leader main loop from the stored root job.
    """
    ##########################################
    # Construct the arguments.
    ##########################################
    parser = getBasicOptionParser()
    parser.add_argument("--version", action='version', version=version)
    parser.add_argument("jobStore", type=str,
                        help=("Store in which to place job management files \
and the global accessed temporary files"
                              "(If this is a file path this needs to be globally accessible "
                              "by all machines running jobs).\n"
                              "If the store already exists and restart is false an"
                              " ExistingJobStoreException exception will be thrown."))
    options = parseBasicOptions(parser)
    ##########################################
    # Now run the toil construction/leader
    ##########################################
    setLoggingFromOptions(options)
    options.restart = True
    with setupToil(options) as (config, batchSystem, jobStore):
        # Load the root job once instead of twice — each load may hit the
        # job store. NOTE(review): assumes clean() does not invalidate the
        # already-loaded root job object; confirm against the jobStore API.
        rootJob = Job._loadRootJob(jobStore)
        jobStore.clean(rootJob)
        mainLoop(config, batchSystem, jobStore, rootJob)
示例4: testEncapsulation
def testEncapsulation(self):
    """
    Tests the Job.encapsulation method, which uses the EncapsulationJob
    class.
    """
    # Temporary file the job functions append their letters to
    outFile = getTempFile(rootDir=self._createTempDir())
    try:
        # Encapsulate a job graph
        a = T.wrapJobFn(encapsulatedJobFn, "A", outFile)
        a = a.encapsulate()
        # Now add children/follow to the encapsulated graph
        d = T.wrapFn(f, a.rv(), outFile)
        e = T.wrapFn(f, d.rv(), outFile)
        a.addChild(d)
        a.addFollowOn(e)
        # Create the runner for the workflow.
        options = T.Runner.getDefaultOptions(self._getTestJobStorePath())
        options.logLevel = "INFO"
        # Run the workflow, the return value being the number of failed jobs
        T.Runner.startToil(a, options)
        # Check output; use a context manager so the handle is not leaked
        # (the original left the file object open). assertEquals is the
        # deprecated alias of assertEqual.
        with open(outFile, 'r') as fH:
            self.assertEqual(fH.readline(), "ABCDE")
    finally:
        os.remove(outFile)
示例5: testJobConcurrency
def testJobConcurrency(self):
    """
    Tests that the batch system is allocating core resources properly for concurrent tasks.
    """
    for cores_per_job in self.allocated_cores:
        # Fresh working directory and options for each core allocation.
        temp_dir = self._createTempDir('testFiles')
        options = Job.Runner.getDefaultOptions(self._getTestJobStorePath())
        options.workDir = temp_dir
        options.maxCores = self.cpu_count
        options.batchSystem = self.batchSystemName
        # Zero out the shared concurrency counter file and sanity-check it.
        counter_path = os.path.join(temp_dir, 'counter')
        resetCounters(counter_path)
        value, max_value = getCounters(counter_path)
        assert (value, max_value) == (0, 0)
        # One follow-on per available core, each requesting cores_per_job.
        root = Job()
        for _ in range(self.cpu_count):
            follow_on = Job.wrapFn(measureConcurrency, counter_path,
                                   self.sleep_time, cores=cores_per_job,
                                   memory='1M', disk='1Mi')
            root.addFollowOn(follow_on)
        Job.Runner.startToil(root, options)
        # The peak concurrency recorded must match cores / cores-per-job.
        _, max_value = getCounters(counter_path)
        self.assertEqual(max_value, self.cpu_count / cores_per_job)
示例6: __init__
def __init__(self, tree, event, sleepTime, startTime, cores):
    """Create the job, requesting `cores` cores from the Job base class
    and recording every argument on the instance."""
    Job.__init__(self, cores=cores)
    # Constructor arguments are retained verbatim as attributes.
    self.tree = tree
    self.event = event
    self.sleepTime = sleepTime
    self.startTime = startTime
    self.cores = cores
示例7: testEncapsulation
def testEncapsulation(self):
    """
    Tests the Job.encapsulation method, which uses the EncapsulationJob
    class.
    """
    # Temporary file
    outFile = getTempFile(rootDir=os.getcwd())
    try:
        # Make a job graph
        a = T.wrapFn(f, "A", outFile)
        b = a.addChildFn(f, a.rv(), outFile)
        c = a.addFollowOnFn(f, b.rv(), outFile)
        # Encapsulate it
        a = a.encapsulate()
        # Now add children/follow to the encapsulated graph
        d = T.wrapFn(f, c.rv(), outFile)
        e = T.wrapFn(f, d.rv(), outFile)
        a.addChild(d)
        a.addFollowOn(e)
        # Create the runner for the workflow.
        options = T.Runner.getDefaultOptions()
        options.logLevel = "INFO"
        # Run the workflow, the return value being the number of failed jobs
        self.assertEqual(T.Runner.startToil(a, options), 0)
        T.Runner.cleanup(options)  # This removes the jobStore
        # Check output; use a context manager so the handle is not leaked
        # (the original left the file object open). assertEquals is the
        # deprecated alias of assertEqual.
        with open(outFile, 'r') as fH:
            self.assertEqual(fH.readline(), "ABCDE")
    finally:
        # Cleanup: remove the temp file even when an assertion fails above.
        os.remove(outFile)
示例8: testCacheEjection
def testCacheEjection(self):
    """
    Test cache always always ejects least recently created file
    """
    # Makes three jobs that create an output file each which they write to filestore. The combined size of any two
    # files is always less that cacheSize but the combined size of all 3 is always more so 1 file always has to be
    # ejected. Test to ensure that A is always ejected regardless of size.
    # Make a temp directory for the test
    test_dir = self._createTempDir()
    for test in xrange(10):
        options = Job.Runner.getDefaultOptions(self._getTestJobStorePath())
        options.logLevel = "DEBUG"
        options.cacheSize = 100000
        options.retryCount = 100
        options.badWorker = 0.5
        options.badWorkerFailInterval = 1.0
        # Create a temp file to write the test results
        handle, logfile = tempfile.mkstemp(dir=test_dir)
        os.close(handle)
        file_sizes = [50000, 40000, 30000]
        # Randomize to (potentially) test all combinations
        random.shuffle(file_sizes)
        # Run the workflow. A, B and C do the cache operations, and D prints test status to tempFile.
        # Bug fix: A, B and C previously all used file_sizes[0], defeating the
        # shuffle above — each job must get its own (distinct) size.
        A = Job.wrapJobFn(fileTestJob, file_sizes[0])
        B = Job.wrapJobFn(fileTestJob, file_sizes[1])
        C = Job.wrapJobFn(fileTestJob, file_sizes[2])
        D = Job.wrapJobFn(fileTestCache, A.rv(), B.rv(), C.rv(), logfile)
        A.addChild(B)
        B.addChild(C)
        C.addChild(D)
        Job.Runner.startToil(A, options)
        # Assert jobs passed by reading test results from tempFile
        with open(logfile, 'r') as outfile:
            for test_status in outfile:
                assert test_status.strip() == 'True'
示例9: _deleteLocallyReadFilesFn
def _deleteLocallyReadFilesFn(self, readAsMutable):
    """Run a 2-job workflow: a writer puts a local file into the job
    store, then a child reads it (mutably or not, per readAsMutable)
    and removes the read copy. No retries are allowed."""
    self.options.retryCount = 0
    writerJob = Job.wrapJobFn(self._writeFileToJobStore, isLocalFile=True,
                              memory='10M')
    removerJob = Job.wrapJobFn(self._removeReadFileFn, writerJob.rv(),
                               readAsMutable=readAsMutable, memory='20M')
    writerJob.addChild(removerJob)
    Job.Runner.startToil(writerJob, self.options)
示例10: runNewCheckpointIsLeafVertexTest
def runNewCheckpointIsLeafVertexTest(self, createWorkflowFn):
    """
    Test verification that a checkpoint job is a leaf vertex using both
    valid and invalid cases.
    :param createWorkflowFn: function to create and new workflow and return a tuple of:
                             0) the workflow root job
                             1) a checkpoint job to test within the workflow
    """
    # Table of (log message, kwargs factory) pairs; factories defer
    # construction of the extra service/child/follow-on until each case runs.
    cases = [
        ('Test checkpoint job that is a leaf vertex',
         lambda: dict(expectedException=None)),
        ('Test checkpoint job that is not a leaf vertex due to the presence of a service',
         lambda: dict(checkpointJobService=TrivialService("LeafTestService"),
                      expectedException=JobGraphDeadlockException)),
        ('Test checkpoint job that is not a leaf vertex due to the presence of a child job',
         lambda: dict(checkpointJobChild=Job.wrapJobFn(simpleJobFn, "LeafTestChild"),
                      expectedException=JobGraphDeadlockException)),
        ('Test checkpoint job that is not a leaf vertex due to the presence of a follow-on job',
         lambda: dict(checkpointJobFollowOn=Job.wrapJobFn(simpleJobFn, "LeafTestFollowOn"),
                      expectedException=JobGraphDeadlockException)),
    ]
    for message, makeKwargs in cases:
        logger.info(message)
        # A fresh workflow is created for every case.
        self.runCheckpointVertexTest(*createWorkflowFn(), **makeKwargs())
示例11: testPromiseRequirementRaceStatic
def testPromiseRequirementRaceStatic(self):
    """
    Checks for a race condition when using promised requirements and child job functions.
    """
    # Parent has a concrete promised disk requirement; the child's disk
    # requirement is derived from the parent's return value.
    parent = Job.wrapJobFn(logDiskUsage, 'A', sleep=5,
                           disk=PromisedRequirement(1024))
    child = Job.wrapJobFn(logDiskUsage, 'B',
                          disk=PromisedRequirement(lambda x: x + 1024,
                                                   parent.rv()))
    parent.addChild(child)
    options = self.getOptions(self._createTempDir('testFiles'))
    Job.Runner.startToil(parent, options)
示例12: testReadCachHitFileFromJobStore
def testReadCachHitFileFromJobStore(self):
    """
    Read a file from the file store that has a corresponding cached copy. Ensure the number
    of links on the file are appropriate.
    """
    # Writer job puts a local file into the job store ...
    writer = Job.wrapJobFn(self._writeFileToJobStore, isLocalFile=True)
    # ... and its child reads the same file back, expecting a cache hit.
    reader = Job.wrapJobFn(self._readFromJobStore, isCachedFile=True,
                           cacheReadFile=None, fsID=writer.rv())
    writer.addChild(reader)
    Job.Runner.startToil(writer, self.options)
示例13: testAddChildEncapsulate
def testAddChildEncapsulate(self):
    """
    Make sure that the encapsulate child does not have two parents
    with unique roots.
    """
    # Build two no-op jobs, link them, and encapsulate the child;
    # the graph must still have exactly one root.
    a = T.wrapFn(noOp)
    b = T.wrapFn(noOp)
    a.addChild(b).encapsulate()
    # assertEquals is the deprecated alias of assertEqual.
    self.assertEqual(len(a.getRootJobs()), 1)
示例14: _deleteLocallyReadFilesFn
def _deleteLocallyReadFilesFn(self, readAsMutable):
    """Run a 2-job workflow (write a file, then delete the locally read
    copy) that is expected to fail, and assert the failure is reported
    correctly: two failed jobs and an 'explicitly' deletion message in
    the log.
    """
    self.options.retryCount = 0
    A = Job.wrapJobFn(self._writeFileToJobStore, isLocalFile=True, memory='10M')
    B = Job.wrapJobFn(self._removeReadFileFn, A.rv(), readAsMutable=readAsMutable, memory='20M')
    A.addChild(B)
    try:
        Job.Runner.startToil(A, self.options)
    except FailedJobsException as err:
        self.assertEqual(err.numberOfFailedJobs, 2)
        errMsg = self._parseAssertionError(self.options.logFile)
        if 'explicitly' not in errMsg:
            self.fail('Shouldn\'t see this')
    else:
        # Bug fix: the original silently passed when no exception was
        # raised, so a broken workflow would go unnoticed.
        self.fail('Expected FailedJobsException was not raised')
示例15: testControlledFailedWorkerRetry
def testControlledFailedWorkerRetry(self):
    """
    Conduct a couple of job store operations. Then die. Ensure that the restarted job is
    tracking values in the cache state file appropriately.
    """
    workdir = self._createTempDir(purpose='nonLocalDir')
    self.options.retryCount = 1
    # 2 GiB, matching the disk='2G' request made to the batch system.
    jobDiskBytes = 2 * 1024 * 1024 * 1024
    failingJob = Job.wrapJobFn(self._controlledFailTestFn,
                               jobDisk=jobDiskBytes, testDir=workdir,
                               disk='2G')
    probeJob = Job.wrapJobFn(self._probeJobReqs, sigmaJob=100, disk='100M')
    failingJob.addChild(probeJob)
    Job.Runner.startToil(failingJob, self.options)