This page collects typical usage examples of the Python method toil.job.Job._loadJob. If you are wondering what Job._loadJob does, how to call it, or what real usage looks like, the curated code samples below may help. You can also read further about the containing class, toil.job.Job.
Three code examples of Job._loadJob are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
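Before the full examples, here is a minimal sketch of the calling pattern all three share: the leader serializes a user-defined job into jobWrapper.command, prefixed with "_toil ", and the worker uses Job._loadJob to turn that command string plus the job store back into a live Job object. This is only a sketch distilled from the examples below: the names jobStore, jobWrapper and fileStore are assumed to be supplied by the surrounding worker code (they are not constructed here), and the final _runner call follows Example 1; older Toil versions use _execute instead, as Examples 2 and 3 show.

from toil.job import Job

def runLoadedJob(jobStore, jobWrapper, fileStore):
    # Nothing to run: the jobWrapper is just a shell waiting to be deleted.
    if jobWrapper.command is None:
        return
    # Worker commands produced by the leader start with the "_toil " marker.
    assert jobWrapper.command.startswith("_toil ")
    # Deserialize the user-defined Job instance; _loadJob reads the pickled
    # job referenced by the command string out of the given job store.
    job = Job._loadJob(jobWrapper.command, jobStore)
    # Hand the loaded job to Toil's internal runner (signature as in Example 1).
    job._runner(jobWrapper=jobWrapper, jobStore=jobStore, fileStore=fileStore)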
Example 1: main
# Required import: from toil.job import Job [as alias]
# Or: from toil.job.Job import _loadJob [as alias]
# ......... (part of this example's code is omitted here) .........
    jobWrapper.services = []  # Empty the services
    # Update the jobStore to avoid doing this twice on failure and make this clean.
    jobStore.update(jobWrapper)
# Otherwise, the job and successors are done, and we can cleanup stuff we couldn't clean
# because of the job being a checkpoint
else:
    logger.debug("The checkpoint jobs seems to have completed okay, removing any checkpoint files to delete.")
    # Delete any remnant files
    map(jobStore.deleteFile, filter(jobStore.fileExists, jobWrapper.checkpointFilesToDelete))

##########################################
# Setup the stats, if requested
##########################################
if config.stats:
    startTime = time.time()
    startClock = getTotalCpuTime()

# Make a temporary file directory for the jobWrapper
# localTempDir = makePublicDir(os.path.join(localWorkerTempDir, "localTempDir"))

startTime = time.time()
while True:
    ##########################################
    # Run the jobWrapper, if there is one
    ##########################################
    if jobWrapper.command is not None:
        assert jobWrapper.command.startswith("_toil ")
        logger.debug("Got a command to run: %s" % jobWrapper.command)
        # Load the job
        job = Job._loadJob(jobWrapper.command, jobStore)
        # If it is a checkpoint job, save the command
        if job.checkpoint:
            jobWrapper.checkpoint = jobWrapper.command
        # Need to fix all this for non shared cache runs
        if config.disableSharedCache:
            # Cleanup the cache from the previous job
            cleanCacheFn(job.effectiveRequirements(jobStore.config).cache)
        # Create a fileStore object for the job
        fileStore = FileStore(jobStore, jobWrapper, localWorkerTempDir, blockFn)
        with job._executor(jobWrapper=jobWrapper,
                           stats=statsDict if config.stats else None,
                           fileStore=fileStore):
            with fileStore.open(job):
                # Get the next block function and list that will contain any messages
                blockFn = fileStore._blockFn
                job._runner(jobWrapper=jobWrapper, jobStore=jobStore, fileStore=fileStore)
        # Accumulate messages from this job & any subsequent chained jobs
        statsDict.workers.logsToMaster += fileStore.loggingMessages
        if config.disableSharedCache:
            # Set the clean cache function
            cleanCacheFn = fileStore._cleanLocalTempDir
    else:
        # The command may be none, in which case
        # the jobWrapper is either a shell ready to be deleted or has
        # been scheduled after a failure to cleanup
        break
Example 2: main
# Required import: from toil.job import Job [as alias]
# Or: from toil.job.Job import _loadJob [as alias]
# ......... (part of this example's code is omitted here) .........
    # However, if they are gone then we can remove them from the stack.
    # This is the only way to flush successors that have previously been run,
    # as jobs are, as far as possible, read only in the leader.
    jobWrapper.stack.pop()

# This cleans the old log file which may
# have been left if the jobWrapper is being retried after a jobWrapper failure.
oldLogFile = jobWrapper.logJobStoreFileID
jobWrapper.logJobStoreFileID = None
jobStore.update(jobWrapper)  # Update first, before deleting the file
if oldLogFile != None:
    jobStore.delete(oldLogFile)

# Make a temporary file directory for the jobWrapper
localTempDir = makePublicDir(os.path.join(localWorkerTempDir, "localTempDir"))

##########################################
# Setup the stats, if requested
##########################################
if config.stats:
    startTime = time.time()
    startClock = getTotalCpuTime()

startTime = time.time()
while True:
    ##########################################
    # Run the jobWrapper, if there is one
    ##########################################
    if jobWrapper.command != None:
        if jobWrapper.command.startswith("_toil "):
            # Load the job
            job = Job._loadJob(jobWrapper.command, jobStore)
            # Cleanup the cache from the previous job
            cleanCacheFn(job.effectiveRequirements(jobStore.config).cache)
            # Create a fileStore object for the job
            fileStore = Job.FileStore(jobStore, jobWrapper, localTempDir,
                                      blockFn)
            # Get the next block function and list that will contain any messages
            blockFn = fileStore._blockFn
            messages = fileStore.loggingMessages
            job._execute(jobWrapper=jobWrapper,
                         stats=statsDict if config.stats else None,
                         localTempDir=localTempDir,
                         jobStore=jobStore,
                         fileStore=fileStore)
            # Set the clean cache function
            cleanCacheFn = fileStore._cleanLocalTempDir
        else:  # Is another command (running outside of jobs may be deprecated)
            # Cleanup the cache from the previous job
            cleanCacheFn(0)
            system(jobWrapper.command)
            # Set a dummy clean cache fn
            cleanCacheFn = lambda x: None
    else:
        # The command may be none, in which case
        # the jobWrapper is either a shell ready to be deleted or has
        # been scheduled after a failure to cleanup
        break
Example 3: main
# Required import: from toil.job import Job [as alias]
# Or: from toil.job.Job import _loadJob [as alias]
# ......... (part of this example's code is omitted here) .........
    # However, if they are gone then we can remove them from the stack.
    # This is the only way to flush successors that have previously been run,
    # as jobs are, as far as possible, read only in the leader.
    job.stack.pop()

# This cleans the old log file which may
# have been left if the job is being retried after a job failure.
if job.logJobStoreFileID != None:
    job.clearLogFile(jobStore)

##########################################
# Setup the stats, if requested
##########################################
if config.stats:
    startTime = time.time()
    startClock = getTotalCpuTime()
    stats = ET.Element("worker")
else:
    stats = None

startTime = time.time()
while True:
    ##########################################
    # Run the job, if there is one
    ##########################################
    if job.command != None:
        if job.command[:11] == "scriptTree ":
            # Make a temporary file directory for the job
            localTempDir = makePublicDir(os.path.join(localWorkerTempDir, "localTempDir"))
            # Is a job command
            messages = Job._loadJob(job.command, jobStore)._execute(
                jobWrapper=job, stats=stats, localTempDir=localTempDir, jobStore=jobStore
            )
            # Remove the temporary file directory
            shutil.rmtree(localTempDir)
        else:  # Is another command (running outside of jobs may be deprecated)
            system(job.command)
            messages = []
    else:
        # The command may be none, in which case
        # the job is just a shell ready to be deleted
        assert len(job.stack) == 0
        messages = []
        break

    ##########################################
    # Establish if we can run another job within the worker
    ##########################################

    # No more jobs to run, so quit
    if len(job.stack) == 0:
        break

    # Get the next set of jobs to run
    jobs = job.stack[-1]
    assert len(jobs) > 0

    # If there are 2 or more jobs to run in parallel we quit
    if len(jobs) >= 2:
        logger.debug("No more jobs can run in series by this worker, it's got %i children", len(jobs) - 1)
        break