本文整理汇总了Python中toil.common.Toil.resumeJobStore方法的典型用法代码示例。如果您正苦于以下问题:Python Toil.resumeJobStore方法的具体用法?Python Toil.resumeJobStore怎么用?Python Toil.resumeJobStore使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类toil.common.Toil
的用法示例。
在下文中一共展示了Toil.resumeJobStore方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: main
# 需要导入模块: from toil.common import Toil [as 别名]
# 或者: from toil.common.Toil import resumeJobStore [as 别名]
def main():
    """Report statistics on a workflow's jobs.

    Intended for workflows that were run with the --stats option to toil,
    so that per-job statistics were retained in the job store.
    """
    # Build and validate the command-line options.
    optionParser = getBasicOptionParser()
    initializeOptions(optionParser)
    parsedOptions = parseBasicOptions(optionParser)
    checkOptions(parsedOptions, optionParser)
    # Resume the existing job store, pull the raw stats out of it,
    # collate them, and print the report.
    store = Toil.resumeJobStore(parsedOptions.jobStore)
    rawStats = getStats(store)
    collated = processData(store.config, rawStats)
    reportData(collated, parsedOptions)
示例2: testMultipleJobsPerWorkerStats
# 需要导入模块: from toil.common import Toil [as 别名]
# 或者: from toil.common.Toil import resumeJobStore [as 别名]
def testMultipleJobsPerWorkerStats(self):
    """
    Run a workflow in which a single worker executes multiple jobs, and
    verify that every job is represented in the collated statistics.
    """
    opts = Job.Runner.getDefaultOptions(self._getTestJobStorePath())
    # Keep the job store around after the run ('never' clean) and record stats.
    opts.clean = 'never'
    opts.stats = True
    Job.Runner.startToil(RunTwoJobsPerWorker(), opts)
    # Resume the finished workflow's job store and collate its stats.
    store = Toil.resumeJobStore(opts.jobStore)
    collated = processData(store.config, getStats(store))
    # Two jobs ran, so two job types must appear in the stats.
    self.assertTrue(len(collated.job_types) == 2,
                    "Some jobs are not represented in the stats")
示例3: main
# 需要导入模块: from toil.common import Toil [as 别名]
# 或者: from toil.common.Toil import resumeJobStore [as 别名]
def main():
    """Kill all jobs issued by a Toil workflow.

    Resumes the workflow's job store and recreates its batch system (which
    by itself should terminate the jobs it had issued), then explicitly
    kills any batch jobs still listed as issued, just in case.
    """
    parser = getBasicOptionParser()
    parser.add_argument("jobStore", type=str,
                        help="The location of the job store used by the workflow whose jobs should "
                             "be killed." + jobStoreLocatorHelp)
    parser.add_argument("--version", action='version', version=version)
    options = parseBasicOptions(parser)
    jobStore = Toil.resumeJobStore(options.jobStore)
    logger.info("Starting routine to kill running jobs in the toil workflow: %s" % options.jobStore)
    ####This behaviour is now broken
    # Recreating the batch system should automatically kill the existing jobs.
    batchSystem = Toil.createBatchSystem(jobStore.config)
    # BUG FIX: killBatchJobs expects a list of job IDs, not one scalar ID per
    # call — passing a bare ID would make the batch system iterate over an int.
    # Kill everything still reported as issued in a single call.
    batchSystem.killBatchJobs(batchSystem.getIssuedBatchJobIDs())
    logger.info("All jobs SHOULD have been killed")
示例4: main
# 需要导入模块: from toil.common import Toil [as 别名]
# 或者: from toil.common.Toil import resumeJobStore [as 别名]
def main():
    """Report the state of a Toil workflow.

    Resumes the job store named on the command line, surveys the jobs it
    contains, and prints a summary (to stderr). With --verbose the log
    files of failed jobs are also streamed; with --failIfNotComplete the
    process exits 1 when unfinished jobs remain.
    """
    ##########################################
    # Construct the arguments.
    ##########################################
    parser = getBasicOptionParser()
    parser.add_argument("jobStore", type=str,
                        help="The location of a job store that holds the information about the "
                             "workflow whose status is to be reported on." + jobStoreLocatorHelp)
    parser.add_argument("--verbose", dest="verbose", action="store_true",
                        help="Print loads of information, particularly all the log files of \
                        jobs that failed. default=%(default)s",
                        default=False)
    parser.add_argument("--failIfNotComplete", dest="failIfNotComplete", action="store_true",
                        help="Return exit value of 1 if toil jobs not all completed. default=%(default)s",
                        default=False)
    parser.add_argument("--version", action='version', version=version)
    options = parseBasicOptions(parser)
    logger.info("Parsed arguments")
    # With no arguments at all, show usage and exit cleanly.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(0)
    ##########################################
    # Do some checks.
    ##########################################
    logger.info("Checking if we have files for Toil")
    assert options.jobStore is not None
    ##########################################
    # Survey the status of the job and report.
    ##########################################
    jobStore = Toil.resumeJobStore(options.jobStore)
    try:
        rootJob = jobStore.loadRootJob()
    except JobException:
        # A missing root job is how a successfully finished workflow presents.
        print('The root job of the job store is absent, the workflow completed successfully.',
              file=sys.stderr)
        sys.exit(0)
    toilState = ToilState(jobStore, rootJob)
    # The first element of the toilState.updatedJobs tuple is the jobWrapper we want to inspect.
    # Union of jobs with outstanding successors and jobs flagged as updated.
    totalJobs = set(toilState.successorCounts.keys()) | \
                {jobTuple[0] for jobTuple in toilState.updatedJobs}
    # A job with no retries left is considered totally failed.
    failedJobs = [job for job in totalJobs if job.remainingRetryCount == 0]
    print('There are %i active jobs, %i parent jobs with children, and %i totally failed jobs '
          'currently in %s.' % (len(toilState.updatedJobs), len(toilState.successorCounts),
                                len(failedJobs), options.jobStore), file=sys.stderr)
    if options.verbose:  # Verbose currently means outputting the files that have failed.
        for job in failedJobs:
            if job.logJobStoreFileID is not None:
                # Stream the failed job's log file through the logger.
                with job.getLogFileHandle(jobStore) as logFileHandle:
                    logStream(logFileHandle, job.jobStoreID, logger.warn)
            else:
                print('Log file for job %s is absent.' % job.jobStoreID, file=sys.stderr)
        if len(failedJobs) == 0:
            print('There are no failed jobs to report.', file=sys.stderr)
    # Non-zero exit if work remains and the caller asked for strict completion.
    if (len(toilState.updatedJobs) + len(toilState.successorCounts)) != 0 and \
            options.failIfNotComplete:
        sys.exit(1)
示例5: main
# 需要导入模块: from toil.common import Toil [as 别名]
# 或者: from toil.common.Toil import resumeJobStore [as 别名]
def main():
    """Entry point of a Toil worker process.

    Loads the job store and workflow config named by sys.argv, restores the
    workflow environment, and prepares logging and temporary directories
    before running jobs. (The remainder of this function is omitted in this
    excerpt.)
    """
    logging.basicConfig()
    ##########################################
    # Import necessary modules
    ##########################################
    # This is assuming that worker.py is at a path ending in "/toil/worker.py".
    sourcePath = os.path.dirname(os.path.dirname(__file__))
    if sourcePath not in sys.path:
        sys.path.append(sourcePath)
    # Now we can import all the necessary functions
    from toil.lib.bioio import setLogLevel
    from toil.lib.bioio import getTotalCpuTime
    from toil.lib.bioio import getTotalCpuTimeAndMemoryUsage
    from toil.job import Job
    try:
        import boto
    except ImportError:
        pass
    else:
        # boto is installed, monkey patch it now
        from bd2k.util.ec2.credentials import enable_metadata_credential_caching
        enable_metadata_credential_caching()
    ##########################################
    # Input args
    ##########################################
    jobStoreLocator = sys.argv[1]
    jobStoreID = sys.argv[2]
    # we really want a list of job names but the ID will suffice if the job graph can't
    # be loaded. If we can discover the name, we will replace this initial entry
    listOfJobs = [jobStoreID]
    ##########################################
    # Load the jobStore/config file
    ##########################################
    jobStore = Toil.resumeJobStore(jobStoreLocator)
    config = jobStore.config
    ##########################################
    # Create the worker killer, if requested
    ##########################################
    logFileByteReportLimit = config.maxLogFileSize
    # badWorker is a test knob: with probability config.badWorker, spawn a
    # thread that kills this worker at a random point during its run.
    if config.badWorker > 0 and random.random() < config.badWorker:
        def badWorker():
            # This will randomly kill the worker process at a random time
            time.sleep(config.badWorkerFailInterval * random.random())
            os.kill(os.getpid(), signal.SIGKILL)  # signal.SIGINT)
            # TODO: FIX OCCASIONAL DEADLOCK WITH SIGINT (tested on single machine)
        t = Thread(target=badWorker)
        # Ideally this would be a daemon thread but that causes an intermittent (but benign)
        # exception similar to the one described here:
        # http://stackoverflow.com/questions/20596918/python-exception-in-thread-thread-1-most-likely-raised-during-interpreter-shutd
        # Our exception is:
        # Exception in thread Thread-1 (most likely raised during interpreter shutdown):
        # <type 'exceptions.AttributeError'>: 'NoneType' object has no attribute 'kill'
        # This attribute error is caused by the call os.kill() and apparently unavoidable with a
        # daemon
        t.start()
    ##########################################
    # Load the environment for the jobGraph
    ##########################################
    # First load the environment for the jobGraph.
    with jobStore.readSharedFileStream("environment.pickle") as fileHandle:
        environment = cPickle.load(fileHandle)
    # Replicate the leader's environment, except variables that must stay
    # host-local on this worker.
    for i in environment:
        if i not in ("TMPDIR", "TMP", "HOSTNAME", "HOSTTYPE"):
            os.environ[i] = environment[i]
    # sys.path is used by __import__ to find modules
    if "PYTHONPATH" in environment:
        for e in environment["PYTHONPATH"].split(':'):
            if e != '':
                sys.path.append(e)
    setLogLevel(config.logLevel)
    toilWorkflowDir = Toil.getWorkflowDir(config.workflowID, config.workDir)
    ##########################################
    # Setup the temporary directories.
    ##########################################
    # Dir to put all this worker's temp files in.
    localWorkerTempDir = tempfile.mkdtemp(dir=toilWorkflowDir)
    os.chmod(localWorkerTempDir, 0o755)
    ##########################################
    # Setup the logging
    ##########################################
    # This is mildly tricky because we don't just want to
    # redirect stdout and stderr for this Python process; we want to redirect it
    # for this process and all children. Consequently, we can't just replace
    # ......... the rest of this function is omitted in this excerpt .........
示例6: main
# 需要导入模块: from toil.common import Toil [as 别名]
# 或者: from toil.common.Toil import resumeJobStore [as 别名]
def main():
    """Report the state of a Toil workflow.

    Unlike the simpler status report, this version walks the whole job
    graph (children and services) to classify every job it can reach.
    (The tail of this function is omitted in this excerpt, leaving the
    final logger.info call incomplete.)
    """
    ##########################################
    # Construct the arguments.
    ##########################################
    parser = getBasicOptionParser()
    parser.add_argument("jobStore", type=str,
                        help="The location of a job store that holds the information about the "
                             "workflow whose status is to be reported on." + jobStoreLocatorHelp)
    parser.add_argument("--verbose", dest="verbose", action="store_true",
                        help="Print loads of information, particularly all the log files of \
                        jobs that failed. default=%(default)s",
                        default=False)
    parser.add_argument("--failIfNotComplete", dest="failIfNotComplete", action="store_true",
                        help="Return exit value of 1 if toil jobs not all completed. default=%(default)s",
                        default=False)
    parser.add_argument("--version", action='version', version=version)
    options = parseBasicOptions(parser)
    logger.info("Parsed arguments")
    # With no arguments at all, show usage and exit cleanly.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(0)
    ##########################################
    # Do some checks.
    ##########################################
    logger.info("Checking if we have files for Toil")
    assert options.jobStore is not None
    config = Config()
    config.setOptions(options)
    ##########################################
    # Survey the status of the job and report.
    ##########################################
    jobStore = Toil.resumeJobStore(config.jobStore)
    try:
        rootJob = jobStore.loadRootJob()
    except JobException:
        # A missing root job is how a successfully finished workflow presents.
        print('The root job of the job store is absent, the workflow completed successfully.',
              file=sys.stderr)
        sys.exit(0)
    def traverseGraph(jobGraph):
        # Depth-first walk from jobGraph over successors and service jobs,
        # deduplicating by jobStoreID; returns every reachable job.
        foundJobStoreIDs = set()
        totalJobs = []
        def inner(jobGraph):
            if jobGraph.jobStoreID in foundJobStoreIDs:
                return
            foundJobStoreIDs.add(jobGraph.jobStoreID)
            totalJobs.append(jobGraph)
            # Traverse jobs in stack
            for jobs in jobGraph.stack:
                for successorJobStoreID in [x.jobStoreID for x in jobs]:
                    if (successorJobStoreID not in foundJobStoreIDs and jobStore.exists(successorJobStoreID)):
                        inner(jobStore.load(successorJobStoreID))
            # Traverse service jobs
            for jobs in jobGraph.services:
                for serviceJobStoreID in [x.jobStoreID for x in jobs]:
                    if jobStore.exists(serviceJobStoreID):
                        assert serviceJobStoreID not in foundJobStoreIDs
                        foundJobStoreIDs.add(serviceJobStoreID)
                        totalJobs.append(jobStore.load(serviceJobStoreID))
        inner(jobGraph)
        return totalJobs
    logger.info('Traversing the job graph. This may take a couple minutes.')
    totalJobs = traverseGraph(rootJob)
    # Classify every reachable job; a job may land in several buckets.
    failedJobs = []
    hasChildren = []
    hasServices = []
    services = []
    currentlyRunnning = []
    for job in totalJobs:
        # A recorded log file marks a job that has failed at least once.
        if job.logJobStoreFileID is not None:
            failedJobs.append(job)
        if job.stack:
            hasChildren.append(job)
        elif job.remainingRetryCount != 0 and job.logJobStoreFileID != 0 and job.command:
            # The job has no children, hasn't failed, and has a command to run. This indicates that the job is
            # likely currently running, or at least could be run.
            currentlyRunnning.append(job)
        if job.services:
            hasServices.append(job)
        if job.startJobStoreID or job.terminateJobStoreID or job.errorJobStoreID:
            # these attributes are only set in service jobs
            services.append(job)
    logger.info('There are %i unfinished jobs, %i parent jobs with children, %i jobs with services, %i services, '
                'and %i totally failed jobs currently in %s.' %
                # ......... the rest of this function is omitted in this excerpt .........