当前位置: 首页>>代码示例>>Python>>正文


Python common.Toil类代码示例

本文整理汇总了Python中toil.common.Toil的典型用法代码示例。如果您正苦于以下问题:Python Toil类的具体用法?Python Toil怎么用?Python Toil使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。


在下文中一共展示了Toil类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: main

def main():
    """Kill all currently issued jobs of a Toil workflow.

    Reads the job store locator from the command line, loads the job store,
    and asks a freshly created batch system to kill every job it still lists
    as issued.
    """
    parser = getBasicOptionParser()

    parser.add_argument(
        "jobStore",
        type=str,
        help=(
            # Bug fix: the original used a backslash line-continuation inside the
            # literal, embedding a run of raw indentation into the help text, and
            # the adjacent literals lacked a separating space before "(If".
            "Store in which to place job management files "
            "and the global accessed temporary files "
            "(If this is a file path this needs to be globally accessible "
            "by all machines running jobs).\n"
            "If the store already exists and restart is false an"
            " ExistingJobStoreException exception will be thrown."
        ),
    )
    parser.add_argument("--version", action="version", version=version)
    options = parseBasicOptions(parser)

    jobStore = Toil.loadOrCreateJobStore(options.jobStore)

    logger.info("Starting routine to kill running jobs in the toil workflow: %s" % options.jobStore)
    # NOTE(review): the original flags this behaviour as broken -- confirm upstream.
    # Creating the batch system is expected to kill any pre-existing jobs by itself.
    batchSystem = Toil.createBatchSystem(jobStore.config)
    for jobID in batchSystem.getIssuedBatchJobIDs():  # Just in case: kill anything still issued.
        batchSystem.killBatchJobs(jobID)
    logger.info("All jobs SHOULD have been killed")
开发者ID:broadinstitute,项目名称:toil,代码行数:28,代码来源:toilKill.py

示例2: JobGraphTest

class JobGraphTest(ToilTest):
    """Tests for JobGraph: construction, attribute storage, and equality."""

    def setUp(self):
        super(JobGraphTest, self).setUp()
        self.jobStorePath = self._getTestJobStorePath()
        parser = ArgumentParser()
        Job.Runner.addToilOptions(parser)
        options = parser.parse_args(args=[self.jobStorePath])
        self.toil = Toil(options)
        # __enter__ must return the Toil instance itself (context-manager protocol).
        self.assertEqual(self.toil, self.toil.__enter__())

    def tearDown(self):
        self.toil.__exit__(None, None, None)
        self.toil._jobStore.destroy()
        # Destroying the job store must remove it from disk.
        self.assertFalse(os.path.exists(self.jobStorePath))
        super(JobGraphTest, self).tearDown()

    def testJob(self):
        """
        Tests functions of a job.
        """
        command = "by your command"
        # Bug fix: the original used 2^32, which is bitwise XOR (= 34);
        # 2**32 (4 GiB of memory/disk) was clearly intended.
        memory = 2**32
        disk = 2**32
        cores = 1
        preemptable = 1
        jobStoreID = 100
        remainingRetryCount = 5
        predecessorNumber = 0

        j = JobGraph(command=command, memory=memory, cores=cores, disk=disk, preemptable=preemptable,
                     jobStoreID=jobStoreID, remainingRetryCount=remainingRetryCount,
                     predecessorNumber=predecessorNumber, jobName='testJobGraph', unitName='noName')

        # Check that every constructor argument is stored unchanged.
        # (assertEqual replaces the deprecated assertEquals alias.)
        self.assertEqual(j.command, command)
        self.assertEqual(j.memory, memory)
        self.assertEqual(j.disk, disk)
        self.assertEqual(j.cores, cores)
        self.assertEqual(j.preemptable, preemptable)
        self.assertEqual(j.jobStoreID, jobStoreID)
        self.assertEqual(j.remainingRetryCount, remainingRetryCount)
        self.assertEqual(j.predecessorNumber, predecessorNumber)
        self.assertEqual(j.stack, [])
        self.assertEqual(j.predecessorsFinished, set())
        self.assertEqual(j.logJobStoreFileID, None)

        # Two graphs built from identical arguments must compare equal...
        j2 = JobGraph(command=command, memory=memory, cores=cores, disk=disk,
                      preemptable=preemptable,
                      jobStoreID=jobStoreID, remainingRetryCount=remainingRetryCount,
                      predecessorNumber=predecessorNumber, jobName='testJobGraph', unitName='noName')
        self.assertEqual(j, j2)
        # ...and changing an attribute must break equality.
        j.predecessorsFinished = {"1", "2"}
        self.assertNotEqual(j, j2)
开发者ID:Duke-GCB,项目名称:toil,代码行数:58,代码来源:jobGraphTest.py

示例3: main

def main():
    """Removes the JobStore from a toil run."""

    ##########################################
    # Construct the arguments.
    ##########################################

    parser = getBasicOptionParser()
    # Bug fix: the original help literal used a backslash line-continuation,
    # embedding raw indentation whitespace into the user-visible help text,
    # and lacked a space between "files" and "(If this...".
    parser.add_argument("jobStore", type=str,
                        help=("Store in which to place job management files "
                              "and the global accessed temporary files "
                              "(If this is a file path this needs to be globally accessible "
                              "by all machines running jobs).\n"
                              "If the store already exists and restart is false an"
                              " ExistingJobStoreException exception will be thrown."))
    parser.add_argument("--version", action='version', version=version)
    options = parseBasicOptions(parser)
    logger.info("Parsed arguments")

    ##########################################
    # Survey the status of the job and report.
    ##########################################
    logger.info("Checking if we have files for toil")
    try:
        jobStore = Toil.loadOrCreateJobStore(options.jobStore)
    except JobStoreCreationException:
        # Nothing to delete; treat this as success.
        logger.info("The specified JobStore does not exist, it may have already been deleted")
        sys.exit(0)

    logger.info("Deleting the JobStore")
    jobStore.deleteJobStore()
开发者ID:awesome-python,项目名称:toil,代码行数:32,代码来源:toilClean.py

示例4: main

def main():
    """Command-line entry point: kill every issued job of a Toil workflow."""
    parser = getBasicOptionParser()
    parser.add_argument(
        "jobStore",
        type=str,
        help="The location of the job store used by the workflow whose jobs should "
             "be killed." + jobStoreLocatorHelp)
    parser.add_argument("--version", action='version', version=version)
    options = parseBasicOptions(parser)

    jobStore = Toil.resumeJobStore(options.jobStore)
    logger.info("Starting routine to kill running jobs in the toil workflow: %s" % options.jobStore)

    # NOTE: the original source flags this behaviour as broken.
    # Instantiating the batch system is expected to kill pre-existing jobs on its own.
    batchSystem = Toil.createBatchSystem(jobStore.config)
    # Belt and braces: explicitly kill anything still listed as issued.
    for issuedID in batchSystem.getIssuedBatchJobIDs():
        batchSystem.killBatchJobs(issuedID)
    logger.info("All jobs SHOULD have been killed")
开发者ID:python-toolbox,项目名称:fork_toil,代码行数:17,代码来源:toilKill.py

示例5: setUp

 def setUp(self):
     """Create a Toil context over a fresh test job store and enter it."""
     super(JobWrapperTest, self).setUp()
     self.jobStorePath = self._getTestJobStorePath()
     optionParser = ArgumentParser()
     Job.Runner.addToilOptions(optionParser)
     parsedOptions = optionParser.parse_args(args=[self.jobStorePath])
     self.toil = Toil(parsedOptions)
     # Entering the context manager must hand back the same Toil object.
     self.assertEquals(self.toil, self.toil.__enter__())
开发者ID:HPCBio,项目名称:toil,代码行数:8,代码来源:jobWrapperTest.py

示例6: _getResultsFileName

 def _getResultsFileName(self, toilPath):
     """
     Build the path of the shared results file for the batch systems.

     GridEngine, slurm, and LSF currently use this, and it only works when
     the job store locator is of the "file" type.
     """
     scheme, storeDir = Toil.parseLocator(toilPath)
     assert scheme == "file"
     return os.path.join(storeDir, "results.txt")
开发者ID:chapmanb,项目名称:toil,代码行数:9,代码来源:abstractBatchSystem.py

示例7: main

def main():
    """Command-line utility that destroys the job store of a Toil workflow."""
    parser = getBasicOptionParser()
    parser.add_argument(
        "jobStore",
        type=str,
        help="The location of the job store to delete. " + jobStoreLocatorHelp)
    parser.add_argument("--version", action='version', version=version)
    options = parseBasicOptions(parser)

    logger.info("Attempting to delete the job store")
    Toil.getJobStore(options.jobStore).destroy()
    logger.info("Successfully deleted the job store")
开发者ID:python-toolbox,项目名称:fork_toil,代码行数:10,代码来源:toilClean.py

示例8: main

def main():
    """ Reports stats on the workflow, use with --stats option to toil.
    """
    parser = getBasicOptionParser()
    initializeOptions(parser)
    options = parseBasicOptions(parser)
    checkOptions(options, parser)

    # Load the store first so its config can drive the collation step below.
    jobStore = Toil.loadOrCreateJobStore(options.jobStore)
    rawStats = getStats(options)
    collated = processData(jobStore.config, rawStats, options)
    reportData(collated, options)
开发者ID:PureQsh,项目名称:toil,代码行数:11,代码来源:toilStats.py

示例9: workerCleanup

    def workerCleanup(info):
        """
        Cleans up the worker node on batch system shutdown. Also see :meth:`supportsWorkerCleanup`.

        :param WorkerCleanupInfo info: A named tuple consisting of all the relevant information
               for cleaning up the worker.
        """
        assert isinstance(info, WorkerCleanupInfo)
        workflowDir = Toil.getWorkflowDir(info.workflowID, info.workDir)
        # Remove the workflow directory when cleanup is unconditional ('always'),
        # or when it is conditional ('onSuccess'/'onError') and the directory is
        # already empty.  The parentheses make the original and/or precedence explicit.
        shouldRemove = info.cleanWorkDir == 'always' or (
            info.cleanWorkDir in ('onSuccess', 'onError') and os.listdir(workflowDir) == [])
        if shouldRemove:
            shutil.rmtree(workflowDir)
开发者ID:eaglexmw,项目名称:toil,代码行数:12,代码来源:abstractBatchSystem.py

示例10: testMultipleJobsPerWorkerStats

    def testMultipleJobsPerWorkerStats(self):
        """
        Tests case where multiple jobs are run on 1 worker to insure that all jobs report back their data
        """
        workflowOptions = Job.Runner.getDefaultOptions(self._getTestJobStorePath())
        workflowOptions.clean = "never"  # keep the job store around so stats survive the run
        workflowOptions.stats = True
        Job.Runner.startToil(RunTwoJobsPerWorker(), workflowOptions)

        # Reload the store and collate the logged stats; both jobs must be represented.
        jobStore = Toil.loadOrCreateJobStore(workflowOptions.jobStore)
        rawStats = getStats(workflowOptions)
        collated = processData(jobStore.config, rawStats, workflowOptions)
        self.assertTrue(len(collated.job_types) == 2, "Some jobs are not represented in the stats")
开发者ID:broadinstitute,项目名称:toil,代码行数:13,代码来源:utilsTest.py

示例11: setupBinaries

def setupBinaries(options):
    """Ensure that Cactus's C/C++ components are ready to run, and set up the environment."""
    if options.latest:
        os.environ["CACTUS_USE_LATEST"] = "1"

    # Resolve the binaries mode: the command line wins, then the environment,
    # then the default of running inside Docker.
    if options.binariesMode is not None:
        mode = options.binariesMode
    else:
        mode = os.environ.get("CACTUS_BINARIES_MODE", "docker")
    os.environ["CACTUS_BINARIES_MODE"] = mode

    if mode == "docker":
        # Verify Docker exists on the target system.
        from distutils.spawn import find_executable
        if find_executable('docker') is None:
            raise RuntimeError("The `docker` executable wasn't found on the "
                               "system. Please install Docker if possible, or "
                               "use --binariesMode local and add cactus's bin "
                               "directory to your PATH.")
    elif mode == "local":
        # Running without Docker: verify that the Cactus executables are on PATH.
        from distutils.spawn import find_executable
        if find_executable('cactus_caf') is None:
            raise RuntimeError("Cactus isn't using Docker, but it can't find "
                               "the Cactus binaries. Please add Cactus's bin "
                               "directory to your PATH (and run `make` in the "
                               "Cactus directory if you haven't already).")
        if find_executable('ktserver') is None:
            raise RuntimeError("Cactus isn't using Docker, but it can't find "
                               "`ktserver`, the KyotoTycoon database server. "
                               "Please install KyotoTycoon "
                               "(https://github.com/alticelabs/kyoto) "
                               "and add the binary to your PATH, or use the "
                               "Docker mode.")
    else:
        assert mode == "singularity"
        jobStoreType, locator = Toil.parseLocator(options.jobStore)
        if jobStoreType != "file":
            raise RuntimeError("Singularity mode is only supported when using the FileJobStore.")
        if options.containerImage:
            # A user-supplied image takes precedence over any cached one.
            imgPath = os.path.abspath(options.containerImage)
            os.environ["CACTUS_USE_LOCAL_SINGULARITY_IMG"] = "1"
        elif 'SINGULARITY_CACHEDIR' in os.environ:
            # When SINGULARITY_CACHEDIR is set, singularity will refuse to store images in the current directory
            imgPath = os.path.join(os.environ['SINGULARITY_CACHEDIR'], "cactus.img")
        else:
            imgPath = os.path.join(os.path.abspath(locator), "cactus.img")
        os.environ["CACTUS_SINGULARITY_IMG"] = imgPath
开发者ID:benedictpaten,项目名称:cactus,代码行数:50,代码来源:cactus_progressive.py

示例12: getStats

def getStats(options):
    """ Collect and return the stats and config data.
    """
    def aggregateStats(fileHandle, aggregateObject):
        # Merge one stats file into the shared aggregate, one list per stat key.
        try:
            stats = json.load(fileHandle, object_hook=Expando)
            for key, value in stats.items():
                if key in aggregateObject:
                    aggregateObject[key].append(value)
                else:
                    aggregateObject[key] = [value]
        except ValueError:
            # The file holds corrupted JSON; log it and skip the file.
            logger.critical("File %s contains corrupted json. Skipping file." % fileHandle)

    jobStore = Toil.loadOrCreateJobStore(options.jobStore)
    aggregateObject = Expando()
    callBack = partial(aggregateStats, aggregateObject=aggregateObject)
    jobStore.readStatsAndLogging(callBack, readAll=True)
    return aggregateObject
开发者ID:PureQsh,项目名称:toil,代码行数:20,代码来源:toilStats.py

示例13: main

def main():
    """Entry point for a Toil worker process.

    Reads the job store locator and a job ID from ``sys.argv``, restores the
    pickled workflow environment, prepares per-worker temp directories and
    log redirection, and (optionally) starts a "bad worker" thread that kills
    the process at a random time for fault-tolerance testing.

    NOTE(review): this listing is truncated by the source it was copied from;
    the tail of the function is not visible here.
    """
    logging.basicConfig()

    ##########################################
    #Import necessary modules 
    ##########################################
    
    # This is assuming that worker.py is at a path ending in "/toil/worker.py".
    sourcePath = os.path.dirname(os.path.dirname(__file__))
    if sourcePath not in sys.path:
        sys.path.append(sourcePath)
    
    #Now we can import all the necessary functions
    from toil.lib.bioio import setLogLevel
    from toil.lib.bioio import getTotalCpuTime
    from toil.lib.bioio import getTotalCpuTimeAndMemoryUsage
    from toil.lib.bioio import makePublicDir
    from toil.lib.bioio import system
    from toil.job import Job
    
    ########################################## 
    #Input args
    ##########################################
    
    # Positional CLI arguments: job store locator, then the ID of the job to run.
    jobStoreString = sys.argv[1]
    jobStoreID = sys.argv[2]
    
    ##########################################
    #Load the jobStore/config file
    ##########################################
    
    jobStore = Toil.loadOrCreateJobStore(jobStoreString)
    config = jobStore.config
    
    ##########################################
    #Create the worker killer, if requested
    ##########################################

    # config.badWorker is the probability that this worker self-destructs
    # (used to test the workflow's fault tolerance).
    if config.badWorker > 0 and random.random() < config.badWorker:
        def badWorker():
            #This will randomly kill the worker process at a random time 
            time.sleep(config.badWorkerFailInterval * random.random())
            os.kill(os.getpid(), signal.SIGKILL) #signal.SIGINT)
            #TODO: FIX OCCASIONAL DEADLOCK WITH SIGINT (tested on single machine)
        t = Thread(target=badWorker)
        # Ideally this would be a daemon thread but that causes an intermittent (but benign)
        # exception similar to the one described here:
        # http://stackoverflow.com/questions/20596918/python-exception-in-thread-thread-1-most-likely-raised-during-interpreter-shutd
        # Our exception is:
        #    Exception in thread Thread-1 (most likely raised during interpreter shutdown):
        #    <type 'exceptions.AttributeError'>: 'NoneType' object has no attribute 'kill'
        # This attribute error is caused by the call os.kill() and apparently unavoidable with a
        # daemon
        t.start()

    ##########################################
    #Load the environment for the jobWrapper
    ##########################################
    
    #First load the environment for the jobWrapper.
    with jobStore.readSharedFileStream("environment.pickle") as fileHandle:
        environment = cPickle.load(fileHandle)
    # Restore the leader's environment variables, except those that are
    # inherently machine-local.
    for i in environment:
        if i not in ("TMPDIR", "TMP", "HOSTNAME", "HOSTTYPE"):
            os.environ[i] = environment[i]
    # sys.path is used by __import__ to find modules
    if "PYTHONPATH" in environment:
        for e in environment["PYTHONPATH"].split(':'):
            if e != '':
                sys.path.append(e)

    setLogLevel(config.logLevel)

    toilWorkflowDir = Toil.getWorkflowDir(config.workflowID, config.workDir)

    ##########################################
    #Setup the temporary directories.
    ##########################################
        
    # Dir to put all this worker's temp files in.
    localWorkerTempDir = tempfile.mkdtemp(dir=toilWorkflowDir)
    # NOTE(review): 0755 is Python 2 octal syntax; under Python 3 this would be 0o755.
    os.chmod(localWorkerTempDir, 0755)

    ##########################################
    #Setup the logging
    ##########################################

    #This is mildly tricky because we don't just want to
    #redirect stdout and stderr for this Python process; we want to redirect it
    #for this process and all children. Consequently, we can't just replace
    #sys.stdout and sys.stderr; we need to mess with the underlying OS-level
    #file descriptors. See <http://stackoverflow.com/a/11632982/402891>
    
    #When we start, standard input is file descriptor 0, standard output is
    #file descriptor 1, and standard error is file descriptor 2.

    #What file do we want to point FDs 1 and 2 to?
    tempWorkerLogPath = os.path.join(localWorkerTempDir, "worker_log.txt")
    
    #Save the original stdout and stderr (by opening new file descriptors to the
#......... (the rest of this listing is omitted in the original source) .........
开发者ID:anukat2015,项目名称:toil,代码行数:101,代码来源:worker.py

示例14: main

def main():
    """Reports the state of the toil.

    Loads the job store, counts active / parent / permanently-failed jobs,
    prints a summary to stderr, and optionally (--failIfNotComplete) exits
    non-zero when the workflow has not finished.
    """

    ##########################################
    # Construct the arguments.
    ##########################################

    parser = getBasicOptionParser()

    # Bug fix: the original help literals used backslash line-continuations,
    # embedding runs of raw indentation into the user-visible help text, and
    # read "an JobStoreCreationException" instead of "a JobStoreCreationException".
    parser.add_argument("jobStore", type=str,
                        help=("Store in which to place job management files "
                              "and the global accessed temporary files "
                              "(If this is a file path this needs to be globally accessible "
                              "by all machines running jobs).\n"
                              "If the store already exists and restart is false a"
                              " JobStoreCreationException exception will be thrown."))

    parser.add_argument("--verbose", dest="verbose", action="store_true",
                        help="Print loads of information, particularly all the log files of "
                             "jobs that failed. default=%(default)s",
                        default=False)

    parser.add_argument("--failIfNotComplete", dest="failIfNotComplete", action="store_true",
                        help="Return exit value of 1 if toil jobs not all completed. default=%(default)s",
                        default=False)
    parser.add_argument("--version", action='version', version=version)
    options = parseBasicOptions(parser)
    logger.info("Parsed arguments")

    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(0)

    ##########################################
    # Do some checks.
    ##########################################

    logger.info("Checking if we have files for Toil")
    assert options.jobStore is not None

    ##########################################
    # Survey the status of the job and report.
    ##########################################

    jobStore = Toil.loadOrCreateJobStore(options.jobStore)
    try:
        rootJob = jobStore.loadRootJob()
    except JobException:
        # No root job means the workflow already ran to completion and cleaned up.
        print('The root job of the job store is absent, the workflow completed successfully.',
              file=sys.stderr)
        sys.exit(0)

    toilState = ToilState(jobStore, rootJob)

    # The first element of the toilState.updatedJobs tuple is the jobWrapper we want to inspect
    totalJobs = set(toilState.successorCounts.keys()) | \
                {jobTuple[0] for jobTuple in toilState.updatedJobs}

    # A job with no retries left has failed permanently.
    failedJobs = [job for job in totalJobs if job.remainingRetryCount == 0]

    print('There are %i active jobs, %i parent jobs with children, and %i totally failed jobs '
          'currently in %s.' % (len(toilState.updatedJobs), len(toilState.successorCounts),
                                len(failedJobs), options.jobStore), file=sys.stderr)

    if options.verbose:  # Verbose currently means outputting the files that have failed.
        for job in failedJobs:
            if job.logJobStoreFileID is not None:
                with job.getLogFileHandle(jobStore) as logFileHandle:
                    logStream(logFileHandle, job.jobStoreID, logger.warn)
            else:
                print('Log file for job %s is absent.' % job.jobStoreID, file=sys.stderr)
        if len(failedJobs) == 0:
            print('There are no failed jobs to report.', file=sys.stderr)

    if (len(toilState.updatedJobs) + len(toilState.successorCounts)) != 0 and \
        options.failIfNotComplete:
        sys.exit(1)
开发者ID:PureQsh,项目名称:toil,代码行数:78,代码来源:toilStatus.py

示例15: physicalDisk

def physicalDisk(config, toilWorkflowDir=None):
    """Return the free disk space, in bytes, of the filesystem holding the
    Toil workflow directory.

    :param config: Toil config; only consulted when toilWorkflowDir is None.
    :param toilWorkflowDir: directory to measure; derived from config if None.
    """
    if toilWorkflowDir is None:
        # Imported lazily here, matching the original's function-local import.
        from toil.common import Toil
        toilWorkflowDir = Toil.getWorkflowDir(config.workflowID, config.workDir)
    stats = os.statvfs(toilWorkflowDir)
    # Bytes available to unprivileged users: available blocks * fragment size.
    return stats.f_bavail * stats.f_frsize
开发者ID:Duke-GCB,项目名称:toil,代码行数:6,代码来源:__init__.py


注:本文中的toil.common.Toil类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。