

Python logger.info Function Code Examples

This article collects and summarizes typical usage examples of the sonLib.bioio.logger.info function in Python. If you are wondering how the Python info function is used, how to call it, or what real-world examples look like, the curated code samples here should help.


A total of 15 code examples of the info function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code examples.
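Before the project examples, here is a minimal, self-contained usage sketch. It is not taken from any of the projects below; it assumes sonLib is installed and that sonLib.bioio exports logger and setLogLevel, as the samples on this page suggest.

# Minimal usage sketch (assumption: sonLib is installed and sonLib.bioio
# exports `logger` and `setLogLevel`, consistent with the examples below)
from sonLib.bioio import logger, setLogLevel

setLogLevel("INFO")  # raise the log level so info() messages are actually emitted
logger.info("Starting the workflow step")
logger.info("Processed %i files in %.2f seconds" % (3, 1.52))

The project examples that follow call logger.info in exactly this way, typically to report progress between steps of a jobTree/Toil workflow.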

Example 1: parasolRestart

def parasolRestart():
    """Function starts the parasol hub and node.
    """
    parasolStop()
    while True:
        machineList = os.path.join(workflowRootPath(), "jobTree", "machineList")
        #pathEnvVar = os.environ["PATH"]
        os.system("paraNode start -hub=localhost") 
        #-umask=002 -userPath=%s -sysPath=%s" % (pathEnvVar, pathEnvVar))
        os.system("paraHub %s subnet=127.0.0 &" % (machineList,))
        tempFile = getTempFile()
        dead = True
        try:
            popen("parasol status", tempFile)
            fileHandle = open(tempFile, 'r')
            line = fileHandle.readline()
            while line != '':
                if "Nodes dead" in line:
                    print line
                    if int(line.split()[-1]) == 0:
                        dead = False
                line = fileHandle.readline()
            fileHandle.close()
        except RuntimeError:
            pass
        os.remove(tempFile)
        if not dead:
            break
        else:
            logger.info("Tried to restart the parasol process, but failed, will try again")
            parasolStop()
            time.sleep(5)
    logger.info("Restarted the parasol process")
Developer: decarlin, Project: jobTree, Lines: 33, Source: jobTreeParasolCrashTest.py

Example 2: run

    def run(self):
        self.logToMaster("RepSize\n")
        stime = time.time()
        name2sample = {}
        for sam in os.listdir(self.sampledir):
            filepath = os.path.join(self.sampledir, sam, sam)
            sample = pickle.load(gzip.open(filepath, 'rb'))
            name2sample[sam] = sample
        logger.info("RepSize, done loading %d samples in %.4f s." %
                    (len(name2sample), (time.time() - stime)))
        stime = time.time()

        # Get summary of samples' sizes:
        group2samples = self.options.group2samples
        group2avr = libcommon.get_group_avr(name2sample, group2samples)
        logger.info("RepSize, done computing group_avr in %.4f s." %
                    (time.time() - stime))
        
        txtfile = os.path.join(self.options.outdir, "clonesize.txt")
        repsize.repsize_table(name2sample, txtfile, group2avr, group2samples)
        texfile = os.path.join(self.options.outdir, "clonesize.tex")
        repsize.repsize_table(name2sample, texfile, group2avr, group2samples,
                              True)
        self.addChildTarget(diversity.DiversityRarefaction(self.sampledir,
                                                           self.options))
Developer: ngannguyen, Project: aimseqtk, Lines: 25, Source: aimseq.py

Example 3: run

 def run(self):
     ##########################################
     #Setup a file tree.
     ##########################################
         
     tempFileTree = TempFileTree(os.path.join(self.getGlobalTempDir(), getRandomAlphaNumericString()))   
     
     fileTreeRootFile = tempFileTree.getTempFile()
 
     makeFileTree(fileTreeRootFile, \
                  self.depth, tempFileTree)
     
     treePointer = tempFileTree.getTempFile()
     
     makeTreePointer(fileTreeRootFile, treePointer)
     
     logger.info("We've set up the file tree")
     
     if random.random() > 0.5:
         raise RuntimeError()
     
     ##########################################
     #Issue the child and follow on jobs
     ##########################################
     
     self.addChildTarget(ChildTarget(treePointer))
     
     self.setFollowOnTarget(DestructFileTree(tempFileTree))
     
     logger.info("We've added the child target and finished SetupFileTree.run()")
Developer: ArtRand, Project: jobTree, Lines: 30, Source: scriptTreeTest_Wrapper.py

Example 4: runComparisonOfBlastScriptVsNaiveBlast

    def runComparisonOfBlastScriptVsNaiveBlast(self, blastMode):
        """We compare the output with a naive run of the blast program, to check the results are nearly
        equivalent.
        """
        encodeRegions = [ "ENm00" + str(i) for i in xrange(1,2) ] #, 2) ] #Could go to six
        species = ("human", "mouse", "dog")
        #Other species to try "rat", "monodelphis", "macaque", "chimp"
        for encodeRegion in encodeRegions:
            regionPath = os.path.join(self.encodePath, encodeRegion)
            for i in xrange(len(species)):
                species1 = species[i]
                for species2 in species[i+1:]:
                    seqFile1 = os.path.join(regionPath, "%s.%s.fa" % (species1, encodeRegion))
                    seqFile2 = os.path.join(regionPath, "%s.%s.fa" % (species2, encodeRegion))

                    #Run simple blast
                    runNaiveBlast(seqFile1, seqFile2, self.tempOutputFile, self.tempDir)
                    logger.info("Ran the naive blast okay")
                    
                    #Run cactus blast pipeline
                    toilDir = os.path.join(getTempDirectory(self.tempDir), "toil")
                    if blastMode == "allAgainstAll":
                        runCactusBlast(sequenceFiles=[ seqFile1, seqFile2 ],
                                       alignmentsFile=self.tempOutputFile2, toilDir=toilDir,
                                       chunkSize=500000, overlapSize=10000)
                    else:
                        runCactusBlast(sequenceFiles=[ seqFile1 ], alignmentsFile=self.tempOutputFile2,
                                       toilDir=toilDir, chunkSize=500000, overlapSize=10000,
                                       targetSequenceFiles=[ seqFile2 ])
                    logger.info("Ran cactus_blast okay")
                    logger.critical("Comparing cactus_blast and naive blast; using mode: %s" % blastMode)
                    checkCigar(self.tempOutputFile)
                    checkCigar(self.tempOutputFile2)
                    compareResultsFile(self.tempOutputFile, self.tempOutputFile2)
Developer: benedictpaten, Project: cactus, Lines: 34, Source: blastTest.py

Example 5: main

def main():
   usg = "Usage: %prog [options]\n"
   parser = OptionParser(usage=usg)
   parser.add_option("-d", "--simList", dest="sim", help="List of simulation directories. Default: simulations.lst", default="simulations.lst")
   parser.add_option("-c", "--configStartFile", dest="config", help="cactus_workflow_config.xml", default="cactus_workflow_config.xml")
   parser.add_option("-o", "--outputDir", dest="outputDir", help="Directory for the outputs of the runs. Default: out", default="out/")
   parser.add_option("-m", "--simTrueMafDir", dest="simTrueMafDir", help="Directory for 'true' mafs of the simulations. Default: sim/", default="sim/")
   parser.add_option("-t", "--tree", dest="tree", help="Phylogeny tree of the species of interest, in Newick format.Default: tree", default="tree")
   parser.add_option("-s", "--species", dest="species", help="List of species in the order as they appear in the  Newick tree. Default: species.lst", default="species.lst")
   parser.add_option("-j", "--job", dest="jobFile", help="Job file containing command to run.", default=None)
   (options, args) = parser.parse_args()
   #Process options:
   options.outputDir = modify_dirname(options.outputDir)
   check_dir(options.outputDir)
   options.tree = getFirstLine(options.tree)
   #assert options.tree == ''
   options.species = getFirstLine(options.species).split()
   #assert len(options.species) == 0
   options.sim = getList(options.sim)
   #assert len(options.sim) == 0
   #options.config = getList(options.config)
   #assert len(options.config) == 0
   logger.info("Processed options\n")
   #Tuning
   cactusTuningWrapper = CactusTuningWrapper(options)
   cactusTuningWrapper.execute(options.jobFile)
Developer: benedictpaten, Project: cactusTools, Lines: 26, Source: cactus_tuning.py

Example 6: run

   def run(self):
      #--------------------------------------------
      #Run cactus & evaluations for each simulation
      #--------------------------------------------
      logger.info("CactusTuningSimulationsWrapper: going to issue cactus runs for all simulations for parameter %s\n" %(self.paraFile))
      simNum = 0
      for sim in self.options.sim:
         sim = modify_dirname(sim)
         simName = getRootDir(sim)
         
         #Get path to sequence file of each species
         sequenceFiles = " ".join([ os.path.join(sim, spc) for spc in self.options.species ])
         logger.info("Got sequence files: %s\n" % (sequenceFiles))

         #add child
         #self.addChildTarget(CactusWorkflowWrapper(sim, simNum, self.paraFile, self.outDir, sequenceFiles, self.options.tree))
         self.addChildTarget(CactusWorkflowWrapper(sim, simName, self.options.simTrueMafDir, self.paraFile, self.outDir, sequenceFiles, self.options.tree))
         logger.info("Added child CactusWorkflowWrapper for sim %s and confi %s\n" % (sim, self.paraFile))
         simNum += 1

      #----------------------------------------------------------------
      #Done running cactus & evaluations steps for all the simulations. 
      #Now Merge results & clean up.
      #----------------------------------------------------------------
      logger.info("Done running cactus & evaluations for parameter %s. Now merge results and clean up.\n" %(self.paraFile))
      self.setFollowOnTarget(CactusMergeResultsAndCleanup(simNum, self.outDir, self.options))
      logger.info("Added CactusMergeResultsAndCleanup as FollowOnTarget for %s\n" %(self.outDir))
Developer: benedictpaten, Project: cactusTools, Lines: 27, Source: cactus_tuning.py

Example 7: run

 def run(self):
     localTempDir = self.getLocalTempDir()
     i = 0
     localfiles = []
     for f in self.files:
         if not os.path.exists(f): #HACK
             continue
         localname = os.path.join(localTempDir, "%s%d.bam" %(os.path.basename(f).split('.')[0], i))
         system("scp -C %s %s" %(f, localname))
         localfiles.append(localname)
         i += 1
     mergeFile = os.path.join(localTempDir, "merge.bam")
     if len(localfiles) == 1:
         system("mv %s %s" %(localfiles[0], mergeFile))
     else:
         bamStr = " ".join(localfiles)
         logger.info("Merging bams...\n")
         mergeCmd = "samtools merge %s %s" %(mergeFile, bamStr)
         system( mergeCmd )
     
     sortPrefix = os.path.join(localTempDir, "mergeSorted")
     sortCmp = "samtools sort %s %s" %( mergeFile, sortPrefix )
     system( sortCmp )
     
     system( "cp %s.bam %s" %(sortPrefix, self.outdir) )
     #Get Snps info:
     self.setFollowOnTarget( Snp(self.outdir, self.options) )
Developer: ngannguyen, Project: referenceViz, Lines: 27, Source: mapReadsToRef.py

Example 8: runWorkflow_multipleExamples

def runWorkflow_multipleExamples(inputGenFunction,
                                 testNumber=1, 
                                 testRestrictions=(TestStatus.TEST_SHORT, TestStatus.TEST_MEDIUM, \
                                                   TestStatus.TEST_LONG, TestStatus.TEST_VERY_LONG,),
                               inverseTestRestrictions=False,
                               batchSystem="single_machine",
                               buildAvgs=False, buildReference=False,
                               buildReferenceSequence=False,
                               buildCactusPDF=False, buildAdjacencyPDF=False,
                               buildReferencePDF=False,
                               makeCactusTreeStats=False, makeMAFs=False,
                               configFile=None, buildJobTreeStats=False):
    """A wrapper to run a number of examples.
    """
    if (inverseTestRestrictions and TestStatus.getTestStatus() not in testRestrictions) or \
        (not inverseTestRestrictions and TestStatus.getTestStatus() in testRestrictions):
        for test in xrange(testNumber): 
            tempDir = getTempDirectory(os.getcwd())
            sequences, newickTreeString = inputGenFunction(regionNumber=test, tempDir=tempDir)
            runWorkflow_TestScript(sequences, newickTreeString,
                                   batchSystem=batchSystem,
                                   buildAvgs=buildAvgs, buildReference=buildReference, 
                                   buildCactusPDF=buildCactusPDF, buildAdjacencyPDF=buildAdjacencyPDF,
                                   makeCactusTreeStats=makeCactusTreeStats, makeMAFs=makeMAFs, configFile=configFile,
                                   buildJobTreeStats=buildJobTreeStats)
            system("rm -rf %s" % tempDir)
            logger.info("Finished random test %i" % test)
Developer: benedictpaten, Project: cactusTools, Lines: 27, Source: test.py

Example 9: run

    def run(self):
        geneFile = os.path.join(self.getLocalTempDir(), "refgene.bed")
        system("cp %s %s" %(self.geneFile, geneFile))

        system("cactus_genemapChain -c %s -o \"%s\" -s \"%s\" -g \"%s\"" \
                %(self.dbStr, self.outputFile, self.refSpecies, geneFile))
        logger.info("Done genemapChain for %s\n" %self.region)
Developer: benedictpaten, Project: cactusTools, Lines: 7, Source: cactus_runGenemap.py

Example 10: parseJobFile

def parseJobFile(absFileName):
    try:
        job = readJob(absFileName)
        return job
    except IOError:
        logger.info("Encountered error while parsing job file %s, so we will ignore it" % absFileName)
    return None
Developer: decarlin, Project: jobTree, Lines: 7, Source: jobTreeStatus.py

Example 11: main

def main():
    ##########################################
    #Construct the arguments.
    ##########################################

    parser = OptionParser()
 
    parser.add_option("--haplotypeSequences", dest="haplotypeSequences")
    parser.add_option("--newickTree", dest="newickTree")
    parser.add_option("--assembliesDir", dest="assembliesDir")
    parser.add_option("--outputDir", dest="outputDir")
    parser.add_option("--configFile", dest="configFile")
    parser.add_option("--minimumNsForScaffoldGap", dest="minimumNsForScaffoldGap")
    parser.add_option("--assemblyEventString", dest="assemblyEventString")
    parser.add_option("--haplotype1EventString", dest="haplotype1EventString")
    parser.add_option("--haplotype2EventString", dest="haplotype2EventString")
    parser.add_option("--contaminationEventString", dest="contaminationEventString")
    parser.add_option("--featureBedFiles", dest="featureBedFiles")
    parser.add_option("--geneBedFiles", dest="geneBedFiles")
    
    Stack.addJobTreeOptions(parser)

    options, args = parser.parse_args()
    setLoggingFromOptions(options)

    if len(args) != 0:
        raise RuntimeError("Unrecognised input arguments: %s" % " ".join(args))

    Stack(MakeAlignments(newickTree=options.newickTree, 
                         haplotypeSequences=options.haplotypeSequences.split(), 
                         assembliesDir=options.assembliesDir, 
                         outputDir=options.outputDir, 
                         configFile=options.configFile, 
                         options=options)).startJobTree(options)
    logger.info("Done with job tree")
Developer: benedictpaten, Project: assemblaScripts, Lines: 35, Source: pipeline.py

Example 12: run

 def run(self):
     logger.info("Preparing sequence for preprocessing")
     # chunk it up
     inChunkDirectory = makeSubDir(os.path.join(self.getGlobalTempDir(), "preprocessChunksIn"))
     inChunkList = [
         chunk
         for chunk in popenCatch(
             "cactus_blast_chunkSequences %s %i 0 %s %s"
             % (getLogLevelString(), self.prepOptions.chunkSize, inChunkDirectory, self.inSequencePath)
         ).split("\n")
         if chunk != ""
     ]
     outChunkDirectory = makeSubDir(os.path.join(self.getGlobalTempDir(), "preprocessChunksOut"))
     outChunkList = []
     # For each input chunk we create an output chunk, it is the output chunks that get concatenated together.
     for i in xrange(len(inChunkList)):
         outChunkList.append(os.path.join(outChunkDirectory, "chunk_%i" % i))
         # Calculate the number of chunks to use
         inChunkNumber = int(max(1, math.ceil(len(inChunkList) * self.prepOptions.proportionToSample)))
         assert inChunkNumber <= len(inChunkList) and inChunkNumber > 0
         # Now get the list of chunks flanking and including the current chunk
         j = max(0, i - inChunkNumber / 2)
         inChunks = inChunkList[j : j + inChunkNumber]
         if len(inChunks) < inChunkNumber:  # This logic is like making the list circular
             inChunks += inChunkList[: inChunkNumber - len(inChunks)]
         assert len(inChunks) == inChunkNumber
         self.addChildTarget(
             PreprocessChunk(
                 self.prepOptions, inChunks, float(inChunkNumber) / len(inChunkList), inChunkList[i], outChunkList[i]
             )
         )
     # follow on to merge chunks
     self.setFollowOnTarget(MergeChunks(self.prepOptions, outChunkList, self.outSequencePath))
Developer: benedictpaten, Project: cactus, Lines: 33, Source: cactus_preprocessor.py

Example 13: progressiveWithSubtreeRootFunction

    def progressiveWithSubtreeRootFunction(self, experimentFile, toilDir,
                                           batchSystem, buildAvgs,
                                           buildReference,
                                           buildHal,
                                           buildFasta,
                                           toilStats):
        """Choose an arbitrary subtree from the larger species tree to run the
        alignment on. This function is necessary to keep
        runWorkflow_multipleExamples general (specifying a subtree
        root doesn't make sense for runCactusWorkflow).
        """
        # Get valid internal nodes that are the root of the subtree we
        # want to align
        expWrapper = ExperimentWrapper(ET.parse(experimentFile).getroot())
        tree = expWrapper.getTree()
        validNodes = []
        for node in tree.postOrderTraversal():
            if tree.hasName(node) and not tree.isLeaf(node):
                validNodes.append(tree.getName(node))

        # Choose a random valid subtree root (NB: the entire species
        # tree is a valid subtree)
        subtreeRoot = random.choice(validNodes)
        logger.info("Chose subtree root %s to test from species tree "
                    "%s" % (subtreeRoot, NXNewick().writeString(tree)))

        self.progressiveFunction(experimentFile, toilDir,
                                 batchSystem, buildAvgs,
                                 buildReference,
                                 buildHal,
                                 buildFasta,
                                 toilStats, subtreeRoot)
Developer: benedictpaten, Project: cactus, Lines: 32, Source: cactus_progressiveTest.py

Example 14: run

    def run(self):
        # filter by size
        starttime = time.time()
        opts = self.opts
        clones = pickle.load(gzip.open(self.samplefile, 'rb'))
        if (opts.mincount > 1 or opts.maxcount > 0 or opts.minfreq > 0 or
            opts.maxfreq > 0):
            clones = filter_by_size(clones, opts.mincount, opts.maxcount,
                                    opts.minfreq, opts.maxfreq)
        msg = ("Filter_by_size for file %s done in %.4f s" %
                                 (self.samplefile, time.time() - starttime))
        logger.info(msg)
        starttime = time.time()

        # filter by status
        pclones = filter_by_status(clones, True)
        npclones = filter_by_status(clones, False)
        
        filename = os.path.basename(self.samplefile)
        if pclones:
            pdir = os.path.join(self.outdir, "productive", self.name)
            system("mkdir -p %s" % pdir)
            pfile = os.path.join(pdir, filename)
            pickle.dump(pclones, gzip.open(pfile, "wb"))
        if npclones:    
            npdir = os.path.join(self.outdir, "non_productive", self.name)
            system("mkdir -p %s" % npdir)
            npfile = os.path.join(npdir, filename)
            pickle.dump(npclones, gzip.open(npfile, "wb"))
        msg = ("Filter_by_status for file %s done in %.4f s" %
                                 (self.samplefile, time.time() - starttime))
        logger.info(msg)
        self.setFollowOnTarget(libcommon.CleanupFile(self.samplefile))
Developer: ngannguyen, Project: aimseqtk, Lines: 33, Source: sample.py

Example 15: issueJobs

 def issueJobs(self, jobCommands):
     """Issues parasol with job commands.
     """
     issuedJobs = {}
     for jobCommand, memory, cpu, logFile in jobCommands:
         assert memory != None
         assert cpu != None
         assert logFile != None
         pattern = re.compile("your job ([0-9]+).*")
         command = "parasol -verbose -ram=%i -cpu=%i -results=%s add job '%s'" % (memory, cpu, self.parasolResultsFile, jobCommand)
         while True:
             #time.sleep(0.1) #Sleep to let parasol catch up #Apparently unnecessary
             popenParasolCommand(command, self.scratchFile)
             fileHandle = open(self.scratchFile, 'r')
             line = fileHandle.readline()
             fileHandle.close()
             match = pattern.match(line)
             if match != None: #This is because parasol add job will return success, even if the job was not properly issued!
                 break
             else:
                 logger.info("We failed to properly add the job, we will try again after a sleep")
                 time.sleep(5)
         jobID = int(match.group(1))
         logger.debug("Got the job id: %s from line: %s" % (jobID, line))
         assert jobID not in issuedJobs.keys()
         issuedJobs[jobID] = jobCommand
         logger.debug("Issued the job command: %s with job id: %i " % (command, jobID))
     return issuedJobs
Developer: decarlin, Project: jobTree, Lines: 28, Source: parasol.py


Note: The sonLib.bioio.logger.info function examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Please follow the corresponding project's License when distributing or using the code, and do not reproduce this article without permission.