

Python Pipeline Class Code Examples

This article collects typical usage examples of the Pipeline class in Python. If you are wondering how the Python Pipeline class is used in practice, or are looking for concrete examples, the selected code samples below may help.


The following shows 15 code examples of the Pipeline class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.

Example 1: process_pxd

def process_pxd(self, source_desc, scope, module_name):
    import Pipeline
    if isinstance(source_desc, FileSourceDescriptor) and source_desc._file_type == 'pyx':
        # A full .pyx source is available: run it through the pyx-as-pxd pipeline.
        source = CompilationSource(source_desc, module_name, os.getcwd())
        result_sink = create_default_resultobj(source, self.options)
        pipeline = Pipeline.create_pyx_as_pxd_pipeline(self, result_sink)
        result = Pipeline.run_pipeline(pipeline, source)
    else:
        pipeline = Pipeline.create_pxd_pipeline(self, scope, module_name)
        result = Pipeline.run_pipeline(pipeline, source_desc)
    return result
Developer: dtcaciuc | Project: cython | Lines: 11 | Source: Main.py
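As Examples 2 and 3 below show, Pipeline.run_pipeline returns an (error, data) pair, so the value returned by process_pxd is that tuple. A minimal, hypothetical sketch of how a caller might unpack it (the context variable and the error handling are assumptions, not part of the excerpt above):

# Hypothetical caller; `context` is assumed to be the compilation context
# object (the class that defines process_pxd above).
err, pxd_data = context.process_pxd(source_desc, scope, module_name)
if err:
    raise err  # the pipeline reported a compilation error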

Example 2: get_tree

    def get_tree(self, entries_only=False, cython_scope=None):
        from AnalysedTreeTransforms import AutoTestDictTransform
        # The AutoTestDictTransform creates the statement "__test__ = {}",
        # which when copied into the main ModuleNode overwrites
        # any __test__ in user code; not desired
        excludes = [AutoTestDictTransform]

        import Pipeline, ParseTreeTransforms
        context = CythonUtilityCodeContext(self.name)
        context.prefix = self.prefix
        context.cython_scope = cython_scope
        #context = StringParseContext(self.name)
        tree = parse_from_strings(self.name, self.impl, context=context,
                                  allow_struct_enum_decorator=True)
        pipeline = Pipeline.create_pipeline(context, 'pyx', exclude_classes=excludes)

        if entries_only:
            p = []
            for t in pipeline:
                p.append(t)
                if isinstance(t, ParseTreeTransforms.AnalyseDeclarationsTransform):
                    break

            pipeline = p

        transform = ParseTreeTransforms.CnameDirectivesTransform(context)
        # InterpretCompilerDirectives already does a cdef declarator check
        #before = ParseTreeTransforms.DecoratorTransform
        before = ParseTreeTransforms.InterpretCompilerDirectives
        pipeline = Pipeline.insert_into_pipeline(pipeline, transform,
                                                 before=before)

        if self.from_scope:
            def scope_transform(module_node):
                module_node.scope.merge_in(self.from_scope)
                return module_node

            transform = ParseTreeTransforms.AnalyseDeclarationsTransform
            pipeline = Pipeline.insert_into_pipeline(pipeline, scope_transform,
                                                     before=transform)

        (err, tree) = Pipeline.run_pipeline(pipeline, tree, printtree=False)
        assert not err, err
        return tree
Developer: MarkLodato | Project: cython | Lines: 44 | Source: UtilityCode.py
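The excerpt above splices extra phases into an existing pipeline with Pipeline.insert_into_pipeline(pipeline, transform, before=...). A hypothetical, stripped-down sketch of the same pattern with a trivial phase (print_tree is an illustration only; `context` is assumed to be a compilation context like the one built above):

import Pipeline, ParseTreeTransforms

def print_tree(module_node):
    # Any callable that takes the tree and returns it can act as a pipeline phase.
    print(module_node)
    return module_node

pipeline = Pipeline.create_pipeline(context, 'pyx')
pipeline = Pipeline.insert_into_pipeline(
    pipeline, print_tree,
    before=ParseTreeTransforms.AnalyseDeclarationsTransform)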

Example 3: run_pipeline

def run_pipeline(source, options, full_module_name=None, context=None):
    import Pipeline

    source_ext = os.path.splitext(source)[1]
    options.configure_language_defaults(source_ext[1:]) # py/pyx
    if context is None:
        context = options.create_context()

    # Set up source object
    cwd = os.getcwd()
    abs_path = os.path.abspath(source)
    full_module_name = full_module_name or context.extract_module_name(source, options)

    if options.relative_path_in_code_position_comments:
        rel_path = full_module_name.replace('.', os.sep) + source_ext
        if not abs_path.endswith(rel_path):
            rel_path = source # safety measure to prevent printing incorrect paths
    else:
        rel_path = abs_path
    source_desc = FileSourceDescriptor(abs_path, rel_path)
    source = CompilationSource(source_desc, full_module_name, cwd)

    # Set up result object
    result = create_default_resultobj(source, options)

    if options.annotate is None:
        # By default, decide based on whether an html file already exists.
        html_filename = os.path.splitext(result.c_file)[0] + ".html"
        if os.path.exists(html_filename):
            line = codecs.open(html_filename, "r", encoding="UTF-8").readline()
            if line.startswith(u'<!-- Generated by Cython'):
                options.annotate = True

    # Get pipeline
    if source_ext.lower() == '.py' or not source_ext:
        pipeline = Pipeline.create_py_pipeline(context, options, result)
    else:
        pipeline = Pipeline.create_pyx_pipeline(context, options, result)

    context.setup_errors(options, result)
    err, enddata = Pipeline.run_pipeline(pipeline, source)
    context.teardown_errors(err, options, result)
    return result
Developer: 87 | Project: cython | Lines: 43 | Source: Main.py
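A hypothetical top-level invocation of run_pipeline; the import path and the CompilationOptions settings are assumptions based on this version of Cython, and result.c_file is the output path prepared by create_default_resultobj:

from Cython.Compiler.Main import CompilationOptions, run_pipeline

options = CompilationOptions(annotate=False)    # assumed option set
result = run_pipeline("mymodule.pyx", options)  # compile a .pyx source
print(result.c_file)                            # path of the generated C file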

Example 4: filterAlignments

def filterAlignments(infile, outfile):
    '''
    filter alignments to retain only those that
    have > 99% identity to the reference
    '''
    to_cluster = True
    statement = '''delta-filter -q -i 99 %(infile)s > %(outfile)s'''
    P.run()
Developer: BioinformaticsArchive | Project: cgat | Lines: 8 | Source: pipeline_metagenomebenchmark.py
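Examples 4 through 12 all follow the same CGAT pipeline idiom: a shell command template is assigned to the local variable statement, and P.run() fills the %(name)s placeholders from the caller's local variables (plus the pipeline's PARAMS) before executing the command, submitting it to the cluster when to_cluster is True. The following is a rough, simplified stand-in for that mechanism, an assumption about how P.run() behaves rather than the actual CGAT implementation:

import subprocess
import sys

def run_statement(**extra_params):
    # Simplified stand-in for P.run(): read `statement` and the other local
    # variables from the calling frame, fill the %(name)s placeholders, and
    # execute the resulting command in a shell.
    params = dict(sys._getframe(1).f_locals)
    params.update(extra_params)
    command = params["statement"] % params
    subprocess.check_call(command, shell=True)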

Example 5: buildAlignmentCoordinates

def buildAlignmentCoordinates(infile, outfile):
    '''
    build coordinates file from alignment delta
    file
    '''
    to_cluster = True
    statement = '''show-coords -T -r %(infile)s > %(outfile)s'''
    P.run()
Developer: BioinformaticsArchive | Project: cgat | Lines: 8 | Source: pipeline_metagenomebenchmark.py

Example 6: downloadSCOP

def downloadSCOP( infile, outfile ):
    '''download the latest scop sequence set (< 40% identical)'''
    
    statement = '''
    wget -O %(outfile)s "http://astral.berkeley.edu/seq.cgi?get=scopdom-seqres-gd-sel-gs-bib;ver=1.75;item=seqs;cut=40"
    '''
    
    P.run()
Developer: AndreasHeger | Project: adda | Lines: 8 | Source: pairsdb.py

Example 7: buildMask

def buildMask( infile, outfile ):
    '''build seg mask for protein sequences.'''

    to_cluster = True

    statement = '''
    segmasker -in %(infile)s
              -infmt fasta 
              -parse_seqids 
              -outfmt maskinfo_asn1_bin 
              -out %(outfile)s
    >& %(outfile)s.log
    '''
    P.run()
Developer: AndreasHeger | Project: adda | Lines: 14 | Source: pairsdb.py

Example 8: createAlignmentBedFiles

def createAlignmentBedFiles(infile, outfile):
    '''
    create bed files - the intervals are with respect to the 
    reference genome
    intervals are merged to form a non redundant alignment set
    '''
    # has to be output from show coords in tab format
    # also have to be sorted for mergeBed
    to_cluster = True
    statement = '''cat %(infile)s
                   | python %(scriptsdir)s/nucmer2bed.py -t bed4 --log=%(outfile)s.log 
                   | mergeBed -i - 
                   | gzip > %(outfile)s'''
    P.run()
Developer: BioinformaticsArchive | Project: cgat | Lines: 14 | Source: pipeline_metagenomebenchmark.py

Example 9: alignContigsToReference

def alignContigsToReference(outfile, param1, param2):
    '''
    align the contigs to the reference genomes
    using nucmer
    '''
    to_cluster = True

    reffile, contigfile = param1, param2
    pattern = P.snip(os.path.basename(outfile), ".delta")
    statement = '''nucmer -p %(pattern)s %(reffile)s %(contigfile)s'''
    P.run()
    outf = os.path.basename(outfile)
    statement = '''mv %(outf)s alignment.dir'''
    P.run()
Developer: BioinformaticsArchive | Project: cgat | Lines: 14 | Source: pipeline_metagenomebenchmark.py

Example 10: splitFasta

def splitFasta( infiles, outfiles):
    '''split fasta file.'''
    
    infile = infiles[0]
    chunk_size = 500
    statement = '''
    cat %(infile)s
    | perl /ifs/devel/andreas/cgat/split_fasta.pl 
       -a blast.dir/chunk_%%s.fasta
       %(chunk_size)i 
    > split.log
    '''
    
    P.run()
Developer: AndreasHeger | Project: adda | Lines: 14 | Source: pairsdb.py
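Note the doubled %%s in the statement above: because P.run() fills the template with %-interpolation, %% escapes to a single literal %, so the perl script receives the file pattern blast.dir/chunk_%s.fasta for naming its output chunks. A quick illustration of the escaping (the statement is trimmed to the relevant parts):

statement = "split_fasta.pl -a blast.dir/chunk_%%s.fasta %(chunk_size)i"
print(statement % {"chunk_size": 500})
# -> split_fasta.pl -a blast.dir/chunk_%s.fasta 500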

Example 11: downloadPFAM

def downloadPFAM( infile, outfiles ):
    '''download the latest PFAM domain sequence set'''
    
    outfile1, outfile2 = outfiles
    statement = '''
    wget -O %(outfile1)s "ftp://ftp.sanger.ac.uk/pub/databases/Pfam/current_release/Pfam-A.fasta.gz";
    '''

    P.run()

    statement = '''
    wget -O %(outfile2)s "ftp://ftp.sanger.ac.uk/pub/databases/Pfam/current_release/Pfam-A.seed.gz";
    '''

    P.run()
Developer: AndreasHeger | Project: adda | Lines: 15 | Source: pairsdb.py

Example 12: prepareDatabase

def prepareDatabase( infiles, outfile ):
    '''prepare the blast database.'''

    fastafile, maskfile = infiles
    to_cluster = True
    statement = '''
    makeblastdb 
            -in %(fastafile)s
            -dbtype prot 
            -parse_seqids
            -mask_data %(maskfile)s
            -out nrdb50
            -title "Uniref Protein Database"
    >& %(outfile)s
    '''
    P.run()
Developer: AndreasHeger | Project: adda | Lines: 16 | Source: pairsdb.py

Example 13: loadCoverageData

def loadCoverageData(infile, outfile):
    '''
    load coverage data into database
    '''
    to_cluster = True
    tablename = P.toTable(outfile)
    database = os.path.join(PARAMS["results_resultsdir"], PARAMS["database"])
    dbh = sqlite3.connect(database)
    cc = dbh.cursor()
    temp = P.getTempFile()
    temp.write("contig_id\tacoverage\n")
    for data in cc.execute("""SELECT contig_id, AVG(coverage) FROM %s GROUP BY contig_id""" % tablename).fetchall():
        # AVG(coverage) comes back as a float, so convert fields to str before joining.
        temp.write("\t".join(map(str, data)) + "\n")
    temp.close()
    P.load(temp.name, outfile)
    os.unlink(temp.name)
Developer: BioinformaticsArchive | Project: cgat | Lines: 16 | Source: pipeline_metagenomebenchmark.py
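The P.getTempFile() / P.load() pair above writes a tab-separated table to a temporary file and bulk-loads it into the results database. A rough standard-library sketch of the write step only (the function name and signature are illustrative; the load step itself is CGAT-specific and omitted):

import sqlite3

def dump_average_coverage(database, tablename, outpath):
    # Write the per-contig average coverage as a tab-separated file.
    dbh = sqlite3.connect(database)
    with open(outpath, "w") as out:
        out.write("contig_id\tacoverage\n")
        query = "SELECT contig_id, AVG(coverage) FROM %s GROUP BY contig_id" % tablename
        for contig_id, avg_cov in dbh.execute(query):
            out.write("%s\t%f\n" % (contig_id, avg_cov))
    dbh.close()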

Example 14: removeBlastUnfinished

def removeBlastUnfinished( infiles, outfile ):
    '''remove aborted blast runs.'''

    deleted = 0

    for infile in infiles:
        line = IOTools.getLastLine( infile )
        
        if not re.search( "job finished", line ):
            fn = infile[:-len(".log")]
            if os.path.exists( fn ):
                P.info("deleting %s" % fn )
                os.unlink( fn )
                deleted += 1

    P.info("deleted %i files" % deleted)
Developer: AndreasHeger | Project: adda | Lines: 16 | Source: pairsdb.py

Example 15: checkBlastRuns

def checkBlastRuns( infiles, outfile ):
    '''check if output files are complete.
    '''
    
    outf = IOTools.openFile( outfile, "w" )

    outf.write( "chunkid\tquery_first\tquery_last\tfound_first\tfound_last\tfound_total\tfound_results\thas_finished\tattempts\t%s\n" %\
                    "\t".join(Logfile.RuntimeInformation._fields))

    for infile in infiles:
        E.debug( "processing %s" % infile)
        chunkid = P.snip( os.path.basename( infile ), ".blast.gz" )
        logfile = infile + ".log"
        chunkfile = P.snip( infile, ".blast.gz" ) + ".fasta"

        with IOTools.openFile( infile ) as inf:
            l = inf.readline()
            ids = set()
            total_results = 0
            for l in inf:
                if l.startswith("#//"): continue
                ids.add( int(l.split("\t")[0] ) )
                total_results += 1
            found_first = min(ids)
            found_last = max(ids)
            found_total = len(ids)

        l = IOTools.getFirstLine( chunkfile )
        query_first = l[1:-1]
        l2 = IOTools.getLastLine( chunkfile, nlines = 2).split("\n")
        query_last = l2[0][1:]

        logresults = Logfile.parse( logfile )
        
        outf.write( "\t".join( map(str, (\
                        chunkid, query_first, query_last,
                        found_first, found_last,
                        found_total, total_results,
                        logresults[-1].has_finished,
                        len(logresults),
                        "\t".join( map(str, logresults[-1]) ) ) ) ) + "\n" )
        
    outf.close()
Developer: AndreasHeger | Project: adda | Lines: 43 | Source: pairsdb.py
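The check above relies on a few IOTools helpers (openFile, getFirstLine, getLastLine) that also handle gzipped files transparently. Minimal plain-text stand-ins, written as an assumption about their behaviour rather than the real CGAT implementations:

def get_first_line(filename):
    # Return the first line of a plain-text file.
    with open(filename) as inf:
        return inf.readline()

def get_last_line(filename, nlines=1):
    # Return the last `nlines` lines joined into a single string.
    with open(filename) as inf:
        lines = inf.readlines()
    return "".join(lines[-nlines:])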


Note: The Pipeline class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please consult the corresponding project's license before distributing or using the code. Do not reproduce without permission.