

Python FileList.extend method code examples

This article collects typical usage examples of the Python method pycbc.workflow.core.FileList.extend. If you are wondering what FileList.extend does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the class it belongs to, pycbc.workflow.core.FileList.


The following presents 7 code examples of the FileList.extend method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
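
Before looking at the examples, here is a minimal sketch of what FileList.extend does (a sketch assuming a working PyCBC installation, not taken from the projects below): FileList is used like a Python list of File objects, so extend appends every entry from another FileList, or any iterable of File objects, in place.

from pycbc.workflow.core import FileList

combined = FileList([])   # empty FileList; real workflows fill it with File objects
extra = FileList([])      # another FileList, e.g. the outputs of one job
combined.extend(extra)    # appends every entry of 'extra' in place, like list.extend
print(len(combined))      # 0 here, since both FileLists are empty in this sketch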

Example 1: make_gating_node

# Required import: from pycbc.workflow.core import FileList [as alias]
# Or: from pycbc.workflow.core.FileList import extend [as alias]
def make_gating_node(workflow, datafind_files, outdir=None, tags=None):
    '''
    Generate jobs for autogating the data for PyGRB runs.

    Parameters
    ----------
    workflow: pycbc.workflow.core.Workflow
        An instanced class that manages the constructed workflow.
    datafind_files : pycbc.workflow.core.FileList
        A FileList containing the frame files to be gated.
    outdir : string
        Path of the output directory
    tags : list of strings
        If given, these tags are used to uniquely name and identify output
        files that would be produced in multiple calls to this function.

    Returns
    --------
    condition_strain_nodes : list
        List containing the pycbc.workflow.core.Node objects representing the
        autogating jobs.
    condition_strain_outs : pycbc.workflow.core.FileList
        FileList containing the pycbc.workflow.core.File objects representing
        the gated frame files.
    '''

    cp = workflow.cp
    if tags is None:
        tags = []
    
    condition_strain_class = select_generic_executable(workflow,
                                                       "condition_strain")
    condition_strain_nodes = []
    condition_strain_outs = FileList([])
    for ifo in workflow.ifos:
        input_files = FileList([datafind_file for datafind_file in \
                                datafind_files if datafind_file.ifo == ifo])
        condition_strain_jobs = condition_strain_class(cp, "condition_strain",
                ifo=ifo, out_dir=outdir, tags=tags)
        condition_strain_node, condition_strain_out = \
                condition_strain_jobs.create_node(input_files, tags=tags)
        condition_strain_nodes.append(condition_strain_node)
        condition_strain_outs.extend(FileList([condition_strain_out]))

    return condition_strain_nodes, condition_strain_outs
Author: bema-ligo, Project: pycbc, Lines: 47, Source: grb_utils.py
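
A minimal, hypothetical usage sketch based on the docstring above (not part of the original source): the caller is expected to add the returned nodes to the workflow itself, and the "gating" output directory and "PYGRB" tag are illustrative.

# Hypothetical call site: 'workflow' and 'datafind_files' are assumed to exist
# already, e.g. from earlier pycbc.workflow setup and datafind steps.
gating_nodes, gated_frames = make_gating_node(workflow, datafind_files,
                                              outdir="gating", tags=["PYGRB"])
for node in gating_nodes:
    workflow.add_node(node)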

Example 2: setup_minifollowups

# Required import: from pycbc.workflow.core import FileList [as alias]
# Or: from pycbc.workflow.core.FileList import extend [as alias]
def setup_minifollowups(workflow, out_dir, frame_files,
                             coinc_file, tmpltbank_file, data_type, tags=None):
    ''' This performs a series of followup jobs on the num_events loudest
    events.
    '''

    logging.info('Entering minifollowups module')

    if tags is None:
        tags = []

    # create a FileList that will contain all output files
    output_filelist = FileList([])

    # check if minifollowups section exists
    # if not then do not do add minifollowup jobs to the workflow
    if not workflow.cp.has_section('workflow-minifollowups'):
        logging.info('There is no [workflow-minifollowups] section in configuration file')
        logging.info('Leaving minifollowups')
        return output_filelist

    # loop over number of loudest events to be followed up
    num_events = int(workflow.cp.get_opt_tags('workflow-minifollowups', 'num-events', ''))
    for num_event in range(num_events):

        # increment by 1 for human readability
        num_event += 1

        # get output directory for this event
        tag_str = '_'.join(tags)
        output_dir = out_dir['result/loudest_event_%d_of_%d_%s'%(num_event, num_events, tag_str)]

        # make a pycbc_mf_table node for this event
        table_exe = MinifollowupsTableExecutable(workflow.cp, 'mf_table',
                        workflow.ifo_string, output_dir, tags=tags)
        table_node = table_exe.create_node(workflow.analysis_time, coinc_file,
                        tmpltbank_file, data_type, num_event)
        workflow.add_node(table_node)
        output_filelist.extend(table_node.output_files)

    logging.info('Leaving minifollowups module')

    return output_filelist
Author: alex-nielsen, Project: pycbc, Lines: 44, Source: minifollowups.py

Example 3: get_coh_PTF_files

# Required import: from pycbc.workflow.core import FileList [as alias]
# Or: from pycbc.workflow.core.FileList import extend [as alias]
def get_coh_PTF_files(cp, ifos, run_dir, bank_veto=False, summary_files=False):
    """
    Retrieve files needed to run coh_PTF jobs within a PyGRB workflow

    Parameters
    ----------
    cp : pycbc.workflow.configuration.WorkflowConfigParser object
        The parsed configuration options of a pycbc.workflow.core.Workflow.
    ifos : str
        String containing the analysis interferometer IDs.
    run_dir : str
        The run directory, destination for retrieved files.
    bank_veto : Boolean
        If true, will retrieve the bank_veto_bank.xml file.
    summary_files : Boolean
        If true, will retrieve the summary page style files.

    Returns
    -------
    file_list : pycbc.workflow.FileList object
        A FileList containing the retrieved files.
    """
    if os.getenv("LAL_SRC") is None:
        raise ValueError("The environment variable LAL_SRC must be set to a "
                         "location containing the file lalsuite.git")
    else:
        lalDir = os.getenv("LAL_SRC")
        sci_seg = segments.segment(int(cp.get("workflow", "start-time")),
                                   int(cp.get("workflow", "end-time")))
        file_list = FileList([])

        # Bank veto
        if bank_veto:
            shutil.copy("%s/lalapps/src/ring/coh_PTF_config_files/" \
                        "bank_veto_bank.xml" % lalDir, "%s" % run_dir)
            bank_veto_url = "file://localhost%s/bank_veto_bank.xml" % run_dir
            bank_veto = File(ifos, "bank_veto_bank", sci_seg,
                             file_url=bank_veto_url)
            bank_veto.PFN(bank_veto.cache_entry.path, site="local")
            file_list.extend(FileList([bank_veto]))

        if summary_files:
            # summary.js file
            shutil.copy("%s/lalapps/src/ring/coh_PTF_config_files/" \
                        "coh_PTF_html_summary.js" % lalDir, "%s" % run_dir)
            summary_js_url = "file://localhost%s/coh_PTF_html_summary.js" \
                             % run_dir
            summary_js = File(ifos, "coh_PTF_html_summary_js", sci_seg,
                              file_url=summary_js_url)
            summary_js.PFN(summary_js.cache_entry.path, site="local")
            file_list.extend(FileList([summary_js]))

            # summary.css file
            shutil.copy("%s/lalapps/src/ring/coh_PTF_config_files/" \
                        "coh_PTF_html_summary.css" % lalDir, "%s" % run_dir)
            summary_css_url = "file://localhost%s/coh_PTF_html_summary.css" \
                              % run_dir
            summary_css = File(ifos, "coh_PTF_html_summary_css", sci_seg,
                               file_url=summary_css_url)
            summary_css.PFN(summary_css.cache_entry.path, site="local")
            file_list.extend(FileList([summary_css]))

        return file_list
Author: RorySmith, Project: pycbc, Lines: 69, Source: grb_utils.py

Example 4: setup_postproc_coh_PTF_workflow

# Required import: from pycbc.workflow.core import FileList [as alias]
# Or: from pycbc.workflow.core.FileList import extend [as alias]
def setup_postproc_coh_PTF_workflow(workflow, trig_files, trig_cache,
                                    inj_trig_files, inj_files, inj_trig_caches,
                                    inj_caches, config_file, output_dir,
                                    html_dir, segment_dir, ifos, inj_tags=[],
                                    tags=[]):
    """
    This module sets up the post-processing stage in the workflow, using a
    coh_PTF style set up. This consists of running trig_combiner to find
    coherent triggers, and injfinder to look for injections. It then runs
    a horizon_dist job, trig_cluster to cluster triggers, and injcombiner to
    calculate injection statistics. Finally, efficiency and sbv_plotter jobs
    calculate efficiency and signal based veto statistics and make plots.
    
    Parameters
    ----------
    workflow : pycbc.workflow.core.Workflow
        The Workflow instance that the jobs will be added to.
    trig_files : pycbc.workflow.core.FileList
        A FileList containing the combined databases.

    Returns
    -------
    """
    cp = workflow.cp
    full_segment = trig_files[0].segment
    trig_name = cp.get("workflow", "trigger-name")
    grb_string = "GRB" + trig_name
    num_trials = int(cp.get("trig_combiner", "num-trials"))

    pp_outs = FileList([])
    pp_nodes = []

    # Set up needed exe classes
    trig_combiner_exe = os.path.basename(cp.get("executables",
                                                "trig_combiner"))
    trig_combiner_class = select_generic_executable(workflow, "trig_combiner")

    trig_cluster_exe = os.path.basename(cp.get("executables", "trig_cluster"))
    trig_cluster_class = select_generic_executable(workflow, "trig_cluster")


    sbv_plotter_exe = os.path.basename(cp.get("executables", "sbv_plotter"))
    sbv_plotter_class = select_generic_executable(workflow, "sbv_plotter")
    
    efficiency_exe = os.path.basename(cp.get("executables", "efficiency"))
    efficiency_class = select_generic_executable(workflow, "efficiency")
    """
    horizon_dist_exe = os.path.basename(cp.get("executables",
                                               "horizon_dist"))
    horizon_dist_class = select_generic_executable(workflow,
                                                   "horizon_dist")
    """
    html_summary_exe = os.path.basename(cp.get("executables", "html_summary"))
    html_summary_class = select_generic_executable(workflow, "html_summary")

    # Set up trig_combiner job
    trig_combiner_out_tags = ["OFFSOURCE", "ONSOURCE", "ALL_TIMES"]
    trig_combiner_jobs = trig_combiner_class(cp, "trig_combiner", ifo=ifos, 
                                             out_dir=output_dir, tags=tags)
    trig_combiner_node, trig_combiner_outs = trig_combiner_jobs.create_node(\
            trig_files, segment_dir, out_tags=trig_combiner_out_tags,
            tags=tags)
    pp_nodes.append(trig_combiner_node)
    workflow.add_node(trig_combiner_node)
    pp_outs.extend(trig_combiner_outs)

    # Initialise trig_cluster class
    trig_cluster_outs = FileList([])
    trig_cluster_jobs = trig_cluster_class(cp, "trig_cluster", ifo=ifos,
                                           out_dir=output_dir, tags=tags)

    # Set up injfinder jobs
    if cp.has_section("workflow-injections"):
        injfinder_nodes = []
        injcombiner_parent_nodes = []

        injfinder_exe = os.path.basename(cp.get("executables", "injfinder"))
        injfinder_class = select_generic_executable(workflow, "injfinder")
        injfinder_jobs = injfinder_class(cp, "injfinder", ifo=ifos,
                                         out_dir=output_dir, tags=tags)

        injcombiner_exe = os.path.basename(cp.get("executables",
                                                  "injcombiner"))
        injcombiner_class = select_generic_executable(workflow, "injcombiner")
        injcombiner_jobs = injcombiner_class(cp, "injcombiner", ifo=ifos,
                                             out_dir=output_dir, tags=tags)

        injfinder_outs = FileList([])
        for inj_tag in inj_tags:
            triggers = FileList([file for file in inj_trig_files \
                                 if inj_tag in file.tag_str])
            injections = FileList([file for file in inj_files \
                                   if inj_tag in file.tag_str])
            trig_cache = [file for file in inj_trig_caches \
                          if inj_tag in file.tag_str][0]
            inj_cache = [file for file in inj_caches \
                         if inj_tag in file.tag_str][0]
            injfinder_node, curr_outs = injfinder_jobs.create_node(\
                    triggers, injections, segment_dir, tags=[inj_tag])
            injfinder_nodes.append(injfinder_node)
            pp_nodes.append(injfinder_node)
#.........part of the code omitted here.........
Author: alex-nielsen, Project: pycbc, Lines: 103, Source: postprocessing_cohptf.py

Example 5: setup_coincidence_workflow_ligolw_thinca

# Required import: from pycbc.workflow.core import FileList [as alias]
# Or: from pycbc.workflow.core.FileList import extend [as alias]

#.........part of the code omitted here.........
        
            # Determine segments to accept coincidences.
            # If the cache is not the first or last in the timeseries, check
            # whether the two closest caches in the timeseries have matching
            # extents. If they do, the caches are adjacent, so use the time
            # where they meet as a bound for accepting coincidences. If they
            # are not adjacent, then there is no bound for accepting
            # coincidences.
            coincStart, coincEnd = None, None
            if idx and (cafe_cache.extent[0] == cafe_caches[idx-1].extent[1]):
                coincStart = cafe_cache.extent[0]
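            # idx + 1 - len(cafe_caches) is nonzero unless this is the last
            # cache in the list.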
            if idx + 1 - len(cafe_caches) and \
                        (cafe_cache.extent[1] == cafe_caches[idx+1].extent[0]):
                coincEnd = cafe_cache.extent[1]
            coincSegment = (coincStart, coincEnd)
        
            # Need to create a list of the File(s) contained in the cache.
            # Assume that if we have partitioned input then if *one* job in the
            # partitioned input is an input then *all* jobs will be.
            if not parallelize_split_input:
                inputTrigFiles = FileList([])
                for object in cafe_cache.objects:
                    inputTrigFiles.append(object.workflow_file)
        
                llw_files = inputTrigFiles + [dqSegFile] + [tisiOutFile]
        
                # Now we can create the nodes
                node = ligolwadd_job.create_node(cafe_cache.extent, llw_files)
                ligolwAddFile = node.output_files[0]
                ligolwAddOuts.append(ligolwAddFile)
                workflow.add_node(node)
                for category in veto_cats:
                    node = ligolwthinca_job[category].create_node(\
                                cafe_cache.extent, coincSegment, ligolwAddFile)
                    ligolwThincaOuts += \
                        node.output_files.find_output_without_tag('DIST_STATS')
                    ligolwThincaLikelihoodOuts += \
                           node.output_files.find_output_with_tag('DIST_STATS')
                    workflow.add_node(node)
                    if coinc_post_cluster:
                        node = cluster_job[category].create_node(\
                                       cafe_cache.extent, ligolwThincaOuts[-1])
                        ligolwClusterOuts += node.output_files
                        workflow.add_node(node)
            else:
                for key in inspiral_outs_dict.keys():
                    curr_tags = ["JOB%d" %(key)]
                    curr_list = inspiral_outs_dict[key]
                    inputTrigFiles = FileList([])
                    for object in cafe_cache.objects:
                        inputTrigFiles.append(
                                  curr_list[object.workflow_file.thinca_index])
        
                    llw_files = inputTrigFiles + [dqSegFile] + [tisiOutFile]

                    # Now we can create the nodes
                    node = ligolwadd_job.create_node(cafe_cache.extent,
                                                     llw_files, tags=curr_tags)
                    ligolwAddFile = node.output_files[0]
                    ligolwAddOuts.append(ligolwAddFile)
                    workflow.add_node(node)
                    if workflow.cp.has_option_tags("workflow-coincidence",
                          "coincidence-write-likelihood",curr_thinca_job_tags):
                        write_likelihood=True
                    else:
                        write_likelihood=False
                    for category in veto_cats:
                        node = ligolwthinca_job[category].create_node(\
                             cafe_cache.extent, coincSegment, ligolwAddFile,
                             tags=curr_tags, write_likelihood=write_likelihood)
                        ligolwThincaOuts += \
                               node.output_files.find_output_without_tag(\
                                                                  'DIST_STATS')
                        ligolwThincaLikelihoodOuts += \
                              node.output_files.find_output_with_tag(\
                                                                  'DIST_STATS')
                        workflow.add_node(node)
                        if coinc_post_cluster:
                            node = cluster_job[category].create_node(\
                                       cafe_cache.extent, ligolwThincaOuts[-1])
                            ligolwClusterOuts += node.output_files
                            workflow.add_node(node)

            other_returns = {}
            other_returns['LIGOLW_ADD'] = ligolwAddOuts
            other_returns['DIST_STATS'] = ligolwThincaLikelihoodOuts
        
            if coinc_post_cluster:
                main_return = ligolwClusterOuts
                other_returns['THINCA'] = ligolwThincaOuts
            else:
                main_return = ligolwThincaOuts
        
            logging.debug("Done")
            coinc_outs.extend(main_return)
            for key, file_list in other_returns.items():
                if key in other_outs:
                    other_outs[key].extend(other_returns[key])
                else:
                    other_outs[key] = other_returns[key]
    return coinc_outs, other_outs
Author: alex-nielsen, Project: pycbc, Lines: 104, Source: coincidence.py

Example 6: setup_postproc_coh_PTF_offline_workflow

# Required import: from pycbc.workflow.core import FileList [as alias]
# Or: from pycbc.workflow.core.FileList import extend [as alias]

#.........part of the code omitted here.........
    # Set up needed exe classes
    trig_combiner_class = select_generic_executable(workflow, "trig_combiner")

    trig_cluster_class = select_generic_executable(workflow, "trig_cluster")

    sbv_plotter_class = select_generic_executable(workflow, "sbv_plotter")
    
    efficiency_class = select_generic_executable(workflow, "efficiency")

    #horizon_dist_class = select_generic_executable(workflow, "horizon_dist")

    html_summary_class = select_generic_executable(workflow, "html_summary")

    # Set up injection jobs if desired
    if do_injections:
        workflow, injfinder_nodes, injfinder_outs, fm_cache, \
                injcombiner_nodes, injcombiner_outs, injcombiner_out_tags, \
                inj_sbv_plotter_parent_nodes, pp_nodes, pp_outs = \
                setup_coh_PTF_injections_pp(workflow, inj_trig_files,
                        inj_files, inj_trig_caches, inj_caches, pp_nodes,
                        pp_outs, inj_tags, output_dir, segment_dir, ifos,
                        tags=tags)

        # Initialise injection_efficiency class
        inj_efficiency_jobs = efficiency_class(cp, "inj_efficiency", ifo=ifos,
                                               out_dir=output_dir, tags=tags)

    # Set up main trig_combiner class and tags
    trig_combiner_out_tags = ["OFFSOURCE", "ONSOURCE", "ALL_TIMES"]
    slides = all("COHERENT_NO_INJECTIONS" in t.name for t in trig_files) and \
            cp.has_option_tag("inspiral", "do-short-slides",
                              "coherent_no_injections")
    if slides:
        trig_combiner_out_tags.extend(["ZEROLAG_OFF", "ZEROLAG_ALL"])
    
    trig_combiner_jobs = trig_combiner_class(cp, "trig_combiner", ifo=ifos, 
                                             out_dir=output_dir, tags=tags)

    # Do first stage of trig_combiner and trig_cluster jobs if desired
    if workflow.cp.has_option("workflow-postproc", "do-two-stage-clustering"):
        logging.info("Doing two-stage clustering.")
        trig_combiner_s1_jobs = trig_combiner_class(cp, "trig_combiner",
                ifo=ifos, out_dir=output_dir, tags=tags+["INTERMEDIATE"])

        num_stage_one_jobs = int(workflow.cp.get("workflow-postproc",
            "num-stage-one-cluster-jobs"))
        num_inputs_per_job = -(-len(trig_files) // num_stage_one_jobs)
        split_trig_files = (trig_files[p:p + num_inputs_per_job] for p in \
                            xrange(0, len(trig_files), num_inputs_per_job))
        trig_cluster_s1_jobs = trig_cluster_class(cp, "trig_cluster", ifo=ifos,
                out_dir=output_dir, tags=tags+["INTERMEDIATE"])
        trig_cluster_s1_nodes = []
        trig_cluster_s1_outs = FileList([])
        for j, s1_inputs in zip(range(num_stage_one_jobs), split_trig_files):
            trig_combiner_s1_node, trig_combiner_s1_outs = \
                    trig_combiner_s1_jobs.create_node(s1_inputs,
                            segment_dir, workflow.analysis_time,
                            out_tags=trig_combiner_out_tags, tags=tags+[str(j)])
            pp_nodes.append(trig_combiner_s1_node)
            workflow.add_node(trig_combiner_s1_node)

            unclust_file = [f for f in trig_combiner_s1_outs \
                            if "ALL_TIMES" in f.tag_str][0]
            trig_cluster_s1_node, curr_outs = trig_cluster_s1_jobs.create_node(\
                    unclust_file)
            trig_cluster_s1_outs.extend(curr_outs)
Author: ligo-cbc, Project: pycbc-pylal, Lines: 70, Source: pygrb_cohptf_pp.py

Example 7: setup_coh_PTF_injections_pp

# Required import: from pycbc.workflow.core import FileList [as alias]
# Or: from pycbc.workflow.core.FileList import extend [as alias]
def setup_coh_PTF_injections_pp(wf, inj_trigger_files, inj_files,
                                inj_trigger_caches, inj_caches,
                                pp_nodes, pp_outs, inj_tags, out_dir, seg_dir,
                                ifos, tags=None):
    """
    Set up post processing for injections
    """
    injfinder_nodes = []
    injcombiner_parent_nodes = []
    inj_sbv_plotter_parent_nodes = []
    full_segment = inj_trigger_files[0].segment

    injfinder_exe = os.path.basename(wf.cp.get("executables", "injfinder"))
    injfinder_class = select_generic_executable(wf, "injfinder")
    injfinder_jobs = injfinder_class(wf.cp, "injfinder", ifo=ifos,
                                     out_dir=out_dir, tags=tags)

    injcombiner_exe = os.path.basename(wf.cp.get("executables", "injcombiner"))
    injcombiner_class = select_generic_executable(wf, "injcombiner")
    injcombiner_jobs = injcombiner_class(wf.cp, "injcombiner", ifo=ifos,
                                         out_dir=out_dir, tags=tags)

    injfinder_outs = FileList([])
    for inj_tag in inj_tags:
        triggers = FileList([file for file in inj_trigger_files \
                             if inj_tag in file.tag_str])
        injections = FileList([file for file in inj_files \
                               if inj_tag in file.tag_str])
        trig_cache = [file for file in inj_trigger_caches \
                      if inj_tag in file.tag_str][0]
        inj_cache = [file for file in inj_caches \
                     if inj_tag in file.tag_str][0]
        injfinder_node, curr_outs = injfinder_jobs.create_node(\
                triggers, injections, seg_dir, tags=[inj_tag])
        injfinder_nodes.append(injfinder_node)
        pp_nodes.append(injfinder_node)
        wf.add_node(injfinder_node)
        injfinder_outs.extend(curr_outs)
        if "DETECTION" not in curr_outs[0].tagged_description:
            injcombiner_parent_nodes.append(injfinder_node)
        else:
            inj_sbv_plotter_parent_nodes.append(injfinder_node)

    pp_outs.extend(injfinder_outs)

    # Make injfinder output cache
    fm_cache = File(ifos, "foundmissed", full_segment,
                    extension="lcf", directory=out_dir)
    fm_cache.PFN(fm_cache.cache_entry.path, site="local")
    injfinder_outs.convert_to_lal_cache().tofile(\
            open(fm_cache.storage_path, "w"))
    pp_outs.extend(FileList([fm_cache]))

    # Set up injcombiner jobs
    injcombiner_outs = FileList([f for f in injfinder_outs \
                                 if "DETECTION" in f.tag_str])
    injcombiner_tags = [inj_tag for inj_tag in inj_tags \
                        if "DETECTION" not in inj_tag]
    injcombiner_out_tags = [i.tag_str.rsplit('_', 1)[0] for i in \
                            injcombiner_outs if "FOUND" in i.tag_str]
    injcombiner_nodes = []

    for injcombiner_tag in injcombiner_tags:
        max_inc = wf.cp.get_opt_tags("injections", "max-inc",
                                     [injcombiner_tag])
        inj_str = injcombiner_tag.replace("INJ", "")
        inputs = FileList([f for f in injfinder_outs \
                           if injcombiner_tag in f.tagged_description])
        injcombiner_node, curr_outs = injcombiner_jobs.create_node(\
                fm_cache, inputs, inj_str, max_inc, wf.analysis_time)
        injcombiner_nodes.append(injcombiner_node)
        injcombiner_out_tags.append("%s_FILTERED_%s"
                                    % (inj_str.split(max_inc)[0], max_inc))
        injcombiner_outs.extend(curr_outs)
        pp_outs.extend(curr_outs)
        pp_nodes.append(injcombiner_node)
        wf.add_node(injcombiner_node)
        for parent_node in injcombiner_parent_nodes:
            dep = dax.Dependency(parent=parent_node._dax_node,
                                 child=injcombiner_node._dax_node)
            wf._adag.addDependency(dep)

    return (wf, injfinder_nodes, injfinder_outs, fm_cache, injcombiner_nodes,
            injcombiner_outs, injcombiner_out_tags,
            inj_sbv_plotter_parent_nodes, pp_nodes, pp_outs)
Author: ligo-cbc, Project: pycbc-pylal, Lines: 87, Source: pygrb_cohptf_pp.py


Note: The pycbc.workflow.core.FileList.extend method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the corresponding project's License. Do not repost without permission.