当前位置: 首页>>代码示例>>Python>>正文


Python data_map.DataMap类代码示例

本文整理汇总了Python中lofarpipe.support.data_map.DataMap的典型用法代码示例。如果您正苦于以下问题:Python DataMap类的具体用法?Python DataMap怎么用?Python DataMap使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。


在下文中一共展示了DataMap类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: plugin_main

def plugin_main(args, **kwargs):
    """
    Matches the hosts in one datamap with those in another

    Copies the host of each unskipped entry of the reference map onto
    the corresponding unskipped entry of the adjusted map, then saves
    the adjusted map back to its original filename.

    Parameters
    ----------
    mapfile_in : str, optional
        Filename of datamap to adjust
    mapfile_to_match : str, optional
        Filename of datamap to match

    """
    mapfile_in = kwargs['mapfile_in']
    mapfile_to_match = kwargs['mapfile_to_match']

    # Load both maps and iterate only over entries not flagged 'skip'.
    map_in = DataMap.load(mapfile_in)
    map_in.iterator = DataMap.SkipIterator
    map_to_match = DataMap.load(mapfile_to_match)
    map_to_match.iterator = DataMap.SkipIterator

    # Collect the reference hosts, then assign them positionally.
    hosts_to_match = [entry.host for entry in map_to_match]
    for entry, new_host in zip(map_in, hosts_to_match):
        entry.host = new_host

    map_in.save(mapfile_in)
开发者ID:lofar-astron,项目名称:factor,代码行数:28,代码来源:PipelineStep_matchHosts.py

示例2: go

    def go(self):
        """
        Run the recipe: load the input mapfiles, run the node script on
        the collected inputs, and gather the node outputs into mapfiles.

        Returns 1 when the input data fail validation; otherwise returns
        whatever _collect_and_assign_outputs returns.
        """
        super(imager_create_dbs, self).go()

        # get assoc_theta; an empty string from the parset means "unset"
        assoc_theta = self.inputs["assoc_theta"]
        if assoc_theta == "":
            assoc_theta = None

        # Load mapfile data from files
        self.logger.info(self.inputs["slice_paths_mapfile"])
        slice_paths_map = MultiDataMap.load(self.inputs["slice_paths_mapfile"])
        input_map = DataMap.load(self.inputs['args'][0])
        source_list_map = DataMap.load(self.inputs['source_list_map_path'])

        # A truthy validation result signals invalid input: abort with 1.
        if self._validate_input_data(input_map, slice_paths_map):
            return 1

        # Run the nodes with the now collected inputs
        jobs, output_map = self._run_create_dbs_node(
                 input_map, slice_paths_map, assoc_theta,
                 source_list_map)

        # Collect the output of the node scripts and write it to (map) files
        return self._collect_and_assign_outputs(jobs, output_map,
                                    slice_paths_map)
开发者ID:jjdmol,项目名称:LOFAR,代码行数:25,代码来源:imager_create_dbs.py

示例3: _load_mapfiles

    def _load_mapfiles(self):
        """
        Load the data map file, instrument map file, and sky map file,
        then synchronise their 'skip' fields: an entry skipped in any
        one of the three maps is marked skipped in all of them.

        Returns True on success, False when the maps fail validation.
        """
        self.logger.debug(
            "Loading map files:"
            "\n\tdata map: %s\n\tinstrument map: %s\n\tsky map: %s" % (
                self.inputs['args'][0],
                self.inputs['instrument_mapfile'],
                self.inputs['sky_mapfile']
            )
        )
        self.data_map = DataMap.load(self.inputs['args'][0])
        self.inst_map = DataMap.load(self.inputs['instrument_mapfile'])
        self.sky_map = DataMap.load(self.inputs['sky_mapfile'])

        if not validate_data_maps(self.data_map, self.inst_map, self.sky_map):
            self.logger.error("Validation of input data mapfiles failed")
            return False

        # Propagate 'skip' across the three maps, entry by entry.
        for data, inst, sky in zip(self.data_map, self.inst_map, self.sky_map):
            skip_any = data.skip or inst.skip or sky.skip
            data.skip = inst.skip = sky.skip = skip_any

        return True
开发者ID:saiyanprince,项目名称:pyimager,代码行数:27,代码来源:bbs_reducer.py

示例4: _get_io_product_specs

 def _get_io_product_specs(self):
     """
     Get input- and output-data product specifications from the
     parset-file, and do some sanity checks.

     Raises PipelineException when the input and output product
     specifications do not validate against each other.
     """
     dps = self.parset.makeSubset(
         self.parset.fullModuleName('DataProducts') + '.'
     )

     def _build_map(prefix):
         # Each entry becomes (host, path, skip): 'location' has the
         # form 'host:directory', so joining with the filename and
         # splitting on ':' separates the host from the full path.
         return DataMap([
             tuple(os.path.join(location, filename).split(':')) + (skip,)
                 for location, filename, skip in zip(
                     dps.getStringVector(prefix + '.locations'),
                     dps.getStringVector(prefix + '.filenames'),
                     dps.getBoolVector(prefix + '.skip'))
         ])

     self.input_data = _build_map('Input_Correlated')
     self.logger.debug("%d Input_Correlated data products specified" %
                       len(self.input_data))
     self.output_data = _build_map('Output_Correlated')
     self.logger.debug("%d Output_Correlated data products specified" %
                       len(self.output_data))
     # Sanity checks on input- and output data product specifications
     if not validate_data_maps(self.input_data, self.output_data):
         raise PipelineException(
             "Validation of input/output data product specification failed!"
         )
开发者ID:saiyanprince,项目名称:pyimager,代码行数:31,代码来源:preprocessing_pipeline.py

示例5: plugin_main

def plugin_main(args, **kwargs):
    """
    Prunes entries from a mapfile

    Parameters
    ----------
    mapfile_in : str
        Filename of datamap to trim
    prune_str : str
        Entries starting with this string will be removed.
    mapfile_dir : str
        Directory for output mapfile
    filename : str
        Name of output mapfile

    Returns
    -------
    result : dict
        New datamap filename

    """
    mapfile_in = kwargs['mapfile_in']
    prune_str = kwargs['prune_str'].lower()
    mapfile_dir = kwargs['mapfile_dir']
    filename = kwargs['filename']
    prunelen = len(prune_str)

    map_out = DataMap([])
    map_in = DataMap.load(mapfile_in)

    # Keep only entries whose filename does not start with prune_str
    # (case-insensitive comparison on the prefix).
    for item in map_in:
        if item.file[:prunelen].lower() != prune_str:
            map_out.data.append(DataProduct(item.host, item.file, item.skip))

    fileid = os.path.join(mapfile_dir, filename)
    map_out.save(fileid)
    result = {'mapfile': fileid}

    return result
开发者ID:AHorneffer,项目名称:prefactor,代码行数:35,代码来源:PipelineStep_pruneMapfile.py

示例6: plugin_main

def plugin_main(args, **kwargs):
    """
    Copies each entry of mapfile_in as often as the length of the
    corresponding group into a new mapfile

    Parameters
    ----------
    mapfile_in : str
        Name of the input mapfile to be expanded. (E.g. with the skymodels
        for the different groups.)
    mapfile_groups : str
        Name of the multi-mapfile with the given groups. Number of groups
        needs to be the same as the number of files in mapfile_in.
    mapfile_dir : str
        Directory for output mapfile
    filename: str
        Name of output mapfile
    ignore_dummies: str (optional)
        If true, do not count dummy entries when expanding

    Returns
    -------
    result : dict
        Output datamap filename

    Raises
    ------
    ValueError
        If mapfile_in and mapfile_groups have different lengths.
    """
    mapfile_dir = kwargs['mapfile_dir']
    filename = kwargs['filename']

    # If the user has defined a dummy preference, follow it; otherwise
    # count dummies as usual.  Only a missing kwarg is tolerated here
    # (the original bare 'except:' hid every other failure).
    try:
        ignore_dummies = str(kwargs['ignore_dummies'])
        ignore_dummies = ignore_dummies in ['true', 'True', '1', 'T', 't']
    except KeyError:
        ignore_dummies = False

    inmap = DataMap.load(kwargs['mapfile_in'])
    groupmap = MultiDataMap.load(kwargs['mapfile_groups'])

    if len(inmap) != len(groupmap):
        raise ValueError('PipelineStep_mapfileSingleToGroup: length of {0} and {1} differ'.format(kwargs['mapfile_in'],kwargs['mapfile_groups']))

    map_out = DataMap([])
    # Repeat each input entry once per file in the matching group,
    # optionally not counting placeholder 'dummy_entry' files.
    # (Replaces the two duplicated py2-only xrange loops.)
    for in_item, group_item in zip(inmap, groupmap):
        skip = in_item.skip or group_item.skip
        for group_file in group_item.file:
            if ignore_dummies and group_file == 'dummy_entry':
                continue
            map_out.data.append(DataProduct(in_item.host, in_item.file, skip))

    fileid = os.path.join(mapfile_dir, filename)
    map_out.save(fileid)
    result = {'mapfile': fileid}

    return result
开发者ID:varenius,项目名称:lofar-lb,代码行数:59,代码来源:PipelineStep_mapfileSingleToGroupX.py

示例7: plugin_main

def plugin_main(args, **kwargs):
    """
    Makes a mapfile for selfcal images (assuming standard naming conventions)

    Parameters
    ----------
    selfcal_dir : str
        Full path of selfcal directory
    hosts : list or str
        List of hosts/nodes. May be given as a list or as a string (e.g.,
        '[host1, host2]')
    mapfile_dir : str
        Directory for output mapfile
    filename: str
        Name of output mapfile

    Returns
    -------
    result : dict
        Output datamap filename

    """
    selfcal_dir = kwargs['selfcal_dir']
    # Accept hosts either as a list or as a bracketed string.
    # (Fixes a NameError: 'hosts' was never assigned when a list was given.)
    hosts = kwargs['hosts']
    if type(hosts) is str:
        hosts = [h.strip() for h in hosts.strip('[]').split(',')]
    mapfile_dir = kwargs['mapfile_dir']
    filename = kwargs['filename']

    def _find_images(suffix):
        # Gather images from imaging steps 02/12, 22 (falling back from
        # per-iteration TEC images to the plain image), 32, and 42.
        images = glob.glob(os.path.join(selfcal_dir, '*.wsclean_image[01]2' + suffix))
        tec_iter_images = glob.glob(os.path.join(selfcal_dir, '*.wsclean_image22_iter*' + suffix))
        if len(tec_iter_images) == 0:
            tec_iter_images = glob.glob(os.path.join(selfcal_dir, '*.wsclean_image22' + suffix))
        images += tec_iter_images
        images += glob.glob(os.path.join(selfcal_dir, '*.wsclean_image[3]2' + suffix))
        images += glob.glob(os.path.join(selfcal_dir, '*.wsclean_image42_iter*' + suffix))
        return images

    if os.path.exists(selfcal_dir):
        # Prefer MFS images; fall back to plain images when none exist.
        selfcal_images = _find_images('-MFS-image.fits')
        if len(selfcal_images) == 0:
            selfcal_images = _find_images('-image.fits')
        selfcal_images.sort()
    else:
        selfcal_images = []

    # Save image list as a string to the output mapfile
    image_list = '[{0}]'.format(','.join(selfcal_images))
    map_out = DataMap([])
    map_out.data.append(DataProduct(hosts[0], image_list, False))

    fileid = os.path.join(mapfile_dir, filename)
    map_out.save(fileid)
    result = {'mapfile': fileid}

    return result
开发者ID:lofar-astron,项目名称:factor,代码行数:59,代码来源:PipelineStep_addSelfcalImagesMapfile.py

示例8: test_skip_iterator

 def test_skip_iterator(self):
     """SkipIterator must yield only the unskipped DataProduct entries."""
     data_map = DataMap(self.new_style_map)
     data_map.iterator = DataMap.SkipIterator
     unskipped = list(data_map)
     self.assertEqual(len(unskipped), 2)
     for item in unskipped:
         self.assertTrue(isinstance(item, DataProduct))
     first = unskipped[0]
     self.assertEqual(first.host, 'locus002')
     self.assertEqual(first.file, 'L12345_SB102.MS')
开发者ID:jjdmol,项目名称:LOFAR,代码行数:8,代码来源:data_map_test.py

示例9: test_tuple_iterator

 def test_tuple_iterator(self):
     """TupleIterator must yield every entry as a (host, file) 2-tuple."""
     data_map = DataMap(self.new_style_map)
     data_map.iterator = DataMap.TupleIterator
     tuples = list(data_map)
     self.assertEqual(len(tuples), 4)
     for item in tuples:
         self.assertTrue(isinstance(item, tuple))
         self.assertTrue(len(item) == 2)
     self.assertEqual(tuples[0], ('locus001', 'L12345_SB101.MS'))
开发者ID:jjdmol,项目名称:LOFAR,代码行数:8,代码来源:data_map_test.py

示例10: test_append_item_non_skip

    def test_append_item_non_skip(self):
        """Appending a (host, file, skip) tuple must add a fifth entry."""
        data_map = DataMap(self.new_style_map)
        data_map.append(("host", "file", False))

        data_map.iterator = DataMap.TupleIterator
        tuples = list(data_map)
        self.assertEqual(len(tuples), 5)
        for item in tuples:
            self.assertTrue(isinstance(item, tuple))
            self.assertTrue(len(item) == 2)
        self.assertEqual(tuples[-1], ('host', 'file'))
开发者ID:jjdmol,项目名称:LOFAR,代码行数:10,代码来源:data_map_test.py

示例11: __init__

 def __init__(self):
     """
     Set up the recipe's data members: empty job and bbs-map containers,
     plus empty DataMaps for the data, instrument, and sky mapfiles.
     """
     super(bbs_reducer, self).__init__()
     self.bbs_map = []
     self.jobs = []
     self.data_map = DataMap()
     self.inst_map = DataMap()
     self.sky_map = DataMap()
开发者ID:saiyanprince,项目名称:pyimager,代码行数:10,代码来源:bbs_reducer.py

示例12: plugin_main

def plugin_main(args, **kwargs):
    """
    Appends a string to filenames in a mapfile

    Parameters
    ----------
    mapfile_in : str
        Filename of datamap to append to
    append : str
        String to append
    append_index : bool
        If True, append a unique index to each file
    mapfile_dir : str
        Directory for output mapfile
    filename: str
        Name of output mapfile

    Returns
    -------
    result : dict
        New datamap filename

    """
    mapfile_in = kwargs['mapfile_in']

    # 'append_index' may arrive as a bool or as a string; only the
    # string 'true' (any case) enables it when given as a string.
    append_index = kwargs.get('append_index', False)
    if type(append_index) is str:
        append_index = append_index.lower() == 'true'

    append_str = kwargs['append']
    if append_str == 'None':
        append_str = ''
    mapfile_dir = kwargs['mapfile_dir']
    filename = kwargs['filename']

    map_in = DataMap.load(mapfile_in)
    map_out = DataMap([])

    # Build each new filename: original + suffix, plus an optional
    # positional index to keep names unique.
    for index, item in enumerate(map_in):
        new_file = item.file + append_str
        if append_index:
            new_file += '_{}'.format(index)
        map_out.data.append(DataProduct(item.host, new_file, item.skip))

    fileid = os.path.join(mapfile_dir, filename)
    map_out.save(fileid)
    result = {'mapfile': fileid}

    return result
开发者ID:lofar-astron,项目名称:factor,代码行数:55,代码来源:PipelineStep_appendMapfile.py

示例13: plugin_main

def plugin_main(args, **kwargs):
    """
    Makes a mapfile for list of files

    Parameters
    ----------
    files : list or str
        List of files or mapfile with such a list as the only entry. May be
        given as a list of strings or as a string (e.g.,
        '[s1.skymodel, s2.skymodel]')
    hosts : list or str
        List of hosts/nodes. May be given as a list or as a string (e.g.,
        '[host1, host2]')
    mapfile_dir : str
        Directory for output mapfile
    filename: str
        Name of output mapfile

    Returns
    -------
    result : dict
        Output datamap filename

    """
    # Accept 'files' as a list, a bracketed string, or a mapfile name.
    # (Fixes a NameError: 'files' was never assigned when a list was given.)
    files = kwargs['files']
    if type(files) is str:
        try:
            # Check if input is a mapfile containing the list as a string
            map_in = DataMap.load(files)
            in_files = [item.file for item in map_in]
            files = []
            for f in in_files:
                files += f.strip('[]').split(',')
        except Exception:
            # Not a mapfile: treat the string itself as a bracketed list
            files = kwargs['files'].strip('[]').split(',')
        files = [f.strip() for f in files]
    # Accept 'hosts' as a list or a bracketed string (same fix as above).
    hosts = kwargs['hosts']
    if type(hosts) is str:
        hosts = [h.strip() for h in hosts.strip('[]').split(',')]
    mapfile_dir = kwargs['mapfile_dir']
    filename = kwargs['filename']

    # Cycle the host list until there is one host per file.
    for i in range(len(files) - len(hosts)):
        hosts.append(hosts[i])

    map_out = DataMap([])
    for host, file_ in zip(hosts, files):
        map_out.data.append(DataProduct(host, file_, False))

    fileid = os.path.join(mapfile_dir, filename)
    map_out.save(fileid)
    result = {'mapfile': fileid}

    return result
开发者ID:AHorneffer,项目名称:prefactor,代码行数:54,代码来源:PipelineStep_addListMapfile.py

示例14: plugin_main

def plugin_main(args, **kwargs):
    """
    Makes a mapfile by compressing input mapfile items into one item

    Parameters
    ----------
    mapfile_in : str
        Filename of datamap containing MS files
    mapfile_dir : str
        Directory for output mapfile
    filename: str
        Name of output mapfile
    list_format : bool, optional
        If True, the compressed item will use a Python list format (e.g.,
        '[file1, file2, ...]'). If False, it will be a space-separated list
        (e.g., 'file1 file2 ...')

    Returns
    -------
    result : dict
        New parmdb datamap filename

    """
    mapfile_in = kwargs['mapfile_in']
    mapfile_dir = kwargs['mapfile_dir']
    filename = kwargs['filename']

    # 'list_format' defaults to True and may arrive as a bool or string;
    # when given as a string only 'true' (any case) counts as True.
    list_format = kwargs.get('list_format', True)
    if type(list_format) is str:
        list_format = list_format.lower() == 'true'

    map_in = DataMap.load(mapfile_in)
    map_in.iterator = DataMap.SkipIterator
    map_out = DataMap([])

    # Merge all unskipped filenames into a single string entry.
    file_list = [item.file for item in map_in]
    if list_format:
        newlist = '[{0}]'.format(','.join(file_list))
    else:
        newlist = '{0}'.format(' '.join(file_list))

    # Just assign host of first file to compressed file
    hosts = [item.host for item in map_in]
    map_out.data.append(DataProduct(hosts[0], newlist, False))

    fileid = os.path.join(mapfile_dir, filename)
    map_out.save(fileid)
    result = {'mapfile': fileid}

    return result
开发者ID:astrofle,项目名称:factor,代码行数:54,代码来源:PipelineStep_compressMapfile.py

示例15: __init__

 def __init__(self):
     """
     Initialize member variables and call superclass init function.

     Sets up empty DataMaps for the input, target, and output data
     products; directory members are filled in later during setup.
     """
     control.__init__(self)
     # Empty datamaps, populated once the parset is parsed.
     self.input_data = DataMap()
     self.target_data = DataMap()
     self.output_data = DataMap()
     # Working directories; assigned during pipeline configuration.
     self.scratch_directory = None
     self.parset_dir = None
     self.mapfile_dir = None


注:本文中的lofarpipe.support.data_map.DataMap类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。