

Python tables.Filters Code Examples

This article collects typical usage examples of Python's tables.Filters (a class provided by the PyTables package). If you are wondering what tables.Filters is for, how to use it, or what real-world code that uses it looks like, the selected examples below should help. You can also explore further usage examples from the tables package.


The following shows 15 code examples of tables.Filters, drawn from open-source projects and ordered by popularity.
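
Before diving into the examples, here is a minimal, self-contained sketch of what tables.Filters does: it bundles compression settings (library, level, shuffle) that you pass to PyTables when creating arrays or tables. The file name "demo.h5" and the 1000-element shape below are arbitrary placeholders for this sketch, not taken from any of the projects.

import numpy as np
import tables

# Compression settings: zlib at level 5 with byte shuffling enabled.
filters = tables.Filters(complevel=5, complib="zlib", shuffle=True)

with tables.open_file("demo.h5", mode="w") as h5f:
    # The filters object is attached to the array at creation time.
    carray = h5f.create_carray(h5f.root, "values",
                               atom=tables.Float64Atom(),
                               shape=(1000,),
                               filters=filters)
    carray[:] = np.arange(1000, dtype="float64")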

Example 1: create_carray

# Required import: import tables [as alias]
# Or: from tables import Filters [as alias]
def create_carray(h5f, chrom, data_type):
    if data_type == "uint8":
        atom = tables.UInt8Atom(dflt=0)
    elif data_type == "uint16":
        atom = tables.UInt16Atom(dflt=0)
    else:
        raise NotImplementedError("unsupported datatype %s" % data_type)

    zlib_filter = tables.Filters(complevel=1, complib="zlib")

    # create CArray for this chromosome
    shape = [chrom.length]
    carray = h5f.create_carray(h5f.root, chrom.name,
                              atom, shape, filters=zlib_filter)

    return carray 
Developer: bmvdgeijn, Project: WASP, Lines: 18, Source: bam2h5.py
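
A hedged usage sketch for the helper above. The Chrom namedtuple is a hypothetical stand-in for whatever chromosome object WASP passes in; anything with name and length attributes would work, and the chromosome name and length are illustrative values only.

import collections
import tables

# Hypothetical chromosome record; the real caller just needs .name and .length.
Chrom = collections.namedtuple("Chrom", ["name", "length"])

with tables.open_file("counts.h5", mode="w") as h5f:
    carray = create_carray(h5f, Chrom(name="chr1", length=248956422), "uint8")
    carray[1000:1010] = 1  # write a small slice of per-base counts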

Example 2: test_write_container

# Required import: import tables [as alias]
# Or: from tables import Filters [as alias]
def test_write_container(temp_h5_file):
    r0tel = R0CameraContainer()
    mc = MCEventContainer()
    mc.reset()
    r0tel.waveform = np.random.uniform(size=(50, 10))
    r0tel.meta["test_attribute"] = 3.14159
    r0tel.meta["date"] = "2020-10-10"

    with HDF5TableWriter(
        temp_h5_file, group_name="R0", filters=tables.Filters(complevel=7)
    ) as writer:
        writer.exclude("tel_002", ".*samples")  # test exclusion of columns

        for ii in range(100):
            r0tel.waveform[:] = np.random.uniform(size=(50, 10))
            mc.energy = 10 ** np.random.uniform(1, 2) * u.TeV
            mc.core_x = np.random.uniform(-1, 1) * u.m
            mc.core_y = np.random.uniform(-1, 1) * u.m

            writer.write("tel_001", r0tel)
            writer.write("tel_002", r0tel)  # write a second table too
            writer.write("MC", mc) 
Developer: cta-observatory, Project: ctapipe, Lines: 24, Source: test_hdf5.py

Example 3: __init__

# Required import: import tables [as alias]
# Or: from tables import Filters [as alias]
def __init__(self,
                 X=None,
                 topo_view=None,
                 y=None,
                 view_converter=None,
                 axes=('b', 0, 1, 'c'),
                 rng=_default_seed,
                 X_labels=None,
                 y_labels=None):
        super_self = super(DenseDesignMatrixPyTables, self)
        super_self.__init__(X=X,
                            topo_view=topo_view,
                            y=y,
                            view_converter=view_converter,
                            axes=axes,
                            rng=rng,
                            X_labels=X_labels,
                            y_labels=y_labels)
        self._check_labels()
        ensure_tables()
        if not hasattr(self, 'filters'):
            self.filters = tables.Filters(complib='blosc', complevel=5) 
Developer: zchengquan, Project: TextDetector, Lines: 24, Source: dense_design_matrix.py

Example 4: fetch_svhn_extra

# Required import: import tables [as alias]
# Or: from tables import Filters [as alias]
def fetch_svhn_extra(source_paths, target_path):
    extra_path = source_paths[0]

    print('Converting {} to HDF5 (compressed)...'.format(extra_path))
    f_out = tables.open_file(target_path, mode='w')
    g_out = f_out.create_group(f_out.root, 'svhn', 'SVHN data')
    filters = tables.Filters(complevel=9, complib='blosc')
    X_u8_arr = f_out.create_earray(
        g_out, 'extra_X_u8', tables.UInt8Atom(), (0, 3, 32, 32),
        filters=filters)
    y_arr = f_out.create_earray(
        g_out, 'extra_y', tables.Int32Atom(), (0,), filters=filters)

    # Load in the extra data Matlab file
    _insert_svhn_matlab_to_h5(X_u8_arr, y_arr, extra_path)

    f_out.close()

    return target_path 
Developer: Britefury, Project: batchup, Lines: 21, Source: svhn.py

Example 5: create_hdf5_arr_table

# Required import: import tables [as alias]
# Or: from tables import Filters [as alias]
def create_hdf5_arr_table(hdf_file, group, array_name,
        dtype=np.dtype('float64'), shape=(), arr=None,
        complib='blosc', complevel=5):
    atom = tables.Atom.from_dtype(dtype)
    if arr is not None:
        shape = arr.shape
    # Note: the Filters line is commented out in the original source, so the
    # complib/complevel arguments are currently unused.
#     filters = tables.Filters(complib=complib, complevel=complevel)
    if not is_table_in_group(group, array_name):
        try:
            ds = hdf_file.create_carray(group, array_name, atom, shape)
        except AttributeError:
            # fall back to the legacy camelCase PyTables API
            ds = hdf_file.createCArray(group, array_name, atom, shape)
    else:
        ds = group._v_children[array_name]

    if arr is not None:
        ds[:] = arr
    return ds 
Developer: pelednoam, Project: mmvt, Lines: 20, Source: tables_utils.py

Example 6: save

# Required import: import tables [as alias]
# Or: from tables import Filters [as alias]
def save(self, h5file, title=None, index_dtype='uint32', data_dtype=float, incidence_only=True, complib='zlib'):
        if self.finalized:
            h5fh = tables.open_file(h5file, 'w', title=title)
            fil  = tables.Filters(complevel=1, complib=complib)
            h5fh.set_node_attr(h5fh.root, 'incidence_only', incidence_only)
            h5fh.set_node_attr(h5fh.root, 'mtype', 'csc_matrix')
            h5fh.set_node_attr(h5fh.root, 'shape', self.shape)
            for hid in range(self.shape[1]):  # original uses Python 2 xrange
                hgroup = h5fh.create_group(h5fh.root, 'h%d' % hid, 'Sparse matrix components for Haplotype %d' % hid)
                spmat = self.data[hid]
                i1 = h5fh.create_carray(hgroup, 'indptr', obj=spmat.indptr.astype(index_dtype), filters=fil)
                i2 = h5fh.create_carray(hgroup, 'indices', obj=spmat.indices.astype(index_dtype), filters=fil)
                if not incidence_only:
                    d = h5fh.create_carray(hgroup, 'data', obj=spmat.data.astype(data_dtype), filters=fil)
            h5fh.flush()
            h5fh.close()
        else:
            raise RuntimeError('The matrix is not finalized.') 
Developer: churchill-lab, Project: emase, Lines: 20, Source: Sparse3DMatrix.py

Example 7: _get_compression_filters

# Required import: import tables [as alias]
# Or: from tables import Filters [as alias]
def _get_compression_filters(compression='default'):
    if compression == 'default':
        config = conf.config()
        compression = config.get('io', 'compression')
    elif compression is True:
        compression = 'zlib'

    if (compression is False or compression is None or
            compression == 'none' or compression == 'None'):
        ff = None
    else:
        if isinstance(compression, (tuple, list)):
            compression, level = compression
        else:
            level = 9

        try:
            ff = tables.Filters(complevel=level, complib=compression,
                                shuffle=True)
        except Exception:
            warnings.warn(("(deepdish.io.save) Missing compression method {}: "
                           "no compression will be used.").format(compression))
            ff = None
    return ff 
Developer: uchicago-cs, Project: deepdish, Lines: 26, Source: hdf5io.py
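
A hedged illustration of the argument forms the helper above accepts, assuming the function and its imports are available in scope (the calls below avoid the 'default' branch, which additionally requires deepdish's configuration module):

# Hypothetical calls illustrating the branches handled above:
ff = _get_compression_filters(('blosc', 5))   # explicit (complib, complevel) tuple
ff = _get_compression_filters(True)           # shorthand for zlib at level 9
ff = _get_compression_filters(None)           # no compression -> returns None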

Example 8: __init__

# Required import: import tables [as alias]
# Or: from tables import Filters [as alias]
def __init__(self, filename, filter_kwds=None, mode="r", title='', metadata=None, create_directories=False):
        self._opened = False
        if isinstance(filename, (str, os.PathLike)):
            # filename is a path to open
            self.filename = filename
            # Not sure how else to deal with str / unicode requirements in pytables
            # See this issue: https://github.com/PyTables/PyTables/issues/522
            import sys
            if filter_kwds:
                if sys.version_info[0] == 2 and 'complib' in filter_kwds:
                    filter_kwds['complib'] = filter_kwds['complib'].encode()
                filters = tables.Filters(**filter_kwds)
            else:
                filters = None

            # Create directories for the filename if required
            if create_directories:
                try:
                    os.makedirs(os.path.dirname(filename))
                except OSError as exception:
                    import errno
                    if exception.errno != errno.EEXIST:
                        raise

            self.file = tables.open_file(filename, mode=mode, filters=filters, title=title)
            self._opened = True
        elif isinstance(filename, tables.File):
            # filename is a pytables file
            self.file = filename
            assert(self.file.isopen)
            self.filename = self.file.filename
            self._opened = False
        else:
            raise TypeError("{} must be initialised with a filename to open or an open tables.File".format(self.__class__.__name__))

        # now update metadata if given
        if metadata is not None and self.file.mode != 'r':
            for k, v in metadata.items():
                setattr(self.file.root._v_attrs, k, v) 
Developer: pywr, Project: pywr, Lines: 41, Source: h5tools.py

Example 9: append

# Required import: import tables [as alias]
# Or: from tables import Filters [as alias]
def append(self, ndarray):
        name = self.get_name()
        comp_filter = tables.Filters(**self.compression)
        tarray = self.data_file.create_carray(self.group, name, obj=ndarray,
                                             filters=comp_filter)
        self.data_file.flush()
        super(PyTablesList, self).append(tarray)
        #print(self.prefix+str(self.size), ndarray)
        self.size += 1
        self.group._v_attrs.size = self.size 
Developer: tritemio, Project: FRETBursts, Lines: 12, Source: pytables_array_list.py

Example 10: safe_hdf

# Required import: import tables [as alias]
# Or: from tables import Filters [as alias]
def safe_hdf(array, name):
    if os.path.isfile(name + '.hdf') and not args.overwrite:
        logger.warning("Not saving %s, already exists." % (name + '.hdf'))
    else:
        if os.path.isfile(name + '.hdf'):
            logger.info("Overwriting %s." % (name + '.hdf'))
        else:
            logger.info("Saving to %s." % (name + '.hdf'))
        with tables.openFile(name + '.hdf', 'w') as f:
            atom = tables.Atom.from_dtype(array.dtype)
            filters = tables.Filters(complib='blosc', complevel=5)
            ds = f.createCArray(f.root, name.replace('.', ''), atom,
                                array.shape, filters=filters)
            ds[:] = array 
Developer: sebastien-j, Project: LV_groundhog, Lines: 16, Source: preprocess.py
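
The snippet above uses the legacy camelCase PyTables API (tables.openFile, createCArray), which has been deprecated since PyTables 3.0 in favour of snake_case names. Below is a sketch of the same save logic against the modern API; the args and logger objects are assumed to exist as in the original module.

import os
import tables

def safe_hdf_modern(array, name):
    # Same logic as safe_hdf above, written with the snake_case PyTables API.
    # `args` and `logger` are assumed to be defined as in the original module.
    if os.path.isfile(name + '.hdf') and not args.overwrite:
        logger.warning("Not saving %s, already exists." % (name + '.hdf'))
        return
    logger.info("Saving to %s." % (name + '.hdf'))
    with tables.open_file(name + '.hdf', 'w') as f:
        atom = tables.Atom.from_dtype(array.dtype)
        filters = tables.Filters(complib='blosc', complevel=5)
        ds = f.create_carray(f.root, name.replace('.', ''), atom,
                             array.shape, filters=filters)
        ds[:] = array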

Example 11: setUp

# Required import: import tables [as alias]
# Or: from tables import Filters [as alias]
def setUp(self):
        num_rows = 500
        filters = tables.Filters(complib='blosc', complevel=5)
        h5file = tables.open_file(
            'tmp.h5', mode='w', title='Test', filters=filters)
        group = h5file.create_group("/", 'Data')
        atom = tables.UInt8Atom()
        y = h5file.create_carray(group, 'y', atom=atom, title='Data targets',
                                 shape=(num_rows, 1), filters=filters)
        for i in range(num_rows):
            y[i] = i
        h5file.flush()
        h5file.close()
        self.dataset = PytablesDataset('tmp.h5', ('y',), 20, 500)
        self.dataset_default = PytablesDataset('tmp.h5', ('y',)) 
Developer: rizar, Project: attention-lvcsr, Lines: 17, Source: test_hdf5.py

Example 12: write_snp_index_h5

# Required import: import tables [as alias]
# Or: from tables import Filters [as alias]
def write_snp_index_h5(self):
        atom = tables.Int16Atom(dflt=0)
    
        zlib_filter = tables.Filters(complevel=1, complib="zlib")
        
        snp_index_h5 = tables.open_file(self.snp_index_filename, "w")    

        snp_index = 0

        chrom_arrays = {}
        chrom_lengths = self.get_chrom_lengths()
        
        for snp in self.snp_list:
            if snp[0] in chrom_arrays:
                carray = chrom_arrays[snp[0]]
            else:
                # create CArray for this chromosome
                shape = [chrom_lengths[snp[0]]]
                carray = snp_index_h5.create_carray(snp_index_h5.root,
                                                   snp[0], atom, shape,
                                                   filters=zlib_filter)
                carray[:] = -1
                chrom_arrays[snp[0]] = carray

            pos = snp[1]
            carray[pos-1] = snp_index
            snp_index += 1
            
        self.write_hap_samples(snp_index_h5)

        snp_index_h5.close() 
Developer: bmvdgeijn, Project: WASP, Lines: 33, Source: test_find_intersecting_snps.py

Example 13: create_carray

# Required import: import tables [as alias]
# Or: from tables import Filters [as alias]
def create_carray(self, h5f, chrom, atom):
        zlib_filter = tables.Filters(complevel=1, complib="zlib")

        # create CArray for this chromosome
        shape = [chrom.length]
        carray = h5f.create_carray(h5f.root, chrom.name,
                                  atom, shape, filters=zlib_filter)

        return carray 
Developer: bmvdgeijn, Project: WASP, Lines: 11, Source: get_target_regions.py

Example 14: test_filters

# Required import: import tables [as alias]
# Or: from tables import Filters [as alias]
def test_filters():
    from tables import Filters, open_file

    class TestContainer(Container):
        value = Field(-1, "test")

    no_comp = Filters(complevel=0)
    zstd = Filters(complevel=5, complib="blosc:zstd")

    with tempfile.NamedTemporaryFile(suffix=".hdf5") as f:
        with HDF5TableWriter(
            f.name, group_name="data", mode="w", filters=no_comp
        ) as writer:
            assert writer._h5file.filters.complevel == 0

            c = TestContainer(value=5)
            writer.write("default", c)

            writer.filters = zstd
            writer.write("zstd", c)

            writer.filters = no_comp
            writer.write("nocomp", c)

        with open_file(f.name) as h5file:
            assert h5file.root.data.default.filters.complevel == 0
            assert h5file.root.data.zstd.filters.complevel == 5
            assert h5file.root.data.zstd.filters.complib == "blosc:zstd"
            assert h5file.root.data.nocomp.filters.complevel == 0 
Developer: cta-observatory, Project: ctapipe, Lines: 31, Source: test_hdf5.py

Example 15: merge_all_files_into_pytables

# Required import: import tables [as alias]
# Or: from tables import Filters [as alias]
def merge_all_files_into_pytables(file_dir, file_out):
    """
    process each file into pytables
    """
    start = None
    start = datetime.datetime.now()
    out_h5 = tables.openFile(file_out,
                             mode="w",
                             title="bars",
                             filters=tables.Filters(complevel=9,
                                                    complib='zlib'))
    table = None
    for file_in in glob.glob(file_dir + "/*.gz"):
        gzip_file = gzip.open(file_in)
        expected_header = ["dt", "sid", "open", "high", "low", "close",
                           "volume"]
        csv_reader = csv.DictReader(gzip_file)
        header = csv_reader.fieldnames
        if header != expected_header:
            logging.warn("expected header %s\n" % (expected_header))
            logging.warn("header_found %s" % (header))
            return

        for current_date, rows in parse_csv(csv_reader):
            table = out_h5.createTable("/TD", "date_" + current_date,
                                       OHLCTableDescription,
                                       expectedrows=len(rows),
                                       createparents=True)
            table.append(rows)
            table.flush()
        if table is not None:
            table.flush()
    end = datetime.datetime.now()
    diff = (end - start).seconds
    logging.debug("finished, it took %d seconds." % diff)
Developer: zhanghan1990, Project: zipline-chinese, Lines: 37, Source: data_source_tables_gen.py


Note: The tables.Filters examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors, and copyright remains with the original authors; consult each project's license before distributing or using the code. Do not reproduce this article without permission.