Python h5py.Dataset Code Examples

This article collects typical usage examples of h5py.Dataset in Python. Strictly speaking, h5py.Dataset is a class rather than a method: it is the type h5py uses to represent an HDF5 dataset, which is why most of the examples below test objects with isinstance(obj, h5py.Dataset). If you are unsure how h5py.Dataset is used in practice or what it can do, the curated examples here should help; you can also explore further usage examples from the h5py package.


Below are 15 code examples involving h5py.Dataset, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.

Example 1: h5py_dataset_iterator

# Required import: import h5py [as alias]
# Or alternatively: from h5py import Dataset [as alias]
def h5py_dataset_iterator(self, g, prefix=''):
    for key in g.keys():
      item = g[key]
      path = '{}/{}'.format(prefix, key)
      keys = list(item.keys())
      if isinstance(item[keys[0]], h5py.Dataset):  # leaf group: children are datasets
        data = {'path': path}
        for k in keys:
          if not isinstance(item[k], h5py.Group):
            # `.value` is deprecated and was removed in h5py 3.0; use `[()]`
            dataset = np.array(item[k][()])

            if isinstance(dataset, np.ndarray) and dataset.size != 0:
              if isinstance(dataset[0], np.bytes_):
                dataset = [a.decode('ascii') for a in dataset]

            data.update({k: dataset})

        yield data
      else:  # non-leaf group: recurse into it
        for s in self.h5py_dataset_iterator(item, path):
          yield s 
Developer: deepchem, Project: deepchem, Lines: 24, Source file: pyanitools.py
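
A standalone variant of the same walk is handy outside the class above. Here is a minimal sketch (the file name 'data.h5' is a placeholder; any HDF5 file will do):

import h5py

def walk_datasets(g, prefix=''):
    """Yield (path, dataset) pairs for every h5py.Dataset under group g."""
    for key, item in g.items():
        path = '{}/{}'.format(prefix, key)
        if isinstance(item, h5py.Dataset):
            yield path, item
        elif isinstance(item, h5py.Group):
            yield from walk_datasets(item, path)

with h5py.File('data.h5', 'r') as f:
    for path, dset in walk_datasets(f):
        print(path, dset.shape, dset.dtype)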

Example 2: tohdf5

# Required import: import h5py [as alias]
# Or alternatively: from h5py import Dataset [as alias]
def tohdf5(hdf5_file, x, group=None, dataset=None, **kwds):
    import h5py

    x = astensor(x)
    if isinstance(hdf5_file, h5py.Dataset):
        filename = hdf5_file.file.filename
        group = hdf5_file.parent.name
        dataset = hdf5_file.name.rsplit('/', 1)[1]
    elif isinstance(hdf5_file, h5py.File):
        filename = hdf5_file.filename
        if dataset is None:
            raise ValueError('`dataset` should be provided')
    elif isinstance(hdf5_file, str):
        filename = hdf5_file
        if dataset is None:
            raise ValueError('`dataset` should be provided')
    else:
        raise TypeError('`hdf5_file` passed has wrong type, '
                        'expect str, h5py.File or h5py.Dataset, '
                        'got {}'.format(type(hdf5_file)))

    op = TensorHDF5DataStore(filename=filename, group=group, dataset=dataset,
                             dataset_kwds=kwds)
    return op(x) 
Developer: mars-project, Project: mars, Lines: 26, Source file: to_hdf5.py
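
When an h5py.Dataset is passed, the first branch above recovers the file name, group and dataset name from its attributes. A quick sketch of those attribute lookups on plain h5py objects ('out.h5' is a placeholder):

import h5py

with h5py.File('out.h5', 'w') as f:
    dset = f.create_dataset('grp/data', shape=(4,), dtype='f8')
    print(dset.file.filename)           # 'out.h5'
    print(dset.parent.name)             # '/grp'
    print(dset.name.rsplit('/', 1)[1])  # 'data'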

Example 3: _parse_units

# Required import: import h5py [as alias]
# Or alternatively: from h5py import Dataset [as alias]
def _parse_units(file, _units):
    import h5py

    t_units = {}
    if isinstance(_units, h5py.Group):
        # Each member of the group holds rows of HDF5 object references;
        # dereference them through `file` and collect the pointed-to values.
        for name in _units.keys():
            value = _units[name]
            dict_val = []
            for val in value:
                if isinstance(file[val[0]], h5py.Dataset):
                    dict_val.append(file[val[0]][()])
                    t_units[name] = dict_val
                else:
                    break
        out = [dict(zip(t_units, col)) for col in zip(*t_units.values())]
    else:
        # `_units` is itself an array of references to per-unit groups.
        out = []
        for unit in _units:
            group = file[unit[()][0]]
            unit_dict = {}
            for k in group.keys():
                unit_dict[k] = group[k][()]
            out.append(unit_dict)

    return out 
Developer: SpikeInterface, Project: spikeextractors, Lines: 27, Source file: hdsortsortingextractor.py
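
The file[val[0]] pattern above dereferences HDF5 object references, as found e.g. in MATLAB v7.3 files. A minimal sketch of writing and dereferencing such references with h5py (the file name is a placeholder; h5py.ref_dtype requires h5py >= 2.10):

import h5py
import numpy as np

with h5py.File('refs_demo.h5', 'w') as f:
    values = f.create_dataset('values', data=np.arange(3))
    refs = f.create_dataset('units/u1', shape=(1, 1), dtype=h5py.ref_dtype)
    refs[0, 0] = values.ref           # store a reference to /values

with h5py.File('refs_demo.h5', 'r') as f:
    row = f['units/u1'][0]            # a row of object references
    print(f[row[0]][()])              # dereference -> [0 1 2]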

Example 4: read_h5

# Required import: import h5py [as alias]
# Or alternatively: from h5py import Dataset [as alias]
def read_h5(fn):
    """Read h5 file into dict.

    Dict keys are the group + dataset names, e.g. '/a/b/c/dset'. All keys start
    with a leading slash even if written without (see :func:`write_h5`).

    Parameters
    ----------
    fn : str
        filename

    Examples
    --------
    >>> read_h5('foo.h5').keys()
    ['/a/b/d1', '/a/b/d2', '/a/c/d3', '/x/y/z']
    """
    fh = h5py.File(fn, mode='r')
    dct = {}
    def get(name, obj, dct=dct):
        if isinstance(obj, h5py.Dataset):
            _name = name if name.startswith('/') else '/'+name
            dct[_name] = obj[()]
    fh.visititems(get)
    fh.close()
    return dct 
Developer: elcorto, Project: pwtools, Lines: 27, Source file: io.py
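
The docstring refers to :func:`write_h5`, which is not shown on this page. A plausible minimal counterpart, offered as an assumption about its behavior rather than pwtools' actual implementation:

import h5py

def write_h5(fn, dct):
    # Hypothetical inverse of read_h5: each key is a '/group/.../dset' path.
    with h5py.File(fn, 'w') as fh:
        for name, val in dct.items():
            fh[name] = val  # h5py creates intermediate groups automatically

write_h5('foo.h5', {'/a/b/d1': [1, 2, 3], '/x/y/z': 42})
print(sorted(read_h5('foo.h5').keys()))  # ['/a/b/d1', '/x/y/z']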

Example 5: save_parameters_as_hdf5

# Required import: import h5py [as alias]
# Or alternatively: from h5py import Dataset [as alias]
def save_parameters_as_hdf5(model, filename='model.h5'):
    # Save the model parameters into an HDF5 archive
    chainer.serializers.save_hdf5(filename, model)
    print('{} saved!\n'.format(filename))

    # Load the saved HDF5 file with h5py and list its contents
    print('--- The list of saved params in {} ---'.format(filename))
    with h5py.File(filename, 'r') as f:
        for param_key, param in f.items():
            msg = '{}:'.format(param_key)
            if isinstance(param, h5py.Dataset):
                msg += ' {}'.format(param.shape)
            print(msg)
            if isinstance(param, h5py.Group):
                for child_key, child in param.items():
                    print('  {}:{}'.format(child_key, child.shape))
    print('---------------------------------------------\n') 
Developer: chainer, Project: chainer, Lines: 19, Source file: save.py
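
A minimal invocation sketch (assuming Chainer is installed; the tiny one-layer model is arbitrary):

import chainer

model = chainer.links.Linear(3, 2)  # weights are created eagerly when in_size is given
save_parameters_as_hdf5(model, filename='tiny.h5')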

Example 6: __init__

# Required import: import h5py [as alias]
# Or alternatively: from h5py import Dataset [as alias]
def __init__(self, pop_grp, pop_name):
        self._data_grp = pop_grp['data']
        self._mapping = pop_grp['mapping']
        self._population = pop_name

        self._gid2data_table = {}
        if self._mapping is None:
            raise Exception('could not find /mapping group')

        gids_ds = self._mapping[self.node_ids_ds]  # ['node_ids']
        index_pointer_ds = self._mapping['index_pointer']
        for indx, gid in enumerate(gids_ds):
            self._gid2data_table[gid] = slice(index_pointer_ds[indx], index_pointer_ds[indx+1])

        time_ds = self._mapping['time']
        # np.float was deprecated and removed in NumPy 1.24; the builtin float is equivalent
        self._t_start = float(time_ds[0])
        self._t_stop = float(time_ds[1])
        self._dt = float(time_ds[2])
        self._n_steps = int((self._t_stop - self._t_start) / self._dt)

        self._custom_cols = {col: grp for col, grp in self._mapping.items() if
                             col not in self.sonata_columns and isinstance(grp, h5py.Dataset)} 
Developer: AllenInstitute, Project: sonata, Lines: 24, Source file: compartment_reader.py
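
For orientation, here is a sketch of the HDF5 layout this reader expects, built from the names used above (the population and file names are placeholders, and 'node_ids' follows the inline comment):

import h5py
import numpy as np

with h5py.File('report_demo.h5', 'w') as f:
    pop = f.create_group('pop1')
    pop.create_dataset('data', data=np.zeros((10, 3)))
    mapping = pop.create_group('mapping')
    mapping.create_dataset('node_ids', data=[0, 1, 2])
    mapping.create_dataset('index_pointer', data=[0, 1, 2, 3])
    mapping.create_dataset('time', data=[0.0, 1.0, 0.1])  # [start, stop, dt]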

Example 7: test_float

# Required import: import h5py [as alias]
# Or alternatively: from h5py import Dataset [as alias]
def test_float(self):
        """ Scaleoffset filter works for floating point data """

        scalefac = 4
        shape = (100, 300)
        data_range = 20 * 10**scalefac  # named to avoid shadowing the builtin `range`
        testdata = (np.random.rand(*shape) - 0.5) * data_range

        dset = self.f.create_dataset('foo', shape, dtype=float, scaleoffset=scalefac)

        # Dataset reports that scaleoffset is in use
        assert dset.scaleoffset is not None

        # Dataset round-trips
        dset[...] = testdata
        filename = self.f.filename
        self.f.close()
        self.f = h5py.File(filename, 'r')
        readdata = self.f['foo'][...]

        # Test that data round-trips to requested precision
        self.assertArrayEqual(readdata, testdata, precision=10**(-scalefac))

        # Test that the filter is actually active (i.e. compression is lossy)
        assert not (readdata == testdata).all() 
Developer: Relph1119, Project: GraphicDesignPatternByPython, Lines: 27, Source file: test_dataset.py

Example 8: test_int

# Required import: import h5py [as alias]
# Or alternatively: from h5py import Dataset [as alias]
def test_int(self):
        """ Scaleoffset filter works for integer data with default precision """

        nbits = 12
        shape = (100, 300)
        testdata = np.random.randint(0, 2**nbits-1, size=shape)

        # Create dataset; note omission of nbits (for library-determined precision)
        dset = self.f.create_dataset('foo', shape, dtype=int, scaleoffset=True)

        # Dataset reports scaleoffset enabled
        assert dset.scaleoffset is not None

        # Data round-trips correctly and identically
        dset[...] = testdata
        filename = self.f.filename
        self.f.close()
        self.f = h5py.File(filename, 'r')
        readdata = self.f['foo'][...]
        self.assertArrayEqual(readdata, testdata) 
Developer: Relph1119, Project: GraphicDesignPatternByPython, Lines: 22, Source file: test_dataset.py

Example 9: test_int_with_minbits_lossy

# Required import: import h5py [as alias]
# Or alternatively: from h5py import Dataset [as alias]
def test_int_with_minbits_lossy(self):
        """ Scaleoffset filter works for integer data with specified precision """

        nbits = 12
        shape = (100, 300)
        testdata = np.random.randint(0, 2**(nbits+1)-1, size=shape)

        dset = self.f.create_dataset('foo', shape, dtype=int, scaleoffset=nbits)

        # Dataset reports scaleoffset enabled with correct precision
        self.assertEqual(dset.scaleoffset, nbits)

        # Data can be written and read
        dset[...] = testdata
        filename = self.f.filename
        self.f.close()
        self.f = h5py.File(filename, 'r')
        readdata = self.f['foo'][...]

        # Compression is lossy
        assert not (readdata == testdata).all() 
Developer: Relph1119, Project: GraphicDesignPatternByPython, Lines: 23, Source file: test_dataset.py
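
The three tests above exercise the scale-offset filter through a test harness. A standalone sketch of the same float round-trip (the file name is a placeholder):

import numpy as np
import h5py

data = np.random.rand(100, 300) * 2000

with h5py.File('scaleoffset_demo.h5', 'w') as f:
    # scaleoffset=2 keeps two decimal digits of precision for float data
    dset = f.create_dataset('x', data.shape, dtype=float, scaleoffset=2)
    dset[...] = data

with h5py.File('scaleoffset_demo.h5', 'r') as f:
    back = f['x'][...]
    print(np.abs(back - data).max())  # on the order of 1e-2: lossy, not exact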

Example 10: __init__

# Required import: import h5py [as alias]
# Or alternatively: from h5py import Dataset [as alias]
def __init__(self, repo_path: Path, schema_shape: tuple, schema_dtype: np.dtype):
        self.path: Path = repo_path
        self.schema_shape: tuple = schema_shape
        self.schema_dtype: np.dtype = schema_dtype
        self._dflt_backend_opts: Optional[dict] = None

        self.rFp: HDF5_01_MapTypes = {}
        self.wFp: HDF5_01_MapTypes = {}
        self.Fp: HDF5_01_MapTypes = ChainMap(self.rFp, self.wFp)
        self.rDatasets = SizedDict(maxsize=100)
        self.wdset: Optional[h5py.Dataset] = None

        self.mode: Optional[str] = None
        self.hIdx: Optional[int] = None
        self.w_uid: Optional[str] = None
        self.hMaxSize: Optional[int] = None
        self.hNextPath: Optional[int] = None
        self.hColsRemain: Optional[int] = None

        self.STAGEDIR: Path = Path(self.path, DIR_DATA_STAGE, _FmtCode)
        self.REMOTEDIR: Path = Path(self.path, DIR_DATA_REMOTE, _FmtCode)
        self.DATADIR: Path = Path(self.path, DIR_DATA, _FmtCode)
        self.STOREDIR: Path = Path(self.path, DIR_DATA_STORE, _FmtCode)
        self.DATADIR.mkdir(exist_ok=True) 
Developer: tensorwerk, Project: hangar-py, Lines: 26, Source file: hdf5_01.py

Example 11: __init__

# Required import: import h5py [as alias]
# Or alternatively: from h5py import Dataset [as alias]
def __init__(self, repo_path: Path, schema_shape: tuple, schema_dtype: np.dtype):
        self.path: Path = repo_path
        self.schema_shape: tuple = schema_shape
        self.schema_dtype: np.dtype = schema_dtype
        self._dflt_backend_opts: Optional[dict] = None

        self.rFp: HDF5_00_MapTypes = {}
        self.wFp: HDF5_00_MapTypes = {}
        self.Fp: HDF5_00_MapTypes = ChainMap(self.rFp, self.wFp)
        self.rDatasets = SizedDict(maxsize=100)
        self.wdset: Optional[h5py.Dataset] = None

        self.mode: Optional[str] = None
        self.hIdx: Optional[int] = None
        self.w_uid: Optional[str] = None
        self.hMaxSize: Optional[int] = None
        self.hNextPath: Optional[int] = None
        self.hColsRemain: Optional[int] = None

        self.STAGEDIR: Path = Path(self.path, DIR_DATA_STAGE, _FmtCode)
        self.REMOTEDIR: Path = Path(self.path, DIR_DATA_REMOTE, _FmtCode)
        self.STOREDIR: Path = Path(self.path, DIR_DATA_STORE, _FmtCode)
        self.DATADIR: Path = Path(self.path, DIR_DATA, _FmtCode)
        self.DATADIR.mkdir(exist_ok=True) 
Developer: tensorwerk, Project: hangar-py, Lines: 26, Source file: hdf5_00.py

Example 12: h5py_dataset_iterator

# Required import: import h5py [as alias]
# Or alternatively: from h5py import Dataset [as alias]
def h5py_dataset_iterator(self, g, prefix=''):
        for key in g.keys():
            item = g[key]
            path = '{}/{}'.format(prefix, key)
            keys = list(item.keys())
            if isinstance(item[keys[0]], h5py.Dataset):  # test for dataset
                data = {'path': path}
                for k in keys:
                    if not isinstance(item[k], h5py.Group):
                        # `.value` is deprecated and was removed in h5py 3.0
                        dataset = np.array(item[k][()])

                        if isinstance(dataset, np.ndarray) and dataset.size != 0:
                            if isinstance(dataset[0], np.bytes_):
                                dataset = [a.decode('ascii') for a in dataset]

                        data.update({k: dataset})

                yield data
            else:  # test for group (recurse)
                yield from self.h5py_dataset_iterator(item, path) 
Developer: isayev, Project: ANI1_dataset, Lines: 23, Source file: pyanitools.py

Example 13: test_force_dense_deprecated

# Required import: import h5py [as alias]
# Or alternatively: from h5py import Dataset [as alias]
def test_force_dense_deprecated(tmp_path):
    dense_pth = tmp_path / "dense.h5ad"
    adata = AnnData(X=sparse.random(10, 10, format="csr"))
    adata.raw = adata

    with pytest.warns(FutureWarning):
        adata.write_h5ad(dense_pth, force_dense=True)
    with h5py.File(dense_pth, "r") as f:
        assert isinstance(f["X"], h5py.Dataset)
        assert isinstance(f["raw/X"], h5py.Dataset)

    dense = ad.read_h5ad(dense_pth)

    assert isinstance(dense.X, np.ndarray)
    assert isinstance(dense.raw.X, np.ndarray)
    assert_equal(adata, dense)


Developer: theislab, Project: anndata, Lines: 23, Source file: test_deprecations.py

Example 14: test_sparse_to_dense_inplace

# Required import: import h5py [as alias]
# Or alternatively: from h5py import Dataset [as alias]
def test_sparse_to_dense_inplace(tmp_path, spmtx_format):
    pth = tmp_path / "adata.h5ad"
    orig = gen_adata((50, 50), spmtx_format)
    orig.raw = orig
    orig.write(pth)
    backed = ad.read_h5ad(pth, backed="r+")
    backed.write(as_dense=("X", "raw/X"))
    new = ad.read_h5ad(pth)

    assert_equal(orig, new)
    assert_equal(backed, new)

    assert isinstance(new.X, np.ndarray)
    assert isinstance(new.raw.X, np.ndarray)
    assert isinstance(orig.X, spmtx_format)
    assert isinstance(orig.raw.X, spmtx_format)
    assert isinstance(backed.X, h5py.Dataset)
    assert isinstance(backed.raw.X, h5py.Dataset) 
Developer: theislab, Project: anndata, Lines: 20, Source file: test_io_conversion.py

Example 15: write_sparse_as_dense

# Required import: import h5py [as alias]
# Or alternatively: from h5py import Dataset [as alias]
def write_sparse_as_dense(f, key, value, dataset_kwargs=MappingProxyType({})):
    real_key = None  # flag: set when a temporary key is in use
    if key in f:
        if (
            isinstance(value, (h5py.Group, h5py.Dataset, SparseDataset))
            and value.file.filename == f.filename
        ):  # Write to temporary key before overwriting
            real_key = key
            # Transform key to temporary, e.g. raw/X -> raw/_X, or X -> _X
            key = re.sub(r"(.*)(\w(?!.*/))", r"\1_\2", key.rstrip("/"))
        else:
            del f[key]  # Wipe before write
    dset = f.create_dataset(key, shape=value.shape, dtype=value.dtype, **dataset_kwargs)
    compressed_axis = int(isinstance(value, sparse.csc_matrix))
    for idx in idx_chunks_along_axis(value.shape, compressed_axis, 1000):
        dset[idx] = value[idx].toarray()
    if real_key is not None:
        del f[real_key]
        f[real_key] = f[key]
        del f[key] 
Developer: theislab, Project: anndata, Lines: 22, Source file: h5ad.py
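
The helper idx_chunks_along_axis is an anndata internal that is not shown on this page. A plausible reconstruction based on how it is used above (an assumption, not anndata's actual code):

def idx_chunks_along_axis(shape, axis, chunk_size):
    # Yield tuples of slices selecting chunk_size-wide blocks along `axis`.
    total = shape[axis]
    for start in range(0, total, chunk_size):
        idx = [slice(None)] * len(shape)
        idx[axis] = slice(start, min(start + chunk_size, total))
        yield tuple(idx)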


Note: the h5py.Dataset examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from community open-source projects; copyright remains with the original authors, and redistribution or use should follow each project's license. Please do not repost without permission.