

Python h5py.Dataset Code Examples

This article collects typical usage examples of the Python class h5py.Dataset. If you are wondering what h5py.Dataset is for, how to use it, or just want working samples, the curated code examples below may help. You can also explore further usage examples from the h5py package.


The following presents 15 code examples of h5py.Dataset, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
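
As a primer before the examples, here is a minimal, self-contained sketch of creating and reading an h5py.Dataset; the file name demo.h5 is arbitrary and chosen purely for illustration:

import h5py
import numpy as np

# Create an HDF5 file containing a single dataset.
with h5py.File('demo.h5', 'w') as f:
    dset = f.create_dataset('data', data=np.arange(10))
    print(isinstance(dset, h5py.Dataset))  # True
    print(dset.shape, dset.dtype)          # (10,) int64 (dtype is platform dependent)

# Reopen the file and read the dataset back as a NumPy array.
with h5py.File('demo.h5', 'r') as f:
    arr = f['data'][()]
    print(arr)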

Example 1: h5py_dataset_iterator

# Required module: import h5py [as alias]
# Or: from h5py import Dataset [as alias]
import h5py
import numpy as np

def h5py_dataset_iterator(self, g, prefix=''):
    for key in g.keys():
      item = g[key]
      path = '{}/{}'.format(prefix, key)
      keys = [i for i in item.keys()]
      if isinstance(item[keys[0]], h5py.Dataset):  # test for dataset
        data = {'path': path}
        for k in keys:
          if not isinstance(item[k], h5py.Group):
            # .value is deprecated (removed in h5py 3.x); use [()] instead
            dataset = np.array(item[k][()])

            if type(dataset) is np.ndarray:
              if dataset.size != 0:
                if type(dataset[0]) is np.bytes_:
                  dataset = [a.decode('ascii') for a in dataset]

            data.update({k: dataset})

        yield data
      else:  # test for group (go down)
        for s in self.h5py_dataset_iterator(item, path):
          yield s 
Developer: deepchem, Project: deepchem, Lines: 24, Source: pyanitools.py
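
Since the method above expects a self argument, a hedged usage sketch follows; the _Loader stand-in and the file name are hypothetical (pyanitools ships its own loader class):

import h5py

class _Loader:
    pass

# Bind the module-level function above as a method so the recursive
# self.h5py_dataset_iterator(...) calls resolve.
_Loader.h5py_dataset_iterator = h5py_dataset_iterator

with h5py.File('ani_data.h5', 'r') as f:  # illustrative file name
    for record in _Loader().h5py_dataset_iterator(f):
        print(record['path'], sorted(record))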

Example 2: tohdf5

# Required module: import h5py [as alias]
# Or: from h5py import Dataset [as alias]
def tohdf5(hdf5_file, x, group=None, dataset=None, **kwds):
    import h5py

    x = astensor(x)
    if isinstance(hdf5_file, h5py.Dataset):
        filename = hdf5_file.file.filename
        group = hdf5_file.parent.name
        dataset = hdf5_file.name.rsplit('/', 1)[1]
    elif isinstance(hdf5_file, h5py.File):
        filename = hdf5_file.filename
        if dataset is None:
            raise ValueError('`dataset` should be provided')
    elif isinstance(hdf5_file, str):
        filename = hdf5_file
        if dataset is None:
            raise ValueError('`dataset` should be provided')
    else:
        raise TypeError('`hdf5_file` passed has wrong type, '
                        'expect str, h5py.File or h5py.Dataset, '
                        'got {}'.format(type(hdf5_file)))

    op = TensorHDF5DataStore(filename=filename, group=group, dataset=dataset,
                             dataset_kwds=kwds)
    return op(x) 
Developer: mars-project, Project: mars, Lines: 26, Source: to_hdf5.py

Example 3: _parse_units

# Required module: import h5py [as alias]
# Or: from h5py import Dataset [as alias]
def _parse_units(file, _units):
    import h5py

    t_units = {}
    if isinstance(_units, h5py.Group):
        for name in _units.keys():
            value = _units[name]
            dict_val = []
            for val in value:
                if isinstance(file[val[0]], h5py.Dataset):
                    dict_val.append(file[val[0]][()])
                    t_units[name] = dict_val
                else:
                    break
        out = [dict(zip(t_units, col)) for col in zip(*t_units.values())]
    else:
        out = []
        for unit in _units:
            group = file[unit[()][0]]
            unit_dict = {}
            for k in group.keys():
                unit_dict[k] = group[k][()]
            out.append(unit_dict)

    return out 
Developer: SpikeInterface, Project: spikeextractors, Lines: 27, Source: hdsortsortingextractor.py

Example 4: read_h5

# Required module: import h5py [as alias]
# Or: from h5py import Dataset [as alias]
def read_h5(fn):
    """Read h5 file into dict.

    Dict keys are the group + dataset names, e.g. '/a/b/c/dset'. All keys start
    with a leading slash even if written without (see :func:`write_h5`).

    Parameters
    ----------
    fn : str
        filename

    Examples
    --------
    >>> read_h5('foo.h5').keys()
    ['/a/b/d1', '/a/b/d2', '/a/c/d3', '/x/y/z']
    """
    fh = h5py.File(fn, mode='r')
    dct = {}
    def get(name, obj, dct=dct):
        if isinstance(obj, h5py.Dataset):
            _name = name if name.startswith('/') else '/'+name
            dct[_name] = obj[()]
    fh.visititems(get)
    fh.close()
    return dct 
Developer: elcorto, Project: pwtools, Lines: 27, Source: io.py
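
The docstring above references a companion write_h5. As an illustration only (not pwtools' actual implementation), a minimal writer for the same flat key layout could look like this:

import h5py

def write_h5_sketch(fn, dct):
    """Write a flat dict of '/group/subgroup/dset' -> array mappings.

    h5py creates intermediate groups automatically when a nested
    path is assigned, so no explicit group handling is needed.
    """
    with h5py.File(fn, mode='w') as fh:
        for key, val in dct.items():
            fh[key] = val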

Example 5: save_parameters_as_hdf5

# Required module: import h5py [as alias]
# Or: from h5py import Dataset [as alias]
def save_parameters_as_hdf5(model, filename='model.h5'):
    # Save the model parameters into an HDF5 archive
    chainer.serializers.save_hdf5(filename, model)
    print('{} saved!\n'.format(filename))

    # Load the saved HDF5 file back using h5py and list its contents
    print('--- The list of saved params in {} ---'.format(filename))
    with h5py.File(filename, 'r') as f:
        for param_key, param in f.items():
            msg = '{}:'.format(param_key)
            if isinstance(param, h5py.Dataset):
                msg += ' {}'.format(param.shape)
            print(msg)
            if isinstance(param, h5py.Group):
                for child_key, child in param.items():
                    print('  {}:{}'.format(child_key, child.shape))
    print('---------------------------------------------\n') 
Developer: chainer, Project: chainer, Lines: 19, Source: save.py
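
A quick usage sketch for the function above, assuming Chainer is installed; the tiny Linear link is hypothetical, chosen just to have some parameters to save:

import chainer.links as L

# A small link with W and b parameters (in_size=3, out_size=2).
model = L.Linear(3, 2)
save_parameters_as_hdf5(model, filename='model.h5')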

Example 6: __init__

# Required module: import h5py [as alias]
# Or: from h5py import Dataset [as alias]
def __init__(self, pop_grp, pop_name):
        self._data_grp = pop_grp['data']
        self._mapping = pop_grp['mapping']
        self._population = pop_name

        self._gid2data_table = {}
        if self._mapping is None:
            raise Exception('could not find /mapping group')

        gids_ds = self._mapping[self.node_ids_ds]  # ['node_ids']
        index_pointer_ds = self._mapping['index_pointer']
        for indx, gid in enumerate(gids_ds):
            self._gid2data_table[gid] = slice(index_pointer_ds[indx], index_pointer_ds[indx+1])

        time_ds = self._mapping['time']
        # np.float is deprecated (removed in NumPy 1.24+); use the builtin float
        self._t_start = float(time_ds[0])
        self._t_stop = float(time_ds[1])
        self._dt = float(time_ds[2])
        self._n_steps = int((self._t_stop - self._t_start) / self._dt)

        self._custom_cols = {col: grp for col, grp in self._mapping.items() if
                             col not in self.sonata_columns and isinstance(grp, h5py.Dataset)} 
Developer: AllenInstitute, Project: sonata, Lines: 24, Source: compartment_reader.py

Example 7: test_float

# Required module: import h5py [as alias]
# Or: from h5py import Dataset [as alias]
def test_float(self):
        """ Scaleoffset filter works for floating point data """

        scalefac = 4
        shape = (100, 300)
        range = 20*10**scalefac
        testdata = (np.random.rand(*shape)-0.5)*range

        dset = self.f.create_dataset('foo', shape, dtype=float, scaleoffset=scalefac)

        # Dataset reports that scaleoffset is in use
        assert dset.scaleoffset is not None

        # Dataset round-trips
        dset[...] = testdata
        filename = self.f.filename
        self.f.close()
        self.f = h5py.File(filename, 'r')
        readdata = self.f['foo'][...]

        # Test that data round-trips to requested precision
        self.assertArrayEqual(readdata, testdata, precision=10**(-scalefac))

        # Test that the filter is actually active (i.e. compression is lossy)
        assert not (readdata == testdata).all() 
Developer: Relph1119, Project: GraphicDesignPatternByPython, Lines: 27, Source: test_dataset.py

Example 8: test_int

# Required module: import h5py [as alias]
# Or: from h5py import Dataset [as alias]
def test_int(self):
        """ Scaleoffset filter works for integer data with default precision """

        nbits = 12
        shape = (100, 300)
        testdata = np.random.randint(0, 2**nbits-1, size=shape)

        # Create dataset; note omission of nbits (for library-determined precision)
        dset = self.f.create_dataset('foo', shape, dtype=int, scaleoffset=True)

        # Dataset reports scaleoffset enabled
        assert dset.scaleoffset is not None

        # Data round-trips correctly and identically
        dset[...] = testdata
        filename = self.f.filename
        self.f.close()
        self.f = h5py.File(filename, 'r')
        readdata = self.f['foo'][...]
        self.assertArrayEqual(readdata, testdata) 
Developer: Relph1119, Project: GraphicDesignPatternByPython, Lines: 22, Source: test_dataset.py

Example 9: test_int_with_minbits_lossy

# Required module: import h5py [as alias]
# Or: from h5py import Dataset [as alias]
def test_int_with_minbits_lossy(self):
        """ Scaleoffset filter works for integer data with specified precision """

        nbits = 12
        shape = (100, 300)
        testdata = np.random.randint(0, 2**(nbits+1)-1, size=shape)

        dset = self.f.create_dataset('foo', shape, dtype=int, scaleoffset=nbits)

        # Dataset reports scaleoffset enabled with correct precision
        self.assertTrue(dset.scaleoffset == 12)

        # Data can be written and read
        dset[...] = testdata
        filename = self.f.filename
        self.f.close()
        self.f = h5py.File(filename, 'r')
        readdata = self.f['foo'][...]

        # Compression is lossy
        assert not (readdata == testdata).all() 
Developer: Relph1119, Project: GraphicDesignPatternByPython, Lines: 23, Source: test_dataset.py
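
Outside the test-case context of Examples 7-9 (where self.f is a file created by the test harness), the scaleoffset filter can be enabled directly on create_dataset; a standalone sketch with an arbitrary file name:

import h5py
import numpy as np

data = (np.random.rand(100, 300) - 0.5) * 2000.0

with h5py.File('scaleoffset_demo.h5', 'w') as f:
    # scaleoffset=2 keeps two decimal digits for floating point data;
    # anything beyond that precision is discarded (lossy compression).
    dset = f.create_dataset('foo', data=data, scaleoffset=2)
    print(dset.scaleoffset is not None)  # True

with h5py.File('scaleoffset_demo.h5', 'r') as f:
    readback = f['foo'][...]
    print(np.allclose(readback, data, atol=10**-2))  # True to the stored precision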

Example 10: __init__

# Required module: import h5py [as alias]
# Or: from h5py import Dataset [as alias]
def __init__(self, repo_path: Path, schema_shape: tuple, schema_dtype: np.dtype):
        self.path: Path = repo_path
        self.schema_shape: tuple = schema_shape
        self.schema_dtype: np.dtype = schema_dtype
        self._dflt_backend_opts: Optional[dict] = None

        self.rFp: HDF5_01_MapTypes = {}
        self.wFp: HDF5_01_MapTypes = {}
        self.Fp: HDF5_01_MapTypes = ChainMap(self.rFp, self.wFp)
        self.rDatasets = SizedDict(maxsize=100)
        self.wdset: Optional[h5py.Dataset] = None

        self.mode: Optional[str] = None
        self.hIdx: Optional[int] = None
        self.w_uid: Optional[str] = None
        self.hMaxSize: Optional[int] = None
        self.hNextPath: Optional[int] = None
        self.hColsRemain: Optional[int] = None

        self.STAGEDIR: Path = Path(self.path, DIR_DATA_STAGE, _FmtCode)
        self.REMOTEDIR: Path = Path(self.path, DIR_DATA_REMOTE, _FmtCode)
        self.DATADIR: Path = Path(self.path, DIR_DATA, _FmtCode)
        self.STOREDIR: Path = Path(self.path, DIR_DATA_STORE, _FmtCode)
        self.DATADIR.mkdir(exist_ok=True) 
Developer: tensorwerk, Project: hangar-py, Lines: 26, Source: hdf5_01.py

Example 11: __init__

# Required module: import h5py [as alias]
# Or: from h5py import Dataset [as alias]
def __init__(self, repo_path: Path, schema_shape: tuple, schema_dtype: np.dtype):
        self.path: Path = repo_path
        self.schema_shape: tuple = schema_shape
        self.schema_dtype: np.dtype = schema_dtype
        self._dflt_backend_opts: Optional[dict] = None

        self.rFp: HDF5_00_MapTypes = {}
        self.wFp: HDF5_00_MapTypes = {}
        self.Fp: HDF5_00_MapTypes = ChainMap(self.rFp, self.wFp)
        self.rDatasets = SizedDict(maxsize=100)
        self.wdset: Optional[h5py.Dataset] = None

        self.mode: Optional[str] = None
        self.hIdx: Optional[int] = None
        self.w_uid: Optional[str] = None
        self.hMaxSize: Optional[int] = None
        self.hNextPath: Optional[int] = None
        self.hColsRemain: Optional[int] = None

        self.STAGEDIR: Path = Path(self.path, DIR_DATA_STAGE, _FmtCode)
        self.REMOTEDIR: Path = Path(self.path, DIR_DATA_REMOTE, _FmtCode)
        self.STOREDIR: Path = Path(self.path, DIR_DATA_STORE, _FmtCode)
        self.DATADIR: Path = Path(self.path, DIR_DATA, _FmtCode)
        self.DATADIR.mkdir(exist_ok=True) 
Developer: tensorwerk, Project: hangar-py, Lines: 26, Source: hdf5_00.py

Example 12: h5py_dataset_iterator

# Required module: import h5py [as alias]
# Or: from h5py import Dataset [as alias]
def h5py_dataset_iterator(self,g, prefix=''):
        for key in g.keys():
            item = g[key]
            path = '{}/{}'.format(prefix, key)
            keys = [i for i in item.keys()]
            if isinstance(item[keys[0]], h5py.Dataset): # test for dataset
                data = {'path':path}
                for k in keys:
                    if not isinstance(item[k], h5py.Group):
                        # .value is deprecated (removed in h5py 3.x); use [()] instead
                        dataset = np.array(item[k][()])

                        if type(dataset) is np.ndarray:
                            if dataset.size != 0:
                                if type(dataset[0]) is np.bytes_:
                                    dataset = [a.decode('ascii') for a in dataset]

                        data.update({k:dataset})

                yield data
            else: # test for group (go down)
                yield from self.h5py_dataset_iterator(item, path) 
Developer: isayev, Project: ANI1_dataset, Lines: 23, Source: pyanitools.py

Example 13: test_force_dense_deprecated

# Required module: import h5py [as alias]
# Or: from h5py import Dataset [as alias]
def test_force_dense_deprecated(tmp_path):
    dense_pth = tmp_path / "dense.h5ad"
    adata = AnnData(X=sparse.random(10, 10, format="csr"))
    adata.raw = adata

    with pytest.warns(FutureWarning):
        adata.write_h5ad(dense_pth, force_dense=True)
    with h5py.File(dense_pth, "r") as f:
        assert isinstance(f["X"], h5py.Dataset)
        assert isinstance(f["raw/X"], h5py.Dataset)

    dense = ad.read_h5ad(dense_pth)

    assert isinstance(dense.X, np.ndarray)
    assert isinstance(dense.raw.X, np.ndarray)
    assert_equal(adata, dense)


#######################################
# Dealing with uns adj matrices
####################################### 
Developer: theislab, Project: anndata, Lines: 23, Source: test_deprecations.py

Example 14: test_sparse_to_dense_inplace

# Required module: import h5py [as alias]
# Or: from h5py import Dataset [as alias]
def test_sparse_to_dense_inplace(tmp_path, spmtx_format):
    pth = tmp_path / "adata.h5ad"
    orig = gen_adata((50, 50), spmtx_format)
    orig.raw = orig
    orig.write(pth)
    backed = ad.read_h5ad(pth, backed="r+")
    backed.write(as_dense=("X", "raw/X"))
    new = ad.read_h5ad(pth)

    assert_equal(orig, new)
    assert_equal(backed, new)

    assert isinstance(new.X, np.ndarray)
    assert isinstance(new.raw.X, np.ndarray)
    assert isinstance(orig.X, spmtx_format)
    assert isinstance(orig.raw.X, spmtx_format)
    assert isinstance(backed.X, h5py.Dataset)
    assert isinstance(backed.raw.X, h5py.Dataset) 
Developer: theislab, Project: anndata, Lines: 20, Source: test_io_conversion.py

Example 15: write_sparse_as_dense

# Required module: import h5py [as alias]
# Or: from h5py import Dataset [as alias]
def write_sparse_as_dense(f, key, value, dataset_kwargs=MappingProxyType({})):
    real_key = None  # Flag for if temporary key was used
    if key in f:
        if (
            isinstance(value, (h5py.Group, h5py.Dataset, SparseDataset))
            and value.file.filename == f.filename
        ):  # Write to temporary key before overwriting
            real_key = key
            # Transform key to temporary, e.g. raw/X -> raw/_X, or X -> _X
            key = re.sub(r"(.*)(\w(?!.*/))", r"\1_\2", key.rstrip("/"))
        else:
            del f[key]  # Wipe before write
    dset = f.create_dataset(key, shape=value.shape, dtype=value.dtype, **dataset_kwargs)
    compressed_axis = int(isinstance(value, sparse.csc_matrix))
    for idx in idx_chunks_along_axis(value.shape, compressed_axis, 1000):
        dset[idx] = value[idx].toarray()
    if real_key is not None:
        del f[real_key]
        f[real_key] = f[key]
        del f[key] 
Developer: theislab, Project: anndata, Lines: 22, Source: h5ad.py


Note: The h5py.Dataset examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by various developers, and copyright for the source code remains with the original authors. Please consult each project's license before distributing or using the code; do not reproduce this compilation without permission.