This article collects typical usage examples of the Python method h5py.check_dtype. If you are wondering what h5py.check_dtype does, how to call it, or simply want to see it in use, the curated examples below may help. You can also explore further usage examples of the h5py module.
The following presents 10 code examples of the h5py.check_dtype method, sorted by popularity by default.
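Before the examples, here is a minimal standalone sketch (not taken from any of the projects below) of what h5py.check_dtype reports: each keyword argument returns the corresponding metadata attached to a dtype by h5py.special_dtype, or None when the dtype carries none.

import h5py
import numpy as np

enum_dt = h5py.special_dtype(enum=('i', {'OK': 0, 'FAIL': 1}))
vlen_dt = h5py.special_dtype(vlen=np.float64)

print(h5py.check_dtype(enum=enum_dt))          # the enum mapping: {'OK': 0, 'FAIL': 1}
print(h5py.check_dtype(vlen=vlen_dt))          # the base type the vlen dtype was built from
print(h5py.check_dtype(enum=np.dtype('i4')))   # None: a plain dtype has no enum metadata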
Example 1: test_vlen_enum
# Required import: import h5py [as alias]
# Or: from h5py import check_dtype [as alias]
def test_vlen_enum(self):
    fname = self.mktemp()
    arr1 = [[1], [1, 2]]
    dt1 = h5py.special_dtype(vlen=h5py.special_dtype(
        enum=('i', dict(foo=1, bar=2))))

    with h5py.File(fname, 'w') as f:
        df1 = f.create_dataset('test', (len(arr1),), dtype=dt1)
        df1[:] = np.array(arr1)

    with h5py.File(fname, 'r') as f:
        df2 = f['test']
        dt2 = df2.dtype
        arr2 = [e.tolist() for e in df2[:]]

    self.assertEqual(arr1, arr2)
    self.assertEqual(h5py.check_dtype(enum=h5py.check_dtype(vlen=dt1)),
                     h5py.check_dtype(enum=h5py.check_dtype(vlen=dt2)))
Example 2: _check_valid_netcdf_dtype
# Required import: import h5py [as alias]
# Or: from h5py import check_dtype [as alias]
def _check_valid_netcdf_dtype(self, dtype, stacklevel=3):
    dtype = np.dtype(dtype)

    if dtype == bool:
        description = 'boolean'
    elif dtype == complex:
        description = 'complex'
    elif h5py.check_dtype(enum=dtype) is not None:
        description = 'enum'
    elif h5py.check_dtype(ref=dtype) is not None:
        description = 'reference'
    # `unicode` is the source module's Python 2/3 compatibility alias for text strings
    elif h5py.check_dtype(vlen=dtype) not in {None, unicode, bytes}:
        description = 'non-string variable length'
    else:
        description = None

    if description is not None:
        _invalid_netcdf_feature('{} dtypes'.format(description),
                                allow=self.invalid_netcdf,
                                file=self,
                                stacklevel=stacklevel + 1)
Example 3: _get_group_info
# Required import: import h5py [as alias]
# Or: from h5py import check_dtype [as alias]
def _get_group_info(path, grouppath, keys):
    with h5py.File(path, "r") as f:
        grp = f[grouppath]

        if keys is None:
            keys = list(grp.keys())

        nrows = len(grp[keys[0]])

        categoricals = {}
        for key in keys:
            dt = h5py.check_dtype(enum=grp[key].dtype)
            if dt is not None:
                categoricals[key] = sorted(dt, key=dt.__getitem__)

        # Meta is an empty dataframe that serves as a compound "dtype"
        meta = pd.DataFrame(
            {key: np.array([], dtype=grp[key].dtype) for key in keys},
            columns=keys
        )

        for key in categoricals:
            meta[key] = pd.Categorical([], categories=categoricals[key], ordered=True)

    return nrows, keys, meta, categoricals
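Example 3 above turns an HDF5 enum column into an ordered pandas Categorical. A self-contained way to exercise the same pattern is sketched below; the file name "demo.h5", group "grp", and column "state" are illustrative, not from the original project.

import h5py
import numpy as np
import pandas as pd

enum_dt = h5py.special_dtype(enum=('i1', {'low': 0, 'mid': 1, 'high': 2}))
with h5py.File("demo.h5", "w") as f:
    grp = f.create_group("grp")
    grp.create_dataset("state", data=np.array([0, 2, 1], dtype='i1'), dtype=enum_dt)

with h5py.File("demo.h5", "r") as f:
    mapping = h5py.check_dtype(enum=f["grp/state"].dtype)
    # sort label names by their integer codes, as _get_group_info does
    categories = sorted(mapping, key=mapping.__getitem__)
    col = pd.Categorical.from_codes(f["grp/state"][:], categories=categories, ordered=True)
    print(categories)   # ['low', 'mid', 'high']
    print(list(col))    # ['low', 'high', 'mid']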
Example 4: test_create
# Required import: import h5py [as alias]
# Or: from h5py import check_dtype [as alias]
def test_create(self):
    """ Enum datasets can be created and type correctly round-trips """
    dt = h5py.special_dtype(enum=('i', self.EDICT))
    ds = self.f.create_dataset('x', (100, 100), dtype=dt)
    dt2 = ds.dtype
    dict2 = h5py.check_dtype(enum=dt2)
    self.assertEqual(dict2, self.EDICT)
Example 5: test_compound
# Required import: import h5py [as alias]
# Or: from h5py import check_dtype [as alias]
def test_compound(self):
    fields = []
    fields.append(('field_1', h5py.special_dtype(vlen=str)))
    fields.append(('field_2', np.int32))
    dt = np.dtype(fields)
    self.f['mytype'] = np.dtype(dt)
    dt_out = self.f['mytype'].dtype.fields['field_1'][0]
    self.assertEqual(h5py.check_dtype(vlen=dt_out), str)
Example 6: get_annotations
# Required import: import h5py [as alias]
# Or: from h5py import check_dtype [as alias]
def get_annotations(path, fields, enum_field):
    # `path` is a dataset (or structured array) with a compound dtype, indexed by field name
    data_labels = {}
    for field in fields:
        data_labels[field] = path[field]

    data_dtypes = {}
    if h5py.check_dtype(enum=path.dtype[enum_field]):
        dataset_dtype = h5py.check_dtype(enum=path.dtype[enum_field])
        # inverting the mapping may drop entries if several keys share the same value 'v'
        data_dtypes = {v: k for k, v in dataset_dtype.items()}
    labels_df = pd.DataFrame(data=data_labels)
    return labels_df, data_dtypes
Example 7: get_annotations
# Required import: import h5py [as alias]
# Or: from h5py import check_dtype [as alias]
def get_annotations(path, enum_field):
    data_dtypes = {}
    if h5py.check_dtype(enum=path.dtype[enum_field]):
        data_dtypes = h5py.check_dtype(enum=path.dtype[enum_field])
    return data_dtypes
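Examples 6 and 7 read the enum mapping from a single field of a compound dtype, hence the path.dtype[enum_field] indexing. A self-contained sketch of that situation follows; the dataset and field names are illustrative, not from the original project.

import h5py
import numpy as np

label_dt = h5py.special_dtype(enum=('i1', {'pass': 0, 'fail': 1}))
compound = np.dtype([('read_id', '<i8'), ('label', label_dt)])

with h5py.File("demo.h5", "w") as f:
    ds = f.create_dataset("annotations", (3,), dtype=compound)
    # the enum mapping is attached to the 'label' member, not to the compound dtype itself
    mapping = h5py.check_dtype(enum=ds.dtype['label'])
    print(mapping)   # the {'pass': 0, 'fail': 1} mapping recovered from the field dtype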
Example 8: dtype
# Required import: import h5py [as alias]
# Or: from h5py import check_dtype [as alias]
def dtype(self):
    dt = self._h5ds.dtype
    # report variable-length text datasets as the plain Python str type
    if h5py.check_dtype(vlen=dt) is unicode:
        return str
    return dt
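The vlen check in Example 8 is what distinguishes variable-length text datasets from everything else. A short standalone sketch of that distinction (names are illustrative):

import h5py
import numpy as np

str_dt = h5py.special_dtype(vlen=str)        # variable-length unicode strings
seq_dt = h5py.special_dtype(vlen=np.int32)   # variable-length integer sequences

print(h5py.check_dtype(vlen=str_dt))             # <class 'str'>
print(h5py.check_dtype(vlen=seq_dt))             # the base type of the ragged data
print(h5py.check_dtype(vlen=np.dtype('f8')))     # None: not a vlen dtype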
Example 9: test_put
# Required import: import h5py [as alias]
# Or: from h5py import check_dtype [as alias]
def test_put():
    f = make_hdf5_table('a')

    # append
    df = pd.DataFrame({
        'chrom': ['chr3', 'chr3'],
        'start': [0, 20],
        'end': [20, 40],
        'value': [4.0, 5.0],
    })
    core.put(f['table'], df, lo=5)
    f.flush()
    out = core.get(f['table'])
    assert len(out) == 7

    # insert a categorical column
    s = pd.Series(pd.Categorical(out['chrom'], ordered=True), index=out.index)
    s.name = 'chrom_enum'
    core.put(f['table'], s)
    assert h5py.check_dtype(enum=f['table/chrom_enum'].dtype)
    out = core.get(f['table'])
    assert len(out.columns) == 5
    assert pd.api.types.is_categorical_dtype(out['chrom_enum'].dtype)
    out = core.get(f['table'], convert_enum=False)
    assert len(out.columns) == 5
    assert pd.api.types.is_integer_dtype(out['chrom_enum'].dtype)

    # don't convert categorical to enum
    s.name = 'chrom_string'
    core.put(f['table'], s, store_categories=False)
    out = core.get(f['table'])
    assert len(out.columns) == 6
    assert not pd.api.types.is_categorical_dtype(out['chrom_string'].dtype)

    # scalar input
    core.put(f['table'], {'foo': 42})
    out = core.get(f['table'])
    assert len(out.columns) == 7
    assert (out['foo'] == 42).all()
Example 10: get_values
# Required import: import h5py [as alias]
# Or: from h5py import check_dtype [as alias]
def get_values(
    self, queries: List[Dict[str, Union[str, bool]]], subset: Optional[List[str]] = None
) -> Tuple[pd.DataFrame, Dict[str, str]]:
    """
    Parameters
    ----------
    subset
    queries: List[Dict[str, Union[str, bool]]]
        List of queries. Fields actually used are native, name, driver
    """
    import h5py

    units = {}
    entries = self.get_index(subset)
    indexes = entries._h5idx

    with self._read_file() as f:
        ret = pd.DataFrame(index=entries["index"])
        for query in queries:
            dataset_name = "value/" if query["native"] else "contributed_value/"
            dataset_name += self._normalize_hdf5_name(query["name"])
            driver = query["driver"]
            dataset = f[dataset_name]

            if not h5py.check_dtype(vlen=dataset.dtype):
                data = [dataset[i] for i in indexes]
            else:
                if driver.lower() == "gradient":
                    data = [np.reshape(dataset[i], (-1, 3)) for i in indexes]
                elif driver.lower() == "hessian":
                    data = []
                    for i in indexes:
                        n2 = len(dataset[i])
                        n = int(round(np.sqrt(n2)))
                        data.append(np.reshape(dataset[i], (n, n)))
                else:
                    warnings.warn(
                        f"Variable length data type not understood, returning flat array "
                        f"(driver = {driver}).",
                        RuntimeWarning,
                    )
                    try:
                        data = [np.array(dataset[i]) for i in indexes]
                    except ValueError:
                        data = [dataset[i] for i in indexes]

            column_name = query["name"]
            column_units = self._deserialize_field(dataset.attrs["units"])
            ret[column_name] = data
            units[column_name] = column_units

    return ret, units
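The branching in Example 10, checking whether a dataset's dtype is variable-length before deciding how to read it, can be reduced to a small generic helper. A sketch under the assumption that ragged rows should simply come back as one array per element (the helper name and file layout are illustrative, not part of any of the APIs above):

import h5py
import numpy as np

def read_column(dataset):
    """Read an h5py dataset, handling vlen (ragged) dtypes element by element."""
    if h5py.check_dtype(vlen=dataset.dtype) is None:
        return dataset[...]                              # fixed shape: read in one call
    return [np.asarray(row) for row in dataset]          # ragged: one array per row

with h5py.File("demo.h5", "w") as f:
    f.create_dataset("fixed", data=np.arange(6).reshape(2, 3))
    ragged = f.create_dataset("ragged", (2,), dtype=h5py.special_dtype(vlen=np.float64))
    ragged[0] = [1.0, 2.0]
    ragged[1] = [3.0, 4.0, 5.0]

    print(read_column(f["fixed"]).shape)                 # (2, 3)
    print([r.shape for r in read_column(f["ragged"])])   # [(2,), (3,)]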