本文整理汇总了Python中h5py.__version__方法的典型用法代码示例。如果您正苦于以下问题:Python h5py.__version__方法的具体用法?Python h5py.__version__怎么用?Python h5py.__version__使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该属性所在模块h5py
的用法示例。
在下文中一共展示了h5py.__version__方法的13个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_optional_netcdf4_attrs
# Requires: import h5py  (or: from h5py import __version__)
def test_optional_netcdf4_attrs(tmp_local_or_remote_netcdf):
    """HDF5 dimension scales attached by hand should surface as netCDF dims."""
    h5 = get_hdf5_module(tmp_local_or_remote_netcdf)
    expected = np.arange(50).reshape(5, 10)
    dim_names = ('x', 'y')
    with h5.File(tmp_local_or_remote_netcdf, 'w') as hfile:
        hfile.create_dataset('foo', data=expected)
        hfile.create_dataset('x', data=np.arange(5))
        hfile.create_dataset('y', data=np.arange(10))
        # First mark every coordinate dataset as a scale...
        for name in dim_names:
            # h5py 2.10 replaced dims.create_scale with Dataset.make_scale.
            if h5py.__version__ < LooseVersion('2.10.0'):
                hfile['foo'].dims.create_scale(hfile[name])
            else:
                hfile[name].make_scale()
        # ...then attach each scale to the matching axis of 'foo'.
        for axis, name in enumerate(dim_names):
            hfile['foo'].dims[axis].attach_scale(hfile[name])
    with h5netcdf.File(tmp_local_or_remote_netcdf, 'r') as ds:
        assert ds['foo'].dimensions == ('x', 'y')
        assert ds.dimensions == {'x': 5, 'y': 10}
        assert array_equal(ds['foo'], expected)
示例2: test_invalid_netcdf_malformed_dimension_scales
# Requires: import h5py  (or: from h5py import __version__)
def test_invalid_netcdf_malformed_dimension_scales(tmp_local_or_remote_netcdf):
    """A variable with only some axes attached to scales must be rejected."""
    h5 = get_hdf5_module(tmp_local_or_remote_netcdf)
    scale_names = ('x', 'y', 'z')
    with h5.File(tmp_local_or_remote_netcdf, 'w') as hfile:
        hfile.create_dataset('foo1', data=np.arange(125).reshape(5, 5, 5))
        for name in scale_names:
            hfile.create_dataset(name, data=np.arange(5))
        for name in scale_names:
            # h5py 2.10 replaced dims.create_scale with Dataset.make_scale.
            if h5py.__version__ < LooseVersion('2.10.0'):
                hfile['foo1'].dims.create_scale(hfile[name])
            else:
                hfile[name].make_scale()
        # Deliberately attach only the first axis: malformed on purpose.
        hfile['foo1'].dims[0].attach_scale(hfile['x'])
    with raises(ValueError):
        with h5netcdf.File(tmp_local_or_remote_netcdf, 'r',
                           phony_dims='sort') as ds:
            pass
示例3: ge_pkg_versions
# Requires: import h5py  (or: from h5py import __version__)
def ge_pkg_versions():
    """Collect versions of the runtime environment and key dependencies.

    Returns a dict mapping component name (driver, cuda, cudnn, platform,
    python, torch, numpy, h5py, json, ortools, torchvision) to its version
    string/value; 'cuda' is 'NA' when no version.txt can be located.
    """
    versions = {'display': run_command('cat /proc/driver/nvidia/version')}
    versions['cuda'] = 'NA'
    # CUDA_HOME overrides the conventional install prefix when set.
    cuda_home = os.environ.get('CUDA_HOME', '/usr/local/cuda/')
    version_file = cuda_home + '/version.txt'
    if os.path.isfile(version_file):
        versions['cuda'] = run_command('cat ' + version_file)
    versions['cudnn'] = torch.backends.cudnn.version()
    versions['platform'] = platform.platform()
    versions['python'] = sys.version_info[:3]
    versions['torch'] = torch.__version__
    versions['numpy'] = np.__version__
    versions['h5py'] = h5py.__version__
    versions['json'] = json.__version__
    versions['ortools'] = ortools.__version__
    versions['torchvision'] = pkg_resources.get_distribution("torchvision").version
    return versions
示例4: check_version_compatibility
# Requires: import h5py  (or: from h5py import __version__)
def check_version_compatibility():
    """Raise EnvironmentError for the known-bad pairing h5py<2.7 + numpy>=1.13.

    That combination has documented bugs decoding mixed/padded string arrays,
    so we refuse to proceed rather than silently corrupt data.
    """
    if parse_version(h5py.__version__) < parse_version("2.7") \
            and parse_version(np.__version__) >= parse_version("1.13"):
        # Fixed typo in the user-facing message: "verions" -> "versions".
        raise EnvironmentError("Incompatible h5py=={} and numpy=={} versions detected. \n"
                               "Array reading/decoding may not proceed as expected. \n"
                               "Please upgrade to the latest compatible versions"
                               "".format(h5py.__version__, np.__version__))
示例5: _sanitize_data_for_writing
# Requires: import h5py  (or: from h5py import __version__)
def _sanitize_data_for_writing(data):
    """Encode unicode strings into byte strings so h5py can store them.

    Handles plain ``str``, homogeneous unicode ndarrays, and structured
    (mixed-dtype) ndarrays field by field. Anything else is returned
    unchanged.

    Raises:
        UnicodeError: on h5py<2.7 when a structured array cannot be encoded
            (known upstream bugs).
    """
    # To make the interface more user friendly we encode python strings as
    # byte-strings when writing datasets.
    check_version_compatibility()
    if isinstance(data, str):
        # Plain python-strings can be encoded trivially.
        return data.encode()
    elif isinstance(data, np.ndarray) and data.dtype.kind == 'U':
        # BUGFIX: the original compared ``dtype.kind`` (a one-character code)
        # against ``np.dtype(np.unicode)``; ``np.unicode`` was removed in
        # NumPy 1.24, making this an AttributeError on modern NumPy. 'U' is
        # the documented kind code for unicode arrays and mirrors the 'S'
        # check in the reading path.
        return data.astype('S')
    elif isinstance(data, np.ndarray) and len(data.dtype) > 1:
        # NOTE(review): single-field structured arrays (len == 1) fall
        # through unencoded — preserved from the original; confirm intended.
        # For mixed types we have to set the encoding column by column.
        encoded_dtypes = []
        for field_name in data.dtype.names:
            field_dtype, field_byte_index = data.dtype.fields[field_name]
            if field_dtype.kind == 'U':
                # itemsize counts bytes; dividing by the per-character
                # alignment recovers the string length for the 'S' dtype.
                str_len = field_dtype.itemsize // field_dtype.alignment
                field_dtype = np.dtype("|S{}".format(str_len))
            encoded_dtypes.append((field_name, field_dtype))
        try:
            return data.astype(encoded_dtypes)
        except (ValueError, UnicodeEncodeError):
            if parse_version(h5py.__version__) < parse_version("2.7"):
                raise UnicodeError("Cannot encode array with types: {}.\n"
                                   "There are known bugs in h5py<2.7 which yield non-deteministic results when decoding "
                                   "arrays with empty strings and additional bugs with compatibility between "
                                   "h5py<2.7 and numpy>=1.13 when decoding arrays with mixed/padded data types.\n"
                                   "Please try upgrading to the latest h5py and numpy versions"
                                   "".format(encoded_dtypes))
            else:
                raise
    return data
示例6: _sanitize_data_for_reading
# Requires: import h5py  (or: from h5py import __version__)
def _sanitize_data_for_reading(data):
    """Decode byte strings into unicode strings when reading datasets.

    Accepts an ``h5py.Dataset`` (materialized first), plain ``bytes``,
    homogeneous byte-string ndarrays, or structured (mixed-dtype) ndarrays,
    which are decoded field by field. Anything else is returned unchanged.

    Raises:
        UnicodeError: on h5py<2.7 when a structured array cannot be decoded
            (known upstream bugs).
    """
    check_version_compatibility()
    if isinstance(data, h5py.Dataset):
        # Pull the dataset into memory as a numpy array / scalar.
        data = data[()]
    if isinstance(data, bytes):
        # Plain byte-strings can be decoded trivially.
        return data.decode()
    elif isinstance(data, np.ndarray) and data.dtype.kind == 'S':
        # Homogeneous byte-string array: decode in one vectorized call.
        return np.char.decode(data)
    elif isinstance(data, np.ndarray) and len(data.dtype) > 1:
        # Mixed types: decode column by column.
        decoded_dtypes = []
        for field_name in data.dtype.names:
            field_dtype, field_byte_index = data.dtype.fields[field_name]
            if field_dtype.kind == 'S':
                field_dtype = np.dtype("<U{}".format(field_dtype.itemsize))
            decoded_dtypes.append((field_name, field_dtype))
        try:
            return data.astype(decoded_dtypes)
        except (UnicodeDecodeError, SystemError):
            # On h5py==2.6 we can't decode padded string-arrays properly -
            # advise users to upgrade.
            if parse_version(h5py.__version__) < parse_version("2.7"):
                # BUGFIX: message said "Cannot encode" - this is the decode
                # path (copy-paste from the writing helper).
                raise UnicodeError("Cannot decode array with types: {}.\n"
                                   "There are known bugs in h5py<2.7 which yield non-deteministic results when decoding "
                                   "arrays with empty strings and additional bugs with compatibility between "
                                   "h5py<2.7 and numpy>=1.13 when decoding arrays with mixed/padded data types.\n"
                                   "Please try upgrading to the latest h5py and numpy versions".format(decoded_dtypes))
            else:
                raise
    return data
示例7: test_real_example_file
# Requires: import h5py  (or: from h5py import __version__)
def test_real_example_file(self):
    """Sanitizing a real RLE basecall table decodes its 'base' byte column."""
    path = os.path.join(test_data, 'rle_basecall_table', 'rle_example.fast5')
    with MultiFast5File(path, 'r') as mf5:
        for read in mf5.get_reads():
            raw = read.handle['Analyses/Basecall_1D_000/BaseCalled_template/RunlengthBasecall']
            expected_dtypes = [('base', '<U1'),  # After cleaning this is a unicode string
                               ('scale', '<f4'),
                               ('shape', '<f4'),
                               ('weight', '<f4'),
                               ('index', '<u4'),
                               ('runlength', '<u4')]
            for field, expected_type in expected_dtypes:
                if field == 'base':
                    # Before cleaning the 'base' column is a length-1 byte-string.
                    self.assertEqual(dtype('|S1'), raw[field].dtype)
                else:
                    self.assertEqual(dtype(expected_type), raw[field].dtype)
            try:
                self.assertEqual(dtype(expected_dtypes),
                                 _sanitize_data_for_reading(raw).dtype)
            except UnicodeError:
                if parse_version(h5py.__version__) >= parse_version("2.7"):
                    raise
                # h5py==2.6 often fails to decode these arrays correctly.
示例8: check_h5py_module
# Requires: import h5py  (or: from h5py import __version__)
def check_h5py_module():
    """Make sure the h5py module is importable, or raise ConfigError.

    h5py was removed from anvi'o dependencies, but some migration scripts
    still need it when the user has very old databases; in those cases the
    user must install it manually.

    Raises:
        ConfigError: if h5py cannot be imported (or lacks ``__version__``).
    """
    try:
        import h5py
        h5py.__version__
    # BUGFIX: was a bare ``except:`` which also swallowed SystemExit /
    # KeyboardInterrupt; only the failures we actually expect are caught.
    except (ImportError, AttributeError):
        raise ConfigError("Please install the Python module `h5py` manually for this migration task to continue. "
                          "The reason why the standard anvi'o installation did not install module is complicated, "
                          "and really unimportant. If you run `pip install h5py` in your Python virtual environmnet "
                          "for anvi'o, and try running the migration program again things should be alright.")
示例9: __version__
# Requires: import h5py  (or: from h5py import __version__)
def __version__(self):
    """Lazily resolve astropy's version string, caching the result.

    If astropy is not importable, a ``NotAModule`` placeholder is cached
    and returned instead.
    """
    if self._version is not None:
        return self._version
    try:
        import astropy
    except ImportError:
        self._version = NotAModule(self._name)
    else:
        self._version = astropy.__version__
    return self._version
示例10: _create_dim_scales
# Required module: import h5py
# Alternative: from h5py import __version__
def _create_dim_scales(self):
    """Create all necessary HDF5 dimension scales, recursing into subgroups.

    For each dimension in this group: create its backing dataset if missing,
    stamp the netCDF-4 bookkeeping attributes, and register the dataset as an
    HDF5 dimension scale via the h5py API appropriate to the installed
    version.
    """
    dim_order = self._dim_order.maps[0]
    # Process dimensions in the order of their assigned ids so attribute
    # values are deterministic.
    for dim in sorted(dim_order, key=lambda d: dim_order[d]):
        if dim not in self._h5group:
            size = self._current_dim_sizes[dim]
            kwargs = {}
            if self._dim_sizes[dim] is None:
                # Unlimited dimension: make the backing dataset resizable.
                kwargs["maxshape"] = (None,)
            self._h5group.create_dataset(
                name=dim, shape=(size,), dtype='S1', **kwargs)
        h5ds = self._h5group[dim]
        h5ds.attrs['_Netcdf4Dimid'] = dim_order[dim]
        if len(h5ds.shape) > 1:
            # Multi-dimensional coordinate variable: record the ids of every
            # dimension it spans.
            dims = self._variables[dim].dimensions
            coord_ids = np.array([dim_order[d] for d in dims], 'int32')
            h5ds.attrs['_Netcdf4Coordinates'] = coord_ids
        # TODO: don't re-create scales if they already exist. With the
        # current version of h5py, this would require using the low-level
        # h5py.h5ds.is_scale interface to detect pre-existing scales.
        scale_name = dim if dim in self.variables else NOT_A_VARIABLE
        # h5py 2.10 replaced dims.create_scale with Dataset.make_scale.
        if h5py.__version__ < LooseVersion('2.10.0'):
            h5ds.dims.create_scale(h5ds, scale_name)
        else:
            h5ds.make_scale(scale_name)
    for subgroup in self.groups.values():
        subgroup._create_dim_scales()
示例11: test_fileobj
# Requires: import h5py  (or: from h5py import __version__)
def test_fileobj():
    """Round-trip h5netcdf through file-like objects (needs h5py >= 2.9)."""
    if h5py.__version__ < LooseVersion('2.9.0'):
        # BUGFIX: the skip message said "h5py > 2.9.0 required", contradicting
        # the ``< 2.9.0`` check above; the actual requirement is >= 2.9.0.
        pytest.skip('h5py >= 2.9.0 required to test file-like objects')
    # BUGFIX: the temporary file was never closed; use a context manager.
    with tempfile.TemporaryFile() as fileobj:
        write_h5netcdf(fileobj)
        read_h5netcdf(fileobj, h5netcdf)
    fileobj = io.BytesIO()
    write_h5netcdf(fileobj)
    read_h5netcdf(fileobj, h5netcdf)
示例12: test_invalid_netcdf4_mixed
# Requires: import h5py  (or: from h5py import __version__)
def test_invalid_netcdf4_mixed(tmp_local_or_remote_netcdf):
    """Mixed valid/invalid dimension scales behave per phony_dims mode."""
    h5 = get_hdf5_module(tmp_local_or_remote_netcdf)
    scale_names = ('x1', 'y1', 'z1')
    with h5.File(tmp_local_or_remote_netcdf, 'w') as hfile:
        var, var2 = create_invalid_netcdf_data()
        for name, value in var.items():
            hfile.create_dataset(name, data=value)
        for name, length in var2.items():
            hfile.create_dataset(name, data=np.arange(length))
        # Mark every coordinate dataset as a scale...
        for name in scale_names:
            # h5py 2.10 replaced dims.create_scale with Dataset.make_scale.
            if h5py.__version__ < LooseVersion('2.10.0'):
                hfile['foo2'].dims.create_scale(hfile[name])
            else:
                hfile[name].make_scale()
        # ...then attach each one to the matching axis of 'foo2'.
        for axis, name in enumerate(scale_names):
            hfile['foo2'].dims[axis].attach_scale(hfile[name])
    with h5netcdf.File(tmp_local_or_remote_netcdf, 'r',
                       phony_dims='sort') as ds:
        check_invalid_netcdf4_mixed(ds.variables, 3)
    with h5netcdf.File(tmp_local_or_remote_netcdf, 'r',
                       phony_dims='access') as ds:
        check_invalid_netcdf4_mixed(ds.variables, 0)
    with netCDF4.Dataset(tmp_local_or_remote_netcdf, 'r') as ds:
        check_invalid_netcdf4_mixed(ds.variables, 3)
    # Without phony_dims, accessing the malformed variable must fail.
    with h5netcdf.File(tmp_local_or_remote_netcdf, 'r') as ds:
        with raises(ValueError):
            ds.variables['foo1'].dimensions
示例13: _todict
# Requires: import h5py  (or: from h5py import __version__)
def _todict(self):
    """Return the dataset's defining attributes as a plain dict for saving.

    The '__version__' entry records the sima version that wrote the dump.
    """
    return dict(
        savedir=abspath(self.savedir),
        channel_names=self.channel_names,
        num_frames=self.num_frames,
        frame_shape=self.frame_shape,
        num_sequences=self.num_sequences,
        __version__=sima.__version__,
    )