This article collects typical usage examples of the h5py.special_dtype method in Python. If you are wondering exactly what h5py.special_dtype does, how to call it, or what real-world uses look like, the curated code samples below should help. You can also explore further usage examples from the h5py module.
The following sections present 15 code examples of h5py.special_dtype, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
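Before diving into the examples, here is a minimal orientation sketch (written for this article, not taken from any of the projects below): special_dtype wraps an HDF5 variable-length or enumerated type in a NumPy dtype, and recent h5py releases also expose the same functionality as h5py.vlen_dtype, h5py.string_dtype and h5py.enum_dtype. The file name below is hypothetical.

import h5py
import numpy as np

str_dt = h5py.special_dtype(vlen=str)                  # variable-length strings
int_dt = h5py.special_dtype(vlen=np.dtype('int32'))    # ragged int32 arrays
enum_dt = h5py.special_dtype(enum=(np.uint8, {'OFF': 0, 'ON': 1}))

with h5py.File('example.h5', 'w') as f:                # hypothetical file name
    names = f.create_dataset('names', (2,), dtype=str_dt)
    names[:] = ['alpha', 'beta']
    ragged = f.create_dataset('ragged', (2,), dtype=int_dt)
    ragged[0] = np.arange(3)                           # rows may have different lengths
    ragged[1] = np.arange(5)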
Example 1: set_predicted_description
# Required imports: import h5py [as alias]
# Or: from h5py import special_dtype [as alias]
def set_predicted_description(self, split, data_key, sentence):
    '''
    Set the predicted sentence tokens in the data_key group,
    creating the group if necessary, or erasing the current value if
    necessary.
    '''
    if self.openmode != "r+":
        # forcefully quit when trying to write to a read-only file
        raise RuntimeError("Dataset is read-only, try again with --h5_writable")

    dataset_key = 'predicted_description'
    try:
        # vlen=unicode works on Python 2 only; on Python 3 use vlen=str
        predicted_text = self.dataset[split][data_key].create_dataset(
            dataset_key, (1,), dtype=h5py.special_dtype(vlen=unicode))
    except RuntimeError:
        # the dataset already exists, erase it and create an empty space
        del self.dataset[split][data_key][dataset_key]
        predicted_text = self.dataset[split][data_key].create_dataset(
            dataset_key, (1,), dtype=h5py.special_dtype(vlen=unicode))
    predicted_text[0] = " ".join([x for x in sentence])
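For context, a minimal read-back sketch (an assumption based on the group layout above, not part of the original project): the stored sentence can be retrieved from the same group.

# 'dataset', 'split' and 'data_key' are hypothetical handles matching the method above
predicted = dataset[split][data_key]['predicted_description'][0]
print(predicted)   # the space-joined sentence written by set_predicted_description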
Example 2: finish_chunck
# Required imports: import h5py [as alias]
# Or: from h5py import special_dtype [as alias]
def finish_chunck(self):
    if len(self.text) == 0:
        return

    codec = self.compute_codec()

    filename = "{}_{:03d}{}".format(self.output_filename, self.current_chunk,
                                    DataSetType.gt_extension(DataSetType.HDF5))
    self.files.append(filename)
    file = h5py.File(filename, 'w')
    dti32 = h5py.special_dtype(vlen=np.dtype('int32'))
    dtui8 = h5py.special_dtype(vlen=np.dtype('uint8'))
    file.create_dataset('transcripts', (len(self.text),), dtype=dti32, compression='gzip')
    file.create_dataset('images_dims', data=[d.shape for d in self.data], dtype=int)
    file.create_dataset('images', (len(self.text),), dtype=dtui8, compression='gzip')
    file.create_dataset('codec', data=list(map(ord, codec)))
    file['transcripts'][...] = [list(map(codec.index, d)) for d in self.text]
    file['images'][...] = [d.reshape(-1) for d in self.data]
    file.close()

    self.current_chunk += 1

    self.data = []
    self.text = []
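A minimal read-back sketch for one such chunk (an assumption based on the layout written above, not part of the original project): images_dims undoes the flattening and codec maps transcript indices back to characters.

with h5py.File(filename, 'r') as f:        # one of the chunk files written above
    codec = ''.join(chr(c) for c in f['codec'][...])
    dims = f['images_dims'][...]
    first_image = f['images'][0].reshape(dims[0])
    first_text = ''.join(codec[i] for i in f['transcripts'][0])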
Example 3: _hfd5_from_dataframe
# Required imports: import h5py [as alias]
# Or: from h5py import special_dtype [as alias]
def _hfd5_from_dataframe(ratings, movies, outputfilename):
    # transform ratings dataframe into a sparse matrix
    m = coo_matrix((ratings['rating'].astype(np.float32),
                    (ratings['movieId'], ratings['userId']))).tocsr()

    with h5py.File(outputfilename, "w") as f:
        # write out the ratings matrix
        g = f.create_group('movie_user_ratings')
        g.create_dataset("data", data=m.data)
        g.create_dataset("indptr", data=m.indptr)
        g.create_dataset("indices", data=m.indices)

        # write out the titles as a numpy array
        # (np.object was removed in NumPy 1.24; plain dtype=object works on recent versions)
        titles = np.empty(shape=(movies.movieId.max()+1,), dtype=np.object)
        titles[movies.movieId] = movies.title

        dt = h5py.special_dtype(vlen=str)
        dset = f.create_dataset('movie', (len(titles),), dtype=dt)
        dset[:] = titles
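A minimal sketch of the reverse direction (an assumption, not from the original project): the three CSR arrays stored in the group can be handed straight back to scipy. The same pattern applies to Examples 4 through 6, which differ only in group and dataset names.

import h5py
from scipy.sparse import csr_matrix

with h5py.File(outputfilename, 'r') as f:
    g = f['movie_user_ratings']
    ratings = csr_matrix((g['data'][...], g['indices'][...], g['indptr'][...]))
    titles = f['movie'][...]               # variable-length movie titles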
Example 4: _hfd5_from_dataframe
# Required imports: import h5py [as alias]
# Or: from h5py import special_dtype [as alias]
def _hfd5_from_dataframe(data, outputfilename):
    # create a sparse matrix of all the users/plays
    plays = coo_matrix((data['plays'].astype(np.float32),
                        (data['artist'].cat.codes.copy(),
                         data['user'].cat.codes.copy()))).tocsr()

    with h5py.File(outputfilename, "w") as f:
        g = f.create_group('artist_user_plays')
        g.create_dataset("data", data=plays.data)
        g.create_dataset("indptr", data=plays.indptr)
        g.create_dataset("indices", data=plays.indices)

        dt = h5py.special_dtype(vlen=str)
        artist = list(data['artist'].cat.categories)
        dset = f.create_dataset('artist', (len(artist),), dtype=dt)
        dset[:] = artist

        user = list(data['user'].cat.categories)
        dset = f.create_dataset('user', (len(user),), dtype=dt)
        dset[:] = user
Example 5: _hfd5_from_dataframe
# Required imports: import h5py [as alias]
# Or: from h5py import special_dtype [as alias]
def _hfd5_from_dataframe(data, outputfilename):
    items = data['mid'].cat.codes.copy()
    users = data['uid'].cat.codes.copy()
    values = np.ones(len(items)).astype(np.float32)

    # create a sparse matrix of all the item/users/likes
    likes = coo_matrix((values, (items, users))).astype(np.float32).tocsr()

    with h5py.File(outputfilename, "w") as f:
        g = f.create_group('item_user_likes')
        g.create_dataset("data", data=likes.data)
        g.create_dataset("indptr", data=likes.indptr)
        g.create_dataset("indices", data=likes.indices)

        dt = h5py.special_dtype(vlen=str)
        item = list(data['mid'].cat.categories)
        dset = f.create_dataset('item', (len(item),), dtype=dt)
        dset[:] = item

        user = list(data['uid'].cat.categories)
        dset = f.create_dataset('user', (len(user),), dtype=dt)
        dset[:] = user
Example 6: _hfd5_from_dataframe
# Required imports: import h5py [as alias]
# Or: from h5py import special_dtype [as alias]
def _hfd5_from_dataframe(data, track_info, outputfilename):
    # create a sparse matrix of all the users/plays
    plays = coo_matrix((data['plays'].astype(np.float32),
                        (data['track'].cat.codes.copy(),
                         data['user'].cat.codes.copy()))).tocsr()

    with h5py.File(outputfilename, "w") as f:
        g = f.create_group('track_user_plays')
        g.create_dataset("data", data=plays.data)
        g.create_dataset("indptr", data=plays.indptr)
        g.create_dataset("indices", data=plays.indices)

        dt = h5py.special_dtype(vlen=str)
        dset = f.create_dataset('track', track_info.shape, dtype=dt)
        dset[:] = track_info

        user = list(data['user'].cat.categories)
        dset = f.create_dataset('user', (len(user),), dtype=dt)
        dset[:] = user
Example 7: write_data
# Required imports: import h5py [as alias]
# Or: from h5py import special_dtype [as alias]
def write_data(h5py_file, mode, x_paths, y_paths):
    num_data = len(x_paths)

    uint8_dt = h5py.special_dtype(vlen=np.uint8)
    string_dt = h5py.special_dtype(vlen=str)

    group = h5py_file.create_group(mode)
    h5_name = group.create_dataset('name', shape=(num_data,), dtype=string_dt)
    h5_image = group.create_dataset('image', shape=(num_data,), dtype=uint8_dt)
    h5_label = group.create_dataset('label', shape=(num_data,), dtype=uint8_dt)

    h5_image.attrs['size'] = [256, 512, 3]
    h5_label.attrs['size'] = [256, 512, 1]

    for i in range(num_data):
        x_img = cv2.imread(x_paths[i], 1)
        y_img = cv2.imread(y_paths[i], 0)

        x_img = cv2.resize(x_img, None, fx=0.25, fy=0.25, interpolation=cv2.INTER_LINEAR)
        y_img = cv2.resize(y_img, None, fx=0.25, fy=0.25, interpolation=cv2.INTER_NEAREST)

        h5_image[i] = x_img.flatten()
        h5_label[i] = y_img.flatten()
        h5_name[i] = os.path.basename(x_paths[i])
        # break
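A minimal read-back sketch (an assumption based on the layout above, not part of the original project): the size attribute restores the shape that flatten() discarded. File and group names are hypothetical.

with h5py.File('dataset.h5', 'r') as f:    # hypothetical file written via write_data
    images = f['train']['image']           # 'train' is a hypothetical mode name
    img = images[0].reshape(images.attrs['size'])
    name = f['train']['name'][0]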
Example 8: test_int
# Required imports: import h5py [as alias]
# Or: from h5py import special_dtype [as alias]
def test_int(self):
    dt = h5py.special_dtype(vlen=int)
    ds = self.f.create_dataset('vlen', (4,), dtype=dt)
    ds[0] = np.arange(3)
    ds[1] = np.arange(0)
    ds[2] = [1, 2, 3]
    ds[3] = np.arange(1)

    self.assertArrayEqual(ds[0], np.arange(3))
    self.assertArrayEqual(ds[1], np.arange(0))
    self.assertArrayEqual(ds[2], np.array([1, 2, 3]))
    self.assertArrayEqual(ds[1], np.arange(0))

    ds[0:2] = np.array([np.arange(5), np.arange(4)])
    self.assertArrayEqual(ds[0], np.arange(5))
    self.assertArrayEqual(ds[1], np.arange(4))

    ds[0:2] = np.array([np.arange(3), np.arange(3)])
    self.assertArrayEqual(ds[0], np.arange(3))
    self.assertArrayEqual(ds[1], np.arange(3))
Example 9: test_convert
# Required imports: import h5py [as alias]
# Or: from h5py import special_dtype [as alias]
def test_convert(self):
    dt = h5py.special_dtype(vlen=int)
    ds = self.f.create_dataset('vlen', (3,), dtype=dt)
    ds[0] = np.array([1.4, 1.2])
    ds[1] = np.array([1.2])
    ds[2] = [1.2, 2, 3]

    self.assertArrayEqual(ds[0], np.array([1, 1]))
    self.assertArrayEqual(ds[1], np.array([1]))
    self.assertArrayEqual(ds[2], np.array([1, 2, 3]))

    ds[0:2] = np.array([[0.1, 1.1, 2.1, 3.1, 4], np.arange(4)])
    self.assertArrayEqual(ds[0], np.arange(5))
    self.assertArrayEqual(ds[1], np.arange(4))

    ds[0:2] = np.array([np.array([0.1, 1.2, 2.2]),
                        np.array([0.2, 1.2, 2.2])])
    self.assertArrayEqual(ds[0], np.arange(3))
    self.assertArrayEqual(ds[1], np.arange(3))
Example 10: test_compound_vlen_enum
# Required imports: import h5py [as alias]
# Or: from h5py import special_dtype [as alias]
def test_compound_vlen_enum(self):
    eidt = h5py.special_dtype(enum=(np.uint8, {'OFF': 0, 'ON': 1}))
    vidt = h5py.special_dtype(vlen=np.uint8)

    def a(items):
        return np.array(items, dtype=np.uint8)

    f = self.f

    dt_vve = np.dtype([
        ('foo', vidt),
        ('bar', vidt),
        ('switch', eidt)])
    vve = f.create_dataset('dt_vve', shape=(2,), dtype=dt_vve)
    data = np.array([(a([1, 2, 3]), a([1, 2]), 1),
                     (a([]), a([2, 4, 6]), 0)],
                    dtype=dt_vve)
    vve[:] = data
    actual = vve[:]
    self.assertVlenArrayEqual(data['foo'], actual['foo'])
    self.assertVlenArrayEqual(data['bar'], actual['bar'])
    self.assertArrayEqual(data['switch'], actual['switch'])
Example 11: test_vlen_enum
# Required imports: import h5py [as alias]
# Or: from h5py import special_dtype [as alias]
def test_vlen_enum(self):
    fname = self.mktemp()
    arr1 = [[1], [1, 2]]
    dt1 = h5py.special_dtype(vlen=h5py.special_dtype(
        enum=('i', dict(foo=1, bar=2))))

    with h5py.File(fname, 'w') as f:
        df1 = f.create_dataset('test', (len(arr1),), dtype=dt1)
        df1[:] = np.array(arr1)

    with h5py.File(fname, 'r') as f:
        df2 = f['test']
        dt2 = df2.dtype
        arr2 = [e.tolist() for e in df2[:]]

    self.assertEqual(arr1, arr2)
    self.assertEqual(h5py.check_dtype(enum=h5py.check_dtype(vlen=dt1)),
                     h5py.check_dtype(enum=h5py.check_dtype(vlen=dt2)))
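Example 11 also shows the inverse helper: h5py.check_dtype recovers the metadata that special_dtype attached to a NumPy dtype. A standalone sketch (not from the test suite) for reference:

import h5py
import numpy as np

enum_dt = h5py.special_dtype(enum=(np.uint8, {'OFF': 0, 'ON': 1}))
vlen_dt = h5py.special_dtype(vlen=str)

print(h5py.check_dtype(enum=enum_dt))   # {'OFF': 0, 'ON': 1}
print(h5py.check_dtype(vlen=vlen_dt))   # <class 'str'>
print(h5py.check_dtype(enum=vlen_dt))   # None -- not an enum dtype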
Example 12: test_compound_vlen
# Required imports: import h5py [as alias]
# Or: from h5py import special_dtype [as alias]
def test_compound_vlen(self):
    vidt = h5py.special_dtype(vlen=np.uint8)
    eidt = h5py.special_dtype(enum=(np.uint8, {'OFF': 0, 'ON': 1}))

    for np_align in (False, True):
        dt = np.dtype([
            ('a', eidt),
            ('foo', vidt),
            ('bar', vidt),
            ('switch', eidt)], align=np_align)
        np_offsets = [dt.fields[i][1] for i in dt.names]

        for logical in (False, True):
            if logical and np_align:
                # Vlen types have different size in the numpy struct
                self.assertRaises(TypeError, h5py.h5t.py_create, dt,
                                  logical=logical)
            else:
                ht = h5py.h5t.py_create(dt, logical=logical)
                offsets = [ht.get_member_offset(i)
                           for i in range(ht.get_nmembers())]
                if np_align:
                    self.assertEqual(np_offsets, offsets)
Example 13: prepare_hdf5_file
# Required imports: import h5py [as alias]
# Or: from h5py import special_dtype [as alias]
def prepare_hdf5_file(hdf5_file, n_train, n_valid, n_test):
    """Create datasets within a given HDF5 file.

    Parameters
    ----------
    hdf5_file : :class:`h5py.File` instance
        HDF5 file handle to which to write.
    n_train : int
        The number of training set examples.
    n_valid : int
        The number of validation set examples.
    n_test : int
        The number of test set examples.

    """
    n_total = n_train + n_valid + n_test
    splits = create_splits(n_train, n_valid, n_test)
    hdf5_file.attrs['split'] = H5PYDataset.create_split_array(splits)
    vlen_dtype = h5py.special_dtype(vlen=numpy.dtype('uint8'))
    hdf5_file.create_dataset('encoded_images', shape=(n_total,),
                             dtype=vlen_dtype)
    hdf5_file.create_dataset('targets', shape=(n_total, 1), dtype=numpy.int16)
    hdf5_file.create_dataset('filenames', shape=(n_total, 1), dtype='S32')
Example 14: write_h5
# Required imports: import h5py [as alias]
# Or: from h5py import special_dtype [as alias]
def write_h5(datasetDict, out_file, metadata=None, ref_file=None, compression=None):
    if os.path.isfile(out_file):
        print('delete existing file: {}'.format(out_file))
        os.remove(out_file)

    print('create HDF5 file: {} with w mode'.format(out_file))
    dt = h5py.special_dtype(vlen=np.dtype('float64'))
    with h5py.File(out_file, 'w') as f:
        for dsName in datasetDict.keys():
            data = datasetDict[dsName]
            ds = f.create_dataset(dsName,
                                  data=data,
                                  compression=compression)

        for key, value in metadata.items():
            f.attrs[key] = str(value)
            #print(key + ': ' + value)

    print('finished writing to {}'.format(out_file))
    return out_file
######################################################################
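A minimal read-back sketch (an assumption, not from the project above): write_h5 stores each entry of datasetDict as a top-level dataset and each metadata item as a string-valued file attribute, so both can be recovered generically.

with h5py.File(out_file, 'r') as f:
    arrays = {name: f[name][...] for name in f}   # all datasets back into memory
    metadata = dict(f.attrs)                      # the attributes written from metadata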
Example 15: write
# Required imports: import h5py [as alias]
# Or: from h5py import special_dtype [as alias]
def write(self, example, filename, image_types=[]):
    '''
    Write an example out to disk.

    status: success, failure or error.failure
    '''
    filename = os.path.join(self.name, filename)
    # h5f is presumably h5py imported under an alias (e.g. import h5py as h5f)
    f = h5f.File(filename, 'w')
    if image_types != []:
        dt = h5f.special_dtype(vlen=bytes)
        for (img_type_str, img_format_str) in image_types:
            f.create_dataset("type_" + img_type_str, data=[img_format_str])
    for key, value in example.items():
        if self.verbose > 0:
            print('H5fDataset writing key: ' + str(key))
        f.create_dataset(key, data=value)
    f.close()