This page collects typical usage examples of the Python method fuel.datasets.hdf5.H5PYDataset. If you have been wondering what hdf5.H5PYDataset does, how to call it, or what real uses look like, the curated code examples below may help. You can also look further into the module that defines the method, fuel.datasets.hdf5.
Eight code examples of hdf5.H5PYDataset are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
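All eight examples follow the same basic pattern: construct an H5PYDataset over one or more named splits of a Fuel-format HDF5 file, then (usually) wrap it in a DataStream with an iteration scheme. A minimal sketch of that pattern, where mydata.hdf5 and the 'train' split are placeholders:

from fuel.datasets.hdf5 import H5PYDataset
from fuel.schemes import ShuffledScheme
from fuel.streams import DataStream

train_set = H5PYDataset('mydata.hdf5', which_sets=('train',))
scheme = ShuffledScheme(examples=train_set.num_examples, batch_size=128)
stream = DataStream(train_set, iteration_scheme=scheme)
for batch in stream.get_epoch_iterator():
    pass  # each batch is a tuple of arrays, one per source in the file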
Example 1: load_imgs
# Required import: from fuel.datasets import hdf5 [as alias]
# Or: from fuel.datasets.hdf5 import H5PYDataset [as alias]
import os
from time import time

from fuel.datasets.hdf5 import H5PYDataset
from fuel.schemes import ShuffledScheme
from fuel.streams import DataStream

def load_imgs(ntrain=None, ntest=None, batch_size=128, data_file=None):
    t = time()
    print('LOADING DATASET...')
    path = os.path.join(data_file)
    tr_data = H5PYDataset(path, which_sets=('train',))
    te_data = H5PYDataset(path, which_sets=('test',))
    # Use the full split sizes by default; otherwise clamp to what exists.
    if ntrain is None:
        ntrain = tr_data.num_examples
    else:
        ntrain = min(ntrain, tr_data.num_examples)
    if ntest is None:
        ntest = te_data.num_examples
    else:
        ntest = min(ntest, te_data.num_examples)
    print('name = %s, ntrain = %d, ntest = %d' % (data_file, ntrain, ntest))
    # Shuffled minibatch streams over both splits.
    tr_scheme = ShuffledScheme(examples=ntrain, batch_size=batch_size)
    tr_stream = DataStream(tr_data, iteration_scheme=tr_scheme)
    te_scheme = ShuffledScheme(examples=ntest, batch_size=batch_size)
    te_stream = DataStream(te_data, iteration_scheme=te_scheme)
    print('%.2f secs to load data' % (time() - t))
    return tr_data, te_data, tr_stream, te_stream, ntrain, ntest
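A hedged usage example for the function above, with imgs.hdf5 standing in for any Fuel-format file containing 'train' and 'test' splits:

tr_data, te_data, tr_stream, te_stream, ntrain, ntest = load_imgs(
    batch_size=64, data_file='imgs.hdf5')
for batch in tr_stream.get_epoch_iterator():
    pass  # batch is a tuple with one array per source in the file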
Example 2: load_imgs_seq
# Required import: from fuel.datasets import hdf5 [as alias]
# Or: from fuel.datasets.hdf5 import H5PYDataset [as alias]
import os
from time import time

from fuel.datasets.hdf5 import H5PYDataset
from fuel.schemes import SequentialScheme
from fuel.streams import DataStream

def load_imgs_seq(ntrain=None, ntest=None, batch_size=128, data_file=None):
    t = time()
    print('LOADING DATASET...')
    path = os.path.join(data_file)
    tr_data = H5PYDataset(path, which_sets=('train',))
    te_data = H5PYDataset(path, which_sets=('test',))
    if ntrain is None:
        ntrain = tr_data.num_examples
    if ntest is None:
        ntest = te_data.num_examples
    # SequentialScheme visits examples in file order (no shuffling),
    # unlike the ShuffledScheme used in Example 1.
    tr_scheme = SequentialScheme(examples=ntrain, batch_size=batch_size)
    tr_stream = DataStream(tr_data, iteration_scheme=tr_scheme)
    te_scheme = SequentialScheme(examples=ntest, batch_size=batch_size)
    te_stream = DataStream(te_data, iteration_scheme=te_scheme)
    print('name = %s, ntrain = %d, ntest = %d' % (data_file, ntrain, ntest))
    print('%.2f seconds to load data' % (time() - t))
    return tr_data, te_data, tr_stream, te_stream, ntrain, ntest
Example 3: load_imgs_raw
# Required import: from fuel.datasets import hdf5 [as alias]
# Or: from fuel.datasets.hdf5 import H5PYDataset [as alias]
import os
from time import time

from fuel.datasets.hdf5 import H5PYDataset

def load_imgs_raw(ntrain=None, ntest=None, data_file=None):
    t = time()
    print('LOADING DATASET...')
    path = os.path.join(data_file)
    tr_data = H5PYDataset(path, which_sets=('train',))
    te_data = H5PYDataset(path, which_sets=('test',))
    if ntrain is None:
        ntrain = tr_data.num_examples
    if ntest is None:
        ntest = te_data.num_examples
    print('name = %s, ntrain = %d, ntest = %d' % (data_file, ntrain, ntest))
    print('%.2f seconds to load data' % (time() - t))
    # Returns the datasets themselves; no iteration streams are built here.
    return tr_data, te_data, ntrain, ntest
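Because this variant returns the datasets without wrapping them in streams, a caller reads from them through Fuel's low-level Dataset API. A minimal follow-up sketch, with imgs.hdf5 again a placeholder:

tr_data, te_data, ntrain, ntest = load_imgs_raw(data_file='imgs.hdf5')
handle = tr_data.open()
first_batch = tr_data.get_data(handle, slice(0, 128))  # tuple of arrays
tr_data.close(handle)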
Example 4: faces
# Required import: from fuel.datasets import hdf5 [as alias]
# Or: from fuel.datasets.hdf5 import H5PYDataset [as alias]
import os

from fuel.datasets.hdf5 import H5PYDataset
from fuel.schemes import SequentialScheme, ShuffledScheme
from fuel.streams import DataStream

def faces(ntrain=None, nval=None, ntest=None, batch_size=128):
    # data_dir is a module-level path defined elsewhere in the source project.
    path = os.path.join(data_dir, 'faces_364293_128px.hdf5')
    tr_data = H5PYDataset(path, which_sets=('train',))
    te_data = H5PYDataset(path, which_sets=('test',))
    if ntrain is None:
        ntrain = tr_data.num_examples
    if ntest is None:
        ntest = te_data.num_examples
    if nval is None:
        nval = te_data.num_examples
    tr_scheme = ShuffledScheme(examples=ntrain, batch_size=batch_size)
    tr_stream = DataStream(tr_data, iteration_scheme=tr_scheme)
    te_scheme = SequentialScheme(examples=ntest, batch_size=batch_size)
    te_stream = DataStream(te_data, iteration_scheme=te_scheme)
    # The validation stream reads sequentially from the training set.
    val_scheme = SequentialScheme(examples=nval, batch_size=batch_size)
    val_stream = DataStream(tr_data, iteration_scheme=val_scheme)
    return tr_data, te_data, tr_stream, val_stream, te_stream
Example 5: __init__
# Required import: from fuel.datasets import hdf5 [as alias]
# Or: from fuel.datasets.hdf5 import H5PYDataset [as alias]
import numpy

def __init__(self, path, which_set='train', load_size=None, crop_size=None, dtype=numpy.float32):
    from fuel.datasets.hdf5 import H5PYDataset
    self._dtype = dtype
    self._load_size = load_size
    self._crop_size = crop_size
    self._data_set = H5PYDataset(path, which_sets=(which_set,))
Example 6: __init__
# Required import: from fuel.datasets import hdf5 [as alias]
# Or: from fuel.datasets.hdf5 import H5PYDataset [as alias]
from fuel.datasets.hdf5 import H5PYDataset

def __init__(self, h5filename, ntrain=None, ntest=None, batch_size=1, folds=None):
    if folds is None:
        # No folds: use the standard train/test splits.
        notest = False  # the original snippet left notest undefined here
        te_sets = ('test',)
        tr_sets = ('train',)
    else:
        # folds=(k, i): train on folds 0..k-1 except fold i, test on fold i.
        # folds=(k, k) trains on all k folds and builds no test set.
        notest = (folds[0] == folds[1])
        te_sets = () if notest else ('fold_{}'.format(folds[1]),)
        tr_sets = tuple('fold_{}'.format(i) for i in range(folds[0]) if i != folds[1])
    self.batch_size = batch_size
    self.tr_data = H5PYDataset(h5filename, which_sets=tr_sets)
    self.te_data = None if notest else H5PYDataset(h5filename, which_sets=te_sets)
    self.ntrain = ntrain or self.tr_data.num_examples
    self.ntest = (ntest or self.te_data.num_examples) if self.te_data else 0
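A hedged usage sketch, assuming the enclosing class (omitted from the snippet) is named Loader and that folds.hdf5 contains splits fold_0 through fold_4:

loader = Loader('folds.hdf5', batch_size=64, folds=(5, 2))  # test on fold_2
print(loader.ntrain, loader.ntest)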
Example 7: install_and_load
# Required import: from fuel.datasets import hdf5 [as alias]
# Or: from fuel.datasets.hdf5 import H5PYDataset [as alias]
import os

import h5py
from fuel.datasets.hdf5 import H5PYDataset

def install_and_load(self):
    path = os.path.join(
        os.path.dirname(__file__), 'WSJ0', 'wsj0-danet.hdf5')
    self.h5file = h5py.File(path, 'r')
    # H5PYDataset also accepts an open h5py.File in place of a path,
    # so all three splits share a single file handle.
    train_set = H5PYDataset(
        self.h5file, which_sets=('train',))
    valid_set = H5PYDataset(
        self.h5file, which_sets=('valid',))
    test_set = H5PYDataset(
        self.h5file, which_sets=('test',))
    self.subset = dict(
        train=train_set, valid=valid_set, test=test_set)
    self.is_loaded = True
Example 8: get_dataset_iterator
# Required import: from fuel.datasets import hdf5 [as alias]
# Or: from fuel.datasets.hdf5 import H5PYDataset [as alias]
from fuel.datasets.hdf5 import H5PYDataset
from fuel.schemes import SequentialExampleScheme
from fuel.streams import DataStream
from fuel.transformers.defaults import uint8_pixels_to_floatX
from fuel.utils import find_in_data_path
# AddLabelUncertainty, RandomLabelStrip, and StretchLabels are custom
# transformers defined in the source project, not part of Fuel itself.

def get_dataset_iterator(dataset, split, include_features=True, include_targets=False, unit_scale=True, label_transforms=False, return_length=False):
    """Get iterator for dataset, split, targets (labels) and scaling (from 255 to 1.0)"""
    sources = []
    sources = sources + ['features'] if include_features else sources
    sources = sources + ['targets'] if include_targets else sources
    if split == "all":
        splits = ('train', 'valid', 'test')
    elif split == "nontrain":
        splits = ('valid', 'test')
    else:
        splits = (split,)
    dataset_fname = find_in_data_path("{}.hdf5".format(dataset))
    h5_dataset = H5PYDataset(dataset_fname, which_sets=splits,
                             sources=sources)
    if unit_scale:
        # Rescale uint8 pixel values in [0, 255] to floats in [0.0, 1.0].
        h5_dataset.default_transformers = uint8_pixels_to_floatX(('features',))
    datastream = DataStream.default_stream(
        dataset=h5_dataset,
        iteration_scheme=SequentialExampleScheme(h5_dataset.num_examples))
    if label_transforms:
        # TODO: maybe refactor this common bit with get_custom_streams below
        datastream = AddLabelUncertainty(datastream,
                                         chance=0,
                                         which_sources=('targets',))
        datastream = RandomLabelStrip(datastream,
                                      chance=0,
                                      which_sources=('targets',))
        # HACK: allow variable stretch
        datastream = StretchLabels(datastream,
                                   length=128,
                                   which_sources=('targets',))
    it = datastream.get_epoch_iterator()
    if return_length:
        return it, h5_dataset.num_examples
    else:
        return it
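A hedged usage sketch for the function above, assuming a Fuel-format mnist.hdf5 is discoverable on Fuel's data path (and leaving label_transforms off, since those transformers are project-specific):

it, n = get_dataset_iterator('mnist', 'train', include_targets=True,
                             return_length=True)
features, targets = next(it)  # SequentialExampleScheme yields one example at a time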