This article collects typical usage examples of the fuel.schemes.SequentialScheme class in Python. If you have been wondering what schemes.SequentialScheme does or how to use it, the curated code examples below may help. You can also explore further usage examples from its containing module, fuel.schemes.
The following presents 15 code examples of schemes.SequentialScheme, sorted by popularity by default.
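Before the examples, a minimal sketch of what SequentialScheme itself does may help: it partitions example indices into consecutive batches, and a DataStream uses those index batches to request data from a dataset. The toy dataset below is made up for illustration.

from collections import OrderedDict

import numpy

from fuel.datasets import IndexableDataset
from fuel.schemes import SequentialScheme
from fuel.streams import DataStream

# A scheme only yields batches of indices; it never touches the data.
scheme = SequentialScheme(examples=10, batch_size=4)
print(list(scheme.get_request_iterator()))
# [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]  (the last batch may be smaller)

# A DataStream pairs a scheme with a dataset to produce actual data batches.
dataset = IndexableDataset(OrderedDict([('features', numpy.arange(10))]))
stream = DataStream(dataset, iteration_scheme=SequentialScheme(10, 4))
for (features,) in stream.get_epoch_iterator():
    print(features)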
Example 1: load_imgs_seq
# Required import: from fuel import schemes [as alias]
# Or: from fuel.schemes import SequentialScheme [as alias]
import os
from time import time

from fuel.datasets.hdf5 import H5PYDataset
from fuel.schemes import SequentialScheme
from fuel.streams import DataStream


def load_imgs_seq(ntrain=None, ntest=None, batch_size=128, data_file=None):
    t = time()
    print('LOADING DATASET...')
    path = os.path.join(data_file)
    tr_data = H5PYDataset(path, which_sets=('train',))
    te_data = H5PYDataset(path, which_sets=('test',))
    if ntrain is None:
        ntrain = tr_data.num_examples
    if ntest is None:
        ntest = te_data.num_examples
    # Sequential (non-shuffled) batches over both splits.
    tr_scheme = SequentialScheme(examples=ntrain, batch_size=batch_size)
    tr_stream = DataStream(tr_data, iteration_scheme=tr_scheme)
    te_scheme = SequentialScheme(examples=ntest, batch_size=batch_size)
    te_stream = DataStream(te_data, iteration_scheme=te_scheme)
    print('name = %s, ntrain = %d, ntest = %d' % (data_file, ntrain, ntest))
    print('%.2f seconds to load data' % (time() - t))
    return tr_data, te_data, tr_stream, te_stream, ntrain, ntest
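A hypothetical usage sketch for the function above (the HDF5 path is made up; any file following Fuel's H5PYDataset conventions with 'train' and 'test' splits would do):

tr_data, te_data, tr_stream, te_stream, ntrain, ntest = load_imgs_seq(
    batch_size=128, data_file='/path/to/images.hdf5')
for batch in tr_stream.get_epoch_iterator():
    pass  # batch is a tuple with one array per source, 128 examples each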
Example 2: faces
# Required import: from fuel import schemes [as alias]
# Or: from fuel.schemes import SequentialScheme [as alias]
import os

from fuel.datasets.hdf5 import H5PYDataset
from fuel.schemes import SequentialScheme, ShuffledScheme
from fuel.streams import DataStream

# data_dir is a module-level constant defined elsewhere in the source project.


def faces(ntrain=None, nval=None, ntest=None, batch_size=128):
    path = os.path.join(data_dir, 'faces_364293_128px.hdf5')
    tr_data = H5PYDataset(path, which_sets=('train',))
    te_data = H5PYDataset(path, which_sets=('test',))
    if ntrain is None:
        ntrain = tr_data.num_examples
    if ntest is None:
        ntest = te_data.num_examples
    if nval is None:
        nval = te_data.num_examples
    # Training batches are shuffled; test and validation batches are sequential.
    tr_scheme = ShuffledScheme(examples=ntrain, batch_size=batch_size)
    tr_stream = DataStream(tr_data, iteration_scheme=tr_scheme)
    te_scheme = SequentialScheme(examples=ntest, batch_size=batch_size)
    te_stream = DataStream(te_data, iteration_scheme=te_scheme)
    val_scheme = SequentialScheme(examples=nval, batch_size=batch_size)
    # Note: the validation stream reads sequentially from the *training*
    # data, as in the original snippet.
    val_stream = DataStream(tr_data, iteration_scheme=val_scheme)
    return tr_data, te_data, tr_stream, val_stream, te_stream
Example 3: test_mnist_train
# Required import: from fuel import schemes [as alias]
# Or: from fuel.schemes import SequentialScheme [as alias]
import numpy
from numpy.testing import assert_allclose

from fuel import config
from fuel.datasets import MNIST
from fuel.schemes import SequentialScheme
from fuel.streams import DataStream
# skip_if_not_available is a helper from Fuel's own test suite.
from tests import skip_if_not_available


def test_mnist_train():
    skip_if_not_available(datasets=['mnist.hdf5'])

    dataset = MNIST(('train',), load_in_memory=False)
    handle = dataset.open()
    data, labels = dataset.get_data(handle, slice(0, 10))
    assert data.dtype == 'uint8'
    assert data.shape == (10, 1, 28, 28)
    assert labels.shape == (10, 1)
    known = numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 30, 36, 94, 154, 170, 253,
                         253, 253, 253, 253, 225, 172, 253, 242, 195, 64, 0,
                         0, 0, 0])
    assert_allclose(data[0][0][6], known)
    assert labels[0][0] == 5
    assert dataset.num_examples == 60000
    dataset.close(handle)

    # The default stream rescales uint8 pixels to floats in [0, 1].
    stream = DataStream.default_stream(
        dataset, iteration_scheme=SequentialScheme(10, 10))
    data = next(stream.get_epoch_iterator())[0]
    assert data.min() >= 0.0 and data.max() <= 1.0
    assert data.dtype == config.floatX
Example 4: test_mnist_test
# Required import: from fuel import schemes [as alias]
# Or: from fuel.schemes import SequentialScheme [as alias]
import numpy
from numpy.testing import assert_allclose

from fuel import config
from fuel.datasets import MNIST
from fuel.schemes import SequentialScheme
from fuel.streams import DataStream
# skip_if_not_available is a helper from Fuel's own test suite.
from tests import skip_if_not_available


def test_mnist_test():
    skip_if_not_available(datasets=['mnist.hdf5'])

    dataset = MNIST(('test',), load_in_memory=False)
    handle = dataset.open()
    data, labels = dataset.get_data(handle, slice(0, 10))
    assert data.dtype == 'uint8'
    assert data.shape == (10, 1, 28, 28)
    assert labels.shape == (10, 1)
    known = numpy.array([0, 0, 0, 0, 0, 0, 84, 185, 159, 151, 60, 36, 0, 0, 0,
                         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
    assert_allclose(data[0][0][7], known)
    assert labels[0][0] == 7
    assert dataset.num_examples == 10000
    dataset.close(handle)

    # The default stream rescales uint8 pixels to floats in [0, 1].
    stream = DataStream.default_stream(
        dataset, iteration_scheme=SequentialScheme(10, 10))
    data = next(stream.get_epoch_iterator())[0]
    assert data.min() >= 0.0 and data.max() <= 1.0
    assert data.dtype == config.floatX
Example 5: epoch
# Required import: from fuel import schemes [as alias]
# Or: from fuel.schemes import SequentialScheme [as alias]
import numpy as np

from fuel.schemes import SequentialScheme

# This is a method of a dataset wrapper class in the source project;
# self.subset, self.h5file and utils.random_zeropad are defined there.


def epoch(self, subset, batch_size, shuffle=False):
    dataset = self.subset[subset]
    handle = dataset.open()
    dset_size = self.h5file.attrs['split'][
        dict(train=0, valid=1, test=2)[subset]][3]
    # Round the index array up to a multiple of batch_size, wrapping
    # around to the start of the dataset so every batch is full-sized.
    indices = np.arange(
        ((dset_size + batch_size - 1) // batch_size) * batch_size)
    indices %= dset_size
    if shuffle:
        np.random.shuffle(indices)
    # SequentialScheme also accepts an explicit index collection
    # instead of an example count.
    req_itor = SequentialScheme(
        examples=indices, batch_size=batch_size).get_request_iterator()
    for req in req_itor:
        data_pt = dataset.get_data(handle, req)
        # Zero-pad each example to the longest one in the batch.
        max_len = max(map(len, data_pt[0]))
        spectra_li = [utils.random_zeropad(
                          x, max_len - len(x), axis=-2)
                      for x in data_pt[0]]
        spectra = np.stack(spectra_li)
        yield (spectra,)
    dataset.close(handle)
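Example 5 relies on a detail worth calling out: examples= accepts an explicit index collection instead of an example count, which is what makes the wrap-around padding above work. A minimal sketch of that behavior (values made up):

import numpy as np

from fuel.schemes import SequentialScheme

indices = np.arange(6) % 4  # [0, 1, 2, 3, 0, 1]: wraps past the dataset end
scheme = SequentialScheme(examples=indices, batch_size=3)
print([list(req) for req in scheme.get_request_iterator()])
# [[0, 1, 2], [3, 0, 1]]: every batch is full-sized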
Example 6: test_mean_aggregator
# Required import: from fuel import schemes [as alias]
# Or: from fuel.schemes import SequentialScheme [as alias]
from collections import OrderedDict

import numpy
import theano
from numpy.testing import assert_allclose
from theano import tensor

# DatasetEvaluator and Mean come from the Blocks library; the import
# paths below are an assumption based on Blocks' layout at the time.
from blocks.monitoring.aggregation import Mean
from blocks.monitoring.evaluators import DatasetEvaluator

from fuel.datasets import IndexableDataset
from fuel.schemes import SequentialScheme
from fuel.streams import DataStream


def test_mean_aggregator():
    num_examples = 4
    batch_size = 2
    features = numpy.array([[0, 3],
                            [2, 9],
                            [2, 4],
                            [5, 1]], dtype=theano.config.floatX)
    dataset = IndexableDataset(OrderedDict([('features', features)]))
    data_stream = DataStream(dataset,
                             iteration_scheme=SequentialScheme(num_examples,
                                                               batch_size))
    x = tensor.matrix('features')
    y = (x ** 2).mean(axis=0)
    y.name = 'y'
    z = y.sum()
    z.name = 'z'
    y.tag.aggregation_scheme = Mean(y, 1.)
    z.tag.aggregation_scheme = Mean(z, 1.)
    assert_allclose(DatasetEvaluator([y]).evaluate(data_stream)['y'],
                    numpy.array([8.25, 26.75], dtype=theano.config.floatX))
    assert_allclose(DatasetEvaluator([z]).evaluate(data_stream)['z'],
                    numpy.array([35], dtype=theano.config.floatX))
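Why [8.25, 26.75]: with two equal-sized batches, Mean(y, 1.) averages the per-batch means of x**2; for the first feature that is ((0 + 4)/2 + (4 + 25)/2) / 2 = 8.25, which matches the global mean only because both batches contain the same number of examples.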
Example 7: test_axis_labels_on_produces_batches
# Required import: from fuel import schemes [as alias]
# Or: from fuel.schemes import SequentialScheme [as alias]
import numpy
from numpy.testing import assert_equal
from fuel.datasets import IndexableDataset
from fuel.schemes import SequentialScheme
from fuel.streams import DataStream


def test_axis_labels_on_produces_batches(self):  # a test-case method
    dataset = IndexableDataset(numpy.eye(2))
    axis_labels = {'data': ('batch', 'features')}
    dataset.axis_labels = axis_labels
    # A batch-producing stream passes the dataset's axis labels through.
    stream = DataStream(dataset, iteration_scheme=SequentialScheme(2, 2))
    assert_equal(stream.axis_labels, axis_labels)
Example 8: test_cifar100
# Required import: from fuel import schemes [as alias]
# Or: from fuel.schemes import SequentialScheme [as alias]
import numpy
from numpy.testing import assert_raises

from fuel import config
from fuel.datasets import CIFAR100
from fuel.schemes import SequentialScheme
from fuel.streams import DataStream


def test_cifar100():
    train = CIFAR100(('train',), load_in_memory=False)
    assert train.num_examples == 50000
    handle = train.open()
    # CIFAR-100 provides three sources, unpacked as returned by get_data.
    coarse_labels, features, fine_labels = train.get_data(handle,
                                                          slice(49990, 50000))
    assert features.shape == (10, 3, 32, 32)
    assert coarse_labels.shape == (10, 1)
    assert fine_labels.shape == (10, 1)
    train.close(handle)

    test = CIFAR100(('test',), load_in_memory=False)
    handle = test.open()
    coarse_labels, features, fine_labels = test.get_data(handle,
                                                         slice(0, 10))
    assert features.shape == (10, 3, 32, 32)
    assert coarse_labels.shape == (10, 1)
    assert fine_labels.shape == (10, 1)
    assert features.dtype == numpy.uint8
    assert coarse_labels.dtype == numpy.uint8
    assert fine_labels.dtype == numpy.uint8
    test.close(handle)

    stream = DataStream.default_stream(
        test, iteration_scheme=SequentialScheme(10, 10))
    data = next(stream.get_epoch_iterator())[1]  # index 1 is features
    assert data.min() >= 0.0 and data.max() <= 1.0
    assert data.dtype == config.floatX
    # CIFAR-100 has no 'valid' split.
    assert_raises(ValueError, CIFAR100, ('valid',))
Example 9: test_in_memory
# Required import: from fuel import schemes [as alias]
# Or: from fuel.schemes import SequentialScheme [as alias]
import os
import tempfile

import numpy
from six.moves import cPickle

from fuel.datasets import MNIST
from fuel.schemes import SequentialScheme
from fuel.streams import DataStream
# skip_if_not_available is a helper from Fuel's own test suite.
from tests import skip_if_not_available


def test_in_memory():
    skip_if_not_available(datasets=['mnist.hdf5'])

    # Load MNIST and get two batches
    mnist = MNIST(('train',), load_in_memory=True)
    data_stream = DataStream(mnist, iteration_scheme=SequentialScheme(
        examples=mnist.num_examples, batch_size=256))
    epoch = data_stream.get_epoch_iterator()
    for i, (features, targets) in enumerate(epoch):
        if i == 1:
            break
    handle = mnist.open()
    known_features, _ = mnist.get_data(handle, slice(256, 512))
    mnist.close(handle)
    assert numpy.all(features == known_features)

    # Pickle the epoch and make sure that the data wasn't dumped
    with tempfile.NamedTemporaryFile(delete=False) as f:
        filename = f.name
        cPickle.dump(epoch, f)
    assert os.path.getsize(filename) < 1024 * 1024  # Less than 1MB

    # Reload the epoch and make sure that the state was maintained
    del epoch
    with open(filename, 'rb') as f:
        epoch = cPickle.load(f)
    features, targets = next(epoch)
    handle = mnist.open()
    known_features, _ = mnist.get_data(handle, slice(512, 768))
    mnist.close(handle)
    assert numpy.all(features == known_features)
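The pickling round-trip works because Fuel builds its iterators on picklable_itertools: an epoch iterator can be serialized mid-epoch and later resumed from the same position, which is what makes mid-training checkpointing possible.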
Example 10: test_ngram_stream_raises_error_on_batch_stream
# Required import: from fuel import schemes [as alias]
# Or: from fuel.schemes import SequentialScheme [as alias]
import numpy
from numpy.testing import assert_raises
from fuel.datasets import IndexableDataset
from fuel.schemes import SequentialScheme
from fuel.streams import DataStream
from fuel.transformers.sequences import NGrams  # import path is an assumption


def test_ngram_stream_raises_error_on_batch_stream():
    sentences = [list(numpy.random.randint(10, size=sentence_length))
                 for sentence_length in [3, 5, 7]]
    # A batch stream (SequentialScheme) makes NGrams raise ValueError.
    stream = DataStream(
        IndexableDataset(sentences), iteration_scheme=SequentialScheme(3, 1))
    assert_raises(ValueError, NGrams, 4, stream)
Example 11: test_cifar10
# Required import: from fuel import schemes [as alias]
# Or: from fuel.schemes import SequentialScheme [as alias]
import numpy
from numpy.testing import assert_raises

from fuel import config
from fuel.datasets import CIFAR10
from fuel.schemes import SequentialScheme
from fuel.streams import DataStream


def test_cifar10():
    train = CIFAR10(('train',), load_in_memory=False)
    assert train.num_examples == 50000
    handle = train.open()
    features, targets = train.get_data(handle, slice(49990, 50000))
    assert features.shape == (10, 3, 32, 32)
    assert targets.shape == (10, 1)
    train.close(handle)

    test = CIFAR10(('test',), load_in_memory=False)
    handle = test.open()
    features, targets = test.get_data(handle, slice(0, 10))
    assert features.shape == (10, 3, 32, 32)
    assert targets.shape == (10, 1)
    assert features.dtype == numpy.uint8
    assert targets.dtype == numpy.uint8
    test.close(handle)

    stream = DataStream.default_stream(
        test, iteration_scheme=SequentialScheme(10, 10))
    data = next(stream.get_epoch_iterator())[0]
    assert data.min() >= 0.0 and data.max() <= 1.0
    assert data.dtype == config.floatX

    # There is no 'valid' split, and subsets past 50000 are out of range.
    assert_raises(ValueError, CIFAR10, ('valid',))
    assert_raises(ValueError, CIFAR10,
                  ('train',), subset=slice(50000, 60000))
Example 12: test_data_stream_pickling
# Required import: from fuel import schemes [as alias]
# Or: from fuel.schemes import SequentialScheme [as alias]
from six.moves import cPickle
from fuel.datasets.hdf5 import H5PYDataset
from fuel.schemes import SequentialScheme
from fuel.streams import DataStream


def test_data_stream_pickling(self):
    # A test-case method; self.h5file is created by the test fixture.
    stream = DataStream(H5PYDataset(self.h5file, which_sets=('train',)),
                        iteration_scheme=SequentialScheme(100, 10))
    # Round-trip through pickle to check the stream is serializable.
    cPickle.loads(cPickle.dumps(stream))
    stream.close()
Example 13: fuel_data_to_list
# Required import: from fuel import schemes [as alias]
# Or: from fuel.schemes import SequentialScheme [as alias]
from fuel.schemes import SequentialScheme, ShuffledScheme
from fuel.streams import DataStream


def fuel_data_to_list(fuel_data, shuffle):
    # A single batch spanning the whole dataset, so that one iteration
    # returns every example at once.
    if shuffle:
        scheme = ShuffledScheme(fuel_data.num_examples, fuel_data.num_examples)
    else:
        scheme = SequentialScheme(fuel_data.num_examples, fuel_data.num_examples)
    fuel_data_stream = DataStream.default_stream(fuel_data, iteration_scheme=scheme)
    return next(fuel_data_stream.get_epoch_iterator())
Example 14: get_stream
# Required import: from fuel import schemes [as alias]
# Or: from fuel.schemes import SequentialScheme [as alias]
from fuel.datasets import MNIST
from fuel.schemes import SequentialScheme
from fuel.streams import DataStream


def get_stream():
    # First 1500 MNIST training examples, in sequential batches of 500.
    return DataStream(
        MNIST(('train',)), iteration_scheme=SequentialScheme(1500, 500))
Example 15: _test_dataset
# Required import: from fuel import schemes [as alias]
# Or: from fuel.schemes import SequentialScheme [as alias]
from numpy.testing import assert_raises
from fuel.datasets.dogs_vs_cats import DogsVsCats
from fuel.schemes import SequentialScheme
from fuel.streams import DataStream


def _test_dataset():
    train = DogsVsCats(('train',))
    assert train.num_examples == 25000
    assert_raises(ValueError, DogsVsCats, ('valid',))

    test = DogsVsCats(('test',))
    stream = DataStream.default_stream(
        test, iteration_scheme=SequentialScheme(10, 10))
    # First source, first example of the first batch.
    data = next(stream.get_epoch_iterator())[0][0]
    assert data.dtype.kind == 'f'