This article collects typical usage examples of the Python function theano.tests.unittest_tools.seed_rng. If you are unsure what seed_rng does or how to call it, the curated examples below should help.
Below are 15 code examples of the seed_rng function, drawn from real projects and sorted roughly by popularity.
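Before the examples, here is a minimal sketch of the basic pattern (assuming Theano is installed; the seed value 42 is purely illustrative). seed_rng seeds numpy.random so that any random test data drawn afterwards is reproducible:

import numpy
import theano
import theano.tests.unittest_tools as utt

utt.seed_rng()        # seed numpy.random from the test-suite default
utt.seed_rng(42)      # or force a specific seed, as some tests do
# random draws after seeding are now reproducible across runs
data = numpy.random.rand(3, 4).astype(theano.config.floatX)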
Example 1: run_conv_nnet2_classif
def run_conv_nnet2_classif(use_gpu, seed, isize, ksize, bsize,
                           n_train=10,
                           check_isfinite=True,
                           verbose=0,
                           version=-1):
    """Run the train function returned by build_conv_nnet2_classif
    on one device.
    """
    utt.seed_rng(seed)  # Seeds numpy.random with seed
    train, params, x_shape, y_shape, mode = build_conv_nnet2_classif(
        use_gpu=use_gpu,
        isize=isize,
        ksize=ksize,
        n_batch=bsize,
        verbose=verbose,
        version=version,
        check_isfinite=check_isfinite)
    xval = my_rand(*x_shape)
    yval = my_rand(*y_shape)
    lr = theano._asarray(0.01, dtype='float32')
    rvals = my_zeros(n_train)
    for i in xrange(n_train):
        rvals[i] = train(xval, yval, lr)[0]
Example 2: setUp
def setUp(self):
    if theano.config.mode == "FAST_COMPILE":
        m = theano.compile.mode.get_mode("FAST_RUN").excluding(
            "local_elemwise_fusion")
    else:
        m = theano.compile.mode.get_default_mode().excluding(
            "local_elemwise_fusion")
    self.m = m
    utt.seed_rng()
Example 3: test_full
def test_full():
    seed_rng()
    shapes = get_basic_shapes()
    shapes += get_shapes2()
    # test image strides
    shapes += get_shapes2(scales_img=(2, 2), img_stride=(1, 2))
    shapes += get_shapes2(scales_img=(2, 2), img_stride=(2, 1))
    shapes += get_shapes2(scales_img=(2, 2), img_stride=(2, 2))
    shapes += get_shapes2(scales_img=(2, 2), img_stride=(-1, -1))
    shapes += get_shapes2(scales_img=(2, 2), kern_stride=(-1, -1))
    # subsampling is tested in a separate function
    shapes += [
        # other tests
        ((2, 1, 2, 2), (1, 1, 2, 2), (1, 1), (1, 1), (1, 1)),
        ((3, 2, 4, 4), (4, 2, 4, 4), (1, 1), (1, 1), (1, 1)),
        ((4, 1, 10, 10), (1, 1, 2, 2), (1, 1), (1, 1), (1, 1)),
        ((1, 1, 4, 4), (1, 1, 2, 3), (1, 1), (1, 1), (1, 1)),
        ((4, 1, 10, 10), (1, 1, 2, 3), (1, 1), (1, 1), (1, 1)),
        ((4, 1, 10, 10), (1, 1, 2, 10), (1, 1), (1, 1), (1, 1)),
        ((4, 1, 20, 10), (1, 1, 2, 10), (1, 1), (1, 1), (1, 1)),
        ((3, 2, 8, 8), (4, 2, 4, 4), (1, 1), (1, 1), (1, 1)),  # stack, nkern, bsize
        ((3, 2, 8, 6), (4, 2, 4, 4), (1, 1), (1, 1), (1, 1)),  # non-square image
        ((3, 2, 8, 6), (4, 2, 4, 3), (1, 1), (1, 1), (1, 1)),  # non-square image, non-square kernel
        ((3, 2, 8, 6), (4, 2, 4, 6), (1, 1), (1, 1), (1, 1)),  # kernel size == image size on one dim
        ((16, 5, 64, 64), (8, 5, 8, 8), (1, 1), (1, 1), (1, 1)),  # a big one
        ((16, 1, 28, 28), (20, 1, 5, 5), (1, 1), (1, 1), (1, 1)),  # MNIST LeNet layer 1
        ((20, 16, 32, 32), (1, 16, 28, 28), (1, 1), (1, 1), (1, 1)),  # layer 1 backprop to weights
        # other tests
        ((3, 1, 1, 1), (2, 1, 5, 3), (1, 1), (1, 1), (1, 1)),  # kernel bigger than image
        ((3, 2, 1, 1), (4, 2, 1, 1), (1, 1), (1, 1), (1, 1)),
        ((3, 2, 4, 4), (4, 2, 2, 6), (1, 1), (1, 1), (1, 1)),
        ((3, 2, 4, 4), (4, 2, 8, 6), (1, 1), (1, 1), (1, 1)),  # kernel bigger than image
        ((4, 2, 10, 10), (3, 2, 2, 12), (1, 1), (1, 1), (1, 1)),
    ]
    shapes += [
        # ((60, 1, 28, 28), (20, 1, 5, 5), (1, 1), (1, 1), (1, 1)),  # test_lenet_28, 1 layer
        # ((60, 20, 12, 12), (30, 20, 5, 5), (1, 1), (1, 1), (1, 1)),  # test_lenet_28, 2 layers
        ((60, 30, 8, 8), (20, 30, 5, 5), (1, 1), (1, 1), (1, 1)),  # test_lenet_28 bprop 1, full
        # ((20, 60, 12, 12), (30, 60, 8, 8), (1, 1), (1, 1), (1, 1)),  # test_lenet_28 bprop 2, valid
        # ((1, 60, 28, 28), (20, 60, 24, 24), (1, 1), (1, 1), (1, 1)),  # test_lenet_28 bprop 2, valid
        # ((10, 1, 64, 64), (20, 1, 7, 7), (1, 1), (1, 1), (1, 1)),  # test_lenet_64, 1 layer
        # ((10, 20, 29, 29), (30, 20, 7, 7), (1, 1), (1, 1), (1, 1)),  # test_lenet_64, 2 layers
        ((10, 30, 23, 23), (20, 30, 7, 7), (1, 1), (1, 1), (1, 1)),  # test_lenet_64, full
        # ((20, 10, 29, 29), (30, 10, 23, 23), (1, 1), (1, 1), (1, 1)),  # test_lenet_64 bprop 1
        # ((1, 10, 64, 64), (20, 10, 58, 58), (1, 1), (1, 1), (1, 1)),  # test_lenet_64 bprop 2
        # test shapes larger than maxThreadsDim0
        ((2, 4, 13, 1050), (3, 4, 10, 11), (1, 1), (1, 1), (1, 1)),
        ((2, 4, 1050, 13), (3, 4, 10, 11), (1, 1), (1, 1), (1, 1)),
    ]
    version = [-2, -1, 0, 1, 2, 3, 4, 5]
    verbose = 0
    random = True
    exec_conv(version, shapes, verbose, random, 'full')
Example 4: test_invalid_input_shape
def test_invalid_input_shape(self):
    """
    Test that an error is raised when the shape given at build time
    differs from the shape used at run time.
    """
    seed_rng()
    verbose = 0
    random = True
    print_ = False
    ones = False
    if ones:
        random = False

    global theano_mode
    theano_mode_orig = theano_mode
    try:
        if theano.config.mode in ['DebugMode', 'DEBUG_MODE']:
            theano_mode = theano.compile.mode.get_mode(
                'FAST_RUN').including('gpu')
        for mode in ['valid', 'full']:
            for shapes in [((3, 2, 8, 8), (4, 2, 5, 5), (8, 8)),
                           ((3, 2, 8, 8), (4, 2, 5, 5), (5, 8)),
                           # ((3, 2, 8, 8), (4, 2, 5, 5), (8, 5)),
                           # Only the number of columns is used.
                           ]:
                self.assertRaises(ValueError, _params_allgood,
                                  shapes[0], shapes[1],
                                  verbose=verbose, random=random,
                                  mode=mode,
                                  print_=print_, ones=ones,
                                  compile_kshp=shapes[2])
    finally:
        theano_mode = theano_mode_orig
Example 5: test_valid_4
def test_valid_4():
    seed_rng()
    shapes = get_valid_shapes()
    version = [4]
    verbose = 0
    random = True
    print_ = False
    ones = False
    if ones:
        random = False
    shapes2 = []
    for id, (ishape, kshape, subshape, istride, kstride) in enumerate(shapes):
        # Output shape of a 'valid' convolution: image size - kernel size + 1.
        oshape = ([ishape[0]] + [kshape[0]] +
                  list(numpy.asarray(ishape[2:]) -
                       numpy.asarray(kshape[2:]) +
                       numpy.asarray([1, 1])))
        if oshape[3] > device_prop["maxThreadsDim0"]:
            continue
        if ishape[1] > 1:
            continue
        if (kshape[2] * ishape[3] * 4 +
                numpy.prod(kshape[2:]) * 4) > (16 * 1024 - 150):
            continue
        if subshape == (1, 1):
            shapes2.append((ishape, kshape, subshape, istride, kstride))
    shapes = shapes2
    exec_conv(version, shapes, verbose, random, "valid",
              print_=print_, ones=ones, rtol=1.1e-5)
Example 6: _test_subsample
def _test_subsample(cls, mode, version_valid=[-1], version_full=[-1]):
    seed_rng()
    shapes = [((1, 1, 1, 1), (1, 1, 1, 1), (1, 1), (1, 1), (1, 1)),
              ((1, 1, 1, 1), (1, 1, 1, 1), (2, 2), (1, 1), (1, 1)),
              ((4, 2, 10, 10), (3, 2, 2, 2), (1, 3), (1, 1), (1, 1)),
              ((4, 2, 10, 10), (3, 2, 2, 2), (3, 3), (1, 1), (1, 1)),
              ((4, 2, 10, 10), (3, 2, 2, 2), (3, 1), (1, 1), (1, 1)),
              ]
    shapes += get_shapes2(scales_img=(2, 2), subsample=(1, 1))
    shapes += get_shapes2(scales_img=(2, 2), subsample=(1, 2))
    shapes += get_shapes2(scales_img=(2, 2), subsample=(2, 1))
    shapes += get_shapes2(scales_img=(2, 2), subsample=(2, 2))
    # We run only the versions that implement subsampling, to make
    # the test faster.
    verbose = 0
    random = True
    print_ = False
    ones = False
    if ones:
        random = False
    for t in exec_conv(version_valid, shapes, verbose, random, 'valid',
                       print_=print_, ones=ones,
                       theano_mode=mode, cls=cls):
        yield t
    for t in exec_conv(version_full, shapes, verbose, random, 'full',
                       print_=print_, ones=ones,
                       theano_mode=mode, cls=cls):
        yield t
Example 7: test_doubleop
def test_doubleop():
    utt.seed_rng()
    x = matrix()
    f = function([x], DoubleOp()(x))
    inp = numpy.asarray(numpy.random.rand(5, 4), dtype=config.floatX)
    out = f(inp)
    utt.assert_allclose(inp * 2, out)
Example 8: test_subsample
def test_subsample():
    seed_rng()
    shapes = [((1, 1, 1, 1), (1, 1, 1, 1), (1, 1), (1, 1), (1, 1)),
              ((1, 1, 1, 1), (1, 1, 1, 1), (2, 2), (1, 1), (1, 1)),
              ((4, 2, 10, 10), (3, 2, 2, 2), (1, 3), (1, 1), (1, 1)),
              ((4, 2, 10, 10), (3, 2, 2, 2), (3, 3), (1, 1), (1, 1)),
              ((4, 2, 10, 10), (3, 2, 2, 2), (3, 1), (1, 1), (1, 1)),
              ]
    shapes += get_shapes2(scales_img=(2, 2), subsample=(1, 1))
    shapes += get_shapes2(scales_img=(2, 2), subsample=(1, 2))
    shapes += get_shapes2(scales_img=(2, 2), subsample=(2, 1))
    shapes += get_shapes2(scales_img=(2, 2), subsample=(2, 2))
    # We run only the versions that implement subsampling, to make
    # the test faster.
    version_valid = [-2, -1, 1, 3, 11, 12]
    version_full = [-2, -1]
    verbose = 0
    random = True
    print_ = False
    ones = False
    if ones:
        random = False
    exec_conv(version_valid, shapes, verbose, random, 'valid',
              print_=print_, ones=ones)
    exec_conv(version_full, shapes, verbose, random, 'full',
              print_=print_, ones=ones)
Example 9: test_batch_normalization_train_without_running_averages
def test_batch_normalization_train_without_running_averages():
    # compile and run batch_normalization_train without running averages
    utt.seed_rng()
    x, scale, bias, dy = (T.tensor4('x'), T.tensor4('scale'),
                          T.tensor4('bias'), T.tensor4('dy'))
    data_shape = (5, 10, 30, 25)
    param_shape = (1, 10, 30, 25)
    # forward pass
    out, x_mean, x_invstd = bn.batch_normalization_train(
        x, scale, bias, 'per-activation')
    # backward pass
    grads = T.grad(None, wrt=[x, scale, bias], known_grads={out: dy})
    # compile
    f = theano.function([x, scale, bias, dy], [out, x_mean, x_invstd] + grads)
    # check that the abstract Ops have been replaced
    assert not any([isinstance(n.op, (bn.AbstractBatchNormTrain,
                                      bn.AbstractBatchNormInference,
                                      bn.AbstractBatchNormTrainGrad))
                    for n in f.maker.fgraph.toposort()])
    # run
    X = 4 + 3 * numpy.random.randn(*data_shape).astype(theano.config.floatX)
    Dy = -1 + 2 * numpy.random.randn(*data_shape).astype(theano.config.floatX)
    Scale = numpy.random.randn(*param_shape).astype(theano.config.floatX)
    Bias = numpy.random.randn(*param_shape).astype(theano.config.floatX)
    f(X, Scale, Bias, Dy)
Example 10: test_valid_7_8_13
def test_valid_7_8_13():
    seed_rng()
    shapes = get_valid_shapes()
    # This is to test the "new" lower shared-memory usage.
    shapes.append(((10, 30, 60, 60), (20, 30, 40, 40),
                   (1, 1), (1, 1), (1, 1)))
    version = [7, 8, 13]
    verbose = 0
    random = True
    print_ = False
    ones = False
    if ones:
        random = False
    shapes2 = []
    for id, (ishape, kshape, subshape, istride, kstride) in enumerate(shapes):
        oshape = ([ishape[0]] + [kshape[0]] +
                  list(numpy.asarray(ishape[2:]) -
                       numpy.asarray(kshape[2:]) +
                       numpy.asarray([1, 1])))
        if oshape[2] * oshape[3] > device_prop['maxThreadsDim0']:
            continue
        if max(numpy.prod(ishape[2:]) * 4 + 2 * kshape[3] * 4,
               oshape[2] * oshape[3] * 4 * 2) > (16 * 1024 - 150):
            continue
        if subshape == (1, 1):
            shapes2.append((ishape, kshape, subshape, istride, kstride))
    shapes = shapes2
    exec_conv(version, shapes, verbose, random, 'valid',
              print_=print_, ones=ones, rtol=1.1e-5)
Example 11: test_subsample
def test_subsample():
    seed_rng()
    shapes = [((1, 1, 1, 1), (1, 1, 1, 1), (1, 1), (1, 1), (1, 1)),
              ((1, 1, 1, 1), (1, 1, 1, 1), (2, 2), (1, 1), (1, 1)),
              ((4, 2, 10, 10), (3, 2, 2, 2), (1, 3), (1, 1), (1, 1)),
              ((4, 2, 10, 10), (3, 2, 2, 2), (3, 3), (1, 1), (1, 1)),
              ((4, 2, 10, 10), (3, 2, 2, 2), (3, 1), (1, 1), (1, 1)),
              ]
    shapes += get_shapes2(scales_img=(2, 2), subsample=(1, 1))
    shapes += get_shapes2(scales_img=(2, 2), subsample=(1, 2))
    shapes += get_shapes2(scales_img=(2, 2), subsample=(2, 1))
    shapes += get_shapes2(scales_img=(2, 2), subsample=(2, 2))
    version_valid = [-1]
    version_full = [-1]
    verbose = 0
    random = True
    print_ = False
    ones = False
    if ones:
        random = False
    exec_conv(version_valid, shapes, verbose, random, 'valid',
              print_=print_, ones=ones)
    exec_conv(version_full, shapes, verbose, random, 'full',
              print_=print_, ones=ones)
Example 12: test_valid_9_10
def test_valid_9_10():
    seed_rng()
    shapes = get_valid_shapes()
    version = [9, 10]
    verbose = 0
    random = True
    print_ = False
    ones = False
    if ones:
        random = False
    shapes2 = []
    for id, (ishape, kshape, subshape, istride, kstride) in enumerate(shapes):
        oshape = ([ishape[0]] + [kshape[0]] +
                  list(numpy.asarray(ishape[2:]) -
                       numpy.asarray(kshape[2:]) +
                       numpy.asarray([1, 1])))
        if oshape[3] > device_prop['maxThreadsDim0']:
            continue
        if (kshape[3] * 4 + ishape[3]) > (16 * 1024 - 150):
            continue
        if subshape == (1, 1):
            shapes2.append((ishape, kshape, subshape, istride, kstride))
    shapes = shapes2
    exec_conv(version, shapes, verbose, random, 'valid',
              print_=print_, ones=ones, rtol=1.1e-5)
Example 13: test_valid
def test_valid(conv_gemm=False):
    seed_rng()
    shapes = get_valid_shapes()
    # -1 is included in case we forget to add a new version to the test;
    # -2 tests the reference version.
    version = [-2, -1, 6]
    verbose = 0
    random = True
    print_ = False
    ones = False
    if ones:
        random = False
    if conv_gemm:
        # Test the GpuCorrMM version.
        mode = theano_mode.including("conv_gemm")
        cls = cuda.blas.BaseGpuCorrMM
        # Dummy version; not used by GpuCorrMM, so one version is enough.
        version = [-1]
        # Add tests with strided inputs but still-square images and filters.
        shapes += get_shapes2(scales_img=(2, 2), img_stride=(2, 2))
        shapes += get_shapes2(scales_kern=(2, 2), kern_stride=(2, 2))
    else:
        mode = theano_mode
        cls = None
    exec_conv(version, shapes, verbose, random, 'valid',
              print_=print_, ones=ones, rtol=1.1e-5,
              theano_mode=mode, cls=cls)
Example 14: test_logical_shapes
def test_logical_shapes(self):
    seed_rng()
    for stride in range(1, 4):
        kshp = (10, 2, 10, 10)
        featshp = (3, 10, 11, 11)

        a = tensor.ftensor4()
        A = tensor.ftensor4()
        # Need to transpose the first two dimensions of the kernel, and
        # reverse-index the kernel image dims (for correlation).
        kernel_rotated = tensor.transpose(A, axes=[1, 0, 2, 3])

        featshp_logical = (featshp[0], featshp[1], featshp[2] * stride,
                           featshp[3] * stride)
        kshp_rotated = (kshp[1], kshp[0], kshp[2], kshp[3])
        image_estimate = tensor.nnet.conv2d(a, kernel_rotated,
                                            border_mode='full',
                                            image_shape=featshp,
                                            filter_shape=kshp_rotated,
                                            imshp_logical=featshp_logical[1:],
                                            kshp_logical=kshp[2:])

        func = theano.function([a, A], image_estimate, mode=theano_mode)
        assert any([isinstance(node.op, theano.sandbox.cuda.blas.GpuConv)
                    for node in func.maker.fgraph.toposort()])

        a_in = numpy.random.randn(*featshp).astype("float32")
        A_in = numpy.random.randn(*kshp).astype("float32")
        func(a_in, A_in)
Example 15: setUp
def setUp(self):
    utt.seed_rng()
    self.mode = mode_with_gpu.excluding('constant_folding')
    self.gemv_op = gpu_sparse_block_gemv
    self.outer_op = gpu_sparse_block_outer
    self.gemv_class = GpuSparseBlockGemv
    self.outer_class = GpuSparseBlockOuter