This article collects typical usage examples of the assert_allclose function from Python's theano.tests.unittest_tools module. If you have been wondering what assert_allclose does and how to use it, the hand-picked examples below should help.
Fifteen code examples of assert_allclose are shown, sorted by popularity by default.
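Before the examples, a minimal self-contained sketch of the function's basic contract may help (a sketch under assumptions: Theano is installed, and the default tolerances are derived from the dtypes being compared):

import numpy as np
from theano.tests import unittest_tools as utt

a = np.array([1.0, 2.0, 3.0])
b = a + 1e-9  # differs from a only by rounding-level noise

# Passes: the values match within the default tolerances.
utt.assert_allclose(a, b)

# A clearly different value raises an error instead:
# utt.assert_allclose(a, a + 0.5)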
Example 1: test_concatenate
def test_concatenate(self):
    def ref(*inputs):
        axis = inputs[0]
        tensors = inputs[1:]
        return numpy.concatenate(tensors, axis)

    seed = utt.fetch_seed()
    rng = numpy.random.RandomState(seed)
    imgsize_list = ((5, 5), (6, 6), (6, 6), (8, 8))
    n, c = 4, 2
    axis = 1

    image = T.dtensor4('image')
    image1 = T.dtensor4('image1')

    # Compare the MKL concatenate Op against a numpy.concatenate reference.
    for imgsize in imgsize_list:
        imval = rng.rand(n, c, imgsize[0], imgsize[1])
        output_ref = ref(axis, imval, imval)

        Opout = self.mkl_concatenate_func(axis, image, image1)
        f = function([image, image1], [Opout])
        output_mkl = f(imval, imval)

        utt.assert_allclose(output_mkl, output_ref)
Example 2: run_gradweight
def run_gradweight(self, inputs_shape, filters_shape, dCdH_shape,
                   subsample=(1, 1, 1)):
    inputs_val = numpy.random.random(inputs_shape).astype('float32')
    dCdH_val = numpy.random.random(dCdH_shape).astype('float32')
    inputs = shared(inputs_val)
    dCdH = shared(dCdH_val)

    conv = theano.tensor.nnet.convGrad3D(V=inputs, dCdH=dCdH,
                                         WShape=filters_shape,
                                         d=subsample)
    img = gpu_contiguous(inputs.dimshuffle(0, 4, 1, 2, 3))
    topgrad = gpu_contiguous(dCdH.dimshuffle(0, 4, 1, 2, 3))
    if subsample == (1, 1, 1):
        conv_gemm = GpuCorr3dMM_gradWeights(subsample=subsample)(img,
                                                                 topgrad)
    else:
        conv_gemm = GpuCorr3dMM_gradWeights(subsample=subsample)(
            img, topgrad, shape=filters_shape[1:4])
    conv_gemm = conv_gemm.dimshuffle(0, 2, 3, 4, 1)

    f_ref = theano.function([], conv)
    f = theano.function([], conv_gemm, mode=mode_with_gpu)

    res_ref = f_ref()
    res = f()
    utt.assert_allclose(res_ref, res)
Example 3: cmp
def cmp(n, m, f, f_gpu):
    data = numpy.arange(n * m, dtype='float32').reshape(n, m)
    gdata = numpy.asarray(data)[:, :, None, None]

    out = f(data)
    gout = numpy.asarray(f_gpu(gdata))[:, :, 0, 0]
    utt.assert_allclose(out, gout)
Example 4: run_gradinput
def run_gradinput(self, inputs_shape, filters_shape,
                  subsample=(1, 1, 1)):
    inputs_val = numpy.random.random(inputs_shape).astype('float32')
    filters_val = numpy.random.random(filters_shape).astype('float32')
    inputs = shared(inputs_val)
    filters = shared(filters_val)
    bias = shared(numpy.zeros(filters_shape[4]).astype('float32'))

    conv = theano.tensor.nnet.convTransp3D(W=filters,
                                           b=bias,
                                           d=subsample,
                                           H=inputs)
    f_ref = theano.function([], conv)
    res_ref = f_ref()

    # Get bottom shape using convTransp3D
    bottom_shape = res_ref.shape
    bottom_val = numpy.random.random(bottom_shape).astype('float32')
    bottom = shared(bottom_val)

    weight = gpu_contiguous(filters.dimshuffle(0, 4, 1, 2, 3))
    top = gpu_contiguous(inputs.dimshuffle(0, 4, 1, 2, 3))
    if subsample == (1, 1, 1):
        conv_gemm = GpuCorr3dMM_gradInputs(subsample=subsample)(
            kern=weight, topgrad=top)
    else:
        conv_gemm = GpuCorr3dMM_gradInputs(subsample=subsample)(
            kern=weight, topgrad=top,
            shape=bottom.shape[1:4])
    conv_gemm = conv_gemm.dimshuffle(0, 2, 3, 4, 1)

    f = theano.function([], conv_gemm, mode=mode_with_gpu)
    res = f()
    utt.assert_allclose(res_ref, res)
Example 5: test_opt_convgrad3d_gemm
def test_opt_convgrad3d_gemm(self):
    inputs_shape = (16, 10, 12, 16, 1)
    filters_shape = (10, 6, 12, 4, 1)
    dCdH_shape = (16, 5, 1, 13, 10)

    inputs_val = numpy.random.random(inputs_shape).astype('float32')
    dCdH_val = numpy.random.random(dCdH_shape).astype('float32')
    inputs = shared(inputs_val)
    dCdH = shared(dCdH_val)

    conv = theano.tensor.nnet.convGrad3D(V=inputs, dCdH=dCdH,
                                         WShape=filters_shape,
                                         d=(1, 1, 1))
    mode = mode_with_gpu.including('convgrad3d_gemm')

    f_ref = theano.function([], conv)
    f_gemm = theano.function([], conv, mode=mode)

    # make sure we inserted the gemm trickery
    topo = f_gemm.maker.fgraph.toposort()
    assert sum(isinstance(n.op, GpuCorr3dMM_gradWeights) for n in topo) > 0

    res_ref = f_ref()
    res_gemm = f_gemm()
    utt.assert_allclose(res_ref, res_gemm)
Example 6: gemm_directly
def gemm_directly(bs, ch, nf, rImg1, rImg2, rFlt1, rFlt2, subsx, subsy,
                  direction):
    ishape = (bs, ch, rImg1, rImg2)
    kshape = (nf, ch, rFlt1, rFlt2)
    subsample = (subsx, subsy)

    npy_img = theano._asarray(numpy.random.rand(*ishape), dtype='float32')
    npy_kern = theano._asarray(numpy.random.rand(*kshape), dtype='float32')

    i = cuda_tensor4()
    k = cuda_tensor4()

    # GpuCorrMM computes correlation, so the kernel is flipped below to
    # match py_conv's convolution.
    if direction == 'fprop':
        cpuval = py_conv(npy_img, npy_kern, 'valid', subsample)
        op = theano.sandbox.cuda.blas.GpuCorrMM(border_mode='valid',
                                                subsample=subsample)(i, k)
        f = theano.function([i, k], op, mode=theano_mode)
        gpuval = f(npy_img, npy_kern[:, :, ::-1, ::-1])
    elif direction == 'bprop img':
        cpuval = py_conv(npy_img, npy_kern, 'full', subsample)
        op = theano.sandbox.cuda.blas.GpuCorrMM_gradInputs(
            border_mode='valid', subsample=subsample)(i, k)
        f = theano.function([i, k], op, mode=theano_mode)
        gpuval = f(npy_kern.transpose(1, 0, 2, 3), npy_img)
    elif direction == 'bprop kern':
        cpuval = py_conv(npy_img, npy_kern, 'valid', subsample)
        op = theano.sandbox.cuda.blas.GpuCorrMM_gradWeights(
            border_mode='valid', subsample=subsample)(i, k)
        f = theano.function([i, k], op, mode=theano_mode)
        gpuval = numpy.array(f(
            npy_img.transpose(1, 0, 2, 3),
            npy_kern.transpose(1, 0, 2, 3)[:, :, ::-1, ::-1])).transpose(
                1, 0, 2, 3)

    assert_allclose(cpuval, gpuval, rtol=1e-4)
Example 7: test_elemwise_pow
def test_elemwise_pow():
    # Test that GpuElemwise(pow) can compile with any combination of integer
    # or float input dtype.
    dtypes = ["uint8", "uint16", "uint32", "uint64",
              "int8", "int16", "int32", "int64",
              "float16", "float32", "float64"]

    for dtype_base in dtypes:
        for dtype_exp in dtypes:

            # Compile a gpu function with the specified dtypes
            base_val = np.random.randint(0, 5, size=10).astype(dtype_base)
            exp_val = np.random.randint(0, 3, size=10).astype(dtype_exp)

            base = theano.tensor.vector(dtype=dtype_base)
            exp = gpuarray_shared_constructor(exp_val)
            assert exp.dtype == dtype_exp
            output = base ** exp
            f = theano.function([base], output, mode=mode_with_gpu)
            theano.printing.debugprint(f)

            # We don't transfer to the GPU when the output dtype is int*
            n = len([n for n in f.maker.fgraph.apply_nodes
                     if isinstance(n.op, GpuElemwise)])
            assert n == (output.dtype in tensor.float_dtypes)

            # Call the function to make sure the output is valid
            out = f(base_val)
            expected_out = base_val ** exp_val
            assert_allclose(out, expected_out)
Example 8: test_irfft
def test_irfft(self):
    inputs_val = np.random.random((1, N, N)).astype(theano.config.floatX)
    inputs = theano.shared(inputs_val)

    rfft = fft.rfft(inputs)
    f_rfft = theano.function([], rfft)
    res_fft = f_rfft()

    m = rfft.type()
    irfft = fft.irfft(m)
    f_irfft = theano.function([m], irfft)
    res_irfft = f_irfft(res_fft)

    utt.assert_allclose(inputs_val, np.asarray(res_irfft))

    inputs_val = np.random.random((1, N, N, 2)).astype(theano.config.floatX)
    inputs = theano.shared(inputs_val)

    irfft = fft.irfft(inputs)
    f_irfft = theano.function([], irfft)
    res_irfft = f_irfft()
    inputs_ref = inputs_val[..., 0] + inputs_val[..., 1] * 1j

    irfft_ref = np.fft.irfftn(inputs_ref, axes=(1, 2))

    utt.assert_allclose(irfft_ref, res_irfft, atol=1e-4, rtol=1e-4)
Example 9: run_conv_valid
def run_conv_valid(self, inputs_shape, filters_shape, pad=False):
    inputs_val = numpy.random.random(inputs_shape).astype('float32')
    filters_val = numpy.random.random(filters_shape).astype('float32')

    inputs = shared(inputs_val)
    filters = shared(filters_val)
    bias = shared(numpy.zeros(filters_shape[0]).astype('float32'))

    # Flip the filters, as conv3D computes correlation.
    filters_flip = filters[:, ::-1, ::-1, ::-1, :]

    conv_ref = theano.tensor.nnet.conv3D(V=inputs, W=filters_flip,
                                         b=bias, d=(1, 1, 1))

    conv_fft = theano.sandbox.cuda.fftconv.conv3d_fft(
        inputs.dimshuffle(0, 4, 1, 2, 3),
        filters.dimshuffle(0, 4, 1, 2, 3),
        border_mode="valid",
        pad_last_dim=pad)
    conv_fft = conv_fft.dimshuffle(0, 2, 3, 4, 1)

    f_ref = theano.function([], conv_ref, mode="FAST_RUN")
    mode = mode_with_gpu
    mode.check_py_code = False
    f_fft = theano.function([], conv_fft, mode=mode)

    res_ref = f_ref()
    res_fft = f_fft()
    utt.assert_allclose(res_ref, res_fft, rtol=1e-05, atol=1e-05)
Example 10: test_None
def test_None(self):
    a = tensor.dmatrix()
    l = sort(a, None)
    f = theano.function([a], l)
    gv = f(self.m_val)
    gt = np.sort(self.m_val, None)
    utt.assert_allclose(gv, gt)
Example 11: test_1Drfft
def test_1Drfft(self):
    inputs_val = np.random.random((1, N)).astype(theano.config.floatX)

    x = T.matrix('x')
    rfft = fft.rfft(x)
    f_rfft = theano.function([x], rfft)
    res_rfft = f_rfft(inputs_val)
    res_rfft_comp = (np.asarray(res_rfft[:, :, 0]) +
                     1j * np.asarray(res_rfft[:, :, 1]))

    rfft_ref = np.fft.rfft(inputs_val, axis=1)

    utt.assert_allclose(rfft_ref, res_rfft_comp)

    m = rfft.type()
    irfft = fft.irfft(m)
    f_irfft = theano.function([m], irfft)
    res_irfft = f_irfft(res_rfft)

    utt.assert_allclose(inputs_val, np.asarray(res_irfft))

    # The numerical gradient of the FFT is sensitive; eps must be set
    # large enough to get good accuracy.
    eps = 1e-1

    def f_rfft(inp):
        return fft.rfft(inp)
    inputs_val = np.random.random((1, N)).astype(theano.config.floatX)
    utt.verify_grad(f_rfft, [inputs_val], eps=eps)

    def f_irfft(inp):
        return fft.irfft(inp)
    inputs_val = np.random.random((1, N // 2 + 1, 2)).astype(theano.config.floatX)
    utt.verify_grad(f_irfft, [inputs_val], eps=eps)
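Example 11 also relies on utt.verify_grad, which checks a symbolic gradient against a finite-difference estimate; eps is the finite-difference step, and the FFT gradient is numerically sensitive enough to need a large one. Below is a minimal sketch of the same pattern on a smoother function (the function and shapes are illustrative, not taken from the test above):

import numpy as np
import theano.tensor as T
from theano.tests import unittest_tools as utt

def f_sqr(inp):
    # Any symbolic expression of the inputs can be verified.
    return T.sqr(inp).sum()

x_val = np.random.random((3, 4)).astype('float64')
utt.verify_grad(f_sqr, [x_val])  # raises if the gradients disagree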
Example 12: test3
def test3(self):
    a = tensor.dvector()
    w2 = sort(a)
    f = theano.function([a], w2)
    gv = f(self.v_val)
    gt = np.sort(self.v_val)
    utt.assert_allclose(gv, gt)
Example 13: with_linker
def with_linker(self, linker, op, type, rand_val):
    for xsh, ysh in [((3, 5), (3, 5)),
                     ((3, 5), (1, 5)),
                     ((3, 5), (3, 1)),
                     ((1, 5), (5, 1)),
                     ((1, 1), (1, 1)),
                     ((self.openmp_minsize,), (self.openmp_minsize,)),
                     ((self.openmp_minsize_sqrt,
                       self.openmp_minsize_sqrt),
                      (self.openmp_minsize_sqrt,
                       self.openmp_minsize_sqrt)),
                     ((2, 3, 4, 5), (2, 3, 4, 5)),
                     ((2, 3, 4, 5), (1, 3, 1, 5)),
                     ((2, 3, 4, 5), (1, 1, 1, 1)),
                     ((), ())]:
        x = type('float64', [(entry == 1) for entry in xsh])('x')
        y = type('float64', [(entry == 1) for entry in ysh])('y')
        e = op(scalar.add)(x, y)
        f = copy(linker).accept(FunctionGraph([x, y], [e])).make_function()
        xv = rand_val(xsh)
        yv = rand_val(ysh)
        zv = xv + yv

        unittest_tools.assert_allclose(f(xv, yv), zv)

        # test Elemwise.infer_shape
        # (the Shape op doesn't implement c_code!)
        if isinstance(linker, gof.PerformLinker):
            x = type('float64', [(entry == 1) for entry in xsh])('x')
            y = type('float64', [(entry == 1) for entry in ysh])('y')
            e = op(scalar.add)(x, y)
            f = copy(linker).accept(FunctionGraph(
                [x, y], [e.shape])).make_function()
            assert tuple(f(xv, yv)) == tuple(zv.shape)
Example 14: test_relu_grad
def test_relu_grad(self):
    seed = utt.fetch_seed()
    rng = numpy.random.RandomState(seed)

    imgsize_list = ((5, 5), (6, 6), (6, 6), (8, 8))
    n, c = 4, 2
    axis = 1

    image = T.dtensor4('image')
    image1 = T.dtensor4('image1')

    for imgsize in imgsize_list:
        imval = rng.rand(n, c, imgsize[0], imgsize[1])

        out = T.concatenate([image, image1], axis)
        sum_ref = T.sum(out)
        gx_ref = T.grad(sum_ref, [image, image1])
        f_ref = theano.function([image, image1], outputs=gx_ref, mode=mode_without_mkl)
        output_ref = f_ref(imval, imval)

        out_mkl = self.mkl_concatenate_func(axis, image, image1)
        sum_mkl = T.sum(out_mkl)
        gx_mkl = T.grad(sum_mkl, [image, image1])
        f_mkl = theano.function([image, image1], outputs=gx_mkl)
        output_mkl = f_mkl(imval, imval)

        utt.assert_allclose(output_mkl, output_ref)
Example 15: test_hgemm_swap
def test_hgemm_swap():
    from theano.sandbox.cuda import nvcc_compiler
    if nvcc_compiler.nvcc_version < '7.5':
        raise SkipTest("SgemmEx is only available on cuda 7.5+")

    v = tensor.vector(dtype='float16')
    m = tensor.matrix(dtype='float16')
    m2 = tensor.matrix(dtype='float16')
    m32 = tensor.matrix(dtype='float32')

    # test that we don't try to replace anything but matrix x matrix in float16
    f = theano.function([v, m], tensor.dot(v, m), mode=mode_with_gpu)
    assert len([node for node in f.maker.fgraph.apply_nodes
                if isinstance(node.op, GpuGemm)]) == 0

    f = theano.function([m32, m], tensor.dot(m32, m), mode=mode_with_gpu)
    assert len([node for node in f.maker.fgraph.apply_nodes
                if isinstance(node.op, GpuGemm)]) == 0

    f = theano.function([m, m2], tensor.dot(m, m2), mode=mode_with_gpu)
    assert len([node for node in f.maker.fgraph.apply_nodes
                if isinstance(node.op, GpuGemm)]) == 1

    v1 = numpy.random.random((3, 4)).astype('float16')
    v2 = numpy.random.random((4, 2)).astype('float16')
    of = f(v1, v2)
    on = numpy.dot(v1, v2)

    utt.assert_allclose(of, on)
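Note that several of the examples above (6 and 9 in particular) pass explicit rtol/atol values instead of relying on the defaults. This is the usual pattern when a lower-precision GPU result is checked against a higher-precision reference and accumulated rounding differences exceed the default tolerances. A minimal sketch (the arrays and tolerances are illustrative):

import numpy as np
from theano.tests import unittest_tools as utt

ref = np.random.random((4, 4))   # float64 reference
got = ref.astype('float32')      # simulate a lower-precision result

# Loosened tolerances absorb the float32 rounding error.
utt.assert_allclose(ref, got, rtol=1e-4, atol=1e-4)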