This article collects typical usage examples of the theano.tensor.verify_grad function in Python. If you are wondering what verify_grad does, how to call it, or what it looks like in real code, the curated examples below should help.
The following 15 code examples of verify_grad are shown, sorted by popularity by default.
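Before the examples, here is a minimal sketch of the basic calling pattern (the function and test values below are illustrative and not taken from any of the examples): verify_grad builds the symbolic gradient of a callable made of Theano ops and compares it against a finite-difference estimate at the given test point, raising an exception if the two disagree.

import numpy
import theano.tensor as T

def fun(x):
    # any callable that maps Theano variables to a Theano expression
    return (x ** 2).sum()

# test point(s) for the inputs, given as a list of numpy arrays
x_val = numpy.random.rand(3, 4)

# compares the symbolic gradient of fun at x_val with a numeric estimate;
# raises an exception on mismatch
T.verify_grad(fun, [x_val], rng=numpy.random.RandomState(42))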
Example 1: test_maxpool
def test_maxpool():
    # generate flattened images
    maxpoolshps = ((2, 2), (3, 3), (4, 4), (5, 5), (6, 6))
    imval = N.random.rand(4, 5, 10, 10)
    images = T.dmatrix()

    for maxpoolshp in maxpoolshps:
        # symbolic stuff
        output, outshp = sp.max_pool(images, imval.shape[1:], maxpoolshp)
        f = function([images], [output])
        output_val = f(imval.reshape(imval.shape[0], -1))

        # numeric verification
        my_output_val = N.zeros((imval.shape[0], imval.shape[1],
                                 imval.shape[2] // maxpoolshp[0],
                                 imval.shape[3] // maxpoolshp[1]))
        assert N.prod(my_output_val.shape[1:]) == N.prod(N.r_[imval.shape[1], outshp])

        for n in range(imval.shape[0]):
            for k in range(imval.shape[1]):
                for i in range(imval.shape[2] // maxpoolshp[0]):
                    for j in range(imval.shape[3] // maxpoolshp[1]):
                        ii, jj = i * maxpoolshp[0], j * maxpoolshp[1]
                        patch = imval[n, k, ii:ii + maxpoolshp[0], jj:jj + maxpoolshp[1]]
                        my_output_val[n, k, i, j] = N.max(patch)
        my_output_val = my_output_val.reshape(imval.shape[0], -1)
        assert N.all(output_val == my_output_val)

        def mp(input):
            output, outshp = sp.max_pool(input, imval.shape[1:], maxpoolshp)
            return output
        T.verify_grad(None, mp, [imval.reshape(imval.shape[0], -1)])
Example 2: test_col_scale
def test_col_scale():
    x = theano.sparse.csc_dmatrix()
    s = theano.tensor.dvector()

    def d(x, s):
        return sp.sp_sum(sp.col_scale(x, s), sparse_grad=True)

    rng = numpy.random.RandomState(8723)
    R = 5
    C = 8

    x_val_dense = numpy.zeros((R, C), dtype="d")
    for idx in [(0, 0), (4, 1), (2, 1), (3, 3), (4, 4), (3, 7), (2, 7)]:
        x_val_dense[idx] = rng.randn()
    x_val = scipy.sparse.csc_matrix(x_val_dense)

    s_val = rng.randn(C)

    f = theano.function([x, s], sp.col_scale(x, s))

    print("A", f(x_val, s_val).toarray())
    print("B", x_val_dense * s_val)

    assert numpy.all(f(x_val, s_val).toarray() == (x_val_dense * s_val))

    if 0:
        tensor.verify_grad(None, d, [x_val, s_val],
                           mode=theano.Mode(linker="py", optimizer="fast_compile"))
    else:
        print("WARNING: skipping gradient test because verify_grad doesn't "
              "support sparse arguments", file=sys.stderr)
Example 3: test_eigvalsh_grad
def test_eigvalsh_grad():
    rng = numpy.random.RandomState(utt.fetch_seed())
    a = rng.randn(5, 5)
    a = a + a.T
    b = 10 * numpy.eye(5, 5) + rng.randn(5, 5)
    tensor.verify_grad(lambda a, b: eigvalsh(a, b).dot([1, 2, 3, 4, 5]),
                       [a, b], rng=numpy.random)
Example 4: test_det_grad
def test_det_grad():
    # If scipy is not available, this test will fail, thus we skip it.
    if not use_scipy:
        raise SkipTest('Scipy is not available')
    rng = numpy.random.RandomState(utt.fetch_seed())
    r = rng.randn(5, 5)
    tensor.verify_grad(det, [r], rng=numpy.random)
Example 5: test_expm_grad_3
def test_expm_grad_3():
    # with non-symmetric matrix (complex eigenvectors)
    if not imported_scipy:
        raise SkipTest("Scipy needed for the expm op.")
    rng = numpy.random.RandomState(utt.fetch_seed())
    A = rng.randn(5, 5).astype(config.floatX)
    tensor.verify_grad(expm, [A], rng=rng)
Example 6: test_expm_grad_3
def test_expm_grad_3():
    # with non-symmetric matrix (complex eigenvectors)
    if not imported_scipy:
        raise SkipTest("Scipy needed for the expm op.")
    rng = numpy.random.RandomState(utt.fetch_seed())
    # Always test in float64 for better numerical stability.
    A = rng.randn(5, 5)
    tensor.verify_grad(expm, [A], rng=rng)
Example 7: test_inverse_grad
def test_inverse_grad():
    rng = np.random.RandomState(utt.fetch_seed())
    r = rng.randn(4, 4)
    tensor.verify_grad(matrix_inverse, [r], rng=np.random)

    rng = np.random.RandomState(utt.fetch_seed())
    r = rng.randn(4, 4)
    tensor.verify_grad(matrix_inverse, [r], rng=np.random)
Example 8: verify_grad
def verify_grad(op, pt, n_tests=2, rng=None, *args, **kwargs):
    """
    Wrapper for tensor/basic.py:verify_grad
    Takes care of seeding the random number generator if None is given
    """
    if rng is None:
        seed_rng()
        rng = numpy.random
    T.verify_grad(op, pt, n_tests, rng, *args, **kwargs)
Example 9: test_fractional_max_pooling_numeric_gradient
def test_fractional_max_pooling_numeric_gradient():
    def fun(x):
        return fmp.DisjointPseudorandomFractionalMaxPooling2DOp(
            alpha=1.414,
            u=0.5
        )(x)

    T.verify_grad(fun,
                  [np.arange(25).reshape(1, 1, 5, 5).astype(fX)],
                  rng=np.random)
Example 10: test_eigvalsh_grad
def test_eigvalsh_grad():
    if not imported_scipy:
        raise SkipTest("Scipy needed for the geigvalsh op.")
    import scipy.linalg

    rng = numpy.random.RandomState(utt.fetch_seed())
    a = rng.randn(5, 5)
    a = a + a.T
    b = 10 * numpy.eye(5, 5) + rng.randn(5, 5)
    tensor.verify_grad(lambda a, b: eigvalsh(a, b).dot([1, 2, 3, 4, 5]),
                       [a, b], rng=numpy.random)
Example 11: test_expm_grad_2
def test_expm_grad_2():
    # with a non-symmetric matrix with real eigenspectrum
    if not imported_scipy:
        raise SkipTest("Scipy needed for the expm op.")
    rng = numpy.random.RandomState(utt.fetch_seed())
    A = rng.randn(5, 5).astype(config.floatX)
    w = (rng.randn(5).astype(config.floatX)) ** 2
    A = (numpy.diag(w ** 0.5)).dot(A + A.T).dot(numpy.diag(w ** (-0.5)))
    assert not numpy.allclose(A, A.T)
    tensor.verify_grad(expm, [A], rng=rng)
Example 12: test_expm_grad_2
def test_expm_grad_2():
    # with a non-symmetric matrix with real eigenspectrum
    if not imported_scipy:
        raise SkipTest("Scipy needed for the expm op.")
    rng = numpy.random.RandomState(utt.fetch_seed())
    # Always test in float64 for better numerical stability.
    A = rng.randn(5, 5)
    w = rng.randn(5) ** 2
    A = (numpy.diag(w ** 0.5)).dot(A + A.T).dot(numpy.diag(w ** (-0.5)))
    assert not numpy.allclose(A, A.T)
    tensor.verify_grad(expm, [A], rng=rng)
Example 13: test_softmax_grad
def test_softmax_grad(self):
    def cmp(n, m, f, f_gpu):
        data = numpy.arange(n * m, dtype="float32").reshape(n, m)
        gdata = numpy.asarray(data)[:, :, None, None]

        out = f(data)
        gout = numpy.asarray(f_gpu(gdata))[:, :, 0, 0]
        utt.assert_allclose(out, gout)

    x = T.matrix("x", "float32")
    x_gpu = T.tensor4("x_gpu", "float32")
    f_z = T.nnet.softmax_op
    f_gpu = dnn.GpuDnnSoftmax("accurate", "channel")

    # Verify the grad operation
    dims = (2, 3, 4, 5)
    gdata = numpy.arange(numpy.product(dims), dtype="float32").reshape(dims)
    T.verify_grad(f_gpu, [gdata], rng=numpy.random, mode=mode_with_gpu)

    # Verify that the CPU and GPU implementations return the same results
    # up to a tolerance.
    self._test_softmax(x, x_gpu, f_z, f_gpu, cmp)
    self._test_softmax(x, x, f_z, f_z, self._cmp)

    # Verify that the SoftmaxGrad -> Gpu[Dnn]SoftmaxGrad
    # optimization is applied when cudnn is required
    y = T.fvector("y")
    f = theano.function([y], T.grad(T.nnet.softmax(y).mean(), y), mode=mode_with_gpu)
    sorted_f = f.maker.fgraph.toposort()
    assert len([i for i in sorted_f if isinstance(i.op, self.gpu_grad_op)]) == 1
    assert len([i for i in sorted_f if isinstance(i.op, theano.tensor.nnet.SoftmaxGrad)]) == 0

    # Verify that the SoftmaxGrad -> Gpu[Dnn]SoftmaxGrad
    # optimization is not applied when cudnn is excluded or not
    # available
    mode_wo_cudnn = mode_with_gpu.excluding("cudnn")
    y = T.fvector("y")
    f = theano.function([y], T.grad(T.nnet.softmax(y).mean(), y), mode=mode_wo_cudnn)
    sorted_f = f.maker.fgraph.toposort()
    assert len([i for i in sorted_f if isinstance(i.op, self.gpu_grad_op)]) == 0
    assert len([i for i in sorted_f if isinstance(i.op, theano.tensor.nnet.SoftmaxGrad)]) == 1

    # Verify that the SoftmaxGrad -> GpuDnnSoftmaxGrad replacement
    # does not crash when the graph is built manually
    y = T.fvector("y")
    o = theano.tensor.nnet.SoftmaxGrad()(y, y * 2)
    f = theano.function([y], o, mode=mode_with_gpu)
    sorted_f = f.maker.fgraph.toposort()
    assert len([i for i in sorted_f if isinstance(i.op, self.gpu_grad_op)]) == 1
    assert len([i for i in sorted_f if isinstance(i.op, theano.tensor.nnet.SoftmaxGrad)]) == 0
Example 14: test_verify_exprgrad
def test_verify_exprgrad():
    from theano import tensor
    import numpy
    x = tt.scalar()
    f = theano.function([x], x)
    # def f(x):
    #     return x
    x_val = numpy.asarray([0.1, 0.2])
    rng = numpy.random.RandomState(42)

    print('going')
    print(tensor.verify_grad(f, [x_val], rng=rng))
Example 15: test_grad
def test_grad(self):
    x = tensor.matrix('x')
    one_of_n = tensor.lvector('one_of_n')
    op = crossentropy_categorical_1hot

    xe = op(x, one_of_n)

    f = theano.function([x, one_of_n], xe)
    x_val = numpy.asarray([[.4, .6, .0], [.1, .8, .1]],
                          dtype=config.floatX)
    xe_val = f(x_val, [0, 1])
    assert numpy.allclose(xe_val, -numpy.log([.4, .8]))

    def oplike(x):
        return op(x, [0, 1])

    tensor.verify_grad(oplike, [x_val], rng=numpy.random)