This article collects typical usage examples of the Python method theano.grad. If you are unsure what theano.grad does or how to call it, the curated examples below may help. You can also explore other members of the theano module.

The following 15 code examples of theano.grad are shown, ordered by popularity.
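Before the collected examples, here is a minimal sketch (not taken from any of them) of the basic pattern: build a scalar cost from symbolic variables, then ask theano.grad for the symbolic gradient and compile it.

import theano
import theano.tensor as T

x = T.vector('x')              # symbolic input vector
cost = (x ** 2).sum()          # scalar cost: sum of squares
g = theano.grad(cost, x)       # symbolic gradient, here 2 * x

f = theano.function([x], g)
print(f([1.0, 2.0, 3.0]))      # -> [ 2.  4.  6.]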
Example 1: test_csm_grad
# Required import: import theano [as alias]
# Or: from theano import grad [as alias]
def test_csm_grad(self):
    for sparsetype in ('csr', 'csc'):
        x = tensor.vector()
        y = tensor.ivector()
        z = tensor.ivector()
        s = tensor.ivector()
        call = getattr(sp, sparsetype + '_matrix')
        spm = call(random_lil((300, 400), config.floatX, 5))
        out = tensor.grad(dense_from_sparse(
            CSM(sparsetype)(x, y, z, s)
        ).sum(), x)
        self._compile_and_check([x, y, z, s],
                                [out],
                                [spm.data, spm.indices, spm.indptr,
                                 spm.shape],
                                (CSMGrad, CSMGradC))
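Example 1 checks the gradient of a CSM (compressed sparse matrix) construction. As a simpler, hedged illustration of the same idea, assuming the standard theano.sparse API, gradients also flow back through a sparse-to-dense conversion:

import theano
import theano.sparse as sparse

x = sparse.csr_matrix('x')           # symbolic CSR matrix
dense = sparse.dense_from_sparse(x)  # densify it
g = theano.grad(dense.sum(), x)      # gradient wrt the sparse input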
Example 2: test_sparseblockgemv_grad_shape
# Required import: import theano [as alias]
# Or: from theano import grad [as alias]
def test_sparseblockgemv_grad_shape(self):
    b = tensor.fmatrix()
    W = tensor.ftensor4()
    h = tensor.ftensor3()
    iIdx = tensor.imatrix()
    oIdx = tensor.imatrix()

    o = self.gemv_op(b.take(oIdx, axis=0), W, h, iIdx, oIdx)
    go = theano.grad(o.sum(), [b, W, h])

    f = theano.function([W, h, iIdx, b, oIdx], go, mode=self.mode)

    W_val, h_val, iIdx_val, b_val, oIdx_val = \
        BlockSparse_Gemv_and_Outer.gemv_data()

    # just make sure that it runs correctly and all the shapes are ok.
    b_g, W_g, h_g = f(W_val, h_val, iIdx_val, b_val, oIdx_val)
    assert b_g.shape == b_val.shape
    assert h_g.shape == h_val.shape
    assert W_g.shape == W_val.shape
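Note that theano.grad accepts a list of wrt variables and returns one gradient per entry, in the same order. A minimal standalone sketch of that pattern (variable names are illustrative):

import theano
import theano.tensor as T

a = T.matrix('a')
b = T.vector('b')
cost = (T.dot(a, b) ** 2).sum()
ga, gb = theano.grad(cost, [a, b])     # one gradient per wrt variable
f = theano.function([a, b], [ga, gb])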
Example 3: test_grad
# Required import: import theano [as alias]
# Or: from theano import grad [as alias]
def test_grad(self):
    c = T.matrix()
    p_y = T.exp(c) / T.exp(c).sum(axis=1).dimshuffle(0, 'x')

    # test that function contains softmax and softmaxgrad
    w = T.matrix()
    backup = config.warn.sum_div_dimshuffle_bug
    config.warn.sum_div_dimshuffle_bug = False
    try:
        g = theano.function([c, w], T.grad((p_y * w).sum(), c))
        hasattr(g.maker.fgraph.outputs[0].tag, 'trace')
    finally:
        config.warn.sum_div_dimshuffle_bug = backup

    g_ops = [n.op for n in g.maker.fgraph.toposort()]
    # print '--- g ='
    # printing.debugprint(g)
    # print '==='

    raise SkipTest('Optimization not enabled for the moment')
    assert len(g_ops) == 2
    assert softmax_op in g_ops
    assert softmax_grad in g_ops
    g(self.rng.rand(3, 4), self.rng.uniform(.5, 1, (3, 4)))
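For comparison, a hedged sketch of the same weighted-sum gradient written with the built-in softmax op, which the disabled optimization is expected to substitute for the hand-written expression:

import theano
import theano.tensor as T

c = T.matrix('c')
w = T.matrix('w')
p_y = T.nnet.softmax(c)                 # fused softmax op
g = theano.function([c, w], theano.grad((p_y * w).sum(), c))
# the compiled graph is expected to contain Softmax/SoftmaxGrad nodes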
Example 4: test_transpose_basic
# Required import: import theano [as alias]
# Or: from theano import grad [as alias]
def test_transpose_basic(self):
    # this should be a transposed softmax
    c = T.matrix()
    p_y = T.exp(c) / T.exp(c).sum(axis=0)

    # test that function contains softmax and no div.
    f = theano.function([c], p_y)
    # printing.debugprint(f)

    # test that the gradient function contains softmaxgrad and no div.
    backup = config.warn.sum_div_dimshuffle_bug
    config.warn.sum_div_dimshuffle_bug = False
    try:
        g = theano.function([c], T.grad(p_y.sum(), c))
        hasattr(g.maker.fgraph.outputs[0].tag, 'trace')
    finally:
        config.warn.sum_div_dimshuffle_bug = backup
    # printing.debugprint(g)

    raise SkipTest('Optimization not enabled for the moment')
Example 5: test_1d_basic
# Required import: import theano [as alias]
# Or: from theano import grad [as alias]
def test_1d_basic(self):
    # this should be a softmax, but of a one-row matrix
    c = T.vector()
    p_y = T.exp(c) / T.exp(c).sum()

    # test that function contains softmax and no div.
    f = theano.function([c], p_y)
    hasattr(f.maker.fgraph.outputs[0].tag, 'trace')
    # printing.debugprint(f)

    # test that function contains softmax and no div.
    backup = config.warn.sum_div_dimshuffle_bug
    config.warn.sum_div_dimshuffle_bug = False
    try:
        g = theano.function([c], T.grad(p_y.sum(), c))
        hasattr(g.maker.fgraph.outputs[0].tag, 'trace')
    finally:
        config.warn.sum_div_dimshuffle_bug = backup
    # printing.debugprint(g)

    raise SkipTest('Optimization not enabled for the moment')

# REPEAT 3 CASES in presence of log(softmax) with the advanced indexing
# etc.
Example 6: test_broadcast_grad
# Required import: import theano [as alias]
# Or: from theano import grad [as alias]
def test_broadcast_grad():
    rng = numpy.random.RandomState(utt.fetch_seed())
    x1 = T.tensor4('x')
    x1_data = rng.randn(1, 1, 300, 300)
    sigma = T.scalar('sigma')
    sigma_data = 20
    window_radius = 3

    filter_1d = T.arange(-window_radius, window_radius + 1)
    filter_1d = filter_1d.astype(theano.config.floatX)
    filter_1d = T.exp(-0.5 * filter_1d ** 2 / sigma ** 2)
    filter_1d = filter_1d / filter_1d.sum()
    filter_W = filter_1d.dimshuffle(['x', 'x', 0, 'x'])

    y = theano.tensor.nnet.conv2d(x1, filter_W, border_mode='full',
                                  filter_shape=[1, 1, None, None])
    theano.grad(y.sum(), sigma)
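The point of this test is that sigma enters the graph only through the constructed filter, yet still receives a gradient. A minimal sketch of the same idea without the convolution (the cost here is an arbitrary stand-in):

import theano
import theano.tensor as T

sigma = T.scalar('sigma')
xs = T.arange(-3, 4).astype(theano.config.floatX)
w = T.exp(-0.5 * xs ** 2 / sigma ** 2)
w = w / w.sum()                         # normalized Gaussian weights
g = theano.grad((w ** 2).sum(), sigma)  # grad of a derived cost wrt sigma
f = theano.function([sigma], g)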
Example 7: test_other_grad_tests
# Required import: import theano [as alias]
# Or: from theano import grad [as alias]
def test_other_grad_tests(self):
    x = theano.tensor.dmatrix()
    x_val1 = numpy.array([[1, 2, 3], [0, 5, 6], [0, 0, 9]],
                         dtype='float32')
    x_val2 = numpy.array([[1, 2, 0], [0, 5, 6], [7, 8, 9], [9, 10, 0]],
                         dtype='float32')
    rng = numpy.random.RandomState(43)

    p = Prod(axis=1)
    grad_p = theano.tensor.grad(p(x).sum(), x)
    grad_fn = theano.function([x], grad_p, mode=self.mode)
    assert numpy.allclose(grad_fn(x_val1),
                          [[6., 3., 2.], [30., 0., 0.], [0., 0., 0.]])
    assert numpy.allclose(grad_fn(x_val2),
                          [[0., 0., 2.], [30., 0., 0.],
                           [72., 63., 56.], [0., 0., 90.]])

    p_axis0 = Prod(axis=0)
    grad_p_axis0 = theano.tensor.grad(p_axis0(x).sum(), x)
    grad_fn_axis0 = theano.function([x], grad_p_axis0, mode=self.mode)
    assert numpy.allclose(grad_fn_axis0(x_val2),
                          [[0., 400., 0.], [63., 160., 0.],
                           [0., 100., 0.], [0., 80., 0.]])

    tensor.verify_grad(p, [x_val1], rng=rng, mode=self.mode)
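The expected values follow from the product rule: the derivative of a product with respect to one factor is the product of the remaining factors, d/dx_i (x_1 * ... * x_n) = prod_{j != i} x_j. A quick standalone check:

import numpy
import theano
import theano.tensor as T

x = T.dvector('x')
g = theano.grad(x.prod(), x)
f = theano.function([x], g)
print(f([1., 2., 3.]))    # -> [ 6.  3.  2.], matching the first row above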
Example 8: test_gt_grad
# Required import: import theano [as alias]
# Or: from theano import grad [as alias]
def test_gt_grad():
    """A user test that failed.

    Something about it made Elemwise.grad return something that was
    too complicated for get_scalar_constant_value to recognize as being 0,
    so gradient.grad reported that it was not a valid gradient of an
    integer.
    """
    floatX = config.floatX
    T = theano.tensor

    input_ = T.vector(dtype=floatX)
    random_values = numpy.random.RandomState(1234).uniform(
        low=-1, high=1, size=(2, 2))
    W_values = numpy.asarray(random_values, dtype=floatX)
    W = theano.shared(value=W_values, name='weights')
    correct_score = T.dot(input_, W)
    wrong_input = T.vector(dtype=floatX)
    wrong_score = theano.clone(correct_score, {input_: wrong_input})

    # Hinge loss
    scores = T.ones_like(correct_score) - correct_score + wrong_score
    cost = (scores * (scores > 0)).sum()
    T.grad(cost, input_)
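The hinge loss uses scores * (scores > 0) to implement max(0, scores); the comparison's gradient is defined as zero, so the mask acts as a constant factor and the gradient simply passes through where scores are positive. A minimal sketch:

import theano
import theano.tensor as T

s = T.vector('s')
relu = s * (s > 0)                 # elementwise max(0, s)
g = theano.grad(relu.sum(), s)     # 1 where s > 0, else 0
f = theano.function([s], g)
print(f([-1.0, 0.5, 2.0]))         # -> [ 0.  1.  1.]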
Example 9: test_grad_1d
# Required import: import theano [as alias]
# Or: from theano import grad [as alias]
def test_grad_1d(self):
    subi = 0
    data = numpy.asarray(rand(2, 3), dtype=self.dtype)
    n = self.shared(data)
    z = scal.constant(subi)
    t = n[z:, z]
    gn = theano.tensor.grad(theano.tensor.sum(theano.tensor.exp(t)), n)

    f = inplace_func([], gn, mode=self.mode)
    topo = f.maker.fgraph.toposort()
    topo_ = [node for node in topo
             if not isinstance(node.op, self.ignore_topo)]
    if not self.fast_compile:
        assert len(topo_) == 6
    assert numpy.sum([isinstance(node.op, self.inc_sub)
                      for node in topo_]) == 1
    assert numpy.sum([isinstance(node.op, self.sub)
                      for node in topo_]) == 1

    gval = f()
    good = numpy.zeros_like(data)
    good[subi:, subi] = numpy.exp(data[subi:, subi])
    self.assertTrue(numpy.allclose(gval, good), (gval, good))
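The expected gradient is built from numpy.zeros_like because differentiating through a slice scatters the upstream gradient back into a zero tensor of the original shape (the IncSubtensor node the test counts). A minimal sketch:

import numpy
import theano
import theano.tensor as T

n = theano.shared(numpy.arange(6.).reshape(2, 3))
t = n[0:, 0]                            # 1d slice: first column
g = theano.grad(T.exp(t).sum(), n)      # zero everywhere off the slice
print(g.eval())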
Example 10: test_grad_2d_inc_set_subtensor
# Required import: import theano [as alias]
# Or: from theano import grad [as alias]
def test_grad_2d_inc_set_subtensor(self):
    for n_shape, m_shape in [
            [(2, 3), (2, 2)],
            [(3, 2), (2, 2)],
            [(3, 2), (1, 2)],
            [(3, 2), (2,)],
    ]:
        for op in [inc_subtensor, set_subtensor]:
            subi = 2
            data = numpy.asarray(rand(*n_shape), dtype=self.dtype)
            n = self.shared(data)
            z = scal.constant(subi)
            m = matrix('m', dtype=self.dtype)
            mv = numpy.asarray(rand(*m_shape), dtype=self.dtype)

            t = op(n[:z, :z], m)
            gn, gm = theano.tensor.grad(theano.tensor.sum(t), [n, m])
            utt.verify_grad(lambda m: op(n[:z, :z], m), [mv])
            utt.verify_grad(lambda nn: op(nn[:z, :z], mv), [data])
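A hedged standalone sketch of the two ops exercised above: set_subtensor overwrites a region while inc_subtensor adds to it, and both are differentiable with respect to the destination and the update:

import theano
import theano.tensor as T

n = T.matrix('n')
m = T.matrix('m')
out = T.inc_subtensor(n[:2, :2], m)    # n with m added to its top-left block
gn, gm = theano.grad(out.sum(), [n, m])
f = theano.function([n, m], [gn, gm])  # for inc_subtensor, both are all ones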
Example 11: test_grad_0d
# Required import: import theano [as alias]
# Or: from theano import grad [as alias]
def test_grad_0d(self):
    data = numpy.asarray(rand(2, 3), dtype=self.dtype)
    n = self.shared(data)
    t = n[1, 0]
    gn = theano.tensor.grad(theano.tensor.sum(theano.tensor.exp(t)), n)

    f = self.function([], gn)
    topo = f.maker.fgraph.toposort()
    topo_ = [node for node in topo
             if not isinstance(node.op, self.ignore_topo)]
    if not self.fast_compile:
        assert len(topo_) == 6
    assert numpy.sum([isinstance(node.op, self.inc_sub)
                      for node in topo_]) == 1
    assert numpy.sum([isinstance(node.op, self.sub)
                      for node in topo_]) == 1

    gval = f()
    good = numpy.zeros_like(data)
    good[1, 0] = numpy.exp(data[1, 0])
    self.assertTrue(numpy.allclose(gval, good), (gval, good))
Example 12: test_err_bound_list
# Required import: import theano [as alias]
# Or: from theano import grad [as alias]
def test_err_bound_list(self):
    n = self.shared(numpy.ones((2, 3), dtype=self.dtype) * 5)
    l = lvector()
    t = n[l]
    # We test against AdvancedSubtensor1 as we transfer data to the cpu.
    self.assertTrue(isinstance(t.owner.op, tensor.AdvancedSubtensor1))

    f = self.function([l], t, op=self.adv_sub1)

    # the grad
    g = self.function([l],
                      inc_subtensor(t, numpy.asarray([[1.]], self.dtype)),
                      op=self.adv_incsub1)

    for shp in [[0, 4], [0, -3], [-10]]:
        self.assertRaises(IndexError, f, shp)
        self.assertRaises(IndexError, g, shp)
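A minimal sketch of the behavior under test: advanced integer indexing raises IndexError at call time for out-of-range indices, so both the forward function and the gradient-style inc_subtensor one must fail the same way:

import numpy
import theano
import theano.tensor as T

n = theano.shared(numpy.ones((2, 3)) * 5)
l = T.lvector('l')
f = theano.function([l], n[l])
f([0, 1])        # fine: rows 0 and 1
# f([0, 4])      # raises IndexError: index out of bounds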
Example 13: test_grad_advanced_inc_subtensor
# Required import: import theano [as alias]
# Or: from theano import grad [as alias]
def test_grad_advanced_inc_subtensor(self):
    def inc_slice(*s):
        def just_numeric_args(a, b):
            cost = (a[s] + b).sum()
            cost_wrt_a = theano.tensor.grad(cost, a)
            cost_wrt_b = theano.tensor.grad(cost, b)
            grads = cost_wrt_a.sum() + cost_wrt_b.sum()
            return grads
        return just_numeric_args

    # vector
    utt.verify_grad(
        inc_slice(slice(2, 4, None)),
        (numpy.asarray([0, 1, 2, 3, 4, 5.]), numpy.asarray([9, 9.]),))

    # matrix
    utt.verify_grad(
        inc_slice(slice(1, 2, None), slice(None, None, None)),
        (numpy.asarray([[0, 1], [2, 3], [4, 5.]]),
         numpy.asarray([[9, 9.]]),))

    # single element
    utt.verify_grad(
        inc_slice(2, 1),
        (numpy.asarray([[0, 1], [2, 3], [4, 5.]]), numpy.asarray(9.),))
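utt.verify_grad compares the symbolic gradient against a finite-difference estimate and raises if they disagree. Assuming the underlying theano.gradient.verify_grad API, a minimal standalone use (the cost function is arbitrary):

import numpy
import theano

rng = numpy.random.RandomState(42)
theano.gradient.verify_grad(lambda x: (x ** 2).sum(),
                            [numpy.asarray([1., 2., 3.])],
                            rng=rng)   # raises on a gradient mismatch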
Example 14: test_inc_adv_subtensor_with_broadcasting
# Required import: import theano [as alias]
# Or: from theano import grad [as alias]
def test_inc_adv_subtensor_with_broadcasting(self):
    if inplace_increment is None:
        raise inplace_increment_missing

    inc = dscalar()
    a = inc_subtensor(self.m[self.ix1, self.ix12], inc)
    g_inc = tensor.grad(a.sum(), inc)

    assert a.type == self.m.type, (a.type, self.m.type)
    f = theano.function([self.m, self.ix1, self.ix12, inc], [a, g_inc],
                        allow_input_downcast=True)
    aval, gval = f([[.4, .9, .1],
                    [5, 6, 7],
                    [.5, .3, .15]],
                   [1, 2, 1],
                   [0, 1, 0],
                   2.1)
    assert numpy.allclose(aval,
                          [[.4, .9, .1],
                           [5 + 2.1 * 2, 6, 7],
                           [.5, .3 + 2.1, .15]]), aval
    assert numpy.allclose(gval, 3.0), gval
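The gradient with respect to the scalar inc is 3.0 because inc is broadcast into three indexed positions, and d(a.sum())/d(inc) counts each occurrence once. A minimal sketch of the same counting (with distinct indices, to sidestep the inplace_increment requirement):

import numpy
import theano
import theano.tensor as T

inc = T.dscalar('inc')
m = T.dmatrix('m')
i = T.lvector('i')
j = T.lvector('j')
a = T.inc_subtensor(m[i, j], inc)      # inc broadcast over len(i) positions
g = theano.grad(a.sum(), inc)
f = theano.function([m, i, j, inc], g)
print(f(numpy.zeros((3, 3)), [0, 1, 2], [0, 1, 0], 2.0))   # -> 3.0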
Example 15: test_inc_adv_subtensor1_with_broadcasting
# Required import: import theano [as alias]
# Or: from theano import grad [as alias]
def test_inc_adv_subtensor1_with_broadcasting(self):
    if inplace_increment is None:
        raise inplace_increment_missing

    inc = dscalar()
    a = inc_subtensor(self.m[self.ix1], inc)
    g_inc = tensor.grad(a.sum(), inc)

    assert a.type == self.m.type, (a.type, self.m.type)
    f = theano.function([self.m, self.ix1, inc], [a, g_inc],
                        allow_input_downcast=True)
    aval, gval = f([[.4, .9, .1],
                    [5, 6, 7],
                    [.5, .3, .15]],
                   [0, 1, 0],
                   2.1)
    assert numpy.allclose(aval,
                          [[.4 + 2.1 * 2, .9 + 2.1 * 2, .1 + 2.1 * 2],
                           [5 + 2.1, 6 + 2.1, 7 + 2.1],
                           [.5, .3, .15]]), aval
    assert numpy.allclose(gval, 9.0), gval
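The gradient here is 9.0 rather than 3.0 because each of the three indexed rows spans three columns, so the scalar inc appears nine times in a.sum().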