This article collects typical usage examples of the theano.gradient module in Python. If you are wondering what theano.gradient does, how to call it, or what real code that uses it looks like, the curated examples below may help. You can also explore further usage examples of the theano package that the module belongs to.
The section below presents 14 code examples involving theano.gradient, sorted by popularity by default.
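Before the examples, here is a minimal sketch of how gradients are typically requested from user code. The variable names are illustrative and not taken from any example below; the only assumption is the standard theano.grad / theano.function API.

import theano
import theano.tensor as T

x = T.dscalar('x')
cost = x ** 2 + 3 * x
g = theano.grad(cost, x)        # symbolic gradient: 2 * x + 3
f = theano.function([x], g)
print(f(2.0))                   # prints 7.0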
Example 1: grad
# Required module: import theano [as alias]
# Or: from theano import gradient [as alias]
def grad(self, inputs, g):
    # g[1:] is all integers, so their Jacobian in this op
    # is 0. We thus don't need to worry about what their values
    # are.

    # if g[0] is disconnected, then this op doesn't contribute
    # any gradient anywhere. but we know that at least one of
    # g[1:] is connected, or this grad method wouldn't have been
    # called, so we should report zeros
    (csm,) = inputs
    if isinstance(g[0].type, DisconnectedType):
        return [csm.zeros_like()]
    data, indices, indptr, shape = csm_properties(csm)
    return [CSM(csm.format)(g[0], indices, indptr, shape)]

# don't make this a function or it breaks some optimizations below
Example 2: perform
# Required module: import theano [as alias]
# Or: from theano import gradient [as alias]
def perform(self, node, inputs, outputs):
    (a_indices, a_indptr, b, g_ab) = inputs
    (out,) = outputs
    g_a_data = numpy.zeros(a_indices.shape, dtype=g_ab.dtype)
    for i in xrange(len(a_indptr) - 1):  # loop over rows
        ind0 = a_indptr[i]
        ind1 = a_indptr[i + 1]
        # loop over values in that row (columns)
        for j_idx in xrange(ind0, ind1):
            j = a_indices[j_idx]
            # grad is dot product of i-th row of gradient with j-th row of b
            # Depending on the type of g_ab and b (sparse or dense),
            # the following dot product can result in a scalar or
            # a (1, 1) sparse matrix.
            dot_val = numpy.dot(g_ab[i], b[j].T)
            if isinstance(dot_val, scipy.sparse.spmatrix):
                dot_val = dot_val[0, 0]
            g_a_data[j_idx] = dot_val
    out[0] = g_a_data
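For readers who prefer a dense view of what this loop computes: it evaluates g_ab.dot(b.T) only at the structurally nonzero positions of the sparse matrix. A minimal NumPy/SciPy sketch of that equivalence (illustrative names, not part of the Op above):

import numpy
import scipy.sparse

rng = numpy.random.RandomState(0)
a = scipy.sparse.random(5, 3, density=0.4, format='csr', random_state=rng)
b = rng.randn(3, 4)
g_ab = rng.randn(5, 4)              # upstream gradient of a.dot(b)

dense = g_ab.dot(b.T)               # full gradient w.r.t. a dense version of a
g_a_data = numpy.array([dense[i, j]
                        for i in range(a.shape[0])
                        for j in a.indices[a.indptr[i]:a.indptr[i + 1]]])
print(g_a_data.shape == a.data.shape)   # True: one value per stored nonzero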
Example 3: grad
# Required module: import theano [as alias]
# Or: from theano import gradient [as alias]
def grad(self, inp, cost_grad):
    """
    Notes
    -----
    The gradient is currently implemented for matrices only.
    """
    a, val = inp
    grad = cost_grad[0]
    if a.dtype.startswith('complex'):
        return [None, None]
    elif a.ndim > 2:
        raise NotImplementedError('%s: gradient is currently implemented'
                                  ' for matrices only' %
                                  self.__class__.__name__)
    wr_a = fill_diagonal(grad, 0)  # valid for any number of dimensions
    # diag is only valid for matrices
    wr_val = theano.tensor.nlinalg.diag(grad).sum()
    return [wr_a, wr_val]
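The two returned terms can be checked by hand with NumPy: the gradient w.r.t. `a` is the upstream gradient with its diagonal zeroed (those entries are overwritten by `val`), and the gradient w.r.t. `val` is the sum over the diagonal of the upstream gradient. A small sketch with illustrative values:

import numpy

grad_out = numpy.arange(9.0).reshape(3, 3)   # stands in for cost_grad[0]

wr_a = grad_out.copy()
numpy.fill_diagonal(wr_a, 0)     # diagonal entries of `a` never reach the output
wr_val = numpy.trace(grad_out)   # `val` appears in every diagonal entry
print(wr_val)                    # 0.0 + 4.0 + 8.0 == 12.0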
Example 4: grad
# Required module: import theano [as alias]
# Or: from theano import gradient [as alias]
def grad(self, inputs, g_outputs):
    r"""The gradient function should return

        .. math:: V\frac{\partial X^{-1}}{\partial X},

    where :math:`V` corresponds to ``g_outputs`` and :math:`X` to
    ``inputs``. Using the `matrix cookbook
    <http://www2.imm.dtu.dk/pubdb/views/publication_details.php?id=3274>`_,
    one can deduce that the relation corresponds to

        .. math:: (X^{-1} \cdot V^{T} \cdot X^{-1})^T.

    """
    x, = inputs
    xi = self(x)
    gz, = g_outputs
    # TT.dot(gz.T, xi)
    return [-matrix_dot(xi, gz.T, xi).T]
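The formula can be sanity-checked numerically. Assuming a scalar cost sum(V * X^{-1}), the analytic gradient is -X^{-T} V X^{-T}, which is exactly what the return statement above builds. The following NumPy sketch (illustrative, not part of the Op) compares it against central finite differences:

import numpy

rng = numpy.random.RandomState(0)
X = rng.randn(4, 4) + 4 * numpy.eye(4)   # keep X well conditioned
V = rng.randn(4, 4)                      # plays the role of g_outputs

def cost(M):
    return numpy.sum(V * numpy.linalg.inv(M))

xi = numpy.linalg.inv(X)
analytic = -xi.T.dot(V).dot(xi.T)

eps = 1e-6
numeric = numpy.zeros_like(X)
for i in range(4):
    for j in range(4):
        E = numpy.zeros_like(X)
        E[i, j] = eps
        numeric[i, j] = (cost(X + E) - cost(X - E)) / (2 * eps)
print(numpy.allclose(analytic, numeric, atol=1e-5))   # True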
Example 5: R_op
# Required module: import theano [as alias]
# Or: from theano import gradient [as alias]
def R_op(self, inputs, eval_points):
    r"""The gradient function should return

        .. math:: \frac{\partial X^{-1}}{\partial X}V,

    where :math:`V` corresponds to ``g_outputs`` and :math:`X` to
    ``inputs``. Using the `matrix cookbook
    <http://www2.imm.dtu.dk/pubdb/views/publication_details.php?id=3274>`_,
    one can deduce that the relation corresponds to

        .. math:: X^{-1} \cdot V \cdot X^{-1}.

    """
    x, = inputs
    xi = self(x)
    ev, = eval_points
    if ev is None:
        return [None]
    return [-matrix_dot(xi, ev, xi)]
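R_op computes a directional derivative (Jacobian-times-vector product): moving X by t * V moves X^{-1} by approximately -t * X^{-1} V X^{-1}, which is what the return statement builds. A quick NumPy check of that identity (illustrative names):

import numpy

rng = numpy.random.RandomState(1)
X = rng.randn(3, 3) + 3 * numpy.eye(3)
V = rng.randn(3, 3)                      # the evaluation point (direction)

xi = numpy.linalg.inv(X)
analytic = -xi.dot(V).dot(xi)

t = 1e-6
numeric = (numpy.linalg.inv(X + t * V) - numpy.linalg.inv(X - t * V)) / (2 * t)
print(numpy.allclose(analytic, numeric, atol=1e-5))   # True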
Example 6: grad_not_implemented
# Required module: import theano [as alias]
# Or: from theano import gradient [as alias]
def grad_not_implemented(op, x_pos, x, comment=""):
    """
    Return an un-computable symbolic variable of type `x.type`.

    If any call to tensor.grad results in an expression containing this
    un-computable variable, an exception (NotImplementedError) will be
    raised indicating that the gradient on the
    `x_pos`'th input of `op` has not been implemented. Likewise if
    any call to theano.function involves this variable.

    Optionally adds a comment to the exception explaining why this
    gradient is not implemented.
    """

    return (NullType((
        "This variable is Null because the grad method for "
        "input %s (%s) of the %s op is not implemented. %s"
    ) % (x_pos, x, op, comment)))()
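In practice this helper is returned from an Op's grad method for the inputs whose gradient has simply not been written yet. A hedged sketch follows; the Op and its inputs are hypothetical, only the helper's call signature comes from the code above:

from theano.gradient import grad_not_implemented

# inside a hypothetical Op subclass
def grad(self, inputs, output_grads):
    x, y = inputs
    gz, = output_grads
    # gradient w.r.t. x is known; gradient w.r.t. y has not been derived yet
    return [gz * 2,
            grad_not_implemented(self, 1, y, "grad w.r.t. y not implemented")]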
Example 7: grad_undefined
# Required module: import theano [as alias]
# Or: from theano import gradient [as alias]
def grad_undefined(op, x_pos, x, comment=""):
    """
    Return an un-computable symbolic variable of type `x.type`.

    If any call to tensor.grad results in an expression containing this
    un-computable variable, an exception (GradUndefinedError) will be
    raised indicating that the gradient on the
    `x_pos`'th input of `op` is mathematically undefined. Likewise if
    any call to theano.function involves this variable.

    Optionally adds a comment to the exception explaining why this
    gradient is not defined.
    """

    return (NullType(
        (
            "This variable is Null because the grad method for "
            "input %s (%s) of the %s op is mathematically undefined. %s"
        ) % (x_pos, x, op, comment)))()
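grad_undefined is used the same way, but for inputs whose gradient does not exist mathematically, such as an integer axis or index argument. A hedged sketch with a hypothetical argmax-like Op:

from theano.gradient import grad_undefined

# inside a hypothetical Op whose second input is an integer axis
def grad(self, inputs, output_grads):
    x, axis = inputs
    return [x.zeros_like(),
            grad_undefined(self, 1, axis,
                           "the gradient w.r.t. an axis argument is undefined")]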
Example 8: abs_rel_errors
# Required module: import theano [as alias]
# Or: from theano import gradient [as alias]
def abs_rel_errors(self, g_pt):
    """Return the abs and rel error of gradient estimate `g_pt`.

    `g_pt` must be a list of ndarrays of the same length as self.gf,
    otherwise a ValueError is raised.

    Corresponding ndarrays in `g_pt` and `self.gf` must have the same
    shape or a ValueError is raised.
    """
    if len(g_pt) != len(self.gf):
        raise ValueError('argument has wrong number of elements',
                         len(g_pt))
    errs = []
    for i, (a, b) in enumerate(zip(g_pt, self.gf)):
        if a.shape != b.shape:
            raise ValueError('argument element %i has wrong shape %s' % (
                i, str((a.shape, b.shape))))
        errs.append(numeric_grad.abs_rel_err(a, b))
    return errs
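In spirit, each pair of arrays is reduced to an elementwise absolute error and a relative error with a guarded denominator; the exact guard used by numeric_grad.abs_rel_err may differ from the constant shown here. A NumPy sketch of the idea:

import numpy

analytic = numpy.array([1.0, 2.0, 3.0])            # e.g. from theano.grad
estimate = numpy.array([1.0001, 1.9998, 3.0020])   # e.g. finite differences

abs_err = numpy.abs(analytic - estimate)
rel_err = abs_err / numpy.maximum(numpy.abs(analytic) + numpy.abs(estimate), 1e-8)
print(abs_err, rel_err)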
Example 9: consider_constant
# Required module: import theano [as alias]
# Or: from theano import gradient [as alias]
def consider_constant(x):
    """
    DEPRECATED: use zero_grad() or disconnected_grad() instead.

    Consider an expression constant when computing gradients.

    The expression itself is unaffected, but when its gradient is
    computed, or the gradient of another expression that this
    expression is a subexpression of, it will not be backpropagated
    through. In other words, the gradient of the expression is
    truncated to 0.

    :param x: A Theano expression whose gradient should be truncated.

    :return: The expression is returned unmodified, but its gradient
        is now truncated to 0.

    .. versionadded:: 0.7
    """
    warnings.warn((
        "consider_constant() is deprecated, use zero_grad() or "
        "disconnected_grad() instead."), stacklevel=3)

    return consider_constant_(x)
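A minimal usage sketch of the (deprecated) helper; theano.gradient.consider_constant still works but emits a DeprecationWarning pointing at zero_grad() / disconnected_grad():

import theano
import theano.tensor as T
from theano.gradient import consider_constant

x = T.dscalar('x')
y = x ** 2
cost = consider_constant(y) * x      # treat y as a constant when differentiating

g = theano.grad(cost, x)
f = theano.function([x], g)
print(f(3.0))                        # 9.0 (just y at x=3), not d(x**3)/dx = 27.0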
Example 10: disconnected_grad
# Required module: import theano [as alias]
# Or: from theano import gradient [as alias]
def disconnected_grad(x):
    """
    Consider an expression constant when computing gradients,
    while effectively not backpropagating through it.

    The expression itself is unaffected, but when its gradient is
    computed, or the gradient of another expression that this
    expression is a subexpression of, it will not be backpropagated
    through. This is effectively equivalent to truncating the gradient
    expression to 0, but is executed faster than zero_grad(), which still
    has to go through the underlying computational graph related to the
    expression.

    :param x: A Theano expression whose gradient should not be
        backpropagated through.

    :return: The expression is returned unmodified, but its gradient
        is now effectively truncated to 0.
    """
    return disconnected_grad_(x)
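Because the wrapped expression becomes disconnected (rather than contributing an explicit zero), theano.grad must be told how to treat inputs that are only reachable through it. A minimal sketch:

import theano
import theano.tensor as T
from theano.gradient import disconnected_grad

x = T.dscalar('x')
cost = disconnected_grad(x ** 2)

# x only reaches `cost` through the disconnected expression; the default
# disconnected_inputs='raise' would raise DisconnectedInputError here.
g = theano.grad(cost, x, disconnected_inputs='ignore')
f = theano.function([x], g, on_unused_input='ignore')
print(f(3.0))                        # 0.0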
Example 11: dnn_gradweight
# Required module: import theano [as alias]
# Or: from theano import gradient [as alias]
def dnn_gradweight(img, topgrad,
                   kerns_shp,
                   border_mode='valid', subsample=(1, 1),
                   conv_mode='conv'):
    """
    GPU convolution gradient with respect to weight using cuDNN from NVIDIA.

    The memory layout to use is 'bc01', that is 'batch', 'channel',
    'first dim', 'second dim' in that order.

    FIXME parameters doc

    :warning: The cuDNN library only works with GPUs that have a compute
        capability of 3.0 or higher. This means that older GPUs will not
        work with this Op.
    """
    img = gpu_contiguous(img)
    topgrad = gpu_contiguous(topgrad)
    kerns_shp = theano.tensor.as_tensor_variable(kerns_shp)
    desc = GpuDnnConvDesc(border_mode=border_mode, subsample=subsample,
                          conv_mode=conv_mode)(img.shape, kerns_shp)
    out = gpu_alloc_empty(*kerns_shp)
    return GpuDnnConvGradW()(img, topgrad, out, desc)
Example 12: dnn_gradinput
# Required module: import theano [as alias]
# Or: from theano import gradient [as alias]
def dnn_gradinput(kerns, topgrad,
                  img_shp,
                  border_mode='valid', subsample=(1, 1),
                  conv_mode='conv'):
    """
    GPU convolution gradient with respect to input using cuDNN from NVIDIA.

    The memory layout to use is 'bc01', that is 'batch', 'channel',
    'first dim', 'second dim' in that order.

    FIXME parameters doc

    :warning: The cuDNN library only works with GPUs that have a compute
        capability of 3.0 or higher. This means that older GPUs will not
        work with this Op.
    """
    kerns = gpu_contiguous(kerns)
    topgrad = gpu_contiguous(topgrad)
    img_shp = theano.tensor.as_tensor_variable(img_shp)
    desc = GpuDnnConvDesc(border_mode=border_mode, subsample=subsample,
                          conv_mode=conv_mode)(img_shp, kerns.shape)
    out = gpu_alloc_empty(*img_shp)
    return GpuDnnConvGradI()(kerns, topgrad, out, desc)
Example 13: grad
# Required module: import theano [as alias]
# Or: from theano import gradient [as alias]
def grad(self, inputs, gout):
    (cond, ift, iff) = inputs
    (gz,) = gout
    first_part = switch(cond, gz, 0.)
    second_part = switch(cond, 0., gz)

    out = self(cond, ift, iff)
    if out.type.dtype in discrete_types:
        first_part = 0.
        second_part = 0.

    # cond does affect the elements of the output so it is connected.
    # For the sake of making the gradient convenient we assume that
    # condition + epsilon always triggers the same branch as condition
    condition_grad = cond.zeros_like().astype(theano.config.floatX)

    return (condition_grad, first_part, second_part)
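Seen from user code, this routes the upstream gradient to whichever branch the condition selects, while the condition itself receives zeros. A minimal sketch using the public T.switch API:

import theano
import theano.tensor as T

cond = T.dvector('cond')
ift = T.dvector('ift')
iff = T.dvector('iff')
cost = T.switch(cond, ift, iff).sum()

g_ift, g_iff = theano.grad(cost, [ift, iff])
f = theano.function([cond, ift, iff], [g_ift, g_iff])
print(f([1., 0., 1.], [1., 2., 3.], [4., 5., 6.]))
# [array([1., 0., 1.]), array([0., 1., 0.])]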
Example 14: zero_grad
# Required module: import theano [as alias]
# Or: from theano import gradient [as alias]
def zero_grad(x):
    """
    Consider an expression constant when computing gradients.

    The expression itself is unaffected, but when its gradient is
    computed, or the gradient of another expression that this
    expression is a subexpression of, it will be backpropagated
    through with a value of zero. In other words, the gradient of
    the expression is truncated to 0.

    :param x: A Theano expression whose gradient should be truncated.

    :return: The expression is returned unmodified, but its gradient
        is now truncated to 0.
    """
    return zero_grad_(x)
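Unlike disconnected_grad, the input stays connected, so theano.grad never raises; the contribution of the wrapped expression is simply zero. A minimal sketch:

import theano
import theano.tensor as T
from theano.gradient import zero_grad

x = T.dscalar('x')
cost = zero_grad(x ** 2) + 3 * x     # first term contributes no gradient

g = theano.grad(cost, x)             # no error: x is still connected via 3 * x
f = theano.function([x], g)
print(f(2.0))                        # 3.0 instead of 2 * x + 3 = 7.0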