This article collects typical usage examples of the gradients.gradients method from Python's tensorflow.python.ops.gradients module. If you have been wondering what gradients.gradients does, how to call it, or where it is useful, the curated examples below should help. You can also explore the containing module, tensorflow.python.ops.gradients, for further context.
The following 15 code examples of gradients.gradients are shown, sorted by popularity by default.
Example 1: _compute_gradient_list
# Required import: from tensorflow.python.ops import gradients [as alias]
# Or: from tensorflow.python.ops.gradients import gradients [as alias]
def _compute_gradient_list(x,
                           x_shape,
                           y,
                           y_shape,
                           x_init_value=None,
                           delta=1e-3,
                           init_targets=None,
                           extra_feed_dict=None):
  """Compute gradients for a list of x values."""
  assert isinstance(x, list)
  dx, dy = zip(*[_compute_dx_and_dy(xi, y, y_shape) for xi in x])
  if init_targets is not None:
    assert isinstance(init_targets, (list, tuple))
    for init in init_targets:
      init.run()
  if x_init_value is None:
    x_init_value = [None] * len(x)
  ret = [_compute_gradient(xi, x_shapei, dxi, y, y_shape, dyi, x_init_valuei,
                           delta, extra_feed_dict=extra_feed_dict)
         for xi, x_shapei, dxi, dyi, x_init_valuei in zip(x, x_shape, dx, dy,
                                                          x_init_value)]
  return ret
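This helper sits behind TensorFlow's gradient checker. For orientation, here is a minimal sketch of the public TF 1.x entry point that exercises it, comparing the analytic Jacobian (built with gradients.gradients) against a central-difference estimate; the tanh example is illustrative, not from the original source:

import tensorflow as tf

x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
y = tf.nn.tanh(x)
with tf.Session():
  # Compares the symbolic Jacobian with a numeric estimate; a small error
  # indicates the registered gradient is correct.
  err = tf.test.compute_gradient_error(x, [2, 2], y, [2, 2], delta=1e-3)
  print(err)  # close to 0 for a correct gradient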
Example 2: _resource_apply_sparse_duplicate_indices
# Required import: from tensorflow.python.ops import gradients [as alias]
# Or: from tensorflow.python.ops.gradients import gradients [as alias]
def _resource_apply_sparse_duplicate_indices(self, grad, handle, indices):
  """Add ops to apply sparse gradients to `handle`, with repeated indices.

  Optimizers which override this method must deal with repeated indices. See
  the docstring of `_apply_sparse_duplicate_indices` for details. By default
  the correct behavior, to sum non-unique indices and their associated
  gradients, is enforced by first pre-processing `grad` and `indices` and
  passing them on to `_resource_apply_sparse`. Optimizers which deal correctly
  with duplicate indices may instead override this method to avoid the
  overhead of summing.

  Args:
    grad: a `Tensor` representing the gradient for the affected indices.
    handle: a `Tensor` of dtype `resource` which points to the variable
      to be updated.
    indices: a `Tensor` of integral type representing the indices for
      which the gradient is nonzero. Indices may be repeated.

  Returns:
    An `Operation` which updates the value of the variable.
  """
  summed_grad, unique_indices = _deduplicate_indexed_slices(
      values=grad, indices=indices)
  return self._resource_apply_sparse(summed_grad, handle, unique_indices)
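The `_deduplicate_indexed_slices` helper it calls is not shown above; here is a minimal sketch of such de-duplication using public ops (tf.unique plus an unsorted segment sum), assuming TF 1.x:

import tensorflow as tf

def deduplicate_indexed_slices(values, indices):
  # Map each possibly-repeated index to a slot in the unique index list,
  # then sum the gradient rows that share a slot.
  unique_indices, new_index_positions = tf.unique(indices)
  summed_values = tf.math.unsorted_segment_sum(
      values, new_index_positions, tf.shape(unique_indices)[0])
  return summed_values, unique_indices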
Example 3: _resource_apply_sparse
# Required import: from tensorflow.python.ops import gradients [as alias]
# Or: from tensorflow.python.ops.gradients import gradients [as alias]
def _resource_apply_sparse(self, grad, handle, indices):
  """Add ops to apply sparse gradients to the variable `handle`.

  Similar to `_apply_sparse`, the `indices` argument to this method has been
  de-duplicated. Optimizers which deal correctly with non-unique indices may
  instead override `_resource_apply_sparse_duplicate_indices` to avoid this
  overhead.

  Args:
    grad: a `Tensor` representing the gradient for the affected indices.
    handle: a `Tensor` of dtype `resource` which points to the variable
      to be updated.
    indices: a `Tensor` of integral type representing the indices for
      which the gradient is nonzero. Indices are unique.

  Returns:
    An `Operation` which updates the value of the variable.
  """
  raise NotImplementedError()
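As a sketch of what a concrete override might look like, consider a hypothetical plain-SGD subclass (`self._learning_rate` is an assumed attribute, not part of the base class shown above):

from tensorflow.python.ops import resource_variable_ops

def _resource_apply_sparse(self, grad, handle, indices):
  # Scatter-add the negative scaled gradient into the selected rows.
  return resource_variable_ops.resource_scatter_add(
      handle, indices, -self._learning_rate * grad)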
Example 4: _finish
# Required import: from tensorflow.python.ops import gradients [as alias]
# Or: from tensorflow.python.ops.gradients import gradients [as alias]
def _finish(self, update_ops, name_scope):
  """Do what is needed to finish the update.

  This is called with the `name_scope` using the "name" that
  users have chosen for the application of gradients.

  Args:
    update_ops: List of `Operation` objects to update variables. This list
      contains the values returned by the `_apply_dense()` and
      `_apply_sparse()` calls.
    name_scope: String. Name to use for the returned operation.

  Returns:
    The operation to apply updates.
  """
  return control_flow_ops.group(*update_ops, name=name_scope)

# --------------
# Utility methods for subclasses.
# --------------
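A hypothetical override, purely for illustration, that chains an extra op onto the grouped updates (the `self._step_counter` variable is assumed, not part of the base class):

import tensorflow as tf

def _finish(self, update_ops, name_scope):
  # Run the counter increment only after all variable updates complete.
  with tf.control_dependencies(update_ops):
    increment_step = tf.assign_add(self._step_counter, 1)
  return tf.group(increment_step, name=name_scope)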
Example 5: get_train_step
# Required import: from tensorflow.python.ops import gradients [as alias]
# Or: from tensorflow.python.ops.gradients import gradients [as alias]
def get_train_step(self, loss):
  """Returns the ops to run to perform a training step on this estimator.

  Args:
    loss: The loss to use when calculating gradients.

  Returns:
    The ops to run to perform a training step.
  """
  my_vars = self._get_vars()
  if not (self._get_feature_columns() or my_vars):
    return []
  grads = gradients.gradients(loss, my_vars)
  if self._gradient_clip_norm:
    grads, _ = clip_ops.clip_by_global_norm(grads, self._gradient_clip_norm)
  return [self._get_optimizer().apply_gradients(zip(grads, my_vars))]
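The same compute-clip-apply pattern, as a self-contained TF 1.x graph-mode sketch using only public APIs (values here are illustrative):

import tensorflow as tf

x = tf.Variable([3.0, 4.0])
loss = tf.reduce_sum(tf.square(x))
grads = tf.gradients(loss, [x])
clipped, _ = tf.clip_by_global_norm(grads, clip_norm=1.0)
train_op = tf.train.GradientDescentOptimizer(0.1).apply_gradients(
    zip(clipped, [x]))
with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run(train_op)
  print(sess.run(x))  # x moves along the clipped gradient direction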
Example 6: _apply_sparse
# Required import: from tensorflow.python.ops import gradients [as alias]
# Or: from tensorflow.python.ops.gradients import gradients [as alias]
def _apply_sparse(self, grad, var):
  """Add ops to apply sparse gradients to `var`.

  The IndexedSlices object passed to `grad` in this function is by default
  pre-processed in `_apply_sparse_duplicate_indices` to remove duplicate
  indices (see its docstring for details). Optimizers which can tolerate or
  have correct special cases for duplicate sparse indices may override
  `_apply_sparse_duplicate_indices` instead of this function, avoiding that
  overhead.

  Args:
    grad: `IndexedSlices`, with no repeated indices.
    var: A `Variable` object.

  Returns:
    An `Operation`.
  """
  raise NotImplementedError()
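For context, a sparse gradient of this kind arises naturally whenever the forward pass gathers rows from a variable; a small sketch (illustrative, TF 1.x):

import tensorflow as tf

params = tf.Variable(tf.ones([5, 3]))
rows = tf.gather(params, [0, 2])
loss = tf.reduce_sum(rows)
grad = tf.gradients(loss, [params])[0]
# The gradient is an IndexedSlices carrying only the touched rows, which is
# what eventually reaches _apply_sparse.
print(type(grad).__name__)  # IndexedSlices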
Example 7: _compute_gradient_list
# Required import: from tensorflow.python.ops import gradients [as alias]
# Or: from tensorflow.python.ops.gradients import gradients [as alias]
def _compute_gradient_list(x,
                           x_shape,
                           y,
                           y_shape,
                           x_init_value=None,
                           delta=1e-3,
                           init_targets=None):
  """Compute gradients for a list of x values."""
  assert isinstance(x, list)
  dx, dy = zip(*[_compute_dx_and_dy(xi, y, y_shape) for xi in x])
  if init_targets is not None:
    assert isinstance(init_targets, (list, tuple))
    for init in init_targets:
      init.run()
  if x_init_value is None:
    x_init_value = [None] * len(x)
  ret = [_compute_gradient(xi, x_shapei, dxi, y, y_shape, dyi, x_init_valuei,
                           delta)
         for xi, x_shapei, dxi, dyi, x_init_valuei in zip(x, x_shape, dx, dy,
                                                          x_init_value)]
  return ret
Example 8: testColocateGradientsWithAggregation
# Required import: from tensorflow.python.ops import gradients [as alias]
# Or: from tensorflow.python.ops.gradients import gradients [as alias]
def testColocateGradientsWithAggregation(self):
  with ops.Graph().as_default() as g:
    with g.device("/gpu:1"):
      w = constant(1.0, shape=[1, 1])
      x = constant(1.0, shape=[1, 2])
      y = constant(1.0, shape=[1, 2])
      wx = math_ops.matmul(w, x)
      wy = math_ops.matmul(w, y)
    with g.device("/gpu:0"):
      z = wx + wy

    gw1 = gradients.gradients(z, [w], colocate_gradients_with_ops=True)[0]
    self.assertEqual(gw1.op.colocation_groups(), wx.op.colocation_groups())

    gw2 = gradients.gradients(z, [w], colocate_gradients_with_ops=False)[0]
    self.assertTrue(wx.op.colocation_groups() != gw2.op.colocation_groups())
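The flag the test exercises is also available on the public API; a minimal sketch (the device string is illustrative):

import tensorflow as tf

with tf.device("/gpu:0"):
  a = tf.constant(1.0, shape=[2, 2])
  b = tf.matmul(a, a)
# Each gradient op is placed on the same device as the forward op it
# differentiates, rather than on the device of the caller.
grads = tf.gradients(b, [a], colocate_gradients_with_ops=True)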
Example 9: testColocateGradientsWithAggregationInMultipleDevices
# Required import: from tensorflow.python.ops import gradients [as alias]
# Or: from tensorflow.python.ops.gradients import gradients [as alias]
def testColocateGradientsWithAggregationInMultipleDevices(self):
  with ops.Graph().as_default() as g:
    with g.device("/gpu:1"):
      w = constant(1.0, shape=[1, 1])
      x = constant(1.0, shape=[1, 2])
      y = constant(1.0, shape=[1, 2])
    with g.device("/task:1"):
      wx = math_ops.matmul(w, x)
    with g.device("/task:2"):
      wy = math_ops.matmul(w, y)
    with g.device("/gpu:0"):
      z = wx + wy

    gw1 = gradients.gradients(z, [w], colocate_gradients_with_ops=True)[0]
    self.assertEqual(gw1.op.colocation_groups(), w.op.colocation_groups())

    gw2 = gradients.gradients(z, [w], colocate_gradients_with_ops=False)[0]
    self.assertTrue(w.op.colocation_groups() != gw2.op.colocation_groups())
Example 10: testNoGradientForStringOutputs
# Required import: from tensorflow.python.ops import gradients [as alias]
# Or: from tensorflow.python.ops.gradients import gradients [as alias]
def testNoGradientForStringOutputs(self):
  with ops.Graph().as_default() as g:

    @ops.RegisterGradient("TestOp")
    def _TestOpGrad(op, float_grad, string_grad):
      """Gradient function for TestOp."""
      self.assertEquals(float_grad.dtype, dtypes.float32)
      self.assertFalse(string_grad)
      return float_grad

    ops.RegisterShape("TestOp")(None)
    c = constant(1.0)
    x, y = g.create_op("TestOp", [c], [dtypes.float32, dtypes.string]).outputs
    z = x * 2.0
    w = z * 3.0
    grads = gradients.gradients(z, [c])
    self.assertTrue(isinstance(grads[0], ops.Tensor))
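`ops.RegisterGradient` is the internal registration mechanism; for user code, a hedged sketch of the public equivalent (assuming TF 1.7+, where tf.custom_gradient exists):

import tensorflow as tf

@tf.custom_gradient
def clipped_square(x):
  def grad(dy):
    # Forward value is the plain square; the backward signal is clipped.
    return tf.clip_by_value(dy * 2.0 * x, -1.0, 1.0)
  return tf.square(x), grad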
Example 11: testHessian1D_multi
# Required import: from tensorflow.python.ops import gradients [as alias]
# Or: from tensorflow.python.ops.gradients import gradients [as alias]
def testHessian1D_multi(self):
  # Test the computation of the hessian with respect to multiple tensors
  m = 4
  n = 3
  rng = np.random.RandomState([1, 2, 3])
  mat_values = [rng.randn(m, m).astype("float32") for _ in range(n)]
  x_values = [rng.randn(m).astype("float32") for _ in range(n)]
  hess_values = [mat_value + mat_value.T for mat_value in mat_values]
  with self.test_session(use_gpu=True):
    mats = [constant_op.constant(mat_value) for mat_value in mat_values]
    xs = [constant_op.constant(x_value) for x_value in x_values]
    xs_mats_xs = [math_ops.reduce_sum(x[:, None] * mat * x[None, :])
                  for x, mat in zip(xs, mats)]
    hessians = gradients.hessians(xs_mats_xs, xs)
    hessians_actual = [hess.eval() for hess in hessians]
  for hess_value, hess_actual in zip(hess_values, hessians_actual):
    self.assertAllClose(hess_value, hess_actual)
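The expected values follow from a standard identity: for f(x) = xᵀMx, the gradient is (M + Mᵀ)x and the Hessian is M + Mᵀ, which is exactly why the test builds hess_values as mat_value + mat_value.T.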
Example 12: __init__
# Required import: from tensorflow.python.ops import gradients [as alias]
# Or: from tensorflow.python.ops.gradients import gradients [as alias]
def __init__(self,
             num_label_columns,
             optimizer,
             gradient_clip_norm,
             num_ps_replicas,
             scope):
  """Common initialization for all _ComposableModel objects.

  Args:
    num_label_columns: The number of label columns.
    optimizer: An instance of `tf.Optimizer` used to apply gradients to
      the model. If `None`, will use an FTRL optimizer.
    gradient_clip_norm: A float > 0. If provided, gradients are clipped
      to their global norm with this clipping ratio. See
      tf.clip_by_global_norm for more details.
    num_ps_replicas: The number of parameter server replicas.
    scope: Scope for variables created in this model.
  """
  self._num_label_columns = num_label_columns
  self._optimizer = optimizer
  self._gradient_clip_norm = gradient_clip_norm
  self._num_ps_replicas = num_ps_replicas
  self._scope = scope
  self._feature_columns = None
Example 13: _compute_dx_and_dy
# Required import: from tensorflow.python.ops import gradients [as alias]
# Or: from tensorflow.python.ops.gradients import gradients [as alias]
def _compute_dx_and_dy(x, y, y_shape):
  """Returns a node to compute the gradient of y wrt x."""
  # We make up a dy so that we can compute the gradients. We don't really use
  # the value of dy -- we will always feed it. We need to add an identity node
  # so that we can always feed it properly. Otherwise, for the Add operation,
  # dx is the same as dy and we cannot fetch the tensor that we are feeding.
  with x.graph.as_default():
    dy_orig = constant_op.constant(1.0, shape=y_shape, dtype=y.dtype)
    dy = array_ops.identity(dy_orig)
  # We compute the gradients of y wrt. x
  grads = gradients.gradients(y, x, dy)
  assert len(grads) == 1
  return grads[0], dy_orig
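The dy it manufactures corresponds to the grad_ys argument of the public API; a small sketch of feeding an explicit dy (illustrative, TF 1.x):

import tensorflow as tf

x = tf.constant([1.0, 2.0])
y = x * x
dy = tf.placeholder(tf.float32, shape=[2])
# grad_ys seeds the backward pass; dx = 2 * x * dy elementwise here.
dx = tf.gradients(y, x, grad_ys=dy)[0]
with tf.Session() as sess:
  print(sess.run(dx, feed_dict={dy: [1.0, 0.0]}))  # [2.0, 0.0]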
Example 14: _valid_dtypes
# Required import: from tensorflow.python.ops import gradients [as alias]
# Or: from tensorflow.python.ops.gradients import gradients [as alias]
def _valid_dtypes(self):
  """Valid types for loss, variables and gradients.

  Subclasses should override to allow other float types.

  Returns:
    Valid types for loss, variables and gradients.
  """
  return set([dtypes.float16, dtypes.float32, dtypes.float64])
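A hypothetical override, purely for illustration, that widens the allowed set (assuming `dtypes` is tensorflow.python.framework.dtypes, as in the examples above):

def _valid_dtypes(self):
  # Allow bfloat16 in addition to the defaults.
  return set([dtypes.bfloat16, dtypes.float16, dtypes.float32,
              dtypes.float64])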
Example 15: _apply_dense
# Required import: from tensorflow.python.ops import gradients [as alias]
# Or: from tensorflow.python.ops.gradients import gradients [as alias]
def _apply_dense(self, grad, var):
  """Add ops to apply dense gradients to `var`.

  Args:
    grad: A `Tensor`.
    var: A `Variable` object.

  Returns:
    An `Operation`.
  """
  raise NotImplementedError()
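A sketch of a concrete override for a hypothetical plain-SGD subclass (`self._learning_rate` and `self._use_locking` are assumed attributes, not part of the base class shown above):

import tensorflow as tf
from tensorflow.python.ops import state_ops

def _apply_dense(self, grad, var):
  lr = tf.cast(self._learning_rate, var.dtype.base_dtype)
  # In-place subtract of the scaled gradient from the dense variable.
  return state_ops.assign_sub(var, lr * grad, use_locking=self._use_locking)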