This article collects typical usage examples of the tensorflow.python.ops.gradients.gradients function in Python. If you have been wondering what the gradients function does, how to call it, or what real-world uses look like, the curated code samples here may help.
The sections below present 15 code examples of the gradients function, listed roughly in order of popularity. A minimal usage sketch comes first, followed by the examples themselves.
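As a warm-up, here is a minimal, hedged sketch of the typical call pattern (my own illustration, not taken from the examples below). It assumes TensorFlow 1.x graph mode, where tf.gradients is the public wrapper around tensorflow.python.ops.gradients.gradients: given output tensors ys and input tensors xs, it returns one symbolic gradient tensor per input.
# Minimal sketch; assumes TensorFlow 1.x graph mode.
import tensorflow as tf

x = tf.constant(3.0)
y = tf.square(x)               # y = x**2
dy_dx = tf.gradients(y, x)[0]  # symbolic gradient, evaluates to 2*x
with tf.Session() as sess:
  print(sess.run(dy_dx))       # 6.0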
Example 1: testUnknownUnconnectedGradientsValueGiven
def testUnknownUnconnectedGradientsValueGiven(self):
  with ops.Graph().as_default():
    x = constant(1.0)
    y = constant(1.0)
    with self.assertRaisesRegexp(
        ValueError, "Unknown value for unconnected_gradients: 'nonsense'"):
      gradients.gradients([y], [x], unconnected_gradients="nonsense")
Example 2: testRealOnly
def testRealOnly(self):
  x = constant_op.constant(7+3j, dtype=dtypes.complex64)
  y = math_ops.square(x)
  with self.assertRaisesRegexp(
      TypeError,
      r"Gradients of complex tensors must set grad_ys "
      r"\(y\.dtype = tf\.complex64\)"):
    gradients.gradients(y, x)
Example 3: testPartialDerivatives
def testPartialDerivatives(self):
  with self.test_session():
    x = constant_op.constant(1.)
    y = 2 * x
    z = x + y
    totalg = gradients.gradients(z, [x, y])
    self.assertEqual([3.0, 1.0], [g.eval() for g in totalg])
    partialg = gradients.gradients(z, [x, y], stop_gradients=[x, y])
    self.assertEqual([1.0, 1.0], [g.eval() for g in partialg])
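A short note on what this shows (my reading, not text from the original article): with y = 2 * x and z = x + y, the total derivative dz/dx is 3 because the contribution through y is included, while dz/dy is 1. Passing stop_gradients=[x, y] tells gradients to treat x and y as independent constants, so both partial derivatives collapse to 1.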
Example 4: testFloorDivGrad
def testFloorDivGrad(self):
  with self.test_session():
    a = variables.Variable(2.0)
    b = variables.Variable(4.0)
    with self.test_session() as sess:
      sess.run(variables.initialize_all_variables())
      c_grad = gradients.gradients(math_ops.div_deprecated(a, b), [a, b])
      self.assertAllEqual([x.eval() for x in c_grad], [0.25, -0.125])
      c_grad = gradients.gradients(math_ops.div(a, b), [a, b])
      self.assertAllEqual([x.eval() for x in c_grad], [0.25, -0.125])
      c_grad = gradients.gradients(math_ops.floordiv(a, b), [a, b])
      self.assertAllEqual([None if x is None else x.eval() for x in c_grad],
                          [None, None])
Example 5: testFloorDivGrad
def testFloorDivGrad(self):
  with self.test_session():
    a = variables.Variable(2.)
    b = variables.Variable(4.)
    with self.test_session() as sess:
      sess.run(variables.global_variables_initializer())
      c_grad = gradients.gradients(math_ops.divide(a, b), [a, b])
      self.assertAllEqual([x.eval() for x in c_grad], [.25, -.125])
      c_grad = gradients.gradients(math_ops.div(a, b), [a, b])
      self.assertAllEqual([x.eval() for x in c_grad], [.25, -.125])
      c_grad = gradients.gradients(math_ops.floordiv(a, b), [a, b])
      self.assertAllEqual([None if x is None else x.eval()
                           for x in c_grad], [None, None])
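For reference (my own explanation, not part of the original tests): true division of a = 2 by b = 4 has gradients d(a/b)/da = 1/b = 0.25 and d(a/b)/db = -a/b**2 = -0.125, which matches the first two assertions in Examples 4 and 5. Floor division, by contrast, is piecewise constant, so TensorFlow reports its gradients as None, which is why the final assertion expects [None, None].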
Example 6: testDependentYs
def testDependentYs(self):
  with self.test_session():
    x = constant_op.constant(3.0)
    y = math_ops.square(x)
    y1 = math_ops.square(y)
    y2 = math_ops.square(y1)
    g = gradients.gradients([y, y2], x)
    self.assertAllClose(17502.0, g[0].eval())
    g = gradients.gradients(y + y2, x)
    self.assertAllClose(17502.0, g[0].eval())
    z = array_ops.identity(y)
    z2 = array_ops.identity(y2)
    g = gradients.gradients([z, z2], x)
    self.assertAllClose(17502.0, g[0].eval())
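As a sanity check on the expected value (my own arithmetic, not from the original test): y = x**2 and y2 = x**8, so summing the gradients of both outputs gives 2*x + 8*x**7, which at x = 3 is 6 + 8 * 2187 = 17502. All three calls aggregate the dependent outputs the same way, hence the identical result.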
Example 7: test_jacobian_fixed_shape
def test_jacobian_fixed_shape(self):
  x = random_ops.random_uniform([2, 2])
  y = math_ops.matmul(x, x, transpose_a=True)
  jacobian_pfor = gradients.jacobian(y, x, use_pfor=True)
  jacobian_while = gradients.jacobian(y, x, use_pfor=False)
  answer = ops.convert_to_tensor([[
      gradient_ops.gradients(y[0][0], x)[0],
      gradient_ops.gradients(y[0][1], x)[0]
  ], [
      gradient_ops.gradients(y[1][0], x)[0],
      gradient_ops.gradients(y[1][1], x)[0]
  ]])
  self.run_and_assert_equal(answer, jacobian_pfor)
  self.run_and_assert_equal(answer, jacobian_while)
Example 8: testColocateGradientsWithAggregation
def testColocateGradientsWithAggregation(self):
  with ops.Graph().as_default() as g:
    with g.device("/gpu:1"):
      w = constant(1.0, shape=[1, 1])
      x = constant(1.0, shape=[1, 2])
      y = constant(1.0, shape=[1, 2])
      wx = math_ops.matmul(w, x)
      wy = math_ops.matmul(w, y)
    with g.device("/gpu:0"):
      z = wx + wy
    gw1 = gradients.gradients(z, [w], colocate_gradients_with_ops=True)[0]
    self.assertEquals("/gpu:1", gw1.device)
    gw2 = gradients.gradients(z, [w], colocate_gradients_with_ops=False)[0]
    self.assertEquals(None, gw2.device)
Example 9: testCustomGradientErrors
def testCustomGradientErrors(self):
  @custom_gradient.custom_gradient
  def F(x):
    def Grad(_):
      raise RuntimeError("x")
    return x, Grad

  with ops.Graph().as_default():
    x = constant(1.0)
    y = F(x)
    with self.assertRaises(RuntimeError):
      gradients.gradients(y, x)
Example 10: loop_fn
def loop_fn(i):
  image = array_ops.gather(images, i)
  label = array_ops.gather(labels, i)
  logits = array_ops.reshape(model(image, training=training), [-1])
  loss = losses.softmax_cross_entropy(
      logits=logits, onehot_labels=label, reduction=losses.Reduction.NONE)
  return gradient_ops.gradients(loss, variables.trainable_variables())
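Context note (my interpretation; images, labels, model and training are assumed to be defined in the enclosing scope): a loop_fn like this is typically handed to TensorFlow's parallel-for (pfor) utilities so that per-example gradients of the trainable variables can be computed by vectorizing over the batch index i.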
Example 11: testCustomGradientWithVariables
def testCustomGradientWithVariables(self):
  @custom_gradient.custom_gradient
  def F(x):
    out = core_layers.dense(x, 3, use_bias=False)

    def Grad(out_grad, variables=None):  # pylint: disable=redefined-outer-name
      self.assertEqual(1, len(variables))
      grads = gradients.gradients(out, [x, variables[0]], grad_ys=out_grad)
      return grads[0], [array_ops.ones((4, 3))]

    return out, Grad

  with ops.Graph().as_default():
    x = array_ops.ones((2, 4))
    with variable_scope.variable_scope("f", use_resource=True) as vs:
      y = F(x)
      all_vars = vs.global_variables()
      assert len(all_vars) == 1
    grads = gradients.gradients(y, [x, all_vars[0]])
    for g in grads:
      self.assertTrue(g is not None)
    with session.Session() as sess:
      sess.run(variables.global_variables_initializer())
      dw = sess.run(math_ops.reduce_sum(grads[1]))
      self.assertEqual(12., dw)
Example 12: approximate_hessian
def approximate_hessian(self, grads_and_vars, name=None):
  """
  I haven't tested this yet so I have no idea if it works, but even if it
  does it's probably super slow, and either way nothing else has been modified
  to deal with it.
  """
  gv = 0
  var_refs = []
  for g_t, x_tm1 in grads_and_vars:
    var_refs.append(x_tm1.ref())
    if g_t is None:
      continue
    with ops.name_scope('update_' + x_tm1.op.name), ops.device(x_tm1.device):
      if isinstance(g_t, ops.Tensor):
        gv += math_ops.reduce_sum(g_t * random_ops.random_normal(g_t.get_shape()))
      else:
        idxs, idxs_ = array_ops.unique(g_t.indices)
        g_t_ = math_ops.unsorted_segment_sum(g_t.values, idxs_, array_ops.size(idxs))
        gv += math_ops.reduce_sum(g_t_ * random_ops.random_normal(g_t_.get_shape()))
  # Note: gate_gradients, aggregation_method and colocate_gradients_with_ops
  # are not defined in this snippet; they are presumably attributes or
  # arguments available in the surrounding optimizer code.
  hesses = gradients.gradients(gv, var_refs,
                               gate_gradients=(gate_gradients == Optimizer.GATE_OP),
                               aggregation_method=aggregation_method,
                               colocate_gradients_with_ops=colocate_gradients_with_ops)
  return zip([g_t for g_t, _ in grads_and_vars],
             [x_tm1 for _, x_tm1 in grads_and_vars], hesses)
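For what it's worth (my interpretation of the snippet, not a claim from its author): gv accumulates the inner product of each gradient with a fresh random normal vector, so differentiating gv with respect to the variables yields Hessian-vector products with a random Gaussian probe, a common trick for cheap stochastic curvature estimates.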
Example 13: get_gradients
def get_gradients(self, loss, params):
  """Returns gradients of `loss` with respect to `params`.

  Arguments:
      loss: Loss tensor.
      params: List of variables.

  Returns:
      List of gradient tensors.

  Raises:
      ValueError: In case any gradient cannot be computed (e.g. if gradient
        function not implemented).
  """
  loss = self._scale_loss(loss)
  grads = gradients.gradients(loss, params)
  if None in grads:
    raise ValueError("An operation has `None` for gradient. "
                     "Please make sure that all of your ops have a "
                     "gradient defined (i.e. are differentiable). "
                     "Common ops without gradient: "
                     "K.argmax, K.round, K.eval.")
  if hasattr(self, "clipnorm"):
    grads = [clip_ops.clip_by_norm(g, self.clipnorm) for g in grads]
  if hasattr(self, "clipvalue"):
    grads = [
        clip_ops.clip_by_value(g, -self.clipvalue, self.clipvalue)
        for g in grads
    ]
  return grads
Example 14: test_zero_grad_tf_gradients
def test_zero_grad_tf_gradients(self):
  if context.executing_eagerly():
    self.skipTest("tf.gradients not supported in eager.")
  x = constant_op.constant([-1., 0., 1.])
  g = self.evaluate(gradients.gradients(math_ops.pow(x, 2), x)[0])
  self.assertAllClose([-2., 0., 2.], g)
Example 15: testColocateGradientsWithAggregation
def testColocateGradientsWithAggregation(self):
  with ops.Graph().as_default() as g:
    with g.device("/device:GPU:1"):
      w = constant(1.0, shape=[1, 1])
      x = constant(1.0, shape=[1, 2])
      y = constant(1.0, shape=[1, 2])
      wx = math_ops.matmul(w, x)
      wy = math_ops.matmul(w, y)
    with g.device("/device:GPU:0"):
      z = wx + wy
    gw1 = gradients.gradients(z, [w], colocate_gradients_with_ops=True)[0]
    self.assertEqual(gw1.op.colocation_groups(), wx.op.colocation_groups())
    gw2 = gradients.gradients(z, [w], colocate_gradients_with_ops=False)[0]
    self.assertTrue(wx.op.colocation_groups() != gw2.op.colocation_groups())