This article collects typical usage examples of the Python method grl_ops.gradient_reversal. If you have been wondering what grl_ops.gradient_reversal does or how to use it, the curated code examples below may help. You can also explore further usage examples from grl_ops, the module in which the method is defined.
Two code examples of grl_ops.gradient_reversal are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
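For orientation: grl_ops.gradient_reversal implements a gradient reversal layer (GRL), the building block of domain-adversarial training. It acts as the identity in the forward pass and multiplies incoming gradients by -1 in the backward pass. As a rough sketch only (not the actual grl_ops implementation, which is a custom compiled op), an equivalent behavior can be written in plain TensorFlow with tf.custom_gradient; the name my_gradient_reversal below is hypothetical.

import tensorflow as tf

@tf.custom_gradient
def my_gradient_reversal(x):
  # Forward pass: behave as the identity.
  def grad(dy):
    # Backward pass: flip the sign of the incoming gradient.
    return -dy
  return tf.identity(x), grad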
Example 1: testGradientReversalOp
# Required import: import grl_ops [as alias]
# Or: from grl_ops import gradient_reversal [as alias]
def testGradientReversalOp(self):
  with tf.Graph().as_default():
    with self.test_session():
      # Test that in forward prop, gradient reversal op acts as the
      # identity operation.
      examples = tf.constant([5.0, 4.0, 3.0, 2.0, 1.0])
      output = grl_ops.gradient_reversal(examples)
      expected_output = examples
      self.assertAllEqual(output.eval(), expected_output.eval())

      # Test that shape inference works as expected.
      self.assertAllEqual(output.get_shape(), expected_output.get_shape())

      # Test that in backward prop, gradient reversal op multiplies
      # gradients by -1.
      examples = tf.constant([[1.0]])
      w = tf.get_variable(name='w', shape=[1, 1])
      b = tf.get_variable(name='b', shape=[1])
      init_op = tf.global_variables_initializer()
      init_op.run()
      features = tf.nn.xw_plus_b(examples, w, b)
      # Construct two outputs: features layer passes directly to output1, but
      # features layer passes through a gradient reversal layer before
      # reaching output2.
      output1 = features
      output2 = grl_ops.gradient_reversal(features)
      gold = tf.constant([1.0])
      loss1 = gold - output1
      loss2 = gold - output2
      opt = tf.train.GradientDescentOptimizer(learning_rate=0.01)
      grads_and_vars_1 = opt.compute_gradients(loss1,
                                               tf.trainable_variables())
      grads_and_vars_2 = opt.compute_gradients(loss2,
                                               tf.trainable_variables())
      self.assertAllEqual(len(grads_and_vars_1), len(grads_and_vars_2))
      for i in range(len(grads_and_vars_1)):
        g1 = grads_and_vars_1[i][0]
        g2 = grads_and_vars_2[i][0]
        # Verify that gradients of loss1 are the negative of gradients of
        # loss2.
        self.assertAllEqual(tf.negative(g1).eval(), g2.eval())
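The final assertion holds because output1 and output2 carry identical forward values; loss1 and loss2 differ only in how gradients flow back through the gradient reversal layer, so each gradient of loss2 with respect to w and b is the exact negative of the corresponding gradient of loss1.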
Example 2: dann_loss
# Required import: import grl_ops [as alias]
# Or: from grl_ops import gradient_reversal [as alias]
def dann_loss(source_samples, target_samples, weight, scope=None):
  """Adds the domain adversarial (DANN) loss.

  Args:
    source_samples: a tensor of shape [num_samples, num_features].
    target_samples: a tensor of shape [num_samples, num_features].
    weight: the weight of the loss.
    scope: optional name scope for summary tags.

  Returns:
    a scalar tensor representing the domain adversarial (DANN) loss value.
  """
  with tf.variable_scope('dann'):
    batch_size = tf.shape(source_samples)[0]
    samples = tf.concat(axis=0, values=[source_samples, target_samples])
    samples = slim.flatten(samples)

    domain_selection_mask = tf.concat(
        axis=0, values=[tf.zeros((batch_size, 1)), tf.ones((batch_size, 1))])

    # Perform the gradient reversal and be careful with the shape.
    grl = grl_ops.gradient_reversal(samples)
    grl = tf.reshape(grl, (-1, samples.get_shape().as_list()[1]))

    grl = slim.fully_connected(grl, 100, scope='fc1')
    logits = slim.fully_connected(grl, 1, activation_fn=None, scope='fc2')

    domain_predictions = tf.sigmoid(logits)

  domain_loss = tf.losses.log_loss(
      domain_selection_mask, domain_predictions, weights=weight)

  domain_accuracy = utils.accuracy(
      tf.round(domain_predictions), domain_selection_mask)

  assert_op = tf.Assert(tf.is_finite(domain_loss), [domain_loss])
  with tf.control_dependencies([assert_op]):
    tag_loss = 'losses/domain_loss'
    tag_accuracy = 'losses/domain_accuracy'
    if scope:
      tag_loss = scope + tag_loss
      tag_accuracy = scope + tag_accuracy

    tf.summary.scalar(tag_loss, domain_loss)
    tf.summary.scalar(tag_accuracy, domain_accuracy)

  return domain_loss
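A hypothetical call site for dann_loss might look like the sketch below; source_features, target_features, and the 0.2 weight are placeholders invented for illustration and are not part of the original code.

# Hypothetical usage sketch: source_features and target_features stand in for
# feature tensors of shape [num_samples, num_features] produced elsewhere.
source_features = tf.random_normal([32, 128])
target_features = tf.random_normal([32, 128])
adversarial_loss = dann_loss(
    source_features, target_features, weight=0.2, scope='my_model/')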