This article collects typical usage examples of the Python method grl_ops.gradient_reversal. If you have been wondering how to use grl_ops.gradient_reversal in Python, or what it looks like in practice, the curated code examples below may help. You can also explore further usage examples from the grl_ops module that this method belongs to.
The following presents the 2 code examples of grl_ops.gradient_reversal, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
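Before the examples: grl_ops here appears to be the compiled gradient reversal custom op from the TensorFlow research models (domain separation / domain adaptation code). For readers who only want the behaviour, the snippet below is a minimal pure-Python sketch of a gradient reversal layer, assuming the TensorFlow 1.x API with tf.custom_gradient; it is not the actual grl_ops implementation, and the name gradient_reversal_sketch is ours.

import tensorflow as tf

@tf.custom_gradient
def gradient_reversal_sketch(x):
  """Identity in the forward pass; negates gradients in the backward pass."""
  def grad(dy):
    # Backward pass: multiply the incoming gradient by -1.
    return -dy
  # Forward pass: act as the identity operation.
  return tf.identity(x), grad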
Example 1: testGradientReversalOp
# Required import: import grl_ops [as alias]
# Alternatively: from grl_ops import gradient_reversal [as alias]
# (The test also assumes: import tensorflow as tf)
def testGradientReversalOp(self):
  with tf.Graph().as_default():
    with self.test_session():
      # Test that in forward prop, the gradient reversal op acts as the
      # identity operation.
      examples = tf.constant([5.0, 4.0, 3.0, 2.0, 1.0])
      output = grl_ops.gradient_reversal(examples)
      expected_output = examples
      self.assertAllEqual(output.eval(), expected_output.eval())

      # Test that shape inference works as expected.
      self.assertAllEqual(output.get_shape(), expected_output.get_shape())

      # Test that in backward prop, the gradient reversal op multiplies
      # gradients by -1.
      examples = tf.constant([[1.0]])
      w = tf.get_variable(name='w', shape=[1, 1])
      b = tf.get_variable(name='b', shape=[1])
      init_op = tf.global_variables_initializer()
      init_op.run()
      features = tf.nn.xw_plus_b(examples, w, b)
      # Construct two outputs: the features layer passes directly to output1,
      # but passes through a gradient reversal layer before reaching output2.
      output1 = features
      output2 = grl_ops.gradient_reversal(features)
      gold = tf.constant([1.0])
      loss1 = gold - output1
      loss2 = gold - output2
      opt = tf.train.GradientDescentOptimizer(learning_rate=0.01)
      grads_and_vars_1 = opt.compute_gradients(loss1,
                                               tf.trainable_variables())
      grads_and_vars_2 = opt.compute_gradients(loss2,
                                               tf.trainable_variables())
      self.assertAllEqual(len(grads_and_vars_1), len(grads_and_vars_2))
      for i in range(len(grads_and_vars_1)):
        g1 = grads_and_vars_1[i][0]
        g2 = grads_and_vars_2[i][0]
        # Verify that the gradients of loss1 are the negatives of the
        # gradients of loss2.
        self.assertAllEqual(tf.negative(g1).eval(), g2.eval())
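The same sign-flip property can be checked more directly with tf.gradients, without building an optimizer. The snippet below is a sketch that uses the hypothetical gradient_reversal_sketch helper defined above; grl_ops.gradient_reversal would be expected to behave the same way.

x = tf.constant([2.0])
y_identity = 3.0 * x
y_reversed = 3.0 * gradient_reversal_sketch(x)
g_identity = tf.gradients(y_identity, x)[0]  # evaluates to 3.0
g_reversed = tf.gradients(y_reversed, x)[0]  # evaluates to -3.0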
Example 2: dann_loss
# Required import: import grl_ops [as alias]
# Alternatively: from grl_ops import gradient_reversal [as alias]
# (This function also assumes: import tensorflow as tf, slim = tf.contrib.slim,
#  and a repo-local utils module providing utils.accuracy)
def dann_loss(source_samples, target_samples, weight, scope=None):
  """Adds the domain adversarial (DANN) loss.

  Args:
    source_samples: a tensor of shape [num_samples, num_features].
    target_samples: a tensor of shape [num_samples, num_features].
    weight: the weight of the loss.
    scope: optional name scope for summary tags.

  Returns:
    a scalar tensor representing the domain adversarial loss value.
  """
  with tf.variable_scope('dann'):
    batch_size = tf.shape(source_samples)[0]
    samples = tf.concat(axis=0, values=[source_samples, target_samples])
    samples = slim.flatten(samples)

    domain_selection_mask = tf.concat(
        axis=0, values=[tf.zeros((batch_size, 1)), tf.ones((batch_size, 1))])

    # Perform the gradient reversal and be careful with the shape.
    grl = grl_ops.gradient_reversal(samples)
    grl = tf.reshape(grl, (-1, samples.get_shape().as_list()[1]))

    grl = slim.fully_connected(grl, 100, scope='fc1')
    logits = slim.fully_connected(grl, 1, activation_fn=None, scope='fc2')

    domain_predictions = tf.sigmoid(logits)

  domain_loss = tf.losses.log_loss(
      domain_selection_mask, domain_predictions, weights=weight)

  domain_accuracy = utils.accuracy(
      tf.round(domain_predictions), domain_selection_mask)

  assert_op = tf.Assert(tf.is_finite(domain_loss), [domain_loss])
  with tf.control_dependencies([assert_op]):
    tag_loss = 'losses/domain_loss'
    tag_accuracy = 'losses/domain_accuracy'
    if scope:
      tag_loss = scope + tag_loss
      tag_accuracy = scope + tag_accuracy

    tf.summary.scalar(tag_loss, domain_loss)
    tf.summary.scalar(tag_accuracy, domain_accuracy)

  return domain_loss
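A hypothetical call site, just to show the expected shapes and the effect of the scope argument on the summary tags; the batch size, feature dimension, and weight value below are assumptions, not taken from the original repository.

source_feats = tf.random_normal([32, 128])  # [num_samples, num_features]
target_feats = tf.random_normal([32, 128])
loss = dann_loss(source_feats, target_feats, weight=0.25, scope='my_model/')
# Summaries are written as 'my_model/losses/domain_loss' and
# 'my_model/losses/domain_accuracy'.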