This article collects typical usage examples of the Python method tensorflow.compat.v1.squared_difference. If you are wondering what v1.squared_difference does or how to use it in practice, the curated code samples below may help. You can also explore further usage examples of the module tensorflow.compat.v1 that this method belongs to.
Shown below are 15 code examples of v1.squared_difference, sorted by popularity by default.
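Before the examples, a quick orientation: tf.compat.v1.squared_difference(x, y) computes (x - y)**2 element-wise, with the usual broadcasting rules. Here is a minimal, self-contained sketch (the inputs are hypothetical constants chosen for illustration):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

a = tf.constant([1.0, 2.0, 3.0])
b = tf.constant([3.0, 2.0, 1.0])
diff = tf.squared_difference(a, b)  # element-wise (a - b) ** 2

with tf.Session() as sess:
    print(sess.run(diff))  # [4. 0. 4.]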
Example 1: lossfn
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import squared_difference [as alias]
def lossfn(real_input, fake_input, compress, hparams, lsgan, name):
  """Loss function."""
  eps = 1e-12
  with tf.variable_scope(name):
    d1 = discriminator(real_input, compress, hparams, "discriminator")
    d2 = discriminator(fake_input, compress, hparams, "discriminator",
                       reuse=True)
    if lsgan:
      dloss = tf.reduce_mean(
          tf.squared_difference(d1, 0.9)) + tf.reduce_mean(tf.square(d2))
      gloss = tf.reduce_mean(tf.squared_difference(d2, 0.9))
      loss = (dloss + gloss) / 2
    else:  # cross_entropy
      dloss = -tf.reduce_mean(
          tf.log(d1 + eps)) - tf.reduce_mean(tf.log1p(eps - d2))
      gloss = -tf.reduce_mean(tf.log(d2 + eps))
      loss = (dloss + gloss) / 2
    return loss
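To see the LSGAN branch in isolation, here is a sketch with hypothetical stand-in discriminator outputs (d1 on real inputs, d2 on generated inputs), since discriminator and hparams come from the original project and are not defined here. The 0.9 target is label smoothing: real examples are pushed toward 0.9 rather than 1.0.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

d1 = tf.constant([0.8, 0.95])  # hypothetical discriminator scores on real inputs
d2 = tf.constant([0.2, 0.4])   # hypothetical scores on generated inputs
dloss = tf.reduce_mean(tf.squared_difference(d1, 0.9)) + tf.reduce_mean(tf.square(d2))
gloss = tf.reduce_mean(tf.squared_difference(d2, 0.9))  # generator pulls fake scores toward 0.9

with tf.Session() as sess:
    print(sess.run((dloss + gloss) / 2))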
Example 2: layer_norm_compute
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import squared_difference [as alias]
def layer_norm_compute(x, epsilon, scale, bias, layer_collection=None):
  """Layer norm raw computation."""
  # Save these before they get converted to tensors by the casting below
  params = (scale, bias)

  epsilon, scale, bias = [cast_like(t, x) for t in [epsilon, scale, bias]]
  mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
  variance = tf.reduce_mean(
      tf.squared_difference(x, mean), axis=[-1], keepdims=True)
  norm_x = (x - mean) * tf.rsqrt(variance + epsilon)
  output = norm_x * scale + bias
  return output
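The two reductions above are simply the mean and variance along the last axis. A small check (my own sketch, not part of the original code) confirming they agree with tf.nn.moments:

import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

x = tf.constant(np.random.randn(2, 8).astype(np.float32))
mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
variance = tf.reduce_mean(tf.squared_difference(x, mean), axis=[-1], keepdims=True)
m, v = tf.nn.moments(x, axes=[-1], keep_dims=True)

with tf.Session() as sess:
    var_, v_ = sess.run([variance, v])
    print(np.allclose(var_, v_, atol=1e-5))  # True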
Example 3: embedding_lookup
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import squared_difference [as alias]
def embedding_lookup(self, x, means):
  """Compute nearest neighbors and loss for training the embeddings.

  Args:
    x: Batch of encoder continuous latent states sliced/projected into
      shape [-1, num_blocks, block_dim].
    means: Embedding means.

  Returns:
    The nearest neighbor in one hot form, the nearest neighbor itself, the
    commitment loss, embedding training loss.
  """
  x_means_hot = self.nearest_neighbor(x, means)
  x_means_hot_flat = tf.reshape(
      x_means_hot, [-1, self.hparams.num_blocks, self.hparams.block_v_size])
  x_means = tf.matmul(tf.transpose(x_means_hot_flat, perm=[1, 0, 2]), means)
  x_means = tf.transpose(x_means, [1, 0, 2])
  q_loss = tf.reduce_mean(
      tf.squared_difference(tf.stop_gradient(x), x_means))
  e_loss = tf.reduce_mean(
      tf.squared_difference(x, tf.stop_gradient(x_means)))
  return x_means_hot, x_means, q_loss, e_loss
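The two losses implement the standard VQ-VAE objective: q_loss moves the codebook entries toward the frozen encoder output, while e_loss (the commitment loss) moves the encoder toward the frozen codebook. A standalone sketch with hypothetical random stand-ins for the latents and their selected codebook vectors:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

x = tf.random.normal([4, 16])        # stand-in encoder latents
x_means = tf.random.normal([4, 16])  # stand-in nearest codebook entries
q_loss = tf.reduce_mean(tf.squared_difference(tf.stop_gradient(x), x_means))  # trains the codebook
e_loss = tf.reduce_mean(tf.squared_difference(x, tf.stop_gradient(x_means)))  # trains the encoder

with tf.Session() as sess:
    # Equal values, but gradients flow to different variables.
    print(sess.run([q_loss, e_loss]))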
Example 4: mean_squared_error
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import squared_difference [as alias]
def mean_squared_error(output, target, is_mean=False):
  """Return the TensorFlow expression of mean-square-error of two distributions.

  Parameters
  ----------
  output : 2D or 4D tensor.
  target : 2D or 4D tensor.
  is_mean : boolean
      If True, use ``tf.reduce_mean`` to compute the loss of each sample;
      otherwise, use ``tf.reduce_sum`` (default).

  References
  ------------
  - `Wiki Mean Squared Error <https://en.wikipedia.org/wiki/Mean_squared_error>`_
  """
  with tf.name_scope("mean_squared_error_loss"):
    if output.get_shape().ndims == 2:  # [batch_size, n_feature]
      if is_mean:
        mse = tf.reduce_mean(tf.reduce_mean(
            tf.squared_difference(output, target), 1))
      else:
        mse = tf.reduce_mean(tf.reduce_sum(
            tf.squared_difference(output, target), 1))
    elif output.get_shape().ndims == 4:  # [batch_size, w, h, c]
      if is_mean:
        mse = tf.reduce_mean(tf.reduce_mean(
            tf.squared_difference(output, target), [1, 2, 3]))
      else:
        mse = tf.reduce_mean(tf.reduce_sum(
            tf.squared_difference(output, target), [1, 2, 3]))
    return mse
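A small usage sketch for the 2D case, assuming the function above is in scope and tf is tensorflow.compat.v1:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

output = tf.constant([[1.0, 2.0], [3.0, 4.0]])
target = tf.constant([[1.0, 0.0], [3.0, 2.0]])
mse = mean_squared_error(output, target, is_mean=True)  # mean over features, then over batch

with tf.Session() as sess:
    print(sess.run(mse))  # squared diffs [[0, 4], [0, 4]] -> per-row mean 2.0 -> 2.0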
Example 5: normalized_mean_square_error
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import squared_difference [as alias]
def normalized_mean_square_error(output, target):
  """Return the TensorFlow expression of normalized mean-square-error of two distributions.

  Parameters
  ----------
  output : 2D or 4D tensor.
  target : 2D or 4D tensor.
  """
  with tf.name_scope("mean_squared_error_loss"):
    if output.get_shape().ndims == 2:  # [batch_size, n_feature]
      nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(output, target), axis=1))
      nmse_b = tf.sqrt(tf.reduce_sum(tf.square(target), axis=1))
    elif output.get_shape().ndims == 4:  # [batch_size, w, h, c]
      nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(output, target), axis=[1, 2, 3]))
      nmse_b = tf.sqrt(tf.reduce_sum(tf.square(target), axis=[1, 2, 3]))
    nmse = tf.reduce_mean(nmse_a / nmse_b)
    return nmse
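A worked example, assuming the function above is in scope: per example, nmse_a is the L2 norm of the error and nmse_b the L2 norm of the target, so the result is a relative error.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

output = tf.constant([[3.0, 4.0]])
target = tf.constant([[0.0, 4.0]])
nmse = normalized_mean_square_error(output, target)

with tf.Session() as sess:
    print(sess.run(nmse))  # ||error|| / ||target|| = 3.0 / 4.0 = 0.75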
Example 6: testRandomHorizontalFlipWithEmptyBoxes
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import squared_difference [as alias]
def testRandomHorizontalFlipWithEmptyBoxes(self):

  def graph_fn():
    preprocess_options = [(preprocessor.random_horizontal_flip, {})]
    images = self.expectedImagesAfterNormalization()
    boxes = self.createEmptyTestBoxes()
    tensor_dict = {fields.InputDataFields.image: images,
                   fields.InputDataFields.groundtruth_boxes: boxes}
    images_expected1 = self.expectedImagesAfterLeftRightFlip()
    boxes_expected = self.createEmptyTestBoxes()
    images_expected2 = images
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
    images = tensor_dict[fields.InputDataFields.image]
    boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
    images_diff1 = tf.squared_difference(images, images_expected1)
    images_diff2 = tf.squared_difference(images, images_expected2)
    images_diff = tf.multiply(images_diff1, images_diff2)
    images_diff_expected = tf.zeros_like(images_diff)
    return [images_diff, images_diff_expected, boxes, boxes_expected]

  (images_diff_, images_diff_expected_, boxes_,
   boxes_expected_) = self.execute_cpu(graph_fn, [])
  self.assertAllClose(boxes_, boxes_expected_)
  self.assertAllClose(images_diff_, images_diff_expected_)
Example 7: _get_lr_tensor
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import squared_difference [as alias]
def _get_lr_tensor(self):
  """Get lr minimizing the surrogate.

  Returns:
    The lr_t.
  """
  lr = tf.squared_difference(1.0, tf.sqrt(self._mu)) / self._h_min
  return lr
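Here self._mu and self._h_min are state of the surrounding optimizer (this looks like a YellowFin-style surrogate, but treat that attribution as an inference). A numeric sketch of the formula lr = (1 - sqrt(mu))^2 / h_min with hypothetical values:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

mu = tf.constant(0.81)     # hypothetical momentum estimate
h_min = tf.constant(0.01)  # hypothetical minimum curvature estimate
lr = tf.squared_difference(1.0, tf.sqrt(mu)) / h_min

with tf.Session() as sess:
    print(sess.run(lr))  # (1 - 0.9)**2 / 0.01 ~ 1.0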
Example 8: mean_squared_error
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import squared_difference [as alias]
def mean_squared_error(true, pred):
  """L2 distance between tensors true and pred.

  Args:
    true: the ground truth image.
    pred: the predicted image.

  Returns:
    mean squared error between ground truth and predicted image.
  """
  result = tf.reduce_sum(
      tf.squared_difference(true, pred)) / tf.to_float(tf.size(pred))
  return result
Example 9: generic_l2_loss
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import squared_difference [as alias]
def generic_l2_loss(body_output,
                    targets,
                    model_hparams,
                    vocab_size,
                    weights_fn):
  del model_hparams, vocab_size, weights_fn  # unused args
  loss = tf.squared_difference(body_output, tf.to_float(targets))
  return tf.reduce_mean(loss), tf.constant(1.0)
Example 10: video_l2_internal_loss
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import squared_difference [as alias]
def video_l2_internal_loss(logits, targets, model_hparams):
  cutoff = getattr(model_hparams, "video_modality_loss_cutoff", 0.2)
  return tf.nn.relu(
      tf.squared_difference(logits, targets) - cutoff * cutoff)
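The cutoff turns the plain L2 loss into a hinged one: per-element squared errors below cutoff^2 are zeroed out, so only predictions that are off by more than the cutoff contribute. A quick numeric check with the default cutoff of 0.2:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

logits = tf.constant([0.1, 0.5])
targets = tf.constant([0.0, 0.0])
cutoff = 0.2
loss = tf.nn.relu(tf.squared_difference(logits, targets) - cutoff * cutoff)

with tf.Session() as sess:
    print(sess.run(loss))  # [max(0.01 - 0.04, 0), max(0.25 - 0.04, 0)] = [0.0, 0.21]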
Example 11: l2_norm
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import squared_difference [as alias]
def l2_norm(x, filters=None, epsilon=1e-6, name=None, reuse=None):
  """Layer normalization with l2 norm."""
  if filters is None:
    filters = shape_list(x)[-1]
  with tf.variable_scope(name, default_name="l2_norm", values=[x], reuse=reuse):
    scale = tf.get_variable(
        "l2_norm_scale", [filters], initializer=tf.ones_initializer())
    bias = tf.get_variable(
        "l2_norm_bias", [filters], initializer=tf.zeros_initializer())
    epsilon, scale, bias = [cast_like(t, x) for t in [epsilon, scale, bias]]
    mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
    l2norm = tf.reduce_sum(
        tf.squared_difference(x, mean), axis=[-1], keepdims=True)
    norm_x = (x - mean) * tf.rsqrt(l2norm + epsilon)
    return norm_x * scale + bias
Example 12: vq_nearest_neighbor
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import squared_difference [as alias]
def vq_nearest_neighbor(x, means,
                        soft_em=False, num_samples=10, temperature=None):
  """Find the nearest element in means to elements in x."""
  bottleneck_size = common_layers.shape_list(means)[0]
  x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True)
  means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True)
  scalar_prod = tf.matmul(x, means, transpose_b=True)
  dist = x_norm_sq + tf.transpose(means_norm_sq) - 2 * scalar_prod
  if soft_em:
    x_means_idx = tf.multinomial(-dist, num_samples=num_samples)
    x_means_hot = tf.one_hot(
        x_means_idx, depth=common_layers.shape_list(means)[0])
    x_means_hot = tf.reduce_mean(x_means_hot, axis=1)
  else:
    if temperature is None:
      x_means_idx = tf.argmax(-dist, axis=-1)
    else:
      x_means_idx = tf.multinomial(-dist / temperature, 1)
      x_means_idx = tf.squeeze(x_means_idx, axis=-1)
    if (common_layers.should_generate_summaries() and
        not common_layers.is_xla_compiled()):
      tf.summary.histogram("means_idx", tf.reshape(x_means_idx, [-1]))
    x_means_hot = tf.one_hot(x_means_idx, bottleneck_size)
  x_means_hot_flat = tf.reshape(x_means_hot, [-1, bottleneck_size])
  x_means = tf.matmul(x_means_hot_flat, means)
  e_loss = tf.reduce_mean(tf.squared_difference(x, tf.stop_gradient(x_means)))
  return x_means_hot, e_loss, dist
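The dist computation uses the expansion ||x - m||^2 = ||x||^2 + ||m||^2 - 2<x, m> to get all pairwise squared distances with a single matmul. A sketch (my own verification with random stand-in tensors) checking it against a direct tf.squared_difference reduction:

import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

x = tf.constant(np.random.randn(4, 8).astype(np.float32))       # stand-in latents
means = tf.constant(np.random.randn(16, 8).astype(np.float32))  # stand-in codebook

x_norm_sq = tf.reduce_sum(tf.square(x), axis=-1, keepdims=True)
means_norm_sq = tf.reduce_sum(tf.square(means), axis=-1, keepdims=True)
dist = x_norm_sq + tf.transpose(means_norm_sq) - 2 * tf.matmul(x, means, transpose_b=True)

# Direct computation via broadcasting: [4, 1, 8] vs [1, 16, 8] -> [4, 16]
direct = tf.reduce_sum(tf.squared_difference(x[:, None, :], means[None, :, :]), axis=-1)

with tf.Session() as sess:
    d_, direct_ = sess.run([dist, direct])
    print(np.allclose(d_, direct_, atol=1e-4))  # True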
Example 13: l2_distance
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import squared_difference [as alias]
def l2_distance(x, y, normalize=False, reduce_axis=-1):
  if normalize:
    x = tf.nn.l2_normalize(x, axis=-1)
    y = tf.nn.l2_normalize(y, axis=-1)
  sq_diff = tf.squared_difference(x, y)
  return tf.reduce_sum(sq_diff, axis=reduce_axis)
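Note that despite the name, this returns the squared L2 distance (no square root is taken). A quick usage sketch, assuming the function above is in scope:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

x = tf.constant([[1.0, 0.0]])
y = tf.constant([[0.0, 1.0]])

with tf.Session() as sess:
    print(sess.run(l2_distance(x, y)))  # [2.] -- squared distance, not sqrt(2)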
Example 14: _get_moments
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import squared_difference [as alias]
def _get_moments(self, inputs):
  # Like tf.nn.moments but unbiased sample std. deviation.
  # Reduce over channels only.
  mean = tf.reduce_mean(inputs, [self.axis], keepdims=True, name="mean")
  variance = tf.reduce_sum(
      tf.squared_difference(inputs, tf.stop_gradient(mean)),
      [self.axis], keepdims=True, name="variance_sum")
  # Divide by N - 1.
  inputs_shape = tf.shape(inputs)
  counts = tf.reduce_prod([inputs_shape[ax] for ax in [self.axis]])
  variance /= (tf.cast(counts, tf.float32) - 1)
  return mean, variance
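A standalone check (with a hypothetical axis in place of self.axis) that the divide-by-N-1 step reproduces NumPy's unbiased sample variance:

import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

axis = 1  # hypothetical channel axis
data = np.random.randn(2, 5).astype(np.float32)
inputs = tf.constant(data)

mean = tf.reduce_mean(inputs, [axis], keepdims=True)
variance = tf.reduce_sum(
    tf.squared_difference(inputs, tf.stop_gradient(mean)), [axis], keepdims=True)
variance /= tf.cast(tf.shape(inputs)[axis], tf.float32) - 1  # unbiased: divide by N - 1

with tf.Session() as sess:
    var_ = sess.run(variance)[:, 0]
    print(np.allclose(var_, data.var(axis=1, ddof=1), atol=1e-5))  # True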
Example 15: testRandomHorizontalFlip
# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import squared_difference [as alias]
def testRandomHorizontalFlip(self):

  def graph_fn():
    preprocess_options = [(preprocessor.random_horizontal_flip, {})]
    images = self.expectedImagesAfterNormalization()
    boxes = self.createTestBoxes()
    tensor_dict = {fields.InputDataFields.image: images,
                   fields.InputDataFields.groundtruth_boxes: boxes}
    images_expected1 = self.expectedImagesAfterLeftRightFlip()
    boxes_expected1 = self.expectedBoxesAfterLeftRightFlip()
    images_expected2 = images
    boxes_expected2 = boxes
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
    images = tensor_dict[fields.InputDataFields.image]
    boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
    boxes_diff1 = tf.squared_difference(boxes, boxes_expected1)
    boxes_diff2 = tf.squared_difference(boxes, boxes_expected2)
    boxes_diff = tf.multiply(boxes_diff1, boxes_diff2)
    boxes_diff_expected = tf.zeros_like(boxes_diff)
    images_diff1 = tf.squared_difference(images, images_expected1)
    images_diff2 = tf.squared_difference(images, images_expected2)
    images_diff = tf.multiply(images_diff1, images_diff2)
    images_diff_expected = tf.zeros_like(images_diff)
    return [images_diff, images_diff_expected, boxes_diff,
            boxes_diff_expected]

  (images_diff_, images_diff_expected_, boxes_diff_,
   boxes_diff_expected_) = self.execute_cpu(graph_fn, [])
  self.assertAllClose(boxes_diff_, boxes_diff_expected_)
  self.assertAllClose(images_diff_, images_diff_expected_)