This page collects typical usage examples of the Python method tensorflow.squared_difference. If you are wondering what tensorflow.squared_difference does, how to call it, or how it is used in practice, the curated examples below may help. You can also explore further usage examples from the tensorflow module to which this method belongs.
The 13 code examples of tensorflow.squared_difference shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
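Before the examples, a quick refresher (not taken from the examples below): tf.squared_difference(x, y) computes the elementwise (x - y)**2 and is equivalent to tf.square(x - y). A minimal sketch, assuming a TF 1.x graph session to match the style of the examples on this page:

import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0])
y = tf.constant([3.0, 2.0, 0.0])
# Elementwise (x - y)^2.
sq_diff = tf.squared_difference(x, y)
with tf.Session() as sess:
    print(sess.run(sq_diff))  # [4. 0. 9.]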
Example 1: testRandomFlipBoxes

# Required import: import tensorflow [as alias]
# Or: from tensorflow import squared_difference [as alias]
def testRandomFlipBoxes(self):
  boxes = self.createTestBoxes()
  # Case where the boxes are flipped.
  boxes_expected1 = self.expectedBoxesAfterMirroring()
  # Case where the boxes are not flipped.
  boxes_expected2 = boxes
  # After elementwise multiplication, the result should be all-zero since one
  # of them is all-zero.
  boxes_diff = tf.multiply(
      tf.squared_difference(boxes, boxes_expected1),
      tf.squared_difference(boxes, boxes_expected2))
  expected_result = tf.zeros_like(boxes_diff)
  with self.test_session() as sess:
    (boxes_diff, expected_result) = sess.run([boxes_diff, expected_result])
    self.assertAllEqual(boxes_diff, expected_result)
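This test relies on a disjunction trick that recurs in the preprocessor tests later on this page (Examples 3, 4, 10, and 11): the random flip either mirrors the boxes or leaves them unchanged, so at least one of the two squared differences is all-zero, and therefore their elementwise product is all-zero either way. A standalone sketch of the check with made-up tensors (not the test fixtures):

import tensorflow as tf

actual = tf.constant([[0.1, 0.2], [0.3, 0.4]])
flipped = tf.constant([[0.9, 0.8], [0.7, 0.6]])
unchanged = actual  # pretend the random op was a no-op this time
# One factor is all-zero, so the product is all-zero regardless of the other.
diff = tf.multiply(tf.squared_difference(actual, flipped),
                   tf.squared_difference(actual, unchanged))
with tf.Session() as sess:
    print(sess.run(diff))  # [[0. 0.] [0. 0.]]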
Example 2: lossfn

# Required import: import tensorflow [as alias]
# Or: from tensorflow import squared_difference [as alias]
def lossfn(real_input, fake_input, compress, hparams, lsgan, name):
  """Loss function."""
  eps = 1e-12
  with tf.variable_scope(name):
    d1 = discriminator(real_input, compress, hparams, "discriminator")
    d2 = discriminator(fake_input, compress, hparams, "discriminator",
                       reuse=True)
    if lsgan:
      dloss = tf.reduce_mean(
          tf.squared_difference(d1, 0.9)) + tf.reduce_mean(tf.square(d2))
      gloss = tf.reduce_mean(tf.squared_difference(d2, 0.9))
      loss = (dloss + gloss) / 2
    else:  # cross_entropy
      dloss = -tf.reduce_mean(
          tf.log(d1 + eps)) - tf.reduce_mean(tf.log(1 - d2 + eps))
      gloss = -tf.reduce_mean(tf.log(d2 + eps))
      loss = (dloss + gloss) / 2
    return loss
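In the LSGAN branch, 0.9 is a smoothed "real" label: the discriminator is pushed toward 0.9 on real inputs and toward 0 on fake inputs, while the generator pushes the fake score toward 0.9. A self-contained sketch of just those squared-difference losses, with made-up discriminator outputs:

import tensorflow as tf

d_real = tf.constant([0.8, 0.95])  # hypothetical D(real) scores
d_fake = tf.constant([0.2, 0.1])   # hypothetical D(fake) scores
# Least-squares GAN losses with a smoothed real label of 0.9.
dloss = (tf.reduce_mean(tf.squared_difference(d_real, 0.9)) +
         tf.reduce_mean(tf.square(d_fake)))
gloss = tf.reduce_mean(tf.squared_difference(d_fake, 0.9))
with tf.Session() as sess:
    print(sess.run([dloss, gloss]))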
Example 3: testRandomHorizontalFlipWithEmptyBoxes

# Required import: import tensorflow [as alias]
# Or: from tensorflow import squared_difference [as alias]
def testRandomHorizontalFlipWithEmptyBoxes(self):
  preprocess_options = [(preprocessor.random_horizontal_flip, {})]
  images = self.expectedImagesAfterNormalization()
  boxes = self.createEmptyTestBoxes()
  tensor_dict = {fields.InputDataFields.image: images,
                 fields.InputDataFields.groundtruth_boxes: boxes}
  images_expected1 = self.expectedImagesAfterLeftRightFlip()
  boxes_expected = self.createEmptyTestBoxes()
  images_expected2 = images
  tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
  images = tensor_dict[fields.InputDataFields.image]
  boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
  images_diff1 = tf.squared_difference(images, images_expected1)
  images_diff2 = tf.squared_difference(images, images_expected2)
  images_diff = tf.multiply(images_diff1, images_diff2)
  images_diff_expected = tf.zeros_like(images_diff)
  with self.test_session() as sess:
    (images_diff_, images_diff_expected_, boxes_,
     boxes_expected_) = sess.run([images_diff, images_diff_expected, boxes,
                                  boxes_expected])
    self.assertAllClose(boxes_, boxes_expected_)
    self.assertAllClose(images_diff_, images_diff_expected_)
Example 4: testRandomVerticalFlipWithEmptyBoxes

# Required import: import tensorflow [as alias]
# Or: from tensorflow import squared_difference [as alias]
def testRandomVerticalFlipWithEmptyBoxes(self):
  preprocess_options = [(preprocessor.random_vertical_flip, {})]
  images = self.expectedImagesAfterNormalization()
  boxes = self.createEmptyTestBoxes()
  tensor_dict = {fields.InputDataFields.image: images,
                 fields.InputDataFields.groundtruth_boxes: boxes}
  images_expected1 = self.expectedImagesAfterUpDownFlip()
  boxes_expected = self.createEmptyTestBoxes()
  images_expected2 = images
  tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
  images = tensor_dict[fields.InputDataFields.image]
  boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
  images_diff1 = tf.squared_difference(images, images_expected1)
  images_diff2 = tf.squared_difference(images, images_expected2)
  images_diff = tf.multiply(images_diff1, images_diff2)
  images_diff_expected = tf.zeros_like(images_diff)
  with self.test_session() as sess:
    (images_diff_, images_diff_expected_, boxes_,
     boxes_expected_) = sess.run([images_diff, images_diff_expected, boxes,
                                  boxes_expected])
    self.assertAllClose(boxes_, boxes_expected_)
    self.assertAllClose(images_diff_, images_diff_expected_)
Example 5: lossfn

# Required import: import tensorflow [as alias]
# Or: from tensorflow import squared_difference [as alias]
def lossfn(real_input, fake_input, compress, hparams, lsgan, name):
  """Loss function."""
  eps = 1e-12
  with tf.variable_scope(name):
    d1 = discriminator(real_input, compress, hparams, "discriminator")
    d2 = discriminator(fake_input, compress, hparams, "discriminator",
                       reuse=True)
    if lsgan:
      dloss = tf.reduce_mean(
          tf.squared_difference(d1, 0.9)) + tf.reduce_mean(tf.square(d2))
      gloss = tf.reduce_mean(tf.squared_difference(d2, 0.9))
      loss = (dloss + gloss) / 2
    else:  # cross_entropy
      dloss = -tf.reduce_mean(
          tf.log(d1 + eps)) - tf.reduce_mean(tf.log1p(eps - d2))
      gloss = -tf.reduce_mean(tf.log(d2 + eps))
      loss = (dloss + gloss) / 2
    return loss
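Note that this variant differs from Example 2 only in the cross-entropy branch: tf.log1p(eps - d2) computes log(1 + eps - d2), which is mathematically the same quantity as tf.log(1 - d2 + eps) above.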
Example 6: layer_norm_compute

# Required import: import tensorflow [as alias]
# Or: from tensorflow import squared_difference [as alias]
def layer_norm_compute(x, epsilon, scale, bias, layer_collection=None):
  """Layer norm raw computation."""
  # Save these before they get converted to tensors by the casting below.
  params = (scale, bias)
  epsilon, scale, bias = [cast_like(t, x) for t in [epsilon, scale, bias]]
  mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
  variance = tf.reduce_mean(
      tf.squared_difference(x, mean), axis=[-1], keepdims=True)
  norm_x = (x - mean) * tf.rsqrt(variance + epsilon)
  output = norm_x * scale + bias
  return output
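Here tf.squared_difference supplies the (biased) variance over the last axis. A standalone sketch (constants are made up; scale and bias are omitted) showing that the normalized activations have roughly zero mean and unit variance per row:

import tensorflow as tf

x = tf.random_normal([2, 5], mean=3.0, stddev=2.0)
mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
variance = tf.reduce_mean(tf.squared_difference(x, mean),
                          axis=[-1], keepdims=True)
norm_x = (x - mean) * tf.rsqrt(variance + 1e-6)
with tf.Session() as sess:
    row_mean, row_var = sess.run([tf.reduce_mean(norm_x, axis=-1),
                                  tf.reduce_mean(tf.square(norm_x), axis=-1)])
    print(row_mean)  # ~0 for each row
    print(row_var)   # ~1 for each row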
Example 7: embedding_lookup

# Required import: import tensorflow [as alias]
# Or: from tensorflow import squared_difference [as alias]
def embedding_lookup(self, x, means):
  """Compute nearest neighbors and loss for training the embeddings.

  Args:
    x: Batch of encoder continuous latent states sliced/projected into
      shape [-1, num_blocks, block_dim].
    means: Embedding means.

  Returns:
    The nearest neighbor in one-hot form, the nearest neighbor itself, the
    commitment loss, and the embedding training loss.
  """
  x_means_hot = self.nearest_neighbor(x, means)
  x_means_hot_flat = tf.reshape(
      x_means_hot, [-1, self.hparams.num_blocks, self.hparams.block_v_size])
  x_means = tf.matmul(tf.transpose(x_means_hot_flat, perm=[1, 0, 2]), means)
  x_means = tf.transpose(x_means, [1, 0, 2])
  q_loss = tf.reduce_mean(
      tf.squared_difference(tf.stop_gradient(x), x_means))
  e_loss = tf.reduce_mean(
      tf.squared_difference(x, tf.stop_gradient(x_means)))
  return x_means_hot, x_means, q_loss, e_loss
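The two losses follow the usual vector-quantization recipe: q_loss trains the codebook (gradients reach x_means only, since x is wrapped in stop_gradient), while e_loss is the commitment loss that trains the encoder (gradients reach x only). A minimal sketch with made-up tensors:

import tensorflow as tf

x = tf.constant([[1.0, 2.0], [3.0, 4.0]])        # encoder outputs
x_means = tf.constant([[1.5, 1.5], [2.5, 4.5]])  # nearest codebook entries
# Codebook loss: gradient flows only into x_means.
q_loss = tf.reduce_mean(tf.squared_difference(tf.stop_gradient(x), x_means))
# Commitment loss: gradient flows only into x.
e_loss = tf.reduce_mean(tf.squared_difference(x, tf.stop_gradient(x_means)))
with tf.Session() as sess:
    print(sess.run([q_loss, e_loss]))  # equal values, different gradients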
Example 8: mean_squared_error

# Required import: import tensorflow [as alias]
# Or: from tensorflow import squared_difference [as alias]
def mean_squared_error(output, target, is_mean=False):
    """Return the TensorFlow expression of the mean-square-error of two distributions.

    Parameters
    ----------
    output : 2D or 4D tensor.
    target : 2D or 4D tensor.
    is_mean : boolean
        If True, use ``tf.reduce_mean`` to average the error over each sample;
        otherwise, use ``tf.reduce_sum`` (default).

    References
    ----------
    - `Wiki Mean Squared Error <https://en.wikipedia.org/wiki/Mean_squared_error>`_
    """
    with tf.name_scope("mean_squared_error_loss"):
        if output.get_shape().ndims == 2:  # [batch_size, n_feature]
            if is_mean:
                mse = tf.reduce_mean(tf.reduce_mean(tf.squared_difference(output, target), 1))
            else:
                mse = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(output, target), 1))
        elif output.get_shape().ndims == 4:  # [batch_size, w, h, c]
            if is_mean:
                mse = tf.reduce_mean(tf.reduce_mean(tf.squared_difference(output, target), [1, 2, 3]))
            else:
                mse = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(output, target), [1, 2, 3]))
        else:
            raise ValueError("Unsupported tensor rank: expected a 2D or 4D input.")
        return mse
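Usage sketch with made-up 2D tensors, assuming the function above is in scope; with is_mean=True the per-sample error is averaged over features rather than summed:

import tensorflow as tf

output = tf.constant([[1.0, 2.0], [3.0, 4.0]])
target = tf.constant([[1.0, 0.0], [3.0, 2.0]])
mse_sum = mean_squared_error(output, target)                # per-sample sums, then batch mean
mse_avg = mean_squared_error(output, target, is_mean=True)  # per-sample means, then batch mean
with tf.Session() as sess:
    print(sess.run([mse_sum, mse_avg]))  # [4.0, 2.0]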
Example 9: normalized_mean_square_error

# Required import: import tensorflow [as alias]
# Or: from tensorflow import squared_difference [as alias]
def normalized_mean_square_error(output, target):
    """Return the TensorFlow expression of the normalized mean-square-error of two distributions.

    Parameters
    ----------
    output : 2D or 4D tensor.
    target : 2D or 4D tensor.
    """
    with tf.name_scope("normalized_mean_squared_error_loss"):
        if output.get_shape().ndims == 2:  # [batch_size, n_feature]
            nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(output, target), axis=1))
            nmse_b = tf.sqrt(tf.reduce_sum(tf.square(target), axis=1))
        elif output.get_shape().ndims == 4:  # [batch_size, w, h, c]
            nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(output, target), axis=[1, 2, 3]))
            nmse_b = tf.sqrt(tf.reduce_sum(tf.square(target), axis=[1, 2, 3]))
        else:
            raise ValueError("Unsupported tensor rank: expected a 2D or 4D input.")
        nmse = tf.reduce_mean(nmse_a / nmse_b)
    return nmse
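Usage sketch (made-up tensors, assuming the function above is in scope): each sample's error norm is divided by the norm of its target, so the result is invariant to rescaling both tensors by the same factor:

import tensorflow as tf

output = tf.constant([[3.0, 4.0]])
target = tf.constant([[0.0, 8.0]])
# ||output - target|| / ||target|| = 5 / 8
nmse = normalized_mean_square_error(output, target)
with tf.Session() as sess:
    print(sess.run(nmse))  # 0.625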
Example 10: testRandomHorizontalFlip

# Required import: import tensorflow [as alias]
# Or: from tensorflow import squared_difference [as alias]
def testRandomHorizontalFlip(self):
  preprocess_options = [(preprocessor.random_horizontal_flip, {})]
  images = self.expectedImagesAfterNormalization()
  boxes = self.createTestBoxes()
  tensor_dict = {fields.InputDataFields.image: images,
                 fields.InputDataFields.groundtruth_boxes: boxes}
  images_expected1 = self.expectedImagesAfterMirroring()
  boxes_expected1 = self.expectedBoxesAfterMirroring()
  images_expected2 = images
  boxes_expected2 = boxes
  tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
  images = tensor_dict[fields.InputDataFields.image]
  boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]
  boxes_diff1 = tf.squared_difference(boxes, boxes_expected1)
  boxes_diff2 = tf.squared_difference(boxes, boxes_expected2)
  boxes_diff = tf.multiply(boxes_diff1, boxes_diff2)
  boxes_diff_expected = tf.zeros_like(boxes_diff)
  images_diff1 = tf.squared_difference(images, images_expected1)
  images_diff2 = tf.squared_difference(images, images_expected2)
  images_diff = tf.multiply(images_diff1, images_diff2)
  images_diff_expected = tf.zeros_like(images_diff)
  with self.test_session() as sess:
    (images_diff_, images_diff_expected_, boxes_diff_,
     boxes_diff_expected_) = sess.run([images_diff, images_diff_expected,
                                       boxes_diff, boxes_diff_expected])
    self.assertAllClose(boxes_diff_, boxes_diff_expected_)
    self.assertAllClose(images_diff_, images_diff_expected_)
Example 11: testRandomRGBtoGray

# Required import: import tensorflow [as alias]
# Or: from tensorflow import squared_difference [as alias]
def testRandomRGBtoGray(self):
  preprocess_options = [(preprocessor.random_rgb_to_gray, {})]
  images_original = self.createTestImages()
  tensor_dict = {fields.InputDataFields.image: images_original}
  tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
  images_gray = tensor_dict[fields.InputDataFields.image]
  images_gray_r, images_gray_g, images_gray_b = tf.split(
      value=images_gray, num_or_size_splits=3, axis=3)
  images_r, images_g, images_b = tf.split(
      value=images_original, num_or_size_splits=3, axis=3)
  images_r_diff1 = tf.squared_difference(tf.to_float(images_r),
                                         tf.to_float(images_gray_r))
  images_r_diff2 = tf.squared_difference(tf.to_float(images_gray_r),
                                         tf.to_float(images_gray_g))
  images_r_diff = tf.multiply(images_r_diff1, images_r_diff2)
  images_g_diff1 = tf.squared_difference(tf.to_float(images_g),
                                         tf.to_float(images_gray_g))
  images_g_diff2 = tf.squared_difference(tf.to_float(images_gray_g),
                                         tf.to_float(images_gray_b))
  images_g_diff = tf.multiply(images_g_diff1, images_g_diff2)
  images_b_diff1 = tf.squared_difference(tf.to_float(images_b),
                                         tf.to_float(images_gray_b))
  images_b_diff2 = tf.squared_difference(tf.to_float(images_gray_b),
                                         tf.to_float(images_gray_r))
  images_b_diff = tf.multiply(images_b_diff1, images_b_diff2)
  image_zero1 = tf.constant(0, dtype=tf.float32, shape=[1, 4, 4, 1])
  with self.test_session() as sess:
    (images_r_diff_, images_g_diff_, images_b_diff_, image_zero1_) = sess.run(
        [images_r_diff, images_g_diff, images_b_diff, image_zero1])
    self.assertAllClose(images_r_diff_, image_zero1_)
    self.assertAllClose(images_g_diff_, image_zero1_)
    self.assertAllClose(images_b_diff_, image_zero1_)
Example 12: tfe_squared_difference

# Required import: import tensorflow [as alias]
# Or: from tensorflow import squared_difference [as alias]
def tfe_squared_difference(t1, t2):
  return tf.squared_difference(t1, t2)
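The tfe_ prefix suggests this thin wrapper targets TF 1.x eager execution (an assumption; the surrounding module is not shown). A hedged usage sketch:

import tensorflow as tf

tf.enable_eager_execution()  # TF 1.x eager mode (assumed context)

def tfe_squared_difference(t1, t2):
    return tf.squared_difference(t1, t2)

print(tfe_squared_difference(tf.constant([1.0, 2.0]),
                             tf.constant([3.0, 1.0])))  # [4. 1.]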
Example 13: get_inv_quadratic_form

# Required import: import tensorflow [as alias]
# Or: from tensorflow import squared_difference [as alias]
def get_inv_quadratic_form(self, data, mean):
  # Per-dimension squared distance of each data point to the mean.
  tf_sq_distances = tf.squared_difference(data, tf.expand_dims(mean, 0))
  # Sum over dimensions, then scale by the inverse variance.
  tf_sum_sq_distances = tf.reduce_sum(tf_sq_distances, 1)
  return tf_sum_sq_distances / self.tf_variance_scalar
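For an isotropic Gaussian with scalar variance, this is the quadratic term ||x - mu||^2 / sigma^2 of the log-density. A standalone sketch with made-up values, replacing self.tf_variance_scalar with a constant:

import tensorflow as tf

data = tf.constant([[1.0, 2.0], [3.0, 4.0]])  # two 2-D points
mean = tf.constant([1.0, 1.0])
variance = tf.constant(2.0)  # stands in for self.tf_variance_scalar
sq_dist = tf.squared_difference(data, tf.expand_dims(mean, 0))
inv_quadratic = tf.reduce_sum(sq_dist, 1) / variance
with tf.Session() as sess:
    print(sess.run(inv_quadratic))  # [0.5, 6.5]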