

Python tensorflow.squared_difference Method Code Examples

This article collects typical usage examples of the tensorflow.squared_difference method in Python. If you are wondering how exactly to use tensorflow.squared_difference, what it does, or where to find examples of it, the curated code samples below may help. You can also explore further usage examples from the tensorflow module that this method belongs to.


The following presents 13 code examples of the tensorflow.squared_difference method, sorted by popularity by default.
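Before the project examples, here is a minimal standalone sketch of the op itself (assuming TensorFlow 1.x and its session API, matching the examples below; the input values are made up purely for illustration). tf.squared_difference(x, y) computes the elementwise value (x - y)**2, with the same broadcasting rules as tf.subtract, and is equivalent to tf.square(x - y).

import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0])
y = tf.constant([3.0, 2.0, 1.0])

# Elementwise squared difference: (x - y) ** 2
sq_diff = tf.squared_difference(x, y)

with tf.Session() as sess:
    print(sess.run(sq_diff))  # -> [4. 0. 4.]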

Example 1: testRandomFlipBoxes

# Required import: import tensorflow [as alias]
# Or: from tensorflow import squared_difference [as alias]
def testRandomFlipBoxes(self):
    boxes = self.createTestBoxes()

    # Case where the boxes are flipped.
    boxes_expected1 = self.expectedBoxesAfterMirroring()

    # Case where the boxes are not flipped.
    boxes_expected2 = boxes

    # After elementwise multiplication, the result should be all-zero since one
    # of them is all-zero.
    boxes_diff = tf.multiply(
        tf.squared_difference(boxes, boxes_expected1),
        tf.squared_difference(boxes, boxes_expected2))
    expected_result = tf.zeros_like(boxes_diff)

    with self.test_session() as sess:
      (boxes_diff, expected_result) = sess.run([boxes_diff, expected_result])
      self.assertAllEqual(boxes_diff, expected_result) 
Developer ID: ringringyi, Project: DOTA_models, Lines of code: 21, Source file: preprocessor_test.py

Example 2: lossfn

# Required import: import tensorflow [as alias]
# Or: from tensorflow import squared_difference [as alias]
def lossfn(real_input, fake_input, compress, hparams, lsgan, name):
  """Loss function."""
  eps = 1e-12
  with tf.variable_scope(name):
    d1 = discriminator(real_input, compress, hparams, "discriminator")
    d2 = discriminator(fake_input, compress, hparams, "discriminator",
                       reuse=True)
    if lsgan:
      dloss = tf.reduce_mean(
          tf.squared_difference(d1, 0.9)) + tf.reduce_mean(tf.square(d2))
      gloss = tf.reduce_mean(tf.squared_difference(d2, 0.9))
      loss = (dloss + gloss)/2
    else:  # cross_entropy
      dloss = -tf.reduce_mean(
          tf.log(d1 + eps)) - tf.reduce_mean(tf.log(1 - d2 + eps))
      gloss = -tf.reduce_mean(tf.log(d2 + eps))
      loss = (dloss + gloss)/2
    return loss 
Developer ID: akzaidi, Project: fine-lm, Lines of code: 20, Source file: cycle_gan.py

Example 3: testRandomHorizontalFlipWithEmptyBoxes

# Required import: import tensorflow [as alias]
# Or: from tensorflow import squared_difference [as alias]
def testRandomHorizontalFlipWithEmptyBoxes(self):
    preprocess_options = [(preprocessor.random_horizontal_flip, {})]
    images = self.expectedImagesAfterNormalization()
    boxes = self.createEmptyTestBoxes()
    tensor_dict = {fields.InputDataFields.image: images,
                   fields.InputDataFields.groundtruth_boxes: boxes}
    images_expected1 = self.expectedImagesAfterLeftRightFlip()
    boxes_expected = self.createEmptyTestBoxes()
    images_expected2 = images
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
    images = tensor_dict[fields.InputDataFields.image]
    boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]

    images_diff1 = tf.squared_difference(images, images_expected1)
    images_diff2 = tf.squared_difference(images, images_expected2)
    images_diff = tf.multiply(images_diff1, images_diff2)
    images_diff_expected = tf.zeros_like(images_diff)

    with self.test_session() as sess:
      (images_diff_, images_diff_expected_, boxes_,
       boxes_expected_) = sess.run([images_diff, images_diff_expected, boxes,
                                    boxes_expected])
      self.assertAllClose(boxes_, boxes_expected_)
      self.assertAllClose(images_diff_, images_diff_expected_) 
Developer ID: ahmetozlu, Project: vehicle_counting_tensorflow, Lines of code: 26, Source file: preprocessor_test.py

Example 4: testRandomVerticalFlipWithEmptyBoxes

# Required import: import tensorflow [as alias]
# Or: from tensorflow import squared_difference [as alias]
def testRandomVerticalFlipWithEmptyBoxes(self):
    preprocess_options = [(preprocessor.random_vertical_flip, {})]
    images = self.expectedImagesAfterNormalization()
    boxes = self.createEmptyTestBoxes()
    tensor_dict = {fields.InputDataFields.image: images,
                   fields.InputDataFields.groundtruth_boxes: boxes}
    images_expected1 = self.expectedImagesAfterUpDownFlip()
    boxes_expected = self.createEmptyTestBoxes()
    images_expected2 = images
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
    images = tensor_dict[fields.InputDataFields.image]
    boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]

    images_diff1 = tf.squared_difference(images, images_expected1)
    images_diff2 = tf.squared_difference(images, images_expected2)
    images_diff = tf.multiply(images_diff1, images_diff2)
    images_diff_expected = tf.zeros_like(images_diff)

    with self.test_session() as sess:
      (images_diff_, images_diff_expected_, boxes_,
       boxes_expected_) = sess.run([images_diff, images_diff_expected, boxes,
                                    boxes_expected])
      self.assertAllClose(boxes_, boxes_expected_)
      self.assertAllClose(images_diff_, images_diff_expected_) 
Developer ID: ahmetozlu, Project: vehicle_counting_tensorflow, Lines of code: 26, Source file: preprocessor_test.py

Example 5: lossfn

# Required import: import tensorflow [as alias]
# Or: from tensorflow import squared_difference [as alias]
def lossfn(real_input, fake_input, compress, hparams, lsgan, name):
  """Loss function."""
  eps = 1e-12
  with tf.variable_scope(name):
    d1 = discriminator(real_input, compress, hparams, "discriminator")
    d2 = discriminator(fake_input, compress, hparams, "discriminator",
                       reuse=True)
    if lsgan:
      dloss = tf.reduce_mean(
          tf.squared_difference(d1, 0.9)) + tf.reduce_mean(tf.square(d2))
      gloss = tf.reduce_mean(tf.squared_difference(d2, 0.9))
      loss = (dloss + gloss)/2
    else:  # cross_entropy
      dloss = -tf.reduce_mean(
          tf.log(d1 + eps)) - tf.reduce_mean(tf.log1p(eps - d2))
      gloss = -tf.reduce_mean(tf.log(d2 + eps))
      loss = (dloss + gloss)/2
    return loss 
Developer ID: yyht, Project: BERT, Lines of code: 20, Source file: cycle_gan.py

Example 6: layer_norm_compute

# Required import: import tensorflow [as alias]
# Or: from tensorflow import squared_difference [as alias]
def layer_norm_compute(x, epsilon, scale, bias, layer_collection=None):
  """Layer norm raw computation."""

  # Save these before they get converted to tensors by the casting below.
  # (Note: `params` and `layer_collection` are not used in this excerpt.)
  params = (scale, bias)

  epsilon, scale, bias = [cast_like(t, x) for t in [epsilon, scale, bias]]
  mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
  variance = tf.reduce_mean(
      tf.squared_difference(x, mean), axis=[-1], keepdims=True)
  norm_x = (x - mean) * tf.rsqrt(variance + epsilon)

  output = norm_x * scale + bias

  return output
Developer ID: yyht, Project: BERT, Lines of code: 18, Source file: common_layers.py

Example 7: embedding_lookup

# Required import: import tensorflow [as alias]
# Or: from tensorflow import squared_difference [as alias]
def embedding_lookup(self, x, means):
    """Compute nearest neighbors and loss for training the embeddings.

    Args:
        x: Batch of encoder continuous latent states sliced/projected into
            shape [-1, num_blocks, block_dim].
        means: Embedding means.

    Returns:
        The nearest neighbor in one hot form, the nearest neighbor itself,
        the commitment loss, embedding training loss.
    """
    x_means_hot = self.nearest_neighbor(x, means)
    x_means_hot_flat = tf.reshape(
        x_means_hot, [-1, self.hparams.num_blocks, self.hparams.block_v_size])
    x_means = tf.matmul(tf.transpose(x_means_hot_flat, perm=[1, 0, 2]), means)
    x_means = tf.transpose(x_means, [1, 0, 2])
    q_loss = tf.reduce_mean(
        tf.squared_difference(tf.stop_gradient(x), x_means))
    e_loss = tf.reduce_mean(
        tf.squared_difference(x, tf.stop_gradient(x_means)))
    return x_means_hot, x_means, q_loss, e_loss 
Developer ID: yyht, Project: BERT, Lines of code: 26, Source file: vq_discrete.py

Example 8: mean_squared_error

# Required import: import tensorflow [as alias]
# Or: from tensorflow import squared_difference [as alias]
def mean_squared_error(output, target, is_mean=False):
    """Return the TensorFlow expression of mean-squre-error of two distributions.

    Parameters
    ----------
    output : 2D or 4D tensor.
    target : 2D or 4D tensor.
    is_mean : boolean, if True, use ``tf.reduce_mean`` to compute the loss for each example; otherwise, use ``tf.reduce_sum`` (default).

    References
    ------------
    - `Wiki Mean Squared Error <https://en.wikipedia.org/wiki/Mean_squared_error>`_
    """
    with tf.name_scope("mean_squared_error_loss"):
        if output.get_shape().ndims == 2:   # [batch_size, n_feature]
            if is_mean:
                mse = tf.reduce_mean(tf.reduce_mean(tf.squared_difference(output, target), 1))
            else:
                mse = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(output, target), 1))
        elif output.get_shape().ndims == 4: # [batch_size, w, h, c]
            if is_mean:
                mse = tf.reduce_mean(tf.reduce_mean(tf.squared_difference(output, target), [1, 2, 3]))
            else:
                mse = tf.reduce_mean(tf.reduce_sum(tf.squared_difference(output, target), [1, 2, 3]))
        return mse 
Developer ID: zjuela, Project: LapSRN-tensorflow, Lines of code: 27, Source file: cost.py

Example 9: normalized_mean_square_error

# Required import: import tensorflow [as alias]
# Or: from tensorflow import squared_difference [as alias]
def normalized_mean_square_error(output, target):
    """Return the TensorFlow expression of normalized mean-squre-error of two distributions.

    Parameters
    ----------
    output : 2D or 4D tensor.
    target : 2D or 4D tensor.
    """
    with tf.name_scope("mean_squared_error_loss"):
        if output.get_shape().ndims == 2:   # [batch_size, n_feature]
            nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(output, target), axis=1))
            nmse_b = tf.sqrt(tf.reduce_sum(tf.square(target), axis=1))
        elif output.get_shape().ndims == 4: # [batch_size, w, h, c]
            nmse_a = tf.sqrt(tf.reduce_sum(tf.squared_difference(output, target), axis=[1,2,3]))
            nmse_b = tf.sqrt(tf.reduce_sum(tf.square(target), axis=[1,2,3]))
        nmse = tf.reduce_mean(nmse_a / nmse_b)
    return nmse 
Developer ID: zjuela, Project: LapSRN-tensorflow, Lines of code: 19, Source file: cost.py

Example 10: testRandomHorizontalFlip

# Required import: import tensorflow [as alias]
# Or: from tensorflow import squared_difference [as alias]
def testRandomHorizontalFlip(self):
    preprocess_options = [(preprocessor.random_horizontal_flip, {})]
    images = self.expectedImagesAfterNormalization()
    boxes = self.createTestBoxes()
    tensor_dict = {fields.InputDataFields.image: images,
                   fields.InputDataFields.groundtruth_boxes: boxes}
    images_expected1 = self.expectedImagesAfterMirroring()
    boxes_expected1 = self.expectedBoxesAfterMirroring()
    images_expected2 = images
    boxes_expected2 = boxes
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
    images = tensor_dict[fields.InputDataFields.image]
    boxes = tensor_dict[fields.InputDataFields.groundtruth_boxes]

    boxes_diff1 = tf.squared_difference(boxes, boxes_expected1)
    boxes_diff2 = tf.squared_difference(boxes, boxes_expected2)
    boxes_diff = tf.multiply(boxes_diff1, boxes_diff2)
    boxes_diff_expected = tf.zeros_like(boxes_diff)

    images_diff1 = tf.squared_difference(images, images_expected1)
    images_diff2 = tf.squared_difference(images, images_expected2)
    images_diff = tf.multiply(images_diff1, images_diff2)
    images_diff_expected = tf.zeros_like(images_diff)

    with self.test_session() as sess:
      (images_diff_, images_diff_expected_, boxes_diff_,
       boxes_diff_expected_) = sess.run([images_diff, images_diff_expected,
                                         boxes_diff, boxes_diff_expected])
      self.assertAllClose(boxes_diff_, boxes_diff_expected_)
      self.assertAllClose(images_diff_, images_diff_expected_) 
Developer ID: ringringyi, Project: DOTA_models, Lines of code: 32, Source file: preprocessor_test.py

Example 11: testRandomRGBtoGray

# Required import: import tensorflow [as alias]
# Or: from tensorflow import squared_difference [as alias]
def testRandomRGBtoGray(self):
    preprocess_options = [(preprocessor.random_rgb_to_gray, {})]
    images_original = self.createTestImages()
    tensor_dict = {fields.InputDataFields.image: images_original}
    tensor_dict = preprocessor.preprocess(tensor_dict, preprocess_options)
    images_gray = tensor_dict[fields.InputDataFields.image]
    images_gray_r, images_gray_g, images_gray_b = tf.split(
        value=images_gray, num_or_size_splits=3, axis=3)
    images_r, images_g, images_b = tf.split(
        value=images_original, num_or_size_splits=3, axis=3)
    images_r_diff1 = tf.squared_difference(tf.to_float(images_r),
                                           tf.to_float(images_gray_r))
    images_r_diff2 = tf.squared_difference(tf.to_float(images_gray_r),
                                           tf.to_float(images_gray_g))
    images_r_diff = tf.multiply(images_r_diff1, images_r_diff2)
    images_g_diff1 = tf.squared_difference(tf.to_float(images_g),
                                           tf.to_float(images_gray_g))
    images_g_diff2 = tf.squared_difference(tf.to_float(images_gray_g),
                                           tf.to_float(images_gray_b))
    images_g_diff = tf.multiply(images_g_diff1, images_g_diff2)
    images_b_diff1 = tf.squared_difference(tf.to_float(images_b),
                                           tf.to_float(images_gray_b))
    images_b_diff2 = tf.squared_difference(tf.to_float(images_gray_b),
                                           tf.to_float(images_gray_r))
    images_b_diff = tf.multiply(images_b_diff1, images_b_diff2)
    image_zero1 = tf.constant(0, dtype=tf.float32, shape=[1, 4, 4, 1])
    with self.test_session() as sess:
      (images_r_diff_, images_g_diff_, images_b_diff_, image_zero1_) = sess.run(
          [images_r_diff, images_g_diff, images_b_diff, image_zero1])
      self.assertAllClose(images_r_diff_, image_zero1_)
      self.assertAllClose(images_g_diff_, image_zero1_)
      self.assertAllClose(images_b_diff_, image_zero1_) 
Developer ID: ringringyi, Project: DOTA_models, Lines of code: 34, Source file: preprocessor_test.py

Example 12: tfe_squared_difference

# Required import: import tensorflow [as alias]
# Or: from tensorflow import squared_difference [as alias]
def tfe_squared_difference(t1, t2):
  return tf.squared_difference(t1, t2) 
Developer ID: google, Project: tangent, Lines of code: 4, Source file: functions.py

Example 13: get_inv_quadratic_form

# Required import: import tensorflow [as alias]
# Or: from tensorflow import squared_difference [as alias]
def get_inv_quadratic_form(self, data, mean):
    tf_sq_distances = tf.squared_difference(data, tf.expand_dims(mean, 0))
    tf_sum_sq_distances = tf.reduce_sum(tf_sq_distances, 1)

    return tf_sum_sq_distances / self.tf_variance_scalar
Developer ID: aakhundov, Project: tf-example-models, Lines of code: 7, Source file: isotropic_covariance.py


Note: The tensorflow.squared_difference examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please consult the corresponding project's license before redistributing or using the code; do not republish without permission.