

Python v1.eye Method Code Examples

This article collects typical usage examples of the Python method tensorflow.compat.v1.eye. If you are wondering how v1.eye works, how to call it, or what real usage looks like, the curated code examples below may help. You can also explore further usage examples from the tensorflow.compat.v1 module that this method belongs to.


Seven code examples of the v1.eye method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
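
Before the project examples, here is a minimal sketch of the tf.eye API itself (the shapes are chosen purely for illustration): tf.eye(num_rows) builds an identity matrix, and num_columns, batch_shape, and dtype are optional.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

identity = tf.eye(3)                                  # 3x3 float32 identity
rect = tf.eye(2, num_columns=3)                       # 2x3, ones on the main diagonal
batched = tf.eye(2, batch_shape=[4], dtype=tf.int32)  # four 2x2 identities

with tf.Session() as sess:
    print(sess.run(identity))
    # [[1. 0. 0.]
    #  [0. 1. 0.]
    #  [0. 0. 1.]]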

Example 1: rank_loss

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import eye [as alias]
def rank_loss(sentence_emb, image_emb, margin=0.2):
  """Experimental rank loss, thanks to kkurach@ for the code."""
  with tf.name_scope("rank_loss"):
    # Normalize first as this is assumed in cosine similarity later.
    sentence_emb = tf.nn.l2_normalize(sentence_emb, 1)
    image_emb = tf.nn.l2_normalize(image_emb, 1)
    # Both sentence_emb and image_emb have size [batch, depth].
    scores = tf.matmul(image_emb, tf.transpose(sentence_emb))  # [batch, batch]
    diagonal = tf.diag_part(scores)  # [batch]
    cost_s = tf.maximum(0.0, margin - diagonal + scores)  # [batch, batch]
    cost_im = tf.maximum(
        0.0, margin - tf.reshape(diagonal, [-1, 1]) + scores)  # [batch, batch]
    # Clear diagonals.
    batch_size = tf.shape(sentence_emb)[0]
    empty_diagonal_mat = tf.ones_like(cost_s) - tf.eye(batch_size)
    cost_s *= empty_diagonal_mat
    cost_im *= empty_diagonal_mat
    return tf.reduce_mean(cost_s) + tf.reduce_mean(cost_im) 
Developer: tensorflow, Project: tensor2tensor, Lines: 20, Source: slicenet.py
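
A minimal usage sketch (batch and depth sizes are hypothetical); the embeddings are L2-normalized inside rank_loss, so raw activations can be passed directly:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

sentence_emb = tf.random_normal([8, 128])  # [batch, depth]
image_emb = tf.random_normal([8, 128])
loss = rank_loss(sentence_emb, image_emb, margin=0.2)

with tf.Session() as sess:
    print(sess.run(loss))  # a non-negative scalar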

Example 2: convolve

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import eye [as alias]
def convolve(image, pixel_filter, channels=3, name=None):
    """Perform a 2D pixel convolution on the given image.

    Arguments:
      image: A 3D `float32` `Tensor` of shape `[height, width, channels]`,
        where `channels` is the third argument to this function and the
        first two dimensions are arbitrary.
      pixel_filter: A 2D `Tensor`, representing pixel weightings for the
        kernel. This will be used to create a 4D kernel---the extra two
        dimensions are for channels (see `tf.nn.conv2d` documentation),
        and the kernel will be constructed so that the channels are
        independent: each channel only observes the data from neighboring
        pixels of the same channel.
      channels: An integer representing the number of channels in the
        image (e.g., 3 for RGB).
      name: An optional name for the operation (used as the name scope).

    Returns:
      A 3D `float32` `Tensor` of the same shape as the input.
    """
    with tf.name_scope(name, "convolve"):
        tf.assert_type(image, tf.float32)
        channel_filter = tf.eye(channels)
        filter_ = tf.expand_dims(
            tf.expand_dims(pixel_filter, -1), -1
        ) * tf.expand_dims(tf.expand_dims(channel_filter, 0), 0)
        result_batch = tf.nn.conv2d(
            tf.stack([image]),  # batch
            filter=filter_,
            strides=[1, 1, 1, 1],
            padding="SAME",
        )
        return result_batch[0]  # unbatch 
Developer: tensorflow, Project: tensorboard, Lines: 34, Source: images_demo.py
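
A minimal usage sketch (the image size is hypothetical), applying convolve as a 3x3 box blur; tf.eye(channels) inside convolve is what keeps the channels independent:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

image = tf.random_uniform([32, 32, 3])   # [height, width, channels], float32
box_blur = tf.fill([3, 3], 1.0 / 9.0)    # uniform 3x3 averaging kernel
blurred = convolve(image, box_blur, channels=3)

with tf.Session() as sess:
    print(sess.run(blurred).shape)  # (32, 32, 3)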

Example 3: __call__

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import eye [as alias]
def __call__(self, shape, dtype=None, partition_info=None):
    del partition_info  # unused
    assert len(shape) > 2, shape

    support = tuple(shape[:-2]) + (1, 1)
    indices = [[s // 2 for s in support]]
    updates = tf.constant([self.gain], dtype=dtype)
    kernel = tf.scatter_nd(indices, updates, support)

    assert shape[-2] == shape[-1], shape
    if shape[-1] != 1:
      kernel *= tf.eye(shape[-1], dtype=dtype)

    return kernel 
Developer: tensorflow, Project: compression, Lines: 16, Source: initializers.py
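
To see what this initializer produces, a minimal sketch (shapes chosen for illustration) that inlines the same logic for a 3x3 kernel with 2 input/output channels; the result is a centered delta kernel that, thanks to tf.eye, passes each channel through independently, scaled by gain:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

shape = (3, 3, 2, 2)  # (kernel_height, kernel_width, in_channels, out_channels)
gain = 1.0
support = tuple(shape[:-2]) + (1, 1)   # (3, 3, 1, 1)
indices = [[s // 2 for s in support]]  # the spatial center: [[1, 1, 0, 0]]
kernel = tf.scatter_nd(indices, tf.constant([gain]), support)
kernel *= tf.eye(shape[-1])            # broadcasts to (3, 3, 2, 2), channels independent

with tf.Session() as sess:
    print(sess.run(kernel)[1, 1])  # 2x2 identity at the center tap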

Example 4: _create_gaussian

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import eye [as alias]
def _create_gaussian(self, gaussian_type):
    mu = tf.random_normal([3])
    if gaussian_type == tfp.distributions.MultivariateNormalDiag:
      scale_diag = tf.random_normal([3])
      dist = tfp.distributions.MultivariateNormalDiag(mu, scale_diag)
    if gaussian_type == tfp.distributions.MultivariateNormalDiagPlusLowRank:
      scale_diag = tf.random_normal([3])
      perturb_factor = tf.random_normal([3, 2])
      scale_perturb_diag = tf.random_normal([2])
      dist = tfp.distributions.MultivariateNormalDiagPlusLowRank(
          mu,
          scale_diag,
          scale_perturb_factor=perturb_factor,
          scale_perturb_diag=scale_perturb_diag)
    if gaussian_type == tfp.distributions.MultivariateNormalTriL:
      cov = tf.random_uniform([3, 3], minval=0, maxval=1.0)
      # Create a PSD matrix.
      cov = 0.5 * (cov + tf.transpose(cov)) + 3 * tf.eye(3)
      scale = tf.cholesky(cov)
      dist = tfp.distributions.MultivariateNormalTriL(mu, scale)
    if gaussian_type == tfp.distributions.MultivariateNormalFullCovariance:
      cov = tf.random_uniform([3, 3], minval=0, maxval=1.0)
      # Create a PSD matrix.
      cov = 0.5 * (cov + tf.transpose(cov)) + 3 * tf.eye(3)
      dist = tfp.distributions.MultivariateNormalFullCovariance(mu, cov)
    return (dist, mu, dist.covariance()) 
Developer: deepmind, Project: trfl, Lines: 28, Source: distribution_ops_test.py
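
The tf.eye calls above diagonally load the covariance. Symmetrizing keeps the entries of 0.5 * (cov + cov^T) in [0, 1], so by Gershgorin's circle theorem every eigenvalue lies within 2 of a diagonal entry; adding 3 * tf.eye(3) therefore pushes every eigenvalue to at least 1, making the matrix safely positive definite. A minimal numpy check of this reasoning:

import numpy as np

cov = np.random.uniform(0.0, 1.0, size=(3, 3))
psd = 0.5 * (cov + cov.T) + 3.0 * np.eye(3)
print(np.linalg.eigvalsh(psd))  # all eigenvalues >= 1
np.linalg.cholesky(psd)         # raises LinAlgError unless positive definite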

Example 5: body

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import eye [as alias]
def body(self, features):
    if self.hparams.mode != tf.estimator.ModeKeys.PREDICT:
      # In training mode we need to embed both the queries and the code
      # using the inputs and targets respectively.
      with tf.variable_scope('string_embedding'):
        string_embedding = self.encode(features, 'inputs')

      with tf.variable_scope('code_embedding'):
        code_embedding = self.encode(features, 'targets')

      string_embedding_norm = tf.nn.l2_normalize(string_embedding, axis=1)
      code_embedding_norm = tf.nn.l2_normalize(code_embedding, axis=1)

      # All-vs-All cosine distance matrix, reshaped as row-major.
      cosine_dist = 1.0 - tf.matmul(string_embedding_norm, code_embedding_norm,
                                    transpose_b=True)
      cosine_dist_flat = tf.reshape(cosine_dist, [-1, 1])

      # Positive samples on the diagonal, reshaped as row-major.
      label_matrix = tf.eye(tf.shape(cosine_dist)[0], dtype=tf.int32)
      label_matrix_flat = tf.reshape(label_matrix, [-1])

      logits = tf.concat([1.0 - cosine_dist_flat, cosine_dist_flat], axis=1)
      labels = tf.one_hot(label_matrix_flat, 2)

      loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels,
                                                     logits=logits)
      return string_embedding_norm, {'training': loss}

    # In predict mode we conditionally embed either the string query
    # or the code based on the embed_code feature. In both cases the
    # input will be in the inputs feature, but the variable scope will
    # be different.
    # Define predicates to be used with tf.cond.
    def embed_string():
      with tf.variable_scope('string_embedding'):
        string_embedding = self.encode(features, 'inputs')
      return string_embedding

    def embed_code():
      with tf.variable_scope('code_embedding'):
        code_embedding = self.encode(features, 'inputs')
      return code_embedding

    embed_code_feature = features.get('embed_code')

    # embed_code_feature will be a tensor because inputs will be a batch
    # of inputs. We need to reduce that down to a single value for use
    # with tf.cond; so we simply take the max of all the elements.
    # This implicitly assumes all inputs have the same value.
    is_embed_code = tf.reduce_max(embed_code_feature)
    result = tf.cond(is_embed_code > 0, embed_code, embed_string)

    result = tf.nn.l2_normalize(result)
    return result 
Developer: tensorflow, Project: tensor2tensor, Lines: 57, Source: similarity_transformer.py
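
For intuition, a minimal sketch (the batch size is hypothetical) of how tf.eye marks the positive query/code pairs on the diagonal of the all-vs-all matrix:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

batch = 3
label_matrix = tf.eye(batch, dtype=tf.int32)        # positives on the diagonal
label_matrix_flat = tf.reshape(label_matrix, [-1])  # row-major, [batch * batch]
labels = tf.one_hot(label_matrix_flat, 2)           # [batch * batch, 2]

with tf.Session() as sess:
    print(sess.run(label_matrix_flat))  # [1 0 0 0 1 0 0 0 1]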

Example 6: _materialise_conv2d

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import eye [as alias]
def _materialise_conv2d(w, b, input_height, input_width, padding, strides):
  """Converts a convolution to an equivalent linear layer.

  Args:
    w: 4D tensor of shape (kernel_height, kernel_width, input_channels,
      output_channels) containing the convolution weights.
    b: 1D tensor of shape (output_channels) containing the convolution biases,
      or `None` if no biases.
    input_height: height of the input tensor.
    input_width: width of the input tensor.
    padding: `"VALID"` or `"SAME"`, the convolution's padding algorithm.
    strides: Integer list of `[vertical_stride, horizontal_stride]`.

  Returns:
    w: 2D tensor of shape (input_height * input_width * input_channels,
      output_height * output_width * output_channels) containing weights.
    b: 1D tensor of shape (output_height * output_width * output_channels)
      containing biases, or `None` if no biases.
  """
  kernel_height = w.shape[0].value
  kernel_width = w.shape[1].value
  input_channels = w.shape[2].value
  output_channels = w.shape[3].value

  # Temporarily move the input_channels dimension to output_channels.
  w = tf.reshape(w, shape=(kernel_height, kernel_width, 1,
                           input_channels * output_channels))
  # Apply the convolution to elementary (i.e. one-hot) inputs.
  diagonal_input = tf.reshape(
      tf.eye(input_height * input_width, dtype=w.dtype),
      shape=[input_height * input_width, input_height, input_width, 1])
  conv = tf.nn.convolution(
      diagonal_input, w,
      padding=padding, strides=strides)
  output_height = conv.shape[1].value
  output_width = conv.shape[2].value
  # conv is of shape (input_height * input_width, output_height, output_width,
  #                   input_channels * output_channels).
  # Reshape it to (input_height * input_width * input_channels,
  #                output_height * output_width * output_channels).
  w = tf.reshape(conv, shape=(
      [input_height * input_width,
       output_height, output_width,
       input_channels, output_channels]))
  w = tf.transpose(w, perm=[0, 3, 1, 2, 4])
  w = tf.reshape(w, shape=(
      [input_height * input_width * input_channels,
       output_height * output_width * output_channels]))

  # Broadcast b over spatial dimensions.
  b = tf.tile(b, [output_height * output_width]) if b is not None else None

  return w, b 
Developer: deepmind, Project: interval-bound-propagation, Lines: 55, Source: layer_utils.py
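
A minimal consistency check (shapes are hypothetical): the materialised weight matrix should reproduce the convolution on a random input, since flattening the NHWC input row-major matches the (pixel, channel) ordering of the returned weights:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

height, width, cin, cout = 5, 5, 2, 3
kernel = tf.random_normal([3, 3, cin, cout])
bias = tf.random_normal([cout])
w_mat, b_vec = _materialise_conv2d(kernel, bias, height, width,
                                   padding="SAME", strides=[1, 1])

x = tf.random_normal([1, height, width, cin])
conv_out = tf.nn.conv2d(x, kernel, strides=[1, 1, 1, 1], padding="SAME") + bias
linear_out = tf.matmul(tf.reshape(x, [1, -1]), w_mat) + b_vec

with tf.Session() as sess:
    conv_v, lin_v = sess.run([tf.reshape(conv_out, [1, -1]), linear_out])
    print(abs(conv_v - lin_v).max())  # ~1e-6: the two layers agree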

Example 7: _materialise_conv1d

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import eye [as alias]
def _materialise_conv1d(w, b, input_length, padding, stride):
  """Converts a convolution to an equivalent linear layer.

  Args:
    w: 3D tensor of shape (kernel_length, input_channels,
      output_channels) containing the convolution weights.
    b: 1D tensor of shape (output_channels) containing the convolution biases,
      or `None` if no biases.
    input_length: length of the input tensor.
    padding: `"VALID"` or `"SAME"`, the convolution's padding algorithm.
    stride: Integer stride.

  Returns:
    w: 2D tensor of shape (input_length * input_channels,
      output_length * output_channels) containing weights.
    b: 1D tensor of shape (output_length * output_channels)
      containing biases, or `None` if no biases.
  """
  kernel_length = w.shape[0].value
  input_channels = w.shape[1].value
  output_channels = w.shape[2].value

  # Temporarily move the input_channels dimension to output_channels.
  w = tf.reshape(w, shape=(kernel_length, 1,
                           input_channels * output_channels))
  # Apply the convolution to elementary (i.e. one-hot) inputs.
  diagonal_input = tf.reshape(
      tf.eye(input_length, dtype=w.dtype),
      shape=[input_length, input_length, 1])
  conv = tf.nn.conv1d(
      diagonal_input, w,
      padding=padding, stride=stride)
  output_length = conv.shape[1].value
  # conv is of shape (input_length, output_length,
  #                   input_channels * output_channels).
  # Reshape it to (input_length * input_channels,
  #                output_length * output_channels).
  w = tf.reshape(conv, shape=(
      [input_length,
       output_length,
       input_channels, output_channels]))
  w = tf.transpose(w, perm=[0, 2, 1, 3])
  w = tf.reshape(w, shape=(
      [input_length * input_channels,
       output_length * output_channels]))

  # Broadcast b over spatial dimensions.
  b = tf.tile(b, [output_length]) if b is not None else None

  return w, b 
Developer: deepmind, Project: interval-bound-propagation, Lines: 52, Source: layer_utils.py
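
The same consistency check, sketched for the 1D case (shapes again hypothetical):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

length, cin, cout = 7, 2, 3
kernel = tf.random_normal([3, cin, cout])
bias = tf.random_normal([cout])
w_mat, b_vec = _materialise_conv1d(kernel, bias, length, padding="SAME", stride=1)

x = tf.random_normal([1, length, cin])
conv_out = tf.nn.conv1d(x, kernel, stride=1, padding="SAME") + bias
linear_out = tf.matmul(tf.reshape(x, [1, -1]), w_mat) + b_vec

with tf.Session() as sess:
    conv_v, lin_v = sess.run([tf.reshape(conv_out, [1, -1]), linear_out])
    print(abs(conv_v - lin_v).max())  # ~1e-6: the two layers agree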


Note: the tensorflow.compat.v1.eye examples in this article were compiled by 純淨天空 from GitHub/MSDocs and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors. Refer to each project's license before distributing or using the code; do not republish without permission.