

Python v1.divide Method Code Examples

This article collects and summarizes typical usage examples of the tensorflow.compat.v1.divide method in Python. If you are wondering what v1.divide does, how to use it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from tensorflow.compat.v1.


The following shows 9 code examples of the v1.divide method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
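
Before the examples, here is a minimal sketch of tf.divide itself in TF 1.x graph mode (the tensor values are illustrative only):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

x = tf.constant([6.0, 9.0, 12.0])
y = tf.constant([2.0, 3.0, 4.0])
quotient = tf.divide(x, y)  # element-wise division, equivalent to x / y

with tf.Session() as sess:
    print(sess.run(quotient))  # [3. 3. 3.]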

Example 1: normalize_image

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import divide [as alias]
def normalize_image(images):
  """Normalize image to zero mean and unit variance.

  Args:
    images: a tensor representing images, at least 3-D.
  Returns:
    images normalized by mean and stdev.
  """
  data_type = images.dtype
  mean = tf.constant(ssd_constants.NORMALIZATION_MEAN, data_type)
  std = tf.constant(ssd_constants.NORMALIZATION_STD, data_type)
  images = tf.divide(tf.subtract(images, mean), std)

  mlperf.logger.log(key=mlperf.tags.DATA_NORMALIZATION_MEAN,
                    value=ssd_constants.NORMALIZATION_MEAN)
  mlperf.logger.log(key=mlperf.tags.DATA_NORMALIZATION_STD,
                    value=ssd_constants.NORMALIZATION_STD)
  return images 
Author: tensorflow | Project: benchmarks | Lines: 20 | Source: ssd_dataloader.py
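
Example 1 depends on ssd_constants and the mlperf logger from the benchmarks repo. As a self-contained sketch of the same subtract-then-divide normalization, here is a version using the commonly cited ImageNet channel statistics (stand-in values, not necessarily what ssd_constants defines):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

images = tf.random.uniform([4, 300, 300, 3])  # NHWC batch of images
mean = tf.constant([0.485, 0.456, 0.406])     # per-channel mean (stand-in)
std = tf.constant([0.229, 0.224, 0.225])      # per-channel stdev (stand-in)
normalized = tf.divide(tf.subtract(images, mean), std)

with tf.Session() as sess:
    print(sess.run(normalized).shape)  # (4, 300, 300, 3)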

Example 2: f1_metric

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import divide [as alias]
def f1_metric(precision, precision_op, recall, recall_op):
  """Computes F1 based on precision and recall.

  Args:
    precision: <float> [batch_size]
    precision_op: Update op for precision.
    recall: <float> [batch_size]
    recall_op: Update op for recall.

  Returns:
    tensor and update op for F1.
  """
  f1_op = tf.group(precision_op, recall_op)
  numerator = 2 * tf.multiply(precision, recall)
  denominator = tf.add(precision, recall)
  f1 = tf.divide(numerator, denominator)

  # <float> [batch_size]
  zero_vec = tf.zeros_like(f1)
  is_valid = tf.greater(denominator, zero_vec)
  f1 = tf.where(is_valid, x=f1, y=zero_vec)

  return f1, f1_op 
Author: google-research | Project: language | Lines: 25 | Source: nq_long_utils.py
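
A hypothetical way to wire f1_metric to the TF 1.x streaming metrics (the labels and predictions are made up):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

labels = tf.constant([1, 0, 1, 1])
predictions = tf.constant([1, 0, 0, 1])
precision, precision_op = tf.metrics.precision(labels, predictions)
recall, recall_op = tf.metrics.recall(labels, predictions)
f1, f1_op = f1_metric(precision, precision_op, recall, recall_op)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())  # metric counters are local variables
    sess.run(f1_op)                             # updates precision and recall
    print(sess.run(f1))                         # 2PR / (P + R) = 0.8 here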

Example 3: _compute_loss

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import divide [as alias]
def _compute_loss(self, prediction_tensor, target_tensor, weights):
    """Compute loss function.

    Args:
      prediction_tensor: A float tensor of shape [batch_size, num_anchors,
        num_classes] representing the predicted logits for each class
      target_tensor: A float tensor of shape [batch_size, num_anchors,
        num_classes] representing one-hot encoded classification targets
      weights: a float tensor of shape either [batch_size, num_anchors,
        num_classes] or [batch_size, num_anchors, 1]. If the shape is
        [batch_size, num_anchors, 1], all the classes are equally weighted.

    Returns:
      loss: a float tensor of shape [batch_size, num_anchors]
        representing the value of the loss function.
    """
    weights = tf.reduce_mean(weights, axis=2)
    num_classes = prediction_tensor.get_shape().as_list()[-1]
    prediction_tensor = tf.divide(
        prediction_tensor, self._logit_scale, name='scale_logit')
    per_row_cross_ent = (tf.nn.softmax_cross_entropy_with_logits(
        labels=tf.reshape(target_tensor, [-1, num_classes]),
        logits=tf.reshape(prediction_tensor, [-1, num_classes])))
    return tf.reshape(per_row_cross_ent, tf.shape(weights)) * weights 
Author: tensorflow | Project: models | Lines: 26 | Source: losses.py
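
Dividing the logits by self._logit_scale is temperature scaling: a scale greater than 1 softens the predicted distribution before the cross-entropy is computed. A standalone sketch of the idea, outside the class (values illustrative):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

logits = tf.constant([[2.0, 1.0, 0.1]])
logit_scale = 2.0  # temperature; values > 1 flatten the softmax
scores = tf.nn.softmax(tf.divide(logits, logit_scale))

with tf.Session() as sess:
    print(sess.run(scores))  # softer than softmax(logits)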

Example 4: apply_spectral_norm

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import divide [as alias]
def apply_spectral_norm(x):
  """Normalizes x using the spectral norm.

  The implementation follows Algorithm 1 of
  https://arxiv.org/abs/1802.05957. If x is not a 2-D Tensor, it is
  reshaped into a 2-D matrix whose second dimension is the number of
  channels (the last dimension).

  Args:
    x: Tensor with the last dimension equal to the number of filters.

  Returns:
    x: Tensor with the same shape as x normalized by the spectral norm.
    assign_op: Op to be run after every step to update the vector "u".
  """
  weights_shape = shape_list(x)
  other, num_filters = tf.reduce_prod(weights_shape[:-1]), weights_shape[-1]

  # Reshape into a 2-D matrix with outer size num_filters.
  weights_2d = tf.reshape(x, (other, num_filters))

  # v = Wu / ||W u||
  with tf.variable_scope("u", reuse=tf.AUTO_REUSE):
    u = tf.get_variable(
        "u", [num_filters, 1],
        initializer=tf.truncated_normal_initializer(),
        trainable=False)
  v = tf.nn.l2_normalize(tf.matmul(weights_2d, u))

  # u_new = vW / ||v W||
  u_new = tf.nn.l2_normalize(tf.matmul(tf.transpose(v), weights_2d))

  # s = v*W*u
  spectral_norm = tf.squeeze(
      tf.matmul(tf.transpose(v), tf.matmul(weights_2d, tf.transpose(u_new))))

  # set u equal to u_new in the next iteration.
  assign_op = tf.assign(u, tf.transpose(u_new))
  return tf.divide(x, spectral_norm), assign_op 
Author: tensorflow | Project: tensor2tensor | Lines: 40 | Source: common_layers.py
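
apply_spectral_norm relies on a shape_list helper defined elsewhere in common_layers. Assuming a minimal stand-in for it, with the function pasted into the same module, a hypothetical usage might look like this:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

def shape_list(x):
    # Minimal stand-in for common_layers.shape_list: static shape as a list.
    return x.get_shape().as_list()

kernel = tf.get_variable("kernel", [3, 3, 64, 128])  # HWIO conv kernel
normalized_kernel, update_u = apply_spectral_norm(kernel)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(normalized_kernel)  # use in the forward pass
    sess.run(update_u)           # refresh u after every training step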

Example 5: compute_nats_and_bits_per_dim

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import divide [as alias]
def compute_nats_and_bits_per_dim(data_dim,
                                  latent_dim,
                                  average_reconstruction,
                                  average_prior):
  """Computes negative ELBO, which is an upper bound on the negative likelihood.

  Args:
    data_dim: int-like indicating data dimensionality.
    latent_dim: int-like indicating latent dimensionality.
    average_reconstruction: Scalar Tensor indicating the reconstruction cost
      averaged over all data dimensions and any data batches.
    average_prior: Scalar Tensor indicating the negative log-prior probability
      averaged over all latent dimensions and any data batches.

  Returns:
    Tuple of scalar Tensors, representing the nats and bits per data dimension
    (e.g., subpixels) respectively.
  """
  with tf.name_scope(None, default_name="compute_nats_per_dim"):
    data_dim = tf.cast(data_dim, average_reconstruction.dtype)
    latent_dim = tf.cast(latent_dim, average_prior.dtype)
    negative_log_likelihood = data_dim * average_reconstruction
    negative_log_prior = latent_dim * average_prior
    negative_elbo = negative_log_likelihood + negative_log_prior
    nats_per_dim = tf.divide(negative_elbo, data_dim, name="nats_per_dim")
    bits_per_dim = tf.divide(nats_per_dim, tf.log(2.), name="bits_per_dim")
    return nats_per_dim, bits_per_dim 
Author: tensorflow | Project: tensor2tensor | Lines: 29 | Source: latent_layers.py
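
The second division only converts units: one nat equals 1/ln(2) bits, so bits_per_dim = nats_per_dim / ln(2). A quick plain-Python sanity check with an illustrative value:

import math

nats_per_dim = 2.0
bits_per_dim = nats_per_dim / math.log(2.0)
print(bits_per_dim)  # ~2.885 bits, since 1 nat = 1/ln(2) bits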

Example 6: toy_model

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import divide [as alias]
def toy_model(features, mesh):
  """A toy model implemented by mesh tensorlfow."""
  batch_dim = mtf.Dimension('batch', FLAGS.batch_size)
  io_dim = mtf.Dimension('io', FLAGS.io_size)

  master_dtype = tf.as_dtype(FLAGS.master_dtype)
  slice_dtype = tf.as_dtype(FLAGS.slice_dtype)
  activation_dtype = tf.as_dtype(FLAGS.activation_dtype)

  x = mtf.import_tf_tensor(mesh, features, mtf.Shape([batch_dim, io_dim]))
  x = mtf.cast(x, activation_dtype)
  h = x
  for lnum in range(1, FLAGS.num_hidden_layers + 2):
    if lnum + 1 == FLAGS.num_hidden_layers + 2:
      # output layer
      dim = io_dim
    elif lnum % 2 == 0:
      dim = mtf.Dimension('hidden_even', FLAGS.hidden_size)
    else:
      dim = mtf.Dimension('hidden_odd', FLAGS.hidden_size)
    h = mtf.layers.dense(
        h, dim,
        use_bias=False,
        master_dtype=master_dtype,
        slice_dtype=slice_dtype,
        name='layer_%d' % lnum)
  y = h
  g = tf.train.get_global_step()
  if FLAGS.step_with_nan >= 0:
    # Trigger a NaN in the forward pass; this is used to test whether
    # Mesh TensorFlow can handle occasional NaN values.
    y += mtf.import_tf_tensor(
        mesh,
        tf.divide(
            0.0,
            tf.cond(tf.equal(g, FLAGS.step_with_nan), lambda: 0., lambda: 1.)),
        mtf.Shape([]))

  loss = mtf.reduce_mean(mtf.square(y - x))
  return y, loss 
Author: tensorflow | Project: mesh | Lines: 42 | Source: toy_model_tpu.py
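
The NaN trick above hinges on tf.divide(0.0, 0.0) evaluating to NaN rather than raising an error. A minimal demonstration outside Mesh TensorFlow (the step value is made up):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

step = tf.constant(5, tf.int64)
maybe_nan = tf.divide(
    0.0, tf.cond(tf.equal(step, 5), lambda: 0., lambda: 1.))  # 0/0 -> NaN

with tf.Session() as sess:
    print(sess.run(maybe_nan))  # nan at step 5, 0.0 otherwise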

Example 7: cross_entropy_seq_with_mask

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import divide [as alias]
def cross_entropy_seq_with_mask(logits, target_seqs, input_mask, return_details=False, name=None):
    """Returns the expression of cross-entropy of two sequences, implement
    softmax internally. Normally be used for Dynamic RNN outputs.

    Parameters
    -----------
    logits : network identity outputs
        2D tensor, ``network.outputs``, [batch_size, number of output units].
    target_seqs : int tensor, like word IDs.
        [batch_size, ?]
    input_mask : the mask used to compute the loss
        The same size as target_seqs, normally 0s and 1s.
    return_details : boolean
        - If False (default), only returns the loss.
        - If True, returns the loss, losses, weights and targets (reshaped to one vector).

    Examples
    --------
    - see Image Captioning Example.
    """
    targets = tf.reshape(target_seqs, [-1])   # to one vector
    weights = tf.to_float(tf.reshape(input_mask, [-1]))   # to one vector like targets
    losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets, name=name) * weights
    #losses = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets, name=name)) # for TF1.0 and others

    try:  # TF 1.0+
        loss = tf.divide(tf.reduce_sum(losses),   # sum of masked losses ...
                         tf.reduce_sum(weights),  # ... divided by the mask sum
                         name="seq_loss_with_mask")
    except AttributeError:  # TF 0.12 fallback, where tf.divide does not exist
        loss = tf.div(tf.reduce_sum(losses),
                      tf.reduce_sum(weights),
                      name="seq_loss_with_mask")
    if return_details:
        return loss, losses, weights, targets
    else:
        return loss 
Author: ravisvi | Project: super-resolution-videos | Lines: 39 | Source: cost.py
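
A hypothetical call with padded sequences (shapes and values made up); note that logits must already be flattened to [batch_size * n_steps, vocab_size]:

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

batch_size, n_steps, vocab_size = 2, 3, 5
logits = tf.constant(
    np.random.randn(batch_size * n_steps, vocab_size).astype(np.float32))
target_seqs = tf.constant([[1, 2, 0], [3, 4, 0]])
input_mask = tf.constant([[1, 1, 0], [1, 1, 0]])  # zeros mask out padding
loss = cross_entropy_seq_with_mask(logits, target_seqs, input_mask)

with tf.Session() as sess:
    print(sess.run(loss))  # mean cross-entropy over unmasked steps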

Example 8: norm_boxes_graph

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import divide [as alias]
def norm_boxes_graph(boxes, shape):
    """Converts boxes from pixel coordinates to normalized coordinates.
    boxes: [..., (y1, x1, y2, x2)] in pixel coordinates
    shape: [..., (height, width)] in pixels

    Note: In pixel coordinates (y2, x2) is outside the box. But in normalized
    coordinates it's inside the box.

    Returns:
        [..., (y1, x1, y2, x2)] in normalized coordinates
    """
    h, w = tf.split(tf.cast(shape, tf.float32), 2)
    scale = tf.concat([h, w, h, w], axis=-1) - tf.constant(1.0)
    shift = tf.constant([0., 0., 1., 1.])
    return tf.divide(boxes - shift, scale) 
Author: OCR-D | Project: ocrd_anybaseocr | Lines: 17 | Source: model.py
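
An illustrative call, normalizing one pixel-space box for a hypothetical 768x1024 image:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

boxes = tf.constant([[10.0, 20.0, 110.0, 220.0]])  # (y1, x1, y2, x2) in pixels
shape = tf.constant([768, 1024])                   # (height, width)
normalized = norm_boxes_graph(boxes, shape)

with tf.Session() as sess:
    print(sess.run(normalized))  # coordinates scaled into [0, 1]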

Example 9: _scale_and_softmax_logits

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import divide [as alias]
def _scale_and_softmax_logits(self, logits):
    """Scale logits then apply softmax."""
    scaled_logits = tf.divide(logits, self._logit_scale, name='scale_logits')
    return tf.nn.softmax(scaled_logits, name='convert_scores') 
Author: tensorflow | Project: models | Lines: 6 | Source: losses.py


Note: The tensorflow.compat.v1.divide examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers, and the source code copyright belongs to the original authors; for distribution and use, please refer to the corresponding project's License. Do not reproduce without permission.