

Python v1.div Method Code Examples

This article collects typical usage examples of the tensorflow.compat.v1.div method in Python. If you have been wondering how exactly v1.div is used, or are looking for working examples of it, the curated code samples below should help. You can also explore further usage examples from the tensorflow.compat.v1 module.


The sections below present 15 code examples of the v1.div method, sorted by popularity by default.
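Before diving into the project examples, here is a minimal, self-contained sketch of what tf.div itself does (assuming TensorFlow is installed and imported through its compat.v1 API; the input values are illustrative only). tf.div divides two tensors element-wise and is the older counterpart of tf.divide and the / operator.

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

x = tf.constant([10.0, 7.0, 1.0])
y = tf.constant([2.0, 2.0, 4.0])
quotient = tf.div(x, y)           # element-wise x / y

with tf.Session() as sess:
    print(sess.run(quotient))     # [5.   3.5  0.25]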

Example 1: drop_connect

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import div [as alias]
def drop_connect(inputs, is_training, survival_prob):
  """Drop the entire conv with given survival probability."""
  # "Deep Networks with Stochastic Depth", https://arxiv.org/pdf/1603.09382.pdf
  if not is_training:
    return inputs

  # Compute tensor.
  batch_size = tf.shape(inputs)[0]
  random_tensor = survival_prob
  random_tensor += tf.random_uniform([batch_size, 1, 1, 1], dtype=inputs.dtype)
  binary_tensor = tf.floor(random_tensor)
  # Unlike the conventional approach of multiplying by survival_prob at test
  # time, here we divide by survival_prob at training time, so that no
  # additional compute is needed at test time.
  output = tf.div(inputs, survival_prob) * binary_tensor
  return output 
Author: JunweiLiang, Project: Object_Detection_Tracking, Lines of code: 18, Source file: utils.py
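A minimal usage sketch for drop_connect (assuming TensorFlow is installed and the function above is in scope; the batch and feature-map shapes are illustrative only):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

inputs = tf.ones([8, 4, 4, 16])     # dummy conv feature map, NHWC
outputs = drop_connect(inputs, is_training=True, survival_prob=0.8)

with tf.Session() as sess:
    out = sess.run(outputs)
    # Each example in the batch is either dropped (all zeros) or kept and
    # rescaled by 1 / survival_prob, so the expected value is unchanged.
    print(out.shape)                # (8, 4, 4, 16)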

Example 2: call

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import div [as alias]
def call(self, state):
    """Creates the output tensor/op given the input state tensor.

    See https://www.tensorflow.org/api_docs/python/tf/keras/Model for more
    information on this. Note that tf.keras.Model implements `call`, which is
    wrapped by tf.keras.Model's `__call__` method.

    Args:
      state: Tensor, input tensor.

    Returns:
      collections.namedtuple, output ops (graph mode) or output tensors (eager).
    """
    net = tf.cast(state, tf.float32)
    net = tf.div(net, 255.)
    net = self.conv1(net)
    net = self.conv2(net)
    net = self.conv3(net)
    net = self.flatten(net)
    net = self.dense1(net)
    net = self.dense2(net)
    unordered_q_heads = tf.reshape(net, [-1, self.num_actions, self.num_heads])
    q_heads, q_values = combine_q_functions(
        unordered_q_heads, self._transform_strategy, **self._kwargs)
    return MultiHeadNetworkType(q_heads, unordered_q_heads, q_values) 
Author: google-research, Project: batch_rl, Lines of code: 27, Source file: atari_helpers.py

Example 3: drop_path

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import div [as alias]
def drop_path(net, keep_prob, is_training=True):
  """Drops out a whole example hiddenstate with the specified probability."""
  if is_training:
    batch_size = tf.shape(net)[0]
    noise_shape = [batch_size, 1, 1, 1]
    keep_prob = tf.cast(keep_prob, dtype=net.dtype)
    random_tensor = keep_prob
    random_tensor += tf.random_uniform(noise_shape, dtype=net.dtype)
    binary_tensor = tf.floor(random_tensor)
    net = tf.div(net, keep_prob) * binary_tensor
  return net 
Author: tensorflow, Project: benchmarks, Lines of code: 13, Source file: nasnet_utils.py

Example 4: _rowwise_unsorted_segment_sum

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import div [as alias]
def _rowwise_unsorted_segment_sum(values, indices, n):
  """UnsortedSegmentSum on each row.

  Args:
    values: a `Tensor` with shape `[batch_size, k]`.
    indices: an integer `Tensor` with shape `[batch_size, k]`.
    n: an integer.
  Returns:
    A `Tensor` with the same type as `values` and shape `[batch_size, n]`.
  """
  batch, k = tf.unstack(tf.shape(indices), num=2)
  indices_flat = tf.reshape(indices, [-1]) + tf.div(tf.range(batch * k), k) * n
  ret_flat = tf.unsorted_segment_sum(
      tf.reshape(values, [-1]), indices_flat, batch * n)
  return tf.reshape(ret_flat, [batch, n]) 
Author: tensorflow, Project: tensor2tensor, Lines of code: 17, Source file: expert_utils.py
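A small numeric sketch of the row-offset trick (assuming the function above is in scope; the values are made up): each row's indices are shifted by row * n, so a single flat unsorted_segment_sum still keeps rows separate.

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

values = tf.constant([[1.0, 2.0, 3.0],
                      [4.0, 5.0, 6.0]])    # [batch_size=2, k=3]
indices = tf.constant([[0, 0, 2],
                       [1, 1, 1]])         # target column for each value
result = _rowwise_unsorted_segment_sum(values, indices, n=4)

with tf.Session() as sess:
    print(sess.run(result))
    # [[ 3.  0.  3.  0.]
    #  [ 0. 15.  0.  0.]]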

Example 5: shakeshake

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import div [as alias]
def shakeshake(xs, equal_grad=False):
  """Multi-argument shake-shake, currently approximated by sums of 2."""
  if len(xs) == 1:
    return xs[0]
  div = (len(xs) + 1) // 2
  arg1 = shakeshake(xs[:div], equal_grad=equal_grad)
  arg2 = shakeshake(xs[div:], equal_grad=equal_grad)
  if equal_grad:
    return shakeshake2_eqgrad(arg1, arg2)
  return shakeshake2(arg1, arg2) 
Author: tensorflow, Project: tensor2tensor, Lines of code: 12, Source file: common_layers.py

Example 6: global_pool_1d

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import div [as alias]
def global_pool_1d(inputs, pooling_type="MAX", mask=None):
  """Pool elements across the last dimension.

  Useful to convert a list of vectors into a single vector so as
  to get a representation of a set.

  Args:
    inputs: A tensor of shape [batch_size, sequence_length, input_dims]
      containing the sequences of input vectors.
    pooling_type: the pooling type to use, MAX or AVR
    mask: A tensor of shape [batch_size, sequence_length] containing a
      mask for the inputs with 1's for existing elements, and 0's elsewhere.

  Returns:
    A tensor of shape [batch_size, input_dims] containing the sequences of
    transformed vectors.
  """
  with tf.name_scope("global_pool", values=[inputs]):
    if mask is not None:
      mask = tf.expand_dims(mask, axis=2)
      inputs = tf.multiply(inputs, mask)

    if pooling_type == "MAX":
      # A tf.pool can be used here, but reduce is cleaner
      output = tf.reduce_max(inputs, axis=1)
    elif pooling_type == "AVR":
      if mask is not None:
        # Some elements are padding, so we can't simply take the mean over axis 1.
        output = tf.reduce_sum(inputs, axis=1)
        num_elems = tf.reduce_sum(mask, axis=1, keepdims=True)
        output = tf.div(output, tf.maximum(num_elems, 1))
      else:
        output = tf.reduce_mean(inputs, axis=1)

  return output 
Author: tensorflow, Project: tensor2tensor, Lines of code: 37, Source file: common_layers.py
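A short sketch of masked average pooling with the function above (assumes it is in scope; the toy values are illustrative): the padded position contributes neither to the sum nor to the element count.

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

# One sequence of length 3 with 2-dim features; the last position is padding.
inputs = tf.constant([[[1.0, 2.0], [3.0, 4.0], [9.0, 9.0]]])
mask = tf.constant([[1.0, 1.0, 0.0]])

pooled = global_pool_1d(inputs, pooling_type="AVR", mask=mask)

with tf.Session() as sess:
    print(sess.run(pooled))    # [[2. 3.]] -- mean over the two real elements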

Example 7: approximate_split

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import div [as alias]
def approximate_split(x, num_splits, axis=0):
  """Split approximately equally into num_splits parts.

  Args:
    x: a Tensor
    num_splits: an integer
    axis: an integer.

  Returns:
    a list of num_splits Tensors.
  """
  size = shape_list(x)[axis]
  size_splits = [tf.div(size + i, num_splits) for i in range(num_splits)]
  return tf.split(x, size_splits, axis=axis) 
Author: tensorflow, Project: tensor2tensor, Lines of code: 16, Source file: common_layers.py
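The split sizes come from integer division: the values (size + i) // num_splits for i = 0 .. num_splits - 1 differ by at most one and always sum back to size, which is what makes the split "approximately equal". A pure-Python illustration of that arithmetic (values are illustrative):

size, num_splits = 10, 3
size_splits = [(size + i) // num_splits for i in range(num_splits)]
print(size_splits)                 # [3, 3, 4]
assert sum(size_splits) == size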

Example 8: instance_norm

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import div [as alias]
def instance_norm(x):
  """Instance normalization layer."""
  with tf.variable_scope("instance_norm"):
    epsilon = 1e-5
    mean, var = tf.nn.moments(x, [1, 2], keep_dims=True)
    scale = tf.get_variable(
        "scale", [x.get_shape()[-1]],
        initializer=tf.truncated_normal_initializer(mean=1.0, stddev=0.02))
    offset = tf.get_variable(
        "offset", [x.get_shape()[-1]], initializer=tf.constant_initializer(0.0))
    out = scale * tf.div(x - mean, tf.sqrt(var + epsilon)) + offset

    return out 
Author: tensorflow, Project: tensor2tensor, Lines of code: 15, Source file: common_layers.py
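A minimal usage sketch for instance_norm (assumes TensorFlow is installed and the function above is in scope; the input shape is illustrative). The learned scale and offset variables need to be initialized before the first run:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

images = tf.random_uniform([2, 8, 8, 3])         # NHWC batch
normalized = instance_norm(images)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # initializes scale and offset
    print(sess.run(normalized).shape)            # (2, 8, 8, 3)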

Example 9: compute_area_features

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import div [as alias]
def compute_area_features(features, max_area_width, max_area_height=1, height=1,
                          epsilon=1e-6):
  """Computes features for each area.

  Args:
    features: a Tensor in a shape of [batch_size, height * width, depth].
    max_area_width: the max width allowed for an area.
    max_area_height: the max height allowed for an area.
    height: the height of the image.
    epsilon: the epsilon added to the variance for computing standard deviation.
  Returns:
    area_mean: A Tensor of shape [batch_size, num_areas, depth]
    area_std: A Tensor of shape [batch_size, num_areas, depth]
    area_sum: A Tensor of shape [batch_size, num_areas, depth]
    area_heights: A Tensor of shape [batch_size, num_areas, 1]
    area_widths: A Tensor of shape [batch_size, num_areas, 1]
  """
  with tf.name_scope("compute_area_features"):
    tf.logging.info("area_attention compute_area_features: %d x %d",
                    max_area_height, max_area_width)
    area_sum, area_heights, area_widths = _compute_sum_image(
        features, max_area_width=max_area_width,
        max_area_height=max_area_height, height=height)
    area_squared_sum, _, _ = _compute_sum_image(
        tf.pow(features, 2), max_area_width=max_area_width,
        max_area_height=max_area_height, height=height)
    sizes = tf.multiply(area_heights, area_widths)
    float_area_sizes = tf.to_float(sizes)
    area_mean = tf.div(area_sum, float_area_sizes)
    s2_n = tf.div(area_squared_sum, float_area_sizes)
    area_variance = tf.subtract(s2_n, tf.pow(area_mean, 2))
    area_std = tf.sqrt(tf.abs(area_variance) + epsilon)
    return area_mean, area_std, area_sum, area_heights, area_widths 
Author: tensorflow, Project: tensor2tensor, Lines of code: 35, Source file: area_attention.py

Example 10: padded_where

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import div [as alias]
def padded_where(condition, length):
  """TPU friendly version of tf.where(cond) with fixed length and padding.

  This is a wrapper around tf.where(cond) that returns the coordinates of the
  True elements of cond (case where x and y are None). This version, however,
  returns a fixed length tensor of coordinates, determined by `length`.  If the
  number of True elements in `condition` is less than `length`, then the
  returned tensor is right-padded with zeros. Otherwise, the returned tensor is
  truncated to `length` size.

  Args:
    condition: tf.Tensor of type boolean; any shape.
    length: Length of (last dimension of) the returned tensor.

  Returns:
    Two tensors:
    - a tensor of type int32, with same shape as `condition`, representing
      coordinates of the last dimension of `condition` tensor where values are
      True.
    - a mask tensor of type int32 with 1s in valid indices of the first tensor,
      and 0s for padded indices.
  """
  condition_shape = shape(condition)
  n = condition_shape[-1]

  # Build a tensor that counts indices from 0 to length of condition.
  ixs = tf.broadcast_to(tf.range(n, dtype=tf.int32), condition_shape)

  # Build tensor where True condition values get their index value or
  # n (== len(condition)) otherwise.
  ixs = tf.where(condition, ixs, tf.ones_like(condition, dtype=tf.int32) * n)

  # Sort indices (so that indices for False values == n, will be placed last),
  # and get the desired number of entries, truncating by `length`.
  ixs = tf.sort(ixs)[Ellipsis, 0:length]

  # For first tensor, zero-out values == n. For second tensor, put 1s where
  # values are < n, and 0s where values are == 0.
  return tf.mod(ixs, n), (1 - tf.div(ixs, n)) 
Author: google-research, Project: language, Lines of code: 41, Source file: tensor_utils.py
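A small sketch of the padding behavior of padded_where (assumes the function above and the module's shape helper, which is not shown here, are available; the boolean values are made up):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

condition = tf.constant([False, True, False, True, True])
indices, mask = padded_where(condition, length=4)

with tf.Session() as sess:
    print(sess.run(indices))   # [1 3 4 0] -- True positions, right-padded with 0
    print(sess.run(mask))      # [1 1 1 0] -- 1 marks a valid index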

Example 11: pixel_wise_softmax

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import div [as alias]
def pixel_wise_softmax(output, name='pixel_wise_softmax'):
    """Return the softmax outputs of images, every pixels have multiple label, the sum of a pixel is 1.
    Usually be used for image segmentation.

    Parameters
    ------------
    output : tensor
        - For 2d image, 4D tensor [batch_size, height, width, channel], channel >= 2.
        - For 3d image, 5D tensor [batch_size, depth, height, width, channel], channel >= 2.

    Examples
    ---------
    >>> outputs = pixel_wise_softmax(network.outputs)
    >>> dice_loss = 1 - dice_coe(outputs, y_, epsilon=1e-5)

    References
    -----------
    - `tf.reverse <https://www.tensorflow.org/versions/master/api_docs/python/array_ops.html#reverse>`_
    """
    with tf.name_scope(name) as scope:
        return tf.nn.softmax(output)
        ## old implementation
        # exp_map = tf.exp(output)
        # if output.get_shape().ndims == 4:   # 2d image
        #     evidence = tf.add(exp_map, tf.reverse(exp_map, [False, False, False, True]))
        # elif output.get_shape().ndims == 5: # 3d image
        #     evidence = tf.add(exp_map, tf.reverse(exp_map, [False, False, False, False, True]))
        # else:
        #     raise Exception("output parameters should be 2d or 3d image, not %s" % str(output._shape))
        # return tf.div(exp_map, evidence) 
Author: ravisvi, Project: super-resolution-videos, Lines of code: 32, Source file: activation.py
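A tiny sketch (assumes TensorFlow is installed and the function above is in scope; shapes are illustrative) showing the docstring's claim that the channel values of each pixel sum to 1:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

logits = tf.random_uniform([1, 4, 4, 3])          # [batch, height, width, channel]
probs = pixel_wise_softmax(logits)

with tf.Session() as sess:
    p = sess.run(probs)
    print(p.sum(axis=-1))                         # all ones, one value per pixel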

Example 12: cross_entropy_seq_with_mask

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import div [as alias]
def cross_entropy_seq_with_mask(logits, target_seqs, input_mask, return_details=False, name=None):
    """Returns the expression of cross-entropy of two sequences, implement
    softmax internally. Normally be used for Dynamic RNN outputs.

    Parameters
    -----------
    logits : network identity outputs
        2D tensor, ``network.outputs``, [batch_size, number of output units].
    target_seqs : int tensor, e.g. word IDs.
        [batch_size, ?]
    input_mask : the mask to compute loss
        The same size with target_seqs, normally 0 and 1.
    return_details : boolean
        - If False (default), only returns the loss.
        - If True, returns the loss, losses, weights and targets (reshaped to one vector).

    Examples
    --------
    - see Image Captioning Example.
    """
    targets = tf.reshape(target_seqs, [-1])   # to one vector
    weights = tf.to_float(tf.reshape(input_mask, [-1]))   # to one vector like targets
    losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets, name=name) * weights
    #losses = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=targets, name=name)) # for TF1.0 and others

    try: ## TF1.0
        loss = tf.divide(tf.reduce_sum(losses),   # loss from mask. reduce_sum before element-wise mul with mask !!
                        tf.reduce_sum(weights),
                        name="seq_loss_with_mask")
    except: ## TF0.12
        loss = tf.div(tf.reduce_sum(losses),   # loss from mask. reduce_sum before element-wise mul with mask !!
                        tf.reduce_sum(weights),
                        name="seq_loss_with_mask")
    if return_details:
        return loss, losses, weights, targets
    else:
        return loss 
Author: ravisvi, Project: super-resolution-videos, Lines of code: 39, Source file: cost.py
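A toy usage sketch for the masked sequence loss above (assumes TensorFlow is installed and the function is in scope; the shapes and IDs are made up). Since the targets are flattened inside the function, the logits are expected to already be flattened to [batch_size * n_steps, vocab_size]:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

logits = tf.random_uniform([6, 5])                 # 2 sequences x 3 steps, vocab of 5
target_seqs = tf.constant([[1, 2, 0], [3, 0, 0]])  # word IDs
input_mask = tf.constant([[1, 1, 0], [1, 0, 0]])   # 1 = real token, 0 = padding

loss = cross_entropy_seq_with_mask(logits, target_seqs, input_mask)

with tf.Session() as sess:
    print(sess.run(loss))   # average cross-entropy over the 3 unmasked tokens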

Example 13: _gather_clone_loss

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import div [as alias]
def _gather_clone_loss(clone, num_clones, regularization_losses):
  """Gather the loss for a single clone.

  Args:
    clone: A Clone namedtuple.
    num_clones: The number of clones being deployed.
    regularization_losses: Possibly empty list of regularization_losses
      to add to the clone losses.

  Returns:
    A tensor for the total loss for the clone.  Can be None.
  """
  # The return value.
  sum_loss = None
  # Individual components of the loss that will need summaries.
  clone_loss = None
  regularization_loss = None
  # Compute and aggregate losses on the clone device.
  with tf.device(clone.device):
    all_losses = []
    clone_losses = tf.get_collection(tf.GraphKeys.LOSSES, clone.scope)
    if clone_losses:
      clone_loss = tf.add_n(clone_losses, name='clone_loss')
      if num_clones > 1:
        clone_loss = tf.div(clone_loss, 1.0 * num_clones,
                            name='scaled_clone_loss')
      all_losses.append(clone_loss)
    if regularization_losses:
      regularization_loss = tf.add_n(regularization_losses,
                                     name='regularization_loss')
      all_losses.append(regularization_loss)
    if all_losses:
      sum_loss = tf.add_n(all_losses)
  # Add the summaries out of the clone device block.
  if clone_loss is not None:
    tf.summary.scalar('/'.join(filter(None,
                                      ['Losses', clone.scope, 'clone_loss'])),
                      clone_loss)
  if regularization_loss is not None:
    tf.summary.scalar('Losses/regularization_loss', regularization_loss)
  return sum_loss 
Author: tensorflow, Project: models, Lines of code: 43, Source file: model_deploy.py

Example 14: preprocess_image

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import div [as alias]
def preprocess_image(image,
                     output_height,
                     output_width,
                     is_training,
                     use_grayscale=False):
  """Preprocesses the given image.

  Args:
    image: A `Tensor` representing an image of arbitrary size.
    output_height: The height of the image after preprocessing.
    output_width: The width of the image after preprocessing.
    is_training: `True` if we're preprocessing the image for training and
      `False` otherwise.
    use_grayscale: Whether to convert the image from RGB to grayscale.

  Returns:
    A preprocessed image.
  """
  del is_training  # Unused argument
  image = tf.to_float(image)
  if use_grayscale:
    image = tf.image.rgb_to_grayscale(image)
  image = tf.image.resize_image_with_crop_or_pad(
      image, output_width, output_height)
  image = tf.subtract(image, 128.0)
  image = tf.div(image, 128.0)
  return image 
Author: tensorflow, Project: models, Lines of code: 29, Source file: lenet_preprocessing.py
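A short usage sketch for the LeNet preprocessing above (assumes TensorFlow is installed and the function is in scope; the raw image size is illustrative):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

raw_image = tf.random_uniform([40, 50, 3], maxval=256, dtype=tf.int32)
processed = preprocess_image(raw_image, output_height=28, output_width=28,
                             is_training=False)

with tf.Session() as sess:
    out = sess.run(processed)
    print(out.shape)        # (28, 28, 3), values scaled to roughly [-1, 1]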

Example 15: calculate_frame_metrics

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import div [as alias]
def calculate_frame_metrics(frame_labels, frame_predictions):
  """Calculate frame-based metrics."""
  frame_labels_bool = tf.cast(frame_labels, tf.bool)
  frame_predictions_bool = tf.cast(frame_predictions, tf.bool)

  frame_true_positives = tf.reduce_sum(tf.to_float(tf.logical_and(
      tf.equal(frame_labels_bool, True),
      tf.equal(frame_predictions_bool, True))))
  frame_false_positives = tf.reduce_sum(tf.to_float(tf.logical_and(
      tf.equal(frame_labels_bool, False),
      tf.equal(frame_predictions_bool, True))))
  frame_false_negatives = tf.reduce_sum(tf.to_float(tf.logical_and(
      tf.equal(frame_labels_bool, True),
      tf.equal(frame_predictions_bool, False))))
  frame_accuracy = (
      tf.reduce_sum(
          tf.to_float(tf.equal(frame_labels_bool, frame_predictions_bool))) /
      tf.cast(tf.size(frame_labels), tf.float32))

  frame_precision = tf.where(
      tf.greater(frame_true_positives + frame_false_positives, 0),
      tf.div(frame_true_positives,
             frame_true_positives + frame_false_positives),
      0)
  frame_recall = tf.where(
      tf.greater(frame_true_positives + frame_false_negatives, 0),
      tf.div(frame_true_positives,
             frame_true_positives + frame_false_negatives),
      0)
  frame_f1_score = f1_score(frame_precision, frame_recall)
  frame_accuracy_without_true_negatives = accuracy_without_true_negatives(
      frame_true_positives, frame_false_positives, frame_false_negatives)

  return {
      'true_positives': [frame_true_positives],
      'false_positives': [frame_false_positives],
      'false_negatives': [frame_false_negatives],
      'accuracy': [frame_accuracy],
      'accuracy_without_true_negatives': [
          frame_accuracy_without_true_negatives],
      'precision': [frame_precision],
      'recall': [frame_recall],
      'f1_score': [frame_f1_score],
  } 
Author: magenta, Project: magenta, Lines of code: 46, Source file: metrics.py


Note: The tensorflow.compat.v1.div examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their original authors, who retain the copyright; consult each project's license before distributing or reusing the code, and do not republish without permission.