

Python v1.add_n Method Code Examples

This article collects typical usage examples of the Python method tensorflow.compat.v1.add_n. If you are wondering what v1.add_n does, how to use it, or what real-world calls look like, the curated code examples below may help. You can also explore further usage examples of tensorflow.compat.v1, the module this method belongs to.


The following presents 15 code examples of the v1.add_n method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
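
Before diving into the examples, here is a minimal self-contained sketch of the basic call (the variable names are illustrative): tf.add_n sums a list of tensors of identical shape and dtype element-wise.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

a = tf.constant([1.0, 2.0])
b = tf.constant([3.0, 4.0])
c = tf.constant([5.0, 6.0])
total = tf.add_n([a, b, c])  # element-wise sum: [9.0, 12.0]

with tf.Session() as sess:
  print(sess.run(total))  # [ 9. 12.]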

Example 1: loss_function

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add_n [as alias]
def loss_function(self, inputs, build_network_result):
    """Returns the op to measure the loss of the model."""
    logits = build_network_result.logits
    _, labels = inputs
    # TODO(laigd): consider putting the aux logit in the Inception model,
    # which could call super.loss_function twice, once with the normal logits
    # and once with the aux logits.
    aux_logits = build_network_result.extra_info
    with tf.name_scope('xentropy'):
      mlperf.logger.log(key=mlperf.tags.MODEL_HP_LOSS_FN, value=mlperf.tags.CCE)
      cross_entropy = tf.losses.sparse_softmax_cross_entropy(
          logits=logits, labels=labels)
      loss = tf.reduce_mean(cross_entropy, name='xentropy_mean')
    if aux_logits is not None:
      with tf.name_scope('aux_xentropy'):
        aux_cross_entropy = tf.losses.sparse_softmax_cross_entropy(
            logits=aux_logits, labels=labels)
        aux_loss = 0.4 * tf.reduce_mean(aux_cross_entropy, name='aux_loss')
        loss = tf.add_n([loss, aux_loss])
    return loss 
Developer ID: tensorflow, Project: benchmarks, Lines of code: 22, Source: model.py
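
A standalone sketch of the loss combination used in Example 1 (the constants stand in for the actual cross-entropy means): the auxiliary loss is down-weighted by 0.4 before tf.add_n sums it into the main loss.

main_loss = tf.constant(1.0)  # placeholder for the main cross-entropy mean
aux_xent = tf.constant(0.5)   # placeholder for the auxiliary cross-entropy mean
total_loss = tf.add_n([main_loss, 0.4 * aux_xent])  # 1.0 + 0.2 == 1.2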

Example 2: weight_decay

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add_n [as alias]
def weight_decay(decay_rate, var_list, skip_biases=True):
  """Apply weight decay to vars in var_list."""
  if not decay_rate:
    return 0.

  tf.logging.info("Applying weight decay, decay_rate: %0.5f", decay_rate)

  weight_decays = []
  for v in var_list:
    # Weight decay.
    # This is a heuristic way to detect biases that works for main tf.layers.
    is_bias = len(v.shape.as_list()) == 1 and v.name.endswith("bias:0")
    if not (skip_biases and is_bias):
      with tf.device(v.device):
        v_loss = tf.nn.l2_loss(v)
      weight_decays.append(v_loss)

  return tf.add_n(weight_decays) * decay_rate 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 20, Source: optimize.py
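
A hypothetical usage sketch for weight_decay: here model_loss is assumed to be a scalar loss from an existing tf.compat.v1 graph, and the decay term is computed over all trainable variables.

decay_loss = weight_decay(
    decay_rate=1e-4, var_list=tf.trainable_variables(), skip_biases=True)
total_loss = model_loss + decay_loss  # model_loss: assumed pre-existing scalar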

Example 3: average_sharded_losses

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add_n [as alias]
def average_sharded_losses(sharded_losses):
  """Average losses across datashards.

  Args:
    sharded_losses: list<dict<str loss_name, Tensor loss>>. The loss
      can be a single Tensor or a 2-tuple (numerator and denominator).

  Returns:
    losses: dict<str loss_name, Tensor avg_loss>
  """
  losses = {}
  for loss_name in sorted(sharded_losses[0]):
    all_shards = [shard_losses[loss_name] for shard_losses in sharded_losses]
    if isinstance(all_shards[0], tuple):
      sharded_num, sharded_den = zip(*all_shards)
      mean_loss = (
          tf.add_n(sharded_num) / tf.maximum(
              tf.cast(1.0, sharded_den[0].dtype), tf.add_n(sharded_den)))
    else:
      mean_loss = tf.reduce_mean(all_shards)

    losses[loss_name] = mean_loss
  return losses 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 25, Source: t2t_model.py
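
A hypothetical usage sketch for average_sharded_losses: each shard contributes a dict mapping a loss name to a (numerator, denominator) pair, and the averaged loss is add_n(numerators) / add_n(denominators).

sharded_losses = [
    {"training": (tf.constant(4.0), tf.constant(2.0))},  # shard 0
    {"training": (tf.constant(6.0), tf.constant(3.0))},  # shard 1
]
losses = average_sharded_losses(sharded_losses)
# losses["training"] evaluates to (4 + 6) / max(1, 2 + 3) == 2.0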

Example 4: _grad_sparsity

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add_n [as alias]
def _grad_sparsity(self):
    """Gradient sparsity."""
    # If the sparse minibatch gradient has 10 percent of its entries
    # non-zero, its sparsity is 0.1.
    # The norm of the dense gradient averaged over the full dataset is
    # roughly estimated as the minibatch sparse gradient norm *
    # sqrt(sparsity).
    # A possible extension is to correct only the sparse blob.
    non_zero_cnt = tf.add_n([tf.count_nonzero(g) for g in self._grad])
    all_entry_cnt = tf.add_n([tf.size(g) for g in self._grad])
    self._sparsity = tf.cast(non_zero_cnt, self._grad[0].dtype)
    self._sparsity /= tf.cast(all_entry_cnt, self._grad[0].dtype)
    avg_op = self._moving_averager.apply([self._sparsity,])
    with tf.control_dependencies([avg_op]):
      self._sparsity_avg = self._moving_averager.average(self._sparsity)
    return avg_op 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 18, Source: yellowfin.py
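
A standalone sketch of the sparsity computation in Example 4, assuming a plain list of gradient tensors grads and omitting the moving average:

grads = [tf.constant([0.0, 1.0, 0.0, 2.0]), tf.constant([0.0, 0.0, 3.0])]
non_zero_cnt = tf.add_n([tf.count_nonzero(g) for g in grads])  # 3
all_entry_cnt = tf.add_n([tf.size(g) for g in grads])          # 7
sparsity = (tf.cast(non_zero_cnt, tf.float32) /
            tf.cast(all_entry_cnt, tf.float32))                # ~0.429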

Example 5: testAddN

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add_n [as alias]
def testAddN(self):
    inputs = tf.zeros([2, 4, 4, 3])
    identity1 = tf.identity(inputs)
    identity2 = tf.identity(inputs)
    identity3 = tf.identity(inputs)
    identity4 = tf.identity(inputs)
    add_n = tf.add_n([identity1, identity2, identity3, identity4])
    batch_norm = layers.batch_norm(add_n)

    manager = orm.OpRegularizerManager(
        [batch_norm.op], op_handler_dict=self._default_op_handler_dict)

    op_slices = manager.get_op_slices(identity1.op)
    self.assertLen(op_slices, 1)
    op_group = manager.get_op_group(op_slices[0]).op_slices

    # Verify all ops are in the same group.
    for test_op in (identity1.op, identity2.op, identity3.op, identity4.op,
                    add_n.op, batch_norm.op):
      test_op_slices = manager.get_op_slices(test_op)
      self.assertLen(test_op_slices, 1)
      self.assertIn(test_op_slices[0], op_group) 
Developer ID: google-research, Project: morph-net, Lines of code: 24, Source: op_regularizer_manager_test.py

Example 6: testAddN_Duplicates

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add_n [as alias]
def testAddN_Duplicates(self):
    inputs = tf.zeros([2, 4, 4, 3])
    identity = tf.identity(inputs)
    add_n = tf.add_n([identity, identity, identity, identity])
    batch_norm = layers.batch_norm(add_n)

    manager = orm.OpRegularizerManager(
        [batch_norm.op], op_handler_dict=self._default_op_handler_dict)

    op_slices = manager.get_op_slices(identity.op)
    self.assertLen(op_slices, 1)
    op_group = manager.get_op_group(op_slices[0]).op_slices

    # Verify all ops are in the same group.
    for test_op in (identity.op, add_n.op, batch_norm.op):
      test_op_slices = manager.get_op_slices(test_op)
      self.assertLen(test_op_slices, 1)
      self.assertIn(test_op_slices[0], op_group) 
Developer ID: google-research, Project: morph-net, Lines of code: 20, Source: op_regularizer_manager_test.py

Example 7: testCorrectSourceOpsWithSkipConnection

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add_n [as alias]
def testCorrectSourceOpsWithSkipConnection(self):
    inputs = tf.zeros([2, 4, 4, 3])
    x0 = layers.conv2d(
        inputs, num_outputs=8, kernel_size=3, activation_fn=None, scope='conv0')
    x1 = tf.nn.relu(layers.batch_norm(x0, scale=True, scope='bn0'))
    x1 = layers.conv2d(
        x1, num_outputs=8, kernel_size=3, activation_fn=None, scope='conv1')
    x2 = tf.add_n([x0, x1], name='add')
    final_op = tf.nn.relu(layers.batch_norm(x2, scale=True, scope='bn1'))

    op_handler_dict = self._default_op_handler_dict
    op_reg_manager = orm.OpRegularizerManager([final_op.op], op_handler_dict)

    # All ops are in the same group
    group = list(op_reg_manager._op_group_dict.values())[0]
    source_op_names = [s.op.name for s in group.source_op_slices]
    self.assertSetEqual(set(['bn0/FusedBatchNormV3', 'bn1/FusedBatchNormV3']),
                        set(source_op_names)) 
Developer ID: google-research, Project: morph-net, Lines of code: 20, Source: op_regularizer_manager_test.py

Example 8: __init__

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add_n [as alias]
def __init__(self, regularizers_to_group):
    """Creates an instance.

    Args:
      regularizers_to_group: A list of generic_regularizers.OpRegularizer
        objects. Their regularization_vectors (alive_vectors) are expected
        to be of the same length.

    Raises:
      ValueError: If regularizers_to_group has fewer than 2 elements.
    """
    if len(regularizers_to_group) < 2:
      raise ValueError('Groups must be of at least size 2.')
    self._regularization_vector = tf.add_n(
        [r.regularization_vector for r in regularizers_to_group])
    self._alive_vector = tf.cast(
        tf.ones(self._regularization_vector.get_shape()[-1]), tf.bool) 
Developer ID: google-research, Project: morph-net, Lines of code: 19, Source: op_regularizer_manager_test.py

Example 9: add_context

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add_n [as alias]
def add_context(net, context):
  """Merges visual perception with context using elementwise addition.

  Actions are reshaped to match net dimension depth-wise, and are added to
  the conv layers by broadcasting element-wise across H, W extent.

  Args:
    net: Tensor of shape [batch_size, H, W, C].
    context: Tensor of shape [batch_size * num_examples, C].
  Returns:
    Tensor with shape [batch_size * num_examples, H, W, C]
  """
  num_batch_net = tf.shape(net)[0]
  _, h, w, d1 = net.get_shape().as_list()
  _, d2 = context.get_shape().as_list()
  assert d1 == d2
  context = tf.reshape(context, [num_batch_net, -1, d2])
  net_examples = tile_to_match_context(net, context)
  # Flatten first two dimensions.
  net = tf.reshape(net_examples, [-1, h, w, d1])
  context = tf.reshape(context, [-1, 1, 1, d2])
  context = tf.tile(context, [1, h, w, 1])
  net = tf.add_n([net, context])
  return net 
Developer ID: google-research, Project: tensor2robot, Lines of code: 26, Source: tf_modules.py
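
A simplified sketch of the broadcast pattern in add_context, assuming num_examples == 1 so that tile_to_match_context is effectively a no-op:

net = tf.zeros([2, 4, 4, 3])              # [batch, H, W, C]
context = tf.ones([2, 3])                 # [batch, C]
context = tf.reshape(context, [-1, 1, 1, 3])
context = tf.tile(context, [1, 4, 4, 1])  # broadcast across the H, W extent
merged = tf.add_n([net, context])         # shape [2, 4, 4, 3]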

Example 10: _all_reduce_using_copy

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add_n [as alias]
def _all_reduce_using_copy(tensors_across_devices, use_mean):
  """Does an all-reduce of a list of tensors by copying to the current device.

  The tensors are copied to the current device and then reduced.

  Args:
    tensors_across_devices: A list of tensors, each on a different device.
    use_mean: Whether to take the mean of the tensors instead of a sum.
  Returns:
    A reduced tensor on the current device.
  """
  reduced_tensor = tf.add_n(tensors_across_devices)
  if use_mean:
    reduced_tensor *= 1 / len(tensors_across_devices)
  return reduced_tensor 
Developer ID: tensorflow, Project: benchmarks, Lines of code: 17, Source: batch_allreduce.py
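
A hypothetical usage sketch for _all_reduce_using_copy: averaging three per-device copies of the same tensor (the device comments are illustrative).

tensors = [tf.constant([1.0, 2.0]),  # e.g. copied from /gpu:0
           tf.constant([3.0, 4.0]),  # e.g. copied from /gpu:1
           tf.constant([5.0, 6.0])]  # e.g. copied from /gpu:2
mean_tensor = _all_reduce_using_copy(tensors, use_mean=True)  # [3.0, 4.0]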

Example 11: aggregate_indexed_slices_gradients

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add_n [as alias]
def aggregate_indexed_slices_gradients(grads):
  """Aggregates gradients containing `IndexedSlices`s."""
  if len(grads) < 1:
    return None
  elif len(grads) == 1:
    return grads[0]
  else:
    grads = [g for g in grads if g is not None]
    # If any gradient is a `Tensor`, sum them up and return a dense tensor
    # object.
    if any(isinstance(g, ops.Tensor) for g in grads):
      return math_ops.add_n(grads)

    # The following `_as_indexed_slices_list` casts ids of IndexedSlices into
    # int64. This ensures the inputs of `concat` all have the same data
    # type.
    grads = math_ops._as_indexed_slices_list(grads)  # pylint: disable=protected-access

    grads = [flatten_nested_indexed_slices(x) for x in grads]
    # Form IndexedSlices out of the concatenated values and indices.
    concat_grad = ops.IndexedSlices(
        array_ops.concat([x.values for x in grads], axis=0),
        array_ops.concat([x.indices for x in grads], axis=0),
        grads[0].dense_shape)

    return concat_grad 
Developer ID: tensorflow, Project: benchmarks, Lines of code: 28, Source: variable_mgr_util.py
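
A hypothetical usage sketch for aggregate_indexed_slices_gradients with two sparse gradients of the same [4, 2] variable; it assumes the module's flatten_nested_indexed_slices helper is in scope, as in the original file.

g0 = tf.IndexedSlices(values=tf.constant([[1.0, 1.0]]),
                      indices=tf.constant([0], dtype=tf.int64),
                      dense_shape=tf.constant([4, 2], dtype=tf.int64))
g1 = tf.IndexedSlices(values=tf.constant([[2.0, 2.0]]),
                      indices=tf.constant([3], dtype=tf.int64),
                      dense_shape=tf.constant([4, 2], dtype=tf.int64))
agg = aggregate_indexed_slices_gradients([g0, g1])
# agg.indices concatenates to [0, 3]; agg.values to [[1., 1.], [2., 2.]].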

Example 12: aggregate_single_gradient_using_copy

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add_n [as alias]
def aggregate_single_gradient_using_copy(grad_and_vars, use_mean,
                                         check_inf_nan):
  """Calculate the average gradient for a shared variable across all towers.

  Note that this function provides a synchronization point across all towers.

  Args:
    grad_and_vars: A list or tuple of (gradient, variable) tuples. Each
      (gradient, variable) pair within the outer list represents the gradient
      of the variable calculated for a single tower, and the number of pairs
      equals the number of towers.
    use_mean: if True, mean is taken, else sum of gradients is taken.
    check_inf_nan: check grads for nans and infs.

  Returns:
    The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
      gradient has been averaged across all towers. The variable is chosen
      from the first tower. has_nan_or_inf indicates whether the grads
      contain NaN or Inf values.
  """
  grads = [g for g, _ in grad_and_vars]
  if any(isinstance(g, tf.IndexedSlices) for g in grads):
    # TODO(reedwm): All-reduce IndexedSlices more effectively.
    grad = aggregate_indexed_slices_gradients(grads)
  else:
    grad = tf.add_n(grads)

  if use_mean and len(grads) > 1:
    grad = tf.scalar_mul(1.0 / len(grads), grad)

  v = grad_and_vars[0][1]
  if check_inf_nan:
    with tf.name_scope('check_for_inf_and_nan'):
      has_nan_or_inf = tf.logical_not(tf.reduce_all(tf.is_finite(grads)))
    return (grad, v), has_nan_or_inf
  else:
    return (grad, v), None


# This class is copied from
# https://github.com/tensorflow/tensorflow/blob/590d6eef7e91a6a7392c8ffffb7b58f2e0c8bc6b/tensorflow/contrib/training/python/training/device_setter.py#L56.
# We copy it since contrib has been removed from TensorFlow. 
Developer ID: tensorflow, Project: benchmarks, Lines of code: 43, Source: variable_mgr_util.py
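
A hypothetical usage sketch for aggregate_single_gradient_using_copy: averaging the gradient of one shared variable across two towers (var stands in for the shared variable).

var = tf.Variable([0.0, 0.0])
grad_and_vars = [(tf.constant([1.0, 2.0]), var),  # tower 0
                 (tf.constant([3.0, 4.0]), var)]  # tower 1
(avg_grad, v), has_nan_or_inf = aggregate_single_gradient_using_copy(
    grad_and_vars, use_mean=True, check_inf_nan=False)
# avg_grad == [2.0, 3.0]; has_nan_or_inf is None when check_inf_nan=False.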

Example 13: reduce_by_device

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add_n [as alias]
def reduce_by_device(parallelism, data, reduce_fn):
  """Reduces data per device.

  This can be useful, for example, if we want to all-reduce n tensors on k<n
  devices (like during eval when we have only one device).  We call
  reduce_by_device() to first sum the tensors per device, then call our usual
  all-reduce operation to create one sum per device, followed by
  expand_by_device, to create the appropriate number of pointers to these
  results.  See all_reduce_ring() below for an example of how this is used.

  Args:
    parallelism: an expert_utils.Parallelism object
    data: a list of Tensors with length parallelism.n
    reduce_fn: a function taking a list of Tensors.  e.g. tf.add_n

  Returns:
    device_parallelism: a Parallelism object with each device listed only once.
    reduced_data: A list of Tensors, one per device.
  """
  unique_devices = []
  device_to_data = {}
  for dev, datum in zip(parallelism.devices, data):
    if dev not in device_to_data:
      unique_devices.append(dev)
      device_to_data[dev] = [datum]
    else:
      device_to_data[dev].append(datum)
  device_parallelism = Parallelism(unique_devices)
  grouped_data = [device_to_data[dev] for dev in unique_devices]
  return device_parallelism, device_parallelism(reduce_fn, grouped_data) 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 32, Source: expert_utils.py
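
A hypothetical usage sketch for reduce_by_device, assuming an expert_utils.Parallelism built over devices where some repeat (as happens in single-device eval); the device names are illustrative.

parallelism = Parallelism(["/gpu:0", "/gpu:0", "/gpu:1"])
data = [tf.constant(1.0), tf.constant(2.0), tf.constant(3.0)]
device_parallelism, reduced = reduce_by_device(parallelism, data, tf.add_n)
# device_parallelism covers ["/gpu:0", "/gpu:1"]; the two tensors on
# /gpu:0 are summed, so reduced evaluates to [3.0, 3.0].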

Example 14: loss

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add_n [as alias]
def loss(self, logits, features):
    if isinstance(logits, dict):
      losses = {}
      for k, v in six.iteritems(logits):
        losses[k] = self._loss_single(
            v,
            k,
            features[k],
            weights=features.get(k + "_mask"))

        n, d = losses[k]
        if common_layers.should_generate_summaries():
          tf.summary.scalar(k + "_loss", n / d)
          tf.summary.scalar(k + "_loss_num", n)
          tf.summary.scalar(k + "_loss_den", d)
          if getattr(self.hparams, "visualize_logits_histogram", False):
            hist = tf.summary.histogram
            hist(k + "_predict", tf.argmax(tf.squeeze(v), axis=-1))
            hist(k + "_targets", features[k])

      return tf.add_n([n / d for n, d in losses.values()])
    else:
      return self._loss_single(
          logits,
          "targets",
          features["targets"],
          weights=features.get("targets_mask")) 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 29, Source: t2t_model.py
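
A standalone sketch of the final reduction in the dict branch of Example 14: each entry of losses is a (numerator, denominator) pair, and the total loss is the sum of the ratios.

losses = {"targets": (tf.constant(6.0), tf.constant(2.0)),
          "aux": (tf.constant(3.0), tf.constant(3.0))}
total = tf.add_n([n / d for n, d in losses.values()])  # 3.0 + 1.0 == 4.0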

Example 15: _normalize_body_output

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import add_n [as alias]
def _normalize_body_output(self, body_out):
    if isinstance(body_out, tuple):
      output, losses = body_out
      if isinstance(losses, (list, tuple)):
        losses = {"extra": tf.add_n([tf.reduce_mean(l) for l in losses])}
      elif isinstance(losses, dict):
        pass
      else:
        losses = {"extra": tf.reduce_mean(losses)}
    else:
      output = body_out
      losses = {"extra": 0.0}

    return output, losses 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 16, Source: t2t_model.py
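
A standalone sketch of the list branch in Example 15: a list of per-position losses is reduced to a single scalar stored under the "extra" key.

body_losses = [tf.constant([1.0, 3.0]), tf.constant(2.0)]
extra = tf.add_n([tf.reduce_mean(l) for l in body_losses])  # 2.0 + 2.0 == 4.0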


Note: The tensorflow.compat.v1.add_n method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors. Please refer to the corresponding project's License before distributing or using the code; do not reproduce without permission.