

Python nn_ops.sparse_softmax_cross_entropy_with_logits Method Code Examples

This article collects typical usage examples of the Python method tensorflow.python.ops.nn_ops.sparse_softmax_cross_entropy_with_logits. If you are wondering how nn_ops.sparse_softmax_cross_entropy_with_logits is used in Python, or what it is used for, the curated code examples below should help. You can also explore further usage examples from its containing module, tensorflow.python.ops.nn_ops.


The following presents 14 code examples of the nn_ops.sparse_softmax_cross_entropy_with_logits method, sorted by popularity by default.
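Before diving into the examples, a minimal sketch of the op's interface may be useful. This is a hedged illustration with made-up values (TF 1.x graph mode), not taken from any of the projects below: labels are integer class ids of shape [batch_size], logits are floats of shape [batch_size, num_classes], and the result is a per-example cross-entropy of shape [batch_size].

# Hedged sketch: basic interface of sparse_softmax_cross_entropy_with_logits (TF 1.x).
import tensorflow as tf
from tensorflow.python.ops import nn_ops

logits = tf.constant([[2.0, 0.5, -1.0],
                      [0.1, 0.2, 3.0]])       # shape [batch_size=2, num_classes=3]
labels = tf.constant([0, 2], dtype=tf.int32)  # shape [batch_size=2], integer class ids

# Per-example loss, shape [batch_size]; no one-hot encoding or explicit softmax is needed.
loss = nn_ops.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)

with tf.Session() as sess:
    print(sess.run(loss))  # two per-example cross-entropy values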

Example 1: loss

# Required module import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import sparse_softmax_cross_entropy_with_logits [as alias]
def loss(self, data, labels):
    """The loss to minimize while training."""

    if self.is_regression:
      diff = self.training_inference_graph(data) - math_ops.to_float(labels)
      mean_squared_error = math_ops.reduce_mean(diff * diff)
      root_mean_squared_error = math_ops.sqrt(mean_squared_error, name="loss")
      loss = root_mean_squared_error
    else:
      loss = math_ops.reduce_mean(
          nn_ops.sparse_softmax_cross_entropy_with_logits(
              labels=array_ops.squeeze(math_ops.to_int32(labels)),
              logits=self.training_inference_graph(data)),
          name="loss")
    if self.regularizer:
      loss += layers.apply_regularization(self.regularizer,
                                          variables.trainable_variables())
    return loss 
Developer: ryfeus, Project: lambda-packs, Lines: 20, Source: hybrid_model.py

Example 2: test_binary_ops

# Required module import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import sparse_softmax_cross_entropy_with_logits [as alias]
def test_binary_ops(self):
    ops = [
        ('sigmoid_cross_entropy_with_logits',
         nn_impl.sigmoid_cross_entropy_with_logits,
         nn.sigmoid_cross_entropy_with_logits),
        ('softmax_cross_entropy_with_logits',
         nn_ops.softmax_cross_entropy_with_logits,
         nn.softmax_cross_entropy_with_logits),
        ('sparse_softmax_cross_entropy_with_logits',
         nn_ops.sparse_softmax_cross_entropy_with_logits,
         nn.sparse_softmax_cross_entropy_with_logits),
    ]
    for op_name, tf_op, lt_op in ops:
      golden_tensor = tf_op(self.original_lt.tensor, self.other_lt.tensor)
      golden_lt = core.LabeledTensor(golden_tensor, self.axes)
      actual_lt = lt_op(self.original_lt, self.other_lt)
      self.assertIn(op_name, actual_lt.name)
      self.assertLabeledTensorsEqual(golden_lt, actual_lt) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 20, Source: nn_test.py

Example 3: loss

# Required module import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import sparse_softmax_cross_entropy_with_logits [as alias]
def loss(self, data, labels):
    """The loss to minimize while training."""

    if self.is_regression:
      diff = self.training_inference_graph(data) - math_ops.to_float(labels)
      mean_squared_error = math_ops.reduce_mean(diff * diff)
      root_mean_squared_error = math_ops.sqrt(mean_squared_error, name="loss")
      loss = root_mean_squared_error
    else:
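      # Note: this call passes (logits, labels) positionally, which only works on
      # pre-1.0 TensorFlow; TF 1.x and later require the keyword form labels=..., logits=...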
      loss = math_ops.reduce_mean(
          nn_ops.sparse_softmax_cross_entropy_with_logits(
              self.training_inference_graph(data),
              array_ops.squeeze(math_ops.to_int32(labels))),
          name="loss")
    if self.regularizer:
      loss += layers.apply_regularization(self.regularizer,
                                          variables.trainable_variables())
    return loss 
Developer: tobegit3hub, Project: deep_image_model, Lines: 20, Source: hybrid_model.py

Example 4: _log_prob

# Required module import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import sparse_softmax_cross_entropy_with_logits [as alias]
def _log_prob(self, k):
    k = ops.convert_to_tensor(k, name="k")
    if self.logits.get_shape()[:-1] == k.get_shape():
      logits = self.logits
    else:
      logits = self.logits * array_ops.ones_like(
          array_ops.expand_dims(k, -1), dtype=self.logits.dtype)
      logits_shape = array_ops.shape(logits)[:-1]
      k *= array_ops.ones(logits_shape, dtype=k.dtype)
      k.set_shape(tensor_shape.TensorShape(logits.get_shape()[:-1]))
    return -nn_ops.sparse_softmax_cross_entropy_with_logits(labels=k,
                                                            logits=logits) 
Developer: ryfeus, Project: lambda-packs, Lines: 14, Source: categorical.py
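The negation above works because -sparse_softmax_cross_entropy_with_logits(labels=k, logits=logits) equals log_softmax(logits)[k], i.e. the categorical log probability of class k. A small hedged sanity check of that identity (illustrative values, TF 1.x):

import numpy as np
import tensorflow as tf
from tensorflow.python.ops import nn_ops

logits = tf.constant([[1.0, 2.0, 0.5]])  # one batch element, three classes
k = tf.constant([1], dtype=tf.int32)     # query class index

log_prob_via_xent = -nn_ops.sparse_softmax_cross_entropy_with_logits(labels=k, logits=logits)
log_prob_direct = tf.nn.log_softmax(logits)[0, 1]

with tf.Session() as sess:
    via_xent, direct = sess.run([log_prob_via_xent, log_prob_direct])
    np.testing.assert_allclose(via_xent[0], direct, rtol=1e-6)  # both equal log p(k=1)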

Example 5: _log_prob

# Required module import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import sparse_softmax_cross_entropy_with_logits [as alias]
def _log_prob(self, k):
    k = ops.convert_to_tensor(k, name="k")
    if self.logits.get_shape()[:-1] == k.get_shape():
      logits = self.logits
    else:
      logits = self.logits * array_ops.ones_like(
          array_ops.expand_dims(k, -1), dtype=self.logits.dtype)
      logits_shape = array_ops.shape(logits)[:-1]
      k *= array_ops.ones(logits_shape, dtype=k.dtype)
      k.set_shape(tensor_shape.TensorShape(logits.get_shape()[:-1]))
    return -nn_ops.sparse_softmax_cross_entropy_with_logits(logits, k) 
Developer: tobegit3hub, Project: deep_image_model, Lines: 13, Source: categorical.py

Example 6: sequence_loss_by_example

# Required module import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import sparse_softmax_cross_entropy_with_logits [as alias]
def sequence_loss_by_example(logits, targets, weights,
                             average_across_timesteps=True,
                             softmax_loss_function=None, name=None):
  if len(targets) != len(logits) or len(weights) != len(logits):
    raise ValueError("Lengths of logits, weights, and targets must be the same "
                     "%d, %d, %d." % (len(logits), len(weights), len(targets)))
  with ops.name_scope(name, "sequence_loss_by_example",
                      logits + targets + weights):
    log_perp_list = []
    for logit, target, weight in zip(logits, targets, weights):
      if softmax_loss_function is None:
        # TODO(irving,ebrevdo): This reshape is needed because
        # sequence_loss_by_example is called with scalars sometimes, which
        # violates our general scalar strictness policy.
        target = array_ops.reshape(target, [-1])
        crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(
            logit, target)
      else:
        crossent = softmax_loss_function(target, logit)
      log_perp_list.append(crossent * weight)
    log_perps = math_ops.add_n(log_perp_list)
    if average_across_timesteps:
      total_size = math_ops.add_n(weights)
      total_size += 1e-12  # Just to avoid division by 0 for all-0 weights.
      log_perps /= total_size
  return log_perps 
Developer: andi611, Project: Conditional-SeqGAN-Tensorflow, Lines: 28, Source: tf_seq2seq_model.py

Example 7: sequence_loss_by_example

# Required module import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import sparse_softmax_cross_entropy_with_logits [as alias]
def sequence_loss_by_example(logits, targets, weights,
                             average_across_timesteps=True,
                             softmax_loss_function=None, name=None):
  """Weighted cross-entropy loss for a sequence of logits (per example).

  Args:
    logits: List of 2D Tensors of shape [batch_size x num_decoder_symbols].
    targets: List of 1D batch-sized int32 Tensors of the same length as logits.
    weights: List of 1D batch-sized float-Tensors of the same length as logits.
    average_across_timesteps: If set, divide the returned cost by the total
      label weight.
    softmax_loss_function: Function (inputs-batch, labels-batch) -> loss-batch
      to be used instead of the standard softmax (the default if this is None).
    name: Optional name for this operation, default: "sequence_loss_by_example".

  Returns:
    1D batch-sized float Tensor: The log-perplexity for each sequence.

  Raises:
    ValueError: If len(logits) is different from len(targets) or len(weights).
  """
  if len(targets) != len(logits) or len(weights) != len(logits):
    raise ValueError("Lengths of logits, weights, and targets must be the same "
                     "%d, %d, %d." % (len(logits), len(weights), len(targets)))
  with ops.op_scope(logits + targets + weights, name,
                    "sequence_loss_by_example"):
    log_perp_list = []
    for logit, target, weight in zip(logits, targets, weights):
      if softmax_loss_function is None:
        target = array_ops.reshape(target, [-1])
        crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(
            logit, target)
      else:
        crossent = softmax_loss_function(logit, target)
      log_perp_list.append(crossent * weight)
    log_perps = math_ops.add_n(log_perp_list)
    if average_across_timesteps:
      total_size = math_ops.add_n(weights)
      total_size += 1e-12  # Just to avoid division by 0 for all-0 weights.
      log_perps /= total_size
  return log_perps 
Developer: pbhatia243, Project: Neural_Conversation_Models, Lines: 43, Source: my_seq2seq.py

Example 8: _log_prob

# Required module import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import sparse_softmax_cross_entropy_with_logits [as alias]
def _log_prob(self, k):
    k = ops.convert_to_tensor(k, name="k")
    if self.validate_args:
      k = distribution_util.embed_check_integer_casting_closed(
          k, target_dtype=dtypes.int32)
    k, logits = _broadcast_cat_event_and_params(
        k, self.logits, base_dtype=self.dtype.base_dtype)

    return -nn_ops.sparse_softmax_cross_entropy_with_logits(labels=k,
                                                            logits=logits) 
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines: 12, Source: categorical.py

Example 9: sequence_loss_by_example

# Required module import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import sparse_softmax_cross_entropy_with_logits [as alias]
def sequence_loss_by_example(logits,
                             targets,
                             weights,
                             average_across_timesteps=True,
                             softmax_loss_function=None,
                             name=None):
  """Weighted cross-entropy loss for a sequence of logits (per example).

  Args:
    logits: List of 2D Tensors of shape [batch_size x num_decoder_symbols].
    targets: List of 1D batch-sized int32 Tensors of the same length as logits.
    weights: List of 1D batch-sized float-Tensors of the same length as logits.
    average_across_timesteps: If set, divide the returned cost by the total
      label weight.
    softmax_loss_function: Function (labels, logits) -> loss-batch
      to be used instead of the standard softmax (the default if this is None).
      **Note that to avoid confusion, it is required for the function to accept
      named arguments.**
    name: Optional name for this operation, default: "sequence_loss_by_example".

  Returns:
    1D batch-sized float Tensor: The log-perplexity for each sequence.

  Raises:
    ValueError: If len(logits) is different from len(targets) or len(weights).
  """
  if len(targets) != len(logits) or len(weights) != len(logits):
    raise ValueError("Lengths of logits, weights, and targets must be the same "
                     "%d, %d, %d." % (len(logits), len(weights), len(targets)))
  with ops.name_scope(name, "sequence_loss_by_example",
                      logits + targets + weights):
    log_perp_list = []
    for logit, target, weight in zip(logits, targets, weights):
      if softmax_loss_function is None:
        # TODO(irving,ebrevdo): This reshape is needed because
        # sequence_loss_by_example is called with scalars sometimes, which
        # violates our general scalar strictness policy.
        target = array_ops.reshape(target, [-1])
        crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(
            labels=target, logits=logit)
      else:
        crossent = softmax_loss_function(labels=target, logits=logit)
      log_perp_list.append(crossent * weight)
    log_perps = math_ops.add_n(log_perp_list)
    if average_across_timesteps:
      total_size = math_ops.add_n(weights)
      total_size += 1e-12  # Just to avoid division by 0 for all-0 weights.
      log_perps /= total_size
  return log_perps 
Developer: ryfeus, Project: lambda-packs, Lines: 51, Source: seq2seq.py
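As a usage illustration for the function above, here is a hedged toy driver (all shapes and values are invented; sequence_loss_by_example refers to the definition in this example): three decoding time steps, batch size 2, vocabulary of 5 symbols.

import tensorflow as tf

batch_size, num_steps, vocab_size = 2, 3, 5

# One [batch_size, vocab_size] logits tensor per time step.
logits = [tf.random_normal([batch_size, vocab_size]) for _ in range(num_steps)]
# One [batch_size] int32 target tensor per time step.
targets = [tf.constant([1, 3], dtype=tf.int32) for _ in range(num_steps)]
# One [batch_size] float weight tensor per time step (e.g. a padding mask).
weights = [tf.ones([batch_size]) for _ in range(num_steps)]

per_example_loss = sequence_loss_by_example(logits, targets, weights)

with tf.Session() as sess:
    print(sess.run(per_example_loss))  # shape [batch_size]: per-sequence log-perplexity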

Example 10: sequence_loss_by_example

# Required module import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import sparse_softmax_cross_entropy_with_logits [as alias]
def sequence_loss_by_example(logits,
                             targets,
                             weights,
                             average_across_timesteps=True,
                             softmax_loss_function=None,
                             name=None):
  """Weighted cross-entropy loss for a sequence of logits (per example).

  Args:
    logits: List of 2D Tensors of shape [batch_size x num_decoder_symbols].
    targets: List of 1D batch-sized int32 Tensors of the same length as logits.
    weights: List of 1D batch-sized float-Tensors of the same length as logits.
    average_across_timesteps: If set, divide the returned cost by the total
      label weight.
    softmax_loss_function: Function (labels-batch, inputs-batch) -> loss-batch
      to be used instead of the standard softmax (the default if this is None).
    name: Optional name for this operation, default: "sequence_loss_by_example".

  Returns:
    1D batch-sized float Tensor: The log-perplexity for each sequence.

  Raises:
    ValueError: If len(logits) is different from len(targets) or len(weights).
  """
  if len(targets) != len(logits) or len(weights) != len(logits):
    raise ValueError("Lengths of logits, weights, and targets must be the same "
                     "%d, %d, %d." % (len(logits), len(weights), len(targets)))
  with ops.name_scope(name, "sequence_loss_by_example",
                      logits + targets + weights):
    log_perp_list = []
    for logit, target, weight in zip(logits, targets, weights):
      if softmax_loss_function is None:
        # TODO(irving,ebrevdo): This reshape is needed because
        # sequence_loss_by_example is called with scalars sometimes, which
        # violates our general scalar strictness policy.
        target = array_ops.reshape(target, [-1])
        crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(
            labels=target, logits=logit)
      else:
        crossent = softmax_loss_function(target, logit)
      log_perp_list.append(crossent * weight)
    log_perps = math_ops.add_n(log_perp_list)
    if average_across_timesteps:
      total_size = math_ops.add_n(weights)
      total_size += 1e-12  # Just to avoid division by 0 for all-0 weights.
      log_perps /= total_size
  return log_perps 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 49, Source: seq2seq.py

Example 11: sequence_loss_by_example

# Required module import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import sparse_softmax_cross_entropy_with_logits [as alias]
def sequence_loss_by_example(logits, targets, weights,
                             average_across_timesteps=True,
                             softmax_loss_function=None, name=None):
    """Weighted cross-entropy loss for a sequence of logits (per example).

    Args:
      logits: List of 2D Tensors of shape [batch_size x num_decoder_symbols].
      targets: List of 1D batch-sized int32 Tensors of the same length as logits.
      weights: List of 1D batch-sized float-Tensors of the same length as logits.
      average_across_timesteps: If set, divide the returned cost by the total
        label weight.
      softmax_loss_function: Function (inputs-batch, labels-batch) -> loss-batch
        to be used instead of the standard softmax (the default if this is None).
      name: Optional name for this operation, default: "sequence_loss_by_example".

    Returns:
      1D batch-sized float Tensor: The log-perplexity for each sequence.

    Raises:
      ValueError: If len(logits) is different from len(targets) or len(weights).
    """
    if len(targets) != len(logits) or len(weights) != len(logits):
        raise ValueError("Lengths of logits, weights, and targets must be the same "
                         "%d, %d, %d." % (len(logits), len(weights), len(targets)))
    with ops.name_scope(name, "sequence_loss_by_example",
                        logits + targets + weights):
        log_perp_list = []
        for logit, target, weight in zip(logits, targets, weights):
            if softmax_loss_function is None:
                # TODO(irving,ebrevdo): This reshape is needed because
                # sequence_loss_by_example is called with scalars sometimes, which
                # violates our general scalar strictness policy.
                target = array_ops.reshape(target, [-1])
                crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(
                    logit, target)
            else:
                crossent = softmax_loss_function(logit, target)
            log_perp_list.append(crossent * weight)
        log_perps = math_ops.add_n(log_perp_list)
        if average_across_timesteps:
            total_size = math_ops.add_n(weights)
            total_size += 1e-12  # Just to avoid division by 0 for all-0 weights.
            log_perps /= total_size
    return log_perps 
Developer: atpaino, Project: deep-text-corrector, Lines: 46, Source: seq2seq.py

Example 12: sequence_loss_by_example

# Required module import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import sparse_softmax_cross_entropy_with_logits [as alias]
def sequence_loss_by_example(logits, targets, weights,
                             average_across_timesteps=True,
                             softmax_loss_function=None, name=None):
  """Weighted cross-entropy loss for a sequence of logits (per example).

  Args:
    logits: List of 2D Tensors of shape [batch_size x num_decoder_symbols].
    targets: List of 1D batch-sized int32 Tensors of the same length as logits.
    weights: List of 1D batch-sized float-Tensors of the same length as logits.
    average_across_timesteps: If set, divide the returned cost by the total
      label weight.
    softmax_loss_function: Function (inputs-batch, labels-batch) -> loss-batch
      to be used instead of the standard softmax (the default if this is None).
    name: Optional name for this operation, default: "sequence_loss_by_example".

  Returns:
    1D batch-sized float Tensor: The log-perplexity for each sequence.

  Raises:
    ValueError: If len(logits) is different from len(targets) or len(weights).
  """
  if len(targets) != len(logits) or len(weights) != len(logits):
    raise ValueError("Lengths of logits, weights, and targets must be the same "
                     "%d, %d, %d." % (len(logits), len(weights), len(targets)))
  with ops.name_scope(name, "sequence_loss_by_example",
                      logits + targets + weights):
    log_perp_list = []
    for logit, target, weight in zip(logits, targets, weights):
      if softmax_loss_function is None:
        # TODO(irving,ebrevdo): This reshape is needed because
        # sequence_loss_by_example is called with scalars sometimes, which
        # violates our general scalar strictness policy.
        target = array_ops.reshape(target, [-1])
        crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(
            logit, target)
      else:
        crossent = softmax_loss_function(logit, target)
      log_perp_list.append(crossent * weight)
    log_perps = math_ops.add_n(log_perp_list)
    if average_across_timesteps:
      total_size = math_ops.add_n(weights)
      total_size += 1e-12  # Just to avoid division by 0 for all-0 weights.
      log_perps /= total_size
  return log_perps 
Developer: tobegit3hub, Project: deep_image_model, Lines: 46, Source: seq2seq.py

Example 13: sequence_loss_by_example

# Required module import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import sparse_softmax_cross_entropy_with_logits [as alias]
def sequence_loss_by_example(logits, targets, weights, ememory,
                             average_across_timesteps=True,
                             softmax_loss_function=None, name=None):
    """Weighted cross-entropy loss for a sequence of logits (per example).

    Args:
        logits: List of 2D Tensors of shape [batch_size x num_decoder_symbols].
        targets: List of 1D batch-sized int32 Tensors of the same length as logits.
        weights: List of 1D batch-sized float-Tensors of the same length as logits.
        average_across_timesteps: If set, divide the returned cost by the total
            label weight.
        softmax_loss_function: Function (inputs-batch, labels-batch) -> loss-batch
            to be used instead of the standard softmax (the default if this is None).
        name: Optional name for this operation, default: "sequence_loss_by_example".

    Returns:
        1D batch-sized float Tensor: The log-perplexity for each sequence.

    Raises:
        ValueError: If len(logits) is different from len(targets) or len(weights).
    """
    if len(targets) != len(logits) or len(weights) != len(logits):
        raise ValueError("Lengths of logits, weights, and targets must be the same "
                         "%d, %d, %d." % (len(logits), len(weights), len(targets)))
    with ops.name_scope(name, "sequence_loss_by_example",
                        logits + targets + weights if ememory is None else logits + targets + weights + [ememory]):
        log_perp_list = []
        for logit, target, weight in zip(logits, targets, weights):
            if softmax_loss_function is None:
                # TODO(irving,ebrevdo): This reshape is needed because
                # sequence_loss_by_example is called with scalars sometimes, which
                # violates our general scalar strictness policy.
                #target = array_ops.reshape(target, [-1])
                #crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(
                #        logit, target)
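                # Note: both branches below apply tf.log to `logit` directly, i.e. they treat it
                # as an already-normalized probability distribution rather than raw logits.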
                if ememory is None:
                    target = array_ops.reshape(target, [-1])
                    label = tf.one_hot(target, depth=logit.get_shape().with_rank(2)[1], dtype=tf.float32)
                    crossent = -tf.reduce_sum(label * tf.log(logit+1e-12), 1)
                else:
                    golden = tf.gather(ememory, target)
                    golden = tf.stack([golden, 1-golden])
                    crossent = -tf.reduce_sum(golden * tf.log(logit+1e-12), 0)

            else:
                #sampled softmax not work
                crossent = softmax_loss_function(logit, target)
            log_perp_list.append(crossent * weight)
        log_perps = math_ops.add_n(log_perp_list)
        if average_across_timesteps:
            total_size = math_ops.add_n(weights)
            total_size += 1e-12    # Just to avoid division by 0 for all-0 weights.
            log_perps /= total_size
    return log_perps 
Developer: thu-coai, Project: ecm, Lines: 56, Source: seq2seq.py

Example 14: sequence_loss_by_example

# Required module import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import sparse_softmax_cross_entropy_with_logits [as alias]
def sequence_loss_by_example(logits, targets, weights,
                             average_across_timesteps=True,
                             softmax_loss_function=None, name=None):
  """Weighted cross-entropy loss for a sequence of logits (per example).

  Args:
    logits: List of 2D Tensors of shape [batch_size x num_decoder_symbols].
    targets: List of 1D batch-sized int32 Tensors of the same length as logits.
    weights: List of 1D batch-sized float-Tensors of the same length as logits.
    average_across_timesteps: If set, divide the returned cost by the total
      label weight.
    softmax_loss_function: Function (inputs-batch, labels-batch) -> loss-batch
      to be used instead of the standard softmax (the default if this is None).
    name: Optional name for this operation, default: "sequence_loss_by_example".

  Returns:
    1D batch-sized float Tensor: The log-perplexity for each sequence.

  Raises:
    ValueError: If len(logits) is different from len(targets) or len(weights).
  """
  if len(targets) != len(logits) or len(weights) != len(logits):
    raise ValueError("Lengths of logits, weights, and targets must be the same "
                     "%d, %d, %d." % (len(logits), len(weights), len(targets)))
  with ops.name_scope(name, "sequence_loss_by_example",
                      logits + targets + weights):
    log_perp_list = []
    for logit, target, weight in zip(logits, targets, weights):
      if softmax_loss_function is None:
        # TODO(irving,ebrevdo): This reshape is needed because
        # sequence_loss_by_example is called with scalars sometimes, which
        # violates our general scalar strictness policy.
        target = array_ops.reshape(target, [-1])
        crossent = nn_ops.sparse_softmax_cross_entropy_with_logits(
            logits=logit, labels=target)
      else:
        crossent = softmax_loss_function(logits=logit, labels=target)
      log_perp_list.append(crossent * weight)
    log_perps = math_ops.add_n(log_perp_list)
    if average_across_timesteps:
      total_size = math_ops.add_n(weights)
      total_size += 1e-12  # Just to avoid division by 0 for all-0 weights.
      log_perps /= total_size
  return log_perps 
Developer: da03, Project: Attention-OCR, Lines: 46, Source: seq2seq.py


Note: The tensorflow.python.ops.nn_ops.sparse_softmax_cross_entropy_with_logits method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and distribution and use should follow each project's license. Do not reproduce without permission.