

Python nn.softmax Function Code Examples

This article collects typical usage examples of the Python function tensorflow.python.ops.nn.softmax. If you are unsure how to use the softmax function, or are looking for concrete usage examples, the curated code samples below should help.


A total of 15 code examples of the softmax function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
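
Before the examples, here is a minimal self-contained sketch of what nn.softmax computes, written against the public tf.nn.softmax alias; TF 1.x session semantics are assumed (under TF 2.x eager execution, drop the session and print the tensor directly):

import tensorflow as tf

logits = tf.constant([[2.0, 1.0, 0.1]])
probs = tf.nn.softmax(logits)  # each row is non-negative and sums to 1

with tf.Session() as sess:
    print(sess.run(probs))  # approximately [[0.659, 0.242, 0.099]]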

Example 1: sequence_classifier

def sequence_classifier(decoding, labels, sampling_decoding=None, name=None):
  """Returns predictions and loss for sequence of predictions.

  Args:
    decoding: List of Tensors with predictions.
    labels: List of Tensors with labels.
    sampling_decoding: Optional list of Tensors with predictions to be used
      in sampling, e.g. they shouldn't have a dependency on outputs.
      If not provided, `decoding` is used.
    name: Operation name.

  Returns:
    Predictions and losses tensors.
  """
  with ops.op_scope([decoding, labels], name, "sequence_classifier"):
    predictions, xent_list = [], []
    for i, pred in enumerate(decoding):
      xent_list.append(nn.softmax_cross_entropy_with_logits(
          pred, labels[i],
          name="sequence_loss/xent_raw{0}".format(i)))
      if sampling_decoding:
        predictions.append(nn.softmax(sampling_decoding[i]))
      else:
        predictions.append(nn.softmax(pred))
    xent = math_ops.add_n(xent_list, name="sequence_loss/xent")
    loss = math_ops.reduce_sum(xent, name="sequence_loss")
    return array_ops.expand_concat(1, predictions), loss
Author: 0ruben | Project: tensorflow | Lines: 27 | Source: seq2seq_ops.py
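
A hypothetical call into the sequence_classifier function above; the placeholder shapes and names below are illustrative only, not taken from the original project:

import tensorflow as tf

batch_size, num_steps, num_classes = 2, 3, 4
decoding = [tf.placeholder(tf.float32, [batch_size, num_classes])
            for _ in range(num_steps)]
labels = [tf.placeholder(tf.float32, [batch_size, num_classes])
          for _ in range(num_steps)]
predictions, loss = sequence_classifier(decoding, labels)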

Example 2: softmax

def softmax(x, axis=-1):
  """The softmax activation function transforms the outputs so that all values are in

  range (0, 1) and sum to 1. It is often used as the activation for the last
  layer of a classification network because the result could be interpreted as
  a probability distribution. The softmax of x is calculated by
  exp(x)/tf.reduce_sum(exp(x)).

  Arguments:
      x : Input tensor.
      axis: Integer, axis along which the softmax normalization is applied.

  Returns:
      Tensor, output of softmax transformation (all values are non-negative
        and sum to 1).

  Raises:
      ValueError: In case `dim(x) == 1`.
  """
  ndim = K.ndim(x)
  if ndim == 2:
    return nn.softmax(x)
  elif ndim > 2:
    e = math_ops.exp(x - math_ops.reduce_max(x, axis=axis, keepdims=True))
    s = math_ops.reduce_sum(e, axis=axis, keepdims=True)
    return e / s
  else:
    raise ValueError('Cannot apply softmax to a tensor that is 1D. '
                     'Received input: %s' % (x,))
Author: adit-chandra | Project: tensorflow | Lines: 29 | Source: activations.py
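
As a quick check of the two code paths above (the 2-D fast path that delegates to nn.softmax, and the manual exp/normalize path for higher-rank tensors), the sketch below uses the public tf.keras.activations.softmax alias; TF 1.x session usage is assumed:

import numpy as np
import tensorflow as tf

x2d = tf.constant(np.random.randn(2, 5), dtype=tf.float32)
x3d = tf.constant(np.random.randn(2, 3, 5), dtype=tf.float32)
s2d = tf.keras.activations.softmax(x2d)           # 2-D: delegates to nn.softmax
s3d = tf.keras.activations.softmax(x3d, axis=-1)  # 3-D: manual exp/normalize

with tf.Session() as sess:
    a, b = sess.run([s2d, s3d])
    # Both paths produce rows that sum to 1 along the softmax axis.
    print(np.allclose(a.sum(axis=-1), 1.0), np.allclose(b.sum(axis=-1), 1.0))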

Example 3: apply_attention_scores

  def apply_attention_scores(self, scores, value, value_mask=None):
    """Applies attention scores to the given value tensor.

    To use this method in your attention layer, follow the steps:

    * Use `query` tensor of shape `[batch_size, Tq]` and `key` tensor of shape
      `[batch_size, Tv]` to calculate the attention `scores`.
    * Pass `scores` and `value` tensors to this method. The method applies
      `value_mask`, calculates `attention_distribution = softmax(scores)`, then
      returns `matmul(attention_distribution, value)`.
    * Apply `query_mask` and return the result.

    Args:
      scores: Scores float tensor of shape `[batch_size, Tq, Tv]`.
      value: Value tensor of shape `[batch_size, Tv, dim]`.
      value_mask: A boolean mask `Tensor` of shape `[batch_size, Tv]`.
        If given, will apply the mask such that values at positions where
        `mask==False` do not contribute to the result.

    Returns:
      Tensor of shape `[batch_size, Tq, dim]`.
    """
    if value_mask is not None:
      # Mask of shape [batch_size, 1, Tv] that is True in padding position.
      padding_mask = array_ops.expand_dims(
          math_ops.logical_not(value_mask), axis=1)
      # Bias so padding positions do not contribute to attention distribution.
      scores -= 1.e9 * math_ops.cast(padding_mask, dtype=K.floatx())
    attention_distribution = nn.softmax(scores)
    return math_ops.matmul(attention_distribution, value)
Author: kylin9872 | Project: tensorflow | Lines: 30 | Source: dense_attention.py
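
A small sketch of the masking trick used above: subtracting 1e9 from the scores at padded positions drives their softmax weight to roughly zero. Public tf.* aliases and a TF 1.x session are assumed:

import tensorflow as tf

scores = tf.constant([[[1.0, 2.0, 3.0]]])        # [batch=1, Tq=1, Tv=3]
value_mask = tf.constant([[True, True, False]])  # last position is padding
padding_mask = tf.expand_dims(tf.logical_not(value_mask), axis=1)
biased = scores - 1.e9 * tf.cast(padding_mask, tf.float32)
weights = tf.nn.softmax(biased)

with tf.Session() as sess:
    print(sess.run(weights))  # approximately [[[0.269, 0.731, 0.0]]]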

Example 4: softmax_classifier

def softmax_classifier(tensor_in,
                       labels,
                       weights,
                       biases,
                       class_weight=None,
                       name=None):
  """Returns prediction and loss for softmax classifier.

  This function returns "probabilities" and a cross entropy loss. To obtain
  predictions, use `tf.argmax` on the returned probabilities.

  This function requires labels to be passed in one-hot encoding.

  Args:
    tensor_in: Input tensor, [batch_size, feature_size], features.
    labels: Tensor, [batch_size, n_classes], one-hot labels of the output
      classes.
    weights: Tensor, [feature_size, n_classes], linear transformation
      matrix.
    biases: Tensor, [n_classes], biases.
    class_weight: Tensor, optional, [n_classes], weight for each class.
      If not given, all classes are supposed to have weight one.
    name: Operation name.

  Returns:
    `tuple` of softmax predictions and loss `Tensor`s.
  """
  with ops.name_scope(name, 'softmax_classifier', [tensor_in, labels]):
    logits = nn.xw_plus_b(tensor_in, weights, biases)
    if class_weight is not None:
      logits = math_ops.multiply(logits, class_weight)
    return nn.softmax(logits), losses.softmax_cross_entropy(labels, logits)
Author: AndrewTwinz | Project: tensorflow | Lines: 32 | Source: losses_ops.py
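
A hypothetical usage sketch for softmax_classifier; the variable names and sizes below are illustrative. Note that the weights map features to class logits:

import tensorflow as tf

feature_size, n_classes = 10, 3
x = tf.placeholder(tf.float32, [None, feature_size])
y = tf.placeholder(tf.float32, [None, n_classes])     # one-hot labels
w = tf.Variable(tf.zeros([feature_size, n_classes]))
b = tf.Variable(tf.zeros([n_classes]))
probs, loss = softmax_classifier(x, y, w, b)
pred_classes = tf.argmax(probs, 1)  # per the docstring, use argmax for predictions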

Example 5: _apply_scores

  def _apply_scores(self, scores, value, scores_mask=None):
    """Applies attention scores to the given value tensor.

    To use this method in your attention layer, follow the steps:

    * Use `query` tensor of shape `[batch_size, Tq]` and `key` tensor of shape
      `[batch_size, Tv]` to calculate the attention `scores`.
    * Pass `scores` and `value` tensors to this method. The method applies
      `scores_mask`, calculates `attention_distribution = softmax(scores)`, then
      returns `matmul(attention_distribution, value)`.
    * Apply `query_mask` and return the result.

    Args:
      scores: Scores float tensor of shape `[batch_size, Tq, Tv]`.
      value: Value tensor of shape `[batch_size, Tv, dim]`.
      scores_mask: A boolean mask `Tensor` of shape `[batch_size, 1, Tv]` or
        `[batch_size, Tq, Tv]`. If given, scores at positions where
        `scores_mask==False` do not contribute to the result. It must contain
        at least one `True` value in each line along the last dimension.

    Returns:
      Tensor of shape `[batch_size, Tq, dim]`.
    """
    if scores_mask is not None:
      padding_mask = math_ops.logical_not(scores_mask)
      # Bias so padding positions do not contribute to attention distribution.
      scores -= 1.e9 * math_ops.cast(padding_mask, dtype=K.floatx())
    attention_distribution = nn.softmax(scores)
    return math_ops.matmul(attention_distribution, value)
Author: aritratony | Project: tensorflow | Lines: 29 | Source: dense_attention.py

Example 6: softmax_classifier

def softmax_classifier(tensor_in,
                       labels,
                       weights,
                       biases,
                       class_weight=None,
                       name=None):
  """Returns prediction and loss for softmax classifier.

  Args:
    tensor_in: Input tensor, [batch_size, feature_size], features.
    labels: Tensor, [batch_size, n_classes], labels of the output classes.
    weights: Tensor, [feature_size, n_classes], linear transformation
      matrix.
    biases: Tensor, [n_classes], biases.
    class_weight: Tensor, optional, [n_classes], weight for each class.
      If not given, all classes are supposed to have weight one.
    name: Operation name.

  Returns:
    Prediction and loss tensors.
  """
  with ops.name_scope(name, "softmax_classifier", [tensor_in, labels]):
    logits = nn.xw_plus_b(tensor_in, weights, biases)
    if class_weight is not None:
      logits = math_ops.mul(logits, class_weight)
    return nn.softmax(logits), loss_ops.softmax_cross_entropy(logits, labels)
Author: MrCrumpets | Project: tensorflow | Lines: 26 | Source: losses_ops.py

Example 7: _convert_to_estimator_model_result

  def _convert_to_estimator_model_result(self, logits_fn_result):
    logits, loss, train_op = logits_fn_result
    return {
        Classifier.CLASS_OUTPUT:
            math_ops.argmax(logits, len(logits.get_shape()) - 1),
        Classifier.PROBABILITY_OUTPUT: nn.softmax(logits)
    }, loss, train_op
Author: ComeOnGetMe | Project: tensorflow | Lines: 7 | Source: classifier.py

Example 8: logits_to_predictions

  def logits_to_predictions(self, logits, proba=False):
    if self.num_label_columns == 1:
      logits = array_ops.concat([array_ops.zeros_like(logits), logits], 1)

    if proba:
      return nn.softmax(logits)
    else:
      return math_ops.argmax(logits, 1)
Author: Albert-Z-Guo | Project: tensorflow | Lines: 8 | Source: target_column.py
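
Why zeros are concatenated when there is a single logit column: for a scalar logit z, softmax over [0, z] equals [1 - sigmoid(z), sigmoid(z)], giving one probability column per class. A quick check (TF 1.x session assumed):

import tensorflow as tf

logit = tf.constant([[2.0]])                             # [batch, 1]
two_class = tf.concat([tf.zeros_like(logit), logit], 1)  # [batch, 2]
probs = tf.nn.softmax(two_class)

with tf.Session() as sess:
    print(sess.run(probs))  # approximately [[0.119, 0.881]]; sigmoid(2.0) ~ 0.881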

Example 9: _logits_to_predictions

  def _logits_to_predictions(self, logits, proba=False):
    if self._n_classes == 2:
      logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])

    if proba:
      return nn.softmax(logits)
    else:
      return math_ops.argmax(logits, 1)
Author: Ambier | Project: tensorflow | Lines: 8 | Source: dnn_linear_combined.py

Example 10: testGradient

  def testGradient(self):
    x_shape = [5, 10]
    x_np = np.random.randn(*x_shape).astype(np.float64)
    with self.test_session():
      x_tf = constant_op.constant(x_np)
      y_tf = nn.softmax(x_tf)
      err = gc.ComputeGradientError(x_tf, x_shape, y_tf, x_shape)
    eps = 1e-8
    self.assertLess(err, eps)
Author: nickicindy | Project: tensorflow | Lines: 9 | Source: nn_test.py
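
gc.ComputeGradientError is an internal test helper that compares the analytic Jacobian of softmax against a finite-difference estimate. A rough public-API equivalent, assuming TF 1.x and its tf.test.compute_gradient_error utility:

import numpy as np
import tensorflow as tf

x_shape = [5, 10]
x_np = np.random.randn(*x_shape).astype(np.float64)
with tf.Session():
    x_tf = tf.constant(x_np)
    y_tf = tf.nn.softmax(x_tf)
    # Maximum elementwise difference between analytic and numeric Jacobians.
    err = tf.test.compute_gradient_error(x_tf, x_shape, y_tf, x_shape)
print(err < 1e-8)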

Example 11: _logits_to_prediction

  def _logits_to_prediction(self, logits=None):
    predictions = {PredictionKey.LOGITS: logits}
    if self.logits_dimension == 1:
      predictions[PredictionKey.LOGISTIC] = math_ops.sigmoid(logits)
      logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
    predictions[PredictionKey.PROBABILITIES] = nn.softmax(logits)
    predictions[PredictionKey.CLASSES] = math_ops.argmax(logits, 1)

    return predictions
Author: caikehe | Project: tensorflow | Lines: 9 | Source: head.py

Example 12: _predictions

def _predictions(logits, n_classes):
    """Returns predictions for the given logits and n_classes."""
    predictions = {}
    if n_classes == 2:
        predictions[_LOGISTIC] = math_ops.sigmoid(logits)
        logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
    predictions[_PROBABILITIES] = nn.softmax(logits)
    predictions[_CLASSES] = array_ops.reshape(math_ops.argmax(logits, 1), shape=(-1, 1))
    return predictions
Author: pronobis | Project: tensorflow | Lines: 9 | Source: dnn.py

Example 13: _logits_to_prediction

  def _logits_to_prediction(self, logits=None):
    predictions = {PedictionKey.LOGITS: logits}
    if self.logits_dimension == 1:
      predictions[PedictionKey.LOGISTIC] = math_ops.sigmoid(logits)
      logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
    predictions[PedictionKey.PROBABILITIES] = nn.softmax(logits)
    # Workaround for argmax dropping the second dimension.
    predictions[PedictionKey.CLASSES] = array_ops.expand_dims(
        math_ops.argmax(logits, 1), 1)
    return predictions
Author: MrCrumpets | Project: tensorflow | Lines: 10 | Source: head.py

Example 14: testSoftmax

  def testSoftmax(self):
    x_shape = [5, 10]
    x_np = np.random.randn(*x_shape).astype(np.float32)
    y_np = self._softmax(x_np)
    with self.test_session():
      x_tf = constant_op.constant(x_np)
      y_tf = nn.softmax(x_tf)
      y_tf_np = y_tf.eval()
    eps = 1e-3
    self.assertAllClose(y_tf_np, y_np, eps)
Author: nickicindy | Project: tensorflow | Lines: 10 | Source: nn_test.py
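
The helper self._softmax referenced above is not shown in the snippet; a numerically stable NumPy reference that it presumably resembles (an assumption) is:

import numpy as np

def _softmax(x):
    # Subtract the row max first so np.exp cannot overflow.
    e = np.exp(x - np.max(x, axis=-1, keepdims=True))
    return e / np.sum(e, axis=-1, keepdims=True)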

Example 15: _logits_to_predictions

  def _logits_to_predictions(self, logits):
    """Returns a dict of predictions.

    Args:
      logits: logits `Tensor` after applying possible centered bias.

    Returns:
      Dict of prediction `Tensor` keyed by `PredictionKey`.
    """
    predictions = {prediction_key.PredictionKey.LOGITS: logits}
    predictions[prediction_key.PredictionKey.PROBABILITIES] = nn.softmax(
        logits)
    predictions[prediction_key.PredictionKey.CLASSES] = math_ops.argmax(
        logits, 1)

    return predictions
Author: ComeOnGetMe | Project: tensorflow | Lines: 16 | Source: head.py


Note: The tensorflow.python.ops.nn.softmax examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. Please consult each project's license before distributing or using the code. Do not reproduce without permission.