

Python nn_ops.softmax Method Code Examples

This article collects and summarizes typical usage examples of the tensorflow.python.ops.nn_ops.softmax method in Python. If you are wondering how exactly to use nn_ops.softmax, or what it can do, the curated code examples below may help. You can also explore further usage examples from the containing module, tensorflow.python.ops.nn_ops.


The following presents 15 code examples of the nn_ops.softmax method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
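Before the examples, here is a minimal, self-contained usage sketch (toy logits assumed, TF 1.x graph mode like the examples below, not taken from any of the projects): nn_ops.softmax turns unscaled logits into probabilities along the last axis.

import tensorflow as tf
from tensorflow.python.ops import nn_ops

logits = tf.constant([[2.0, 1.0, 0.1]])
probs = nn_ops.softmax(logits)  # same op that backs tf.nn.softmax
with tf.Session() as sess:
    print(sess.run(probs))  # each row sums to 1, roughly [[0.659 0.242 0.099]]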

Example 1: _kl_categorical_categorical

# Required module import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import softmax [as alias]
def _kl_categorical_categorical(a, b, name=None):
  """Calculate the batched KL divergence KL(a || b) with a and b Categorical.

  Args:
    a: instance of a Categorical distribution object.
    b: instance of a Categorical distribution object.
    name: (optional) Name to use for created operations.
      default is "kl_categorical_categorical".

  Returns:
    Batchwise KL(a || b)
  """
  with ops.name_scope(name, "kl_categorical_categorical",
                      values=[a.logits, b.logits]):
    # sum(p * (log(p) - log(q)))
    delta_log_probs1 = (nn_ops.log_softmax(a.logits) -
                        nn_ops.log_softmax(b.logits))
    return math_ops.reduce_sum(nn_ops.softmax(a.logits) * delta_log_probs1,
                               axis=-1) 
Author: ryfeus, Project: lambda-packs, Lines: 21, Source: categorical.py
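For intuition, here is a minimal numerical sketch of the same KL expression (toy logits assumed, not part of the original project):

import tensorflow as tf
from tensorflow.python.ops import math_ops, nn_ops

a_logits = tf.constant([[1.0, 0.0, -1.0]])
b_logits = tf.constant([[0.0, 0.0, 0.0]])
# KL(a || b) = sum_i p_a(i) * (log p_a(i) - log p_b(i)), reduced over the last axis
kl = math_ops.reduce_sum(
    nn_ops.softmax(a_logits) *
    (nn_ops.log_softmax(a_logits) - nn_ops.log_softmax(b_logits)),
    axis=-1)
with tf.Session() as sess:
    print(sess.run(kl))  # non-negative; zero only when both distributions match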

Example 2: _attention

# Required module import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import softmax [as alias]
def _attention(self, query, attn_states):
    conv2d = nn_ops.conv2d
    reduce_sum = math_ops.reduce_sum
    softmax = nn_ops.softmax
    tanh = math_ops.tanh

    with vs.variable_scope("attention"):
      k = vs.get_variable(
          "attn_w", [1, 1, self._attn_size, self._attn_vec_size])
      v = vs.get_variable("attn_v", [self._attn_vec_size])
      hidden = array_ops.reshape(attn_states,
                                 [-1, self._attn_length, 1, self._attn_size])
      hidden_features = conv2d(hidden, k, [1, 1, 1, 1], "SAME")
      y = _linear(query, self._attn_vec_size, True)
      y = array_ops.reshape(y, [-1, 1, 1, self._attn_vec_size])
      s = reduce_sum(v * tanh(hidden_features + y), [2, 3])
      a = softmax(s)
      d = reduce_sum(
          array_ops.reshape(a, [-1, self._attn_length, 1, 1]) * hidden, [1, 2])
      new_attns = array_ops.reshape(d, [-1, self._attn_size])
      new_attn_states = array_ops.slice(attn_states, [0, 1, 0], [-1, -1, -1])
      return new_attns, new_attn_states 
Author: ryfeus, Project: lambda-packs, Lines: 24, Source: rnn_cell.py

Example 3: _kl_categorical_categorical

# Required module import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import softmax [as alias]
def _kl_categorical_categorical(a, b, name=None):
  """Calculate the batched KL divergence KL(a || b) with a, b OneHotCategorical.

  Args:
    a: instance of a OneHotCategorical distribution object.
    b: instance of a OneHotCategorical distribution object.
    name: (optional) Name to use for created operations.
      default is "kl_categorical_categorical".

  Returns:
    Batchwise KL(a || b)
  """
  with ops.name_scope(name, "kl_categorical_categorical", values=[
      a.logits, b.logits]):
    # sum(p ln(p / q))
    return math_ops.reduce_sum(
        nn_ops.softmax(a.logits) * (nn_ops.log_softmax(a.logits)
                                    - nn_ops.log_softmax(b.logits)),
        axis=-1) 
Author: ryfeus, Project: lambda-packs, Lines: 21, Source: onehot_categorical.py

Example 4: _kl_categorical_categorical

# Required module import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import softmax [as alias]
def _kl_categorical_categorical(a, b, name=None):
  """Calculate the batched KL divergence KL(a || b) with a and b Categorical.

  Args:
    a: instance of a Categorical distribution object.
    b: instance of a Categorical distribution object.
    name: (optional) Name to use for created operations.
      default is "kl_categorical_categorical".

  Returns:
    Batchwise KL(a || b)
  """
  with ops.name_scope(
    name, "kl_categorical_categorical", [a.logits, b.logits]):
    # sum(p*ln(p/q))
    return math_ops.reduce_sum(
        nn_ops.softmax(a.logits)*(nn_ops.log_softmax(a.logits)
            - nn_ops.log_softmax(b.logits)), reduction_indices=[-1]) 
Author: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 20, Source: categorical.py

Example 5: test_unary_ops

# Required module import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import softmax [as alias]
def test_unary_ops(self):
    ops = [
        ('relu', nn_ops.relu, nn.relu),
        ('relu6', nn_ops.relu6, nn.relu6),
        ('crelu', nn_ops.crelu, nn.crelu),
        ('elu', nn_ops.elu, nn.elu),
        ('softplus', nn_ops.softplus, nn.softplus),
        ('l2_loss', nn_ops.l2_loss, nn.l2_loss),
        ('softmax', nn_ops.softmax, nn.softmax),
        ('log_softmax', nn_ops.log_softmax, nn.log_softmax),
    ]
    for op_name, tf_op, lt_op in ops:
      golden_tensor = tf_op(self.original_lt.tensor)
      golden_lt = core.LabeledTensor(golden_tensor, self.axes)
      actual_lt = lt_op(self.original_lt)
      self.assertIn(op_name, actual_lt.name)
      self.assertLabeledTensorsEqual(golden_lt, actual_lt) 
Author: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 19, Source: nn_test.py

Example 6: _attention

# Required module import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import softmax [as alias]
def _attention(self, query, attn_states):
    conv2d = nn_ops.conv2d
    reduce_sum = math_ops.reduce_sum
    softmax = nn_ops.softmax
    tanh = math_ops.tanh

    with vs.variable_scope("attention"):
      k = vs.get_variable(
          "attn_w", [1, 1, self._attn_size, self._attn_vec_size])
      v = vs.get_variable("attn_v", [self._attn_vec_size])
      hidden = array_ops.reshape(attn_states,
                                 [-1, self._attn_length, 1, self._attn_size])
      hidden_features = conv2d(hidden, k, [1, 1, 1, 1], "SAME")
      if self._linear3 is None:
        self._linear3 = _Linear(query, self._attn_vec_size, True)
      y = self._linear3(query)
      y = array_ops.reshape(y, [-1, 1, 1, self._attn_vec_size])
      s = reduce_sum(v * tanh(hidden_features + y), [2, 3])
      a = softmax(s)
      d = reduce_sum(
          array_ops.reshape(a, [-1, self._attn_length, 1, 1]) * hidden, [1, 2])
      new_attns = array_ops.reshape(d, [-1, self._attn_size])
      new_attn_states = array_ops.slice(attn_states, [0, 1, 0], [-1, -1, -1])
      return new_attns, new_attn_states 
Author: shaohua0116, Project: Multiview2Novelview, Lines: 26, Source: rnn_cell.py

Example 7: testSoftmax3DUnknownSize

# Required module import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import softmax [as alias]
def testSoftmax3DUnknownSize(self):
    logits = np.ones((2, 3, 2))
    logits[0, 0, 0] = 0
    logits[1, 1, 1] = 0
    logit_placeholder = array_ops.placeholder(
        dtypes.float32, shape=(None, None, 2))
    feed_dict = {logit_placeholder: logits}
    exp_prediction = 0.5 * np.ones((2, 3, 2))
    exp_prediction[0, 0, 0] = self.low
    exp_prediction[0, 0, 1] = self.high
    exp_prediction[1, 1, 0] = self.high
    exp_prediction[1, 1, 1] = self.low

    prediction = _layers.softmax(logit_placeholder)
    with self.cached_session() as sess:
      prediction = sess.run(prediction, feed_dict=feed_dict)
      self.assertAllClose(exp_prediction, prediction) 
Author: google-research, Project: tf-slim, Lines: 19, Source: layers_test.py

Example 8: _attention

# Required module import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import softmax [as alias]
def _attention(self, query, attn_states):
    conv2d = nn_ops.conv2d
    reduce_sum = math_ops.reduce_sum
    softmax = nn_ops.softmax
    tanh = math_ops.tanh

    with vs.variable_scope("Attention"):
      k = vs.get_variable("AttnW", [1, 1, self._attn_size, self._attn_vec_size])
      v = vs.get_variable("AttnV", [self._attn_vec_size])
      hidden = array_ops.reshape(attn_states,
                                 [-1, self._attn_length, 1, self._attn_size])
      hidden_features = conv2d(hidden, k, [1, 1, 1, 1], "SAME")
      y = _linear(query, self._attn_vec_size, True)
      y = array_ops.reshape(y, [-1, 1, 1, self._attn_vec_size])
      s = reduce_sum(v * tanh(hidden_features + y), [2, 3])
      a = softmax(s)
      d = reduce_sum(
          array_ops.reshape(a, [-1, self._attn_length, 1, 1]) * hidden, [1, 2])
      new_attns = array_ops.reshape(d, [-1, self._attn_size])
      new_attn_states = array_ops.slice(attn_states, [0, 1, 0], [-1, -1, -1])
      return new_attns, new_attn_states 
Author: tobegit3hub, Project: deep_image_model, Lines: 23, Source: rnn_cell.py

Example 9: _attention

# Required module import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import softmax [as alias]
def _attention(self, query, attn_inputs):
    with vs.variable_scope("attention"):
      attn_query = tf.layers.dense(
        inputs=query, units=self._attn_vec_size, use_bias=True)
      attn_keys = tf.layers.dense(
        inputs=attn_inputs, units=self._attn_vec_size, use_bias=True)
      attn_contents = tf.layers.dense(
        inputs=attn_inputs, units=self._attn_size, use_bias=True)
      
      v_attn = vs.get_variable("attn_v", [self._attn_vec_size])
      scores = attn_sum_bahdanau(v_attn, attn_keys, attn_query)
      
      if self.attn_masks is not None:
        score_masks = self.attn_masks
        scores = scores * score_masks + (1.0 - score_masks) * tf.float32.min

      attn_weights = nn_ops.softmax(scores)
      new_attns = math_ops.reduce_sum(
        tf.expand_dims(attn_weights, -1) * attn_contents, [1])
      return new_attns 
Author: crazydonkey200, Project: neural-symbolic-machines, Lines: 22, Source: tf_utils.py

Example 10: focal_loss

# Required module import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import softmax [as alias]
def focal_loss(labels, logits, gamma=2.0):
    r"""
    Multi-class focal loss implementation: https://arxiv.org/abs/1708.02002
    :param labels: [batch_size, ] - Tensor of the correct class ids
    :param logits: [batch_size, num_classes] - Unscaled logits
    :param gamma: focal loss weight
    :return: [batch_size, ] - Tensor of average costs for each batch element
    """

    num_classes = array_ops.shape(logits)[1]
    onehot_labels = array_ops.one_hot(labels, num_classes, dtype=logits.dtype)

    p = nn_ops.softmax(logits)
    p = clip_ops.clip_by_value(p, 1e-7, 1.0 - 1e-7)

    f_loss = - onehot_labels * math_ops.pow(1.0 - p, gamma) * math_ops.log(p) \
             - (1 - onehot_labels) * math_ops.pow(p, gamma) * math_ops.log(1.0 - p)

    cost = math_ops.reduce_sum(f_loss, axis=1)
    return cost 
Author: georgesterpu, Project: avsr-tf1, Lines: 22, Source: devel.py
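A hypothetical call of the focal_loss defined above (toy labels and logits assumed; the function and its imports are taken to be in scope):

import tensorflow as tf

labels = tf.constant([0, 2])                  # correct class ids, one per batch element
logits = tf.constant([[2.0, 0.5, 0.1],
                      [0.2, 0.3, 1.5]])       # unscaled logits, [batch_size, num_classes]
# assumes focal_loss (and array_ops, clip_ops, math_ops, nn_ops) is defined as above
loss = focal_loss(labels, logits, gamma=2.0)
with tf.Session() as sess:
    print(sess.run(loss))                     # shape [batch_size], one cost per element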

Example 11: mc_loss

# Required module import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import softmax [as alias]
def mc_loss(labels, logits):
    r"""
    A multi-class cross-entropy loss
    :param labels: [batch_size, ] - Tensor of the correct class ids
    :param logits: [batch_size, num_classes] - Unscaled logits
    :return: [batch_size, ] - Tensor of average costs for each batch element
    """

    num_classes = array_ops.shape(logits)[1]
    onehot_labels = array_ops.one_hot(labels, num_classes, dtype=logits.dtype)

    p = nn_ops.softmax(logits)
    p = clip_ops.clip_by_value(p, 1e-7, 1.0 - 1e-7)

    ce_loss = - onehot_labels * math_ops.log(p) - (1 - onehot_labels) * math_ops.log(1.0-p)

    cost = math_ops.reduce_sum(ce_loss, axis=1)
    return cost 
Author: georgesterpu, Project: avsr-tf1, Lines: 20, Source: devel.py

Example 12: _SoftmaxGrad

# Required module import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import softmax [as alias]
def _SoftmaxGrad(op, grad_softmax):
  """The derivative of the softmax nonlinearity.

  We assume that probs is of shape [batch_size, dim].
  The formula for dsoftmax / dx = (diag(softmax) - softmax * softmax').
  This matrix is diagonal minus a rank one matrix, so it is easy to implement
  as follows:

    grad_x = grad_softmax * softmax - sum(grad_softmax * softmax) * softmax

  Args:
     op: the Softmax op.
     grad_softmax:  the tensor representing the gradient w.r.t. the
       softmax output.

  Returns:
     gradient w.r.t the input to the softmax

  """
  # TODO(ilyasu): assert that the tensor has two dimensions at
  # graph-construction time?  Alternatively: do different things
  # depending on the dimensionality of the input tensors.
  softmax = op.outputs[0]
  grad_x = ((grad_softmax - array_ops.reshape(
      math_ops.reduce_sum(grad_softmax * softmax, [1]), [-1, 1])) * softmax)
  return grad_x 
Author: ryfeus, Project: lambda-packs, Lines: 28, Source: nn_grad.py
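A small NumPy-only check of the formula in the docstring, grad_x = (grad_softmax - sum(grad_softmax * softmax)) * softmax (toy values assumed, purely to illustrate the algebra):

import numpy as np

logits = np.array([[1.0, 2.0, 3.0]])
softmax = np.exp(logits) / np.exp(logits).sum(axis=1, keepdims=True)
grad_softmax = np.array([[0.1, -0.2, 0.3]])   # incoming gradient w.r.t. the softmax output
grad_x = (grad_softmax
          - (grad_softmax * softmax).sum(axis=1, keepdims=True)) * softmax
print(grad_x)  # gradient w.r.t. the logits, matching what _SoftmaxGrad returns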

Example 13: _LogSoftmaxGrad

# Required module import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import softmax [as alias]
def _LogSoftmaxGrad(op, grad):
  """The gradient for log_softmax.

      log_softmax = input - log(sum(exp(input)))
      dlog_softmax/dinput = diag - softmax(input)

  Args:
    op: The log softmax op.
    grad: The tensor representing the gradient w.r.t. the output.

  Returns:
    The gradients w.r.t. the input.
  """
  softmax = math_ops.exp(op.outputs[0])
  return grad - math_ops.reduce_sum(grad, 1, keep_dims=True) * softmax 
Author: ryfeus, Project: lambda-packs, Lines: 17, Source: nn_grad.py

Example 14: _SoftmaxCrossEntropyWithLogitsGrad

# Required module import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import softmax [as alias]
def _SoftmaxCrossEntropyWithLogitsGrad(op, grad_loss, grad_grad):
  """Gradient function for SoftmaxCrossEntropyWithLogits."""
  # grad_loss is the backprop for cost, and we multiply it with the gradients
  # (which is output[1])
  # grad_grad is the backprop for softmax gradient.
  # There is no gradient for the labels
  #
  # Second derivative is just softmax derivative w.r.t. logits.
  softmax_grad = op.outputs[1]
  grad = _BroadcastMul(grad_loss, softmax_grad)

  def IsZero(g):
    # Some introspection to check if the gradient is feeding zeros
    if g.op.type in ("ZerosLike", "Zeros"):
      return True
    const_fill_value = tensor_util.constant_value(g)
    return const_fill_value is not None and (const_fill_value == 0).all()

  if not IsZero(grad_grad):
    logits = op.inputs[0]
    softmax = nn_ops.softmax(logits)

    grad += ((grad_grad - array_ops.squeeze(
        math_ops.matmul(grad_grad[:, None, :],
                        softmax[:, :, None]), axis=1)) * softmax)

  return grad, None 
Author: ryfeus, Project: lambda-packs, Lines: 29, Source: nn_grad.py

Example 15: inference_graph

# Required module import: from tensorflow.python.ops import nn_ops [as alias]
# Or: from tensorflow.python.ops.nn_ops import softmax [as alias]
def inference_graph(self, data, data_spec=None):
    """Returns the op that performs inference on a batch of data."""

    return nn_ops.softmax(self._base_inference(data, data_spec=data_spec)) 
Author: ryfeus, Project: lambda-packs, Lines: 6, Source: hybrid_model.py


Note: The tensorflow.python.ops.nn_ops.softmax examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and the copyright of the source code remains with its original authors. Please consult the corresponding projects' licenses before distributing or using the code, and do not reproduce this article without permission.