

Python array_ops.ones_like Method Code Examples

This article collects typical usage examples of the Python method tensorflow.python.ops.array_ops.ones_like. If you are wrestling with questions such as: what exactly does array_ops.ones_like do? How is it used? What do real-world examples look like? Then the curated code examples below may help. You can also explore further usage examples from tensorflow.python.ops.array_ops, the module this method belongs to.


The following presents 15 code examples of the array_ops.ones_like method, sorted by popularity by default.
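Before the examples, here is a minimal, self-contained sketch of what array_ops.ones_like does. This snippet is my own illustration rather than part of the collected examples, and it assumes TensorFlow 1.x graph mode, which all of the examples below use:

import tensorflow as tf
from tensorflow.python.ops import array_ops

x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
ones = array_ops.ones_like(x)                    # shape (2, 2), float32, all 1.0
ones_i = array_ops.ones_like(x, dtype=tf.int32)  # same shape, but int32

with tf.Session() as sess:
    print(sess.run(ones))    # [[1. 1.] [1. 1.]]
    print(sess.run(ones_i))  # [[1 1] [1 1]]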

Example 1: _sample_n

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import ones_like [as alias]
def _sample_n(self, n, seed=None):
    expanded_concentration1 = array_ops.ones_like(
        self.total_concentration, dtype=self.dtype) * self.concentration1
    expanded_concentration0 = array_ops.ones_like(
        self.total_concentration, dtype=self.dtype) * self.concentration0
    gamma1_sample = random_ops.random_gamma(
        shape=[n],
        alpha=expanded_concentration1,
        dtype=self.dtype,
        seed=seed)
    gamma2_sample = random_ops.random_gamma(
        shape=[n],
        alpha=expanded_concentration0,
        dtype=self.dtype,
        seed=distribution_util.gen_new_seed(seed, "beta"))
    beta_sample = gamma1_sample / (gamma1_sample + gamma2_sample)
    return beta_sample 
Developer: ryfeus, Project: lambda-packs, Lines: 19, Source: beta.py
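A side note, not part of the original source: the construction above relies on the standard fact that if G1 ~ Gamma(a, 1) and G2 ~ Gamma(b, 1) are independent, then G1 / (G1 + G2) ~ Beta(a, b); ones_like is only used to broadcast concentration1 and concentration0 to the full batch shape. A quick NumPy sanity check of that identity:

import numpy as np

rng = np.random.RandomState(0)
a, b = 2.0, 5.0
g1 = rng.gamma(shape=a, size=100000)
g2 = rng.gamma(shape=b, size=100000)
beta_samples = g1 / (g1 + g2)
print(beta_samples.mean(), a / (a + b))   # both close to 0.2857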

Example 2: _log_prob

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import ones_like [as alias]
def _log_prob(self, event):
    event = self._maybe_assert_valid_sample(event)
    # TODO(jaana): The current sigmoid_cross_entropy_with_logits has
    # inconsistent behavior for logits = inf/-inf.
    event = math_ops.cast(event, self.logits.dtype)
    logits = self.logits
    # sigmoid_cross_entropy_with_logits doesn't broadcast shape,
    # so we do this here.

    def _broadcast(logits, event):
      return (array_ops.ones_like(event) * logits,
              array_ops.ones_like(logits) * event)

    # First check static shape.
    if (event.get_shape().is_fully_defined() and
        logits.get_shape().is_fully_defined()):
      if event.get_shape() != logits.get_shape():
        logits, event = _broadcast(logits, event)
    else:
      logits, event = control_flow_ops.cond(
          distribution_util.same_dynamic_shape(logits, event),
          lambda: (logits, event),
          lambda: _broadcast(logits, event))
    return -nn.sigmoid_cross_entropy_with_logits(labels=event, logits=logits) 
Developer: ryfeus, Project: lambda-packs, Lines: 26, Source: bernoulli.py
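The _broadcast helper is worth calling out: multiplying a tensor by ones_like of the other tensor is a common trick for explicitly broadcasting both operands to their common shape before an op that does not broadcast. A reduced sketch of the idea (my own illustration, TF 1.x assumed):

import tensorflow as tf
from tensorflow.python.ops import array_ops

logits = tf.constant([[0.1, -0.2, 0.3]])        # shape (1, 3)
event = tf.constant([[1.0], [0.0]])             # shape (2, 1)

logits_b = array_ops.ones_like(event) * logits  # broadcast to shape (2, 3)
event_b = array_ops.ones_like(logits) * event   # broadcast to shape (2, 3)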

Example 3: ones_like

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import ones_like [as alias]
def ones_like(x, dtype=None, name=None):
  """Instantiates an all-ones variable of the same shape as another tensor.

  Arguments:
      x: Keras variable or tensor.
      dtype: String, dtype of returned Keras variable.
           None uses the dtype of x.
      name: String, name for the variable to create.

  Returns:
      A Keras variable with the shape of x filled with ones.

  Example:
  ```python
      >>> from keras import backend as K
      >>> kvar = K.variable(np.random.random((2,3)))
      >>> kvar_ones = K.ones_like(kvar)
      >>> K.eval(kvar_ones)
      array([[ 1.,  1.,  1.],
             [ 1.,  1.,  1.]], dtype=float32)
  ```
  """
  return array_ops.ones_like(x, dtype=dtype, name=name) 
Developer: ryfeus, Project: lambda-packs, Lines: 25, Source: backend.py

Example 4: _log_prob

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import ones_like [as alias]
def _log_prob(self, x):
    x = self._assert_valid_sample(x)
    # broadcast logits or x if need be.
    logits = self.logits
    if (not x.get_shape().is_fully_defined() or
        not logits.get_shape().is_fully_defined() or
        x.get_shape() != logits.get_shape()):
      logits = array_ops.ones_like(x, dtype=logits.dtype) * logits
      x = array_ops.ones_like(logits, dtype=x.dtype) * x

    logits_shape = array_ops.shape(math_ops.reduce_sum(logits, -1))
    logits_2d = array_ops.reshape(logits, [-1, self.event_size])
    x_2d = array_ops.reshape(x, [-1, self.event_size])
    ret = -nn_ops.softmax_cross_entropy_with_logits(labels=x_2d,
                                                    logits=logits_2d)
    # Reshape back to user-supplied batch and sample dims prior to 2D reshape.
    ret = array_ops.reshape(ret, logits_shape)
    return ret 
Developer: ryfeus, Project: lambda-packs, Lines: 20, Source: onehot_categorical.py
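One more detail worth noting (my own comment, not from the source): softmax_cross_entropy_with_logits expects 2-D [batch, num_classes] inputs, which is why logits and x are flattened to [-1, event_size] and the result is reshaped back to the original batch shape. A reduced sketch of that reshape round trip (TF 1.x assumed, with a hard-coded event size of 3):

import tensorflow as tf
from tensorflow.python.ops import array_ops, math_ops, nn_ops

logits = tf.random_normal([2, 5, 3])                 # [batch, sample, event_size]
x = tf.one_hot(tf.zeros([2, 5], dtype=tf.int32), 3)  # one-hot events, same shape

out_shape = array_ops.shape(math_ops.reduce_sum(logits, -1))  # [2, 5]
ret = -nn_ops.softmax_cross_entropy_with_logits(
    labels=array_ops.reshape(x, [-1, 3]),
    logits=array_ops.reshape(logits, [-1, 3]))
ret = array_ops.reshape(ret, out_shape)              # back to [2, 5]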

Example 5: _sample_n

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import ones_like [as alias]
def _sample_n(self, n, seed=None):
    low = self._low
    high = self._high
    with ops.name_scope("transform"):
      n = ops.convert_to_tensor(n, name="n")
      x_samps = self.distribution.sample(n, seed=seed)
      ones = array_ops.ones_like(x_samps)

      # Snap values to the intervals (j - 1, j].
      result_so_far = math_ops.ceil(x_samps)

      if low is not None:
        result_so_far = array_ops.where(result_so_far < low,
                                        low * ones, result_so_far)

      if high is not None:
        result_so_far = array_ops.where(result_so_far > high,
                                        high * ones, result_so_far)

      return result_so_far 
Developer: ryfeus, Project: lambda-packs, Lines: 22, Source: quantized_distribution.py
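The where/ones_like pattern above is a simple way to clip a tensor to [low, high] element-wise while keeping shapes explicit. A standalone sketch of the same pattern (my own illustration, TF 1.x assumed):

import tensorflow as tf
from tensorflow.python.ops import array_ops, math_ops

x = tf.constant([-2.3, 0.4, 5.7])
low, high = 0.0, 4.0

ones = array_ops.ones_like(x)
y = math_ops.ceil(x)                           # [-2., 1., 6.]
y = array_ops.where(y < low, low * ones, y)    # [ 0., 1., 6.]
y = array_ops.where(y > high, high * ones, y)  # [ 0., 1., 4.]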

Example 6: _bdtr

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import ones_like [as alias]
def _bdtr(k, n, p):
  """The binomial cumulative distribution function.

  Args:
    k: floating point `Tensor`.
    n: floating point `Tensor`.
    p: floating point `Tensor`.

  Returns:
    `sum_{j=0}^k p^j (1 - p)^(n - j)`.
  """
  # Trick for getting safe backprop/gradients into n, k when
  #   betainc(a = 0, ..) = nan
  # Write:
  #   where(unsafe, safe_output, betainc(where(unsafe, safe_input, input)))
  ones = array_ops.ones_like(n - k)
  k_eq_n = math_ops.equal(k, n)
  safe_dn = array_ops.where(k_eq_n, ones, n - k)
  dk = math_ops.betainc(a=safe_dn, b=k + 1, x=1 - p)
  return array_ops.where(k_eq_n, ones, dk) 
Developer: ryfeus, Project: lambda-packs, Lines: 22, Source: binomial.py
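The ones_like/where combination here implements a well-known safe-gradient pattern: when an op produces NaN gradients for some inputs (here betainc with a = 0), substitute a harmless value at those positions before the op, then overwrite the result with the correct value afterwards. A generic sketch of the pattern, using log instead of betainc (my own illustration, TF 1.x assumed):

import tensorflow as tf
from tensorflow.python.ops import array_ops, math_ops

x = tf.constant([0.0, 4.0])
unsafe = math_ops.equal(x, 0.0)
ones = array_ops.ones_like(x)

safe_x = array_ops.where(unsafe, ones, x)                # [1., 4.]
y = array_ops.where(unsafe, ones, math_ops.log(safe_x))  # [1., log(4)]
# The 1. written at x == 0 is just a placeholder for this sketch; unlike
# log(x) at x == 0, both y and dy/dx stay finite everywhere.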

Example 7: _safe_div

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import ones_like [as alias]
def _safe_div(numerator, denominator, name="value"):
  """Computes a safe divide which returns 0 if the denominator is zero.

  Note that the function contains an additional conditional check that is
  necessary for avoiding situations where the loss is zero causing NaNs to
  creep into the gradient computation.

  Args:
    numerator: An arbitrary `Tensor`.
    denominator: A `Tensor` whose shape matches `numerator` and whose values are
      assumed to be non-negative.
    name: An optional name for the returned op.

  Returns:
    The element-wise value of the numerator divided by the denominator.
  """
  return array_ops.where(
      math_ops.greater(denominator, 0),
      math_ops.div(numerator, array_ops.where(
          math_ops.equal(denominator, 0),
          array_ops.ones_like(denominator), denominator)),
      array_ops.zeros_like(numerator),
      name=name) 
Developer: ryfeus, Project: lambda-packs, Lines: 25, Source: loss_ops.py
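Usage-wise, a division by zero returns 0 instead of inf/NaN and keeps NaNs out of the gradient. Illustrative values only; this snippet assumes the _safe_div defined in the example above is in scope:

import tensorflow as tf

num = tf.constant([1.0, 2.0, 3.0])
den = tf.constant([2.0, 0.0, 4.0])
out = _safe_div(num, den)   # evaluates to [0.5, 0., 0.75]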

Example 8: hinge_loss

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import ones_like [as alias]
def hinge_loss(logits, labels=None, scope=None):
  """Method that returns the loss tensor for hinge loss.

  Args:
    logits: The logits, a float tensor.
    labels: The ground truth output tensor. Its shape should match the shape of
      logits. The values of the tensor are expected to be 0.0 or 1.0.
    scope: The scope for the operations performed in computing the loss.

  Returns:
    A `Tensor` of same shape as `logits` and `labels` representing the loss
      values across the batch.

  Raises:
    ValueError: If the shapes of `logits` and `labels` don't match.
  """
  with ops.name_scope(scope, "hinge_loss", [logits, labels]) as scope:
    logits.get_shape().assert_is_compatible_with(labels.get_shape())
    # We first need to convert binary labels to -1/1 labels (as floats).
    labels = math_ops.to_float(labels)
    all_ones = array_ops.ones_like(labels)
    labels = math_ops.subtract(2 * labels, all_ones)
    return nn_ops.relu(
        math_ops.subtract(all_ones, math_ops.multiply(labels, logits))) 
Developer: ryfeus, Project: lambda-packs, Lines: 26, Source: loss_ops.py
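A quick NumPy check of what the label conversion and hinge formula above compute (my own illustration, not from the original project):

import numpy as np

logits = np.array([0.8, -0.5, 2.0])
labels = np.array([1.0, 0.0, 1.0])             # binary {0, 1} labels

signed = 2 * labels - 1.0                      # {0, 1} -> {-1, +1}, i.e. 2*labels - all_ones
loss = np.maximum(0.0, 1.0 - signed * logits)
print(loss)                                    # [0.2 0.5 0. ]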

Example 9: _safe_div

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import ones_like [as alias]
def _safe_div(numerator, denominator, name="value"):
  """Computes a safe divide which returns 0 if the denominator is zero.

  Note that the function contains an additional conditional check that is
  necessary for avoiding situations where the loss is zero causing NaNs to
  creep into the gradient computation.

  Args:
    numerator: An arbitrary `Tensor`.
    denominator: `Tensor` whose shape matches `numerator` and whose values are
      assumed to be non-negative.
    name: An optional name for the returned op.

  Returns:
    The element-wise value of the numerator divided by the denominator.
  """
  return array_ops.where(
      math_ops.greater(denominator, 0),
      math_ops.div(numerator, array_ops.where(
          math_ops.equal(denominator, 0),
          array_ops.ones_like(denominator), denominator)),
      array_ops.zeros_like(numerator),
      name=name) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 25, Source: losses_impl.py

Example 10: average_impurity

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import ones_like [as alias]
def average_impurity(self):
    """Constructs a TF graph for evaluating the average leaf impurity of a tree.

    If in regression mode, this is the leaf variance. If in classification mode,
    this is the gini impurity.

    Returns:
      The last op in the graph.
    """
    children = array_ops.squeeze(array_ops.slice(
        self.variables.tree, [0, 0], [-1, 1]), squeeze_dims=[1])
    is_leaf = math_ops.equal(constants.LEAF_NODE, children)
    leaves = math_ops.to_int32(array_ops.squeeze(array_ops.where(is_leaf),
                                                 squeeze_dims=[1]))
    counts = array_ops.gather(self.variables.node_sums, leaves)
    gini = self._weighted_gini(counts)
    # Guard against step 1, when there often are no leaves yet.
    def impurity():
      return gini
    # Since average impurity can be used for loss, when there's no data just
    # return a big number so that loss always decreases.
    def big():
      return array_ops.ones_like(gini, dtype=dtypes.float32) * 10000000.
    return control_flow_ops.cond(math_ops.greater(
        array_ops.shape(leaves)[0], 0), impurity, big) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 27, Source: tensor_forest.py
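The closing cond guards against an empty leaves tensor: when there is no data yet, it returns a large constant (built with ones_like so it has the right shape and dtype) instead of an undefined impurity. A reduced sketch of the same guard pattern (my own illustration, TF 1.x assumed):

import tensorflow as tf
from tensorflow.python.ops import array_ops, control_flow_ops, math_ops

values = tf.constant([], dtype=tf.float32)  # possibly-empty input
mean = math_ops.reduce_mean(values)         # NaN when `values` is empty

guarded = control_flow_ops.cond(
    math_ops.greater(array_ops.shape(values)[0], 0),
    lambda: mean,
    lambda: array_ops.ones_like(mean) * 10000000.)  # falls back to a big constant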

Example 11: remove

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import ones_like [as alias]
def remove(self, ids):
    """Remove the ids (and their associated scores) from the TopN."""
    with ops.control_dependencies(self.last_ops):
      scatter_op = state_ops.scatter_update(
          self.id_to_score,
          ids,
          array_ops.ones_like(
              ids, dtype=dtypes.float32) * dtypes.float32.min)
      # We assume that removed ids are almost always in the shortlist,
      # so it makes no sense to hide the Op behind a tf.cond
      shortlist_ids_to_remove, new_length = tensor_forest_ops.top_n_remove(
          self.sl_ids, ids)
      u1 = state_ops.scatter_update(
          self.sl_ids,
          array_ops.concat([[0], shortlist_ids_to_remove], 0),
          array_ops.concat(
              [new_length, array_ops.ones_like(shortlist_ids_to_remove) * -1],
              0))
      u2 = state_ops.scatter_update(
          self.sl_scores,
          shortlist_ids_to_remove,
          dtypes.float32.min * array_ops.ones_like(
              shortlist_ids_to_remove, dtype=dtypes.float32))
      self.last_ops = [scatter_op, u1, u2] 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 26, Source: topn.py

Example 12: _log_prob

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import ones_like [as alias]
def _log_prob(self, x):
    x = ops.convert_to_tensor(x, name="x")
    # broadcast logits or x if need be.
    logits = self.logits
    if (not x.get_shape().is_fully_defined() or
        not logits.get_shape().is_fully_defined() or
        x.get_shape() != logits.get_shape()):
      logits = array_ops.ones_like(x, dtype=logits.dtype) * logits
      x = array_ops.ones_like(logits, dtype=x.dtype) * x

    logits_shape = array_ops.shape(logits)
    if logits.get_shape().ndims == 2:
      logits_2d = logits
      x_2d = x
    else:
      logits_2d = array_ops.reshape(logits, [-1, self.num_classes])
      x_2d = array_ops.reshape(x, [-1, self.num_classes])
    ret = -nn_ops.softmax_cross_entropy_with_logits(labels=x_2d,
                                                    logits=logits_2d)
    ret = array_ops.reshape(ret, logits_shape)
    return ret 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 23, Source: onehot_categorical.py

Example 13: _sample_n

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import ones_like [as alias]
def _sample_n(self, n, seed=None):
    lower_cutoff = self._lower_cutoff
    upper_cutoff = self._upper_cutoff
    with ops.name_scope("transform"):
      n = ops.convert_to_tensor(n, name="n")
      x_samps = self.distribution.sample(n, seed=seed)
      ones = array_ops.ones_like(x_samps)

      # Snap values to the intervals (j - 1, j].
      result_so_far = math_ops.ceil(x_samps)

      if lower_cutoff is not None:
        result_so_far = array_ops.where(result_so_far < lower_cutoff,
                                        lower_cutoff * ones, result_so_far)

      if upper_cutoff is not None:
        result_so_far = array_ops.where(result_so_far > upper_cutoff,
                                        upper_cutoff * ones, result_so_far)

      return result_so_far 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 22, Source: quantized_distribution.py

Example 14: __call__

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import ones_like [as alias]
def __call__(self, inputs, state, scope=None):
    """Run the cell with the declared zoneouts."""

    # compute output and new state as before
    output, new_state = self._cell(inputs, state, scope)

    # if either hidden state or memory cell zoneout is applied, then split state and process
    if self._has_hidden_state_zoneout or self._has_memory_cell_zoneout:
      # split state
      c_old, m_old = state
      c_new, m_new = new_state

      # apply zoneout to memory cell and hidden state
      c_and_m = []
      for s_old, s_new, p, has_zoneout in [(c_old, c_new, self._memory_cell_keep_prob,  self._has_memory_cell_zoneout), 
                                           (m_old, m_new, self._hidden_state_keep_prob, self._has_hidden_state_zoneout)]:
        if has_zoneout:
          if self._is_training:
            mask = nn_ops.dropout(array_ops.ones_like(s_new), p, seed=self._seed) * p  # this could use random_ops directly instead; see the dropout implementation for how.
            s = ((1. - mask) * s_old) + (mask * s_new)
          else:
            s = ((1. - p) * s_old) + (p * s_new)
        else:
          s = s_new

        c_and_m.append(s)

      # package final results
      new_state = LSTMStateTuple(*c_and_m)
      output = new_state.h

    return output, new_state 
Developer: aqlaboratory, Project: rgn, Lines: 34, Source: rnn_cell_extended.py
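The expression dropout(ones_like(s_new), p) * p builds a 0/1 Bernoulli mask with keep probability p (dropout scales surviving ones to 1/p, and multiplying by p brings them back to 1). A NumPy sketch of the resulting zoneout update (my own illustration, not from the original project):

import numpy as np

rng = np.random.RandomState(0)
keep_prob = 0.9

s_old = np.array([0.1, 0.2, 0.3, 0.4])
s_new = np.array([1.0, 2.0, 3.0, 4.0])

mask = (rng.uniform(size=s_new.shape) < keep_prob).astype(s_new.dtype)
s = (1.0 - mask) * s_old + mask * s_new   # where mask == 0 the old state is carried over (zoned out)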

Example 15: _create_slots

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import ones_like [as alias]
def _create_slots(self, var_list):
    for v in var_list:
      self._zeros_slot(v, "momentum", self._name)
      self._zeros_slot(v, "gbar", self._name)
      g_tensor = ops.convert_to_tensor(v)
      gain_init = self._initial_gain * array_ops.ones_like(g_tensor)
      _ = self._get_or_make_slot(v, self._initial_scale * array_ops.ones((1)),
                                 "lr_scale", self._name)
      _ = self._get_or_make_slot(v, gain_init, "gain", self._name)
      _ = self._get_or_make_slot(v, array_ops.zeros((1)), "counter", self._name) 
Developer: tensorflow, Project: lingvo, Lines: 12, Source: egdd.py


Note: The tensorflow.python.ops.array_ops.ones_like examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are taken from open-source projects contributed by various developers; the source code copyright belongs to the original authors. Please consult the corresponding project's license before distributing or using the code, and do not republish without permission.