

Python array_ops.zeros_like Function Code Examples

This article collects typical usage examples of the Python function tensorflow.python.ops.array_ops.zeros_like. If you are wondering what zeros_like does, how to call it, or what real-world uses look like, the curated examples below should help.


The following presents 15 code examples of the zeros_like function, ordered by popularity by default.

Example 1: fwd_gradients

def fwd_gradients(ys, xs, grad_xs=None, stop_gradients=None):
  """Compute forward-mode gradients."""
  # See b/37888268.

  # This version of forward-mode autodiff is based on code by Tim Cooijmans
  # and handles list arguments and certain special cases such as when the
  # ys doesn't depend on one or more of the xs, and when ops.IndexedSlices are
  # generated by the first gradients_impl.gradients call.

  us = [array_ops.zeros_like(y) + float("nan") for y in ys]
  dydxs = gradients_impl.gradients(
      ys, xs, grad_ys=us, stop_gradients=stop_gradients)

  # Deal with strange types that gradients_impl.gradients returns but cannot
  # itself deal with (e.g. ops.IndexedSlices).
  dydxs = [
      ops.convert_to_tensor(dydx)
      if isinstance(dydx, ops.IndexedSlices) else dydx for dydx in dydxs
  ]
  dydxs = [
      array_ops.zeros_like(x) if dydx is None else dydx
      for x, dydx in zip(xs, dydxs)
  ]

  dysdx = gradients_impl.gradients(dydxs, us, grad_ys=grad_xs)

  return dysdx
Author: abidrahmank, Project: tensorflow, Lines: 27, Source: utils.py
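A hypothetical usage sketch for the function above, assuming TensorFlow 1.x graph mode and that fwd_gradients is in scope as defined: the directional derivative of y = x² at x = 3, with grad_xs left unspecified (equivalent to a direction of 1), is dy/dx = 6.

# Hypothetical usage sketch (not from the original project), assuming
# TensorFlow 1.x graph mode and the fwd_gradients defined above.
import tensorflow as tf

x = tf.constant(3.0)
y = x * x
jvp = fwd_gradients([y], [x])  # direction defaults to ones
with tf.Session() as sess:
  print(sess.run(jvp))  # [6.0]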

Example 2: _log_cdf

  def _log_cdf(self, y):
    lower_cutoff = self._lower_cutoff
    upper_cutoff = self._upper_cutoff

    # Recall the promise:
    # cdf(y) := P[Y <= y]
    #         = 1, if y >= upper_cutoff,
    #         = 0, if y < lower_cutoff,
    #         = P[X <= y], otherwise.

    # P[Y <= j] = P[floor(Y) <= j] since mass is only at integers, not in
    # between.
    j = math_ops.floor(y)

    result_so_far = self.distribution.log_cdf(j)

    # Broadcast, because it's possible that this is a single distribution being
    # evaluated on a number of samples, or something like that.
    j += array_ops.zeros_like(result_so_far)

    # Re-define values at the cutoffs.
    if lower_cutoff is not None:
      neg_inf = -np.inf * array_ops.ones_like(result_so_far)
      result_so_far = math_ops.select(j < lower_cutoff, neg_inf, result_so_far)
    if upper_cutoff is not None:
      result_so_far = math_ops.select(j >= upper_cutoff,
                                      array_ops.zeros_like(result_so_far),
                                      result_so_far)

    return result_so_far
Author: Qstar, Project: tensorflow, Lines: 30, Source: quantized_distribution.py
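The line j += array_ops.zeros_like(result_so_far) is a common broadcasting idiom: adding a tensor of zeros leaves the values of j unchanged but forces it onto the broadcast shape of result_so_far. A minimal standalone sketch of the idiom, using the public tf API under TensorFlow 1.x (shapes are assumptions for illustration):

import tensorflow as tf

j = tf.constant([[1.0], [2.0]])      # shape [2, 1]
result = tf.zeros([2, 3])            # stand-in for the log_cdf output
j_bcast = j + tf.zeros_like(result)  # values unchanged, shape now [2, 3]
with tf.Session() as sess:
  print(sess.run(j_bcast))
  # [[1. 1. 1.]
  #  [2. 2. 2.]]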

Example 3: _cdf

  def _cdf(self, y):
    low = self._low
    high = self._high

    # Recall the promise:
    # cdf(y) := P[Y <= y]
    #         = 1, if y >= high,
    #         = 0, if y < low,
    #         = P[X <= y], otherwise.

    # P[Y <= j] = P[floor(Y) <= j] since mass is only at integers, not in
    # between.
    j = math_ops.floor(y)

    # P[X <= j], used when low < X < high.
    result_so_far = self.distribution.cdf(j)

    # Broadcast, because it's possible that this is a single distribution being
    # evaluated on a number of samples, or something like that.
    j += array_ops.zeros_like(result_so_far)

    # Re-define values at the cutoffs.
    if low is not None:
      result_so_far = array_ops.where(j < low,
                                      array_ops.zeros_like(result_so_far),
                                      result_so_far)
    if high is not None:
      result_so_far = array_ops.where(j >= high,
                                      array_ops.ones_like(result_so_far),
                                      result_so_far)

    return result_so_far
Author: finardi, Project: tensorflow, Lines: 32, Source: quantized_distribution.py
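The array_ops.where calls pin the CDF to 0 below low and to 1 at or above high. A small sketch of the same clamping pattern on plain tensors (illustrative values, not from the original project; TensorFlow 1.x assumed):

import tensorflow as tf

low, high = 0.0, 3.0
j = tf.constant([-1.0, 1.0, 5.0])
cdf = tf.constant([0.1, 0.6, 0.99])  # hypothetical P[X <= j]
cdf = tf.where(j < low, tf.zeros_like(cdf), cdf)
cdf = tf.where(j >= high, tf.ones_like(cdf), cdf)
with tf.Session() as sess:
  print(sess.run(cdf))  # [0.  0.6 1. ]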

Example 4: _log_survival_function

  def _log_survival_function(self, y):
    low = self._low
    high = self._high

    # Recall the promise:
    # survival_function(y) := P[Y > y]
    #                       = 0, if y >= high,
    #                       = 1, if y < low,
    #                       = P[X > y], otherwise.

    # P[Y > j] = P[ceiling(Y) > j] since mass is only at integers, not in
    # between.
    j = math_ops.ceil(y)

    # P[X > j], used when low < X < high.
    result_so_far = self.distribution.log_survival_function(j)

    # Broadcast, because it's possible that this is a single distribution being
    # evaluated on a number of samples, or something like that.
    j += array_ops.zeros_like(result_so_far)

    # Re-define values at the cutoffs.
    if low is not None:
      result_so_far = array_ops.where(j < low,
                                      array_ops.zeros_like(result_so_far),
                                      result_so_far)
    if high is not None:
      neg_inf = -np.inf * array_ops.ones_like(result_so_far)
      result_so_far = array_ops.where(j >= high, neg_inf, result_so_far)

    return result_so_far
Author: finardi, Project: tensorflow, Lines: 31, Source: quantized_distribution.py

Example 5: _survival_function

  def _survival_function(self, y):
    lower_cutoff = self._lower_cutoff
    upper_cutoff = self._upper_cutoff

    # Recall the promise:
    # survival_function(y) := P[Y > y]
    #                       = 0, if y >= upper_cutoff,
    #                       = 1, if y < lower_cutoff,
    #                       = P[X > y], otherwise.

    # P[Y > j] = P[ceiling(Y) > j] since mass is only at integers, not in
    # between.
    j = math_ops.ceil(y)

    # P[X > j], used when lower_cutoff < X < upper_cutoff.
    result_so_far = self.distribution.survival_function(j)

    # Broadcast, because it's possible that this is a single distribution being
    # evaluated on a number of samples, or something like that.
    j += array_ops.zeros_like(result_so_far)

    # Re-define values at the cutoffs.
    if lower_cutoff is not None:
      result_so_far = math_ops.select(j < lower_cutoff,
                                      array_ops.ones_like(result_so_far),
                                      result_so_far)
    if upper_cutoff is not None:
      result_so_far = math_ops.select(j >= upper_cutoff,
                                      array_ops.zeros_like(result_so_far),
                                      result_so_far)

    return result_so_far
Author: Qstar, Project: tensorflow, Lines: 32, Source: quantized_distribution.py

Example 6: Loop

 def Loop(cell, w, i):
   x = array_ops.unstack(i, self.NUM_UNROLL)
   m = array_ops.zeros_like(x[0])
   c = array_ops.zeros_like(x[0])
   for i in range(self.NUM_UNROLL):
     m, c = cell(x[i], m, c, w)
   return m
Author: AbhinavJain13, Project: tensorflow, Lines: 7, Source: function_test.py
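Here zeros_like(x[0]) builds the initial hidden state m and cell state c with the same shape and dtype as one unrolled time step. A minimal sketch of the pattern with a stub update in place of the real LSTM cell (NUM_UNROLL and the update rule are assumptions for illustration; TensorFlow 1.x assumed):

import tensorflow as tf

NUM_UNROLL = 4
inp = tf.ones([NUM_UNROLL, 2, 8])  # [time, batch, features]
x = tf.unstack(inp, NUM_UNROLL)
m = tf.zeros_like(x[0])            # initial hidden state, shape [2, 8]
c = tf.zeros_like(x[0])            # initial cell state, shape [2, 8]
for t in range(NUM_UNROLL):
  c = c + x[t]                     # stand-in for the real cell update
  m = tf.tanh(c)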

Example 7: LSTMLoop10

 def LSTMLoop10(weights, inp):
   x = array_ops.unstack(inp, self.NUM_UNROLL)
   m = array_ops.zeros_like(x[0])
   c = array_ops.zeros_like(x[0])
   assert self.NUM_UNROLL % 10 == 0
   for i in range(0, self.NUM_UNROLL, 10):
     m, c = Loop10(weights, m, c, *x[i:i + 10])
   return m
Author: AbhinavJain13, Project: tensorflow, Lines: 8, Source: function_test.py

Example 8: _get_chol_and_x_compatible_shape

  def _get_chol_and_x_compatible_shape(self, x):
    """Return self.chol and x, (possibly) broadcast to compatible shape."""
    # x and chol are "compatible" if their shapes match, except that the last
    # two dimensions of chol are [k, k] and the last two of x are [k, 1].
    # E.g. x.shape = [A, B, k, 1], and chol.shape = [A, B, k, k]
    # This is required for the batch_triangular_solve, which does not broadcast.

    # TODO(langmore) This broadcast replicates matrices unnecessarily!  In the
    # case where
    # x.shape = [M1,...,Mr, N1,...,Nb, k], and chol.shape = [N1,...,Nb, k, k]
    # (which is common if x was sampled), the front dimensions of x can be
    # "flipped" to the end, making
    # x_flipped.shape = [N1,...,Nb, k, M1*...*Mr],
    # and this can be handled by the linear solvers.  This is preferred, because
    # it does not replicate the matrix, or create any new data.

    # We assume x starts without the trailing singleton dimension, e.g.
    # x.shape = [B, k].
    chol = self._chol
    with ops.op_scope([x] + self.inputs, 'get_chol_and_x_compatible_shape'):
      # If we determine statically that shapes match, we're done.
      if x.get_shape() == chol.get_shape()[:-1]:
        x_expanded = array_ops.expand_dims(x, -1)
        return chol, x_expanded

      # Dynamic check if shapes match or not.
      vector_shape = self.vector_shape()  # Shape of chol minus last dim.
      are_same_rank = math_ops.equal(
          array_ops.rank(x), array_ops.rank(vector_shape))

      def shapes_match_if_same_rank():
        return math_ops.reduce_all(math_ops.equal(
            array_ops.shape(x), vector_shape))

      shapes_match = control_flow_ops.cond(are_same_rank,
                                           shapes_match_if_same_rank,
                                           lambda: ops.convert_to_tensor(False))

      # Make tensors (never instantiated) holding the broadcast shape.
      # matrix_broadcast_dummy is the shape we will broadcast chol to.
      matrix_bcast_dummy = chol + array_ops.expand_dims(x, -1)
      # vector_bcast_dummy is the shape we will bcast x to, before we expand it.
      chol_minus_last_dim = math_ops.reduce_sum(chol, reduction_indices=[-1])
      vector_bcast_dummy = x + chol_minus_last_dim

      chol_bcast = chol + array_ops.zeros_like(matrix_bcast_dummy)
      x_bcast = x + array_ops.zeros_like(vector_bcast_dummy)

      chol_result = control_flow_ops.cond(shapes_match, lambda: chol,
                                          lambda: chol_bcast)
      chol_result.set_shape(matrix_bcast_dummy.get_shape())
      x_result = control_flow_ops.cond(shapes_match, lambda: x, lambda: x_bcast)
      x_result.set_shape(vector_bcast_dummy.get_shape())

      x_expanded = array_ops.expand_dims(x_result, -1)

      return chol_result, x_expanded
Author: 31H0B1eV, Project: tensorflow, Lines: 57, Source: operator_pd_cholesky.py
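The "dummy" tensors above are never evaluated for their values; they exist only so that zeros_like of them carries the broadcast shape, which materializes chol (and x) at that shape. A stripped-down sketch of the trick (shapes are assumptions for illustration; TensorFlow 1.x assumed):

import tensorflow as tf

chol = tf.eye(3)                                   # shape [3, 3]
x = tf.ones([5, 3])                                # batch of 5 vectors
matrix_bcast_dummy = chol + tf.expand_dims(x, -1)  # shape [5, 3, 3]
chol_bcast = chol + tf.zeros_like(matrix_bcast_dummy)
with tf.Session() as sess:
  print(sess.run(tf.shape(chol_bcast)))  # [5 3 3]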

Example 9: _cdf

  def _cdf(self, counts):
    counts = self._maybe_assert_valid_sample(counts)
    probs = self.probs
    if not (counts.shape.is_fully_defined()
            and self.probs.shape.is_fully_defined()
            and counts.shape.is_compatible_with(self.probs.shape)):
      # If both shapes are well defined and equal, we skip broadcasting.
      probs += array_ops.zeros_like(counts)
      counts += array_ops.zeros_like(self.probs)

    return _bdtr(k=counts, n=self.total_count, p=probs)
Author: AlbertXiebnu, Project: tensorflow, Lines: 11, Source: binomial.py
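When the static shapes cannot be proven equal, this method broadcasts each tensor against the other by adding zeros_like of its counterpart, so both end up on the common broadcast shape. A self-contained sketch (TensorFlow 1.x assumed):

import tensorflow as tf

counts = tf.constant([[0.0], [1.0], [2.0]])  # shape [3, 1]
probs = tf.constant([0.2, 0.5])              # shape [2]
probs_b = probs + tf.zeros_like(counts)      # shape [3, 2]
counts_b = counts + tf.zeros_like(probs)     # shape [3, 2]
with tf.Session() as sess:
  print(sess.run([tf.shape(probs_b), tf.shape(counts_b)]))  # [3 2], [3 2]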

Example 10: _forward

  def _forward(self, x):
    if self._unroll_loop:
      event_size = tensor_shape.dimension_value(
          x.shape.with_rank_at_least(1)[-1])
      if event_size is None:
        raise ValueError(
            "The final dimension of `x` must be known at graph construction "
            "time if `unroll_loop=True`. `x.shape: %r`" % x.shape)
      y = array_ops.zeros_like(x, name="y0")

      for _ in range(event_size):
        shift, log_scale = self._shift_and_log_scale_fn(y)
        # next_y = scale * x + shift
        next_y = x
        if log_scale is not None:
          next_y *= math_ops.exp(log_scale)
        if shift is not None:
          next_y += shift
        y = next_y
      return y

    event_size = array_ops.shape(x)[-1]
    # If the event size is available at graph construction time, we can inform
    # the graph compiler of the maximum number of steps. If not,
    # static_event_size will be None, and the maximum_iterations argument will
    # have no effect.
    static_event_size = tensor_shape.dimension_value(
        x.shape.with_rank_at_least(1)[-1])
    y0 = array_ops.zeros_like(x, name="y0")
    # call the template once to ensure creation
    _ = self._shift_and_log_scale_fn(y0)

    def _loop_body(index, y0):
      """While-loop body for autoregression calculation."""
      # Set caching device to avoid re-getting the tf.Variable for every while
      # loop iteration.
      with variable_scope_lib.variable_scope(
          variable_scope_lib.get_variable_scope()) as vs:
        if vs.caching_device is None:
          vs.set_caching_device(lambda op: op.device)
        shift, log_scale = self._shift_and_log_scale_fn(y0)
      y = x
      if log_scale is not None:
        y *= math_ops.exp(log_scale)
      if shift is not None:
        y += shift
      return index + 1, y

    _, y = control_flow_ops.while_loop(
        cond=lambda index, _: index < event_size,
        body=_loop_body,
        loop_vars=(0, y0),
        maximum_iterations=static_event_size)
    return y
Author: ahmedsaiduk, Project: tensorflow, Lines: 54, Source: masked_autoregressive.py
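The autoregressive forward pass starts from y0 = zeros_like(x) and refines it once per event dimension. A toy sketch of the same fixed-point while_loop shape, with a hypothetical shift function standing in for the masked network (TensorFlow 1.x assumed):

import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0])
event_size = tf.shape(x)[-1]
y0 = tf.zeros_like(x, name="y0")

def _loop_body(index, y):
  shift = 0.5 * y  # hypothetical stand-in for shift_and_log_scale_fn(y)
  return index + 1, x + shift

_, y = tf.while_loop(
    cond=lambda index, _: index < event_size,
    body=_loop_body,
    loop_vars=(0, y0))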

Example 11: fwd_gradients

def fwd_gradients(ys, xs, grad_xs=None, assert_unused=False):
  """Computes forward-mode derivatives.

  This is accomplished in pure-python using tensorflow's existing (reverse-mode)
  gradients. There is additional overhead on graph construction, but runtime
  performance should be equal to a manual implementation [citation needed].

  See https://j-towns.github.io/2017/06/12/A-new-trick.html and
  https://github.com/HIPS/autograd/pull/175 for the original discussion of this
  method, and https://github.com/renmengye/tensorflow-forward-ad for a "direct"
  implementation.

  Args:
    ys: A list of tensors.
    xs: A list of tensors.
    grad_xs: An optional list of tensors. If provided, must have the same length
      and shapes compatible with xs.
    assert_unused: Add assertions that intermediate values are not computed.
  Returns:
    A list of tensors of the same shapes as ys. The directional derivatives of
    ys with respect to xs in the direction grad_xs. Leaving grad_xs unspecified
    is equivalent to passing in 1s for each x in xs.
  """
  # This version of forward-mode autodiff is based on code by Tim Cooijmans
  # and handles list arguments and certain special cases such as when the
  # ys doesn't depend on one or more of the xs, and when tf.IndexedSlices are
  # generated by the first tf.gradients call.

  us = [array_ops.zeros_like(y) + float('nan') for y in ys]

  dydxs = gradients(ys, xs, grad_ys=us)

  # Deal with strange types that tf.gradients returns but cannot itself deal with.
  dydxs = [ops.convert_to_tensor(dydx) if isinstance(dydx, ops.IndexedSlices)
           else dydx for dydx in dydxs]

  if assert_unused:
    with ops.control_dependencies(dydxs):
      assert_unused = control_flow_ops.Assert(False, [1], name='fwd_gradients')
    with ops.control_dependencies([assert_unused]):
      dydxs = array_ops.identity_n(dydxs)

  dydxs = [array_ops.zeros_like(x) if dydx is None else dydx
           for x, dydx in zip(xs, dydxs)]
  for x, dydx in zip(xs, dydxs):
    dydx.set_shape(x.shape)

  dysdx = gradients(dydxs, us, grad_ys=grad_xs)

  return dysdx
Author: Ajaycs99, Project: tensorflow, Lines: 50, Source: fwd_gradients.py
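A hypothetical usage sketch showing the grad_xs direction argument (assuming TensorFlow 1.x and this module's fwd_gradients): for y = a * b at (a, b) = (2, 3), the directional derivative along (da, db) = (1, 0) is b * 1 + a * 0 = 3.

import tensorflow as tf

a = tf.constant(2.0)
b = tf.constant(3.0)
y = a * b
jvp = fwd_gradients([y], [a, b],
                    grad_xs=[tf.constant(1.0), tf.constant(0.0)])
with tf.Session() as sess:
  print(sess.run(jvp))  # [3.0]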

Example 12: sparsemax_loss

def sparsemax_loss(logits, sparsemax, labels, name=None):
  """Computes sparsemax loss function [1].

  [1]: https://arxiv.org/abs/1602.02068

  Args:
    logits: A `Tensor`. Must be one of the following types: `half`, `float32`,
      `float64`.
    sparsemax: A `Tensor`. Must have the same type as `logits`.
    labels: A `Tensor`. Must have the same type as `logits`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `logits`.
  """

  with ops.name_scope(name, "sparsemax_loss",
                      [logits, sparsemax, labels]) as name:
    logits = ops.convert_to_tensor(logits, name="logits")
    sparsemax = ops.convert_to_tensor(sparsemax, name="sparsemax")
    labels = ops.convert_to_tensor(labels, name="labels")

    # In the paper, they call the logits z.
    # A constant can be subtracted from the logits to make the algorithm
    # more numerically stable in theory. However, there is no major source
    # of numerical instability in this algorithm.
    z = logits

    # sum over support
    # Use a conditional where instead of a multiplication to support z = -inf.
    # If z = -inf, and there is no support (sparsemax = 0), a multiplication
    # would cause 0 * -inf = nan, which is not correct in this case.
    sum_s = array_ops.where(
        math_ops.logical_or(sparsemax > 0, math_ops.is_nan(sparsemax)),
        sparsemax * (z - 0.5 * sparsemax), array_ops.zeros_like(sparsemax))

    # - z_k + ||q||^2
    q_part = labels * (0.5 * labels - z)
    # Fix the case where labels = 0 and z = -inf, where q_part would
    # otherwise be 0 * -inf = nan. But since labels = 0, no cost for
    # z = -inf should be considered.
    # The code below also covers the case where z = inf. However, in this
    # case the sparsemax will be nan, which means sum_s will also be nan,
    # therefore this case doesn't need additional special treatment.
    q_part_safe = array_ops.where(
        math_ops.logical_and(math_ops.equal(labels, 0), math_ops.is_inf(z)),
        array_ops.zeros_like(z), q_part)

    return math_ops.reduce_sum(sum_s + q_part_safe, axis=1)
Author: Ajaycs99, Project: tensorflow, Lines: 49, Source: sparsemax_loss.py
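The key numerical trick in this loss is replacing a plain multiplication with a conditional where, so that 0 * -inf never produces NaN. A standalone illustration of the difference (values are assumptions for demonstration; TensorFlow 1.x assumed):

import tensorflow as tf

z = tf.constant([1.0, float("-inf")])
s = tf.constant([0.7, 0.0])  # sparsemax outputs; 0 means "no support"
naive = s * (z - 0.5 * s)    # second entry is 0 * -inf = nan
safe = tf.where(s > 0, s * (z - 0.5 * s), tf.zeros_like(s))
with tf.Session() as sess:
  print(sess.run(naive))  # [0.455   nan]
  print(sess.run(safe))   # [0.455 0.   ]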

Example 13: _forward

 def _forward(self, x):
   event_size = array_ops.shape(x)[-1]
   y0 = array_ops.zeros_like(x, name="y0")
   # call the template once to ensure creation
   _ = self._shift_and_log_scale_fn(y0)
   def _loop_body(index, y0):
     """While-loop body for autoregression calculation."""
     # Set caching device to avoid re-getting the tf.Variable for every while
     # loop iteration.
     with variable_scope_lib.variable_scope(
         variable_scope_lib.get_variable_scope()) as vs:
       if vs.caching_device is None:
         vs.set_caching_device(lambda op: op.device)
       shift, log_scale = self._shift_and_log_scale_fn(y0)
     y = x
     if log_scale is not None:
       y *= math_ops.exp(log_scale)
     if shift is not None:
       y += shift
     return index + 1, y
   _, y = control_flow_ops.while_loop(
       cond=lambda index, _: index < event_size,
       body=_loop_body,
       loop_vars=[0, y0])
   return y
Author: AbhinavJain13, Project: tensorflow, Lines: 25, Source: masked_autoregressive.py

Example 14: _logits_to_prediction

  def _logits_to_prediction(self, logits=None):
    predictions = {}
    predictions[PredictionKey.LOGITS] = logits
    logits = array_ops.concat(1, [array_ops.zeros_like(logits), logits])
    predictions[PredictionKey.CLASSES] = math_ops.argmax(logits, 1)

    return predictions
Author: caikehe, Project: tensorflow, Lines: 7, Source: head.py
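Concatenating zeros_like(logits) in front of a single binary logit yields a two-class logit pair [0, logit], so argmax recovers the 0/1 class. A sketch of the same idea using the modern concat argument order (the snippet above uses the pre-1.0 concat(axis, values) signature; TensorFlow 1.x assumed):

import tensorflow as tf

logits = tf.constant([[-1.3], [0.8]])  # one logit per example
two_class = tf.concat([tf.zeros_like(logits), logits], axis=1)
classes = tf.argmax(two_class, axis=1)
with tf.Session() as sess:
  print(sess.run(classes))  # [0 1]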

Example 15: _f

 def _f():
   # Note that there is a race condition here, so we do a best effort
   # updates here. We reset update_in_steps first so that other workers
   # don't duplicate the updates. Also we update cluster_center_vars
   # before resetting total_counts to avoid large updates to
   # cluster_centers_updated based on partially updated
   # cluster_center_vars.
   with ops.control_dependencies([
       state_ops.assign(update_in_steps,
                        self._mini_batch_steps_per_iteration - 1)
   ]):
     with ops.colocate_with(
         cluster_centers_updated, ignore_existing=True):
       if self._distance_metric == COSINE_DISTANCE:
         cluster_centers = nn_impl.l2_normalize(
             cluster_centers_updated, dim=1)
       else:
         cluster_centers = cluster_centers_updated
     with ops.colocate_with(cluster_centers_var, ignore_existing=True):
       with ops.control_dependencies(
           [state_ops.assign(cluster_centers_var, cluster_centers)]):
         with ops.colocate_with(None, ignore_existing=True):
           with ops.control_dependencies([
               state_ops.assign(total_counts,
                                array_ops.zeros_like(total_counts))
           ]):
             return array_ops.identity(update_in_steps)
Author: AnddyWang, Project: tensorflow, Lines: 27, Source: clustering_ops.py
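The innermost state_ops.assign(total_counts, zeros_like(total_counts)) resets an accumulator variable to zeros of its own shape and dtype. A minimal standalone sketch of that reset pattern (TensorFlow 1.x variables assumed):

import tensorflow as tf

total_counts = tf.Variable([5.0, 3.0, 7.0])
reset = tf.assign(total_counts, tf.zeros_like(total_counts))
with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run(reset)
  print(sess.run(total_counts))  # [0. 0. 0.]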


Note: The tensorflow.python.ops.array_ops.zeros_like examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors, and distribution and use should follow the license of the corresponding project. Do not repost without permission.