

Python array_ops.gather_nd Method Code Examples

This article collects typical usage examples of the tensorflow.python.ops.array_ops.gather_nd method in Python. If you are wondering how array_ops.gather_nd is used in practice, the selected code examples below may help. You can also explore further usage examples of the containing module, tensorflow.python.ops.array_ops.


The following sections present 11 code examples of the array_ops.gather_nd method, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
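Before the examples, here is a minimal sketch of what gather_nd does (this snippet is illustrative and not taken from any of the projects below): every row of `indices` is a full coordinate into `params`, and the op returns the element (or slice) found at each coordinate.

import tensorflow as tf

params = tf.constant([[1, 2], [3, 4], [5, 6]])
indices = tf.constant([[0, 1], [2, 0]])   # each row is a (row, col) coordinate
gathered = tf.gather_nd(params, indices)  # -> [2, 5]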

Example 1: scheduled_sampling

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import gather_nd [as alias]
def scheduled_sampling(self, batch_size, sampling_probability, true, estimate):
    with variable_scope.variable_scope("ScheduledEmbedding"):
      # Return -1s where we do not sample, and sample_ids elsewhere
      select_sampler = bernoulli.Bernoulli(probs=sampling_probability, dtype=tf.bool)
      select_sample = select_sampler.sample(sample_shape=batch_size)
      sample_ids = array_ops.where(
                  select_sample,
                  tf.range(batch_size),
                  gen_array_ops.fill([batch_size], -1))
      where_sampling = math_ops.cast(
          array_ops.where(sample_ids > -1), tf.int32)
      where_not_sampling = math_ops.cast(
          array_ops.where(sample_ids <= -1), tf.int32)
      _estimate = array_ops.gather_nd(estimate, where_sampling)
      _true = array_ops.gather_nd(true, where_not_sampling)

      base_shape = array_ops.shape(true)
      result1 = array_ops.scatter_nd(indices=where_sampling, updates=_estimate, shape=base_shape)
      result2 = array_ops.scatter_nd(indices=where_not_sampling, updates=_true, shape=base_shape)
      return result1 + result2 
Developer: yaserkl, Project: TransferRL, Lines of code: 23, Source file: run_summarization.py
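The gather_nd/scatter_nd pair above implements a partition-and-merge: positions selected for sampling take values from `estimate`, while all other positions keep `true`. Below is a minimal sketch of the same pattern using the public tf ops; the values are illustrative and not from the project.

import tensorflow as tf

true_vals = tf.constant([10., 20., 30., 40.])
estimate = tf.constant([1., 2., 3., 4.])
sample_ids = tf.constant([0, -1, 2, -1])  # -1 marks "do not sample"

where_sampling = tf.cast(tf.where(sample_ids > -1), tf.int32)
where_not_sampling = tf.cast(tf.where(sample_ids <= -1), tf.int32)
base_shape = tf.shape(true_vals)
# Scatter each partition back into a full-size tensor and add the two halves.
merged = (tf.scatter_nd(where_sampling, tf.gather_nd(estimate, where_sampling), base_shape)
          + tf.scatter_nd(where_not_sampling, tf.gather_nd(true_vals, where_not_sampling), base_shape))
# merged -> [1., 20., 3., 40.]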

Example 2: dense_to_sparse

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import gather_nd [as alias]
def dense_to_sparse(tensor, eos_token=0, outputs_collections=None, scope=None):
  """Converts a dense tensor into a sparse tensor.

  An example use would be to convert dense labels to sparse ones
  so that they can be fed to the ctc_loss.

  Args:
     tensor: An `int` `Tensor` to be converted to a `Sparse`.
     eos_token: An integer. It is part of the target label that signifies the
       end of a sentence.
     outputs_collections: Collection to add the outputs.
     scope: Optional scope for name_scope.
  """
  with variable_scope.variable_scope(scope, 'dense_to_sparse', [tensor]) as sc:
    tensor = ops.convert_to_tensor(tensor)
    indices = array_ops.where(
        math_ops.not_equal(tensor, constant_op.constant(eos_token,
                                                        tensor.dtype)))
    values = array_ops.gather_nd(tensor, indices)
    shape = array_ops.shape(tensor, out_type=dtypes.int64)
    outputs = sparse_tensor.SparseTensor(indices, values, shape)
    return utils.collect_named_outputs(outputs_collections, sc.name, outputs) 
Developer: taehoonlee, Project: tensornets, Lines of code: 24, Source file: layers.py
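A hedged usage sketch, assuming the function above is importable as `dense_to_sparse`: padded (eos-filled) label rows become a SparseTensor that can be fed to tf.nn.ctc_loss.

import tensorflow as tf

# 0 is used here as the eos/padding token and is dropped from the sparse result.
labels = tf.constant([[3, 7, 2, 0, 0],
                      [5, 1, 0, 0, 0]])
sparse_labels = dense_to_sparse(labels, eos_token=0)
# sparse_labels.values      -> [3, 7, 2, 5, 1]
# sparse_labels.dense_shape -> [2, 5]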

Example 3: dense_to_sparse_tensor

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import gather_nd [as alias]
def dense_to_sparse_tensor(dense_tensor, ignore_value=None):
  """Converts dense `Tensor` to `SparseTensor`, dropping `ignore_value` cells.

  Args:
    dense_tensor: A `Tensor`.
    ignore_value: Entries in `dense_tensor` equal to this value will be
      absent from the returned `SparseTensor`. If `None`, the default value of
      the `dense_tensor` dtype will be used (e.g. '' for `str`, 0 for `int`).

  Returns:
    A `SparseTensor` with the same shape as `dense_tensor`.

  Raises:
    ValueError: when `dense_tensor`'s rank is `None`.
  """
  with ops.name_scope("DenseToSparseTensor"):
    dense_tensor = ops.convert_to_tensor(dense_tensor)
    ignore_value = _ignore_value_tensor(dense_tensor.dtype, ignore_value)
    indices = array_ops.where(
        math_ops.not_equal(dense_tensor, ignore_value), name="indices")
    return sparse_tensor.SparseTensor(
        indices=indices,
        values=array_ops.gather_nd(dense_tensor, indices, name="values"),
        dense_shape=array_ops.shape(
            dense_tensor, out_type=dtypes.int64, name="dense_shape")) 
Developer: google-research, Project: tf-slim, Lines of code: 27, Source file: sparse_ops.py

Example 4: _SparseTensorDenseAddGrad

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import gather_nd [as alias]
def _SparseTensorDenseAddGrad(op, out_grad):
  sp_indices = op.inputs[0]
  # (sparse_indices, sparse_values, sparse_shape, dense)
  return (None, array_ops.gather_nd(out_grad, sp_indices), None, out_grad) 
Developer: ryfeus, Project: lambda-packs, Lines of code: 6, Source file: sparse_grad.py

Example 5: _SparseReduceSumGrad

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import gather_nd [as alias]
def _SparseReduceSumGrad(op, out_grad):
  """Similar to gradient for the Sum Op (i.e. tf.reduce_sum())."""
  sp_indices = op.inputs[0]
  sp_shape = op.inputs[2]
  output_shape_kept_dims = math_ops.reduced_shape(sp_shape, op.inputs[3])
  out_grad_reshaped = array_ops.reshape(out_grad, output_shape_kept_dims)
  scale = sp_shape // math_ops.to_int64(output_shape_kept_dims)
  # (sparse_indices, sparse_values, sparse_shape, reduction_axes)
  return (None, array_ops.gather_nd(out_grad_reshaped, sp_indices // scale),
          None, None) 
Developer: ryfeus, Project: lambda-packs, Lines of code: 12, Source file: sparse_grad.py

Example 6: _SparseDenseCwiseMulOrDivGrad

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import gather_nd [as alias]
def _SparseDenseCwiseMulOrDivGrad(op, grad, is_mul):
  """Common code for SparseDenseCwise{Mul,Div} gradients."""
  x_indices = op.inputs[0]
  x_shape = op.inputs[2]
  y = op.inputs[3]

  y_shape = math_ops.to_int64(array_ops.shape(y))
  num_added_dims = array_ops.expand_dims(
      array_ops.size(x_shape) - array_ops.size(y_shape), 0)
  augmented_y_shape = array_ops.concat(
      [array_ops.ones(num_added_dims, ops.dtypes.int64), y_shape], 0)

  scaling = x_shape // augmented_y_shape
  scaled_indices = x_indices // scaling
  scaled_indices = array_ops.slice(scaled_indices,
                                   array_ops.concat([[0], num_added_dims], 0),
                                   [-1, -1])
  dense_vals = array_ops.gather_nd(y, scaled_indices)

  if is_mul:
    dx = grad * dense_vals
    dy_val = grad * op.inputs[1]
  else:
    dx = grad / dense_vals
    dy_val = grad * (-op.inputs[1] / math_ops.square(dense_vals))
  # indices can repeat after scaling, so we can't use sparse_to_dense().
  dy = sparse_ops.sparse_add(
      array_ops.zeros_like(y),
      sparse_tensor.SparseTensor(scaled_indices, dy_val, y_shape))

  # (sp_indices, sp_vals, sp_shape, dense)
  return (None, dx, None, dy) 
Developer: ryfeus, Project: lambda-packs, Lines of code: 34, Source file: sparse_grad.py

Example 7: _ScatterNdGrad

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import gather_nd [as alias]
def _ScatterNdGrad(op, grad):
  indices = op.inputs[0]
  updates_grad = array_ops.gather_nd(grad, indices)
  return [None, updates_grad, None] 
Developer: ryfeus, Project: lambda-packs, Lines of code: 6, Source file: array_grad.py
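The rationale behind this gradient: gather_nd is the adjoint of scatter_nd with respect to `updates`, so reading the incoming gradient back at the scatter indices yields the per-update gradient. A small round-trip sketch with illustrative values:

import tensorflow as tf

indices = tf.constant([[1], [3]])
updates = tf.constant([10., 20.])
scattered = tf.scatter_nd(indices, updates, shape=[5])  # -> [0., 10., 0., 20., 0.]
recovered = tf.gather_nd(scattered, indices)            # -> [10., 20.]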

Example 8: _define_distance_to_clusters

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import gather_nd [as alias]
def _define_distance_to_clusters(self, data):
    """Defines the Mahalanobis distance to the assigned Gaussian."""
    # TODO(xavigonzalvo): reuse (input - mean) * cov^-1 * (input -
    # mean) from log probability function.
    self._all_scores = []
    for shard in data:
      all_scores = []
      shard = array_ops.expand_dims(shard, 0)
      for c in xrange(self._num_classes):
        if self._covariance_type == FULL_COVARIANCE:
          cov = self._covs[c, :, :]
        elif self._covariance_type == DIAG_COVARIANCE:
          cov = array_ops.diag(self._covs[c, :])
        inverse = linalg_ops.matrix_inverse(cov + self._min_var)
        inv_cov = array_ops.tile(
            array_ops.expand_dims(inverse, 0),
            array_ops.stack([self._num_examples, 1, 1]))
        diff = array_ops.transpose(shard - self._means[c, :, :], perm=[1, 0, 2])
        m_left = math_ops.matmul(diff, inv_cov)
        all_scores.append(
            math_ops.sqrt(
                math_ops.matmul(
                    m_left, array_ops.transpose(
                        diff, perm=[0, 2, 1]))))
      self._all_scores.append(
          array_ops.reshape(
              array_ops.concat(all_scores, 1),
              array_ops.stack([self._num_examples, self._num_classes])))

    # Distance to the associated class.
    self._all_scores = array_ops.concat(self._all_scores, 0)
    assignments = array_ops.concat(self.assignments(), 0)
    rows = math_ops.to_int64(math_ops.range(0, self._num_examples))
    indices = array_ops.concat(
        [array_ops.expand_dims(rows, 1), array_ops.expand_dims(assignments, 1)],
        1)
    self._scores = array_ops.gather_nd(self._all_scores, indices) 
Developer: ryfeus, Project: lambda-packs, Lines of code: 39, Source file: gmm_ops.py
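The final gather_nd above selects, for every example, the score in the column of its assigned cluster. The same one-entry-per-row pattern in isolation, with illustrative values:

import tensorflow as tf

scores = tf.constant([[0.1, 0.9],
                      [0.8, 0.2],
                      [0.4, 0.6]])
assignments = tf.constant([1, 0, 1], dtype=tf.int64)
rows = tf.range(3, dtype=tf.int64)
# Pair each row index with its assigned column to build full coordinates.
indices = tf.concat([tf.expand_dims(rows, 1), tf.expand_dims(assignments, 1)], 1)
per_example = tf.gather_nd(scores, indices)  # -> [0.9, 0.8, 0.6]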

Example 9: _SparseDenseCwiseMulOrDivGrad

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import gather_nd [as alias]
def _SparseDenseCwiseMulOrDivGrad(op, grad, is_mul):
  """Common code for SparseDenseCwise{Mul,Div} gradients."""
  x_indices = op.inputs[0]
  x_shape = op.inputs[2]
  y = op.inputs[3]

  y_shape = math_ops.to_int64(array_ops.shape(y))
  num_added_dims = array_ops.expand_dims(
      array_ops.size(x_shape) - array_ops.size(y_shape), 0)
  augmented_y_shape = array_ops.concat(0, [array_ops.ones(num_added_dims,
                                                          ops.dtypes.int64),
                                           y_shape])

  scaling = x_shape // augmented_y_shape
  scaled_indices = x_indices // scaling
  scaled_indices = array_ops.slice(scaled_indices,
                                   array_ops.concat(0, [[0], num_added_dims]),
                                   [-1, -1])
  dense_vals = array_ops.gather_nd(y, scaled_indices)

  if is_mul:
    dx = grad * dense_vals
    dy_val = grad * op.inputs[1]
  else:
    dx = grad / dense_vals
    dy_val = grad * (-op.inputs[1] / math_ops.square(dense_vals))
  # indices can repeat after scaling, so we can't use sparse_to_dense().
  dy = sparse_ops.sparse_add(
      array_ops.zeros_like(y),
      sparse_tensor.SparseTensor(scaled_indices, dy_val, y_shape))

  # (sp_indices, sp_vals, sp_shape, dense)
  return (None, dx, None, dy) 
Developer: tobegit3hub, Project: deep_image_model, Lines of code: 35, Source file: sparse_grad.py

Example 10: next_inputs

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import gather_nd [as alias]
def next_inputs(self, time, outputs, state, sample_ids, name=None):
    with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperNextInputs",
                        [time, outputs, state, sample_ids]):
      (finished, base_next_inputs, state) = (
          super(ScheduledEmbeddingTrainingHelper, self).next_inputs(
              time=time,
              outputs=outputs,
              state=state,
              sample_ids=sample_ids,
              name=name))

      def maybe_sample():
        """Perform scheduled sampling."""
        where_sampling = math_ops.cast(
            array_ops.where(sample_ids > -1), dtypes.int32)
        where_not_sampling = math_ops.cast(
            array_ops.where(sample_ids <= -1), dtypes.int32)
        sample_ids_sampling = array_ops.gather_nd(sample_ids, where_sampling)
        inputs_not_sampling = array_ops.gather_nd(
            base_next_inputs, where_not_sampling)
        sampled_next_inputs = self._embedding_fn(sample_ids_sampling)
        base_shape = array_ops.shape(base_next_inputs)
        return (array_ops.scatter_nd(indices=where_sampling,
                                     updates=sampled_next_inputs,
                                     shape=base_shape)
                + array_ops.scatter_nd(indices=where_not_sampling,
                                       updates=inputs_not_sampling,
                                       shape=base_shape))

      all_finished = math_ops.reduce_all(finished)
      next_inputs = control_flow_ops.cond(
          all_finished, lambda: base_next_inputs, maybe_sample)
      return (finished, next_inputs, state) 
Developer: NVIDIA, Project: OpenSeq2Seq, Lines of code: 35, Source file: helper.py

Example 11: next_inputs

# Required import: from tensorflow.python.ops import array_ops [as alias]
# Or: from tensorflow.python.ops.array_ops import gather_nd [as alias]
def next_inputs(self, time, outputs, state, sample_ids, name=None):
        """Gets the outputs for next step."""
        with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperNextInputs",
                            [time, outputs, state, sample_ids]):
            (finished, base_next_inputs, state) = (
                super(ScheduledEmbeddingTrainingHelper, self).next_inputs(
                    time=time,
                    outputs=outputs,
                    state=state,
                    sample_ids=sample_ids,
                    name=name))

            def maybe_sample():
                """Perform scheduled sampling."""
                where_sampling = math_ops.cast(
                    array_ops.where(sample_ids > -1), dtypes.int32)
                where_not_sampling = math_ops.cast(
                    array_ops.where(sample_ids <= -1), dtypes.int32)
                sample_ids_sampling = array_ops.gather_nd(sample_ids, where_sampling)
                inputs_not_sampling = array_ops.gather_nd(
                    base_next_inputs, where_not_sampling)

                if self._embedding_args_cnt == 1:
                    sampled_next_inputs = self._embedding_fn(
                        sample_ids_sampling)
                elif self._embedding_args_cnt == 2:
                    # Prepare the position embedding of the next step
                    times = tf.ones(self._batch_size, dtype=tf.int32) * (time+1)
                    sampled_next_inputs = self._embedding_fn(
                        sample_ids_sampling, times)
                base_shape = array_ops.shape(base_next_inputs)
                return (array_ops.scatter_nd(indices=where_sampling,
                                             updates=sampled_next_inputs,
                                             shape=base_shape)
                        + array_ops.scatter_nd(indices=where_not_sampling,
                                               updates=inputs_not_sampling,
                                               shape=base_shape))

            all_finished = math_ops.reduce_all(finished)
            next_inputs = control_flow_ops.cond(
                all_finished, lambda: base_next_inputs, maybe_sample)
            return (finished, next_inputs, state) 
Developer: qkaren, Project: Counterfactual-StoryRW, Lines of code: 44, Source file: tf_helpers.py


Note: the tensorflow.python.ops.array_ops.gather_nd examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors, and redistribution or use should follow the corresponding project's License. Please do not reproduce without permission.