

Python tensorflow.IndexedSlices Method Code Examples

This article collects typical code examples of the tensorflow.IndexedSlices method in Python. If you are wondering what tensorflow.IndexedSlices does and how to use it in practice, the curated examples below may help. You can also explore further usage examples from the tensorflow module it belongs to.


Fifteen code examples of the tensorflow.IndexedSlices method are shown below, ordered by popularity.
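
As a primer before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) showing what a tf.IndexedSlices holds: a values tensor, the row indices those values belong to, and an optional dense_shape. Calling tf.convert_to_tensor on it produces the equivalent dense Tensor.

import tensorflow as tf

# A sparse update touching rows 0 and 2 of a 4x3 matrix.
sparse = tf.IndexedSlices(
    values=tf.constant([[1., 1., 1.], [2., 2., 2.]]),
    indices=tf.constant([0, 2]),
    dense_shape=tf.constant([4, 3]))

# Densify: rows 1 and 3 are filled with zeros.
dense = tf.convert_to_tensor(sparse)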

Example 1: _clip_gradients

# Required import: import tensorflow [as alias]
# Or: from tensorflow import IndexedSlices [as alias]
def _clip_gradients(self, grad):
    """Clips gradients if the hyperparameter `gradient_clip_norm` requires it.

    Sparse tensors, in the form of IndexedSlices returned for the
    gradients of embeddings, require special handling.

    Args:
      grad: Gradient Tensor, IndexedSlices, or None.

    Returns:
      Optionally clipped gradient.
    """
    if grad is not None and self.hyperparams.gradient_clip_norm > 0:
      logging.info('Clipping gradient %s', grad)
      if isinstance(grad, tf.IndexedSlices):
        tmp = tf.clip_by_norm(grad.values, self.hyperparams.gradient_clip_norm)
        return tf.IndexedSlices(tmp, grad.indices, grad.dense_shape)
      else:
        return tf.clip_by_norm(grad, self.hyperparams.gradient_clip_norm)
    else:
      return grad 
Developer: ringringyi, Project: DOTA_models, Lines: 23, Source: graph_builder.py
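
The same pattern as a standalone, hedged sketch (the helper name and clip value here are illustrative, not from graph_builder.py): only the .values of a sparse gradient are clipped, and the IndexedSlices is rebuilt so the gradient stays sparse.

import tensorflow as tf

def clip_gradient(grad, clip_norm=10.0):
    # Illustrative helper: clip a dense or sparse gradient to clip_norm.
    if grad is None:
        return None
    if isinstance(grad, tf.IndexedSlices):
        clipped = tf.clip_by_norm(grad.values, clip_norm)
        return tf.IndexedSlices(clipped, grad.indices, grad.dense_shape)
    return tf.clip_by_norm(grad, clip_norm)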

Example 2: _add_gradients_summaries

# Required import: import tensorflow [as alias]
# Or: from tensorflow import IndexedSlices [as alias]
def _add_gradients_summaries(grads_and_vars):
  """Add histogram summaries to gradients.

  Note: The summaries are also added to the SUMMARIES collection.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).

  Returns:
    The _list_ of the added summaries for grads_and_vars.
  """
  summaries = []
  for grad, var in grads_and_vars:
    if grad is not None:
      if isinstance(grad, tf.IndexedSlices):
        grad_values = grad.values
      else:
        grad_values = grad
      summaries.append(tf.summary.histogram(var.op.name + ':gradient',
                                            grad_values))
      summaries.append(tf.summary.histogram(var.op.name + ':gradient_norm',
                                            tf.global_norm([grad_values])))
    else:
      tf.logging.info('Var %s has no gradient', var.op.name)
  return summaries 
Developer: ringringyi, Project: DOTA_models, Lines: 27, Source: model_deploy.py
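
Assuming a TF1-style training graph, the helper above would typically be driven like this (optimizer, loss, and the surrounding graph are assumptions for illustration, not part of model_deploy.py):

optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
grads_and_vars = optimizer.compute_gradients(loss)  # loss: your scalar loss op
summaries = _add_gradients_summaries(grads_and_vars)
train_op = optimizer.apply_gradients(grads_and_vars)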

Example 3: add_histogram_summary

# Required import: import tensorflow [as alias]
# Or: from tensorflow import IndexedSlices [as alias]
def add_histogram_summary(summary_name: str, value: tf.Tensor):
    """
    Add a histogram summary, replacing NaNs in the value at runtime if needed

    Parameters
    ----------
    summary_name
        name of the summary
    value
        histogram value to add to summary with name

    """
    if isinstance(value, tf.IndexedSlices):
        tf.summary.histogram(
            summary_name, tf_ops.replace_nan_with_zeros(value.values))
    else:
        tf.summary.histogram(summary_name, tf_ops.replace_nan_with_zeros(value)) 
Developer: audi, Project: nucleus7, Lines: 19, Source: model_utils.py

Example 4: compute_gradients

# Required import: import tensorflow [as alias]
# Or: from tensorflow import IndexedSlices [as alias]
def compute_gradients(self, loss, var_list=None,
                          gate_gradients=tf.train.Optimizer.GATE_OP,
                          aggregation_method=None,
                          colocate_gradients_with_ops=False,
                          grad_loss=None):
        grads_and_vars = self._optimizer.compute_gradients(
            loss * self._scale, var_list, gate_gradients,
            aggregation_method, colocate_gradients_with_ops, grad_loss)

        scaled_grads_and_vars = []

        for grad, var in grads_and_vars:
            if isinstance(grad, tf.IndexedSlices):
                grad = tf.IndexedSlices(grad.values / self._scale,
                                        grad.indices, grad.dense_shape)
            elif isinstance(grad, tf.Tensor):
                grad = grad / self._scale

            scaled_grads_and_vars.append((grad, var))

        return scaled_grads_and_vars 
Developer: THUNLP-MT, Project: THUMT, Lines: 23, Source: optimizers.py
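
This method implements static loss scaling for mixed-precision training: the loss is multiplied by _scale before differentiation (so small fp16 gradients stay representable) and every gradient is divided by _scale afterwards, leaving the result mathematically unchanged. A hedged sketch of the surrounding wrapper class (the constructor is illustrative; THUMT's actual class may differ):

class ScaledOptimizer(object):
    # Illustrative wrapper holding the fields the method above relies on.
    def __init__(self, optimizer, scale=128.0):
        self._optimizer = optimizer  # any tf.train.Optimizer
        self._scale = scale          # static loss-scaling factor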

Example 5: sparse_to_dense_grads

# Required import: import tensorflow [as alias]
# Or: from tensorflow import IndexedSlices [as alias]
def sparse_to_dense_grads(grads_and_vars):
  """Convert sparse gradients to dense gradients.

  All sparse gradients, which are represented as instances of tf.IndexedSlices,
  are converted to dense Tensors. Dense gradients, which are represented as
  Tensors, are unchanged.

  The reason for this conversion is that, for the small embeddings used by this
  model, applying dense gradients with the AdamOptimizer is faster than applying
  sparse gradients.

  Args:
    grads_and_vars: A list of (gradient, variable) tuples. Each gradient can
      be a Tensor or an IndexedSlices. Tensors are unchanged, and IndexedSlices
      are converted to dense Tensors.
  Returns:
    The same list of (gradient, variable) as `grads_and_vars`, except each
    IndexedSlices gradient is converted to a Tensor.
  """

  # Calling convert_to_tensor changes IndexedSlices into Tensors, and leaves
  # Tensors unchanged.
  return [(tf.convert_to_tensor(g), v) for g, v in grads_and_vars] 
Developer: IntelAI, Project: models, Lines: 25, Source: neumf_model.py
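
A quick hedged check of the densifying behavior (all values made up):

import tensorflow as tf

sparse_grad = tf.IndexedSlices(
    tf.constant([[0.1, 0.1]]), tf.constant([1]), tf.constant([3, 2]))
var = tf.Variable(tf.zeros([3, 2]))

dense_pairs = sparse_to_dense_grads([(sparse_grad, var)])
# dense_pairs[0][0] is a dense [3, 2] Tensor, zero everywhere except row 1.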

Example 6: run_sparse_sample

# Required import: import tensorflow [as alias]
# Or: from tensorflow import IndexedSlices [as alias]
def run_sparse_sample(iterations, expected, optimizer):
    var_0 = tf.Variable([1.0, 2.0])
    var_1 = tf.Variable([3.0, 4.0])

    grad_0 = tf.IndexedSlices(
        tf.constant([0.1, 0.2]), tf.constant([0, 1]), tf.constant([2])
    )
    grad_1 = tf.IndexedSlices(
        tf.constant([0.3, 0.4]), tf.constant([0, 1]), tf.constant([2])
    )

    grads_and_vars = list(zip([grad_0, grad_1], [var_0, var_1]))

    for _ in range(iterations):
        optimizer.apply_gradients(grads_and_vars)

    np.testing.assert_allclose(var_0.read_value(), expected[0], atol=2e-4)
    np.testing.assert_allclose(var_1.read_value(), expected[1], atol=2e-4) 
Developer: tensorflow, Project: addons, Lines: 20, Source: novograd_test.py

Example 7: run_sparse_sample

# Required import: import tensorflow [as alias]
# Or: from tensorflow import IndexedSlices [as alias]
def run_sparse_sample(iterations, optimizer, seed=0x2019):
    np.random.seed(seed)
    tf.random.set_seed(seed)

    val_0 = np.random.random((2,))
    val_1 = np.random.random((2,))

    var_0 = tf.Variable(val_0, dtype=tf.dtypes.float32)
    var_1 = tf.Variable(val_1, dtype=tf.dtypes.float32)

    grad_0 = tf.IndexedSlices(
        tf.constant([np.random.standard_normal()]), tf.constant([0]), tf.constant([2]),
    )
    grad_1 = tf.IndexedSlices(
        tf.constant([np.random.standard_normal()]), tf.constant([1]), tf.constant([2]),
    )

    grads_and_vars = list(zip([grad_0, grad_1], [var_0, var_1]))

    for _ in range(iterations):
        optimizer.apply_gradients(grads_and_vars)

    return [val_0, val_1], [var_0, var_1] 
Developer: tensorflow, Project: addons, Lines: 25, Source: lookahead_test.py

Example 8: matmul_right

# Required import: import tensorflow [as alias]
# Or: from tensorflow import IndexedSlices [as alias]
def matmul_right(self, x, adjoint=False, adjoint_arg=False, name="matmul"):  # pylint: disable=missing-docstring

    with self._name_scope(name):

      if isinstance(x, tf.IndexedSlices):
        return self._matmul_right_sparse(
            x, adjoint=adjoint, adjoint_arg=adjoint_arg)

      x = tf.convert_to_tensor(x, name="x")
      self._check_input_dtype(x)

      self_dim = -1 if adjoint else -2
      arg_dim = -2 if adjoint_arg else -1
      self.shape[self_dim].assert_is_compatible_with(x.get_shape()[arg_dim])

      return self._matmul_right(x, adjoint=adjoint, adjoint_arg=adjoint_arg) 
Developer: tensorflow, Project: kfac, Lines: 18, Source: linear_operator.py

Example 9: layer_params_to_mat2d

# Required import: import tensorflow [as alias]
# Or: from tensorflow import IndexedSlices [as alias]
def layer_params_to_mat2d(vector):
  """Converts a vector shaped like layer parameters to a 2D matrix.

  In particular, we reshape the weights/filter component of the vector to be
  2D, flattening all leading (input) dimensions. If there is a bias component,
  we concatenate it to the reshaped weights/filter component.

  Args:
    vector: A Tensor or pair of Tensors shaped like layer parameters.

  Returns:
    A 2D Tensor with the same coefficients and the same output dimension.
  """
  if isinstance(vector, (tuple, list)):
    w_part, b_part = vector
    w_part_reshaped = tf.reshape(w_part, [-1, w_part.shape.as_list()[-1]])
    return tf.concat((w_part_reshaped, tf.reshape(b_part, [1, -1])), axis=0)
  elif isinstance(vector, tf.IndexedSlices):
    return vector
  else:  # Tensor or Tensor-like.
    return tf.reshape(vector, [-1, vector.shape.as_list()[-1]]) 
Developer: tensorflow, Project: kfac, Lines: 23, Source: utils.py
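
For intuition, a hedged example with a made-up conv filter and bias: a [3, 3, 8, 16] filter flattens to [72, 16], and the bias contributes one extra row, giving [73, 16].

import tensorflow as tf

w = tf.zeros([3, 3, 8, 16])  # filter: all leading (input) dims are flattened
b = tf.zeros([16])           # bias: appended as one extra row

mat = layer_params_to_mat2d((w, b))
print(mat.shape)             # (73, 16)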

Example 10: mat2d_to_layer_params

# Required import: import tensorflow [as alias]
# Or: from tensorflow import IndexedSlices [as alias]
def mat2d_to_layer_params(vector_template, mat2d):
  """Converts a canonical 2D matrix representation back to a vector.

  Args:
    vector_template: A Tensor or pair of Tensors shaped like layer parameters.
    mat2d: A 2D Tensor with the same shape as the value of
        layer_params_to_mat2d(vector_template).

  Returns:
    A Tensor or pair of Tensors with the same coefficients as mat2d and the same
        shape as vector_template.
  """
  if isinstance(vector_template, (tuple, list)):
    w_part, b_part = mat2d[:-1], mat2d[-1]
    return tf.reshape(w_part, vector_template[0].shape), b_part
  elif isinstance(vector_template, tf.IndexedSlices):
    if not isinstance(mat2d, tf.IndexedSlices):
      raise TypeError(
          "If vector_template is an IndexedSlices, so should mat2d.")
    return mat2d
  else:
    return tf.reshape(mat2d, vector_template.shape) 
Developer: tensorflow, Project: kfac, Lines: 24, Source: utils.py
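
Reusing w, b, and mat from the sketch under Example 9, the round trip recovers the original shapes:

w2, b2 = mat2d_to_layer_params((w, b), mat)
# w2.shape == (3, 3, 8, 16) and b2.shape == (16,): the inverse mapping.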

Example 11: matmul_diag_sparse

# Required import: import tensorflow [as alias]
# Or: from tensorflow import IndexedSlices [as alias]
def matmul_diag_sparse(A_diag, B, name=None):  # pylint: disable=invalid-name
  """Computes matmul(A, B) where A is a diagonal matrix, B is sparse.

  Args:
    A_diag: diagonal entries of matrix A of shape [m, m].
    B: tf.IndexedSlices. Represents matrix of shape [m, n].
    name: str. Name of op.

  Returns:
    tf.IndexedSlices resulting from matmul(A, B).

  Raises:
    ValueError: If A_diag is not rank-1.
    ValueError: If B doesn't represent a matrix.
  """
  with tf.name_scope(name, "matmul_diag_sparse", [A_diag, B]):
    A_diag = tf.convert_to_tensor(A_diag)
    if A_diag.shape.ndims != 1:
      raise ValueError("A_diag must be a rank-1 Tensor.")
    if B.indices.shape.ndims != 1 or B.values.shape.ndims != 2:
      raise ValueError("B must represent a matrix. Found: %s." % B)
    a = tf.gather(A_diag, B.indices)
    a = tf.reshape(a, list(a.shape) + [1] * (B.values.shape.ndims - 1))
    return tf.IndexedSlices(a * B.values, B.indices, dense_shape=B.dense_shape) 
Developer: tensorflow, Project: kfac, Lines: 26, Source: utils.py
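
A small hedged usage (under TF1, since the three-argument tf.name_scope above is TF1-only); each row present in B is scaled by its matching diagonal entry:

import tensorflow as tf

A_diag = tf.constant([1., 2., 3.])  # diagonal of a [3, 3] matrix A
B = tf.IndexedSlices(               # sparse [3, 2] matrix with rows 0 and 2
    tf.constant([[1., 1.], [1., 1.]]),
    tf.constant([0, 2]),
    tf.constant([3, 2]))

out = matmul_diag_sparse(A_diag, B)
# out.values == [[1., 1.], [3., 3.]]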

Example 12: gradients

# Required import: import tensorflow [as alias]
# Or: from tensorflow import IndexedSlices [as alias]
def gradients(self, objective, parameters):
    """Compute gradients of the objective with respect to the parameters.

    Args:
      objective: The objective op (e.g. output of self.objective())
      parameters: A list of tensors (the parameters to optimize)

    Returns:
      A list of tensors representing the gradient for each parameter,
        returned in the same order as the given list
    """
    grads = tf.gradients(objective, list(parameters))
    noisy_grads = []

    for grad in grads:
      if isinstance(grad, tf.IndexedSlices):
        noise = self.noise_stdev * tf.random_normal(tf.shape(grad.values))
        new_grad = tf.IndexedSlices(grad.values + noise, grad.indices)
      else:
        new_grad = grad + self.noise_stdev * tf.random_normal(grad.get_shape())
      noisy_grads.append(new_grad)

    return noisy_grads 
Developer: rky0930, Project: yolo_v2, Lines: 25, Source: problem_generator.py

Example 13: _deduplicate_indexed_slices

# Required import: import tensorflow [as alias]
# Or: from tensorflow import IndexedSlices [as alias]
def _deduplicate_indexed_slices(values, indices):
    """Sums `values` associated with any non-unique `indices`.
    Args:
      values: A `Tensor` with rank >= 1.
      indices: A one-dimensional integer `Tensor`, indexing into the first
      dimension of `values` (as in an IndexedSlices object).
    Returns:
      A tuple of (`summed_values`, `unique_indices`) where `unique_indices` is a
      de-duplicated version of `indices` and `summed_values` contains the sum of
      `values` slices associated with each unique index.
    """
    unique_indices, new_index_positions = tf.unique(indices)
    summed_values = tf.unsorted_segment_sum(
      values, new_index_positions,
      tf.shape(unique_indices)[0])
    return (summed_values, unique_indices) 
Developer: searobbersduck, Project: ELMo_Chin, Lines: 18, Source: training.py
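
A worked example with made-up values: indices [0, 0, 2] carry two updates to row 0, which are summed into one slice.

import tensorflow as tf

values = tf.constant([[1., 1.], [2., 2.], [5., 5.]])
indices = tf.constant([0, 0, 2])

summed, unique = _deduplicate_indexed_slices(values, indices)
# unique == [0, 2]; summed == [[3., 3.], [5., 5.]]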

Example 14: clip_by_global_norm_summary

# Required import: import tensorflow [as alias]
# Or: from tensorflow import IndexedSlices [as alias]
def clip_by_global_norm_summary(t_list, clip_norm, norm_name, variables):
    # Wrapper around tf.clip_by_global_norm that also creates summary ops for the norms.

    # compute norms
    # use global_norm with one element to handle IndexedSlices vs dense
    norms = [tf.global_norm([t]) for t in t_list]

    # summary ops before clipping
    summary_ops = []
    for ns, v in zip(norms, variables):
        name = 'norm_pre_clip/' + v.name.replace(":", "_")
        summary_ops.append(tf.summary.scalar(name, ns))

    # clip 
    clipped_t_list, tf_norm = tf.clip_by_global_norm(t_list, clip_norm)

    # summary ops after clipping
    norms_post = [tf.global_norm([t]) for t in clipped_t_list]
    for ns, v in zip(norms_post, variables):
        name = 'norm_post_clip/' + v.name.replace(":", "_")
        summary_ops.append(tf.summary.scalar(name, ns))

    summary_ops.append(tf.summary.scalar(norm_name, tf_norm))

    return clipped_t_list, tf_norm, summary_ops 
Developer: searobbersduck, Project: ELMo_Chin, Lines: 27, Source: training.py
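
Hedged TF1-style usage, assuming loss and tvars already exist in the graph:

grads = tf.gradients(loss, tvars)
clipped, global_norm, norm_summaries = clip_by_global_norm_summary(
    grads, clip_norm=10.0, norm_name='global_grad_norm', variables=tvars)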

Example 15: _all_reduce_using_copy

# Required import: import tensorflow [as alias]
# Or: from tensorflow import IndexedSlices [as alias]
def _all_reduce_using_copy(tensors_across_devices, use_mean):
  """Does an all-reduce of a list of tensors by copying to the current device.

  The tensors are copied to the current device and then reduced.

  Args:
    tensors_across_devices: A list of tensors, each on a different device.
    use_mean: Whether to take the mean of the tensors instead of a sum.

  Returns:
    A reduced tensor on the current device.
  """
  assert tensors_across_devices
  if isinstance(tensors_across_devices[0], tf.IndexedSlices):
    reduced_tensor = gradients_impl._AggregateIndexedSlicesGradients(
        tensors_across_devices)
    if use_mean:
      val = tf.multiply(reduced_tensor.values,
                        float(1. / len(tensors_across_devices)))
      reduced_tensor = tf.IndexedSlices(val, reduced_tensor.indices,
                                        reduced_tensor.dense_shape)
  else:
    reduced_tensor = tf.add_n(tensors_across_devices)
    if use_mean:
      reduced_tensor *= 1. / len(tensors_across_devices)
  return reduced_tensor 
Developer: mlperf, Project: training_results_v0.5, Lines: 27, Source: batch_allreduce.py
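
A hedged check of the dense path (the sparse path depends on TensorFlow's private gradients_impl module, so it is not exercised here):

import tensorflow as tf

t0 = tf.constant([1., 2.])
t1 = tf.constant([3., 4.])
mean = _all_reduce_using_copy([t0, t1], use_mean=True)  # [2., 3.]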


Note: The tensorflow.IndexedSlices method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are excerpted from open-source projects contributed by the community, and copyright in the source code remains with the original authors; please consult each project's License before distributing or using the code. Do not repost without permission.