

Python gen_array_ops.identity Function Code Examples

This article collects typical usage examples of the Python function tensorflow.python.ops.gen_array_ops.identity. If you are unsure how identity is used in practice, or are looking for concrete examples, the curated code samples below should help.


Below are 11 code examples of the identity function, sorted by popularity by default.
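
Before the examples, here is a minimal sketch of what the op does (assuming a standard TensorFlow install; note that gen_array_ops is an internal generated-op module, so the public tf.identity wrapper is normally preferred):

import tensorflow as tf
from tensorflow.python.ops import gen_array_ops

x = tf.constant([1.0, 2.0])
# Returns a new Tensor object with the same value and dtype as `x`.
y = gen_array_ops.identity(x)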

Example 1: _ensure_unique_tensor_objects

def _ensure_unique_tensor_objects(parameter_positions, args):
  """Make each of the parameter_positions in args a unique ops.Tensor object.

  Ensure that each parameter is treated independently.
  For example:

  def f(x, y): return x * y
  g = gradients_function(f)
  one = tf.constant(1.)

  g(one, one) should return [1., 1.]
  (even though the two arguments are the same Tensor object).

  Args:
    parameter_positions: List of indices into args defining the arguments to
      differentiate against.
    args: A list of arguments to the function to be differentiated.

  Returns:
    args, possibly edited in-place.
  """
  s = set()
  for (i, t) in enumerate(args):
    if i in parameter_positions:
      tid = ops.tensor_id(t)
      if tid in s:
        args[i] = gen_array_ops.identity(args[i])
      else:
        s.add(tid)
  return args
Developer: adit-chandra, Project: tensorflow, Lines: 30, Source: backprop.py
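
As a hedged illustration of the aliasing problem this helper solves (a sketch based on the docstring above, assuming eager execution and that gradients_function is importable from the same backprop module):

import tensorflow as tf
from tensorflow.python.eager import backprop

def f(x, y):
  return x * y

g = backprop.gradients_function(f)
one = tf.constant(1.0)
# Without the identity copy, passing the same Tensor object twice could
# conflate the two parameters on the tape; with it, each position gets
# its own gradient: [1.0, 1.0].
print(g(one, one))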

Example 2: decorated

  def decorated(*args, **kwds):
    """Computes the value and gradient of the decorated function."""
    parameter_positions = _get_arg_spec(f, params, args)
    assert not kwds, "The gradient function can't take keyword arguments."
    this_tape = tape.push_new_tape(persistent=persistent)
    try:
      sources = []
      args = [
          ops.convert_to_tensor(args[i])
          if i in parameter_positions else args[i]
          for i in range(len(args))
      ]
      args = _ensure_unique_tensor_objects(parameter_positions, args)
      for i in parameter_positions:
        sources.append(args[i])
        tape.watch(this_tape, args[i])
      result = f(*args)
      if result is None:
        raise ValueError("Cannot differentiate a function that returns None; "
                         "did you forget to return a value from {}?".format(
                             f.__name__))
      flat_result = nest.flatten(result)
      flat_result = [gen_array_ops.identity(x) for x in flat_result]
      result = nest.pack_sequence_as(result, flat_result)
    finally:
      tape.pop_tape(this_tape)
    def vjp(dy=None):
      if dy is not None:
        dy = [ops.convert_to_tensor(x) for x in nest.flatten(dy)]
      return imperative_grad.imperative_grad(
          this_tape, nest.flatten(result), sources, output_gradients=dy)

    return result, vjp
Developer: adit-chandra, Project: tensorflow, Lines: 33, Source: backprop.py
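
A hedged usage sketch of the value-and-VJP pattern this decorated function implements (assuming eager execution and that it is exposed as backprop.make_vjp in this source tree):

import tensorflow as tf
from tensorflow.python.eager import backprop

def square(x):
  return x * x

# `make_vjp` wraps `square` so one call yields both the function value
# and a vector-Jacobian-product callable over the watched inputs.
value, vjp = backprop.make_vjp(square)(tf.constant(3.0))
print(value)     # 9.0
print(vjp(1.0))  # [6.0], the gradient scaled by dy = 1.0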

Example 3: compute_gradients

  def compute_gradients(self, loss, *args, **kwargs):
    # Record current global step for worker.
    with ops.colocate_with(loss):
      self._local_step = training_util.get_global_step() + 0

    with ops.control_dependencies([self._local_step]):
      loss = gen_array_ops.identity(loss)
      return self._opt.compute_gradients(loss, *args, **kwargs)
Developer: sandeepgupta2k4, Project: tensorflow, Lines: 8, Source: drop_stale_gradient_optimizer.py
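
The control-dependency-plus-identity idiom used here can be shown in isolation. A minimal sketch, assuming TF 1.x graph mode:

import tensorflow as tf
from tensorflow.python.ops import gen_array_ops

step = tf.train.get_or_create_global_step()
loss = tf.constant(0.5)
# `step + 0` forces a fresh read of the step variable; the identity then
# produces a loss tensor that cannot be evaluated before that read.
with tf.control_dependencies([step + 0]):
  loss = gen_array_ops.identity(loss)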

Example 4: decorated

  def decorated(*args, **kwargs):
    """Decorated function with custom gradient."""
    if context.in_graph_mode():
      if kwargs:
        raise ValueError(
            "custom_gradient in graph mode doesn't support keyword arguments.")
      name = "CustomGradient-%s" % tf_ops.uid()
      args = [tf_ops.convert_to_tensor(x) for x in args]
      result, grad_fn = f(*args)
      flat_result = nest.flatten(result)
      all_tensors = flat_result + args

      @tf_ops.RegisterGradient(name)
      def internal_grad_fn(unused_op, *result_grads):  # pylint: disable=unused-variable
        gradients = nest.flatten(grad_fn(*result_grads[:len(flat_result)]))
        # Need to return one value per input to the IdentityN, so pad the
        # gradients of the inputs of the custom_gradient function with the
        # gradients of the outputs as well.
        return ([None] * len(flat_result)) + gradients

      with tf_ops.get_default_graph().gradient_override_map(
          {"IdentityN": name}):
        all_tensors = array_ops.identity_n(all_tensors)
      return nest.pack_sequence_as(
          structure=result, flat_sequence=all_tensors[:len(flat_result)])

    input_tensors = [tf_ops.convert_to_tensor(x) for x in args]

    with tape.stop_recording():
      result, grad_fn = f(*args, **kwargs)
      flat_result = nest.flatten(result)
      # TODO(apassos) consider removing the identity below.
      flat_result = [gen_array_ops.identity(x) for x in flat_result]

    def actual_grad_fn(*outputs):
      return nest.flatten(grad_fn(*outputs))

    tape.record_operation(
        f.__name__,
        flat_result,
        input_tensors,
        actual_grad_fn)
    flat_result = list(flat_result)
    return nest.pack_sequence_as(result, flat_result)
Developer: AbhinavJain13, Project: tensorflow, Lines: 44, Source: custom_gradient.py

Example 5: _eager_mode_decorator

def _eager_mode_decorator(f, *args, **kwargs):
  """Implement custom gradient decorator for eager mode."""
  with backprop.GradientTape() as tape:
    result, grad_fn = f(*args, **kwargs)
  all_inputs = list(args) + list(kwargs.values())
  # The variables that grad_fn needs to return gradients for are the set of
  # variables used that are *not* part of the inputs.
  variables = [v for v in set(tape.watched_variables()) if v not in all_inputs]
  grad_argspec = tf_inspect.getfullargspec(grad_fn)
  if (variables and ("variables" not in grad_argspec.args) and
      not grad_argspec.varkw):
    raise TypeError("If using @custom_gradient with a function that "
                    "uses variables, then grad_fn must accept a keyword "
                    "argument 'variables'.")
  flat_result = nest.flatten(result)
  # TODO(apassos) consider removing the identity below.
  flat_result = [gen_array_ops.identity(x) for x in flat_result]

  input_tensors = [ops.convert_to_tensor(x) for x
                   in list(args) + list(variables)]
  arg_count = len(args)
  def actual_grad_fn(*result_grads):
    """Custom grad fn wrapper."""
    if variables:
      input_grads, variable_grads = grad_fn(*result_grads, variables=variables)
      if len(variable_grads) != len(variables):
        raise ValueError("Must return gradient for each variable from "
                         "@custom_gradient grad_fn.")
    else:
      input_grads = grad_fn(*result_grads)
      variable_grads = []
    flat_grads = nest.flatten(input_grads)
    if len(flat_grads) != arg_count:
      raise ValueError(
          "custom_gradient function expected to return", arg_count,
          "gradients but returned", len(flat_grads), "instead.")
    return nest.flatten(input_grads) + variable_grads

  tape_lib.record_operation(f.__name__, flat_result, input_tensors,
                            actual_grad_fn)
  flat_result = list(flat_result)
  return nest.pack_sequence_as(result, flat_result)
Developer: adit-chandra, Project: tensorflow, Lines: 42, Source: custom_gradient.py
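
Both the graph-mode path (Example 4) and the eager path (Example 5) back the public tf.custom_gradient decorator. A hedged usage sketch, following the standard documentation pattern:

import tensorflow as tf

@tf.custom_gradient
def log1pexp(x):
  e = tf.exp(x)
  def grad(dy):
    # Numerically stable gradient of log(1 + exp(x)).
    return dy * (1 - 1 / (1 + e))
  return tf.math.log(1 + e), grad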

Example 6: benchmark_tf_gradient_forward_identity

  def benchmark_tf_gradient_forward_identity(self):
    with backprop.GradientTape() as tape:
      m = self._m_2
      tape.watch(m)
      self._run(lambda: gen_array_ops.identity(m), 30000)
Developer: AbhinavJain13, Project: tensorflow, Lines: 5, Source: benchmarks_test.py

Example 7: benchmark_tf_identity

  def benchmark_tf_identity(self):
    m = self._m_2
    self._run(lambda: gen_array_ops.identity(m), 30000)
Developer: AbhinavJain13, Project: tensorflow, Lines: 3, Source: benchmarks_test.py

Example 8: _DropGradientOp

  def _DropGradientOp():
    return gen_array_ops.identity(1.0)
Developer: sandeepgupta2k4, Project: tensorflow, Lines: 2, Source: drop_stale_gradient_optimizer.py

Example 9: _AcceptGradientOp

  def _AcceptGradientOp():
    with ops.control_dependencies(
        [self._opt.apply_gradients(
            grads_and_vars, global_step=global_step, name=name)]):
      return gen_array_ops.identity(0.0)
Developer: sandeepgupta2k4, Project: tensorflow, Lines: 5, Source: drop_stale_gradient_optimizer.py
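
Examples 8 and 9 are the two branches of a tf.cond inside the stale-gradient optimizer: identity on a constant gives each branch a tensor output of matching type. A simplified, self-contained sketch of the pattern (the names and the staleness threshold here are hypothetical stand-ins):

import tensorflow as tf
from tensorflow.python.ops import gen_array_ops

def _accept():
  # 0.0 signals that the gradient was applied.
  return gen_array_ops.identity(0.0)

def _drop():
  # 1.0 signals that a stale gradient was dropped.
  return gen_array_ops.identity(1.0)

staleness = tf.constant(3)
gradient_was_dropped = tf.cond(staleness > 2, _drop, _accept)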

Example 10: benchmark_tf_gradient_function_no_op

  def benchmark_tf_gradient_function_no_op(self):
    with context.device(CPU):
      m = gen_array_ops.identity(self._m_2)
      self._run(lambda: backprop.gradients_function(lambda x: x, [0])(m), 30000)
Developer: becster, Project: tensorflow, Lines: 4, Source: benchmarks_test.py

Example 11: benchmark_slowpath_tf_identity

  def benchmark_slowpath_tf_identity(self):
    self._run(lambda: gen_array_ops.identity(1), 30000)
Developer: becster, Project: tensorflow, Lines: 2, Source: benchmarks_test.py


Note: The tensorflow.python.ops.gen_array_ops.identity examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright in the source code remains with the original authors. When redistributing or using the code, please follow the corresponding project's license; do not reproduce without permission.