Python tape.push_new_tape Function Code Examples

This article collects typical usage examples of the Python function tensorflow.python.eager.tape.push_new_tape. If you are wondering what push_new_tape does, how to call it, or what real-world uses look like, the curated examples below may help.


The following shows 15 code examples of the push_new_tape function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
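Before the examples, here is a minimal sketch of the pattern most of the snippets below share: push a fresh tape, watch the tensors you want gradients for, run the computation, and pop the tape. Note that tensorflow.python.eager.tape is a private TensorFlow module whose API has changed between releases; this sketch assumes a version in which push_new_tape() returns the tape object and pop_tape() accepts it (as in Examples 5 and 13), so treat it as illustrative rather than a stable recipe.

# A minimal sketch of the push_new_tape / pop_tape pattern used below.
# Assumption: a TensorFlow version whose private tensorflow.python.eager.tape
# module exposes push_new_tape() returning the tape and pop_tape(tape),
# as in Examples 5 and 13; this internal API is not stable across releases.
from tensorflow.python.eager import tape
from tensorflow.python.framework import constant_op

x = constant_op.constant(3.0)
t = tape.push_new_tape()  # start recording onto a fresh tape
try:
  tape.watch(x)           # mark x as a gradient source
  y = x * x               # operations on watched tensors are recorded
finally:
  tape.pop_tape(t)        # always pop the tape, even if the computation raises
# The popped tape is then handed to imperative_grad to compute dy/dx,
# as the grad_fn examples below do.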

Example 1: grad_fn

  def grad_fn(*args):
    """Computes the gradient of the wrapped function."""
    tape.push_new_tape()
    try:
      end_node = f(*args)
      if end_node is None:
        raise ValueError("Cannot differentiate a function that returns None; "
                         "did you forget to return a value from {}?".format(
                             f.__name__))
    finally:
      popped_tape = tape.pop_tape()
    # Sorting variables by id, which is monotonically increasing in construction
    # order. This ensures unique order across executions.
    variables = list(sorted(popped_tape.watched_variables(),
                            key=lambda v: v.handle._id))  # pylint: disable=protected-access
    sources = [x.handle for x in variables]

    if not sources:
      raise ValueError("No trainable variables were accessed while the "
                       "function was being computed.")
    grad = imperative_grad.imperative_grad(_default_vspace,
                                           popped_tape,
                                           nest.flatten(end_node),
                                           sources)
    return end_node, list(zip(grad, variables))
Developer: autodrive, Project: tensorflow, Lines: 25, Source: backprop.py

Example 2: decorated

 def decorated(*args, **kwds):
   """Computes the value and gradient of the decorated function."""
   parameter_positions = _get_arg_spec(f, params, args)
   assert not kwds, "The gradient function can't take keyword arguments."
   tape.push_new_tape()
   try:
     sources = []
     args = [
         ops.convert_to_tensor(args[i])
         if i in parameter_positions else args[i]
         for i in range(len(args))
     ]
     args = _ensure_unique_tensor_objects(parameter_positions, args)
     for i in parameter_positions:
       sources.append(args[i])
       tape.watch(args[i])
     result = f(*args)
     if result is None:
       raise ValueError("Cannot differentiate a function that returns None; "
                        "did you forget to return a value from {}?".format(
                            f.__name__))
     flat_result = nest.flatten(result)
     flat_result = [gen_array_ops.identity(x) for x in flat_result]
     result = nest.pack_sequence_as(result, flat_result)
   finally:
     t = tape.pop_tape()
   def vjp(dy=None):
     if dy is not None:
       dy = [ops.convert_to_tensor(x) for x in nest.flatten(dy)]
     return imperative_grad.imperative_grad(
         _default_vspace, t, nest.flatten(result), sources,
         output_gradients=dy)
   return result, vjp
Developer: SylChan, Project: tensorflow, Lines: 33, Source: backprop.py

Example 3: grad_fn

 def grad_fn(*args):
   """Computes the gradient of the wrapped function."""
   tape.push_new_tape()
   end_node = f(*args)
   variables = tape.top_tape_watched_variables()
   sources = [x.handle for x in variables]
   grad = imperative_grad(end_node, sources)
   return end_node, list(zip(grad, variables))
Developer: Crazyonxh, Project: tensorflow, Lines: 8, Source: backprop.py

Example 4: grad_fn

  def grad_fn(*args):
    """Computes the gradient of the wrapped function."""
    tape.push_new_tape()
    end_node = f(*args)
    variables = tape.top_tape_watched_variables()
    sources = [x.handle for x in variables]

    if not sources:
      raise ValueError("no trainable variables were accessed while the "
                       "function was being computed.")
    grad = imperative_grad.imperative_grad(_default_vspace,
                                           tape.pop_tape(),
                                           nest.flatten(end_node),
                                           sources)
    return end_node, list(zip(grad, variables))
Developer: DjangoPeng, Project: tensorflow, Lines: 15, Source: backprop.py

Example 5: _defun_internal

def _defun_internal(name, func, args, kwds):
  """Defines and returns graph-mode version of func."""
  graph_key = ops.get_default_graph()._graph_key  # pylint: disable=protected-access
  with context.graph_mode():
    captures = {}
    tmp_graph = CapturingGraph(captures)
    # Inherit the graph key, since this is used for matching variables in
    # optimizers.
    tmp_graph._graph_key = graph_key  # pylint: disable=protected-access
    # Copy the graph collections to ensure summaries and other things work. This
    # lets the function access (but not mutate) collections of the containing
    # graph, such as the global step and the summary writer collections.
    curr_graph = ops.get_default_graph()
    for collection in curr_graph.collections:
      tmp_graph.get_collection_ref(collection)[:] = curr_graph.get_collection(
          collection)
    with tmp_graph.as_default():
      func_inputs = _get_defun_inputs(args)

      with capture_tensors(captures):
        this_tape = tape.push_new_tape()
        try:
          func_outputs = func(*func_inputs, **kwds)
        finally:
          tape.pop_tape(this_tape)
        variables = this_tape.watched_variables()

        # Returning a closed-over tensor as an output does not trigger a
        # call to convert_to_tensor, so we manually capture all such tensors.
        outputs_list = _flatten(func_outputs)
        func_def_outputs = [
            _convert_to_graph_tensor(x) for x in outputs_list if x is not None
        ]

      ids = list(sorted(captures.keys()))
      if ids:
        extra_inputs, extra_placeholders = zip(*[captures[x] for x in ids])
      else:
        extra_inputs = []
        extra_placeholders = []
      output_shapes = tuple(
          x.shape if isinstance(x, ops.Tensor) else None
          for x in outputs_list)

  flat_inputs = [x for x in nest.flatten(func_inputs)
                 if isinstance(x, ops.Tensor)]
  all_inputs = flat_inputs + list(extra_placeholders)
  all_ignored_ops = frozenset(x.op for x in all_inputs)
  fname = _inference_name(name)
  operations = tuple(x for x in tmp_graph.get_operations()
                     if x not in all_ignored_ops)
  # Register any other functions defined in the graph
  # TODO(ashankar): Oh lord, forgive me for this lint travesty.
  if context.in_eager_mode():
    for f in tmp_graph._functions.values():  # pylint: disable=protected-access
      # TODO(ashankar): What about the gradient registry?
      _register(f._c_func)  # pylint: disable=protected-access
  return GraphModeFunction(
      fname, all_inputs, extra_inputs, tmp_graph, operations, func_def_outputs,
      func_outputs, output_shapes, variables)
Developer: ChengYuXiang, Project: tensorflow, Lines: 60, Source: function.py

Example 6: decorated

 def decorated(*args, **kwds):
   """Computes the value and gradient of the decorated function."""
   dy = kwds.pop("dy", None)
   if dy is not None:
     dy = ops.convert_to_tensor(dy)
   assert not kwds, "The gradient function can't take keyword arguments."
   tape.push_new_tape()
   sources = []
   args = [ops.convert_to_tensor(x) for x in args]
   for i in parameter_positions:
     sources.append(args[i])
     tape.watch(args[i])
   result = f(*args)
   return result, imperative_grad(
       result,
       sources,
       output_gradients=dy)
Developer: 1000sprites, Project: tensorflow, Lines: 17, Source: backprop.py

Example 7: grad_fn

 def grad_fn(*args, **kwds):
   """Computes the gradient of the wrapped function."""
   tape.push_new_tape()
   end_node = f(*args)
   start_node = tape.pop_tape()
   ag_core.active_progenitors.remove(start_node)
   if not ag_core.isnode(end_node):
     raise ValueError(
         "Target not part of a computation being traced. %s" % end_node)
   if start_node not in end_node.progenitors:
     raise ValueError("Target not derived from source. %s %s" %
                      (end_node.progenitors, repr(start_node)))
   output_gradients = kwds.get("output_gradients", None)
   if output_gradients is None:
     output_gradients = _ones(end_node.shape, end_node.dtype)
   grad = ag_core.backward_pass(output_gradients, end_node, start_node)
   return end_node.value, _aggregate_grads(grad.gradients)
Developer: keveman, Project: tensorflow, Lines: 17, Source: backprop.py

Example 8: testTapeGC

  def testTapeGC(self):
    # TODO(apassos) figure out how to test this without using tape internal
    # APIs.
    tape.push_new_tape()

    def f():
      x = constant_op.constant(1.0)
      tape.watch(x)
      x = gradient_is_constant(x)
      x = gradient_is_constant(x)
      x = gradient_is_constant(x)

    f()
    t = tape.pop_tape()
    tensor_tape, op_tape = t.export()
    self.assertEqual(len(tensor_tape), 1)  # The watched tensor will remain on
                                           # the tape
    self.assertEqual(len(op_tape), 0)  # No operations should remain on the tape
Developer: DjangoPeng, Project: tensorflow, Lines: 18, Source: tape_test.py

Example 9: _push_tape

 def _push_tape(self, existing_tape=False):
   if self._recording:
     raise ValueError("Tape is already recording.")
   if existing_tape:
     if self._tape is None:
       raise ValueError("There is no existing tape.")
     tape.push_tape(self._tape)
   else:
     self._tape = tape.push_new_tape(persistent=self._persistent)
   self._recording = True
Developer: Eagle732, Project: tensorflow, Lines: 10, Source: backprop.py

Example 10: _push_tape

 def _push_tape(self):
   if self._recording:
     raise ValueError("Tape is already recording.")
   if self._tape is None:
     self._tape = tape.push_new_tape(
         persistent=self._persistent,
         watch_accessed_variables=self._watch_accessed_variables)
   else:
     tape.push_tape(self._tape)
   self._recording = True
Developer: adit-chandra, Project: tensorflow, Lines: 10, Source: backprop.py

Example 11: decorated

 def decorated(*args, **kwds):
   """Computes the value and gradient of the decorated function."""
   dy = kwds.pop("dy", None)
   if dy is not None:
     dy = ops.convert_to_tensor(dy)
   assert not kwds, "The gradient function can't take keyword arguments."
   tape.push_new_tape()
   sources = []
   args = [
       ops.convert_to_tensor(args[i]) if i in parameter_positions else args[i]
       for i in range(len(args))
   ]
   args = _ensure_unique_tensor_objects(parameter_positions, args)
   for i in parameter_positions:
     sources.append(args[i])
     tape.watch(args[i])
   result = f(*args)
   return result, imperative_grad.imperative_grad(
       _default_vspace, nest.flatten(result), sources,
       output_gradients=nest.flatten(dy) if dy is not None else None)
Developer: rajeev921, Project: tensorflow, Lines: 20, Source: backprop.py

Example 12: grad_fn

  def grad_fn(*args):
    """Computes the gradient of the wrapped function."""
    tape.push_new_tape()
    try:
      end_node = f(*args)
      if end_node is None:
        raise ValueError("Cannot differentiate a function that returns None; "
                         "did you forget to return a value from {}?".format(
                             f.__name__))
    finally:
      popped_tape = tape.pop_tape()
      variables = popped_tape.watched_variables()
    sources = [x.handle for x in variables]

    if not sources:
      raise ValueError("No trainable variables were accessed while the "
                       "function was being computed.")
    grad = imperative_grad.imperative_grad(_default_vspace,
                                           popped_tape,
                                           nest.flatten(end_node),
                                           sources)
    return end_node, list(zip(grad, variables))
Developer: SylChan, Project: tensorflow, Lines: 22, Source: backprop.py

Example 13: grad_fn

  def grad_fn(*args, **kwds):
    """Computes the gradient of the wrapped function."""
    this_tape = tape.push_new_tape()
    try:
      end_node = f(*args, **kwds)
      if end_node is None:
        raise ValueError("Cannot differentiate a function that returns None; "
                         "did you forget to return a value from {}?".format(
                             f.__name__))
    finally:
      tape.pop_tape(this_tape)
    # Note: variables are returned in construction order. This ensures unique
    # order across executions.
    variables = this_tape.watched_variables()
    if not variables:
      raise ValueError("No trainable variables were accessed while the "
                       "function was being computed.")

    sources = [v.handle for v in variables]
    grad = imperative_grad.imperative_grad(this_tape, nest.flatten(end_node),
                                           sources)
    return end_node, list(zip(grad, variables))
Developer: adit-chandra, Project: tensorflow, Lines: 22, Source: backprop.py

Example 14: _defun_internal

def _defun_internal(name, func, args, kwds):
  """Defines and returns graph-mode version of func."""
  container_prefix = ops.get_default_graph()._container_prefix  # pylint: disable=protected-access
  with context.graph_mode():
    captures = {}
    tmp_graph = CapturingGraph(captures)
    # Inherit the container prefix, since this is used for error checking when
    # isolating eager execution (the container prefix at creation must match the
    # container prefix when used, and variables accessed in the defun will be
    # used in the outside context).
    tmp_graph._container_prefix = container_prefix  # pylint: disable=protected-access
    # Copy the graph collections to ensure summaries and other things work. This
    # lets the function access (but not mutate) collections of the containing
    # graph, such as the global step and the summary writer collections.
    curr_graph = ops.get_default_graph()
    for collection in curr_graph.collections:
      tmp_graph.get_collection_ref(collection)[:] = curr_graph.get_collection(
          collection)
    with tmp_graph.as_default():
      func_inputs = _get_defun_inputs(args)

      with capture_tensors(captures):
        tape.push_new_tape()
        try:
          func_outputs = func(*func_inputs, **kwds)
        finally:
          variables = tape.pop_tape().watched_variables()
      ids = list(sorted(captures.keys()))
      if ids:
        extra_inputs, extra_placeholders = zip(*[captures[x] for x in ids])
      else:
        extra_inputs = []
        extra_placeholders = []
      outputs_list = nest.flatten(func_outputs)
      output_shapes = [x.shape for x in outputs_list if x is not None]

  flat_inputs = [
      x for x in nest.flatten(func_inputs) if isinstance(x, ops.Tensor)
  ]
  all_inputs = flat_inputs + list(extra_placeholders)

  func_def_outputs = [x for x in outputs_list if x is not None]
  inference_function_def = make_function_def(
      tmp_graph, tmp_graph.get_operations(), all_inputs, func_def_outputs)
  # Register any other functions defined in the graph
  # TODO(ashankar): Oh lord, forgive me for this lint travesty.
  for f in tmp_graph._functions.values():  # pylint: disable=protected-access
    # TODO(ashankar): What about the gradient registry?
    _register_with_name(f.name, f.definition)
  _register_with_name(_inference_name(name), inference_function_def)

  return GraphModeFunction(
      all_inputs,
      extra_inputs,
      inference_function_def,
      tmp_graph,
      tmp_graph.get_operations(),
      func_outputs,
      _map_sequence_obj_to_idx(func_def_outputs),
      output_shapes,
      variables=variables)
Developer: TianyouLi, Project: tensorflow, Lines: 61, Source: function.py

Example 15: __enter__

 def __enter__(self):
   self._tape = tape.push_new_tape(persistent=self._persistent)
   return self
Developer: andrewharp, Project: tensorflow, Lines: 3, Source: backprop.py
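
For reference, in current TensorFlow the push/pop pattern shown in Examples 9, 10, and 15 is wrapped by the public tf.GradientTape context manager, which is the supported way to obtain the same behavior without touching the private tape module:

# Public counterpart of the examples above: tf.GradientTape wraps the
# push/pop of a tape in a context manager (compare Examples 9, 10, and 15).
import tensorflow as tf

x = tf.constant(3.0)
with tf.GradientTape() as g:  # __enter__ pushes a new tape
  g.watch(x)                  # public counterpart of tape.watch
  y = x * x
dy_dx = g.gradient(y, x)      # tf.Tensor(6.0); __exit__ popped the tape
print(dy_dx)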


Note: the tensorflow.python.eager.tape.push_new_tape function examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to each project's license. Do not reproduce without permission.