Python ops.init_scope Function Code Examples

This article collects typical usage examples of the Python function tensorflow.python.framework.ops.init_scope, drawn from open-source projects. If you are wondering what exactly init_scope does, or how to use it in practice, the curated examples below should help.


The 15 code examples below demonstrate the init_scope function, sorted by popularity by default.
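
Before the excerpts, a minimal sketch of what `init_scope` does: inside the scope, op creation is lifted out of any function-building graphs into the outermost context (eager mode here), which is why the examples below use it to create variables and run one-time initialization logic at trace time. The function name `show_init_scope` is illustrative:

import tensorflow as tf
from tensorflow.python.framework import ops

@tf.function
def show_init_scope():
  # While tracing, we are inside a FuncGraph, so eager execution is off.
  print("while tracing, eager:", tf.executing_eagerly())        # False
  with ops.init_scope():
    # init_scope exits all function-building graphs and restores the
    # outermost context; here that is eager mode.
    print("inside init_scope, eager:", tf.executing_eagerly())  # True
  return tf.constant(0)

show_init_scope()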

Example 1: container

  def container(self, container_name):
    """Returns a context manager that specifies the resource container to use.

    Overridden from `tf.Graph` to update both the init_scope container
    and the present inner container. This is necessary to make sure setting
    containers applies correctly both to created variables and to stateful
    ops.

    Args:
      container_name: container name string.

    Returns:
      A context manager for defining resource containers for stateful ops,
        yields the container name.
    """
    original_container = self._container
    # pylint: disable=protected-access
    with ops.init_scope():
      original_init_container = ops.get_default_graph()._container
    try:
      self._container = container_name
      with ops.init_scope():
        ops.get_default_graph()._container = container_name
      yield self._container
    finally:
      self._container = original_container
      with ops.init_scope():
        ops.get_default_graph()._container = original_init_container
Developer: JonathanRaiman, Project: tensorflow, Lines: 28, Source: function.py
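
For context, a sketch of the standard `tf.Graph.container` usage that this override extends (TF1-style API; the container name "experiment0" is arbitrary):

import tensorflow as tf

g = tf.Graph()
with g.as_default(), g.container("experiment0"):
  # Stateful ops created here live in the "experiment0" resource
  # container and can be reset as a group via
  # tf.compat.v1.Session.reset(target, containers=["experiment0"]).
  v = tf.compat.v1.get_variable(
      "v", shape=[2], initializer=tf.compat.v1.zeros_initializer())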

Example 2: _real_mirrored_creator

 def _real_mirrored_creator(devices, *args, **kwargs):  # pylint: disable=g-missing-docstring
   value_list = []
   for i, d in enumerate(devices):
     with ops.device(d):
       if i > 0:
         # Give replicas meaningful distinct names:
         var0name = value_list[0].name.split(":")[0]
         # We append a / to variable names created on replicas with id > 0 to
         # ensure that we ignore the name scope and instead use the given
         # name as the absolute name of the variable.
         kwargs["name"] = "%s/replica_%d/" % (var0name, i)
         # Initialize replicas with the same value:
         if context.executing_eagerly() or ops.inside_function():
           with ops.init_scope():
             kwargs["initial_value"] = array_ops.identity(
                 value_list[0].value())
         else:
           def initial_value_fn(device=d):
             with ops.device(device):
               return array_ops.identity(value_list[0].initial_value)
           kwargs["initial_value"] = initial_value_fn
       with context.context().device_policy(context.DEVICE_PLACEMENT_SILENT):
         v = next_creator(*args, **kwargs)
       assert not isinstance(v, values.TPUMirroredVariable)
       value_list.append(v)
   return value_list
Developer: jackd, Project: tensorflow, Lines: 26, Source: tpu_strategy.py

Example 3: apply_gradients

  def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    var_list = [v for _, v in grads_and_vars]
    d_vars = []
    g_vars = []
    all_grads = [g for g, _ in grads_and_vars]
    for grad, var in grads_and_vars:
        if var in self.gan.d_vars():
            d_vars += [var]
        elif var in self.gan.g_vars():
            g_vars += [var]
        else:
            # Raising a bare string is a TypeError in Python 3; raise an exception type.
            raise ValueError("Couldn't find var in g_vars or d_vars")

    with ops.init_scope():
        self.optimizer._create_slots([v for g, v in grads_and_vars])

    self._prepare()
    d_grads = all_grads[:len(d_vars)]
    if self.config.type == 'sga':
        Jgrads = tf.gradients(d_grads, d_vars, grad_ys=d_grads, stop_gradients=d_vars) + [tf.zeros_like(g) for g in g_vars]
    elif self.config.type == 'magnitude':
        consensus_reg = [tf.square(g) for g in d_grads if g is not None]
        Jgrads = tf.gradients(consensus_reg, d_vars) + [tf.zeros_like(g) for g in g_vars]
    else:
        consensus_reg = 0.5 * sum(
                tf.reduce_sum(tf.square(g)) for g in d_grads if g is not None
        )
        Jgrads = tf.gradients(consensus_reg, d_vars, stop_gradients=d_vars) + [tf.zeros_like(g) for g in g_vars]
    new_grads = [g + jg * self._beta if jg is not None else g
                 for g, v, jg in zip(all_grads, var_list, Jgrads)]
    new_grads_and_vars = list(zip(new_grads, var_list))
    return self.optimizer.apply_gradients(new_grads_and_vars, global_step=global_step, name=name)
Developer: 255BITS, Project: hyperchamber-gan, Lines: 31, Source: consensus_optimizer.py

Example 4: _check_same_graph

  def _check_same_graph(self):
    """Checks that the module is not being connect to multiple Graphs.

    An instance of a Sonnet module 'owns' the variables it contains, and permits
    seamless variable sharing. As such, connecting a single module instance to
    multiple Graphs is not possible - this function will raise an error should
    that occur.

    Raises:
      DifferentGraphError: if the module is connected to a different Graph than
        it was previously used in.
    """
    with ops.init_scope():
      # We need `init_scope` in case we're running inside a defun. In that case
      # what we want is information about where the function will be called not
      # where the function is being built.
      current_graph = tf.get_default_graph()
      will_call_in_eager_context = tf.executing_eagerly()

    if self._graph is None:
      self._graph = current_graph
      self._set_module_info()

    if not will_call_in_eager_context:
      # Same graph checks only make sense when calling from graph mode (in eager
      # mode there is a single process level context where all modules are
      # created).
      if self._graph != current_graph:
        raise DifferentGraphError("Cannot connect module to multiple Graphs.")
Developer: ccchang0111, Project: sonnet, Lines: 29, Source: base.py
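
A sketch of the failure mode this check guards against (assuming Sonnet v1 with TF1 graph mode; the layer size and placeholder shapes are illustrative):

import tensorflow as tf
import sonnet as snt

module = snt.Linear(output_size=8)
g1, g2 = tf.Graph(), tf.Graph()
with g1.as_default():
  module(tf.placeholder(tf.float32, [None, 4]))  # first connection: OK
with g2.as_default():
  # Second connection is to a different Graph, so _check_same_graph
  # raises DifferentGraphError here.
  module(tf.placeholder(tf.float32, [None, 4]))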

Example 5: apply_gradients

  def apply_gradients(self, grads_and_vars, name=None):
    """Apply gradients to variables.

    This is the second part of `minimize()`. It returns an `Operation` that
    applies gradients.

    Args:
      grads_and_vars: List of (gradient, variable) pairs as returned by
        `compute_gradients()`.
      name: Optional name for the returned operation.  Default to the name
        passed to the `Optimizer` constructor.

    Returns:
      An `Operation` that applies the specified gradients. If `global_step`
      was not None, that operation also increments `global_step`.

    Raises:
      TypeError: If `grads_and_vars` is malformed.
      ValueError: If none of the variables have gradients.
    """
    grads_and_vars = _filter_grads(grads_and_vars)
    var_list = [v for (_, v) in grads_and_vars]
    if distribution_strategy_context.has_distribution_strategy():
      reduced_grads = merge_grads(grads_and_vars)
      grads_and_vars = zip(reduced_grads, var_list)

    with ops.init_scope():
      self._prepare()
      self._create_slots(var_list)
    update_ops = []

    def update_grad_to_var(grad, var):
      """Apply gradient to variable."""
      if isinstance(var, ops.Tensor):
        raise NotImplementedError("Trying to update a Tensor ", var)
      if isinstance(grad, ops.IndexedSlices):
        if var.constraint is not None:
          raise RuntimeError(
              "Cannot use a constraint function on a sparse variable.")
        return self._resource_apply_sparse_duplicate_indices(
            grad.values, var, grad.indices)
      update_op = self._resource_apply_dense(grad, var)
      if var.constraint is not None:
        with ops.control_dependencies([update_op]):
          return var.assign(var.constraint(var))
      else:
        return update_op

    with ops.name_scope(name, self._name) as name:
      for grad, var in grads_and_vars:
        scope_name = ("" if ops.executing_eagerly_outside_functions() else
                      "_" + var.op.name)
        with ops.name_scope("update" + scope_name):
          update_ops.append(update_grad_to_var(grad, var))
      # Control dependencies do not work in per-replica mode; change this
      # once b/118841692 is fixed.
      # with ops.control_dependencies(update_ops):
      #   apply_updates = self._iterations.assign_add(1).op
      apply_updates = merge_update_step(update_ops, self.iterations)
      return apply_updates
Developer: aeverall, Project: tensorflow, Lines: 60, Source: optimizer_v2.py
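
Typical caller-side usage of `apply_gradients` (TF2 eager mode; the SGD learning rate is arbitrary):

import tensorflow as tf

opt = tf.keras.optimizers.SGD(learning_rate=0.1)
var = tf.Variable(2.0)
with tf.GradientTape() as tape:
  loss = var ** 2
grads = tape.gradient(loss, [var])
opt.apply_gradients(zip(grads, [var]))
print(var.numpy())  # 2.0 - 0.1 * 4.0 = 1.6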

Example 6: apply_gradients

  def apply_gradients(self, grads_and_vars, name=None):
    """Apply gradients to variables.

    This is the second part of `minimize()`. It returns an `Operation` that
    applies gradients.

    Args:
      grads_and_vars: List of (gradient, variable) pairs.
      name: Optional name for the returned operation.  Default to the name
        passed to the `Optimizer` constructor.

    Returns:
      An `Operation` that applies the specified gradients. If `global_step`
      was not None, that operation also increments `global_step`.

    Raises:
      TypeError: If `grads_and_vars` is malformed.
      ValueError: If none of the variables have gradients.
    """
    grads_and_vars = _filter_grads(grads_and_vars)
    var_list = [v for (_, v) in grads_and_vars]

    self._create_hypers()
    with ops.init_scope():
      self._create_slots(var_list)

    self._prepare(var_list)

    return distribute_ctx.get_replica_context().merge_call(
        self._distributed_apply, args=(grads_and_vars,), kwargs={"name": name})
Developer: terrytangyuan, Project: tensorflow, Lines: 30, Source: optimizer_v2.py

Example 7: _real_mirrored_creator

 def _real_mirrored_creator(devices, *args, **kwargs):  # pylint: disable=g-missing-docstring
   index = {}
   for i, d in enumerate(devices):
     with ops.init_scope(), ops.device(d):
       if i > 0:
         # Give replicas meaningful distinct names:
         var0name = index[devices[0]].name.split(":")[0]
         # We append a / to variable names created on replicas with id > 0 to
         # ensure that we ignore the name scope and instead use the given
         # name as the absolute name of the variable.
         kwargs["name"] = "%s/replica_%d/" % (var0name, i)
         # Initialize replicas with the same value:
         def initial_value_fn(device=d):
           if context.executing_eagerly():
             init_value = index[devices[0]].value()
             return array_ops.identity(init_value)
           else:
             with ops.device(device):
               init_value = index[devices[0]].initial_value
               return array_ops.identity(init_value)
         kwargs["initial_value"] = initial_value_fn
       with context.context().device_policy(context.DEVICE_PLACEMENT_SILENT):
         # Don't record operations (e.g. other variable reads) during
         # variable creation.
         with tape.stop_recording():
           v = next_creator(*args, **kwargs)
       assert not isinstance(v, values.DistributedVariable)
       index[d] = v
   return index
Developer: aeverall, Project: tensorflow, Lines: 29, Source: mirrored_strategy.py
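
From the user's side, this creator is invoked through the public API; a sketch (the device list is illustrative, and would normally name GPUs):

import tensorflow as tf

strategy = tf.distribute.MirroredStrategy(["/cpu:0"])
with strategy.scope():
  # Variable creation inside the scope is routed through a mirrored
  # creator like the one above, yielding one copy per device.
  v = tf.Variable(1.0)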

Example 8: value_tensors

  def value_tensors(self):
    """Create value `Tensor`s for this object's attributes.

    Does not require that the Python object has been created. Used for
    restore-on-create when executing eagerly.

    Returns:
      A dictionary mapping from object attribute names to `Tensor`s.
    """
    value_tensors = {}
    for serialized_tensor in self.object_proto.attributes:
      checkpoint_key = serialized_tensor.checkpoint_key
      dtype = self._checkpoint.dtype_map[checkpoint_key]
      base_type = dtype.base_dtype
      with ops.init_scope():
        with ops.device("/cpu:0"):
          # Run the restore itself on the CPU.
          value, = io_ops.restore_v2(
              prefix=self._checkpoint.save_path,
              tensor_names=[checkpoint_key],
              shape_and_slices=[""],
              dtypes=[base_type],
              name="%s_checkpoint_read" % (serialized_tensor.name,))
        # Copy the value to the current device if necessary.
        value_tensors[serialized_tensor.name] = array_ops.identity(value)
    return value_tensors
Developer: neilireson, Project: tensorflow, Lines: 26, Source: base.py
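
`value_tensors` backs restore-on-create; from the user's side the pattern looks like this (eager mode; the checkpoint path is illustrative):

import tensorflow as tf

path = tf.train.Checkpoint(v=tf.Variable(3.0)).save("/tmp/ckpt_demo")

restored = tf.train.Checkpoint()
status = restored.restore(path)
# The attribute does not exist yet; its value is read from the
# checkpoint (on CPU, then copied) when the variable is created.
restored.v = tf.Variable(0.0)
print(restored.v.numpy())  # 3.0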

Example 9: _default_getter

def _default_getter(name, shape, dtype, initializer=None,
                    partition_info=None, **kwargs):
  """A pared-down version of get_variable which does not reuse variables."""
  dtype = dtypes.as_dtype(dtype)
  shape_object = tensor_shape.as_shape(shape)
  with ops.init_scope():
    if initializer is None:
      initializer, initializing_from_value = (
          variable_scope._get_default_variable_store()._get_default_initializer(  # pylint: disable=protected-access
              name=name, shape=shape_object, dtype=dtype))
    else:
      initializing_from_value = not callable(initializer)
    # Same logic as get_variable
    variable_dtype = dtype.base_dtype
    if initializing_from_value:
      if shape is not None:
        raise ValueError("If initializer is a constant, do not specify shape.")
      initial_value = initializer
    else:
      # Instantiate initializer if provided initializer is a type object.
      if isinstance(initializer, type(init_ops.Initializer)):
        initializer = initializer(dtype=dtype)
      def initial_value():
        return initializer(
            shape_object.as_list(), dtype=dtype, partition_info=partition_info)
    return resource_variable_ops.ResourceVariable(
        initial_value=initial_value,
        name=name,
        dtype=variable_dtype,
        **kwargs
    )
Developer: Jackiefan, Project: tensorflow, Lines: 31, Source: checkpointable_utils.py
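
A minimal call, assuming the module's imports are available and `tf` is bound to tensorflow (the variable name is illustrative):

v = _default_getter("weights", shape=[2, 3], dtype=tf.float32)
# Returns a ResourceVariable initialized by the variable store's
# default initializer (glorot_uniform for floating-point dtypes).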

Example 10: _capture_by_value

 def _capture_by_value(
     self,
     op_type,
     inputs,
     dtypes,  # pylint: disable=redefined-outer-name
     input_types=None,
     name=None,
     attrs=None,
     op_def=None,
     compute_shapes=True,
     compute_device=True):
   # When capturing by value, do the read outside
   reverse_captures = dict((v, k) for k, v in self.captures.items())
   uncaptured_inputs = [reverse_captures.get(t, t) for t in inputs]
   with ops.init_scope():
     if context.executing_eagerly():
       attr_list = ("dtype", int(attrs["dtype"].type))
       value, = execute.execute(
           compat.as_bytes(op_type), 1, uncaptured_inputs, attr_list,
           context.context())
     else:
       op = ops.get_default_graph().create_op(
           op_type, uncaptured_inputs, dtypes, input_types, name, attrs,
           op_def, compute_shapes, compute_device)
       value = op.outputs[0]
   captured_value = self.capture(value)
   return captured_value.op
Developer: kylin9872, Project: tensorflow, Lines: 27, Source: func_graph.py

Example 11: load_function_def_library

def load_function_def_library(library):
  """Load a set of functions as concrete functions without captured inputs.

  Function names are manipulated during load such that they do not overlap
  with previously created ones.

  Args:
    library: FunctionDefLibrary proto message.

  Returns:
    Map of original function names in the library to instances of
    `ConcreteFunction` without captured inputs.

  Raises:
    ValueError: if function dependencies have a cycle.
  """
  functions = {}

  for fdef in _sort_function_defs(library):
    copy = _fix_fdef(fdef, functions)

    func_graph = function_def_lib.function_def_to_graph(copy)
    for dep in _list_function_deps(fdef):
      functions[dep].add_to_graph(func_graph)
    func = function_lib.ConcreteFunction(func_graph)
    func.add_to_graph()

    functions[fdef.signature.name] = func

    # Also register the gradients in the current root context.
    with ops.init_scope():
      func._register_gradient()  # pylint: disable=protected-access

  return functions
Developer: rmlarsen, Project: tensorflow, Lines: 34, Source: function_deserialization.py

Example 12: __init__

 def __init__(self, path):
   """Record the full path to the asset."""
   # The init_scope prevents functions from capturing `path` in an
   # initialization graph, since it is transient and should not end up in a
   # serialized function body.
   with ops.init_scope(), ops.device("CPU"):
     self._path = ops.internal_convert_to_tensor(path, dtype=dtypes.string,
                                                 name="asset_path")
Developer: aritratony, Project: tensorflow, Lines: 8, Source: tracking.py
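
The public counterpart of this class is `tf.saved_model.Asset` (TF2); a sketch, assuming a local vocab.txt exists at export time:

import tensorflow as tf

asset = tf.saved_model.Asset("vocab.txt")
# asset_path is a string tensor created under init_scope, so a
# tf.function that reads it captures the tensor without baking the
# transient filesystem path into its serialized body.
print(asset.asset_path)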

Example 13: _get_beta_accumulators

 def _get_beta_accumulators(self):
   with ops.init_scope():
     if context.executing_eagerly():
       graph = None
     else:
       graph = ops.get_default_graph()
     return (self._get_non_slot_variable("beta1_power", graph=graph),
             self._get_non_slot_variable("beta2_power", graph=graph))
Developer: Wajih-O, Project: tensorflow, Lines: 8, Source: adam.py

Example 14: initialize_variables

 def initialize_variables():
   for v, init in initializer_map.items():
     with ops.init_scope():
       if resource_variable_ops.var_is_initialized_op(v.handle):
         # Ignore variables which are already initialized at trace time.
         continue
     v.assign(lift_to_graph.lift_to_graph(
         [init], ops.get_default_graph())[init])
Developer: kylin9872, Project: tensorflow, Lines: 8, Source: def_function.py
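
The initialization check used above can be exercised directly (eager mode):

import tensorflow as tf
from tensorflow.python.ops import resource_variable_ops

v = tf.Variable(1.0)
# In eager mode the op executes immediately and returns a boolean tensor.
print(resource_variable_ops.var_is_initialized_op(v.handle))  # True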

Example 15: create_file_writer_v2

def create_file_writer_v2(logdir,
                          max_queue=None,
                          flush_millis=None,
                          filename_suffix=None,
                          name=None):
  """Creates a summary file writer for the given log directory.

  Args:
    logdir: a string specifying the directory in which to write an event file.
    max_queue: the largest number of summaries to keep in a queue; will
     flush once the queue gets bigger than this. Defaults to 10.
    flush_millis: the largest interval between flushes. Defaults to 120,000.
    filename_suffix: optional suffix for the event file name. Defaults to `.v2`.
    name: a name for the op that creates the writer.

  Returns:
    A SummaryWriter object.
  """
  if logdir is None:
    raise ValueError("logdir cannot be None")
  inside_function = ops.inside_function()
  with ops.name_scope(name, "create_file_writer") as scope, ops.device("cpu:0"):
    # Run init inside an init_scope() to hoist it out of tf.functions.
    with ops.init_scope():
      if context.executing_eagerly():
        _check_create_file_writer_args(
            inside_function,
            logdir=logdir,
            max_queue=max_queue,
            flush_millis=flush_millis,
            filename_suffix=filename_suffix)
      logdir = ops.convert_to_tensor(logdir, dtype=dtypes.string)
      if max_queue is None:
        max_queue = constant_op.constant(10)
      if flush_millis is None:
        flush_millis = constant_op.constant(2 * 60 * 1000)
      if filename_suffix is None:
        filename_suffix = constant_op.constant(".v2")
      # Prepend the PID and a process-local UID to the filename suffix to avoid
      # filename collisions within the machine (the filename already contains
      # the hostname to avoid cross-machine collisions).
      unique_prefix = constant_op.constant(".%s.%s" % (os.getpid(), ops.uid()))
      filename_suffix = unique_prefix + filename_suffix
      # Use a unique shared_name to prevent resource sharing.
      if context.executing_eagerly():
        shared_name = context.shared_name()
      else:
        shared_name = ops._name_from_scope_name(scope)  # pylint: disable=protected-access
      return ResourceSummaryWriter(
          shared_name=shared_name,
          init_op_fn=functools.partial(
              gen_summary_ops.create_summary_file_writer,
              logdir=logdir,
              max_queue=max_queue,
              flush_millis=flush_millis,
              filename_suffix=filename_suffix),
          name=name,
          v2=True)
Developer: adit-chandra, Project: tensorflow, Lines: 58, Source: summary_ops_v2.py
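
The public wrapper for this function is `tf.summary.create_file_writer`; typical TF2 usage (the log directory is illustrative):

import tensorflow as tf

writer = tf.summary.create_file_writer("/tmp/logs", max_queue=10)
with writer.as_default():
  tf.summary.scalar("loss", 0.5, step=1)
writer.flush()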


Note: the tensorflow.python.framework.ops.init_scope examples in this article were compiled by 纯净天空 from open-source projects hosted on GitHub and other code/documentation platforms. The snippets remain the copyright of their original authors; consult each project's license before redistributing or reusing them.