

Python variable_scope._get_default_variable_store Function Code Examples

This article collects typical usage examples of the Python function tensorflow.python.ops.variable_scope._get_default_variable_store. If you are wondering what _get_default_variable_store does, how to call it, or want to see it used in real code, the curated examples below should help.


Below are 15 code examples of _get_default_variable_store, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code samples.
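
Before the examples, here is a minimal sketch of the pattern most of them share: fetch the process-wide variable store singleton and create or look up variables through it. Note that _get_default_variable_store is a private TensorFlow 1.x API (hence the leading underscore and the pylint pragmas throughout), so this sketch is illustrative only and may break across releases.

from tensorflow.python.ops import variable_scope

# pylint: disable=protected-access
store = variable_scope._get_default_variable_store()

# Create a variable in the store, then fetch the same object by name.
v = store.get_variable("my_var", shape=[2])
v_again = store.get_variable("my_var", shape=[2])
assert v is v_again  # the store caches variables keyed by name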

Example 1: _default_initializer

def _default_initializer(name, shape, dtype):
  """The default initializer for variables."""
  # pylint: disable=protected-access
  store = variable_scope._get_default_variable_store()
  initializer = store._get_default_initializer(name, shape=shape, dtype=dtype)
  # pylint: enable=protected-access
  return initializer[0]
Author: kimr843, Project: tensorflow, Lines: 7, Source: graph_callable.py
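
A hypothetical usage sketch (not part of the original project): the value returned by _default_initializer is an initializer object that can be called with a shape and dtype to produce an initial tensor. In recent TF 1.x versions the store's default for floating-point variables is a Glorot-uniform initializer.

import tensorflow as tf

# Hypothetical call, assuming _default_initializer from Example 1 is in scope.
init = _default_initializer("w", shape=[3, 3], dtype=tf.float32)
initial_tensor = init([3, 3], dtype=tf.float32)  # draws the initial values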

Example 2: __init__

 def __init__(self, variable_scope_name):
   self._variable_scope_name = variable_scope_name
   default = variable_scope._get_default_variable_store()  # pylint: disable=protected-access
   if default._store_eager_variables:  # pylint: disable=protected-access
     self._eager_variable_store = variable_scope.EagerVariableStore(default)
   else:
     self._eager_variable_store = variable_scope.EagerVariableStore()
Author: AndrewTwinz, Project: tensorflow, Lines: 7, Source: template.py
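
For context, a hedged sketch of how an EagerVariableStore like the one created above is typically used: variables created via get_variable (including by tf.layers) inside its as_default() scope are recorded in the store, so they can be reused across calls and retrieved later. Assumes TF 1.x with eager execution enabled.

import tensorflow as tf
from tensorflow.python.ops import variable_scope

tf.enable_eager_execution()  # TF 1.x: enable eager execution once at program start

container = variable_scope.EagerVariableStore()
with container.as_default():
  y = tf.layers.dense(tf.zeros([1, 2]), units=3, name="l1")  # creates variables
print([v.name for v in container.variables()])  # kernel and bias of "l1"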

Example 3: getvar

 def getvar(self,
            getter,
            name,
            shape=None,
            dtype=None,
            initializer=None,
            trainable=True,
            collections=None,
            **kwargs):
   """A custom variable getter."""
   # Here, we switch the default graph to the outer graph and ask the
   # variable scope in which the function is defined to give us the
   # variable. The variable is stashed in extra_vars and returned to
   # the caller.
   #
   # We capture these variables so that the variable definition is
    # hoisted upward to the outermost graph.
   with self._outer_graph.as_default():
     # pylint: disable=protected-access
     var = self._vscope.get_variable(
         vs._get_default_variable_store(),
         name,
         shape=shape,
         dtype=dtype,
         initializer=initializer,
         trainable=trainable,
         collections=collections)
     self.extra_vars.append(var)
     return var
Author: Hwhitetooth, Project: tensorflow, Lines: 29, Source: function.py

Example 4: testNamelessStore

 def testNamelessStore(self):
   vs = variable_scope._get_default_variable_store()
   vs.get_variable("v1", [2])
   vs.get_variable("v2", [2])
   expected_names = ["%s:0" % name for name in ["v1", "v2"]]
   self.assertEqual(set(expected_names),
                    set([v.name for v in vs._vars.values()]))
Author: peace195, Project: tensorflow, Lines: 7, Source: variable_scope_test.py

Example 5: _default_getter

def _default_getter(name, shape, dtype, initializer=None,
                    partition_info=None, **kwargs):
  """A pared-down version of get_variable which does not reuse variables."""
  dtype = dtypes.as_dtype(dtype)
  shape_object = tensor_shape.as_shape(shape)
  with ops.init_scope():
    if initializer is None:
      initializer, initializing_from_value = (
          variable_scope._get_default_variable_store()._get_default_initializer(  # pylint: disable=protected-access
              name=name, shape=shape_object, dtype=dtype))
    else:
      initializing_from_value = not callable(initializer)
    # Same logic as get_variable
    variable_dtype = dtype.base_dtype
    if initializing_from_value:
      if shape is not None:
        raise ValueError("If initializer is a constant, do not specify shape.")
      initial_value = initializer
    else:
      # Instantiate initializer if provided initializer is a type object.
      if isinstance(initializer, type(init_ops.Initializer)):
        initializer = initializer(dtype=dtype)
      def initial_value():
        return initializer(
            shape_object.as_list(), dtype=dtype, partition_info=partition_info)
    return resource_variable_ops.ResourceVariable(
        initial_value=initial_value,
        name=name,
        dtype=variable_dtype,
        **kwargs
    )
Author: Jackiefan, Project: tensorflow, Lines: 31, Source: checkpointable_utils.py
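
A hypothetical usage sketch (assuming the helper above is in scope and tensorflow is imported as tf): unlike get_variable, each call creates a fresh ResourceVariable, with the store's default initializer filled in when none is given.

bias = _default_getter("bias", shape=[4], dtype=tf.float32)
bias_again = _default_getter("bias", shape=[4], dtype=tf.float32)  # a second, distinct variable; no reuse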

Example 6: testNameExists

 def testNameExists(self):
   vs = variable_scope._get_default_variable_store()
   # No check by default, so we can both create and get existing names.
   v = vs.get_variable("v", [1])
   v1 = vs.get_variable("v", [1])
   assert v == v1
   # When reuse is False, we fail when variables are already there.
   vs.get_variable("w", [1], reuse=False)  # That's ok.
   with self.assertRaises(ValueError):
     vs.get_variable("v", [1], reuse=False)  # That fails.
   # When reuse is True, we fail when variables are new.
   vs.get_variable("v", [1], reuse=True)  # That's ok.
   with self.assertRaises(ValueError):
     vs.get_variable("u", [1], reuse=True)  # That fails.
Author: peace195, Project: tensorflow, Lines: 14, Source: variable_scope_test.py

Example 7: getvar

 def getvar(self, name, shape, dtype, initializer, **kwargs):
   """A custom variable getter."""
   # TODO(zhifengc): We probably need to support other 10-ish options
   # vs.get_variable supports.
   #
   # Here, we switch the default graph to the outer graph and ask the
   # variable scope in which the function is defined to give us the
   # variable. The variable is stashed in extra_vars and returned to
   # the caller.
   #
   # We capture these variables so that the variable definition is
    # hoisted upward to the outermost graph.
   with self._outer_graph.as_default():
     # pylint: disable=protected-access
     var = self._vscope.get_variable(vs._get_default_variable_store(), name,
                                     shape, dtype, initializer)
     self.extra_vars.append(var)
     return var
Author: KalraA, Project: tensorflow, Lines: 18, Source: function.py

Example 8: __init__

  def __init__(self, name, func, create_scope_now=False, unique_name=None,
               custom_getter=None):
    """Creates a template for the given function.

    Args:
      name: A name for the scope created by this template. The
        name will be made unique by appending `_N` to it (see how
        `tf.variable_scope` treats the `default_name` for details).
      func: The function to apply each time.
      create_scope_now: Whether to create the scope at Template construction
        time, rather than first call. Defaults to false. Creating the scope at
        construction time may be more convenient if the template is passed
        through much lower level code, and you want to be sure of the scope
        name without knowing exactly where it will be first called. If set to
        True, the scope will be created in the constructor, and all subsequent
        times in __call__, leading to a trailing numeral being added to the
        names of all created Tensors. If set to False, the scope will be created
        at the first call location.
      unique_name: When used, it overrides `name` and is not made unique. If a
        template of the same scope/unique_name already exists and reuse is
        false, an error is raised. Defaults to None.
      custom_getter: optional custom getter to pass to variable_scope()

    Raises:
      RuntimeError: if eager mode is not enabled.
      ValueError: if the name is None or unique_name is provided.
    """
    if not context.in_eager_mode():
      raise RuntimeError(
          "{} objects can only be used when eager execution is enabled, use "
          "tf.Template for graph construction".
          format(type(self)))
    if unique_name:
      raise ValueError("unique_name cannot be used in eager mode.")
    super(EagerTemplate, self).__init__(name, func, create_scope_now,
                                        unique_name, custom_getter)
    # Create an eager variable store only if the current variable store cannot
    # store eager variables. This should allow for correct nesting.
    default_vstore = variable_scope._get_default_variable_store()  # pylint: disable=protected-access
    if default_vstore._store_eager_variables:  # pylint: disable=protected-access
      raise ValueError("Nested EagerTemaplates are not currently supported.")
    else:
      self._eager_variable_store = variable_scope.EagerVariableStore()
Author: Lin-jipeng, Project: tensorflow, Lines: 43, Source: template.py

Example 9: getvar

 def getvar(
     self,
     getter,
     name,
     shape=None,
     dtype=None,
     initializer=None,
     reuse=None,
     trainable=True,
     collections=None,  # pylint: disable=redefined-outer-name
     use_resource=None,
     **kwargs):
   """A custom variable getter."""
   # Here, we switch the default graph to the outer graph and ask the
   # variable scope in which the function is defined to give us the
   # variable. The variable is stashed in extra_vars and returned to
   # the caller.
   #
   # We capture these variables so that the variable definition is
    # hoisted upward to the outermost graph.
   with self._outer_graph.as_default():
     # pylint: disable=protected-access
     var = self._vscope.get_variable(
         vs._get_default_variable_store(),
         name,
         shape=shape,
         dtype=dtype,
         initializer=initializer,
         reuse=reuse,
         trainable=trainable,
         collections=collections,
         use_resource=use_resource)
     self.extra_vars.append(var)
     if (isinstance(var, resource_variable_ops.ResourceVariable) and
         self._capture_resource_var_by_value):
       # For resource-based variables read the variable outside the function
       # and pass in the value. This ensures that the function is pure and
       # differentiable. TODO(apassos) this may have performance problems if
       # the function will only do embedding lookups on the variable.
       return var.value()
     return var
Author: adit-chandra, Project: tensorflow, Lines: 41, Source: function.py

Example 10: __init__

  def __init__(self, name=None):
    """Configure the `Network`.

    Args:
      name: The name to use for this `Network`. If specified, it must be unique
        in the context where this `Network` is first
         (1) added to another `Network` (in which case it must not share a name
           with other `Layers` added to that `Network`), or
         (2) built/called (in which case no other 'top-level' `Network`s may
          share this name).
        If unspecified or None, the `Network` will be named using its class
        name, with a number appended if necessary for uniqueness (e.g. MyNetwork
        -> 'my_network_1').

    Raises:
      ValueError: If `name` is not valid. Note that some naming errors will
        instead be raised when the `Network` is called.
    """
    if isinstance(name, variable_scope.VariableScope):
      raise ValueError("VariableScopes are not valid Network names.")
    if name is not None and "/" in name:
      raise ValueError(
          "Forward slashes ('/') are not allowed in Network names.")
    super(Network, self).__init__(name=name)
    self._layers = []
    self._sub_layer_name_uids = collections.defaultdict(int)
    # Initially None, but set to False for networks which are first built as
    # top-level.
    self._first_parent = None  # A weak reference to our first parent.
    self._non_network_sublayers = []
    self._owned_layers = {}
    # The scope to use if we end up without a parent.
    self._default_parent_variable_scope = variable_scope.get_variable_scope()
    # Hold on to the variable scope counts from init to check whether a scope
    # with the name we want was ever created in our parent scope. Without this
    # check we might have name collisions if the parent scope on init gets
    # closed before build is called.
    self._variable_scope_counts_on_init = (
        variable_scope._get_default_variable_store().variable_scopes_count)
Author: AbhinavJain13, Project: tensorflow, Lines: 39, Source: network.py

Example 11: __init__

 def __init__(self, name=None):
   if isinstance(name, variable_scope.VariableScope):
     raise ValueError("VariableScopes are not valid Network names.")
   if name is not None and "/" in name:
     raise ValueError(
         "Forward slashes ('/') are not allowed in Network names.")
   super(Network, self).__init__(name=name)
   self._layers = []
   self._sub_layer_name_uids = collections.defaultdict(int)
   # Initially None, but set to False for networks which are first built as
   # top-level.
   self._first_parent = None  # A weak reference to our first parent.
   self._non_network_sublayers = []
   self._owned_layers = {}
   # The scope to use if we end up without a parent.
   self._default_parent_variable_scope = variable_scope.get_variable_scope()
   # Hold on to the variable scope counts from init to check whether a scope
   # with the name we want was ever created in our parent scope. Without this
   # check we might have name collisions if the parent scope on init gets
   # closed before build is called.
   self._variable_scope_counts_on_init = (
       variable_scope._get_default_variable_store().variable_scopes_count)
Author: TianyouLi, Project: tensorflow, Lines: 22, Source: network.py

Example 12: init_from_checkpoint

def init_from_checkpoint(checkpoint_dir, assignment_map):
  """Using assingment map initializes current variables with loaded tensors.

  Note: This overrides default initialization ops of specified variables and
  redefines dtype.

  Assignment map supports the following syntax:
    `'scope_name/': 'checkpoint_scope_name/'` - will load all variables in
      current `scope_name` from `checkpoint_scope_name` with matching variable
      names.
    `'scope_name/variable_name': 'checkpoint_scope_name/some_other_variable'` -
      will initialize `scope_name/variable_name` variable
      from `checkpoint_scope_name/some_other_variable`.
    `variable: 'scope_variable_name'` - will initialize given variable with
      variable from the checkpoint.
    `'scope_name/': '/'` - will load all variables in current `scope_name` from
      checkpoint's root (e.g. no scope).

  Supports loading into partitioned variables, which are represented as
  '<variable>/part_<part #>'.

  Example:
  ```python
    # Create variables.
    with tf.variable_scope('test'):
      m = tf.get_variable('my_var')
    with tf.variable_scope('test2'):
      var2 = tf.get_variable('my_var')
    ...
    # Specify which variables to initialize from checkpoint.
    init_from_checkpoint(checkpoint_dir, {
      'test/my_var': 'some_var',
      'test2/': 'some_scope/'})
    ...
    # Or use `Variable` objects to identify what to initialize.
    init_from_checkpoint(checkpoint_dir, {
      var2: 'some_scope/var2',
    })
    ...
    # Initialize variables as usual.
    session.run(tf.global_variables_initializer())
  ```

  Args:
    checkpoint_dir: Directory with checkpoints file or path to checkpoint.
    assignment_map: Dict, where keys are names of current variables
                    (in default graph) and values are names of the variables
                    in the checkpoint.

  Raises:
    tf.errors.OpError: If missing checkpoints or tensors in checkpoints.
    ValueError: If missing variables in current graph.
  """
  filepattern = _get_checkpoint_filename(checkpoint_dir)
  reader = load_checkpoint(checkpoint_dir)
  variable_map = reader.get_variable_to_shape_map()
  for current_name, tensor_name in six.iteritems(assignment_map):
    scopes = ""
    var = None
    # Check if this is Variable object.
    if isinstance(current_name, variables.Variable):
      var = current_name
    else:
      var_scope = vs._get_default_variable_store()
      # Check if this is variable in var_store.
      var = var_scope._vars.get(current_name, None)
      # Also check if variable is partitioned as list.
      if var is None:
        if current_name + "/part_0" in var_scope._vars:
          var = []
          i = 0
          while current_name + "/part_%d" % i in var_scope._vars:
            var.append(var_scope._vars[current_name + "/part_%d" % i])
            i += 1
    if var is not None:
      # If 1 to 1 mapping was provided, find variable in the scope.
      if tensor_name not in variable_map:
        raise ValueError("Tensor %s is not found in %s checkpoint" % (
            tensor_name, checkpoint_dir
        ))
      if isinstance(var, variables.Variable):
        # Additional at-call-time checks.
        if not var.get_shape().is_compatible_with(variable_map[tensor_name]):
          raise ValueError(
              "Shape of variable %s (%s) doesn't match with shape of "
              "tensor %s (%s) from checkpoint reader." % (
                  var.name, str(var.get_shape()),
                  tensor_name, str(variable_map[tensor_name])
              ))
      _set_variable_or_list_initializer(var, filepattern, tensor_name)
      logging.info("Initialize variable %s from checkpoint %s with %s" % (
          current_name, checkpoint_dir, tensor_name
      ))
    else:
      if "/" in current_name:
        scopes = current_name[:current_name.rindex("/")]
        current_name = current_name[current_name.rindex("/") + 1:]
      if not tensor_name.endswith("/"):
        raise ValueError(
            "Assignment map with scope only name (%s) "
#......... (remaining code omitted) .........
Author: 01-, Project: tensorflow, Lines: 101, Source: checkpoints.py

Example 13: init_from_checkpoint

def init_from_checkpoint(checkpoint_dir, assignment_map):
  """Using assingment map initializes current variables with loaded tensors.

  Note: This overrides default initialization ops of specified variables and
  redefines dtype.

  Assignment map supports the following syntax:
    `'scope_name/': 'checkpoint_scope_name/'` - will load all variables in
      current `scope_name` from `checkpoint_scope_name` with matching variable
      names.
    `'scope_name/variable_name': 'checkpoint_scope_name/some_other_variable'` -
    will initialize `scope_name/variable_name` variable
    from `checkpoint_scope_name/some_other_variable`.

  Example:
  ```python
    # Create variables.
    with tf.variable_scope('test'):
      m = tf.get_variable('my_var')
    with tf.variable_scope('test2'):
      m = tf.get_variable('my_var')
    ...
    # Specify which variables to initialize from checkpoint.
    init_from_checkpoint(checkpoint_dir, {
      'test/my_var': 'some_var',
      'test2/': 'some_scope/'})
    ...
    # Initialize variables as usual.
    session.run(tf.global_variables_initializer())
  ```

  Args:
    checkpoint_dir: Directory with checkpoints file or path to checkpoint.
    assignment_map: Dict, where keys are names of current variables
                    (in default graph) and values are names of the variables
                    in the checkpoint.

  Raises:
    tf.errors.OpError: If missing checkpoints or tensors in checkpoints.
    ValueError: If missing variables in current graph.
  """
  reader = load_checkpoint(checkpoint_dir)
  variable_map = reader.get_variable_to_shape_map()
  for current_name, tensor_name in six.iteritems(assignment_map):
    scopes = ""
    if "/" in current_name:
      scopes = current_name[:current_name.rindex("/")]
      current_name = current_name[current_name.rindex("/") + 1:]
    if current_name:
      # If 1 to 1 mapping was provided, find variable in the scope.
      if tensor_name not in variable_map:
        raise ValueError("Tensor %s is not found in %s checkpoint" % (
            tensor_name, checkpoint_dir
        ))
      with vs.variable_scope(scopes, reuse=True):
        var = vs.get_variable(current_name)
        var._initializer_op = _checkpoint_initializer(var, reader, tensor_name)  # pylint: disable=protected-access
        logging.info("Initialize variable %s from checkpoint %s with %s" % (
            var.name, checkpoint_dir, tensor_name
        ))
    else:
      if not tensor_name.endswith("/"):
        raise ValueError(
            "Assignment map with scope only name (%s) "
            "should map to scope only (%s). "
            "Should be 'scope/': 'other_scope/'." % (
                scopes, tensor_name
            ))
      # If scope to scope mapping was provided, find all variables in the scope.
      # TODO(ipolosukhin): Refactor variable_scope module to provide nicer APIs.
      var_scope = vs._get_default_variable_store()  # pylint: disable=protected-access
      for var_name in var_scope._vars:  # pylint: disable=protected-access
        if var_name.startswith(scopes):
          # Lookup name with specified prefix and suffix from current variable.
          full_tensor_name = tensor_name + var_name[len(scopes) + 1:]
          if full_tensor_name not in variable_map:
            raise ValueError(
                "Tensor %s (%s in %s) is not found in %s checkpoint" % (
                    full_tensor_name, var_name[len(scopes) + 1:], tensor_name,
                    checkpoint_dir
                ))
          var = var_scope._vars[var_name]  # pylint: disable=protected-access
          var._initializer_op = _checkpoint_initializer(  # pylint: disable=protected-access
              var, reader, full_tensor_name)
          logging.info("Initialize variable %s from checkpoint %s with %s" % (
              var_name, checkpoint_dir, tensor_name
          ))
Author: 0-T-0, Project: tensorflow, Lines: 87, Source: checkpoints.py

Example 14: _init_from_checkpoint

def _init_from_checkpoint(_, ckpt_dir_or_file, assignment_map):
  """See `init_from_checkpoint` for documentation."""

  ckpt_file = _get_checkpoint_filename(ckpt_dir_or_file)
  reader = load_checkpoint(ckpt_dir_or_file)
  variable_map = reader.get_variable_to_shape_map()
  for tensor_name_in_ckpt, current_var_or_name in sorted(
      six.iteritems(assignment_map)):
    var = None
    # Check if this is Variable object or list of Variable objects (in case of
    # partitioned variables).
    if _is_variable(current_var_or_name) or (
        isinstance(current_var_or_name, list)
        and all(_is_variable(v) for v in current_var_or_name)):
      var = current_var_or_name
    else:
      store_vars = vs._get_default_variable_store()._vars  # pylint:disable=protected-access
      # Check if this variable is in var_store.
      var = store_vars.get(current_var_or_name, None)
      # Also check if variable is partitioned as list.
      if var is None:
        var = _collect_partitioned_variable(current_var_or_name, store_vars)
    if var is not None:
      # If 1 to 1 mapping was provided, find variable in the checkpoint.
      if tensor_name_in_ckpt not in variable_map:
        raise ValueError("Tensor %s is not found in %s checkpoint %s" % (
            tensor_name_in_ckpt, ckpt_dir_or_file, variable_map
        ))
      if _is_variable(var):
        # Additional at-call-time checks.
        if not var.get_shape().is_compatible_with(
            variable_map[tensor_name_in_ckpt]):
          raise ValueError(
              "Shape of variable %s (%s) doesn't match with shape of "
              "tensor %s (%s) from checkpoint reader." % (
                  var.name, str(var.get_shape()),
                  tensor_name_in_ckpt, str(variable_map[tensor_name_in_ckpt])
              ))
        var_name = var.name
      else:
        var_name = ",".join([v.name for v in var])
      _set_variable_or_list_initializer(var, ckpt_file, tensor_name_in_ckpt)
      logging.debug("Initialize variable %s from checkpoint %s with %s",
                    var_name, ckpt_dir_or_file, tensor_name_in_ckpt)
    else:
      scopes = ""
      # TODO(vihanjain): Support list of 'current_var_or_name' here.
      if "/" in current_var_or_name:
        scopes = current_var_or_name[:current_var_or_name.rindex("/")]
      if not tensor_name_in_ckpt.endswith("/"):
        raise ValueError(
            "Assignment map with scope only name {} should map to scope only "
            "{}. Should be 'scope/': 'other_scope/'.".format(
                scopes, tensor_name_in_ckpt))
      # If scope to scope mapping was provided, find all variables in the scope
      # and create variable to variable mapping.
      scope_variables = set()
      for var_name in store_vars:
        if not scopes or var_name.startswith(scopes + "/"):
          # Consume /part_ if partitioned variable.
          if "/part_" in var_name:
            var_name = var_name[:var_name.index("/part_")]
          scope_variables.add(var_name)
      for var_name in sorted(scope_variables):
        # Lookup name with specified prefix and suffix from current variable.
        # If tensor_name given is '/' (root), don't use it for full name.
        full_tensor_name = var_name[len(scopes):]
        if current_var_or_name != "/":
          full_tensor_name = full_tensor_name[1:]
        if tensor_name_in_ckpt != "/":
          full_tensor_name = tensor_name_in_ckpt + full_tensor_name
        # Remove trailing '/', if any, in the full_tensor_name
        if full_tensor_name.endswith("/"):
          full_tensor_name = full_tensor_name[:-1]
        if full_tensor_name not in variable_map:
          raise ValueError(
              "Tensor %s (%s in %s) is not found in %s checkpoint" % (
                  full_tensor_name, var_name[len(scopes) + 1:],
                  tensor_name_in_ckpt, ckpt_dir_or_file
              ))
        var = store_vars.get(var_name, None)
        if var is None:
          var = _collect_partitioned_variable(var_name, store_vars)
        _set_variable_or_list_initializer(var, ckpt_file, full_tensor_name)
        logging.debug("Initialize variable %s from checkpoint %s with %s",
                      var_name, ckpt_dir_or_file, full_tensor_name)
Author: sonnyhu, Project: tensorflow, Lines: 86, Source: checkpoint_utils.py
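
Example 14 calls a _collect_partitioned_variable helper that is not shown. A sketch consistent with the inline loop in Example 12, where the parts of a partitioned variable live in the store under '<variable>/part_<N>' names, might look like this:

def _collect_partitioned_variable(name, all_vars):
  """Returns the parts of a partitioned variable as a list, or None."""
  if name + "/part_0" in all_vars:
    var = []
    i = 0
    while name + "/part_%d" % i in all_vars:
      var.append(all_vars[name + "/part_%d" % i])
      i += 1
    return var
  return None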

Example 15: testGetVar

 def testGetVar(self):
   vs = variable_scope._get_default_variable_store()
   v = vs.get_variable("v", [1])
   v1 = vs.get_variable("v", [1])
   assert v == v1
Author: peace195, Project: tensorflow, Lines: 5, Source: variable_scope_test.py


Note: The tensorflow.python.ops.variable_scope._get_default_variable_store examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors; copyright in the source code belongs to the original authors, and distribution and use should follow each project's License. Do not reproduce without permission.