

Python resource_variable_ops.ResourceVariable Code Examples

This article compiles typical usage examples of tensorflow.python.ops.resource_variable_ops.ResourceVariable in Python. If you are wondering what resource_variable_ops.ResourceVariable does, how to use it, or what working examples look like, the curated code examples below may help. You can also explore further usage examples for its containing module, tensorflow.python.ops.resource_variable_ops.

The following presents 22 code examples of resource_variable_ops.ResourceVariable, ordered by popularity by default. You can upvote the examples you find useful; your feedback helps the site recommend better Python examples.
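
For orientation, here is a minimal sketch that is not taken from any of the projects below (it assumes TensorFlow 1.x in graph mode): it constructs a ResourceVariable directly and reads its value back in a session.

import tensorflow as tf
from tensorflow.python.ops import resource_variable_ops

# Create a resource-backed variable; the name "v" is purely illustrative.
v = resource_variable_ops.ResourceVariable([1.0, 2.0], name="v")

with tf.Session() as sess:
    sess.run(v.initializer)          # initialize the underlying resource
    print(sess.run(v.read_value()))  # -> [1. 2.]

Unlike the older ref-based tf.Variable, a ResourceVariable is backed by a resource handle, which gives reads and writes well-defined ordering semantics; the examples below show how TensorFlow internals branch on this type.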

Example 1: variable

# Required import: from tensorflow.python.ops import resource_variable_ops [as alias]
# Or: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as alias]
def variable(initial_value=None,
             trainable=True,
             collections=None,
             validate_shape=True,
             caching_device=None,
             name=None,
             dtype=None):
  if get_variable_scope().use_resource:
    return resource_variable_ops.ResourceVariable(
        initial_value=initial_value, trainable=trainable,
        collections=collections, validate_shape=validate_shape,
        caching_device=caching_device, name=name, dtype=dtype)
  else:
    return variables.Variable(
        initial_value=initial_value, trainable=trainable,
        collections=collections, validate_shape=validate_shape,
        caching_device=caching_device, name=name, dtype=dtype) 
Developer: ryfeus, Project: lambda-packs, Lines: 19, Source: variable_scope.py
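
As a hedged companion sketch (assuming a TensorFlow 1.x version where variable scopes accept use_resource), the same switch can be exercised through the public tf.get_variable path; the scope and variable names are illustrative only.

import tensorflow as tf
from tensorflow.python.ops import resource_variable_ops

with tf.variable_scope("demo", use_resource=True):
    v = tf.get_variable("v", shape=[2], initializer=tf.zeros_initializer())

# With use_resource=True the getter returns a ResourceVariable,
# otherwise a plain tf.Variable.
print(isinstance(v, resource_variable_ops.ResourceVariable))  # True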


Example 2: assert_global_step

# Required import: from tensorflow.python.ops import resource_variable_ops [as alias]
# Or: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as alias]
def assert_global_step(global_step_tensor):
  """Asserts `global_step_tensor` is a scalar int `Variable` or `Tensor`.

  Args:
    global_step_tensor: `Tensor` to test.
  """
  if not (isinstance(global_step_tensor, variables.Variable) or
          isinstance(global_step_tensor, ops.Tensor) or
          isinstance(global_step_tensor,
                     resource_variable_ops.ResourceVariable)):
    raise TypeError(
        'Existing "global_step" must be a Variable or Tensor: %s.' %
        global_step_tensor)

  if not global_step_tensor.dtype.base_dtype.is_integer:
    raise TypeError('Existing "global_step" does not have integer type: %s' %
                    global_step_tensor.dtype)

  if global_step_tensor.get_shape().ndims != 0:
    raise TypeError('Existing "global_step" is not scalar: %s' %
                    global_step_tensor.get_shape()) 
Developer: ryfeus, Project: lambda-packs, Lines: 23, Source: training_util.py
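
A hedged usage sketch of the assertion above (assuming TensorFlow 1.x and that training_util is importable as shown): a scalar integer ResourceVariable passes, while a float tensor raises TypeError.

import tensorflow as tf
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.training import training_util

# A scalar int64 resource variable is accepted as a global step.
step = resource_variable_ops.ResourceVariable(0, dtype=tf.int64, name="global_step")
training_util.assert_global_step(step)

# A float tensor fails the integer-dtype check.
try:
    training_util.assert_global_step(tf.constant(1.5))
except TypeError as err:
    print(err)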


Example 3: _estimate_data_distribution

# Required import: from tensorflow.python.ops import resource_variable_ops [as alias]
# Or: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as alias]
def _estimate_data_distribution(c, num_examples_per_class_seen):
  """Estimate data distribution as labels are seen.

  Args:
    c: The class labels.  Type `int32`, shape `[batch_size]`.
    num_examples_per_class_seen: A `ResourceVariable` containing counts.
      Type `int64`, shape `[num_classes]`.

  Returns:
    dist: The updated distribution.  Type `float32`, shape `[num_classes]`.
  """
  num_classes = num_examples_per_class_seen.get_shape()[0].value
  # Update the class-count based on what labels are seen in
  # batch.  But do this asynchronously to avoid performing a
  # cross-device round-trip.  Just use the cached value.
  num_examples_per_class_seen = num_examples_per_class_seen.assign_add(
      math_ops.reduce_sum(
          array_ops.one_hot(c, num_classes, dtype=dtypes.int64),
          0))
  init_prob_estimate = math_ops.truediv(
      num_examples_per_class_seen,
      math_ops.reduce_sum(num_examples_per_class_seen))
  return math_ops.cast(init_prob_estimate, dtypes.float32) 
Developer: ryfeus, Project: lambda-packs, Lines: 25, Source: dataset_ops.py
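
A hedged usage sketch (assuming the function above is in scope together with its math_ops/array_ops/dtypes imports, TensorFlow 1.x graph mode): after one batch of labels [0, 0, 2] over three classes, the estimated distribution is roughly [0.67, 0.0, 0.33].

import tensorflow as tf
from tensorflow.python.ops import resource_variable_ops

counts = resource_variable_ops.ResourceVariable([0, 0, 0], dtype=tf.int64)
labels = tf.constant([0, 0, 2], dtype=tf.int32)
dist = _estimate_data_distribution(labels, counts)

with tf.Session() as sess:
    sess.run(counts.initializer)
    print(sess.run(dist))  # approximately [0.667 0. 0.333]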


Example 4: _gather

# Required import: from tensorflow.python.ops import resource_variable_ops [as alias]
# Or: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as alias]
def _gather(params, ids, name=None):
  """Helper function for _embedding_lookup_and_transform.

  This function gathers embeddings from a single tensor. The gather deals with
  resource variables specially.

  Args:
    params: A `Tensor` of embeddings.
    ids: A `Tensor` indexing the embeddings to be retrieved from `params`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `params`.
  """
  if isinstance(params, resource_variable_ops.ResourceVariable):
    return params.sparse_read(ids, name=name)
  else:
    return array_ops.gather(params, ids, name=name) 
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines: 20, Source: embedding_ops.py
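
A hedged usage sketch (TensorFlow 1.x graph mode): for a ResourceVariable the lookup goes through sparse_read, which reads only the requested rows from the resource; for a plain tensor, array_ops.gather would return the same values.

import tensorflow as tf
from tensorflow.python.ops import resource_variable_ops

embeddings = resource_variable_ops.ResourceVariable(
    [[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
rows = embeddings.sparse_read([2, 0])  # what _gather does for resource variables

with tf.Session() as sess:
    sess.run(embeddings.initializer)
    print(sess.run(rows))  # [[2. 2.] [0. 0.]]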


Example 5: __init__

# Required import: from tensorflow.python.ops import resource_variable_ops [as alias]
# Or: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as alias]
def __init__(self, var, slice_spec, name):
      self._var_device = var.device
      if isinstance(var, ops.Tensor):
        self.handle_op = var.op.inputs[0]
        tensor = var
      elif isinstance(var, resource_variable_ops.ResourceVariable):

        def _read_variable_closure(v):
          def f():
            with ops.device(v.device):
              x = v.read_value()
            with ops.device("/device:CPU:0"):
              return array_ops.identity(x)
          return f

        self.handle_op = var.handle
        tensor = _read_variable_closure(var)
      else:
        raise ValueError(
            "Saveable is neither a resource variable nor a read operation."
            " Got: %s" % repr(var))
      spec = BaseSaverBuilder.SaveSpec(tensor, slice_spec, name)
      super(BaseSaverBuilder.ResourceVariableSaveable, self).__init__(
          var, [spec], name) 
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines: 26, Source: saver.py


Example 6: assert_global_step

# Required import: from tensorflow.python.ops import resource_variable_ops [as alias]
# Or: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as alias]
def assert_global_step(global_step_tensor):
  """Asserts `global_step_tensor` is a scalar int `Variable` or `Tensor`.

  Args:
    global_step_tensor: `Tensor` to test.
  """
  if not (isinstance(global_step_tensor, variables.Variable) or
          isinstance(global_step_tensor, ops.Tensor) or
          isinstance(global_step_tensor,
                     resource_variable_ops.ResourceVariable)):
    raise TypeError(
        'Existing "global_step" must be a Variable or Tensor: %s.' %
        global_step_tensor)

  if not global_step_tensor.dtype.base_dtype.is_integer:
    raise TypeError('Existing "global_step" does not have integer type: %s' %
                    global_step_tensor.dtype)

  if (global_step_tensor.get_shape().ndims != 0 and
      global_step_tensor.get_shape().is_fully_defined()):
    raise TypeError('Existing "global_step" is not scalar: %s' %
                    global_step_tensor.get_shape()) 
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines: 24, Source: training_util.py


Example 7: _do_gather

# Required import: from tensorflow.python.ops import resource_variable_ops [as alias]
# Or: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as alias]
def _do_gather(params, ids, name=None):
  """Deals with doing gather differently for resource variables."""
  if isinstance(params, resource_variable_ops.ResourceVariable):
    return params.sparse_read(ids, name=name)
  return array_ops.gather(params, ids, name=name) 
Developer: ryfeus, Project: lambda-packs, Lines: 7, Source: embedding_ops.py


Example 8: _get_variable_for

# Required import: from tensorflow.python.ops import resource_variable_ops [as alias]
# Or: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as alias]
def _get_variable_for(v):
  """Returns the ResourceVariable responsible for v, or v if not necessary."""
  if v.op.type == "VarHandleOp":
    for var in variables.trainable_variables():
      if (isinstance(var, resource_variable_ops.ResourceVariable)
          and var.handle.op is v.op):
        return var
    raise ValueError("Got %s but  could not locate source variable." % (str(v)))
  return v 
Developer: ryfeus, Project: lambda-packs, Lines: 11, Source: optimizer.py
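
A hedged sketch of what this lookup does (graph mode, assuming the helper above is in scope): a ResourceVariable's handle tensor is produced by a VarHandleOp, and _get_variable_for maps that handle back to the trainable variable that owns it.

import tensorflow as tf
from tensorflow.python.ops import resource_variable_ops

v = resource_variable_ops.ResourceVariable([1.0], name="w", trainable=True)
print(v.handle.op.type)                  # VarHandleOp
print(_get_variable_for(v.handle) is v)  # True, since v is in trainable_variables()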


Example 9: __init__

# Required import: from tensorflow.python.ops import resource_variable_ops [as alias]
# Or: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as alias]
def __init__(self, var, slice_spec, name):
      if isinstance(var, ops.Tensor):
        self.handle_op = var.op.inputs[0]
      elif isinstance(var, resource_variable_ops.ResourceVariable):
        self.handle_op = var.handle
      else:
        raise ValueError(
            "Saveable is neither a resource variable nor a read operation."
            " Got: %s" % repr(var))
      spec = BaseSaverBuilder.SaveSpec(var, slice_spec, name)
      super(BaseSaverBuilder.ResourceVariableSaveable, self).__init__(
          var, [spec], name) 
Developer: ryfeus, Project: lambda-packs, Lines: 14, Source: saver.py


Example 10: getvar

# Required import: from tensorflow.python.ops import resource_variable_ops [as alias]
# Or: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as alias]
def getvar(
      self,
      getter,
      name,
      shape=None,
      dtype=None,
      initializer=None,
      reuse=None,
      trainable=True,
      collections=None,  # pylint: disable=redefined-outer-name
      use_resource=None,
      **kwargs):
    """A custom variable getter."""
    # Here, we switch the default graph to the outer graph and ask the
    # variable scope in which the function is defined to give us the
    # variable. The variable is stashed in extra_vars and returned to
    # the caller.
    #
    # We capture these variables so that the variable definition is
    # hoisted upward to the outer most graph.
    with self._outer_graph.as_default():
      # pylint: disable=protected-access
      var = self._vscope.get_variable(
          vs._get_default_variable_store(),
          name,
          shape=shape,
          dtype=dtype,
          initializer=initializer,
          reuse=reuse,
          trainable=trainable,
          collections=collections,
          use_resource=use_resource)
      self.extra_vars.append(var)
      if isinstance(var, resource_variable_ops.ResourceVariable):
        # For resource-based variables read the variable outside the function
        # and pass in the value. This ensures that the function is pure and
        # differentiable. TODO(apassos) this may have performance problems if
        # the function will only do embedding lookups on the variable.
        return var.value()
      return var 
Developer: ryfeus, Project: lambda-packs, Lines: 42, Source: function.py


Example 11: _is_variable

# Required import: from tensorflow.python.ops import resource_variable_ops [as alias]
# Or: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as alias]
def _is_variable(v):
  """Returns true if `v` is a variable."""
  return isinstance(v, (variables.Variable,
                        resource_variable_ops.ResourceVariable)) 
Developer: ryfeus, Project: lambda-packs, Lines: 6, Source: feature_column.py


Example 12: testResourceVariable

# Required import: from tensorflow.python.ops import resource_variable_ops [as alias]
# Or: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as alias]
def testResourceVariable(self):
    a = variables_lib2.local_variable(0, use_resource=False)
    b = variables_lib2.local_variable(0, use_resource=True)
    self.assertIsInstance(a, variables_lib.Variable)
    self.assertNotIsInstance(a, resource_variable_ops.ResourceVariable)
    self.assertIsInstance(b, resource_variable_ops.ResourceVariable) 
Developer: google-research, Project: tf-slim, Lines: 8, Source: variables_test.py


Example 13: testBasicResourceVariable

# Required import: from tensorflow.python.ops import resource_variable_ops [as alias]
# Or: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as alias]
def testBasicResourceVariable(self):
    for dtype in [tf.half, tf.float32, tf.float64]:
      with self.test_session():
        var0 = resource_variable_ops.ResourceVariable(
            [1.0, 2.0], dtype=dtype)
        var1 = resource_variable_ops.ResourceVariable(
            [3.0, 4.0], dtype=dtype)
        grads0 = tf.constant([0.1, 0.1], dtype=dtype)
        grads1 = tf.constant([0.01, 0.01], dtype=dtype)
        sgd_op = tf.train.GradientDescentOptimizer(3.0).apply_gradients(zip(
            [grads0, grads1], [var0, var1]))
        # TODO(apassos) calling initialize_resources on all resources here
        # doesn't work because the sessions and graph are reused across unit
        # tests and this would mean trying to reinitialize variables. Figure out
        # a long-term solution for this.
        resources.initialize_resources([var0, var1]).run()
        # Fetch params to validate initial values
        self.assertAllCloseAccordingToType([1.0, 2.0], var0.eval())
        self.assertAllCloseAccordingToType([3.0, 4.0], var1.eval())
        # Run 1 step of sgd
        sgd_op.run()
        # Validate updated params
        self.assertAllCloseAccordingToType(
            [1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1], var0.eval())
        self.assertAllCloseAccordingToType(
            [3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01], var1.eval()) 
Developer: tobegit3hub, Project: deep_image_model, Lines: 28, Source: gradient_descent_test.py


Example 14: testMinimizeResourceVariable

# Required import: from tensorflow.python.ops import resource_variable_ops [as alias]
# Or: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as alias]
def testMinimizeResourceVariable(self):
    for dtype in [tf.half, tf.float32, tf.float64]:
      with self.test_session():
        var0 = resource_variable_ops.ResourceVariable(
            [[1.0, 2.0]], dtype=dtype)
        var1 = resource_variable_ops.ResourceVariable(
            [3.0], dtype=dtype)
        x = tf.constant([[4.0], [5.0]], dtype=dtype)
        pred = tf.matmul(var0, x) + var1
        loss = pred*pred
        sgd_op = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
        # TODO(apassos) calling initialize_resources on all resources here
        # doesn't work because the sessions and graph are reused across unit
        # tests and this would mean trying to reinitialize variables. Figure out
        # a long-term solution for this.
        resources.initialize_resources([var0, var1]).run()
        # Fetch params to validate initial values
        self.assertAllCloseAccordingToType([[1.0, 2.0]], var0.eval())
        self.assertAllCloseAccordingToType([3.0], var1.eval())
        # Run 1 step of sgd
        sgd_op.run()
        # Validate updated params
        np_pred = 1.0 * 4.0 + 2.0 * 5.0 + 3.0
        np_grad = 2 * np_pred
        self.assertAllCloseAccordingToType(
            [[1.0 - np_grad * 4.0, 2.0 - np_grad * 5.0]], var0.eval())
        self.assertAllCloseAccordingToType(
            [3.0 - np_grad], var1.eval()) 
Developer: tobegit3hub, Project: deep_image_model, Lines: 30, Source: gradient_descent_test.py


Example 15: testMinimizeSparseResourceVariable

# Required import: from tensorflow.python.ops import resource_variable_ops [as alias]
# Or: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as alias]
def testMinimizeSparseResourceVariable(self):
    for dtype in [tf.half, tf.float32, tf.float64]:
      with self.test_session():
        var0 = resource_variable_ops.ResourceVariable(
            [[1.0, 2.0]], dtype=dtype)
        var1 = resource_variable_ops.ResourceVariable(
            [3.0], dtype=dtype)
        x = tf.constant([[4.0], [5.0]], dtype=dtype)
        pred = tf.matmul(tf.nn.embedding_lookup([var0], [0]), x) + var1
        loss = pred*pred
        sgd_op = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
        # TODO(apassos) calling initialize_resources on all resources here
        # doesn't work because the sessions and graph are reused across unit
        # tests and this would mean trying to reinitialize variables. Figure out
        # a long-term solution for this.
        resources.initialize_resources([var0, var1]).run()
        # Fetch params to validate initial values
        self.assertAllCloseAccordingToType([[1.0, 2.0]], var0.eval())
        self.assertAllCloseAccordingToType([3.0], var1.eval())
        # Run 1 step of sgd
        sgd_op.run()
        # Validate updated params
        np_pred = 1.0 * 4.0 + 2.0 * 5.0 + 3.0
        np_grad = 2 * np_pred
        self.assertAllCloseAccordingToType(
            [[1.0 - np_grad * 4.0, 2.0 - np_grad * 5.0]], var0.eval())
        self.assertAllCloseAccordingToType(
            [3.0 - np_grad], var1.eval()) 
Developer: tobegit3hub, Project: deep_image_model, Lines: 31, Source: gradient_descent_test.py


Example 16: _get_variable_for

# Required import: from tensorflow.python.ops import resource_variable_ops [as alias]
# Or: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as alias]
def _get_variable_for(v):
  """Returns the ResourceVariable responsible for v, or v if not necessary."""
  if context.in_eager_mode():
    return v
  if v.op.type == "VarHandleOp":
    for var in variables.trainable_variables():
      if (isinstance(var, resource_variable_ops.ResourceVariable)
          and var.handle.op is v.op):
        return var
    raise ValueError("Got %s but could not locate source variable." % (str(v)))
  return v 
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines: 13, Source: optimizer.py


Example 17: _assign_moving_average

# Required import: from tensorflow.python.ops import resource_variable_ops [as alias]
# Or: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as alias]
def _assign_moving_average(self, variable, value, one_minus_decay):
    with ops.name_scope(None, 'AssignMovingAvg',
                        [variable, value, one_minus_decay]) as scope:
      with ops.colocate_with(variable):
        update_delta = math_ops.multiply(
            math_ops.subtract(variable.read_value(), value),
            one_minus_decay)
        if isinstance(variable, resource_variable_ops.ResourceVariable):
          # state_ops.assign_sub does an extra read_variable_op after the
          # assign. We avoid that here.
          return gen_resource_variable_ops.assign_sub_variable_op(
              variable.handle, update_delta, name=scope)
        else:
          return state_ops.assign_sub(variable, update_delta, name=scope) 
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines: 16, Source: normalization.py
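
For reference, a tiny sketch of the arithmetic performed above, written out in plain Python: the update var -= (var - value) * (1 - decay) is the usual exponential moving average var = decay * var + (1 - decay) * value.

decay = 0.99
var, value = 10.0, 4.0
var -= (var - value) * (1.0 - decay)
print(var)  # 9.94 == 0.99 * 10.0 + 0.01 * 4.0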


Example 18: __init__

# Required import: from tensorflow.python.ops import resource_variable_ops [as alias]
# Or: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as alias]
def __init__(self, name, initializer, shape, dtype, trainable):
    self.name = name
    if initializer is None:
      initializer = _default_initializer(name, shape, dtype)
    initial_value = lambda: initializer(shape, dtype=dtype)

    with context.eager_mode():
      self.variable = resource_variable_ops.ResourceVariable(
          initial_value=initial_value, name=name, dtype=dtype,
          trainable=trainable)
    self.shape = shape
    self.dtype = dtype
    self.placeholder = None
    self.trainable = trainable 
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines: 16, Source: graph_callable.py
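
A hedged usage sketch (assuming a TensorFlow 1.x release where tf.enable_eager_execution is available): under eager execution a ResourceVariable can be created, updated, and read without a session, which is what the wrapper above relies on.

import tensorflow as tf
from tensorflow.python.ops import resource_variable_ops

tf.enable_eager_execution()
v = resource_variable_ops.ResourceVariable([1.0, 2.0], name="eager_v")
v.assign_add([1.0, 1.0])
print(v.numpy())  # [2. 3.]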


Example 19: _eval_helper

# Required import: from tensorflow.python.ops import resource_variable_ops [as alias]
# Or: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as alias]
def _eval_helper(self, tensors):
    if isinstance(tensors, ops.EagerTensor):
      return tensors.numpy()
    if isinstance(tensors, resource_variable_ops.ResourceVariable):
      return tensors.read_value().numpy()

    if isinstance(tensors, tuple):
      return tuple([self._eval_helper(t) for t in tensors])
    elif isinstance(tensors, list):
      return [self._eval_helper(t) for t in tensors]
    elif isinstance(tensors, dict):
      assert not tensors, "Only support empty dict now."
      return dict()
    else:
      raise ValueError("Unsupported type.") 
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines: 17, Source: test_util.py


Example 20: OpListToDict

# Required import: from tensorflow.python.ops import resource_variable_ops [as alias]
# Or: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as alias]
def OpListToDict(op_list):
    """Create a dictionary of names to operation lists.

    Args:
      op_list: A list, tuple, or set of Variables or SaveableObjects.

    Returns:
      A dictionary of names to the operations that must be saved under
      that name.  Variables with save_slice_info are grouped together under the
      same key in no particular order.

    Raises:
      TypeError: If the type of op_list or its elements is not supported.
      ValueError: If at least two saveables share the same name.
    """
    if not isinstance(op_list, (list, tuple, set)):
      raise TypeError("Variables to save should be passed in a dict or a "
                      "list: %s" % op_list)
    op_list = set(op_list)
    names_to_saveables = {}
    # pylint: disable=protected-access
    for var in op_list:
      if isinstance(var, BaseSaverBuilder.SaveableObject):
        names_to_saveables[var.name] = var
      elif isinstance(var, variables.PartitionedVariable):
        if var.name in names_to_saveables:
          raise ValueError("At least two variables have the same name: %s" %
                           var.name)
        names_to_saveables[var.name] = var
      elif ((isinstance(var, variables.Variable) or
             isinstance(var, resource_variable_ops.ResourceVariable)) and
            var._save_slice_info):
        name = var._save_slice_info.full_name
        if name in names_to_saveables:
          if not isinstance(names_to_saveables[name], list):
            raise ValueError("Mixing slices and non-slices with the same name: "
                             "%s" % name)
          names_to_saveables[name].append(var)
        else:
          names_to_saveables[name] = [var]
      else:
        var = ops.internal_convert_to_tensor(var, as_ref=True)
        if not BaseSaverBuilder._IsVariable(var):
          raise TypeError("Variable to save is not a Variable: %s" % var)
        if var.op.type == "ReadVariableOp":
          name = var.op.inputs[0].op.name
        else:
          name = var.op.name
        if name in names_to_saveables:
          raise ValueError("At least two variables have the same name: %s" %
                           name)
        names_to_saveables[name] = var
      # pylint: enable=protected-access
    return names_to_saveables 
Developer: ryfeus, Project: lambda-packs, Lines: 56, Source: saver.py
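
A hedged usage sketch (TensorFlow 1.x graph mode; the variable names are illustrative): a resource variable without slice info falls into the final else branch, is converted to its ReadVariableOp tensor, and is keyed by the name of the VarHandleOp that backs it, so it can be saved and restored by name alongside ordinary variables.

import tensorflow as tf
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.training import saver as saver_lib

a = resource_variable_ops.ResourceVariable(1.0, name="a")
b = tf.Variable(2.0, name="b")
mapping = saver_lib.BaseSaverBuilder.OpListToDict([a, b])
print(sorted(mapping.keys()))  # expected: ['a', 'b']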


Example 21: _hook_variable

# Required import: from tensorflow.python.ops import resource_variable_ops [as alias]
# Or: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as alias]
def _hook_variable(self, syft_type: type):
        """Adds PySyft Tensor functionality to tf.Variable.

        In practice, the user is generally working with subclasses of
        tf.Variable, e.g. ResourceVariable, so we hook methods for those and
        only override the tf.Variable constructor to provide syft registration.
        You may read about what kind of modifications are made in the methods
        that this method calls.

        Args:
            syft_type: The abstract type whose methods should all be added to
                the ResourceVariable class.
        """
        # Reinitialize the tf.Variable init method with Syft registration
        self._add_registration_to___init__(tf.Variable)

        # Overload tf.Variable properties with Syft properties
        self._hook_properties(tf.Variable)

        # Transfer the remaining methods to ResourceVariable, excluding the special methods below
        exclude = [
            "__class__",
            "__delattr__",
            "__dict__",
            "__dir__",
            "__doc__",
            "__format__",
            "__getattribute__",
            "__hash__",
            "__init__",
            "__init_subclass__",
            "__weakref__",
            "__module__",
            "__ne__",
            "__new__",
            "__reduce__",
            "__reduce_ex__",
            "__setattr__",
            "__sizeof__",
            "__subclasshook__",
        ]
        self._transfer_methods_to_framework_class(ResourceVariable, syft_type, exclude)
        self._hook_properties(ResourceVariable)
        self._hook_native_methods(ResourceVariable) 
Developer: OpenMined, Project: PySyft-TensorFlow, Lines: 46, Source: hook.py


Example 22: OpListToDict

# Required import: from tensorflow.python.ops import resource_variable_ops [as alias]
# Or: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as alias]
def OpListToDict(op_list):
    """Create a dictionary of names to operation lists.

    Args:
      op_list: A list, tuple, or set of Variables or SaveableObjects.

    Returns:
      A dictionary of names to the operations that must be saved under
      that name.  Variables with save_slice_info are grouped together under the
      same key in no particular order.

    Raises:
      TypeError: If the type of op_list or its elements is not supported.
      ValueError: If at least two saveables share the same name.
    """
    if not isinstance(op_list, (list, tuple, set)):
      raise TypeError("Variables to save should be passed in a dict or a "
                      "list: %s" % op_list)
    op_list = set(op_list)
    names_to_saveables = {}
    # pylint: disable=protected-access
    for var in op_list:
      if isinstance(var, BaseSaverBuilder.SaveableObject):
        names_to_saveables[var.name] = var
      elif isinstance(var, variables.PartitionedVariable):
        if var.name in names_to_saveables:
          raise ValueError("At least two variables have the same name: %s" %
                           var.name)
        names_to_saveables[var.name] = var
      elif isinstance(var, variables.Variable) and var._save_slice_info:
        name = var._save_slice_info.full_name
        if name in names_to_saveables:
          if not isinstance(names_to_saveables[name], list):
            raise ValueError("Mixing slices and non-slices with the same name: "
                             "%s" % name)
          names_to_saveables[name].append(var)
        else:
          names_to_saveables[name] = [var]
      else:
        if context.in_graph_mode():
          var = ops.internal_convert_to_tensor(var, as_ref=True)
          if not BaseSaverBuilder._IsVariable(var):
            raise TypeError("Variable to save is not a Variable: %s" % var)
          if var.op.type == "ReadVariableOp":
            name = var.op.inputs[0].op.name
          else:
            name = var.op.name
          if name in names_to_saveables:
            raise ValueError("At least two variables have the same name: %s" %
                             name)
          names_to_saveables[name] = var
        else:
          if not isinstance(var, resource_variable_ops.ResourceVariable):
            raise ValueError("Can only save/restore ResourceVariable eager "
                             "mode is enabled, type: %s." % type(var))
          names_to_saveables[var._shared_name] = var

      # pylint: enable=protected-access
    return names_to_saveables 
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines: 61, Source: saver.py



Note: The tensorflow.python.ops.resource_variable_ops.ResourceVariable examples in this article are compiled from source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their authors; copyright remains with the original authors, and distribution or use should follow the corresponding project's license. Please do not republish without permission.