當前位置: 首頁>>代碼示例>>Python>>正文


Python resource_variable_ops.ResourceVariable方法代碼示例

本文整理匯總了Python中tensorflow.python.ops.resource_variable_ops.ResourceVariable方法的典型用法代碼示例。如果您正苦於以下問題:Python resource_variable_ops.ResourceVariable方法的具體用法?Python resource_variable_ops.ResourceVariable怎麽用?Python resource_variable_ops.ResourceVariable使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在tensorflow.python.ops.resource_variable_ops的用法示例。


在下文中一共展示了resource_variable_ops.ResourceVariable方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。

示例1: variable

# 需要導入模塊: from tensorflow.python.ops import resource_variable_ops [as 別名]
# 或者: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as 別名]
def variable(initial_value=None,
             trainable=True,
             collections=None,
             validate_shape=True,
             caching_device=None,
             name=None,
             dtype=None):
  """Creates a variable, honoring the current scope's `use_resource` flag.

  Returns a `ResourceVariable` when the active variable scope requests
  resource-based variables and a plain `variables.Variable` otherwise.
  All arguments are forwarded unchanged to the chosen constructor.
  """
  # Both constructors take the same keyword arguments, so build them once.
  common_kwargs = dict(
      initial_value=initial_value, trainable=trainable,
      collections=collections, validate_shape=validate_shape,
      caching_device=caching_device, name=name, dtype=dtype)
  if get_variable_scope().use_resource:
    return resource_variable_ops.ResourceVariable(**common_kwargs)
  return variables.Variable(**common_kwargs)
開發者ID:ryfeus,項目名稱:lambda-packs,代碼行數:19,代碼來源:variable_scope.py

示例2: assert_global_step

# 需要導入模塊: from tensorflow.python.ops import resource_variable_ops [as 別名]
# 或者: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as 別名]
def assert_global_step(global_step_tensor):
  """Asserts `global_step_tensor` is a scalar int `Variable` or `Tensor`.

  Args:
    global_step_tensor: `Tensor` to test.
  """
  # The global step may be a classic Variable, a plain Tensor, or a
  # resource-based variable.
  accepted_types = (variables.Variable, ops.Tensor,
                    resource_variable_ops.ResourceVariable)
  if not isinstance(global_step_tensor, accepted_types):
    raise TypeError(
        'Existing "global_step" must be a Variable or Tensor: %s.' %
        global_step_tensor)

  if not global_step_tensor.dtype.base_dtype.is_integer:
    raise TypeError('Existing "global_step" does not have integer type: %s' %
                    global_step_tensor.dtype)

  if global_step_tensor.get_shape().ndims != 0:
    raise TypeError('Existing "global_step" is not scalar: %s' %
                    global_step_tensor.get_shape())
開發者ID:ryfeus,項目名稱:lambda-packs,代碼行數:23,代碼來源:training_util.py

示例3: _estimate_data_distribution

# 需要導入模塊: from tensorflow.python.ops import resource_variable_ops [as 別名]
# 或者: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as 別名]
def _estimate_data_distribution(c, num_examples_per_class_seen):
  """Estimate data distribution as labels are seen.

  Args:
    c: The class labels.  Type `int32`, shape `[batch_size]`.
    num_examples_per_class_seen: A `ResourceVariable` containing counts.
      Type `int64`, shape `[num_classes]`.

  Returns:
    dist: The updated distribution.  Type `float32`, shape `[num_classes]`.
  """
  num_classes = num_examples_per_class_seen.get_shape()[0].value
  # Turn this batch's labels into per-class counts via a summed one-hot.
  batch_counts = math_ops.reduce_sum(
      array_ops.one_hot(c, num_classes, dtype=dtypes.int64), 0)
  # Fold the batch into the running totals.  The update is deliberately
  # asynchronous: using the cached value avoids a cross-device round-trip.
  updated_counts = num_examples_per_class_seen.assign_add(batch_counts)
  # Normalize the counts into a probability estimate.
  dist = math_ops.truediv(updated_counts,
                          math_ops.reduce_sum(updated_counts))
  return math_ops.cast(dist, dtypes.float32)
開發者ID:ryfeus,項目名稱:lambda-packs,代碼行數:25,代碼來源:dataset_ops.py

示例4: _gather

# 需要導入模塊: from tensorflow.python.ops import resource_variable_ops [as 別名]
# 或者: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as 別名]
def _gather(params, ids, name=None):
  """Helper function for _embedding_lookup_and_transform.

  This function gathers embeddings from a single tensor. The gather deals with
  resource variables specially.

  Args:
    params: A `Tensor` of embeddings.
    ids: A `Tensor` indexing the embeddings to be retrieved from `params`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `params`.
  """
  if not isinstance(params, resource_variable_ops.ResourceVariable):
    return array_ops.gather(params, ids, name=name)
  # Resource variables expose a dedicated sparse read for row gathers.
  return params.sparse_read(ids, name=name)
開發者ID:PacktPublishing,項目名稱:Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda,代碼行數:20,代碼來源:embedding_ops.py

示例5: __init__

# 需要導入模塊: from tensorflow.python.ops import resource_variable_ops [as 別名]
# 或者: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as 別名]
def __init__(self, var, slice_spec, name):
      """Creates a saveable for a resource variable or its read tensor.

      Args:
        var: Either an `ops.Tensor` produced by reading a resource variable,
          or a `resource_variable_ops.ResourceVariable` itself.
        slice_spec: Slice specification passed through to the `SaveSpec`.
        name: Name of the saveable.

      Raises:
        ValueError: If `var` is neither a resource variable nor a read op
          output.
      """
      self._var_device = var.device
      if isinstance(var, ops.Tensor):
        # `var` is the output of a read op; its op's first input is the
        # underlying variable handle.
        self.handle_op = var.op.inputs[0]
        tensor = var
      elif isinstance(var, resource_variable_ops.ResourceVariable):

        def _read_variable_closure(v):
          # Returns a zero-arg callable that reads `v` on the variable's own
          # device and then copies the value to the CPU, so the saver
          # consumes a host-side tensor.
          def f():
            with ops.device(v.device):
              x = v.read_value()
            with ops.device("/device:CPU:0"):
              return array_ops.identity(x)
          return f

        self.handle_op = var.handle
        # Pass a closure instead of a concrete tensor so the read happens
        # lazily at save time.
        tensor = _read_variable_closure(var)
      else:
        raise ValueError(
            "Saveable is neither a resource variable nor a read operation."
            " Got: %s" % repr(var))
      spec = BaseSaverBuilder.SaveSpec(tensor, slice_spec, name)
      super(BaseSaverBuilder.ResourceVariableSaveable, self).__init__(
          var, [spec], name) 
開發者ID:PacktPublishing,項目名稱:Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda,代碼行數:26,代碼來源:saver.py

示例6: assert_global_step

# 需要導入模塊: from tensorflow.python.ops import resource_variable_ops [as 別名]
# 或者: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as 別名]
def assert_global_step(global_step_tensor):
  """Asserts `global_step_tensor` is a scalar int `Variable` or `Tensor`.

  Args:
    global_step_tensor: `Tensor` to test.
  """
  valid_types = (variables.Variable, ops.Tensor,
                 resource_variable_ops.ResourceVariable)
  if not isinstance(global_step_tensor, valid_types):
    raise TypeError(
        'Existing "global_step" must be a Variable or Tensor: %s.' %
        global_step_tensor)

  if not global_step_tensor.dtype.base_dtype.is_integer:
    raise TypeError('Existing "global_step" does not have integer type: %s' %
                    global_step_tensor.dtype)

  # Only reject a non-scalar shape when the shape is fully defined; tensors
  # with unknown shapes are allowed through.
  shape = global_step_tensor.get_shape()
  if shape.ndims != 0 and shape.is_fully_defined():
    raise TypeError('Existing "global_step" is not scalar: %s' % shape)
開發者ID:PacktPublishing,項目名稱:Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda,代碼行數:24,代碼來源:training_util.py

示例7: testMinimizeSparseResourceVariable

# 需要導入模塊: from tensorflow.python.ops import resource_variable_ops [as 別名]
# 或者: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as 別名]
def testMinimizeSparseResourceVariable(self):
    """Minimizes a loss whose gradient w.r.t. var0 flows through a sparse
    embedding lookup, verifying SGD handles IndexedSlices gradients on
    resource variables."""
    for dtype in [tf.half, tf.float32, tf.float64]:
      with self.test_session():
        var0 = resource_variable_ops.ResourceVariable(
            [[1.0, 2.0]], dtype=dtype)
        var1 = resource_variable_ops.ResourceVariable(
            [3.0], dtype=dtype)
        x = tf.constant([[4.0], [5.0]], dtype=dtype)
        # Route the prediction through an embedding lookup so the gradient
        # w.r.t. var0 arrives as a sparse (IndexedSlices) gradient.  The
        # original code overwrote `pred` with a dense matmul on the next
        # line, which made the lookup dead code and this test an exact
        # duplicate of testMinimizeResourceVariable.
        pred = tf.matmul(tf.nn.embedding_lookup([var0], [0]), x) + var1
        loss = pred * pred
        sgd_op = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
        # TODO(apassos) calling initialize_resources on all resources here
        # doesn't work because the sessions and graph are reused across unit
        # tests and this would mean trying to reinitialize variables. Figure out
        # a long-term solution for this.
        resources.initialize_resources([var0, var1]).run()
        # Fetch params to validate initial values
        self.assertAllCloseAccordingToType([[1.0, 2.0]], var0.eval())
        self.assertAllCloseAccordingToType([3.0], var1.eval())
        # Run 1 step of sgd
        sgd_op.run()
        # Validate updated params: loss = pred^2, so d(loss)/d(pred) = 2*pred,
        # chained into the matmul inputs with learning rate 1.0.
        np_pred = 1.0 * 4.0 + 2.0 * 5.0 + 3.0
        np_grad = 2 * np_pred
        self.assertAllCloseAccordingToType(
            [[1.0 - np_grad * 4.0, 2.0 - np_grad * 5.0]], var0.eval())
        self.assertAllCloseAccordingToType(
            [3.0 - np_grad], var1.eval())
開發者ID:tobegit3hub,項目名稱:deep_image_model,代碼行數:31,代碼來源:gradient_descent_test.py

示例8: _do_gather

# 需要導入模塊: from tensorflow.python.ops import resource_variable_ops [as 別名]
# 或者: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as 別名]
def _do_gather(params, ids, name=None):
  """Deals with doing gather differently for resource variables."""
  # Resource variables support a native sparse row read; everything else
  # goes through a plain gather.
  if not isinstance(params, resource_variable_ops.ResourceVariable):
    return array_ops.gather(params, ids, name=name)
  return params.sparse_read(ids, name=name)
開發者ID:ryfeus,項目名稱:lambda-packs,代碼行數:7,代碼來源:embedding_ops.py

示例9: _get_variable_for

# 需要導入模塊: from tensorflow.python.ops import resource_variable_ops [as 別名]
# 或者: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as 別名]
def _get_variable_for(v):
  """Returns the ResourceVariable responsible for v, or v if not necessary."""
  if v.op.type != "VarHandleOp":
    return v
  # `v` is a variable handle: find the trainable ResourceVariable whose
  # handle op produced it.
  for candidate in variables.trainable_variables():
    if (isinstance(candidate, resource_variable_ops.ResourceVariable)
        and candidate.handle.op is v.op):
      return candidate
  raise ValueError("Got %s but  could not locate source variable." % (str(v)))
開發者ID:ryfeus,項目名稱:lambda-packs,代碼行數:11,代碼來源:optimizer.py

示例10: __init__

# 需要導入模塊: from tensorflow.python.ops import resource_variable_ops [as 別名]
# 或者: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as 別名]
def __init__(self, var, slice_spec, name):
      """Creates a saveable for a resource variable or its read tensor.

      Raises:
        ValueError: If `var` is neither a resource variable nor a read op
          output.
      """
      if isinstance(var, resource_variable_ops.ResourceVariable):
        self.handle_op = var.handle
      elif isinstance(var, ops.Tensor):
        # A read op's first input is the underlying variable handle.
        self.handle_op = var.op.inputs[0]
      else:
        raise ValueError(
            "Saveable is neither a resource variable nor a read operation."
            " Got: %s" % repr(var))
      spec = BaseSaverBuilder.SaveSpec(var, slice_spec, name)
      super(BaseSaverBuilder.ResourceVariableSaveable, self).__init__(
          var, [spec], name)
開發者ID:ryfeus,項目名稱:lambda-packs,代碼行數:14,代碼來源:saver.py

示例11: getvar

# 需要導入模塊: from tensorflow.python.ops import resource_variable_ops [as 別名]
# 或者: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as 別名]
def getvar(
      self,
      getter,
      name,
      shape=None,
      dtype=None,
      initializer=None,
      reuse=None,
      trainable=True,
      collections=None,  # pylint: disable=redefined-outer-name
      use_resource=None,
      **kwargs):
    """A custom variable getter.

    Creates (or reuses) the variable in the outer graph under the variable
    scope in which the function was defined, records it in
    `self.extra_vars`, and returns it — or, for resource variables, its read
    value.

    Args:
      getter: The underlying getter; part of the custom-getter signature
        (not used in this body).
      name: Name for the variable.
      shape: Shape of the variable.
      dtype: Data type of the variable.
      initializer: Initializer for the variable.
      reuse: Whether to reuse an existing variable.
      trainable: Whether the variable is trainable.
      collections: Graph collections to add the variable to.
      use_resource: Whether to create a resource-based variable.
      **kwargs: Additional arguments (not used in this body).

    Returns:
      The captured variable, or its value tensor when it is a
      `ResourceVariable`.
    """
    # Here, we switch the default graph to the outer graph and ask the
    # variable scope in which the function is defined to give us the
    # variable. The variable is stashed in extra_vars and returned to
    # the caller.
    #
    # We capture these variables so that the variable definition is
    # hoisted upward to the outer most graph.
    with self._outer_graph.as_default():
      # pylint: disable=protected-access
      var = self._vscope.get_variable(
          vs._get_default_variable_store(),
          name,
          shape=shape,
          dtype=dtype,
          initializer=initializer,
          reuse=reuse,
          trainable=trainable,
          collections=collections,
          use_resource=use_resource)
      self.extra_vars.append(var)
      if isinstance(var, resource_variable_ops.ResourceVariable):
        # For resource-based variables read the variable outside the function
        # and pass in the value. This ensures that the function is pure and
        # differentiable. TODO(apassos) this may have performance problems if
        # the function will only do embedding lookups on the variable.
        return var.value()
      return var 
開發者ID:ryfeus,項目名稱:lambda-packs,代碼行數:42,代碼來源:function.py

示例12: _is_variable

# 需要導入模塊: from tensorflow.python.ops import resource_variable_ops [as 別名]
# 或者: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as 別名]
def _is_variable(v):
  """Returns true if `v` is a variable."""
  variable_types = (variables.Variable,
                    resource_variable_ops.ResourceVariable)
  return isinstance(v, variable_types)
開發者ID:ryfeus,項目名稱:lambda-packs,代碼行數:6,代碼來源:feature_column.py

示例13: testResourceVariable

# 需要導入模塊: from tensorflow.python.ops import resource_variable_ops [as 別名]
# 或者: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as 別名]
def testResourceVariable(self):
    """Checks that `use_resource` controls which variable class is built."""
    legacy_var = variables_lib2.local_variable(0, use_resource=False)
    resource_var = variables_lib2.local_variable(0, use_resource=True)
    self.assertIsInstance(legacy_var, variables_lib.Variable)
    self.assertNotIsInstance(legacy_var,
                             resource_variable_ops.ResourceVariable)
    self.assertIsInstance(resource_var,
                          resource_variable_ops.ResourceVariable)
開發者ID:google-research,項目名稱:tf-slim,代碼行數:8,代碼來源:variables_test.py

示例14: testBasicResourceVariable

# 需要導入模塊: from tensorflow.python.ops import resource_variable_ops [as 別名]
# 或者: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as 別名]
def testBasicResourceVariable(self):
    """Applies one explicit-gradient SGD step to resource variables."""
    learning_rate = 3.0
    for dtype in [tf.half, tf.float32, tf.float64]:
      with self.test_session():
        var0 = resource_variable_ops.ResourceVariable(
            [1.0, 2.0], dtype=dtype)
        var1 = resource_variable_ops.ResourceVariable(
            [3.0, 4.0], dtype=dtype)
        grads0 = tf.constant([0.1, 0.1], dtype=dtype)
        grads1 = tf.constant([0.01, 0.01], dtype=dtype)
        optimizer = tf.train.GradientDescentOptimizer(learning_rate)
        sgd_op = optimizer.apply_gradients(
            zip([grads0, grads1], [var0, var1]))
        # TODO(apassos) calling initialize_resources on all resources here
        # doesn't work because the sessions and graph are reused across unit
        # tests and this would mean trying to reinitialize variables. Figure
        # out a long-term solution for this.
        resources.initialize_resources([var0, var1]).run()
        # Initial values must be intact before the step runs.
        self.assertAllCloseAccordingToType([1.0, 2.0], var0.eval())
        self.assertAllCloseAccordingToType([3.0, 4.0], var1.eval())
        # One SGD step moves each variable by -learning_rate * gradient.
        sgd_op.run()
        self.assertAllCloseAccordingToType(
            [1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1], var0.eval())
        self.assertAllCloseAccordingToType(
            [3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01], var1.eval())
開發者ID:tobegit3hub,項目名稱:deep_image_model,代碼行數:28,代碼來源:gradient_descent_test.py

示例15: testMinimizeResourceVariable

# 需要導入模塊: from tensorflow.python.ops import resource_variable_ops [as 別名]
# 或者: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as 別名]
def testMinimizeResourceVariable(self):
    """Runs one SGD minimize step on a dense linear model and checks it."""
    for dtype in [tf.half, tf.float32, tf.float64]:
      with self.test_session():
        var0 = resource_variable_ops.ResourceVariable(
            [[1.0, 2.0]], dtype=dtype)
        var1 = resource_variable_ops.ResourceVariable(
            [3.0], dtype=dtype)
        x = tf.constant([[4.0], [5.0]], dtype=dtype)
        pred = tf.matmul(var0, x) + var1
        loss = pred * pred
        sgd_op = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
        # TODO(apassos) calling initialize_resources on all resources here
        # doesn't work because the sessions and graph are reused across unit
        # tests and this would mean trying to reinitialize variables. Figure
        # out a long-term solution for this.
        resources.initialize_resources([var0, var1]).run()
        # Initial values must be intact before the step runs.
        self.assertAllCloseAccordingToType([[1.0, 2.0]], var0.eval())
        self.assertAllCloseAccordingToType([3.0], var1.eval())
        sgd_op.run()
        # Hand-computed check: loss = pred^2 so d(loss)/d(pred) = 2 * pred,
        # chained into the matmul inputs with learning rate 1.0.
        expected_pred = 1.0 * 4.0 + 2.0 * 5.0 + 3.0
        expected_grad = 2 * expected_pred
        self.assertAllCloseAccordingToType(
            [[1.0 - expected_grad * 4.0, 2.0 - expected_grad * 5.0]],
            var0.eval())
        self.assertAllCloseAccordingToType(
            [3.0 - expected_grad], var1.eval())
開發者ID:tobegit3hub,項目名稱:deep_image_model,代碼行數:30,代碼來源:gradient_descent_test.py


注:本文中的tensorflow.python.ops.resource_variable_ops.ResourceVariable方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。