This article compiles typical usage examples of the Python class tensorflow.python.ops.resource_variable_ops.ResourceVariable. If you have been wondering what resource_variable_ops.ResourceVariable does or how to use it, the examples curated below should help; you can also explore the containing module, tensorflow.python.ops.resource_variable_ops, for further usage.
The following 15 code examples of resource_variable_ops.ResourceVariable are shown below, ordered by popularity.
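Before the examples, a minimal quick-start sketch, assuming TensorFlow 1.x graph mode: a ResourceVariable is constructed like a regular tf.Variable but is backed by a resource handle and read explicitly.

import tensorflow as tf  # assumes TensorFlow 1.x
from tensorflow.python.ops import resource_variable_ops

v = resource_variable_ops.ResourceVariable([1.0, 2.0], name="v")

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(v.read_value()))      # [1. 2.]
  sess.run(v.assign_add([0.5, 0.5]))   # updates go through the handle
  print(sess.run(v.read_value()))      # [1.5 2.5]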
Example 1: variable
# Required import: from tensorflow.python.ops import resource_variable_ops [as alias]
# Or: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as alias]
def variable(initial_value=None,
             trainable=True,
             collections=None,
             validate_shape=True,
             caching_device=None,
             name=None,
             dtype=None):
  # Honor the use_resource flag of the enclosing variable scope: return a
  # ResourceVariable when it is set, a legacy ref Variable otherwise.
  if get_variable_scope().use_resource:
    return resource_variable_ops.ResourceVariable(
        initial_value=initial_value, trainable=trainable,
        collections=collections, validate_shape=validate_shape,
        caching_device=caching_device, name=name, dtype=dtype)
  else:
    return variables.Variable(
        initial_value=initial_value, trainable=trainable,
        collections=collections, validate_shape=validate_shape,
        caching_device=caching_device, name=name, dtype=dtype)
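Which branch runs is controlled by the enclosing variable scope. A minimal sketch of flipping that flag through the public API, assuming TensorFlow 1.x graph mode:

import tensorflow as tf  # assumes TensorFlow 1.x
from tensorflow.python.ops import resource_variable_ops

# Inside a scope with use_resource=True, get_variable returns a
# resource-based variable; with use_resource=False, a legacy ref variable.
with tf.variable_scope("res", use_resource=True):
  v_res = tf.get_variable("v", shape=[2], initializer=tf.zeros_initializer())
with tf.variable_scope("ref", use_resource=False):
  v_ref = tf.get_variable("v", shape=[2], initializer=tf.zeros_initializer())

print(isinstance(v_res, resource_variable_ops.ResourceVariable))  # True
print(isinstance(v_ref, resource_variable_ops.ResourceVariable))  # False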
Example 2: assert_global_step
# Required import: from tensorflow.python.ops import resource_variable_ops [as alias]
# Or: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as alias]
def assert_global_step(global_step_tensor):
  """Asserts `global_step_tensor` is a scalar int `Variable` or `Tensor`.

  Args:
    global_step_tensor: `Tensor` to test.
  """
  if not (isinstance(global_step_tensor, variables.Variable) or
          isinstance(global_step_tensor, ops.Tensor) or
          isinstance(global_step_tensor,
                     resource_variable_ops.ResourceVariable)):
    raise TypeError(
        'Existing "global_step" must be a Variable or Tensor: %s.' %
        global_step_tensor)
  if not global_step_tensor.dtype.base_dtype.is_integer:
    raise TypeError('Existing "global_step" does not have integer type: %s' %
                    global_step_tensor.dtype)
  if global_step_tensor.get_shape().ndims != 0:
    raise TypeError('Existing "global_step" is not scalar: %s' %
                    global_step_tensor.get_shape())
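A minimal usage sketch, assuming TensorFlow 1.x; tf.train.get_or_create_global_step produces a scalar integer variable that passes all three checks:

import tensorflow as tf  # assumes TensorFlow 1.x
from tensorflow.python.training import training_util

with tf.Graph().as_default():
  step = tf.train.get_or_create_global_step()
  training_util.assert_global_step(step)  # passes: scalar int64 variable

  bad = tf.constant(1.5)  # float, so the dtype check raises
  try:
    training_util.assert_global_step(bad)
  except TypeError as e:
    print(e)  # Existing "global_step" does not have integer type: ...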
Example 3: _estimate_data_distribution
# Required import: from tensorflow.python.ops import resource_variable_ops [as alias]
# Or: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as alias]
def _estimate_data_distribution(c, num_examples_per_class_seen):
  """Estimates the data distribution as labels are seen.

  Args:
    c: The class labels. Type `int32`, shape `[batch_size]`.
    num_examples_per_class_seen: A `ResourceVariable` containing counts.
      Type `int64`, shape `[num_classes]`.

  Returns:
    dist: The updated distribution. Type `float32`, shape `[num_classes]`.
  """
  num_classes = num_examples_per_class_seen.get_shape()[0].value
  # Update the class counts with the labels seen in this batch, but do it
  # asynchronously to avoid a cross-device round-trip: just use the cached
  # value.
  num_examples_per_class_seen = num_examples_per_class_seen.assign_add(
      math_ops.reduce_sum(
          array_ops.one_hot(c, num_classes, dtype=dtypes.int64), 0))
  init_prob_estimate = math_ops.truediv(
      num_examples_per_class_seen,
      math_ops.reduce_sum(num_examples_per_class_seen))
  return math_ops.cast(init_prob_estimate, dtypes.float32)
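A minimal driving sketch, assuming TensorFlow 1.x and that the _estimate_data_distribution helper above is in scope together with its own imports (math_ops, array_ops, dtypes); the names num_classes, counts, and labels here are illustrative:

import tensorflow as tf  # assumes TensorFlow 1.x
from tensorflow.python.ops import resource_variable_ops

num_classes = 3
counts = resource_variable_ops.ResourceVariable(
    tf.zeros([num_classes], dtype=tf.int64))
labels = tf.constant([0, 0, 2], dtype=tf.int32)

dist = _estimate_data_distribution(labels, counts)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(dist))  # ~[0.667 0.    0.333] after one batch of [0, 0, 2]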
Example 4: _gather
# Required import: from tensorflow.python.ops import resource_variable_ops [as alias]
# Or: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as alias]
def _gather(params, ids, name=None):
  """Helper function for _embedding_lookup_and_transform.

  This function gathers embeddings from a single tensor. The gather handles
  resource variables specially.

  Args:
    params: A `Tensor` of embeddings.
    ids: A `Tensor` indexing the embeddings to be retrieved from `params`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `params`.
  """
  if isinstance(params, resource_variable_ops.ResourceVariable):
    return params.sparse_read(ids, name=name)
  else:
    return array_ops.gather(params, ids, name=name)
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines: 20, Source: embedding_ops.py
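A minimal sketch exercising both branches, assuming TensorFlow 1.x and the _gather helper above in scope:

import tensorflow as tf  # assumes TensorFlow 1.x
from tensorflow.python.ops import resource_variable_ops

table = resource_variable_ops.ResourceVariable(
    [[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
ids = tf.constant([2, 0])

rows_from_var = _gather(table, ids)  # ResourceVariable: sparse_read branch
rows_from_tensor = _gather(
    tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]]),
    ids)                             # plain Tensor: array_ops.gather branch

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(rows_from_var))     # [[5. 6.] [1. 2.]]
  print(sess.run(rows_from_tensor))  # same values via the dense gather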
Example 5: __init__
# Required import: from tensorflow.python.ops import resource_variable_ops [as alias]
# Or: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as alias]
def __init__(self, var, slice_spec, name):
  self._var_device = var.device
  if isinstance(var, ops.Tensor):
    self.handle_op = var.op.inputs[0]
    tensor = var
  elif isinstance(var, resource_variable_ops.ResourceVariable):

    def _read_variable_closure(v):
      def f():
        with ops.device(v.device):
          x = v.read_value()
          with ops.device("/device:CPU:0"):
            return array_ops.identity(x)
      return f

    self.handle_op = var.handle
    tensor = _read_variable_closure(var)
  else:
    raise ValueError(
        "Saveable is neither a resource variable nor a read operation."
        " Got: %s" % repr(var))
  spec = BaseSaverBuilder.SaveSpec(tensor, slice_spec, name)
  super(BaseSaverBuilder.ResourceVariableSaveable, self).__init__(
      var, [spec], name)
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines: 26, Source: saver.py
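Note the device placement in the closure: the value is read on the variable's own device and then copied to /device:CPU:0 via an identity, so variables placed on GPUs can still be checkpointed from host memory rather than written directly from the accelerator.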
Example 6: assert_global_step
# Required import: from tensorflow.python.ops import resource_variable_ops [as alias]
# Or: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as alias]
def assert_global_step(global_step_tensor):
  """Asserts `global_step_tensor` is a scalar int `Variable` or `Tensor`.

  Args:
    global_step_tensor: `Tensor` to test.
  """
  if not (isinstance(global_step_tensor, variables.Variable) or
          isinstance(global_step_tensor, ops.Tensor) or
          isinstance(global_step_tensor,
                     resource_variable_ops.ResourceVariable)):
    raise TypeError(
        'Existing "global_step" must be a Variable or Tensor: %s.' %
        global_step_tensor)
  if not global_step_tensor.dtype.base_dtype.is_integer:
    raise TypeError('Existing "global_step" does not have integer type: %s' %
                    global_step_tensor.dtype)
  if (global_step_tensor.get_shape().ndims != 0 and
      global_step_tensor.get_shape().is_fully_defined()):
    raise TypeError('Existing "global_step" is not scalar: %s' %
                    global_step_tensor.get_shape())
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines: 24, Source: training_util.py
Example 7: testMinimizeSparseResourceVariable
# Required import: from tensorflow.python.ops import resource_variable_ops [as alias]
# Or: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as alias]
def testMinimizeSparseResourceVariable(self):
  for dtype in [tf.half, tf.float32, tf.float64]:
    with self.test_session():
      var0 = resource_variable_ops.ResourceVariable(
          [[1.0, 2.0]], dtype=dtype)
      var1 = resource_variable_ops.ResourceVariable(
          [3.0], dtype=dtype)
      x = tf.constant([[4.0], [5.0]], dtype=dtype)
      # Route the read of var0 through a sparse embedding lookup so the
      # optimizer exercises the sparse-gradient path, then add the bias.
      pred = tf.matmul(tf.nn.embedding_lookup([var0], [0]), x)
      pred += var1
      loss = pred * pred
      sgd_op = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
      # TODO(apassos): calling initialize_resources on all resources here
      # doesn't work because the sessions and graph are reused across unit
      # tests and this would mean trying to reinitialize variables. Figure
      # out a long-term solution for this.
      resources.initialize_resources([var0, var1]).run()
      # Fetch params to validate initial values.
      self.assertAllCloseAccordingToType([[1.0, 2.0]], var0.eval())
      self.assertAllCloseAccordingToType([3.0], var1.eval())
      # Run 1 step of SGD.
      sgd_op.run()
      # Validate updated params.
      np_pred = 1.0 * 4.0 + 2.0 * 5.0 + 3.0
      np_grad = 2 * np_pred
      self.assertAllCloseAccordingToType(
          [[1.0 - np_grad * 4.0, 2.0 - np_grad * 5.0]], var0.eval())
      self.assertAllCloseAccordingToType(
          [3.0 - np_grad], var1.eval())
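The expected values follow directly from the chain rule: with loss = pred * pred and pred = 1*4 + 2*5 + 3 = 17, the gradient with respect to pred is 2*17 = 34; it is scaled by x (4 and 5) for var0 and by 1 for var1, so with a learning rate of 1.0 the updates are exactly np_grad*4, np_grad*5, and np_grad.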
Example 8: _do_gather
# Required import: from tensorflow.python.ops import resource_variable_ops [as alias]
# Or: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as alias]
def _do_gather(params, ids, name=None):
  """Deals with doing gather differently for resource variables."""
  if isinstance(params, resource_variable_ops.ResourceVariable):
    return params.sparse_read(ids, name=name)
  return array_ops.gather(params, ids, name=name)
Example 9: _get_variable_for
# Required import: from tensorflow.python.ops import resource_variable_ops [as alias]
# Or: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as alias]
def _get_variable_for(v):
  """Returns the ResourceVariable responsible for v, or v if not necessary."""
  if v.op.type == "VarHandleOp":
    # Scan trainable variables for the one whose handle op produced v.
    for var in variables.trainable_variables():
      if (isinstance(var, resource_variable_ops.ResourceVariable)
          and var.handle.op is v.op):
        return var
    raise ValueError("Got %s but could not locate source variable." % (str(v)))
  return v
Example 10: __init__
# Required import: from tensorflow.python.ops import resource_variable_ops [as alias]
# Or: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as alias]
def __init__(self, var, slice_spec, name):
  if isinstance(var, ops.Tensor):
    self.handle_op = var.op.inputs[0]
  elif isinstance(var, resource_variable_ops.ResourceVariable):
    self.handle_op = var.handle
  else:
    raise ValueError(
        "Saveable is neither a resource variable nor a read operation."
        " Got: %s" % repr(var))
  spec = BaseSaverBuilder.SaveSpec(var, slice_spec, name)
  super(BaseSaverBuilder.ResourceVariableSaveable, self).__init__(
      var, [spec], name)
Example 11: getvar
# Required import: from tensorflow.python.ops import resource_variable_ops [as alias]
# Or: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as alias]
def getvar(
    self,
    getter,
    name,
    shape=None,
    dtype=None,
    initializer=None,
    reuse=None,
    trainable=True,
    collections=None,  # pylint: disable=redefined-outer-name
    use_resource=None,
    **kwargs):
  """A custom variable getter."""
  # Here, we switch the default graph to the outer graph and ask the
  # variable scope in which the function is defined to give us the
  # variable. The variable is stashed in extra_vars and returned to
  # the caller.
  #
  # We capture these variables so that the variable definition is
  # hoisted upward to the outermost graph.
  with self._outer_graph.as_default():
    # pylint: disable=protected-access
    var = self._vscope.get_variable(
        vs._get_default_variable_store(),
        name,
        shape=shape,
        dtype=dtype,
        initializer=initializer,
        reuse=reuse,
        trainable=trainable,
        collections=collections,
        use_resource=use_resource)
    self.extra_vars.append(var)
    if isinstance(var, resource_variable_ops.ResourceVariable):
      # For resource-based variables, read the variable outside the function
      # and pass in the value. This ensures that the function is pure and
      # differentiable. TODO(apassos): this may have performance problems if
      # the function will only do embedding lookups on the variable.
      return var.value()
    return var
Example 12: _is_variable
# Required import: from tensorflow.python.ops import resource_variable_ops [as alias]
# Or: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as alias]
def _is_variable(v):
  """Returns true if `v` is a variable."""
  return isinstance(v, (variables.Variable,
                        resource_variable_ops.ResourceVariable))
Example 13: testResourceVariable
# Required import: from tensorflow.python.ops import resource_variable_ops [as alias]
# Or: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as alias]
def testResourceVariable(self):
  a = variables_lib2.local_variable(0, use_resource=False)
  b = variables_lib2.local_variable(0, use_resource=True)
  self.assertIsInstance(a, variables_lib.Variable)
  self.assertNotIsInstance(a, resource_variable_ops.ResourceVariable)
  self.assertIsInstance(b, resource_variable_ops.ResourceVariable)
Example 14: testBasicResourceVariable
# Required import: from tensorflow.python.ops import resource_variable_ops [as alias]
# Or: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as alias]
def testBasicResourceVariable(self):
  for dtype in [tf.half, tf.float32, tf.float64]:
    with self.test_session():
      var0 = resource_variable_ops.ResourceVariable(
          [1.0, 2.0], dtype=dtype)
      var1 = resource_variable_ops.ResourceVariable(
          [3.0, 4.0], dtype=dtype)
      grads0 = tf.constant([0.1, 0.1], dtype=dtype)
      grads1 = tf.constant([0.01, 0.01], dtype=dtype)
      sgd_op = tf.train.GradientDescentOptimizer(3.0).apply_gradients(zip(
          [grads0, grads1], [var0, var1]))
      # TODO(apassos): calling initialize_resources on all resources here
      # doesn't work because the sessions and graph are reused across unit
      # tests and this would mean trying to reinitialize variables. Figure
      # out a long-term solution for this.
      resources.initialize_resources([var0, var1]).run()
      # Fetch params to validate initial values.
      self.assertAllCloseAccordingToType([1.0, 2.0], var0.eval())
      self.assertAllCloseAccordingToType([3.0, 4.0], var1.eval())
      # Run 1 step of SGD.
      sgd_op.run()
      # Validate updated params: each variable moves by learning_rate * grad.
      self.assertAllCloseAccordingToType(
          [1.0 - 3.0 * 0.1, 2.0 - 3.0 * 0.1], var0.eval())
      self.assertAllCloseAccordingToType(
          [3.0 - 3.0 * 0.01, 4.0 - 3.0 * 0.01], var1.eval())
Example 15: testMinimizeResourceVariable
# Required import: from tensorflow.python.ops import resource_variable_ops [as alias]
# Or: from tensorflow.python.ops.resource_variable_ops import ResourceVariable [as alias]
def testMinimizeResourceVariable(self):
  for dtype in [tf.half, tf.float32, tf.float64]:
    with self.test_session():
      var0 = resource_variable_ops.ResourceVariable(
          [[1.0, 2.0]], dtype=dtype)
      var1 = resource_variable_ops.ResourceVariable(
          [3.0], dtype=dtype)
      x = tf.constant([[4.0], [5.0]], dtype=dtype)
      pred = tf.matmul(var0, x) + var1
      loss = pred * pred
      sgd_op = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
      # TODO(apassos): calling initialize_resources on all resources here
      # doesn't work because the sessions and graph are reused across unit
      # tests and this would mean trying to reinitialize variables. Figure
      # out a long-term solution for this.
      resources.initialize_resources([var0, var1]).run()
      # Fetch params to validate initial values.
      self.assertAllCloseAccordingToType([[1.0, 2.0]], var0.eval())
      self.assertAllCloseAccordingToType([3.0], var1.eval())
      # Run 1 step of SGD.
      sgd_op.run()
      # Validate updated params.
      np_pred = 1.0 * 4.0 + 2.0 * 5.0 + 3.0
      np_grad = 2 * np_pred
      self.assertAllCloseAccordingToType(
          [[1.0 - np_grad * 4.0, 2.0 - np_grad * 5.0]], var0.eval())
      self.assertAllCloseAccordingToType(
          [3.0 - np_grad], var1.eval())