本文整理汇总了Python中tensorflow.python.ops.resource_variable_ops.assign_variable_op方法的典型用法代码示例。如果您正苦于以下问题:Python resource_variable_ops.assign_variable_op方法的具体用法?Python resource_variable_ops.assign_variable_op怎么用?Python resource_variable_ops.assign_variable_op使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类tensorflow.python.ops.resource_variable_ops
的用法示例。
在下文中一共展示了resource_variable_ops.assign_variable_op方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: testManyAssigns
# 需要导入模块: from tensorflow.python.ops import resource_variable_ops [as 别名]
# 或者: from tensorflow.python.ops.resource_variable_ops import assign_variable_op [as 别名]
def testManyAssigns(self):
  """Checks that chained read/write ops ordered by control deps see values in order.

  Builds: create(1) -> read -> assign(2) -> read, with each step gated on the
  previous one via control dependencies, and verifies the first read observes
  the initial value and the second read observes the assigned value.
  """
  with self.test_session() as session:
    # Scalar int32 resource-variable handle (shape=[] means scalar).
    handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
    create = resource_variable_ops.create_variable_op(
        handle, constant_op.constant(1, dtype=dtypes.int32))
    # Each control_dependencies block forces the enclosed op to run only
    # after the named op, pinning the read/write interleaving.
    with ops.control_dependencies([create]):
      first_read = resource_variable_ops.read_variable_op(
          handle, dtype=dtypes.int32)
    with ops.control_dependencies([first_read]):
      write = resource_variable_ops.assign_variable_op(
          handle, constant_op.constant(2, dtype=dtypes.int32))
    with ops.control_dependencies([write]):
      second_read = resource_variable_ops.read_variable_op(
          handle, dtype=dtypes.int32)
    f, s = session.run([first_read, second_read])
    self.assertEqual(f, 1)
    self.assertEqual(s, 2)
示例2: restore
# 需要导入模块: from tensorflow.python.ops import resource_variable_ops [as 别名]
# 或者: from tensorflow.python.ops.resource_variable_ops import assign_variable_op [as 别名]
def restore(self, restored_tensors, restored_shapes):
  """Assigns a value restored from a checkpoint to this saveable's variable.

  Args:
    restored_tensors: list of restored tensors; only the first is used here.
    restored_shapes: optional list of shapes; when given, the restored tensor
      is reshaped to the first shape before assignment (the checkpoint may
      have stored the value under a different, compatible layout).

  Returns:
    The assign op that writes the restored value into the resource variable.
  """
  restored_tensor = restored_tensors[0]
  if restored_shapes is not None:
    restored_tensor = array_ops.reshape(restored_tensor, restored_shapes[0])
  return resource_variable_ops.assign_variable_op(
      self.handle_op, restored_tensor)
示例3: restore
# 需要导入模块: from tensorflow.python.ops import resource_variable_ops [as 别名]
# 或者: from tensorflow.python.ops.resource_variable_ops import assign_variable_op [as 别名]
def restore(self, restored_tensors, restored_shapes):
  """Assigns a restored checkpoint value back into the underlying variable.

  Args:
    restored_tensors: list of restored tensors; only the first is used here.
    restored_shapes: optional list of shapes; when given, the restored tensor
      is reshaped to the first shape before assignment.

  Returns:
    The assign op writing the restored value into the variable handle.
  """
  restored_tensor = restored_tensors[0]
  if restored_shapes is not None:
    restored_tensor = array_ops.reshape(restored_tensor, restored_shapes[0])
  # This saveable holds a read op rather than the handle itself; the
  # variable's resource handle is the read op's first (and only) input.
  return resource_variable_ops.assign_variable_op(
      self.read_op.op.inputs[0],
      restored_tensor)
示例4: restore
# 需要导入模块: from tensorflow.python.ops import resource_variable_ops [as 别名]
# 或者: from tensorflow.python.ops.resource_variable_ops import assign_variable_op [as 别名]
def restore(self, restored_tensors, restored_shapes):
  """Assigns a restored checkpoint value to the variable on its own device.

  Args:
    restored_tensors: list of restored tensors; only the first is used here.
    restored_shapes: optional list of shapes; when given, the restored tensor
      is reshaped to the first shape before assignment.

  Returns:
    The assign op writing the restored value into the resource variable.
  """
  restored_tensor = restored_tensors[0]
  if restored_shapes is not None:
    restored_tensor = array_ops.reshape(restored_tensor, restored_shapes[0])
  # Copy the restored tensor to the variable's device.
  with ops.device(self._var_device):
    restored_tensor = array_ops.identity(restored_tensor)
  return resource_variable_ops.assign_variable_op(
      self.handle_op, restored_tensor)
开发者ID:PacktPublishing,项目名称:Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda,代码行数:11,代码来源:saver.py
示例5: initializing_scope
# 需要导入模块: from tensorflow.python.ops import resource_variable_ops [as 别名]
# 或者: from tensorflow.python.ops.resource_variable_ops import assign_variable_op [as 别名]
def initializing_scope(self):
  """Context manager to capture variable creations.

  Forcibly initializes all created variables: a custom variable-scope getter
  intercepts every variable creation inside the `with` block, records it in
  `self.variables`, and eagerly runs an assign op with the initializer's
  value on a fresh resource handle.

  Yields:
    nothing
  """
  # TODO(apassos) ignoring the regularizer and partitioner here; figure out
  # how to deal with these.
  def _custom_getter(getter=None, name=None, shape=None, dtype=dtypes.float32,  # pylint: disable=missing-docstring
                     initializer=None, regularizer=None, reuse=None,
                     trainable=True, collections=None, caching_device=None,  # pylint: disable=redefined-outer-name
                     partitioner=None, validate_shape=True,
                     use_resource=None):
    # Unused getter arguments, deleted to make the ignoring explicit.
    del getter, regularizer, collections, caching_device, partitioner
    del use_resource, validate_shape
    if name in self.tf_variables:
      if reuse:
        # Reuse path: hand back the already-initialized existing variable.
        return self.tf_variables[name].initialized_value()
      else:
        raise ValueError("Specified reuse=%s but tried to reuse variables."
                         % reuse)
    # TODO(apassos): ensure this is on the same device as above
    v = _CapturedVariable(name, initializer, shape, dtype, trainable)
    self.variables[name] = v

    # Create a graph-mode resource handle and force-initialize it so the
    # variable is usable immediately inside the scope.
    graph_mode_resource = resource_variable_ops.var_handle_op(
        shared_name=name, shape=shape, dtype=dtype)
    if initializer is None:
      initializer = _default_initializer(name, shape, dtype)
    resource_variable_ops.assign_variable_op(
        graph_mode_resource, initializer(shape, dtype))
    return _VariableFromResource(
        graph_mode_resource, dtype, name, shape=v.shape)

  # Re-enter the current scope with our getter installed so every
  # get_variable call inside the `with` body goes through _custom_getter.
  scope = variable_scope.get_variable_scope()
  with variable_scope.variable_scope(scope, custom_getter=_custom_getter):
    yield
开发者ID:PacktPublishing,项目名称:Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda,代码行数:41,代码来源:graph_callable.py