本文整理汇总了Python中tensorflow.python.ops.gen_resource_variable_ops.assign_variable_op函数的典型用法代码示例。如果您正苦于以下问题:Python assign_variable_op函数的具体用法?Python assign_variable_op怎么用?Python assign_variable_op使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了assign_variable_op函数的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: assign
def assign(self, value, use_locking=None, name=None, read_value=True):
    """Assigns a new value to this variable.

    Args:
      value: A `Tensor`. The new value for this variable.
      use_locking: If `True`, use locking during the assignment.
      name: The name to use for the assignment.
      read_value: A `bool`. Whether to read and return the new value of the
        variable or not.

    Returns:
      If `read_value` is `True`, the new value of the variable after the
      assignment has completed. Otherwise, the assignment `Operation` in
      graph mode, or `None` in eager mode.
    """
    # The cached value is deliberately bypassed here: assign() may be the
    # very call that initializes the variable.
    with _handle_graph(self.handle):
        new_value = ops.convert_to_tensor(value, dtype=self.dtype)
        self._shape.assert_is_compatible_with(new_value.shape)
        assignment = gen_resource_variable_ops.assign_variable_op(
            self.handle, new_value, name=name)
        if read_value:
            return self._lazy_read(assignment)
    return assignment
示例2: shape_safe_assign_variable_handle
def shape_safe_assign_variable_handle(handle, shape, value, name=None):
    """Assigns `value` to `handle` after checking shape compatibility.

    Args:
      handle: Resource handle of the variable being assigned.
      shape: A `TensorShape` the converted value must be compatible with.
      value: The value to assign; converted to a `Tensor` first.
      name: Optional name for the assignment op.

    Returns:
      The assignment operation.
    """
    tensor = ops.convert_to_tensor(value)
    shape.assert_is_compatible_with(tensor.shape)
    return gen_resource_variable_ops.assign_variable_op(
        handle, tensor, name=name)
示例3: assign
def assign(self, value, use_locking=None, name=None, read_value=False):
    """Assign `value` to all replicas.

    Outside of the tpu.rewrite context, assigns explicitly to all replicas.
    Inside of the tpu.rewrite context, assigns to the local replica.

    Args:
      value: Tensor to assign.
      use_locking: Ignored.
      name: Ignored.
      read_value: Return the value from the assignment.

    Returns:
      Assignment operation, or new value of the variable if `read_value`
      is True.
    """
    del use_locking  # Accepted only for interface compatibility.
    if _enclosing_tpu_context() is None:
        # Outside tpu.rewrite: fan the assignment out to every replica's
        # underlying variable.
        per_replica = []
        with self._assign_dependencies():
            for replica_var in self._vars:
                per_replica.append(
                    replica_var.assign(value, use_locking=None, name=name))
            if not read_value:
                return control_flow_ops.group(per_replica)
            # Sequence the read after all per-replica writes.
            with ops.control_dependencies(per_replica):
                return self.read_value()
    # Inside tpu.rewrite: write through the local replica's handle.
    with _handle_graph(self.handle), self._assign_dependencies():
        tensor = ops.convert_to_tensor(value, dtype=self.dtype)
        assignment = gen_resource_variable_ops.assign_variable_op(
            self.handle, tensor, name=name)
        if read_value:
            return self._read_variable_op()
    return assignment
示例4: assign
def assign(self, value, use_locking=None, name=None):
    """Writes `value` to the variable and returns a lazily-read result.

    Args:
      value: Value to assign; converted to this variable's dtype.
      use_locking: Ignored.
      name: Optional name for the assignment op.

    Returns:
      The result of `_lazy_read` over the assignment op.
    """
    tensor = ops.convert_to_tensor(value, dtype=self.dtype)
    self._shape.assert_is_compatible_with(tensor.shape)
    assignment = gen_resource_variable_ops.assign_variable_op(
        self.handle, tensor, name=name)
    return self._lazy_read(assignment)
示例5: assign
def assign(self, value, use_locking=None, name=None):
    """Assigns `value` to the variable and returns the freshly-read value.

    Args:
      value: Value to assign; converted to this variable's dtype.
      use_locking: Ignored.
      name: Optional name for the assignment op.

    Returns:
      The variable's value, read after the assignment completes.
    """
    tensor = ops.convert_to_tensor(value, dtype=self.dtype)
    assignment = gen_resource_variable_ops.assign_variable_op(
        self.handle, tensor, name=name)
    # The control dependency sequences the read after the write.
    with ops.control_dependencies([assignment]):
        return self.read_value()
示例6: assign
def assign(self, value, use_locking=None, name=None):
    """Shape-checks `value`, assigns it, and returns the updated value.

    Args:
      value: Value to assign; converted to this variable's dtype.
      use_locking: Ignored.
      name: Optional name for the assignment op.

    Returns:
      The variable's value, read after the assignment completes.
    """
    tensor = ops.convert_to_tensor(value, dtype=self.dtype)
    self._shape.assert_is_compatible_with(tensor.shape)
    assignment = gen_resource_variable_ops.assign_variable_op(
        self.handle, tensor, name=name)
    # The control dependency sequences the read after the write.
    with ops.control_dependencies([assignment]):
        return self.read_value()
示例7: _init_from_args
# NOTE(review): this snippet is truncated (the start of _init_from_args is
# omitted by the source page) and the original indentation was lost during
# extraction; the code lines below are kept verbatim.
#......... part of the code is omitted here .........
# Eager branch: materialize the initial value and build an eager-safe handle.
with ops.name_scope("Initializer"):
initial_value = ops.convert_to_tensor(
initial_value, name="initial_value", dtype=dtype)
self._handle = _eager_safe_variable_handle(
shape=initial_value.get_shape(),
dtype=initial_value.dtype.base_dtype,
shared_name=handle_name,
name=name,
graph_mode=False)
# The handle's device differs between graph mode and eager execution.
self._handle_device = (
self._handle.device if self._in_graph_mode else
context.get_default_context().device_name)
self._shape = initial_value.get_shape()
# pylint: enable=protected-access
# Or get the initial value from a Tensor or Python object.
else:
with ops.name_scope("Initializer"):
initial_value = ops.convert_to_tensor(
initial_value, name="initial_value", dtype=dtype)
# pylint: disable=protected-access
# Reject initializers built inside a control-flow construct; the error
# message directs callers to use a lambda instead.
if (self._in_graph_mode and initial_value is not None and
initial_value.op._get_control_flow_context() is not None):
raise ValueError(
"Initializer for variable %s is from inside a control-flow "
"construct, such as a loop or conditional. When creating a "
"variable inside a loop or conditional, use a lambda as the "
"initializer." % name)
# pylint: enable=protected-access
self._handle = _eager_safe_variable_handle(
shape=initial_value.get_shape(),
dtype=initial_value.dtype.base_dtype,
shared_name=handle_name,
name=name,
graph_mode=self._in_graph_mode)
self._handle_device = (self._handle.device if self._in_graph_mode else
context.get_default_context().device_name)
self._shape = initial_value.get_shape()
# The initial value is retained only in graph mode; the eager branch below
# assigns it immediately instead.
self._initial_value = initial_value if self._in_graph_mode else None
self._handle_name = handle_name + ":0"
self._dtype = initial_value.dtype.base_dtype
self._constraint = constraint
# Graph mode: build is-initialized, initializer, read and cached-value ops.
if self._in_graph_mode:
with ops.name_scope("IsInitialized"):
self._is_initialized_op = (
gen_resource_variable_ops.var_is_initialized_op(self._handle))
if initial_value is not None:
with ops.name_scope("Assign") as n, ops.colocate_with(self._handle):
self._initializer_op = (
gen_resource_variable_ops.assign_variable_op(
self._handle,
self._try_guard_against_uninitialized_dependencies(
initial_value),
name=n))
with ops.name_scope("Read"), ops.colocate_with(self._handle):
# Manually assign reads to the handle's device to avoid log
# messages.
with ops.device(self._handle_device):
value = self._read_variable_op()
self._graph_element = value
if caching_device is not None:
# Variables may be created in a tf.device() or ops.colocate_with()
# context. At the same time, users would expect caching device to
# be independent of this context, and/or would not expect the
# current device context to be merged with the caching device
# spec. Therefore we reset the colocation stack before creating
# the cached value. Note that resetting the colocation stack will
# also reset the device stack.
with ops.colocate_with(None, ignore_existing=True):
with ops.device(caching_device):
self._cached_value = array_ops.identity(value)
else:
self._cached_value = None
# Eager mode: assign the initial value right away; no graph ops are kept.
else:
gen_resource_variable_ops.assign_variable_op(self._handle,
initial_value)
self._is_initialized_op = None
self._initializer_op = None
self._graph_element = None
if caching_device:
with ops.device(caching_device):
self._cached_value = self._read_variable_op()
else:
self._cached_value = None
if context.in_graph_mode():
ops.add_to_collections(collections, self)
elif ops.GraphKeys.GLOBAL_STEP in collections:
ops.add_to_collections(ops.GraphKeys.GLOBAL_STEP, self)
if not self._in_graph_mode:
# After the handle has been created, set up a way to clean it up when
# executing eagerly. We'll hold the only reference to the deleter, so that
# when this object is garbage collected the deleter will be too. This
# means ResourceVariables can be part of reference cycles without those
# cycles being uncollectable, and means that no __del__ will be defined at
# all in graph mode.
self._handle_deleter = EagerResourceDeleter(
handle=self._handle, handle_device=self._handle_device)
示例8: _init_from_args
# NOTE(review): the original indentation was lost during extraction and the
# tail of this function is omitted by the source page; the code lines below
# are kept verbatim.
def _init_from_args(self,
initial_value=None,
trainable=True,
collections=None,
validate_shape=True,
caching_device=None,
name=None,
dtype=None):
"""Creates a variable.
Args:
initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
which is the initial value for the Variable. The initial value must have
a shape specified unless `validate_shape` is set to False. Can also be a
callable with no argument that returns the initial value when called.
(Note that initializer functions from init_ops.py must first be bound
to a shape before being used here.)
trainable: If `True`, the default, also adds the variable to the graph
collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
the default list of variables to use by the `Optimizer` classes.
collections: List of graph collections keys. The new variable is added to
these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
validate_shape: Ignored. Provided for compatibility with tf.Variable.
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
name: Optional name for the variable. Defaults to `'Variable'` and gets
uniquified automatically.
dtype: If set, initial_value will be converted to the given type.
If None, either the datatype will be kept (if initial_value is
a Tensor) or float32 will be used (if it is a Python object convertible
to a Tensor).
Raises:
ValueError: If the initial value is not specified, or does not have a
shape and `validate_shape` is `True`.
"""
if initial_value is None:
raise ValueError("initial_value must be specified.")
init_from_fn = callable(initial_value)
if collections is None:
collections = [ops.GraphKeys.GLOBAL_VARIABLES]
if not isinstance(collections, (list, tuple, set)):
raise ValueError(
"collections argument to Variable constructor must be a list, tuple, "
"or set. Got %s of type %s" % (collections, type(collections)))
if trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections:
collections = list(collections) + [ops.GraphKeys.TRAINABLE_VARIABLES]
self._save_slice_info = None
# control_dependencies(None) clears the surrounding control-dependency
# context so the initializer ops are not gated on unrelated ops.
with ops.control_dependencies(None):
with ops.name_scope(name, "Variable", [] if init_from_fn else
[initial_value]) as name:
# pylint: disable=protected-access
true_name = ops._name_from_scope_name(name)
if init_from_fn:
# Use attr_scope and device(None) to simulate the behavior of
# colocate_with when the variable we want to colocate with doesn't
# yet exist.
attr = attr_value_pb2.AttrValue(
list=attr_value_pb2.AttrValue.ListValue(
s=[compat.as_bytes("loc:@%s" % true_name)]))
with ops.get_default_graph()._attr_scope({"_class": attr}):
with ops.name_scope("Initializer"), ops.device(None):
self._initial_value = ops.convert_to_tensor(
initial_value(), name="initial_value", dtype=dtype)
self._handle = gen_resource_variable_ops.var_handle_op(
shape=self._initial_value.get_shape(),
dtype=self._initial_value.dtype.base_dtype,
shared_name=true_name, name=name)
# pylint: enable=protected-access
# Or get the initial value from a Tensor or Python object.
else:
self._initial_value = ops.convert_to_tensor(
initial_value, name="initial_value", dtype=dtype)
self._handle = gen_resource_variable_ops.var_handle_op(
shape=self._initial_value.get_shape(),
dtype=self._initial_value.dtype.base_dtype,
shared_name=true_name, name=name)
self._dtype = self._initial_value.dtype.base_dtype
# Build the standard companion ops: is-initialized, initializer, read.
with ops.name_scope("IsInitialized"):
self._is_initialized_op = (
gen_resource_variable_ops.var_is_initialized_op(self._handle))
if initial_value is not None:
with ops.name_scope("Assign") as n, ops.colocate_with(self._handle):
self._initialize_op = gen_resource_variable_ops.assign_variable_op(
self._handle, self._initial_value, name=n)
with ops.name_scope("Read"), ops.colocate_with(self._handle):
# Manually assign reads to the handle's device to avoid log messages.
with ops.device(self._handle.device):
value = gen_resource_variable_ops.read_variable_op(
self._handle, dtype=self._dtype)
self._graph_element = value
if caching_device is not None:
#......... part of the code is omitted here .........
示例9: __init__
def __init__(self,
             initial_value=None,
             name=None,
             caching_device=None,
             trainable=True,
             collections=None,
             dtype=None,
             shape=None):
    """Creates a variable.

    Args:
      initial_value: A `Tensor` or Python object convertible to a `Tensor`
        representing the initial value of this variable.
      name: The name of this variable. Automatically uniquified.
      caching_device: Device where the variable value's read by default.
      trainable: Whether the global read of this variable will be used for
        training.
      collections: Additional collections to which the `read` operation for
        this variable is to be added. Defaults to [].
      dtype: The type of this variable. Can be omitted if it can be deduced
        from the initial_value. If different from the type of the initial
        value it will be cast to this type.
      shape: The shape of this variable. Only specify if there is no initial
        value but shape inference is desired.
    """
    # Materialize the initial value, invoking it if it is a callable.
    if initial_value is not None:
        if callable(initial_value):
            initial_value = initial_value()
        initial_value = ops.convert_to_tensor(initial_value)

    # Resolve the dtype: deduce it from the value, or cast to match.
    if dtype is None:
        assert initial_value is not None, ("Trying to create a resource variable "
                                           "with no dtype or initial value. At"
                                           " least one of these must be set.")
        dtype = initial_value.dtype
    elif initial_value is not None:
        initial_value = math_ops.cast(initial_value, dtype)

    # Resolve the shape the same way.
    if shape is not None:
        shape = tensor_shape.as_shape(shape)
    elif initial_value is not None:
        shape = initial_value.get_shape().as_proto()
    else:
        shape = tensor_shape.unknown_shape()

    self._dtype = dtype
    with ops.name_scope(name, "Variable", [initial_value]) as name:
        self._handle = gen_resource_variable_ops.var_handle_op(
            shared_name=name, name=name, dtype=dtype, shape=shape)
        with ops.name_scope("IsInitialized"):
            self._is_initialized_op = (
                gen_resource_variable_ops.var_is_initialized_op(self._handle))
        if initial_value is not None:
            with ops.name_scope("Create"):
                self._initialize_op = gen_resource_variable_ops.assign_variable_op(
                    self._handle, initial_value)
            resources.register_resource(self._handle,
                                        self._initialize_op,
                                        self._is_initialized_op)

        with ops.name_scope("Read"):
            if caching_device is not None:
                # Cached reads are pinned to the requested device.
                with ops.device(caching_device):
                    self._value = gen_resource_variable_ops.read_variable_op(
                        self._handle, dtype=self._dtype)
            else:
                self._value = gen_resource_variable_ops.read_variable_op(
                    self._handle, dtype=self._dtype)
        # TODO(apassos) this is terrible
        # NOTE(review): if initial_value is None, self._initialize_op was
        # never set above — presumably callers always supply one; confirm.
        self._value.initializer = self._initialize_op
        _register_variable_read(
            self._value, trainable=trainable, collections=collections)