This page collects typical usage examples of the Python function tensorflow.python.ops.gen_resource_variable_ops.var_is_initialized_op. If you have been wondering what exactly var_is_initialized_op does, how to call it, or what real code that uses it looks like, the curated examples below should help.
Below are 6 code examples of the var_is_initialized_op function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
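For orientation before the examples: var_is_initialized_op takes the handle of a resource variable and returns a scalar boolean tensor. The following minimal sketch is an illustration written for this page, assuming TensorFlow 1.x graph mode; the variable name and session usage are not taken from the examples below.

import tensorflow as tf
from tensorflow.python.ops import gen_resource_variable_ops

# A resource variable owns a typed handle; var_is_initialized_op takes that
# handle and returns a scalar bool tensor.
v = tf.get_variable("v", shape=[2], use_resource=True)
is_init = gen_resource_variable_ops.var_is_initialized_op(v.handle)

with tf.Session() as sess:
  print(sess.run(is_init))  # False: the variable has not been assigned yet
  sess.run(v.initializer)
  print(sess.run(is_init))  # True: the handle now points to initialized memory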
Example 1: _eager_safe_variable_handle
def _eager_safe_variable_handle(shape, dtype, shared_name, name, graph_mode):
  """Creates a variable handle with information to do shape inference."""
  container = ops.get_default_graph()._container  # pylint: disable=protected-access
  if container is None:
    container = ""
  handle = gen_resource_variable_ops.var_handle_op(shape=shape, dtype=dtype,
                                                   shared_name=shared_name,
                                                   name=name,
                                                   container=container)
  if graph_mode:
    return handle

  # We do not want two distinct ResourceVariable objects for the same
  # underlying resource in the runtime.
  # When in eager mode, explicitly ensure so here. When in graph mode, it's
  # ensured by always generating different variable names.
  exists = gen_resource_variable_ops.var_is_initialized_op(handle)
  if exists:
    raise ValueError("variable object with name '%s' already created. Use "
                     "get_variable() if reuse is desired." %
                     shared_name)
  with context.graph_mode(), ops.Graph().as_default() as graph:
    h = gen_resource_variable_ops.var_handle_op(shape=shape, dtype=dtype,
                                                shared_name=shared_name,
                                                name=name,
                                                container=container)

    # Tensor._handle_data contains information for the shape-inference code to
    # know the shape and dtype of the variable pointed to by a handle. Since
    # shape inference doesn't run in eager mode we copy this data here for when
    # the handle is captured by an eager mode function.
    # pylint: disable=protected-access
    if ops._USE_C_SHAPES:
      handle._handle_data = get_resource_handle_data(h)
    else:
      if h._handle_data is None:
        ops.set_shape_and_handle_data_for_outputs(h.op)
      handle._handle_data = h._handle_data
    # pylint: enable=protected-access

  # Clean up our reference cycles to avoid making the garbage collector run.
  # pylint: disable=protected-access
  # OrderedDict, constructed on Graph creation, makes a simple reference loop
  # and hides it in an __attribute in some Python versions. We don't need to
  # throw an error if we can't find it, but if we do find it we can break the
  # loop to avoid creating work for the garbage collector.
  problematic_cycle = graph._functions.__dict__.get("_OrderedDict__root", None)
  # pylint: enable=protected-access
  if problematic_cycle:
    try:
      del problematic_cycle[0][:]
    except TypeError:
      # This is probably not one of the problematic Python versions. Continue
      # with the rest of our cleanup.
      pass
  # Now clean up our own reference cycles by clearing all of the attributes for
  # the Graph and op we created.
  h.__dict__ = {}
  graph.__dict__ = {}
  return handle
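As a side note to Example 1: the eager-mode duplicate check above relies on var_is_initialized_op executing immediately and returning a boolean eager tensor. Here is a hedged sketch of that behavior (assumes TF 1.x with eager execution enabled; tf.contrib.eager.Variable is used because plain tf.Variable was not eager-compatible in early releases):

import tensorflow as tf
from tensorflow.python.ops import gen_resource_variable_ops

tf.enable_eager_execution()
tfe = tf.contrib.eager

v = tfe.Variable([1.0, 2.0])
# The op runs eagerly and yields a boolean EagerTensor, which is exactly
# what the `if exists:` check in Example 1 evaluates.
exists = gen_resource_variable_ops.var_is_initialized_op(v.handle)
print(bool(exists))  # True: eager variables are assigned at creation time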
Example 2: _eager_safe_variable_handle
def _eager_safe_variable_handle(shape, dtype, shared_name, name, graph_mode):
  """Creates a variable handle with information to do shape inference."""
  container = ops.get_default_graph()._container  # pylint: disable=protected-access
  if container is None:
    container = ""
  handle = gen_resource_variable_ops.var_handle_op(shape=shape, dtype=dtype,
                                                   shared_name=shared_name,
                                                   name=name,
                                                   container=container)
  if graph_mode:
    return handle

  # We do not want two distinct ResourceVariable objects for the same
  # underlying resource in the runtime.
  # When in eager mode, explicitly ensure so here. When in graph mode, it's
  # ensured by always generating different variable names.
  exists = gen_resource_variable_ops.var_is_initialized_op(handle)
  if exists:
    raise ValueError("variable object with name '%s' already created. Use "
                     "get_variable() if reuse is desired." %
                     shared_name)
  with context.graph_mode(), ops.Graph().as_default():
    h = gen_resource_variable_ops.var_handle_op(shape=shape, dtype=dtype,
                                                shared_name=shared_name,
                                                name=name,
                                                container=container)

    # Tensor._handle_data contains information for the shape-inference code to
    # know the shape and dtype of the variable pointed to by a handle. Since
    # shape inference doesn't run in eager mode we copy this data here for when
    # the handle is captured by an eager mode function.
    handle._handle_data = h._handle_data  # pylint: disable=protected-access
  return handle
Example 3: is_variable_initialized
def is_variable_initialized(ref, name=None):
  """Checks whether a tensor has been initialized.

  Outputs boolean scalar indicating whether the tensor has been initialized.

  Args:
    ref: A mutable `Tensor`.
      Should be from a `Variable` node. May be uninitialized.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `bool`.
  """
  if ref.dtype._is_ref_dtype:
    return gen_state_ops.is_variable_initialized(ref=ref, name=name)
  # Handle resource variables.
  if ref.op.type == "VarHandleOp":
    return gen_resource_variable_ops.var_is_initialized_op(ref.handle,
                                                           name=name)
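A hedged usage sketch of the dispatch in Example 3 (TF 1.x graph mode assumed; variable names are illustrative): classic ref-dtype variables take the gen_state_ops path, while resource variables are backed by a VarHandleOp and take the var_is_initialized_op path. The public tf.is_variable_initialized wrapper delegates to this logic.

import tensorflow as tf

ref_var = tf.Variable([1.0])                                     # ref dtype path
res_var = tf.get_variable("res", shape=[1], use_resource=True)   # VarHandleOp path

checks = [tf.is_variable_initialized(ref_var),
          tf.is_variable_initialized(res_var)]

with tf.Session() as sess:
  print(sess.run(checks))  # [False, False] before any initializer runs
  sess.run(tf.global_variables_initializer())
  print(sess.run(checks))  # [True, True] afterwards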
Example 4: _init_from_args
#......... part of the code omitted here .........
with ops.name_scope("Initializer"):
initial_value = ops.convert_to_tensor(
initial_value, name="initial_value", dtype=dtype)
self._handle = _eager_safe_variable_handle(
shape=initial_value.get_shape(),
dtype=initial_value.dtype.base_dtype,
shared_name=handle_name,
name=name,
graph_mode=False)
self._handle_device = (
self._handle.device if self._in_graph_mode else
context.get_default_context().device_name)
self._shape = initial_value.get_shape()
# pylint: enable=protected-access
# Or get the initial value from a Tensor or Python object.
else:
with ops.name_scope("Initializer"):
initial_value = ops.convert_to_tensor(
initial_value, name="initial_value", dtype=dtype)
# pylint: disable=protected-access
if (self._in_graph_mode and initial_value is not None and
initial_value.op._get_control_flow_context() is not None):
raise ValueError(
"Initializer for variable %s is from inside a control-flow "
"construct, such as a loop or conditional. When creating a "
"variable inside a loop or conditional, use a lambda as the "
"initializer." % name)
# pylint: enable=protected-access
self._handle = _eager_safe_variable_handle(
shape=initial_value.get_shape(),
dtype=initial_value.dtype.base_dtype,
shared_name=handle_name,
name=name,
graph_mode=self._in_graph_mode)
self._handle_device = (self._handle.device if self._in_graph_mode else
context.get_default_context().device_name)
self._shape = initial_value.get_shape()
self._initial_value = initial_value if self._in_graph_mode else None
self._handle_name = handle_name + ":0"
self._dtype = initial_value.dtype.base_dtype
self._constraint = constraint
if self._in_graph_mode:
with ops.name_scope("IsInitialized"):
self._is_initialized_op = (
gen_resource_variable_ops.var_is_initialized_op(self._handle))
if initial_value is not None:
with ops.name_scope("Assign") as n, ops.colocate_with(self._handle):
self._initializer_op = (
gen_resource_variable_ops.assign_variable_op(
self._handle,
self._try_guard_against_uninitialized_dependencies(
initial_value),
name=n))
with ops.name_scope("Read"), ops.colocate_with(self._handle):
# Manually assign reads to the handle's device to avoid log
# messages.
with ops.device(self._handle_device):
value = self._read_variable_op()
self._graph_element = value
if caching_device is not None:
# Variables may be created in a tf.device() or ops.colocate_with()
# context. At the same time, users would expect caching device to
# be independent of this context, and/or would not expect the
# current device context to be merged with the caching device
# spec. Therefore we reset the colocation stack before creating
# the cached value. Note that resetting the colocation stack will
# also reset the device stack.
with ops.colocate_with(None, ignore_existing=True):
with ops.device(caching_device):
self._cached_value = array_ops.identity(value)
else:
self._cached_value = None
else:
gen_resource_variable_ops.assign_variable_op(self._handle,
initial_value)
self._is_initialized_op = None
self._initializer_op = None
self._graph_element = None
if caching_device:
with ops.device(caching_device):
self._cached_value = self._read_variable_op()
else:
self._cached_value = None
if context.in_graph_mode():
ops.add_to_collections(collections, self)
elif ops.GraphKeys.GLOBAL_STEP in collections:
ops.add_to_collections(ops.GraphKeys.GLOBAL_STEP, self)
if not self._in_graph_mode:
# After the handle has been created, set up a way to clean it up when
# executing eagerly. We'll hold the only reference to the deleter, so that
# when this object is garbage collected the deleter will be too. This
# means ResourceVariables can be part of reference cycles without those
# cycles being uncollectable, and means that no __del__ will be defined at
# all in graph mode.
self._handle_deleter = EagerResourceDeleter(
handle=self._handle, handle_device=self._handle_device)
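In graph mode, the IsInitialized op that Example 4 stores in self._is_initialized_op is what initialization reporting is built on. A small sketch (TF 1.x assumed; tf.report_uninitialized_variables aggregates the per-variable checks, which for resource variables come from var_is_initialized_op):

import tensorflow as tf

a = tf.get_variable("a", shape=[], use_resource=True)
b = tf.get_variable("b", shape=[], use_resource=True)

with tf.Session() as sess:
  print(sess.run(tf.report_uninitialized_variables()))  # [b'a' b'b']
  sess.run(a.initializer)
  print(sess.run(tf.report_uninitialized_variables()))  # [b'b']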
Example 5: __init__
def __init__(self,
             initial_value=None,
             name=None,
             trainable=True,
             collections=None,
             dtype=None,
             shape=None):
  """Creates a variable.

  Args:
    initial_value: A `Tensor` or Python object convertible to a `Tensor`
      representing the initial value of this variable.
    name: The name of this variable. Automatically uniquified.
    trainable: Whether the global read of this variable will be used for
      training.
    collections: Additional collections to which the `read` operation for
      this variable is to be added. Defaults to [].
    dtype: The type of this variable. Can be omitted if it can be deduced
      from the initial_value. If different from the type of the initial
      value it will be cast to this type.
    shape: The shape of this variable. Only specify if there is no initial
      value but shape inference is desired.
  """
  if initial_value is not None:
    initial_value = ops.convert_to_tensor(initial_value)
  if dtype is None:
    assert initial_value is not None, ("Trying to create a resource variable "
                                       "with no dtype or initial value. At"
                                       " least one of these must be set.")
    dtype = initial_value.dtype
  elif initial_value is not None:
    initial_value = math_ops.cast(initial_value, dtype)
  if shape is None:
    if initial_value is not None:
      shape = initial_value.get_shape().as_proto()
    else:
      shape = tensor_shape.unknown_shape()
  else:
    shape = tensor_shape.as_shape(shape)
  self._dtype = dtype
  with ops.name_scope(name, "Variable", [initial_value]) as name:
    self._handle = gen_resource_variable_ops.var_handle_op(shared_name=name,
                                                           name=name,
                                                           dtype=dtype,
                                                           shape=shape)
    with ops.name_scope("IsInitialized"):
      self._is_initialized_op = (
          gen_resource_variable_ops.var_is_initialized_op(self._handle))
    if initial_value is not None:
      with ops.name_scope("Create"):
        self._initialize_op = gen_resource_variable_ops.create_variable_op(
            self._handle, initial_value)
      resources.register_resource(self._handle,
                                  self._initialize_op,
                                  self._is_initialized_op)

    with ops.name_scope("Read"):
      self._value = gen_resource_variable_ops.read_variable_op(
          self._handle, dtype=self._dtype)
    _register_variable_read(
        self._value, trainable=trainable, collections=collections)
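Because Example 5 registers the handle together with its initialize and is-initialized ops via resources.register_resource, the generic resource helpers can drive initialization. A hedged sketch (assumes the internal tensorflow.python.ops.resources module, whose exact API may vary across TF 1.x versions):

import tensorflow as tf
from tensorflow.python.ops import resources

# Group the initializer ops of all registered shared resources, and build a
# report of the ones whose is-initialized op still returns False.
init_op = resources.initialize_resources(resources.shared_resources())
uninitialized = resources.report_uninitialized_resources()

with tf.Session() as sess:
  sess.run(init_op)
  print(sess.run(uninitialized))  # empty once every resource is initialized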
Example 6: _init_from_args
def _init_from_args(self,
                    initial_value=None,
                    trainable=True,
                    collections=None,
                    validate_shape=True,
                    caching_device=None,
                    name=None,
                    dtype=None):
  """Creates a variable.

  Args:
    initial_value: A `Tensor`, or Python object convertible to a `Tensor`,
      which is the initial value for the Variable. The initial value must have
      a shape specified unless `validate_shape` is set to False. Can also be a
      callable with no argument that returns the initial value when called.
      (Note that initializer functions from init_ops.py must first be bound
      to a shape before being used here.)
    trainable: If `True`, the default, also adds the variable to the graph
      collection `GraphKeys.TRAINABLE_VARIABLES`. This collection is used as
      the default list of variables to use by the `Optimizer` classes.
    collections: List of graph collections keys. The new variable is added to
      these collections. Defaults to `[GraphKeys.GLOBAL_VARIABLES]`.
    validate_shape: Ignored. Provided for compatibility with tf.Variable.
    caching_device: Optional device string or function describing where the
      Variable should be cached for reading. Defaults to the Variable's
      device. If not `None`, caches on another device. Typical use is to
      cache on the device where the Ops using the Variable reside, to
      deduplicate copying through `Switch` and other conditional statements.
    name: Optional name for the variable. Defaults to `'Variable'` and gets
      uniquified automatically.
    dtype: If set, initial_value will be converted to the given type.
      If None, either the datatype will be kept (if initial_value is
      a Tensor) or float32 will be used (if it is a Python object convertible
      to a Tensor).

  Raises:
    ValueError: If the initial value is not specified, or does not have a
      shape and `validate_shape` is `True`.
  """
  if initial_value is None:
    raise ValueError("initial_value must be specified.")
  init_from_fn = callable(initial_value)

  if collections is None:
    collections = [ops.GraphKeys.GLOBAL_VARIABLES]
  if not isinstance(collections, (list, tuple, set)):
    raise ValueError(
        "collections argument to Variable constructor must be a list, tuple, "
        "or set. Got %s of type %s" % (collections, type(collections)))
  if trainable and ops.GraphKeys.TRAINABLE_VARIABLES not in collections:
    collections = list(collections) + [ops.GraphKeys.TRAINABLE_VARIABLES]
  self._save_slice_info = None
  with ops.control_dependencies(None):
    with ops.name_scope(name, "Variable", [] if init_from_fn else
                        [initial_value]) as name:
      # pylint: disable=protected-access
      true_name = ops._name_from_scope_name(name)
      if init_from_fn:
        # Use attr_scope and device(None) to simulate the behavior of
        # colocate_with when the variable we want to colocate with doesn't
        # yet exist.
        attr = attr_value_pb2.AttrValue(
            list=attr_value_pb2.AttrValue.ListValue(
                s=[compat.as_bytes("loc:@%s" % true_name)]))
        with ops.get_default_graph()._attr_scope({"_class": attr}):
          with ops.name_scope("Initializer"), ops.device(None):
            self._initial_value = ops.convert_to_tensor(
                initial_value(), name="initial_value", dtype=dtype)
          self._handle = gen_resource_variable_ops.var_handle_op(
              shape=self._initial_value.get_shape(),
              dtype=self._initial_value.dtype.base_dtype,
              shared_name=true_name, name=name)
      # pylint: enable=protected-access

      # Or get the initial value from a Tensor or Python object.
      else:
        self._initial_value = ops.convert_to_tensor(
            initial_value, name="initial_value", dtype=dtype)
        self._handle = gen_resource_variable_ops.var_handle_op(
            shape=self._initial_value.get_shape(),
            dtype=self._initial_value.dtype.base_dtype,
            shared_name=true_name, name=name)

      self._dtype = self._initial_value.dtype.base_dtype

      with ops.name_scope("IsInitialized"):
        self._is_initialized_op = (
            gen_resource_variable_ops.var_is_initialized_op(self._handle))
      if initial_value is not None:
        with ops.name_scope("Assign") as n, ops.colocate_with(self._handle):
          self._initialize_op = gen_resource_variable_ops.assign_variable_op(
              self._handle, self._initial_value, name=n)

      with ops.name_scope("Read"), ops.colocate_with(self._handle):
        # Manually assign reads to the handle's device to avoid log messages.
        with ops.device(self._handle.device):
          value = gen_resource_variable_ops.read_variable_op(
              self._handle, dtype=self._dtype)
        self._graph_element = value
        if caching_device is not None:
#......... part of the code omitted here .........