This article collects typical usage examples of the `variables` module in `tensorflow.python.ops`. If you are wondering what `ops.variables` is for, how to use it, or would like to see it in context, the curated code samples below may help. You can also read further about the containing package, `tensorflow.python.ops`.
The following 15 code examples of `ops.variables` are listed, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
Example 1: local_variable
# Required import: from tensorflow.python import ops [as alias]
# Or: from tensorflow.python.ops import variables [as alias]
def local_variable(initial_value,
                   validate_shape=True,
                   name=None,
                   use_resource=None):
  """Create a variable with a value and add it to `GraphKeys.LOCAL_VARIABLES`.

  Args:
    initial_value: See variables.Variable.__init__.
    validate_shape: See variables.Variable.__init__.
    name: See variables.Variable.__init__.
    use_resource: If `True` use a ResourceVariable instead of a Variable.

  Returns:
    New variable.
  """
  return variable_scope.variable(
      initial_value,
      trainable=False,
      collections=[ops.GraphKeys.LOCAL_VARIABLES],
      validate_shape=validate_shape,
      use_resource=use_resource,
      name=name)
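For context, a minimal usage sketch. It assumes the TF 1.x graph-mode API and that the helper is exposed as tf.contrib.framework.local_variable (where this function lived in tf.contrib); both assumptions go beyond the snippet above.

import tensorflow as tf

# Hypothetical usage: a non-trainable counter in LOCAL_VARIABLES.
counter = tf.contrib.framework.local_variable(0, name='batch_counter')

with tf.Session() as sess:
  # Local variables are not touched by global_variables_initializer(),
  # so they need their own initializer.
  sess.run(tf.local_variables_initializer())
  print(sess.run(counter))  # 0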
Example 2: global_variable
# Required import: from tensorflow.python import ops [as alias]
# Or: from tensorflow.python.ops import variables [as alias]
def global_variable(initial_value,
                    validate_shape=True,
                    name=None,
                    use_resource=None):
  """Create a variable with a value and add it to `GraphKeys.GLOBAL_VARIABLES`.

  Args:
    initial_value: See variables.Variable.__init__.
    validate_shape: See variables.Variable.__init__.
    name: See variables.Variable.__init__.
    use_resource: If `True` use a ResourceVariable instead of a Variable.

  Returns:
    New variable.
  """
  return variable_scope.variable(
      initial_value,
      trainable=False,
      collections=[ops.GraphKeys.GLOBAL_VARIABLES],
      validate_shape=validate_shape,
      use_resource=use_resource,
      name=name)
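global_variable mirrors local_variable and differs only in the target collection. A hedged sketch of the practical difference (same tf.contrib.framework assumption as above):

import tensorflow as tf

g = tf.contrib.framework.global_variable(1.0, name='g')
l = tf.contrib.framework.local_variable(2.0, name='l')

with tf.Session() as sess:
  # Each helper registers its variable in exactly one collection, so each
  # variable is covered by the matching initializer only.
  sess.run(tf.global_variables_initializer())  # initializes g, not l
  sess.run(tf.local_variables_initializer())   # initializes l, not g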
Example 3: get_variables
# Required import: from tensorflow.python import ops [as alias]
# Or: from tensorflow.python.ops import variables [as alias]
def get_variables(scope=None,
                  suffix=None,
                  collection=ops.GraphKeys.GLOBAL_VARIABLES):
  """Gets the list of variables, filtered by scope and/or suffix.

  Args:
    scope: an optional scope for filtering the variables to return. Can be a
      variable scope or a string.
    suffix: an optional suffix for filtering the variables to return.
    collection: the collection to search in. Defaults to
      `GraphKeys.GLOBAL_VARIABLES`.

  Returns:
    a list of variables in the collection with scope and suffix.
  """
  if isinstance(scope, variable_scope.VariableScope):
    scope = scope.name
  if suffix is not None:
    if ':' not in suffix:
      suffix += ':'
    scope = (scope or '') + '.*' + suffix
  return ops.get_collection(collection, scope)
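A short usage sketch for get_variables (hedged: it assumes the tf.contrib.framework entry point and TF 1.x variable scopes; note that the scope argument is treated as a regular-expression prefix by get_collection):

import tensorflow as tf

with tf.variable_scope('encoder'):
  tf.get_variable('weights', shape=[4, 4])
  tf.get_variable('bias', shape=[4])

# Filter GLOBAL_VARIABLES by scope prefix.
enc_vars = tf.contrib.framework.get_variables('encoder')
print([v.name for v in enc_vars])  # ['encoder/weights:0', 'encoder/bias:0']

# Filter by suffix; a ':' is appended internally, so 'bias' matches 'bias:0'.
biases = tf.contrib.framework.get_variables(suffix='bias')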
Example 4: get_variables
# Required import: from tensorflow.python import ops [as alias]
# Or: from tensorflow.python.ops import variables [as alias]
def get_variables(scope=None,
                  suffix=None,
                  collection=ops.GraphKeys.GLOBAL_VARIABLES):
  """Gets the list of variables, filtered by scope and/or suffix.

  Args:
    scope: an optional scope for filtering the variables to return. Can be a
      variable scope or a string.
    suffix: an optional suffix for filtering the variables to return.
    collection: the collection to search in. Defaults to
      `GraphKeys.GLOBAL_VARIABLES`.

  Returns:
    a list of variables in the collection with scope and suffix.
  """
  if scope and isinstance(scope, variable_scope.VariableScope):
    scope = scope.name
  if suffix is not None:
    if ':' not in suffix:
      suffix += ':'
    scope = (scope or '') + '.*' + suffix
  return ops.get_collection(collection, scope)
Example 5: _sync_variables_ops
# Required import: from tensorflow.python import ops [as alias]
# Or: from tensorflow.python.ops import variables [as alias]
def _sync_variables_ops(ctx):
  """Create variable synchronization ops.

  Gets the variables back from TPU nodes. This means the variables updated
  by TPU will now be *synced* to host memory.
  In BROADCAST mode, we skip this sync since the variables are usually too
  big to transmit via RPC.

  Args:
    ctx: A `_InternalTPUContext` instance with mode.

  Returns:
    A list of sync ops.
  """
  if not ctx.is_input_broadcast_with_iterators():
    return [
        tf.debugging.check_numerics(v.read_value(),
                                    'Gradient for %s is NaN' % v.name).op
        for v in tf.compat.v1.trainable_variables()
    ]
  else:
    return [tf.no_op()]
Example 6: get_model_variables
# Required import: from tensorflow.python import ops [as alias]
# Or: from tensorflow.python.ops import variables [as alias]
def get_model_variables(scope=None, suffix=None):
  """Gets the list of model variables, filtered by scope and/or suffix.

  Args:
    scope: an optional scope for filtering the variables to return.
    suffix: an optional suffix for filtering the variables to return.

  Returns:
    a list of variables in the collection with scope and suffix.
  """
  return get_variables(scope, suffix, ops.GraphKeys.MODEL_VARIABLES)
Example 7: get_local_variables
# Required import: from tensorflow.python import ops [as alias]
# Or: from tensorflow.python.ops import variables [as alias]
def get_local_variables(scope=None, suffix=None):
  """Gets the list of local variables, filtered by scope and/or suffix.

  Args:
    scope: an optional scope for filtering the variables to return.
    suffix: an optional suffix for filtering the variables to return.

  Returns:
    a list of variables in the collection with scope and suffix.
  """
  return get_variables(scope, suffix, ops.GraphKeys.LOCAL_VARIABLES)
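Examples 6 and 7 are thin wrappers that differ from get_variables only in the collection they query. A hedged sketch using tf.contrib.slim layers, which register their variables in MODEL_VARIABLES (again a TF 1.x contrib assumption):

import tensorflow as tf

slim = tf.contrib.slim

# slim layers create their variables via model_variable(), which adds them
# to both GLOBAL_VARIABLES and MODEL_VARIABLES.
net = slim.fully_connected(tf.zeros([1, 3]), 2, scope='fc')

model_vars = tf.contrib.framework.get_model_variables('fc')
print([v.name for v in model_vars])  # ['fc/weights:0', 'fc/biases:0']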
Example 8: get_variables_to_restore
# Required import: from tensorflow.python import ops [as alias]
# Or: from tensorflow.python.ops import variables [as alias]
def get_variables_to_restore(include=None, exclude=None):
  """Gets the list of the variables to restore.

  Args:
    include: an optional list/tuple of scope strings for filtering which
      variables from the VARIABLES collection to include. If None, all
      variables are included.
    exclude: an optional list/tuple of scope strings for filtering which
      variables from the VARIABLES collection to exclude. If None, no
      variables are excluded.

  Returns:
    a list of variables to restore.

  Raises:
    TypeError: include or exclude is provided but is not a list or a tuple.
  """
  if include is None:
    # Include all variables.
    vars_to_include = get_variables()
  else:
    if not isinstance(include, (list, tuple)):
      raise TypeError('include is provided but is not a list or a tuple.')
    vars_to_include = []
    for scope in include:
      vars_to_include += get_variables(scope)
  vars_to_exclude = set()
  if exclude is not None:
    if not isinstance(exclude, (list, tuple)):
      raise TypeError('exclude is provided but is not a list or a tuple.')
    for scope in exclude:
      vars_to_exclude |= set(get_variables(scope))
  # Exclude the variables in vars_to_exclude.
  return [v for v in vars_to_include if v not in vars_to_exclude]
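A typical fine-tuning pattern built on get_variables_to_restore (a hedged sketch; the checkpoint path and the 'logits' scope name are hypothetical):

import tensorflow as tf

# Restore everything except the classification head, e.g. when the number
# of output classes changes between pre-training and fine-tuning.
variables_to_restore = tf.contrib.framework.get_variables_to_restore(
    exclude=['logits'])
saver = tf.train.Saver(variables_to_restore)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  saver.restore(sess, '/path/to/model.ckpt')  # hypothetical checkpoint path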
Example 9: get_variables_by_suffix
# Required import: from tensorflow.python import ops [as alias]
# Or: from tensorflow.python.ops import variables [as alias]
def get_variables_by_suffix(suffix, scope=None):
  """Gets the list of variables that end with the given suffix.

  Args:
    suffix: suffix for filtering the variables to return.
    scope: an optional scope for filtering the variables to return.

  Returns:
    a copied list of variables with the given suffix and scope.
  """
  return get_variables(scope=scope, suffix=suffix)
Example 10: get_variables_by_name
# Required import: from tensorflow.python import ops [as alias]
# Or: from tensorflow.python.ops import variables [as alias]
def get_variables_by_name(given_name, scope=None):
  """Gets the list of variables that were given that name.

  Args:
    given_name: name given to the variable without any scope.
    scope: an optional scope for filtering the variables to return.

  Returns:
    a copied list of variables with the given name and scope.
  """
  # Match '<scope>/<given_name>:' or a top-level '<given_name>:'.
  suffix = '/' + given_name + ':|^' + given_name + ':'
  return get_variables(scope=scope, suffix=suffix)
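The suffix built above ('/name:' alternated with a leading 'name:') makes the bare variable name match in any enclosing scope. A hedged usage sketch under the same tf.contrib.framework assumption:

import tensorflow as tf

with tf.variable_scope('layer1'):
  tf.get_variable('weights', shape=[2])
with tf.variable_scope('layer2'):
  tf.get_variable('weights', shape=[2])

# Matches the bare name regardless of scope:
# ['layer1/weights:0', 'layer2/weights:0']
ws = tf.contrib.framework.get_variables_by_name('weights')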
Example 11: assign_from_values_fn
# Required import: from tensorflow.python import ops [as alias]
# Or: from tensorflow.python.ops import variables [as alias]
def assign_from_values_fn(var_names_to_values):
  """Returns a function that assigns specific variables from the given values.

  This function provides a mechanism for performing assignment of variables
  to values in a way that does not fill the graph with large assignment values.

  Args:
    var_names_to_values: A map from variable names to values.

  Returns:
    A function that takes a single argument, a `tf.compat.v1.Session`, and
    applies the assignment operation.

  Raises:
    ValueError: if any of the given variable names were not found.
  """
  assign_op, feed_dict = assign_from_values(var_names_to_values)

  def callback(session):
    return session.run(assign_op, feed_dict)

  return callback

# pylint: disable=protected-access
# Currently variable_scope doesn't provide very good APIs to access
# all variables under scope and retrieve and check existing scopes.
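The callback pattern keeps graph construction separate from execution: the assign ops and placeholders are built once, and the values are fed only at run time. A hedged sketch (assuming the tf.contrib.framework entry point for assign_from_values_fn):

import numpy as np
import tensorflow as tf

v = tf.get_variable('v', shape=[2], initializer=tf.zeros_initializer())

# Build the assignment graph once; the callback only runs it.
init_fn = tf.contrib.framework.assign_from_values_fn(
    {'v': np.array([1.0, 2.0], dtype=np.float32)})

with tf.Session() as sess:
  init_fn(sess)
  print(sess.run(v))  # [1. 2.]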
Example 12: _init_from_proto
# Required import: from tensorflow.python import ops [as alias]
# Or: from tensorflow.python.ops import variables [as alias]
def _init_from_proto(self, variable_def, import_scope=None):
  """Initializes from `VariableDef` proto."""
  assert isinstance(variable_def, variable_pb2.VariableDef)
  if not variable_def.is_resource:
    raise ValueError("Trying to restore Variable as ResourceVariable.")
  # Create from variable_def.
  g = ops.get_default_graph()
  self._handle = g.as_graph_element(
      ops.prepend_name_scope(variable_def.variable_name,
                             import_scope=import_scope))
  self._initialize_op = g.as_graph_element(
      ops.prepend_name_scope(variable_def.initializer_name,
                             import_scope=import_scope))
  if variable_def.snapshot_name:
    self._cached_value = g.as_graph_element(
        ops.prepend_name_scope(variable_def.snapshot_name,
                               import_scope=import_scope))
  else:
    self._cached_value = None
  if variable_def.HasField("save_slice_info_def"):
    self._save_slice_info = variables.Variable.SaveSliceInfo(
        save_slice_info_def=variable_def.save_slice_info_def)
  else:
    self._save_slice_info = None
  self._caching_device = None
  self._dtype = dtypes.as_dtype(self._handle.op.get_attr("dtype"))
  self._graph_element = self.value()
Example 13: _from_proto_fn
# Required import: from tensorflow.python import ops [as alias]
# Or: from tensorflow.python.ops import variables [as alias]
def _from_proto_fn(v, import_scope=None):
  """Creates a Variable or ResourceVariable from a VariableDef as needed."""
  if v.is_resource:
    return ResourceVariable.from_proto(v, import_scope=import_scope)
  return variables.Variable.from_proto(v, import_scope=import_scope)
Example 14: minimize
# Required import: from tensorflow.python import ops [as alias]
# Or: from tensorflow.python.ops import variables [as alias]
def minimize(self, loss, global_step=None, var_list=None,
             gate_gradients=GATE_OP, aggregation_method=None,
             colocate_gradients_with_ops=False, name=None,
             grad_loss=None):
  """Adapted from the TensorFlow Optimizer base class member function:

  Add operations to minimize `loss` by updating `var_list`.
  This method simply combines calls to `compute_gradients()` and
  `apply_gradients()`. If you want to process the gradients before applying
  them, call `tf.gradients()` and `self.apply_gradients()` explicitly instead
  of using this function.
  """
  grads_and_vars = self._optimizer.compute_gradients(
      loss, var_list=var_list, gate_gradients=gate_gradients,
      aggregation_method=aggregation_method,
      colocate_gradients_with_ops=colocate_gradients_with_ops,
      grad_loss=grad_loss)
  vars_with_grad = [v for g, v in grads_and_vars if g is not None]
  if not vars_with_grad:
    raise ValueError(
        "No gradients provided for any variable, check your graph for ops"
        " that do not support gradients, between variables %s and loss %s." %
        ([str(v) for _, v in grads_and_vars], loss))
  # Debug output: print each gradient/variable pair before applying.
  for g, v in grads_and_vars:
    print("g ", g)
    print("v ", v)
  return self.apply_gradients(grads_and_vars)
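For reference, the two-step split that this minimize wraps can be spelled out directly with a stock optimizer. A minimal, self-contained sketch in plain TF 1.x (not tied to the wrapper class above):

import tensorflow as tf

x = tf.get_variable('x', initializer=3.0)
loss = tf.square(x)

opt = tf.train.GradientDescentOptimizer(learning_rate=0.1)
# minimize() is compute_gradients() followed by apply_gradients();
# splitting them lets you inspect or transform the gradients in between.
grads_and_vars = opt.compute_gradients(loss)
train_op = opt.apply_gradients(grads_and_vars)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run(train_op)
  print(sess.run(x))  # 3.0 - 0.1 * 6.0 = 2.4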
Example 15: __init__
# Required import: from tensorflow.python import ops [as alias]
# Or: from tensorflow.python.ops import variables [as alias]
def __init__(self,
             ctx,
             enqueue_ops,
             dequeue_ops,
             tpu_compile_op,
             run_infeed_loop_on_coordinator=True,
             rendezvous=None,
             master=None,
             session_config=None,
             tpu_init_ops=None,
             outfeed_every_n_steps=1):
  self._master_job = ctx.master_job
  self._enqueue_ops = enqueue_ops
  self._dequeue_ops = dequeue_ops
  self._rendezvous = rendezvous
  self._master = master
  self._session_config = session_config
  self._init_ops = list(tpu_init_ops or [])
  if ctx.embedding_config is None:
    self._embedding_layer_config = None
  else:
    self._embedding_layer_config = (
        ctx.embedding_config.tpu_embedding.config_proto)
  self._run_infeed_loop_on_coordinator = run_infeed_loop_on_coordinator
  self._initial_infeed_sleep_secs = (
      ctx.config.tpu_config.initial_infeed_sleep_secs)
  self._tpu_compile_op = tpu_compile_op
  # When using model parallelism, the TPU is pre-initialized at startup to
  # fetch mesh information, so we skip re-initializing it here for
  # MeshTensorFlow, which places variables on the TPU directly:
  # re-initializing the TPU would corrupt those variables, because the
  # previously allocated memory might be overwritten for other purposes.
  if (ctx.model_parallelism_enabled and
      (ctx.config.tpu_config.per_host_input_for_training is
       tpu_config.InputPipelineConfig.BROADCAST)):
    self._should_initialize_tpu = False
  else:
    self._should_initialize_tpu = True
  self._outfeed_every_n_steps = outfeed_every_n_steps