This article collects typical usage examples of the Python attribute tensorflow.python.ops.variable_scope.AUTO_REUSE. If you are wondering what variable_scope.AUTO_REUSE does and how to use it, the curated examples below may help. You can also explore the containing module, tensorflow.python.ops.variable_scope, for further usage examples.
Four code examples of variable_scope.AUTO_REUSE are shown below, ordered by popularity by default.
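Before the extracted examples, here is a minimal standalone sketch (not taken from any of the projects below) of what AUTO_REUSE does: the first get_variable call inside the scope creates the variable, and every later call with the same name returns the existing variable instead of raising a "variable already exists" ValueError.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

def shared_dense(x):
  # AUTO_REUSE: create 'shared/w' on the first call, reuse it afterwards.
  with tf.variable_scope('shared', reuse=tf.AUTO_REUSE):
    w = tf.get_variable('w', shape=[4, 4])
  return tf.matmul(x, w)

x = tf.placeholder(tf.float32, shape=[None, 4])
y1 = shared_dense(x)  # creates shared/w
y2 = shared_dense(x)  # reuses shared/w; no ValueError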
Example 1: register_block
# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import AUTO_REUSE [as alias]
# Also assumed from the surrounding module: the VARIABLE_SCOPE sentinel and
# the ensure_sequence helper.
def register_block(self, layer_key, fisher_block, reuse=VARIABLE_SCOPE):
  if reuse is VARIABLE_SCOPE:
    reuse = variable_scope.get_variable_scope().reuse

  if reuse is True or (reuse is variable_scope.AUTO_REUSE and
                       layer_key in self.fisher_blocks):
    result = self.fisher_blocks[layer_key]
    if type(result) != type(fisher_block):  # pylint: disable=unidiomatic-typecheck
      raise ValueError(
          "Attempted to register FisherBlock of type %s when existing "
          "FisherBlock has type %s." % (type(fisher_block), type(result)))
    return result

  if reuse is False and layer_key in self.fisher_blocks:
    raise ValueError("FisherBlock for %s is already in LayerCollection." %
                     (layer_key,))

  # Insert fisher_block into self.fisher_blocks.
  if layer_key in self.fisher_blocks:
    raise ValueError("Duplicate registration: {}".format(layer_key))
  # Raise an error if any variable in layer_key has been registered in any
  # other blocks.
  variable_to_block = {
      var: (params, block)
      for (params, block) in self.fisher_blocks.items()
      for var in ensure_sequence(params)
  }
  for variable in ensure_sequence(layer_key):
    if variable in variable_to_block:
      prev_key, prev_block = variable_to_block[variable]
      raise ValueError(
          "Attempted to register layer_key {} with block {}, but variable {}"
          " was already registered in key {} with block {}.".format(
              layer_key, fisher_block, variable, prev_key, prev_block))

  self.fisher_blocks[layer_key] = fisher_block
  return fisher_block
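A hypothetical usage sketch (layer_collection, layer_key, and fisher_block are placeholder names, not from the source): because reuse defaults to the VARIABLE_SCOPE sentinel, the method inherits the reuse mode of the enclosing variable scope, so wrapping calls in an AUTO_REUSE scope makes repeated registration of the same key idempotent rather than an error.

from tensorflow.python.ops import variable_scope

with variable_scope.variable_scope('model', reuse=variable_scope.AUTO_REUSE):
  block = layer_collection.register_block(layer_key, fisher_block)
  # Under AUTO_REUSE, registering the same key again returns the
  # existing block instead of raising.
  same_block = layer_collection.register_block(layer_key, fisher_block)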
Example 2: _get_or_create_stop_var
# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import AUTO_REUSE [as alias]
# Also needed: import tensorflow as tf
def _get_or_create_stop_var():
  with tf.compat.v1.variable_scope(
      name_or_scope='signal_early_stopping',
      values=[],
      reuse=tf.compat.v1.AUTO_REUSE):
    return tf.compat.v1.get_variable(
        name='STOP',
        shape=[],
        dtype=tf.dtypes.bool,
        initializer=tf.compat.v1.initializers.constant(False),
        collections=[tf.compat.v1.GraphKeys.GLOBAL_VARIABLES],
        trainable=False)
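A short sketch of the AUTO_REUSE guarantee here (assuming the function above is in scope): calling it twice returns the same underlying variable, which is what lets independent hooks share one stop flag.

stop_var_a = _get_or_create_stop_var()
stop_var_b = _get_or_create_stop_var()
assert stop_var_a is stop_var_b  # AUTO_REUSE hands back the same variable

# Any hook can then signal a graph-wide stop:
request_stop = stop_var_a.assign(True)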
Example 3: _get_or_create_stop_var_with_aggregation
# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import AUTO_REUSE [as alias]
# Also needed: import tensorflow as tf; from tensorflow.python.framework
# import ops; from tensorflow.python.ops import init_ops
def _get_or_create_stop_var_with_aggregation(self):
  with variable_scope.variable_scope(
      name_or_scope='signal_early_stopping',
      values=[],
      reuse=variable_scope.AUTO_REUSE):
    return variable_scope.get_variable(
        name='STOP',
        shape=[],
        dtype=tf.dtypes.int32,
        initializer=init_ops.constant_initializer(0),
        collections=[ops.GraphKeys.GLOBAL_VARIABLES],
        synchronization=variable_scope.VariableSynchronization.ON_WRITE,
        aggregation=variable_scope.VariableAggregation.SUM,
        trainable=False)
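A hypothetical sketch of why this variant uses int32 with SUM aggregation (hook is a placeholder for the object owning the method, not from the source): under a distribution strategy each replica writes 0 or 1 into its copy of STOP, and a cross-replica read returns the sum, so any nonzero value means at least one replica has voted to stop.

import tensorflow as tf

stop_var = hook._get_or_create_stop_var_with_aggregation()  # 'hook' is hypothetical
vote_to_stop = stop_var.assign(1)      # this replica votes to stop
should_stop = tf.greater(stop_var, 0)  # True once any replica has voted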
Example 4: _create_or_get_iterations_per_loop
# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import AUTO_REUSE [as alias]
# Also needed: from tensorflow.python.framework import dtypes, ops;
# from tensorflow.python.ops import init_ops; from tensorflow.python.training
# import training_util; plus the module constants _TPU_ESTIMATOR and
# _ITERATIONS_PER_LOOP_VAR.
def _create_or_get_iterations_per_loop():
  """Creates or gets the iterations_per_loop variable.

  In TPUEstimator, the user-provided computation, the model_fn, is wrapped
  inside a tf.while_loop for peak performance. The iterations of the loop are
  specified by this variable, which adjusts its value on the CPU after each
  TPU program execution and before the next TPU execution.

  The purpose of using a variable, rather than a constant, is to allow
  TPUEstimator to adapt the number of TPU training iterations to the final
  step count specified by users. For example, if the user sets
  iterations_per_loop to 4 in TPUConfig and steps to 10 in
  TPUEstimator.train(), the iterations_per_loop variable will have the
  following values before each TPU training run:

  - 1st TPU execution: iterations_per_loop = 4
  - 2nd TPU execution: iterations_per_loop = 4
  - 3rd TPU execution: iterations_per_loop = 2

  As model_fn increases the global step once per train_op invocation, the
  global step is 10 after all TPU executions, matching the steps=10 input
  passed in by users.

  Returns:
    A TF non-trainable resource variable.

  Raises:
    RuntimeError: If multiple iterations_per_loop variables were found.
  """
  graph = ops.get_default_graph()
  collection_name = '{}_{}'.format(_TPU_ESTIMATOR, _ITERATIONS_PER_LOOP_VAR)
  iter_vars = graph.get_collection(collection_name)
  if len(iter_vars) == 1:
    return iter_vars[0]
  elif len(iter_vars) > 1:
    raise RuntimeError('Multiple iterations_per_loop_var in collection.')

  with ops.colocate_with(training_util.get_global_step()):
    with variable_scope.variable_scope(
        _TPU_ESTIMATOR, reuse=variable_scope.AUTO_REUSE):
      return variable_scope.get_variable(
          _ITERATIONS_PER_LOOP_VAR,
          initializer=init_ops.zeros_initializer(),
          shape=[],
          dtype=dtypes.int32,
          trainable=False,
          collections=[collection_name, ops.GraphKeys.LOCAL_VARIABLES],
          use_resource=True)
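An illustrative pure-Python helper (not part of TPUEstimator) that reproduces the schedule described in the docstring, i.e. how the variable's value would evolve for steps=10 and iterations_per_loop=4:

def loop_schedule(steps, iterations_per_loop):
  # Number of iterations each successive TPU execution will run for.
  schedule = []
  while steps > 0:
    schedule.append(min(iterations_per_loop, steps))
    steps -= schedule[-1]
  return schedule

assert loop_schedule(10, 4) == [4, 4, 2]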