This article collects typical usage examples of the Python method tensorflow.python.ops.variables.trainable_variables. If you have been wondering what variables.trainable_variables does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also browse the containing module, tensorflow.python.ops.variables, for further usage examples.
The following presents 15 code examples of variables.trainable_variables, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
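Before the examples, a quick orientation: trainable_variables() returns every variable in the current graph's TRAINABLE_VARIABLES collection, i.e. every variable created with trainable=True (the default). The minimal sketch below uses the public tf.compat.v1 alias of this internal function; the variable names and shapes are made up for illustration.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

# Variables are trainable by default and join the TRAINABLE_VARIABLES
# collection; trainable=False keeps them out of it.
w = tf.get_variable("w", shape=[2])
b = tf.get_variable("b", shape=[1], trainable=False)

print([v.op.name for v in tf.trainable_variables()])  # -> ['w']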
Example 1: loss

# Required import: from tensorflow.python.ops import variables [as alias]
# Or: from tensorflow.python.ops.variables import trainable_variables [as alias]
def loss(self, data, labels):
  """The loss to minimize while training."""
  if self.is_regression:
    diff = self.training_inference_graph(data) - math_ops.to_float(labels)
    mean_squared_error = math_ops.reduce_mean(diff * diff)
    root_mean_squared_error = math_ops.sqrt(mean_squared_error, name="loss")
    loss = root_mean_squared_error
  else:
    loss = math_ops.reduce_mean(
        nn_ops.sparse_softmax_cross_entropy_with_logits(
            labels=array_ops.squeeze(math_ops.to_int32(labels)),
            logits=self.training_inference_graph(data)),
        name="loss")
  if self.regularizer:
    loss += layers.apply_regularization(self.regularizer,
                                        variables.trainable_variables())
  return loss
Example 2: create_train_op

# Required import: from tensorflow.python.ops import variables [as alias]
# Or: from tensorflow.python.ops.variables import trainable_variables [as alias]
def create_train_op(self, learning_rate=1.0, gradient_multiplier=1.0):
  tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
  tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

  tf_predictions = LogisticClassifier(tf_inputs)
  loss_ops.log_loss(tf_predictions, tf_labels)
  total_loss = loss_ops.get_total_loss()

  optimizer = gradient_descent.GradientDescentOptimizer(
      learning_rate=learning_rate)

  if gradient_multiplier != 1.0:
    variables = variables_lib.trainable_variables()
    gradient_multipliers = {var: gradient_multiplier for var in variables}
  else:
    gradient_multipliers = None

  return learning.create_train_op(
      total_loss, optimizer, gradient_multipliers=gradient_multipliers)
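For context on Example 2: gradient_multipliers is a dict mapping each variable to a scalar, and create_train_op scales that variable's gradient by the scalar before the optimizer applies it. A rough hand-rolled equivalent against the plain tf.compat.v1 optimizer API might look like the sketch below; the function name is mine, and it assumes dense gradients (contrib's multiply_gradients also handles IndexedSlices).

import tensorflow.compat.v1 as tf

def apply_with_multipliers(optimizer, loss, gradient_multipliers):
  # Compute per-variable gradients, scale each gradient by its
  # multiplier (defaulting to 1.0), then apply the scaled gradients.
  grads_and_vars = optimizer.compute_gradients(loss, tf.trainable_variables())
  scaled = [(None if g is None else g * gradient_multipliers.get(v, 1.0), v)
            for g, v in grads_and_vars]
  return optimizer.apply_gradients(scaled)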
Example 3: _sync_variables_ops

# Required import: from tensorflow.python.ops import variables [as alias]
# Or: from tensorflow.python.ops.variables import trainable_variables [as alias]
def _sync_variables_ops(ctx):
  """Create variables synchronization ops.

  Gets the variables back from TPU nodes. This means the variables updated
  by TPU will now be *synced* to host memory.

  In BROADCAST mode, we skip this sync since the variables are usually too
  big to transmit via RPC.

  Args:
    ctx: A `_InternalTPUContext` instance with mode.

  Returns:
    A list of sync ops.
  """
  if not ctx.is_input_broadcast_with_iterators():
    return [
        array_ops.check_numerics(v.read_value(),
                                 'Gradient for %s is NaN' % v.name).op
        for v in variables.trainable_variables()
    ]
  else:
    return [control_flow_ops.no_op()]
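A note on the trick in Example 3: check_numerics passes its input through unchanged, so it serves both to read each trainable variable back to the host and to assert the values are finite, failing with the given message otherwise. A standalone sketch of that behavior, with made-up tensor contents:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

x = tf.constant([1.0, float("nan")])
# The op forwards x unchanged, but fails if x contains any NaN or Inf.
checked = tf.check_numerics(x, "found bad value in x")

with tf.Session() as sess:
  sess.run(checked)  # raises InvalidArgumentError quoting the message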
Example 4: create_train_op

# Required import: from tensorflow.python.ops import variables [as alias]
# Or: from tensorflow.python.ops.variables import trainable_variables [as alias]
def create_train_op(self, learning_rate=1.0, gradient_multiplier=1.0):
  tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
  tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

  tf_predictions = logistic_classifier(tf_inputs)
  losses.log_loss(tf_labels, tf_predictions)
  total_loss = losses.get_total_loss()

  optimizer = gradient_descent.GradientDescentOptimizer(
      learning_rate=learning_rate)

  def transform_grads_fn(grads):
    if gradient_multiplier != 1.0:
      variables = variables_lib2.trainable_variables()
      gradient_multipliers = {var: gradient_multiplier for var in variables}

      with ops.name_scope('multiply_grads'):
        return training.multiply_gradients(grads, gradient_multipliers)
    else:
      return grads

  return training.create_train_op(
      total_loss, optimizer, transform_grads_fn=transform_grads_fn)
Example 5: testDoubleCallInSameScopeFails

# Required import: from tensorflow.python.ops import variables [as alias]
# Or: from tensorflow.python.ops.variables import trainable_variables [as alias]
def testDoubleCallInSameScopeFails(self):

  @rev_block_lib.recompute_grad
  def layer_with_recompute(inputs):
    return core_layers.dense(inputs, 2)

  with variable_scope.variable_scope("layer", use_resource=True):
    inputs = array_ops.ones((2, 4), dtypes.float32)
    out1 = layer_with_recompute(inputs)
    out2 = layer_with_recompute(inputs) + out1
    out = math_ops.reduce_sum(out2)

  tvars = variables.trainable_variables()
  assert len(tvars) == 4
  with self.assertRaisesWithPredicateMatch(
      ValueError, "called twice in the same enclosing scope"):
    gradients_impl.gradients(out, [inputs] + tvars)
Example 6: testDoubleCallInUniqueScope

# Required import: from tensorflow.python.ops import variables [as alias]
# Or: from tensorflow.python.ops.variables import trainable_variables [as alias]
def testDoubleCallInUniqueScope(self):

  @rev_block_lib.recompute_grad
  def layer_with_recompute(inputs):
    with variable_scope.variable_scope("inner", use_resource=True):
      return core_layers.dense(inputs, 2)

  with variable_scope.variable_scope("layer", use_resource=True):
    inputs = array_ops.ones((2, 4), dtypes.float32)

    with variable_scope.variable_scope("layer1", use_resource=True):
      out1 = layer_with_recompute(inputs)
    with variable_scope.variable_scope("layer2", use_resource=True):
      out2 = layer_with_recompute(inputs) + out1

    out = math_ops.reduce_sum(out2)

  tvars = variables.trainable_variables()
  assert len(tvars) == 4
  grads = gradients_impl.gradients(out, [inputs] + tvars)
  for grad in grads:
    self.assertIsNotNone(grad)
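Examples 5 and 6 exercise contrib's rev_block_lib.recompute_grad, which discards intermediate activations in the forward pass and recomputes them during backprop, trading compute for memory. The public TF2 counterpart is tf.recompute_grad; the eager-mode sketch below is a minimal illustration (layer size and input shape are arbitrary).

import tensorflow as tf

dense = tf.keras.layers.Dense(2)

@tf.recompute_grad
def layer_with_recompute(x):
  # Activations inside this function are not kept; they are recomputed
  # when gradients are requested.
  return dense(x)

with tf.GradientTape() as tape:
  out = tf.reduce_sum(layer_with_recompute(tf.ones((2, 4))))
grads = tape.gradient(out, dense.trainable_variables)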
Example 7: create_train_op

# Required import: from tensorflow.python.ops import variables [as alias]
# Or: from tensorflow.python.ops.variables import trainable_variables [as alias]
def create_train_op(self, learning_rate=1.0, gradient_multiplier=1.0):
  tf_inputs = tf.constant(self._inputs, dtype=tf.float32)
  tf_labels = tf.constant(self._labels, dtype=tf.float32)

  tf_predictions = LogisticClassifier(tf_inputs)
  loss_ops.log_loss(tf_labels, tf_predictions)
  total_loss = loss_ops.get_total_loss()

  optimizer = gradient_descent.GradientDescentOptimizer(
      learning_rate=learning_rate)

  if gradient_multiplier != 1.0:
    variables = variables_lib.trainable_variables()
    gradient_multipliers = {var: gradient_multiplier for var in variables}
  else:
    gradient_multipliers = None

  return learning.create_train_op(
      total_loss, optimizer, gradient_multipliers=gradient_multipliers)
Example 8: loss

# Required import: from tensorflow.python.ops import variables [as alias]
# Or: from tensorflow.python.ops.variables import trainable_variables [as alias]
def loss(self, data, labels):
  """The loss to minimize while training."""
  if self.is_regression:
    diff = self.training_inference_graph(data) - math_ops.to_float(labels)
    mean_squared_error = math_ops.reduce_mean(diff * diff)
    root_mean_squared_error = math_ops.sqrt(mean_squared_error, name="loss")
    loss = root_mean_squared_error
  else:
    loss = math_ops.reduce_mean(
        nn_ops.sparse_softmax_cross_entropy_with_logits(
            logits=self.training_inference_graph(data),
            labels=array_ops.squeeze(math_ops.to_int32(labels))),
        name="loss")
  if self.regularizer:
    loss += layers.apply_regularization(self.regularizer,
                                        variables.trainable_variables())
  return loss
Example 9: _generate_shared_variables

# Required import: from tensorflow.python.ops import variables [as alias]
# Or: from tensorflow.python.ops.variables import trainable_variables [as alias]
def _generate_shared_variables(self):
  """Generate a global variable placed on ps for each trainable variable.

  This creates a new copy of each user-defined trainable variable and places
  them on ps_device. These variables store the averaged parameters.
  """
  # Only the chief should initialize the variables.
  if self._is_chief:
    collections = [ops.GraphKeys.GLOBAL_VARIABLES, "global_model"]
  else:
    collections = ["global_model"]

  # Generate new global variables dependent on trainable variables.
  with ops.device(self._device_setter):
    for v in variables.trainable_variables():
      _ = variable_scope.variable(
          name="%s/%s" % (self._name, v.op.name),
          initial_value=v.initialized_value(), trainable=False,
          collections=collections)

    # Place the global step on the ps so that all the workers can see it.
    self._global_step = variables.Variable(0, name="%s_global_step" %
                                           self._name, trainable=False)
Example 10: _rnn_get_variable

# Required import: from tensorflow.python.ops import variables [as alias]
# Or: from tensorflow.python.ops.variables import trainable_variables [as alias]
def _rnn_get_variable(self, getter, *args, **kwargs):
  variable = getter(*args, **kwargs)
  trainable = (variable in tf_variables.trainable_variables() or
               (isinstance(variable, tf_variables.PartitionedVariable) and
                list(variable)[0] in tf_variables.trainable_variables()))
  if trainable and variable not in self._trainable_weights:
    self._trainable_weights.append(variable)
  elif not trainable and variable not in self._non_trainable_weights:
    self._non_trainable_weights.append(variable)
  return variable
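A side note on the membership test in Example 10: Variable objects hash by object identity, so `v in tf_variables.trainable_variables()` asks whether that exact object was registered in the TRAINABLE_VARIABLES collection, while a PartitionedVariable is represented in the collection by its shards. A compact standalone version of the same check, with a hypothetical helper name, might look like:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

def is_trainable(variable):
  # Mirrors the test in _rnn_get_variable: a variable is trainable iff
  # it, or the first shard of a partitioned variable, is registered in
  # the TRAINABLE_VARIABLES collection.
  tvars = tf.trainable_variables()
  if isinstance(variable, tf.Variable):
    return variable in tvars
  return list(variable)[0] in tvars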
Example 11: _get_variable_for

# Required import: from tensorflow.python.ops import variables [as alias]
# Or: from tensorflow.python.ops.variables import trainable_variables [as alias]
def _get_variable_for(v):
  """Returns the ResourceVariable responsible for v, or v if not necessary."""
  if v.op.type == "VarHandleOp":
    for var in variables.trainable_variables():
      if (isinstance(var, resource_variable_ops.ResourceVariable)
          and var.handle.op is v.op):
        return var
    raise ValueError("Got %s but could not locate source variable." % (str(v)))
  return v
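For context on Example 11: a ResourceVariable is backed by a handle tensor produced by an op of type "VarHandleOp", so scanning trainable_variables() for the variable whose handle op matches recovers the Python object that owns a given handle. A quick demonstration of that invariant, assuming a fresh graph and an arbitrary variable name:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

v = tf.get_variable("v", shape=[3], use_resource=True)
print(v.handle.op.type)                                      # 'VarHandleOp'
print(v.handle.op is tf.trainable_variables()[0].handle.op)  # True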
Example 12: testSequenceToSequenceDims

# Required import: from tensorflow.python.ops import variables [as alias]
# Or: from tensorflow.python.ops.variables import trainable_variables [as alias]
def testSequenceToSequenceDims(self):
  with self.test_session():
    inputs = constant_op.constant(_rand(17, 1, 5))
    outputs = lstm1d.ndlstm_base(inputs, 8)
    variables.global_variables_initializer().run()
    names = [v.name for v in variables.trainable_variables()]
    self.assertEqual(len(names), 2)
    result = outputs.eval()
    self.assertEqual(tuple(result.shape), (17, 1, 8))
Example 13: testVariableRestoreWithArgScopeNested

# Required import: from tensorflow.python.ops import variables [as alias]
# Or: from tensorflow.python.ops.variables import trainable_variables [as alias]
def testVariableRestoreWithArgScopeNested(self):
  with self.cached_session():
    a = variables_lib2.variable('a', [])
    with arg_scope(
        [variables_lib2.variable], trainable=False, collections=['A', 'B']):
      b = variables_lib2.variable('b', [])
    c = variables_lib2.variable('c', [], trainable=False)
    self.assertEqual([a, c], variables_lib2.get_variables_to_restore())
    self.assertEqual([a], variables_lib.trainable_variables())
    self.assertEqual([b], ops.get_collection('A'))
    self.assertEqual([b], ops.get_collection('B'))
Example 14: testReuse

# Required import: from tensorflow.python.ops import variables [as alias]
# Or: from tensorflow.python.ops.variables import trainable_variables [as alias]
def testReuse(self):

  def f(x):
    return core_layers.dense(x, self.CHANNELS // 2)

  def g(x):
    return core_layers.dense(x, self.CHANNELS // 2)

  x = random_ops.random_uniform(
      [self.BATCH_SIZE, self.CHANNELS], dtype=dtypes.float32)
  x1, x2 = array_ops.split(x, 2, axis=-1)

  with variable_scope.variable_scope("test"):
    y1, y2 = rev_block_lib.rev_block(x1, x2, f, g, num_layers=self.NUM_LAYERS)

  num_vars_before = len(variables.global_variables())

  with variable_scope.variable_scope("test", reuse=True):
    y1, y2 = rev_block_lib.rev_block(x1, x2, f, g, num_layers=self.NUM_LAYERS)

  num_vars_after = len(variables.global_variables())
  self.assertEqual(num_vars_before, num_vars_after)

  loss = math_ops.reduce_mean(y1 + y2)
  _ = gradients_impl.gradients(loss,
                               [x] + variables.trainable_variables())

  with variable_scope.variable_scope("test", reuse=True):
    y1, y2 = rev_block_lib.rev_block(x1, x2, f, g, num_layers=self.NUM_LAYERS)

  num_vars_after = len(variables.global_variables())
  self.assertEqual(num_vars_before, num_vars_after)
Example 15: testTrainableFlagIsPassedOn

# Required import: from tensorflow.python.ops import variables [as alias]
# Or: from tensorflow.python.ops.variables import trainable_variables [as alias]
def testTrainableFlagIsPassedOn(self):
  for trainable in [True, False]:
    with ops.Graph().as_default():
      num_filters = 32
      input_size = [5, 10, 12, 3]

      images = random_ops.random_uniform(input_size, seed=1)
      layers_lib.conv2d_transpose(
          images, num_filters, [3, 3], stride=1, trainable=trainable)
      model_variables = variables.get_model_variables()
      trainable_variables = variables_lib.trainable_variables()
      for model_variable in model_variables:
        self.assertEqual(trainable, model_variable in trainable_variables)