This page collects typical usage examples of random_seed.set_random_seed from the Python module tensorflow.python.framework. If you are unsure what random_seed.set_random_seed does or how to call it, the curated examples below may help; you can also explore the containing module, tensorflow.python.framework.random_seed, for more context.
The following shows 15 code examples of random_seed.set_random_seed, sorted by popularity by default.
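Before the examples, a minimal sketch of what set_random_seed does: it fixes the graph-level seed from which TensorFlow derives the seeds of individual random ops, so rebuilding the same graph with the same graph-level seed reproduces the same random draws. The snippet below is illustrative only; it assumes a TF 1.x-style graph-mode setup and reuses the same internal module paths as the examples that follow.

from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import random_ops

for seed in (0, 0, 1):
  # A fresh graph per iteration: the two seed=0 runs print identical
  # values, while the seed=1 run prints different ones.
  with ops.Graph().as_default():
    random_seed.set_random_seed(seed)
    x = random_ops.random_uniform([2])
    with session.Session() as sess:
      print(seed, sess.run(x))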
Example 1: testAtrousFullyConvolutionalValues
# Required import: from tensorflow.python.framework import random_seed [as alias]
# Alternatively: from tensorflow.python.framework.random_seed import set_random_seed [as alias]
def testAtrousFullyConvolutionalValues(self):
  """Verify dense feature extraction with atrous convolution."""
  nominal_stride = 32
  for output_stride in [4, 8, 16, 32, None]:
    with arg_scope(resnet_utils.resnet_arg_scope(is_training=False)):
      with ops.Graph().as_default():
        with self.test_session() as sess:
          random_seed.set_random_seed(0)
          inputs = create_test_input(2, 81, 81, 3)
          # Dense feature extraction followed by subsampling.
          output, _ = self._resnet_small(
              inputs, None, global_pool=False, output_stride=output_stride)
          if output_stride is None:
            factor = 1
          else:
            factor = nominal_stride // output_stride
          output = resnet_utils.subsample(output, factor)
          # Make the two networks use the same weights.
          variable_scope.get_variable_scope().reuse_variables()
          # Feature extraction at the nominal network rate.
          expected, _ = self._resnet_small(inputs, None, global_pool=False)
          sess.run(variables.global_variables_initializer())
          self.assertAllClose(
              output.eval(), expected.eval(), atol=1e-4, rtol=1e-4)
Example 2: testTrainWithNoInitAssignCanAchieveZeroLoss
# Required import: from tensorflow.python.framework import random_seed [as alias]
# Alternatively: from tensorflow.python.framework.random_seed import set_random_seed [as alias]
def testTrainWithNoInitAssignCanAchieveZeroLoss(self):
  logdir = os.path.join(
      tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
  g = ops.Graph()
  with g.as_default():
    random_seed.set_random_seed(0)
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
    tf_predictions = BatchNormClassifier(tf_inputs)
    loss_ops.log_loss(tf_predictions, tf_labels)
    total_loss = loss_ops.get_total_loss()
    optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    train_op = learning.create_train_op(total_loss, optimizer)
    loss = learning.train(
        train_op, logdir, number_of_steps=300, log_every_n_steps=10)
    self.assertLess(loss, .1)
Example 3: testUseGlobalStep
# Required import: from tensorflow.python.framework import random_seed [as alias]
# Alternatively: from tensorflow.python.framework.random_seed import set_random_seed [as alias]
def testUseGlobalStep(self):
  with ops.Graph().as_default():
    random_seed.set_random_seed(0)
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
    tf_predictions = BatchNormClassifier(tf_inputs)
    loss_ops.log_loss(tf_predictions, tf_labels)
    total_loss = loss_ops.get_total_loss()
    optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    train_op = learning.create_train_op(total_loss, optimizer)
    global_step = variables_lib2.get_or_create_global_step()
    with session.Session() as sess:
      # Initialize all variables.
      sess.run(variables_lib.global_variables_initializer())
      for _ in range(10):
        sess.run([train_op])
      global_step = global_step.eval()
      # After 10 updates, global_step should be 10.
      self.assertAllClose(global_step, 10)
Example 4: testNoneGlobalStep
# Required import: from tensorflow.python.framework import random_seed [as alias]
# Alternatively: from tensorflow.python.framework.random_seed import set_random_seed [as alias]
def testNoneGlobalStep(self):
  with ops.Graph().as_default():
    random_seed.set_random_seed(0)
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
    tf_predictions = BatchNormClassifier(tf_inputs)
    loss_ops.log_loss(tf_predictions, tf_labels)
    total_loss = loss_ops.get_total_loss()
    optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    train_op = learning.create_train_op(
        total_loss, optimizer, global_step=None)
    global_step = variables_lib2.get_or_create_global_step()
    with session.Session() as sess:
      # Initialize all variables.
      sess.run(variables_lib.global_variables_initializer())
      for _ in range(10):
        sess.run([train_op])
      global_step = global_step.eval()
      # Since the train_op doesn't use the global_step, it shouldn't change.
      self.assertAllClose(global_step, 0)
Example 5: testTrainWithNonDefaultGraph
# Required import: from tensorflow.python.framework import random_seed [as alias]
# Alternatively: from tensorflow.python.framework.random_seed import set_random_seed [as alias]
def testTrainWithNonDefaultGraph(self):
  logdir = os.path.join(
      tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
  g = ops.Graph()
  with g.as_default():
    random_seed.set_random_seed(0)
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
    tf_predictions = LogisticClassifier(tf_inputs)
    loss_ops.log_loss(tf_predictions, tf_labels)
    total_loss = loss_ops.get_total_loss()
    optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    train_op = learning.create_train_op(total_loss, optimizer)
  # Train outside the `with` block: g is not the default graph here,
  # so it must be passed to learning.train explicitly.
  loss = learning.train(
      train_op, logdir, number_of_steps=300, log_every_n_steps=10, graph=g)
  self.assertIsNotNone(loss)
  self.assertLess(loss, .015)
Example 6: testTrainWithSessionConfig
# Required import: from tensorflow.python.framework import random_seed [as alias]
# Alternatively: from tensorflow.python.framework.random_seed import set_random_seed [as alias]
def testTrainWithSessionConfig(self):
  with ops.Graph().as_default():
    random_seed.set_random_seed(0)
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
    tf_predictions = LogisticClassifier(tf_inputs)
    loss_ops.log_loss(tf_predictions, tf_labels)
    total_loss = loss_ops.get_total_loss()
    optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    train_op = learning.create_train_op(total_loss, optimizer)
    session_config = config_pb2.ConfigProto(allow_soft_placement=True)
    loss = learning.train(
        train_op,
        None,
        number_of_steps=300,
        log_every_n_steps=10,
        session_config=session_config)
  self.assertIsNotNone(loss)
  self.assertLess(loss, .015)
Example 7: testTrainWithNoneAsLogdirWhenUsingSummariesRaisesError
# Required import: from tensorflow.python.framework import random_seed [as alias]
# Alternatively: from tensorflow.python.framework.random_seed import set_random_seed [as alias]
def testTrainWithNoneAsLogdirWhenUsingSummariesRaisesError(self):
  with ops.Graph().as_default():
    random_seed.set_random_seed(0)
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
    tf_predictions = LogisticClassifier(tf_inputs)
    loss_ops.log_loss(tf_predictions, tf_labels)
    total_loss = loss_ops.get_total_loss()
    summary.scalar('total_loss', total_loss)
    optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    train_op = learning.create_train_op(total_loss, optimizer)
    summary_op = summary.merge_all()
    with self.assertRaises(ValueError):
      learning.train(
          train_op, None, number_of_steps=300, summary_op=summary_op)
Example 8: testTrainWithNoneAsLogdirWhenUsingTraceRaisesError
# Required import: from tensorflow.python.framework import random_seed [as alias]
# Alternatively: from tensorflow.python.framework.random_seed import set_random_seed [as alias]
def testTrainWithNoneAsLogdirWhenUsingTraceRaisesError(self):
  with ops.Graph().as_default():
    random_seed.set_random_seed(0)
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
    tf_predictions = LogisticClassifier(tf_inputs)
    loss_ops.log_loss(tf_predictions, tf_labels)
    total_loss = loss_ops.get_total_loss()
    optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    train_op = learning.create_train_op(total_loss, optimizer)
    with self.assertRaises(ValueError):
      learning.train(
          train_op, None, number_of_steps=300, trace_every_n_steps=10)
Example 9: testTrainWithNoneAsLogdirWhenUsingSaverRaisesError
# Required import: from tensorflow.python.framework import random_seed [as alias]
# Alternatively: from tensorflow.python.framework.random_seed import set_random_seed [as alias]
def testTrainWithNoneAsLogdirWhenUsingSaverRaisesError(self):
  with ops.Graph().as_default():
    random_seed.set_random_seed(0)
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
    tf_predictions = LogisticClassifier(tf_inputs)
    loss_ops.log_loss(tf_predictions, tf_labels)
    total_loss = loss_ops.get_total_loss()
    optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    train_op = learning.create_train_op(total_loss, optimizer)
    saver = saver_lib.Saver()
    with self.assertRaises(ValueError):
      learning.train(
          train_op, None, init_op=None, number_of_steps=300, saver=saver)
Example 10: testTrainWithLocalVariable
# Required import: from tensorflow.python.framework import random_seed [as alias]
# Alternatively: from tensorflow.python.framework.random_seed import set_random_seed [as alias]
def testTrainWithLocalVariable(self):
  logdir = os.path.join(
      tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
  with ops.Graph().as_default():
    random_seed.set_random_seed(0)
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
    local_multiplier = variables_lib2.local_variable(1.0)
    tf_predictions = LogisticClassifier(tf_inputs) * local_multiplier
    loss_ops.log_loss(tf_predictions, tf_labels)
    total_loss = loss_ops.get_total_loss()
    optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    train_op = learning.create_train_op(total_loss, optimizer)
    loss = learning.train(
        train_op, logdir, number_of_steps=300, log_every_n_steps=10)
    self.assertIsNotNone(loss)
    self.assertLess(loss, .015)
Example 11: testGlobalStepIsIncrementedByDefault
# Required import: from tensorflow.python.framework import random_seed [as alias]
# Alternatively: from tensorflow.python.framework.random_seed import set_random_seed [as alias]
def testGlobalStepIsIncrementedByDefault(self):
  with ops.Graph().as_default():
    random_seed.set_random_seed(0)
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
    tf_predictions = batchnorm_classifier(tf_inputs)
    loss = losses.log_loss(tf_labels, tf_predictions)
    optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    train_op = training.create_train_op(loss, optimizer)
    global_step = variables_lib.get_or_create_global_step()
    with self.cached_session() as session:
      # Initialize all variables.
      session.run(variables_lib2.global_variables_initializer())
      for _ in range(10):
        session.run(train_op)
      # After 10 updates, global_step should be 10.
      self.assertAllClose(global_step.eval(), 10)
Example 12: testGlobalStepNotIncrementedWhenSetToNone
# Required import: from tensorflow.python.framework import random_seed [as alias]
# Alternatively: from tensorflow.python.framework.random_seed import set_random_seed [as alias]
def testGlobalStepNotIncrementedWhenSetToNone(self):
  with ops.Graph().as_default():
    random_seed.set_random_seed(0)
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
    tf_predictions = batchnorm_classifier(tf_inputs)
    loss = losses.log_loss(tf_labels, tf_predictions)
    optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    train_op = training.create_train_op(loss, optimizer, global_step=None)
    global_step = variables_lib.get_or_create_global_step()
    with self.cached_session() as session:
      # Initialize all variables.
      session.run(variables_lib2.global_variables_initializer())
      for _ in range(10):
        session.run(train_op)
      # Since the train_op doesn't use the global_step, it shouldn't change.
      self.assertAllClose(global_step.eval(), 0)
Example 13: testTrainWithNoInitAssignCanAchieveZeroLoss
# Required import: from tensorflow.python.framework import random_seed [as alias]
# Alternatively: from tensorflow.python.framework.random_seed import set_random_seed [as alias]
def testTrainWithNoInitAssignCanAchieveZeroLoss(self):
  with ops.Graph().as_default():
    random_seed.set_random_seed(0)
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
    tf_predictions = batchnorm_classifier(tf_inputs)
    losses.log_loss(tf_labels, tf_predictions)
    total_loss = losses.get_total_loss()
    optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    train_op = training.create_train_op(total_loss, optimizer)
    loss = training.train(
        train_op,
        None,
        hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=300)],
        save_summaries_steps=None,
        save_checkpoint_secs=None)
    self.assertLess(loss, .1)
Example 14: testTrainWithLocalVariable
# Required import: from tensorflow.python.framework import random_seed [as alias]
# Alternatively: from tensorflow.python.framework.random_seed import set_random_seed [as alias]
def testTrainWithLocalVariable(self):
  with ops.Graph().as_default():
    random_seed.set_random_seed(0)
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
    local_multiplier = variables_lib.local_variable(1.0)
    tf_predictions = logistic_classifier(tf_inputs) * local_multiplier
    losses.log_loss(tf_labels, tf_predictions)
    total_loss = losses.get_total_loss()
    optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
    train_op = training.create_train_op(total_loss, optimizer)
    loss = training.train(
        train_op,
        None,
        hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=300)],
        save_summaries_steps=None,
        save_checkpoint_secs=None)
    self.assertIsNotNone(loss)
    self.assertLess(loss, .015)
Example 15: testGradientNoise
# Required import: from tensorflow.python.framework import random_seed [as alias]
# Alternatively: from tensorflow.python.framework.random_seed import set_random_seed [as alias]
def testGradientNoise(self):
  random_seed.set_random_seed(42)
  with self.cached_session() as session:
    x, var, loss, global_step = _setup_model()
    train = optimizers_lib.optimize_loss(
        loss,
        global_step,
        learning_rate=0.1,
        optimizer="SGD",
        gradient_noise_scale=10.0)
    variables.global_variables_initializer().run()
    session.run(train, feed_dict={x: 5})
    var_value, global_step_value = session.run([var, global_step])
    # Due to randomness, the following number may change if the graph is different.
    self.assertAlmostEqual(var_value, 9.801016, 4)
    self.assertEqual(global_step_value, 1)