本文整理汇总了Python中tensorflow.python.training.gradient_descent.GradientDescentOptimizer方法的典型用法代码示例。如果您正苦于以下问题:Python gradient_descent.GradientDescentOptimizer方法的具体用法?Python gradient_descent.GradientDescentOptimizer怎么用?Python gradient_descent.GradientDescentOptimizer使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块tensorflow.python.training.gradient_descent的用法示例。
在下文中一共展示了gradient_descent.GradientDescentOptimizer方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: setUp
# 需要导入模块: from tensorflow.python.training import gradient_descent [as 别名]
# 或者: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as 别名]
def setUp(self):
    """Build a small graph (a, b, c=a+b, d=a-c, e=c*d) plus a GD train op.

    NOTE(review): indentation was lost in the original paste; reconstructed
    from the obvious straight-line statement flow.
    """
    self.a = variables.Variable(10.0, name="a")
    self.b = variables.Variable(20.0, name="b")
    self.c = math_ops.add(self.a, self.b, name="c")  # Should be 30.0.
    self.d = math_ops.subtract(self.a, self.c, name="d")  # Should be -20.0.
    self.e = math_ops.multiply(self.c, self.d, name="e")  # Should be -600.0.
    self.ph = array_ops.placeholder(dtypes.float32, shape=(2, 2), name="ph")
    self.f = math_ops.multiply(self.e, self.ph, name="f")
    # One SGD step on e with learning rate 0.1.
    self.opt = gradient_descent.GradientDescentOptimizer(0.1).minimize(
        self.e, name="opt")
    self.sess = session.Session()
    # Only a and b need initialization; the rest are derived ops.
    self.sess.run(self.a.initializer)
    self.sess.run(self.b.initializer)
示例2: testTrainWithNoInitAssignCanAchieveZeroLoss
# 需要导入模块: from tensorflow.python.training import gradient_descent [as 别名]
# 或者: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as 别名]
def testTrainWithNoInitAssignCanAchieveZeroLoss(self):
    """Trains a batch-norm classifier for 300 steps and checks loss < 0.1."""
    logdir = os.path.join(
        tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
    g = ops.Graph()
    with g.as_default():
        random_seed.set_random_seed(0)
        tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
        tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
        tf_predictions = BatchNormClassifier(tf_inputs)
        # log_loss registers itself in the loss collection read below.
        loss_ops.log_loss(tf_predictions, tf_labels)
        total_loss = loss_ops.get_total_loss()
        optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
        train_op = learning.create_train_op(total_loss, optimizer)
        loss = learning.train(
            train_op, logdir, number_of_steps=300, log_every_n_steps=10)
        self.assertLess(loss, .1)
示例3: testUseGlobalStep
# 需要导入模块: from tensorflow.python.training import gradient_descent [as 别名]
# 或者: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as 别名]
def testUseGlobalStep(self):
    """Verifies the train op increments the global step once per run."""
    with ops.Graph().as_default():
        random_seed.set_random_seed(0)
        tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
        tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
        tf_predictions = BatchNormClassifier(tf_inputs)
        loss_ops.log_loss(tf_predictions, tf_labels)
        total_loss = loss_ops.get_total_loss()
        optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
        train_op = learning.create_train_op(total_loss, optimizer)
        global_step = variables_lib2.get_or_create_global_step()
        with session.Session() as sess:
            # Initialize all variables
            sess.run(variables_lib.global_variables_initializer())
            for _ in range(10):
                sess.run([train_op])
            global_step = global_step.eval()
            # After 10 updates global_step should be 10.
            self.assertAllClose(global_step, 10)
示例4: testNoneGlobalStep
# 需要导入模块: from tensorflow.python.training import gradient_descent [as 别名]
# 或者: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as 别名]
def testNoneGlobalStep(self):
    """Verifies global_step stays at 0 when the train op is given None."""
    with ops.Graph().as_default():
        random_seed.set_random_seed(0)
        tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
        tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
        tf_predictions = BatchNormClassifier(tf_inputs)
        loss_ops.log_loss(tf_predictions, tf_labels)
        total_loss = loss_ops.get_total_loss()
        optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
        # global_step=None detaches the train op from the step counter.
        train_op = learning.create_train_op(
            total_loss, optimizer, global_step=None)
        global_step = variables_lib2.get_or_create_global_step()
        with session.Session() as sess:
            # Initialize all variables
            sess.run(variables_lib.global_variables_initializer())
            for _ in range(10):
                sess.run([train_op])
            global_step = global_step.eval()
            # Since train_op doesn't use global_step it shouldn't change.
            self.assertAllClose(global_step, 0)
示例5: testTrainWithNonDefaultGraph
# 需要导入模块: from tensorflow.python.training import gradient_descent [as 别名]
# 或者: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as 别名]
def testTrainWithNonDefaultGraph(self):
    """Trains a logistic classifier built in an explicit (non-default) graph.

    NOTE(review): learning.train is called outside the `with` block and is
    passed graph=g explicitly — that is the behavior under test.
    """
    logdir = os.path.join(
        tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
    g = ops.Graph()
    with g.as_default():
        random_seed.set_random_seed(0)
        tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
        tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
        tf_predictions = LogisticClassifier(tf_inputs)
        loss_ops.log_loss(tf_predictions, tf_labels)
        total_loss = loss_ops.get_total_loss()
        optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
        train_op = learning.create_train_op(total_loss, optimizer)
    loss = learning.train(
        train_op, logdir, number_of_steps=300, log_every_n_steps=10, graph=g)
    self.assertIsNotNone(loss)
    self.assertLess(loss, .015)
示例6: testTrainWithNoneAsLogdir
# 需要导入模块: from tensorflow.python.training import gradient_descent [as 别名]
# 或者: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as 别名]
def testTrainWithNoneAsLogdir(self):
    """Training with logdir=None (no checkpoints/summaries) still converges."""
    with ops.Graph().as_default():
        random_seed.set_random_seed(0)
        tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
        tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
        tf_predictions = LogisticClassifier(tf_inputs)
        loss_ops.log_loss(tf_predictions, tf_labels)
        total_loss = loss_ops.get_total_loss()
        optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
        train_op = learning.create_train_op(total_loss, optimizer)
        loss = learning.train(
            train_op, None, number_of_steps=300, log_every_n_steps=10)
    self.assertIsNotNone(loss)
    self.assertLess(loss, .015)
示例7: testTrainWithNoneAsLogdirWhenUsingSummariesRaisesError
# 需要导入模块: from tensorflow.python.training import gradient_descent [as 别名]
# 或者: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as 别名]
def testTrainWithNoneAsLogdirWhenUsingSummariesRaisesError(self):
    """logdir=None with a summary_op must raise ValueError (nowhere to write)."""
    with ops.Graph().as_default():
        random_seed.set_random_seed(0)
        tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
        tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
        tf_predictions = LogisticClassifier(tf_inputs)
        loss_ops.log_loss(tf_predictions, tf_labels)
        total_loss = loss_ops.get_total_loss()
        summary.scalar('total_loss', total_loss)
        optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
        train_op = learning.create_train_op(total_loss, optimizer)
        summary_op = summary.merge_all()
        with self.assertRaises(ValueError):
            learning.train(
                train_op, None, number_of_steps=300, summary_op=summary_op)
示例8: testTrainWithNoneAsLogdirWhenUsingTraceRaisesError
# 需要导入模块: from tensorflow.python.training import gradient_descent [as 别名]
# 或者: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as 别名]
def testTrainWithNoneAsLogdirWhenUsingTraceRaisesError(self):
    """logdir=None with tracing enabled must raise ValueError."""
    with ops.Graph().as_default():
        random_seed.set_random_seed(0)
        tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
        tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
        tf_predictions = LogisticClassifier(tf_inputs)
        loss_ops.log_loss(tf_predictions, tf_labels)
        total_loss = loss_ops.get_total_loss()
        optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
        train_op = learning.create_train_op(total_loss, optimizer)
        with self.assertRaises(ValueError):
            learning.train(
                train_op, None, number_of_steps=300, trace_every_n_steps=10)
示例9: testTrainWithNoneAsLogdirWhenUsingSaverRaisesError
# 需要导入模块: from tensorflow.python.training import gradient_descent [as 别名]
# 或者: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as 别名]
def testTrainWithNoneAsLogdirWhenUsingSaverRaisesError(self):
    """logdir=None with an explicit Saver must raise ValueError."""
    with ops.Graph().as_default():
        random_seed.set_random_seed(0)
        tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
        tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
        tf_predictions = LogisticClassifier(tf_inputs)
        loss_ops.log_loss(tf_predictions, tf_labels)
        total_loss = loss_ops.get_total_loss()
        optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
        train_op = learning.create_train_op(total_loss, optimizer)
        saver = saver_lib.Saver()
        with self.assertRaises(ValueError):
            learning.train(
                train_op, None, init_op=None, number_of_steps=300, saver=saver)
示例10: testTrainWithNoneAsInitWhenUsingVarsRaisesError
# 需要导入模块: from tensorflow.python.training import gradient_descent [as 别名]
# 或者: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as 别名]
def testTrainWithNoneAsInitWhenUsingVarsRaisesError(self):
    """init_op=None with uninitialized variables must raise RuntimeError."""
    logdir = os.path.join(
        tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
    with ops.Graph().as_default():
        random_seed.set_random_seed(0)
        tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
        tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
        tf_predictions = LogisticClassifier(tf_inputs)
        loss_ops.log_loss(tf_predictions, tf_labels)
        total_loss = loss_ops.get_total_loss()
        optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
        train_op = learning.create_train_op(total_loss, optimizer)
        with self.assertRaises(RuntimeError):
            learning.train(train_op, logdir, init_op=None, number_of_steps=300)
示例11: testTrainWithLocalVariable
# 需要导入模块: from tensorflow.python.training import gradient_descent [as 别名]
# 或者: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as 别名]
def testTrainWithLocalVariable(self):
    """Training still converges when the model depends on a local variable."""
    logdir = os.path.join(
        tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
    with ops.Graph().as_default():
        random_seed.set_random_seed(0)
        tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
        tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
        # A local (non-trainable, non-global) multiplier in the forward pass.
        local_multiplier = variables_lib2.local_variable(1.0)
        tf_predictions = LogisticClassifier(tf_inputs) * local_multiplier
        loss_ops.log_loss(tf_predictions, tf_labels)
        total_loss = loss_ops.get_total_loss()
        optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
        train_op = learning.create_train_op(total_loss, optimizer)
        loss = learning.train(
            train_op, logdir, number_of_steps=300, log_every_n_steps=10)
    self.assertIsNotNone(loss)
    self.assertLess(loss, .015)
示例12: create_train_op
# 需要导入模块: from tensorflow.python.training import gradient_descent [as 别名]
# 或者: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as 别名]
def create_train_op(self, learning_rate=1.0, gradient_multiplier=1.0):
    """Builds a train op for the logistic classifier test fixture.

    Args:
      learning_rate: Learning rate for the GradientDescentOptimizer.
      gradient_multiplier: If != 1.0, every trainable variable's gradient is
        scaled by this factor via `gradient_multipliers`.

    Returns:
      The train op created by learning.create_train_op.
    """
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
    tf_predictions = LogisticClassifier(tf_inputs)
    loss_ops.log_loss(tf_predictions, tf_labels)
    total_loss = loss_ops.get_total_loss()
    optimizer = gradient_descent.GradientDescentOptimizer(
        learning_rate=learning_rate)
    if gradient_multiplier != 1.0:
        variables = variables_lib.trainable_variables()
        gradient_multipliers = {var: gradient_multiplier for var in variables}
    else:
        # None means "no per-variable scaling" to create_train_op.
        gradient_multipliers = None
    return learning.create_train_op(
        total_loss, optimizer, gradient_multipliers=gradient_multipliers)
示例13: _train_model
# 需要导入模块: from tensorflow.python.training import gradient_descent [as 别名]
# 或者: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as 别名]
def _train_model(self, checkpoint_dir, num_steps):
    """Trains a simple classification model.

    Note that the data has been configured such that after around 300 steps,
    the model has memorized the dataset (e.g. we can expect 100% accuracy).

    Args:
      checkpoint_dir: The directory where the checkpoint is written to.
      num_steps: The number of steps to train for.
    """
    with ops.Graph().as_default():
        random_seed.set_random_seed(0)
        tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
        tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
        tf_predictions = logistic_classifier(tf_inputs)
        loss = loss_ops.log_loss(tf_labels, tf_predictions)
        optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
        train_op = training.create_train_op(loss, optimizer)
        # Stop via hook rather than a step count argument.
        loss = training.train(
            train_op,
            checkpoint_dir,
            hooks=[basic_session_run_hooks.StopAtStepHook(num_steps)])
示例14: testGlobalStepNotIncrementedWhenSetToNone
# 需要导入模块: from tensorflow.python.training import gradient_descent [as 别名]
# 或者: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as 别名]
def testGlobalStepNotIncrementedWhenSetToNone(self):
    """With global_step=None the step counter must stay at 0 after training."""
    with ops.Graph().as_default():
        random_seed.set_random_seed(0)
        tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
        tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
        tf_predictions = batchnorm_classifier(tf_inputs)
        loss = losses.log_loss(tf_labels, tf_predictions)
        optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
        train_op = training.create_train_op(loss, optimizer, global_step=None)
        global_step = variables_lib.get_or_create_global_step()
        with self.cached_session() as session:
            # Initialize all variables
            session.run(variables_lib2.global_variables_initializer())
            for _ in range(10):
                session.run(train_op)
            # Since train_op doesn't use global_step it shouldn't change.
            self.assertAllClose(global_step.eval(), 0)
示例15: testTrainWithNoInitAssignCanAchieveZeroLoss
# 需要导入模块: from tensorflow.python.training import gradient_descent [as 别名]
# 或者: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as 别名]
def testTrainWithNoInitAssignCanAchieveZeroLoss(self):
    """Hook-driven training of a batch-norm classifier reaches loss < 0.1."""
    with ops.Graph().as_default():
        random_seed.set_random_seed(0)
        tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
        tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)
        tf_predictions = batchnorm_classifier(tf_inputs)
        losses.log_loss(tf_labels, tf_predictions)
        total_loss = losses.get_total_loss()
        optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
        train_op = training.create_train_op(total_loss, optimizer)
        # No checkpoint dir; summaries and checkpoints explicitly disabled.
        loss = training.train(
            train_op,
            None,
            hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=300)],
            save_summaries_steps=None,
            save_checkpoint_secs=None)
        self.assertLess(loss, .1)