

Python gradient_descent.GradientDescentOptimizer Code Examples

This article collects typical usage examples of GradientDescentOptimizer from the Python module tensorflow.python.training.gradient_descent. If you are wondering what gradient_descent.GradientDescentOptimizer does, how to call it, or are looking for concrete usage, the curated code samples below may help. You can also explore further usage examples from the tensorflow.python.training.gradient_descent module.


The following sections present 15 code examples of gradient_descent.GradientDescentOptimizer, sorted by popularity by default.
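Before the examples, here is a minimal, self-contained sketch of the graph-mode pattern they all share: build a loss tensor, construct a GradientDescentOptimizer with a learning rate, call minimize to obtain a train op, and run it in a session. The variable w, the toy quadratic loss, and the step count below are illustrative choices, not taken from any of the examples.

# Minimal sketch of the common pattern; names and loss are illustrative.
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.training import gradient_descent

with ops.Graph().as_default():
    w = variables.Variable(5.0, name="w")
    loss = math_ops.square(w - 3.0)  # minimized at w == 3.0
    # Each run of train_op applies one SGD step: w <- w - 0.1 * d(loss)/dw.
    train_op = gradient_descent.GradientDescentOptimizer(0.1).minimize(loss)

    with session.Session() as sess:
        sess.run(variables.global_variables_initializer())
        for _ in range(100):
            sess.run(train_op)
        print(sess.run(w))  # approaches 3.0

The tests below follow this same shape, differing mainly in how the loss is built (slim loss_ops/losses), how the train op is wrapped (learning.create_train_op / training.create_train_op), and how the training loop is driven.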

Example 1: setUp

# Required import: from tensorflow.python.training import gradient_descent [as alias]
# Or: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as alias]
def setUp(self):
    self.a = variables.Variable(10.0, name="a")
    self.b = variables.Variable(20.0, name="b")

    self.c = math_ops.add(self.a, self.b, name="c")  # Should be 30.0.
    self.d = math_ops.subtract(self.a, self.c, name="d")  # Should be -20.0.
    self.e = math_ops.multiply(self.c, self.d, name="e")  # Should be -600.0.

    self.ph = array_ops.placeholder(dtypes.float32, shape=(2, 2), name="ph")
    self.f = math_ops.multiply(self.e, self.ph, name="f")

    self.opt = gradient_descent.GradientDescentOptimizer(0.1).minimize(
        self.e, name="opt")

    self.sess = session.Session()

    self.sess.run(self.a.initializer)
    self.sess.run(self.b.initializer) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 20, Source: stepper_cli_test.py

Example 2: testTrainWithNoInitAssignCanAchieveZeroLoss

# Required import: from tensorflow.python.training import gradient_descent [as alias]
# Or: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as alias]
def testTrainWithNoInitAssignCanAchieveZeroLoss(self):
    logdir = os.path.join(
        tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
    g = ops.Graph()
    with g.as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = BatchNormClassifier(tf_inputs)
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = learning.create_train_op(total_loss, optimizer)

      loss = learning.train(
          train_op, logdir, number_of_steps=300, log_every_n_steps=10)
      self.assertLess(loss, .1) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 22, Source: learning_test.py

Example 3: testUseGlobalStep

# Required import: from tensorflow.python.training import gradient_descent [as alias]
# Or: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as alias]
def testUseGlobalStep(self):
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = BatchNormClassifier(tf_inputs)
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = learning.create_train_op(total_loss, optimizer)

      global_step = variables_lib2.get_or_create_global_step()

      with session.Session() as sess:
        # Initialize all variables
        sess.run(variables_lib.global_variables_initializer())

        for _ in range(10):
          sess.run([train_op])
        global_step = global_step.eval()
        # After 10 updates global_step should be 10.
        self.assertAllClose(global_step, 10) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 26, Source: learning_test.py

Example 4: testNoneGlobalStep

# Required import: from tensorflow.python.training import gradient_descent [as alias]
# Or: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as alias]
def testNoneGlobalStep(self):
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = BatchNormClassifier(tf_inputs)
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = learning.create_train_op(
          total_loss, optimizer, global_step=None)

      global_step = variables_lib2.get_or_create_global_step()

      with session.Session() as sess:
        # Initialize all variables
        sess.run(variables_lib.global_variables_initializer())

        for _ in range(10):
          sess.run([train_op])
        global_step = global_step.eval()
        # Since train_op doesn't use global_step, it shouldn't change.
        self.assertAllClose(global_step, 0) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 27, Source: learning_test.py

Example 5: testTrainWithNonDefaultGraph

# Required import: from tensorflow.python.training import gradient_descent [as alias]
# Or: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as alias]
def testTrainWithNonDefaultGraph(self):
    logdir = os.path.join(
        tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
    g = ops.Graph()
    with g.as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = learning.create_train_op(total_loss, optimizer)

    loss = learning.train(
        train_op, logdir, number_of_steps=300, log_every_n_steps=10, graph=g)
    self.assertIsNotNone(loss)
    self.assertLess(loss, .015) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 23, Source: learning_test.py

Example 6: testTrainWithNoneAsLogdir

# Required import: from tensorflow.python.training import gradient_descent [as alias]
# Or: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as alias]
def testTrainWithNoneAsLogdir(self):
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = learning.create_train_op(total_loss, optimizer)

      loss = learning.train(
          train_op, None, number_of_steps=300, log_every_n_steps=10)
    self.assertIsNotNone(loss)
    self.assertLess(loss, .015) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 20, Source: learning_test.py

Example 7: testTrainWithNoneAsLogdirWhenUsingSummariesRaisesError

# Required import: from tensorflow.python.training import gradient_descent [as alias]
# Or: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as alias]
def testTrainWithNoneAsLogdirWhenUsingSummariesRaisesError(self):
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()
      summary.scalar('total_loss', total_loss)

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = learning.create_train_op(total_loss, optimizer)
      summary_op = summary.merge_all()

      with self.assertRaises(ValueError):
        learning.train(
            train_op, None, number_of_steps=300, summary_op=summary_op) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 21, Source: learning_test.py

Example 8: testTrainWithNoneAsLogdirWhenUsingTraceRaisesError

# Required import: from tensorflow.python.training import gradient_descent [as alias]
# Or: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as alias]
def testTrainWithNoneAsLogdirWhenUsingTraceRaisesError(self):
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = learning.create_train_op(total_loss, optimizer)

      with self.assertRaises(ValueError):
        learning.train(
            train_op, None, number_of_steps=300, trace_every_n_steps=10) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 19, Source: learning_test.py

Example 9: testTrainWithNoneAsLogdirWhenUsingSaverRaisesError

# Required import: from tensorflow.python.training import gradient_descent [as alias]
# Or: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as alias]
def testTrainWithNoneAsLogdirWhenUsingSaverRaisesError(self):
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = learning.create_train_op(total_loss, optimizer)
      saver = saver_lib.Saver()

      with self.assertRaises(ValueError):
        learning.train(
            train_op, None, init_op=None, number_of_steps=300, saver=saver) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 20, Source: learning_test.py

Example 10: testTrainWithNoneAsInitWhenUsingVarsRaisesError

# Required import: from tensorflow.python.training import gradient_descent [as alias]
# Or: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as alias]
def testTrainWithNoneAsInitWhenUsingVarsRaisesError(self):
    logdir = os.path.join(
        tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = learning.create_train_op(total_loss, optimizer)

      with self.assertRaises(RuntimeError):
        learning.train(train_op, logdir, init_op=None, number_of_steps=300) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 20, Source: learning_test.py

Example 11: testTrainWithLocalVariable

# Required import: from tensorflow.python.training import gradient_descent [as alias]
# Or: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as alias]
def testTrainWithLocalVariable(self):
    logdir = os.path.join(
        tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      local_multiplier = variables_lib2.local_variable(1.0)

      tf_predictions = LogisticClassifier(tf_inputs) * local_multiplier
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = learning.create_train_op(total_loss, optimizer)

      loss = learning.train(
          train_op, logdir, number_of_steps=300, log_every_n_steps=10)
      self.assertIsNotNone(loss)
      self.assertLess(loss, .015) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 24, Source: learning_test.py

Example 12: create_train_op

# Required import: from tensorflow.python.training import gradient_descent [as alias]
# Or: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as alias]
def create_train_op(self, learning_rate=1.0, gradient_multiplier=1.0):
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

    tf_predictions = LogisticClassifier(tf_inputs)
    loss_ops.log_loss(tf_predictions, tf_labels)
    total_loss = loss_ops.get_total_loss()

    optimizer = gradient_descent.GradientDescentOptimizer(
        learning_rate=learning_rate)

    if gradient_multiplier != 1.0:
      variables = variables_lib.trainable_variables()
      gradient_multipliers = {var: gradient_multiplier for var in variables}
    else:
      gradient_multipliers = None

    return learning.create_train_op(
        total_loss, optimizer, gradient_multipliers=gradient_multipliers) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 21, Source: learning_test.py

Example 13: _train_model

# Required import: from tensorflow.python.training import gradient_descent [as alias]
# Or: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as alias]
def _train_model(self, checkpoint_dir, num_steps):
    """Trains a simple classification model.

    Note that the data has been configured such that after around 300 steps,
    the model has memorized the dataset (i.e., we can expect 100% accuracy).

    Args:
      checkpoint_dir: The directory where the checkpoint is written to.
      num_steps: The number of steps to train for.
    """
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = logistic_classifier(tf_inputs)
      loss = loss_ops.log_loss(tf_labels, tf_predictions)

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      train_op = training.create_train_op(loss, optimizer)

      loss = training.train(
          train_op,
          checkpoint_dir,
          hooks=[basic_session_run_hooks.StopAtStepHook(num_steps)]) 
Developer: google-research, Project: tf-slim, Lines: 27, Source: evaluation_test.py

Example 14: testGlobalStepNotIncrementedWhenSetToNone

# Required import: from tensorflow.python.training import gradient_descent [as alias]
# Or: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as alias]
def testGlobalStepNotIncrementedWhenSetToNone(self):
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = batchnorm_classifier(tf_inputs)
      loss = losses.log_loss(tf_labels, tf_predictions)
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      train_op = training.create_train_op(loss, optimizer, global_step=None)

      global_step = variables_lib.get_or_create_global_step()

      with self.cached_session() as session:
        # Initialize all variables
        session.run(variables_lib2.global_variables_initializer())

        for _ in range(10):
          session.run(train_op)

        # Since train_op doesn't use global_step, it shouldn't change.
        self.assertAllClose(global_step.eval(), 0) 
Developer: google-research, Project: tf-slim, Lines: 24, Source: training_test.py

Example 15: testTrainWithNoInitAssignCanAchieveZeroLoss

# Required import: from tensorflow.python.training import gradient_descent [as alias]
# Or: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as alias]
def testTrainWithNoInitAssignCanAchieveZeroLoss(self):
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = batchnorm_classifier(tf_inputs)
      losses.log_loss(tf_labels, tf_predictions)
      total_loss = losses.get_total_loss()

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = training.create_train_op(total_loss, optimizer)

      loss = training.train(
          train_op,
          None,
          hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=300)],
          save_summaries_steps=None,
          save_checkpoint_secs=None)
      self.assertLess(loss, .1) 
Developer: google-research, Project: tf-slim, Lines: 23, Source: training_test.py


Note: The tensorflow.python.training.gradient_descent.GradientDescentOptimizer examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; copyright remains with the original authors. Consult each project's license before redistributing or reusing the code, and do not republish without permission.