

Python gradient_descent.GradientDescentOptimizer Method Code Examples

This article collects typical usage examples of the tensorflow.python.training.gradient_descent.GradientDescentOptimizer method in Python. If you are wondering what gradient_descent.GradientDescentOptimizer does or how to use it in practice, the curated code examples below may help. You can also explore other usage examples from the module it belongs to, tensorflow.python.training.gradient_descent.


The following presents 15 code examples of the gradient_descent.GradientDescentOptimizer method, sorted by popularity by default.
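Before turning to the examples, here is a minimal, self-contained sketch of the typical workflow (assuming TF 1.x graph mode; the names w and loss are illustrative and do not come from the examples below): construct the optimizer with a learning rate, call minimize() on a scalar loss to get a training op, and run that op repeatedly in a session.

from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables
from tensorflow.python.training import gradient_descent

with ops.Graph().as_default():
  w = variables.Variable(5.0, name="w")  # illustrative parameter
  loss = w * w  # simple quadratic loss with its minimum at w == 0
  # minimize() combines compute_gradients() and apply_gradients().
  train_op = gradient_descent.GradientDescentOptimizer(0.1).minimize(loss)

  with session.Session() as sess:
    sess.run(variables.global_variables_initializer())
    for _ in range(100):
      sess.run(train_op)  # each run takes one gradient step
    print(sess.run(w))  # w should now be close to 0.0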

Example 1: setUp

# Required module: from tensorflow.python.training import gradient_descent [as alias]
# Alternatively: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as alias]
def setUp(self):
    self.a = variables.Variable(10.0, name="a")
    self.b = variables.Variable(20.0, name="b")

    self.c = math_ops.add(self.a, self.b, name="c")  # Should be 30.0.
    self.d = math_ops.subtract(self.a, self.c, name="d")  # Should be -20.0.
    self.e = math_ops.multiply(self.c, self.d, name="e")  # Should be -600.0.

    self.ph = array_ops.placeholder(dtypes.float32, shape=(2, 2), name="ph")
    self.f = math_ops.multiply(self.e, self.ph, name="f")

    self.opt = gradient_descent.GradientDescentOptimizer(0.1).minimize(
        self.e, name="opt")

    self.sess = session.Session()

    self.sess.run(self.a.initializer)
    self.sess.run(self.b.initializer) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 20, Source: stepper_cli_test.py

Example 2: testTrainWithNoInitAssignCanAchieveZeroLoss

# Required module: from tensorflow.python.training import gradient_descent [as alias]
# Alternatively: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as alias]
def testTrainWithNoInitAssignCanAchieveZeroLoss(self):
    logdir = os.path.join(
        tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
    g = ops.Graph()
    with g.as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = BatchNormClassifier(tf_inputs)
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = learning.create_train_op(total_loss, optimizer)

      loss = learning.train(
          train_op, logdir, number_of_steps=300, log_every_n_steps=10)
      self.assertLess(loss, .1) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 22, Source: learning_test.py

Example 3: testUseGlobalStep

# Required module: from tensorflow.python.training import gradient_descent [as alias]
# Alternatively: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as alias]
def testUseGlobalStep(self):
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = BatchNormClassifier(tf_inputs)
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = learning.create_train_op(total_loss, optimizer)

      global_step = variables_lib2.get_or_create_global_step()

      with session.Session() as sess:
        # Initialize all variables
        sess.run(variables_lib.global_variables_initializer())

        for _ in range(10):
          sess.run([train_op])
        global_step = global_step.eval()
        # After 10 updates global_step should be 10.
        self.assertAllClose(global_step, 10) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 26, Source: learning_test.py

Example 4: testNoneGlobalStep

# Required module: from tensorflow.python.training import gradient_descent [as alias]
# Alternatively: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as alias]
def testNoneGlobalStep(self):
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = BatchNormClassifier(tf_inputs)
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = learning.create_train_op(
          total_loss, optimizer, global_step=None)

      global_step = variables_lib2.get_or_create_global_step()

      with session.Session() as sess:
        # Initialize all variables
        sess.run(variables_lib.global_variables_initializer())

        for _ in range(10):
          sess.run([train_op])
        global_step = global_step.eval()
        # Since train_op doesn't use global_step, it shouldn't change.
        self.assertAllClose(global_step, 0) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 27, Source: learning_test.py

Example 5: testTrainWithNonDefaultGraph

# Required module: from tensorflow.python.training import gradient_descent [as alias]
# Alternatively: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as alias]
def testTrainWithNonDefaultGraph(self):
    logdir = os.path.join(
        tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
    g = ops.Graph()
    with g.as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = learning.create_train_op(total_loss, optimizer)

    loss = learning.train(
        train_op, logdir, number_of_steps=300, log_every_n_steps=10, graph=g)
    self.assertIsNotNone(loss)
    self.assertLess(loss, .015) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 23, Source: learning_test.py

Example 6: testTrainWithNoneAsLogdir

# Required module: from tensorflow.python.training import gradient_descent [as alias]
# Alternatively: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as alias]
def testTrainWithNoneAsLogdir(self):
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = learning.create_train_op(total_loss, optimizer)

      loss = learning.train(
          train_op, None, number_of_steps=300, log_every_n_steps=10)
    self.assertIsNotNone(loss)
    self.assertLess(loss, .015) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 20, Source: learning_test.py

Example 7: testTrainWithNoneAsLogdirWhenUsingSummariesRaisesError

# Required module: from tensorflow.python.training import gradient_descent [as alias]
# Alternatively: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as alias]
def testTrainWithNoneAsLogdirWhenUsingSummariesRaisesError(self):
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()
      summary.scalar('total_loss', total_loss)

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = learning.create_train_op(total_loss, optimizer)
      summary_op = summary.merge_all()

      with self.assertRaises(ValueError):
        learning.train(
            train_op, None, number_of_steps=300, summary_op=summary_op) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 21, Source: learning_test.py

Example 8: testTrainWithNoneAsLogdirWhenUsingTraceRaisesError

# Required module: from tensorflow.python.training import gradient_descent [as alias]
# Alternatively: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as alias]
def testTrainWithNoneAsLogdirWhenUsingTraceRaisesError(self):
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = learning.create_train_op(total_loss, optimizer)

      with self.assertRaises(ValueError):
        learning.train(
            train_op, None, number_of_steps=300, trace_every_n_steps=10) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 19, Source: learning_test.py

Example 9: testTrainWithNoneAsLogdirWhenUsingSaverRaisesError

# Required module: from tensorflow.python.training import gradient_descent [as alias]
# Alternatively: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as alias]
def testTrainWithNoneAsLogdirWhenUsingSaverRaisesError(self):
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = learning.create_train_op(total_loss, optimizer)
      saver = saver_lib.Saver()

      with self.assertRaises(ValueError):
        learning.train(
            train_op, None, init_op=None, number_of_steps=300, saver=saver) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 20, Source: learning_test.py

Example 10: testTrainWithNoneAsInitWhenUsingVarsRaisesError

# Required module: from tensorflow.python.training import gradient_descent [as alias]
# Alternatively: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as alias]
def testTrainWithNoneAsInitWhenUsingVarsRaisesError(self):
    logdir = os.path.join(
        tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = LogisticClassifier(tf_inputs)
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = learning.create_train_op(total_loss, optimizer)

      with self.assertRaises(RuntimeError):
        learning.train(train_op, logdir, init_op=None, number_of_steps=300) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 20, Source: learning_test.py

Example 11: testTrainWithLocalVariable

# Required module: from tensorflow.python.training import gradient_descent [as alias]
# Alternatively: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as alias]
def testTrainWithLocalVariable(self):
    logdir = os.path.join(
        tempfile.mkdtemp(prefix=self.get_temp_dir()), 'tmp_logs')
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      local_multiplier = variables_lib2.local_variable(1.0)

      tf_predictions = LogisticClassifier(tf_inputs) * local_multiplier
      loss_ops.log_loss(tf_predictions, tf_labels)
      total_loss = loss_ops.get_total_loss()

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = learning.create_train_op(total_loss, optimizer)

      loss = learning.train(
          train_op, logdir, number_of_steps=300, log_every_n_steps=10)
      self.assertIsNotNone(loss)
      self.assertLess(loss, .015) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 24, Source: learning_test.py

Example 12: create_train_op

# Required module: from tensorflow.python.training import gradient_descent [as alias]
# Alternatively: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as alias]
def create_train_op(self, learning_rate=1.0, gradient_multiplier=1.0):
    tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
    tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

    tf_predictions = LogisticClassifier(tf_inputs)
    loss_ops.log_loss(tf_predictions, tf_labels)
    total_loss = loss_ops.get_total_loss()

    optimizer = gradient_descent.GradientDescentOptimizer(
        learning_rate=learning_rate)

    if gradient_multiplier != 1.0:
      variables = variables_lib.trainable_variables()
      gradient_multipliers = {var: gradient_multiplier for var in variables}
    else:
      gradient_multipliers = None

    return learning.create_train_op(
        total_loss, optimizer, gradient_multipliers=gradient_multipliers) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 21, Source: learning_test.py

Example 13: _train_model

# Required module: from tensorflow.python.training import gradient_descent [as alias]
# Alternatively: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as alias]
def _train_model(self, checkpoint_dir, num_steps):
    """Trains a simple classification model.

    Note that the data has been configured such that after around 300 steps,
    the model has memorized the dataset (i.e., we can expect 100% accuracy).

    Args:
      checkpoint_dir: The directory where the checkpoint is written to.
      num_steps: The number of steps to train for.
    """
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = logistic_classifier(tf_inputs)
      loss = loss_ops.log_loss(tf_labels, tf_predictions)

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      train_op = training.create_train_op(loss, optimizer)

      loss = training.train(
          train_op,
          checkpoint_dir,
          hooks=[basic_session_run_hooks.StopAtStepHook(num_steps)]) 
Developer: google-research, Project: tf-slim, Lines: 27, Source: evaluation_test.py

Example 14: testGlobalStepNotIncrementedWhenSetToNone

# Required module: from tensorflow.python.training import gradient_descent [as alias]
# Alternatively: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as alias]
def testGlobalStepNotIncrementedWhenSetToNone(self):
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = batchnorm_classifier(tf_inputs)
      loss = losses.log_loss(tf_labels, tf_predictions)
      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)
      train_op = training.create_train_op(loss, optimizer, global_step=None)

      global_step = variables_lib.get_or_create_global_step()

      with self.cached_session() as session:
        # Initialize all variables
        session.run(variables_lib2.global_variables_initializer())

        for _ in range(10):
          session.run(train_op)

        # Since train_op doesn't use global_step, it shouldn't change.
        self.assertAllClose(global_step.eval(), 0) 
Developer: google-research, Project: tf-slim, Lines: 24, Source: training_test.py

Example 15: testTrainWithNoInitAssignCanAchieveZeroLoss

# Required module: from tensorflow.python.training import gradient_descent [as alias]
# Alternatively: from tensorflow.python.training.gradient_descent import GradientDescentOptimizer [as alias]
def testTrainWithNoInitAssignCanAchieveZeroLoss(self):
    with ops.Graph().as_default():
      random_seed.set_random_seed(0)
      tf_inputs = constant_op.constant(self._inputs, dtype=dtypes.float32)
      tf_labels = constant_op.constant(self._labels, dtype=dtypes.float32)

      tf_predictions = batchnorm_classifier(tf_inputs)
      losses.log_loss(tf_labels, tf_predictions)
      total_loss = losses.get_total_loss()

      optimizer = gradient_descent.GradientDescentOptimizer(learning_rate=1.0)

      train_op = training.create_train_op(total_loss, optimizer)

      loss = training.train(
          train_op,
          None,
          hooks=[basic_session_run_hooks.StopAtStepHook(num_steps=300)],
          save_summaries_steps=None,
          save_checkpoint_secs=None)
      self.assertLess(loss, .1) 
Developer: google-research, Project: tf-slim, Lines: 23, Source: training_test.py


Note: The tensorflow.python.training.gradient_descent.GradientDescentOptimizer examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers, and copyright remains with the original authors. Before distributing or using this code, please consult the License of the corresponding project; do not reproduce without permission.