當前位置: 首頁>>代碼示例>>Python>>正文


Python learning_schedules.exponential_decay_with_burnin方法代碼示例

本文整理匯總了Python中object_detection.utils.learning_schedules.exponential_decay_with_burnin方法的典型用法代碼示例。如果您正苦於以下問題：Python learning_schedules.exponential_decay_with_burnin方法的具體用法？Python learning_schedules.exponential_decay_with_burnin怎麽用？Python learning_schedules.exponential_decay_with_burnin使用的例子？那麽，這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在object_detection.utils.learning_schedules的用法示例。


在下文中一共展示了learning_schedules.exponential_decay_with_burnin方法的5個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。

示例1: testExponentialDecayWithBurnin

# 需要導入模塊: from object_detection.utils import learning_schedules [as 別名]
# 或者: from object_detection.utils.learning_schedules import exponential_decay_with_burnin [as 別名]
def testExponentialDecayWithBurnin(self):
  """Feeds global steps 0..7 and checks burn-in followed by decay values.

  With a burn-in rate of .5 for 2 steps, a base rate of 1.0, and a decay of
  .1 every 3 steps, the schedule should produce
  [.5, .5, 1, .1, .1, .1, .01, .01].
  """
  step_tensor = tf.placeholder(tf.int32, [])
  # Positional args: base=1.0, decay_steps=3, decay_factor=.1,
  # burnin_learning_rate=.5, burnin_steps=2.
  schedule = learning_schedules.exponential_decay_with_burnin(
      step_tensor, 1.0, 3, .1, .5, 2)
  expected_rates = [.5, .5, 1, .1, .1, .1, .01, .01]
  with self.test_session() as sess:
    computed_rates = [
        sess.run(schedule, feed_dict={step_tensor: step})
        for step in range(8)
    ]
    self.assertAllClose(computed_rates, expected_rates)
開發者ID:ringringyi,項目名稱:DOTA_models,代碼行數:20,代碼來源:learning_schedules_test.py

示例2: testExponentialDecayWithBurnin

# 需要導入模塊: from object_detection.utils import learning_schedules [as 別名]
# 或者: from object_detection.utils.learning_schedules import exponential_decay_with_burnin [as 別名]
def testExponentialDecayWithBurnin(self):
  """Checks burn-in, decay, and the min_learning_rate floor at step 8."""

  def graph_fn(global_step):
    # Positional args: base=1.0, decay_steps=3, decay_factor=.1,
    # burnin_learning_rate=.5, burnin_steps=2, min_learning_rate=.05.
    schedule = learning_schedules.exponential_decay_with_burnin(
        global_step, 1.0, 3, .1, .5, 2, .05)
    # The returned tensor should be named for TensorBoard summaries.
    assert schedule.op.name.endswith('learning_rate')
    return (schedule,)

  computed_rates = [
      self.execute(graph_fn, [np.array(step).astype(np.int64)])
      for step in range(9)
  ]
  expected_rates = [.5, .5, 1, 1, 1, .1, .1, .1, .05]
  self.assertAllClose(computed_rates, expected_rates, rtol=1e-4)
開發者ID:ahmetozlu,項目名稱:vehicle_counting_tensorflow,代碼行數:23,代碼來源:learning_schedules_test.py

示例3: testExponentialDecayWithBurnin

# 需要導入模塊: from object_detection.utils import learning_schedules [as 別名]
# 或者: from object_detection.utils.learning_schedules import exponential_decay_with_burnin [as 別名]
def testExponentialDecayWithBurnin(self):
  """Checks burn-in then exponential decay without a learning-rate floor."""

  def graph_fn(global_step):
    # Positional args: base=1.0, decay_steps=3, decay_factor=.1,
    # burnin_learning_rate=.5, burnin_steps=2.
    schedule = learning_schedules.exponential_decay_with_burnin(
        global_step, 1.0, 3, .1, .5, 2)
    # The returned tensor should be named for TensorBoard summaries.
    assert schedule.op.name.endswith('learning_rate')
    return (schedule,)

  computed_rates = [
      self.execute(graph_fn, [np.array(step).astype(np.int64)])
      for step in range(8)
  ]
  expected_rates = [.5, .5, 1, .1, .1, .1, .01, .01]
  self.assertAllClose(computed_rates, expected_rates, rtol=1e-4)
開發者ID:cagbal,項目名稱:ros_people_object_detection_tensorflow,代碼行數:21,代碼來源:learning_schedules_test.py

示例4: testExponentialDecayWithBurnin

# 需要導入模塊: from object_detection.utils import learning_schedules [as 別名]
# 或者: from object_detection.utils.learning_schedules import exponential_decay_with_burnin [as 別名]
def testExponentialDecayWithBurnin(self):
  """Checks burn-in then exponential decay schedule values for steps 0..7."""

  def graph_fn(global_step):
    # Positional args: base=1.0, decay_steps=3, decay_factor=.1,
    # burnin_learning_rate=.5, burnin_steps=2.
    schedule = learning_schedules.exponential_decay_with_burnin(
        global_step, 1.0, 3, .1, .5, 2)
    return (schedule,)

  computed_rates = [
      self.execute(graph_fn, [np.array(step).astype(np.int64)])
      for step in range(8)
  ]
  expected_rates = [.5, .5, 1, .1, .1, .1, .01, .01]
  self.assertAllClose(computed_rates, expected_rates, rtol=1e-4)
開發者ID:scorelab,項目名稱:Elphas,代碼行數:20,代碼來源:learning_schedules_test.py

示例5: _create_learning_rate

# 需要導入模塊: from object_detection.utils import learning_schedules [as 別名]
# 或者: from object_detection.utils.learning_schedules import exponential_decay_with_burnin [as 別名]
def _create_learning_rate(learning_rate_config):
  """Create optimizer learning rate based on config.

  Dispatches on the oneof field set in the proto and builds the matching
  learning-rate tensor/schedule.

  Args:
    learning_rate_config: A LearningRate proto message.

  Returns:
    A learning rate.

  Raises:
    ValueError: when using an unsupported input data type.
  """
  rate_type = learning_rate_config.WhichOneof('learning_rate')

  if rate_type == 'constant_learning_rate':
    cfg = learning_rate_config.constant_learning_rate
    return tf.constant(cfg.learning_rate, dtype=tf.float32,
                       name='learning_rate')

  if rate_type == 'exponential_decay_learning_rate':
    cfg = learning_rate_config.exponential_decay_learning_rate
    return learning_schedules.exponential_decay_with_burnin(
        tf.train.get_or_create_global_step(),
        cfg.initial_learning_rate,
        cfg.decay_steps,
        cfg.decay_factor,
        burnin_learning_rate=cfg.burnin_learning_rate,
        burnin_steps=cfg.burnin_steps,
        min_learning_rate=cfg.min_learning_rate,
        staircase=cfg.staircase)

  if rate_type == 'manual_step_learning_rate':
    cfg = learning_rate_config.manual_step_learning_rate
    if not cfg.schedule:
      raise ValueError('Empty learning rate schedule.')
    # Boundaries come from the schedule entries; the rate sequence starts
    # with the initial rate followed by one rate per schedule entry.
    step_boundaries = [entry.step for entry in cfg.schedule]
    rate_sequence = [cfg.initial_learning_rate]
    rate_sequence += [entry.learning_rate for entry in cfg.schedule]
    return learning_schedules.manual_stepping(
        tf.train.get_or_create_global_step(), step_boundaries,
        rate_sequence, cfg.warmup)

  if rate_type == 'cosine_decay_learning_rate':
    cfg = learning_rate_config.cosine_decay_learning_rate
    return learning_schedules.cosine_decay_with_warmup(
        tf.train.get_or_create_global_step(),
        cfg.learning_rate_base,
        cfg.total_steps,
        cfg.warmup_learning_rate,
        cfg.warmup_steps,
        cfg.hold_base_rate_steps)

  raise ValueError('Learning_rate %s not supported.' % rate_type)
開發者ID:ahmetozlu,項目名稱:vehicle_counting_tensorflow,代碼行數:58,代碼來源:optimizer_builder.py


注:本文中的object_detection.utils.learning_schedules.exponential_decay_with_burnin方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。