

Python utils.rms Method Code Examples

This article collects typical usage examples of the Python utils.rms method. If you are unsure what utils.rms does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also explore further usage examples from the utils module that this method belongs to.


Five code examples of the utils.rms method are shown below, ordered by popularity by default.
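
All five examples come from REBAR-style gradient-estimator code and refer to the helper as U.rms after importing utils as U. The helper itself is not reproduced on this page; a minimal sketch consistent with how it is used in the examples (the root mean square of a tensor, together with the center helper that examples 1 and 3 also call) might look like the following, assuming TensorFlow 1.x:

import tensorflow as tf

def rms(x):
    # Root mean square of a tensor; the examples use it to monitor the
    # scale of the (centered) REINFORCE learning signal.
    return tf.sqrt(tf.reduce_mean(tf.square(x)))

def center(x):
    # Subtract the mean so the learning signal is zero-centered.
    return x - tf.reduce_mean(x)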

Example 1: _create_loss

# Required import: import utils [aliased as U in the example]
# Or: from utils import rms
def _create_loss(self):
    # Hard loss
    logQHard, samples = self._recognition_network()
    reinforce_learning_signal, reinforce_model_grad = self._generator_network(samples, logQHard)
    logQHard = tf.add_n(logQHard)

    # REINFORCE
    learning_signal = tf.stop_gradient(U.center(reinforce_learning_signal))
    self.optimizerLoss = -(learning_signal*logQHard +
                           reinforce_model_grad)
    # Track the mean learning signal and its RMS for monitoring.
    self.lHat = list(map(tf.reduce_mean, [
        reinforce_learning_signal,
        U.rms(learning_signal),
    ]))

    return reinforce_learning_signal 
Developer ID: rky0930, Project: yolo_v2, Lines of code: 18, Source: rebar.py
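
lHat only collects scalar diagnostics; it does not feed the training loss. A hypothetical monitoring step (the model and sess objects below are assumptions for illustration and are not part of the snippet) could evaluate it like this:

mean_signal, signal_rms = sess.run(model.lHat)
print('mean learning signal %.4f, RMS of centered signal %.4f'
      % (mean_signal, signal_rms))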

Example 2: get_nvil_gradient

# Required import: import utils [aliased as U in the example]
# Or: from utils import rms
def get_nvil_gradient(self):
    """Compute the NVIL gradient."""
    # Hard loss
    logQHard, samples = self._recognition_network()
    ELBO, reinforce_model_grad = self._generator_network(samples, logQHard)
    logQHard = tf.add_n(logQHard)

    # Add baselines (no variance normalization)
    learning_signal = tf.stop_gradient(ELBO) - self._create_baseline()

    # Set up losses
    self.baseline_loss.append(tf.square(learning_signal))
    optimizerLoss = -(tf.stop_gradient(learning_signal)*logQHard +
                      reinforce_model_grad)
    optimizerLoss = tf.reduce_mean(optimizerLoss)

    nvil_gradient = self.optimizer_class.compute_gradients(optimizerLoss)
    debug = {
        'ELBO': ELBO,
        'RMS of centered learning signal': U.rms(learning_signal),
    }

    return nvil_gradient, debug 
Developer ID: rky0930, Project: yolo_v2, Lines of code: 25, Source: rebar.py
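
get_nvil_gradient returns TF1-style (gradient, variable) pairs together with a dict of debug tensors. A hypothetical training step that wires them up (model and sess are again assumptions, not part of the snippet) might look like:

grads_and_vars, debug = model.get_nvil_gradient()
train_op = model.optimizer_class.apply_gradients(grads_and_vars)
_, debug_vals = sess.run([train_op, debug])
print('mean ELBO %.4f, RMS of centered learning signal %.4f'
      % (debug_vals['ELBO'].mean(),
         debug_vals['RMS of centered learning signal']))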

Example 3: _create_loss

# Required import: import utils [aliased as U in the example]
# Or: from utils import rms
def _create_loss(self):
    # Hard loss
    logQHard, samples = self._recognition_network()
    reinforce_learning_signal, reinforce_model_grad = self._generator_network(samples, logQHard)
    logQHard = tf.add_n(logQHard)

    # REINFORCE
    learning_signal = tf.stop_gradient(center(reinforce_learning_signal))
    self.optimizerLoss = -(learning_signal*logQHard +
                           reinforce_model_grad)
    self.lHat = list(map(tf.reduce_mean, [
        reinforce_learning_signal,
        U.rms(learning_signal),
    ]))

    return reinforce_learning_signal 
Developer ID: loicmarie, Project: hands-detection, Lines of code: 18, Source: rebar.py

Example 4: get_simple_muprop_gradient

# Required import: import utils [aliased as U in the example]
# Or: from utils import rms
def get_simple_muprop_gradient(self):
    """ Computes the simple muprop gradient.

    This muprop control variate does not include the linear term.
    """
    # Hard loss
    logQHard, hardSamples = self._recognition_network()
    hardELBO, reinforce_model_grad = self._generator_network(hardSamples, logQHard)

    # Soft loss
    logQ, muSamples = self._recognition_network(sampler=self._mean_sample)
    muELBO, _ = self._generator_network(muSamples, logQ)

    scaling_baseline = self._create_eta(collection='BASELINE')
    learning_signal = (hardELBO
                       - scaling_baseline * muELBO
                       - self._create_baseline())
    self.baseline_loss.append(tf.square(learning_signal))

    optimizerLoss = -(tf.stop_gradient(learning_signal) * tf.add_n(logQHard)
                      + reinforce_model_grad)
    optimizerLoss = tf.reduce_mean(optimizerLoss)

    simple_muprop_gradient = (self.optimizer_class.
                              compute_gradients(optimizerLoss))
    debug = {
        'ELBO': hardELBO,
        'muELBO': muELBO,
        'RMS': U.rms(learning_signal),
    }

    return simple_muprop_gradient, debug 
Developer ID: rky0930, Project: yolo_v2, Lines of code: 34, Source: rebar.py

Example 5: get_muprop_gradient

# Required import: import utils [aliased as U in the example]
# Or: from utils import rms
def get_muprop_gradient(self):
    """
    random sample function that actually returns mean
    new forward pass that returns logQ as a list

    can get x_i from samples
    """

    # Hard loss
    logQHard, hardSamples = self._recognition_network()
    hardELBO, reinforce_model_grad = self._generator_network(hardSamples, logQHard)

    # Soft loss
    logQ, muSamples = self._recognition_network(sampler=self._mean_sample)
    muELBO, _ = self._generator_network(muSamples, logQ)

    # Compute gradients
    # Gradients of the soft ELBO w.r.t. each layer's mean activations.
    muELBOGrads = tf.gradients(tf.reduce_sum(muELBO),
                               [muSamples[i]['activation']
                                for i in range(self.hparams.n_layer)])

    # Compute MuProp gradient estimates
    learning_signal = hardELBO
    optimizerLoss = 0.0
    learning_signals = []
    for i in range(self.hparams.n_layer):
      dfDiff = tf.reduce_sum(
          muELBOGrads[i] * (hardSamples[i]['activation'] -
                            muSamples[i]['activation']),
          axis=1)
      dfMu = tf.reduce_sum(
          tf.stop_gradient(muELBOGrads[i]) *
          tf.nn.sigmoid(hardSamples[i]['log_param']),
          axis=1)

      scaling_baseline_0 = self._create_eta(collection='BASELINE')
      scaling_baseline_1 = self._create_eta(collection='BASELINE')
      learning_signals.append(learning_signal
                              - scaling_baseline_0 * muELBO
                              - scaling_baseline_1 * dfDiff
                              - self._create_baseline())
      self.baseline_loss.append(tf.square(learning_signals[i]))

      optimizerLoss += (
          logQHard[i] * tf.stop_gradient(learning_signals[i]) +
          tf.stop_gradient(scaling_baseline_1) * dfMu)
    optimizerLoss += reinforce_model_grad
    optimizerLoss *= -1

    optimizerLoss = tf.reduce_mean(optimizerLoss)

    muprop_gradient = self.optimizer_class.compute_gradients(optimizerLoss)
    debug = {
        'ELBO': hardELBO,
        'muELBO': muELBO,
    }

    debug.update(dict([
        ('RMS learning signal layer %d' % i, U.rms(learning_signal))
        for (i, learning_signal) in enumerate(learning_signals)]))

    return muprop_gradient, debug

  # REBAR gradient helper functions 
Developer ID: rky0930, Project: yolo_v2, Lines of code: 63, Source: rebar.py


Note: The utils.rms examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their original authors, and copyright of the source code remains with those authors; please consult the corresponding project's license before distributing or reusing the code. Do not reproduce this page without permission.