当前位置: 首页>>代码示例>>Python>>正文


Python utils.rms方法代码示例

本文整理汇总了Python中utils.rms方法的典型用法代码示例。如果您正苦于以下问题:Python utils.rms方法的具体用法?Python utils.rms怎么用?Python utils.rms使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在utils的用法示例。


在下文中一共展示了utils.rms方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: _create_loss

# 需要导入模块: import utils [as 别名]
# 或者: from utils import rms [as 别名]
def _create_loss(self):
    """Build the plain REINFORCE training loss.

    Runs the hard (discrete-sample) forward pass, centers the resulting
    learning signal, and forms the surrogate loss whose gradient is the
    REINFORCE estimator.

    Side effects:
      self.optimizerLoss: negated surrogate objective to minimize.
      self.lHat: list of scalar monitoring tensors
        [mean ELBO, RMS of the centered learning signal].

    Returns:
      The (per-example) reinforce learning-signal tensor.
    """
    # Hard loss
    logQHard, samples = self._recognition_network()
    reinforce_learning_signal, reinforce_model_grad = self._generator_network(samples, logQHard)
    logQHard = tf.add_n(logQHard)

    # REINFORCE: stop_gradient so the centered signal acts as a constant
    # multiplier on logQHard when differentiating.
    learning_signal = tf.stop_gradient(U.center(reinforce_learning_signal))
    self.optimizerLoss = -(learning_signal*logQHard +
                           reinforce_model_grad)
    # BUGFIX: under Python 3, map() returns a lazy one-shot iterator, so
    # self.lHat would be exhausted after a single use. A list comprehension
    # is equivalent on Python 2 and correct on Python 3.
    self.lHat = [tf.reduce_mean(t) for t in [
        reinforce_learning_signal,
        U.rms(learning_signal),
    ]]

    return reinforce_learning_signal
开发者ID:rky0930,项目名称:yolo_v2,代码行数:18,代码来源:rebar.py

示例2: get_nvil_gradient

# 需要导入模块: import utils [as 别名]
# 或者: from utils import rms [as 别名]
def get_nvil_gradient(self):
    """Compute the NVIL gradient."""
    # Discrete (hard) forward pass through recognition + generator nets.
    logQHard, samples = self._recognition_network()
    ELBO, reinforce_model_grad = self._generator_network(samples, logQHard)
    logQHard = tf.add_n(logQHard)

    # NVIL learning signal: ELBO minus a learned baseline
    # (no variance normalization).
    learning_signal = tf.stop_gradient(ELBO) - self._create_baseline()

    # The baseline is trained to regress the learning signal toward zero.
    self.baseline_loss.append(tf.square(learning_signal))

    # Negated surrogate objective; its gradient is the NVIL estimator.
    surrogate = (tf.stop_gradient(learning_signal) * logQHard
                 + reinforce_model_grad)
    optimizerLoss = tf.reduce_mean(-surrogate)

    nvil_gradient = self.optimizer_class.compute_gradients(optimizerLoss)
    debug = {
        'ELBO': ELBO,
        'RMS of centered learning signal': U.rms(learning_signal),
    }

    return nvil_gradient, debug
开发者ID:rky0930,项目名称:yolo_v2,代码行数:25,代码来源:rebar.py

示例3: _create_loss

# 需要导入模块: import utils [as 别名]
# 或者: from utils import rms [as 别名]
def _create_loss(self):
    """Build the plain REINFORCE training loss.

    Runs the hard (discrete-sample) forward pass, centers the resulting
    learning signal, and forms the surrogate loss whose gradient is the
    REINFORCE estimator.

    Side effects:
      self.optimizerLoss: negated surrogate objective to minimize.
      self.lHat: list of scalar monitoring tensors
        [mean ELBO, RMS of the centered learning signal].

    Returns:
      The (per-example) reinforce learning-signal tensor.
    """
    # Hard loss
    logQHard, samples = self._recognition_network()
    reinforce_learning_signal, reinforce_model_grad = self._generator_network(samples, logQHard)
    logQHard = tf.add_n(logQHard)

    # REINFORCE: stop_gradient so the centered signal acts as a constant
    # multiplier on logQHard when differentiating.
    learning_signal = tf.stop_gradient(center(reinforce_learning_signal))
    self.optimizerLoss = -(learning_signal*logQHard +
                           reinforce_model_grad)
    # BUGFIX: under Python 3, map() returns a lazy one-shot iterator, so
    # self.lHat would be exhausted after a single use. A list comprehension
    # is equivalent on Python 2 and correct on Python 3.
    self.lHat = [tf.reduce_mean(t) for t in [
        reinforce_learning_signal,
        U.rms(learning_signal),
    ]]

    return reinforce_learning_signal
开发者ID:loicmarie,项目名称:hands-detection,代码行数:18,代码来源:rebar.py

示例4: get_simple_muprop_gradient

# 需要导入模块: import utils [as 别名]
# 或者: from utils import rms [as 别名]
def get_simple_muprop_gradient(self):
    """Compute the simplified MuProp gradient estimator.

    Unlike full MuProp, this control variate omits the first-order
    (linear) Taylor term.
    """
    # Discrete forward pass.
    logQHard, hardSamples = self._recognition_network()
    hardELBO, reinforce_model_grad = self._generator_network(hardSamples, logQHard)

    # Deterministic forward pass using the mean ("mu") as the sample.
    logQ, muSamples = self._recognition_network(sampler=self._mean_sample)
    muELBO, _ = self._generator_network(muSamples, logQ)

    # Learning signal: hard ELBO, minus a learned scaling of the soft ELBO
    # (control variate), minus an input-independent learned baseline.
    scaling_baseline = self._create_eta(collection='BASELINE')
    learning_signal = (
        hardELBO - scaling_baseline * muELBO - self._create_baseline())
    self.baseline_loss.append(tf.square(learning_signal))

    # Negated surrogate objective; its gradient is the estimator.
    surrogate = (tf.stop_gradient(learning_signal) * tf.add_n(logQHard)
                 + reinforce_model_grad)
    optimizerLoss = tf.reduce_mean(-surrogate)

    simple_muprop_gradient = (self.optimizer_class.
                              compute_gradients(optimizerLoss))
    debug = {
        'ELBO': hardELBO,
        'muELBO': muELBO,
        'RMS': U.rms(learning_signal),
    }

    return simple_muprop_gradient, debug
开发者ID:rky0930,项目名称:yolo_v2,代码行数:34,代码来源:rebar.py

示例5: get_muprop_gradient

# 需要导入模块: import utils [as 别名]
# 或者: from utils import rms [as 别名]
def get_muprop_gradient(self):
    """Compute the full MuProp gradient estimator.

    Combines a REINFORCE-style term with a first-order Taylor control
    variate evaluated at the mean-field ("mu") forward pass: the soft
    forward pass reuses the recognition network with a deterministic
    mean sampler, and per-layer gradients of the soft ELBO form the
    control variate.

    Returns:
      (muprop_gradient, debug): the optimizer gradient list and a dict of
      scalar monitoring tensors (ELBO, muELBO, per-layer RMS signals).
    """

    # Hard loss
    logQHard, hardSamples = self._recognition_network()
    hardELBO, reinforce_model_grad = self._generator_network(hardSamples, logQHard)

    # Soft loss
    logQ, muSamples = self._recognition_network(sampler=self._mean_sample)
    muELBO, _ = self._generator_network(muSamples, logQ)

    # Gradients of the soft ELBO w.r.t. each layer's mean activation;
    # these drive the Taylor-expansion control variate.
    # BUGFIX: xrange is Python 2 only; range works on both Py2 and Py3.
    muELBOGrads = tf.gradients(tf.reduce_sum(muELBO),
                               [ muSamples[i]['activation'] for
                                i in range(self.hparams.n_layer) ])

    # Compute MuProp gradient estimates
    learning_signal = hardELBO
    optimizerLoss = 0.0
    learning_signals = []
    for i in range(self.hparams.n_layer):
      # f'(mu) . (x - mu): deviation term of the Taylor correction.
      dfDiff = tf.reduce_sum(
          muELBOGrads[i] * (hardSamples[i]['activation'] -
                            muSamples[i]['activation']),
          axis=1)
      # f'(mu) . sigmoid(logits): deterministic (reparameterized) term.
      dfMu = tf.reduce_sum(
          tf.stop_gradient(muELBOGrads[i]) *
          tf.nn.sigmoid(hardSamples[i]['log_param']),
          axis=1)

      # Learned scalar scalings for the two control-variate terms.
      scaling_baseline_0 = self._create_eta(collection='BASELINE')
      scaling_baseline_1 = self._create_eta(collection='BASELINE')
      learning_signals.append(learning_signal - scaling_baseline_0 * muELBO - scaling_baseline_1 * dfDiff - self._create_baseline())
      self.baseline_loss.append(tf.square(learning_signals[i]))

      optimizerLoss += (
          logQHard[i] * tf.stop_gradient(learning_signals[i]) +
          tf.stop_gradient(scaling_baseline_1) * dfMu)
    optimizerLoss += reinforce_model_grad
    optimizerLoss *= -1

    optimizerLoss = tf.reduce_mean(optimizerLoss)

    muprop_gradient = self.optimizer_class.compute_gradients(optimizerLoss)
    debug = {
        'ELBO': hardELBO,
        'muELBO': muELBO,
    }

    debug.update(dict([
        ('RMS learning signal layer %d' % i, U.rms(learning_signal))
        for (i, learning_signal) in enumerate(learning_signals)]))

    return muprop_gradient, debug

  # REBAR gradient helper functions 
开发者ID:rky0930,项目名称:yolo_v2,代码行数:63,代码来源:rebar.py


注:本文中的utils.rms方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。