This article collects typical usage examples of the Python method rllab.misc.ext.iterate_minibatches_generic. If you are wondering how ext.iterate_minibatches_generic is used, what it does, or what calling it looks like in practice, the curated examples below may help. You can also read further about the module the method lives in, rllab.misc.ext.
Three code examples of the ext.iterate_minibatches_generic method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
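Before the examples, a minimal sketch of the call itself may help. The keyword arguments (input_lst, batchsize, shuffle) and the fact that each yielded batch unpacks into one array per input are taken from the call sites below; the toy data and batch size are illustrative assumptions:

import numpy as np
from rllab.misc.ext import iterate_minibatches_generic

# Toy dataset: 10 rows of 2-d inputs and matching 1-d targets (made-up values).
xs = np.arange(20.0).reshape(10, 2)
ys = np.arange(10.0).reshape(10, 1)

# Each iteration yields aligned minibatches, one array per entry of
# input_lst; shuffle=True visits the rows in a random order.
for batch in iterate_minibatches_generic(input_lst=[xs, ys], batchsize=4, shuffle=True):
    batch_xs, batch_ys = batch
    print(batch_xs.shape, batch_ys.shape)  # e.g. (4, 2) (4, 1)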
Example 1: fit
# Required import: from rllab.misc import ext [as alias]
# Or: from rllab.misc.ext import iterate_minibatches_generic [as alias]
# Also assumed by this snippet: import numpy as np; import theano; from rllab.misc import logger
def fit(self, xs, ys):
    if self._subsample_factor < 1:
        # Subsample (with replacement) to cut the cost of the fit.
        num_samples_tot = xs.shape[0]
        idx = np.random.randint(0, num_samples_tot, int(num_samples_tot * self._subsample_factor))
        xs, ys = xs[idx], ys[idx]
    if self._normalize_inputs:
        # recompute normalizing constants for inputs
        self._x_mean_var.set_value(
            np.mean(xs, axis=0, keepdims=True).astype(theano.config.floatX))
        self._x_std_var.set_value(
            (np.std(xs, axis=0, keepdims=True) + 1e-8).astype(theano.config.floatX))
    if self._normalize_outputs:
        # recompute normalizing constants for outputs
        self._y_mean_var.set_value(
            np.mean(ys, axis=0, keepdims=True).astype(theano.config.floatX))
        self._y_std_var.set_value(
            (np.std(ys, axis=0, keepdims=True) + 1e-8).astype(theano.config.floatX))
    if self._name:
        prefix = self._name + "_"
    else:
        prefix = ""
    # FIXME: needs batch computation to avoid OOM.
    loss_before, loss_after, mean_kl, batch_count = 0., 0., 0., 0
    for batch in iterate_minibatches_generic(input_lst=[xs, ys], batchsize=self._batchsize, shuffle=True):
        batch_count += 1
        xs, ys = batch  # rebinds xs/ys to the current minibatch
        if self._use_trust_region:
            # Snapshot the pre-update distribution for the KL constraint.
            old_means, old_log_stds = self._f_pdists(xs)
            inputs = [xs, ys, old_means, old_log_stds]
        else:
            inputs = [xs, ys]
        loss_before += self._optimizer.loss(inputs)
        self._optimizer.optimize(inputs)
        loss_after += self._optimizer.loss(inputs)
        if self._use_trust_region:
            mean_kl += self._optimizer.constraint_val(inputs)
    logger.record_tabular(prefix + 'LossBefore', loss_before / batch_count)
    logger.record_tabular(prefix + 'LossAfter', loss_after / batch_count)
    # Parenthesized: the original expression divided only loss_after by batch_count.
    logger.record_tabular(prefix + 'dLoss', (loss_before - loss_after) / batch_count)
    if self._use_trust_region:
        logger.record_tabular(prefix + 'MeanKL', mean_kl / batch_count)
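For orientation: this fit is a method of a regressor class, most likely rllab's GaussianMLPRegressor given the _f_pdists and trust-region fields. A hedged usage sketch follows; the import path, constructor arguments, and data shapes are assumptions for illustration, not confirmed by the snippet above:

import numpy as np
from rllab.regressors.gaussian_mlp_regressor import GaussianMLPRegressor  # assumed path

# Hypothetical regression problem: 3-d inputs, 1-d targets.
regressor = GaussianMLPRegressor(input_shape=(3,), output_dim=1)
xs = np.random.randn(1000, 3)
ys = np.random.randn(1000, 1)

# One call renormalizes inputs/outputs, then makes a single shuffled
# minibatch pass over the data and records Loss/KL statistics.
regressor.fit(xs, ys)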
Example 2: fit
# Required import: from rllab.misc import ext [as alias]
# Or: from rllab.misc.ext import iterate_minibatches_generic [as alias]
def fit(self, xs, ys):
    if self._subsample_factor < 1:
        num_samples_tot = xs.shape[0]
        idx = np.random.randint(0, num_samples_tot, int(
            num_samples_tot * self._subsample_factor))
        xs, ys = xs[idx], ys[idx]
    if self._normalize_inputs:
        # recompute normalizing constants for inputs
        self._x_mean_var.set_value(
            np.mean(xs, axis=0, keepdims=True).astype(theano.config.floatX))
        self._x_std_var.set_value(
            (np.std(xs, axis=0, keepdims=True) + 1e-8).astype(theano.config.floatX))
    if self._normalize_outputs:
        # recompute normalizing constants for outputs
        self._y_mean_var.set_value(
            np.mean(ys, axis=0, keepdims=True).astype(theano.config.floatX))
        self._y_std_var.set_value(
            (np.std(ys, axis=0, keepdims=True) + 1e-8).astype(theano.config.floatX))
    if self._name:
        prefix = self._name + "_"
    else:
        prefix = ""
    # FIXME: needs batch computation to avoid OOM.
    loss_before, loss_after, mean_kl, batch_count = 0., 0., 0., 0
    for batch in iterate_minibatches_generic(input_lst=[xs, ys], batchsize=self._batchsize, shuffle=True):
        batch_count += 1
        xs, ys = batch
        if self._use_trust_region:
            old_means, old_log_stds = self._f_pdists(xs)
            inputs = [xs, ys, old_means, old_log_stds]
        else:
            inputs = [xs, ys]
        loss_before += self._optimizer.loss(inputs)
        self._optimizer.optimize(inputs)
        loss_after += self._optimizer.loss(inputs)
        if self._use_trust_region:
            mean_kl += self._optimizer.constraint_val(inputs)
    logger.record_tabular(prefix + 'LossBefore', loss_before / batch_count)
    logger.record_tabular(prefix + 'LossAfter', loss_after / batch_count)
    # Parenthesized: the original expression divided only loss_after by batch_count.
    logger.record_tabular(
        prefix + 'dLoss', (loss_before - loss_after) / batch_count)
    if self._use_trust_region:
        logger.record_tabular(prefix + 'MeanKL', mean_kl / batch_count)
Example 3: fit
# Required import: from rllab.misc import ext [as alias]
# Or: from rllab.misc.ext import iterate_minibatches_generic [as alias]
def fit(self, xs, ys, log=True):
    if self._subsample_factor < 1:
        num_samples_tot = xs.shape[0]
        idx = np.random.randint(0, num_samples_tot, int(num_samples_tot * self._subsample_factor))
        xs, ys = xs[idx], ys[idx]
    if self._normalize_inputs:
        # recompute normalizing constants for inputs
        self._x_mean_var.set_value(
            np.mean(xs, axis=0, keepdims=True).astype(theano.config.floatX))
        self._x_std_var.set_value(
            (np.std(xs, axis=0, keepdims=True) + 1e-8).astype(theano.config.floatX))
    if self._normalize_outputs:
        # recompute normalizing constants for outputs
        self._y_mean_var.set_value(
            np.mean(ys, axis=0, keepdims=True).astype(theano.config.floatX))
        self._y_std_var.set_value(
            (np.std(ys, axis=0, keepdims=True) + 1e-8).astype(theano.config.floatX))
    if self._name:
        prefix = self._name + "_"
    else:
        prefix = ""
    # FIXME: needs batch computation to avoid OOM.
    loss_before, loss_after, mean_kl, batch_count = 0., 0., 0., 0
    for batch in iterate_minibatches_generic(input_lst=[xs, ys], batchsize=self._batchsize, shuffle=True):
        batch_count += 1
        xs, ys = batch
        if self._use_trust_region:
            old_means, old_log_stds = self._f_pdists(xs)
            inputs = [xs, ys, old_means, old_log_stds]
        else:
            inputs = [xs, ys]
        loss_before += self._optimizer.loss(inputs)
        self._optimizer.optimize(inputs)
        loss_after += self._optimizer.loss(inputs)
        if self._use_trust_region:
            mean_kl += self._optimizer.constraint_val(inputs)
    if log:
        logger.record_tabular(prefix + 'LossBefore', loss_before / batch_count)
        logger.record_tabular(prefix + 'LossAfter', loss_after / batch_count)
        # Parenthesized: the original expression divided only loss_after by batch_count.
        logger.record_tabular(prefix + 'dLoss', (loss_before - loss_after) / batch_count)
        if self._use_trust_region:
            logger.record_tabular(prefix + 'MeanKL', mean_kl / batch_count)
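Example 3's only difference from the first two is the optional log flag, which suppresses the record_tabular calls. Reusing the hypothetical regressor and data from the sketch after Example 1, a silent fit looks like this:

# Warm-up fit whose statistics should stay out of the tabular log.
regressor.fit(xs, ys, log=False)

# Subsequent fits record LossBefore/LossAfter/dLoss (and MeanKL under
# the trust-region setting) as usual, since log defaults to True.
regressor.fit(xs, ys)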