本文整理汇总了Python中utils.vectorize方法的典型用法代码示例。如果您正苦于以下问题:Python utils.vectorize方法的具体用法?Python utils.vectorize怎么用?Python utils.vectorize使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类utils的用法示例。
在下文中一共展示了utils.vectorize方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: get_rebar_gradient
# 需要导入模块: import utils [as 别名]
# 或者: from utils import vectorize [as 别名]
def get_rebar_gradient(self):
    """Build the REBAR gradient estimator for the model parameters.

    Returns:
        A tuple ``(total_grads, debug, variance_objective)`` where
        ``total_grads`` is a list of (gradient, variable) pairs to apply,
        ``debug`` is a dict of diagnostic tensors (hard ELBO, eta stats,
        variance objective), and ``variance_objective`` is the scalar
        minimized to tune the control-variate scale eta.
    """
    hardELBO, nvil_gradient, logQHard = self._create_hard_elbo()

    # Select the configured flavour of the Gumbel control variate.
    if self.hparams.quadratic:
        build_cv = self._create_gumbel_control_variate_quadratic
    else:
        build_cv = self._create_gumbel_control_variate
    gumbel_cv, _ = build_cv(logQHard)

    # Score-function term (negated so minimizing ascends the ELBO).
    score_grads = self.optimizer_class.compute_gradients(
        tf.reduce_mean(-nvil_gradient))

    # Control-variate term, rescaled per layer by eta.
    eta = {}
    cv_grads, eta_statistics = self.multiply_by_eta_per_layer(
        self.optimizer_class.compute_gradients(tf.reduce_mean(gumbel_cv)),
        eta)

    total_grads = U.add_grads_and_vars(score_grads, cv_grads)

    # Variance objective: mean squared entry of the flattened gradient.
    flat_grad = U.vectorize(total_grads, set_none_to_zero=True)
    variance_objective = tf.reduce_mean(tf.square(flat_grad))

    debug = {
        'ELBO': hardELBO,
        'etas': eta_statistics,
        'variance_objective': variance_objective,
    }
    return total_grads, debug, variance_objective
###
# Create varaints
###
示例2: compute_gradient_moments
# 需要导入模块: import utils [as 别名]
# 或者: from utils import vectorize [as 别名]
def compute_gradient_moments(self, grads_and_vars):
    """Return EMA estimates of the first and second gradient moments.

    Flattens ``grads_and_vars`` into a single vector (None gradients
    become zeros), registers EMA updates for the vector and its square,
    and returns the two running averages.
    """
    flat_grad = U.vectorize(grads_and_vars, set_none_to_zero=True)
    flat_grad_sq = tf.square(flat_grad)
    # The EMA update op must be run by the caller's train op.
    self.maintain_ema_ops.append(self.ema.apply([flat_grad, flat_grad_sq]))
    return self.ema.average(flat_grad), self.ema.average(flat_grad_sq)
示例3: _create_train_op
# 需要导入模块: import utils [as 别名]
# 或者: from utils import vectorize [as 别名]
def _create_train_op(self, grads_and_vars, extra_grads_and_vars=None):
    '''Create the training op and per-parameter gradient-variance tracking.

    Args:
        grads_and_vars: gradients to apply and compute running average variance
        extra_grads_and_vars: gradients to apply (not used to compute average
            variance); optional.

    Side effects:
        Sets self.optimizer (grouped train op + EMA updates) and
        self.grad_variance (scalar mean per-parameter gradient variance).
    '''
    # BUG FIX: the original signature used a mutable default ([]) that was
    # then mutated with += below, so baseline gradients accumulated across
    # calls in the shared default list. Use None and copy so a
    # caller-supplied list is never mutated either.
    extra_grads_and_vars = list(extra_grads_and_vars) if extra_grads_and_vars else []

    # Variance summaries: EMAs of the flattened gradient and its square.
    first_moment = U.vectorize(grads_and_vars, skip_none=True)
    second_moment = tf.square(first_moment)
    self.maintain_ema_ops.append(self.ema.apply([first_moment, second_moment]))

    # Add baseline losses
    if self.baseline_loss:
        mean_baseline_loss = tf.reduce_mean(tf.add_n(self.baseline_loss))
        extra_grads_and_vars += self.optimizer_class.compute_gradients(
            mean_baseline_loss,
            var_list=tf.get_collection('BASELINE'))

    # Separate optimizer for the extra (baseline/control-variate) params,
    # run at 10x the model learning rate -- presumably so they adapt
    # faster than the model; TODO confirm.
    extra_optimizer = tf.train.AdamOptimizer(
        learning_rate=10 * self.hparams.learning_rate,
        beta2=self.hparams.beta2)

    # Ensure that all required tensors are computed before updates are executed
    with tf.control_dependencies(
        [tf.group(*[g for g, _ in (grads_and_vars + extra_grads_and_vars)
                    if g is not None])]):
        # Filter out the P_COLLECTION variables if we're in eval mode
        if self.eval_mode:
            grads_and_vars = [(g, v) for g, v in grads_and_vars
                              if v not in tf.get_collection(P_COLLECTION)]
        train_op = self.optimizer_class.apply_gradients(
            grads_and_vars, global_step=self.global_step)

        if extra_grads_and_vars:
            extra_train_op = extra_optimizer.apply_gradients(
                extra_grads_and_vars)
        else:
            extra_train_op = tf.no_op()

        self.optimizer = tf.group(train_op, extra_train_op,
                                  *self.maintain_ema_ops)

    # per parameter variance: E[g^2] - E[g]^2 from the EMA estimates
    variance_estimator = (self.ema.average(second_moment) -
                          tf.square(self.ema.average(first_moment)))
    self.grad_variance = tf.reduce_mean(variance_estimator)
示例4: get_dynamic_rebar_gradient
# 需要导入模块: import utils [as 别名]
# 或者: from utils import vectorize [as 别名]
def get_dynamic_rebar_gradient(self):
    """Get the dynamic REBAR gradient (temperature t and eta optimized).

    Returns:
        A tuple ``(total_grads, debug, variance_objective,
        variance_objective_grad)`` where ``total_grads`` is the list of
        (gradient, variable) pairs to apply, ``debug`` is a dict of
        diagnostic tensors, ``variance_objective`` is the scalar variance
        surrogate, and ``variance_objective_grad`` is its gradient with
        respect to the pre-temperature variable (computed manually because
        part of the dependence flows through a score-function term).
    """
    # Broadcast the scalar pre-temperature across the batch; the
    # temperature itself is its exponential (keeps it positive).
    tiled_pre_temperature = tf.tile([self.pre_temperature_variable],
                                    [self.batch_size])
    temperature = tf.exp(tiled_pre_temperature)

    hardELBO, nvil_gradient, logQHard = self._create_hard_elbo()
    if self.hparams.quadratic:
        gumbel_cv, extra = self._create_gumbel_control_variate_quadratic(
            logQHard, temperature=temperature)
    else:
        gumbel_cv, extra = self._create_gumbel_control_variate(
            logQHard, temperature=temperature)

    f_grads = self.optimizer_class.compute_gradients(
        tf.reduce_mean(-nvil_gradient))
    eta = {}
    h_grads, eta_statistics = self.multiply_by_eta_per_layer(
        self.optimizer_class.compute_gradients(tf.reduce_mean(gumbel_cv)),
        eta)
    model_grads = U.add_grads_and_vars(f_grads, h_grads)
    total_grads = model_grads

    # Construct the variance objective
    g = U.vectorize(model_grads, set_none_to_zero=True)
    self.maintain_ema_ops.append(self.ema.apply([g]))
    gbar = 0  # tf.stop_gradient(self.ema.average(g))
    variance_objective = tf.reduce_mean(tf.square(g - gbar))

    # Score-function (REINFORCE) part of dg/dt, built per configuration.
    reinf_g_t = 0
    if self.hparams.quadratic:
        # FIX: use range instead of Python-2-only xrange (identical
        # iteration behavior on both interpreters).
        for layer in range(self.hparams.n_layer):
            gumbel_learning_signal, _ = extra[layer]
            df_dt = tf.gradients(gumbel_learning_signal,
                                 tiled_pre_temperature)[0]
            reinf_g_t_i, _ = self.multiply_by_eta_per_layer(
                self.optimizer_class.compute_gradients(
                    tf.reduce_mean(tf.stop_gradient(df_dt) * logQHard[layer])),
                eta)
            reinf_g_t += U.vectorize(reinf_g_t_i, set_none_to_zero=True)
        reparam = tf.add_n([reparam_i for _, reparam_i in extra])
    else:
        gumbel_learning_signal, reparam = extra
        df_dt = tf.gradients(gumbel_learning_signal,
                             tiled_pre_temperature)[0]
        reinf_g_t, _ = self.multiply_by_eta_per_layer(
            self.optimizer_class.compute_gradients(
                tf.reduce_mean(tf.stop_gradient(df_dt) * tf.add_n(logQHard))),
            eta)
        reinf_g_t = U.vectorize(reinf_g_t, set_none_to_zero=True)

    # Reparameterized part of dg/dt, then its contribution to the
    # variance objective's temperature gradient.
    reparam_g, _ = self.multiply_by_eta_per_layer(
        self.optimizer_class.compute_gradients(tf.reduce_mean(reparam)),
        eta)
    reparam_g = U.vectorize(reparam_g, set_none_to_zero=True)
    reparam_g_t = tf.gradients(
        tf.reduce_mean(2 * tf.stop_gradient(g - gbar) * reparam_g),
        self.pre_temperature_variable)[0]

    # d/dt E[(g - gbar)^2] = 2 E[(g - gbar) * dg/dt], split into the
    # score-function and reparameterized pieces assembled above.
    variance_objective_grad = (tf.reduce_mean(2 * (g - gbar) * reinf_g_t)
                               + reparam_g_t)

    debug = {
        'ELBO': hardELBO,
        'etas': eta_statistics,
        'variance_objective': variance_objective,
    }
    return total_grads, debug, variance_objective, variance_objective_grad