This article collects typical usage examples of the tensorflow.assign_sub method in Python. If you have been wondering what exactly Python's tensorflow.assign_sub does, how to use it, or where to find examples of it, the curated code samples below may help. You can also explore further usage examples of the tensorflow module.
Below are 15 code examples of tensorflow.assign_sub, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
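As a quick orientation before the examples: tf.assign_sub(ref, value) subtracts value from a variable in place and returns a tensor carrying the updated value, which is why it shows up in hand-rolled optimizers, moving averages, and counters. A minimal sketch of the basic contract (TF 1.x graph mode; the values are made up):

import tensorflow as tf

w = tf.Variable([10.0, 20.0])
step = tf.assign_sub(w, [1.0, 2.0])  # computes w -= [1, 2] when run

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(step))  # [ 9. 18.]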
Example 1: _apply_dense
# Required module: import tensorflow [as alias]
# Or: from tensorflow import assign_sub [as alias]
def _apply_dense(self, grad, var):
    lr_t = tf.cast(self._lr_t, var.dtype.base_dtype)
    beta1_t = tf.cast(self._beta1_t, var.dtype.base_dtype)
    beta2_t = tf.cast(self._beta2_t, var.dtype.base_dtype)
    if var.dtype.base_dtype == tf.float16:
        # Can't use 1e-8 due to underflow
        eps = 1e-7
    else:
        eps = 1e-8
    v = self.get_slot(var, "v")
    v_t = v.assign(beta1_t * v + (1. - beta1_t) * grad)
    m = self.get_slot(var, "m")
    m_t = m.assign(tf.maximum(beta2_t * m + eps, tf.abs(grad)))
    g_t = v_t / m_t
    var_update = tf.assign_sub(var, lr_t * g_t)
    return tf.group(*[var_update, m_t, v_t])
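The update keeps an exponential moving average of the gradient in slot "v" and a running maximum of gradient magnitudes in slot "m" (an AdaMax-style denominator), then steps the variable with tf.assign_sub. A rough plain-Python check of one step, with made-up hyperparameters and values:

lr, beta1, beta2, eps = 0.01, 0.9, 0.999, 1e-8
v, m, var, grad = 0.0, 1.0, 5.0, 0.5
v = beta1 * v + (1 - beta1) * grad      # EMA of gradients
m = max(beta2 * m + eps, abs(grad))     # running max of |grad|
var -= lr * (v / m)                     # mirrors tf.assign_sub(var, lr_t * g_t)
print(var)                              # ~4.9995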
Example 2: _apply_dense
# Required module: import tensorflow [as alias]
# Or: from tensorflow import assign_sub [as alias]
def _apply_dense(self, grad, var):
    lr_t = tf.cast(self._lr_t, var.dtype.base_dtype)
    beta1_t = tf.cast(self._beta1_t, var.dtype.base_dtype)
    beta2_t = tf.cast(self._beta2_t, var.dtype.base_dtype)
    if var.dtype.base_dtype == tf.float16:
        eps = 1e-7  # Can't use 1e-8 due to underflow -- not sure if it makes a big difference.
    else:
        eps = 1e-8
    v = self.get_slot(var, "v")
    v_t = v.assign(beta1_t * v + (1. - beta1_t) * grad)
    m = self.get_slot(var, "m")
    m_t = m.assign(tf.maximum(beta2_t * m + eps, tf.abs(grad)))
    g_t = v_t / m_t
    var_update = tf.assign_sub(var, lr_t * g_t)
    return tf.group(*[var_update, m_t, v_t])
Example 3: batchNormalization
# Required module: import tensorflow [as alias]
# Or: from tensorflow import assign_sub [as alias]
def batchNormalization(opt, input, type):
    with tf.variable_scope("batchNorm"):
        globalMean = tf.get_variable("mean", shape=[input.shape[-1]], dtype=tf.float32, trainable=False,
                                     initializer=tf.constant_initializer(0.0))
        globalVar = tf.get_variable("var", shape=[input.shape[-1]], dtype=tf.float32, trainable=False,
                                    initializer=tf.constant_initializer(1.0))
        if opt.training:
            if type == "conv": batchMean, batchVar = tf.nn.moments(input, axes=[0, 1, 2])
            elif type == "fc": batchMean, batchVar = tf.nn.moments(input, axes=[0])
            trainMean = tf.assign_sub(globalMean, (1 - opt.BNdecay) * (globalMean - batchMean))
            trainVar = tf.assign_sub(globalVar, (1 - opt.BNdecay) * (globalVar - batchVar))
            with tf.control_dependencies([trainMean, trainVar]):
                output = tf.nn.batch_normalization(input, batchMean, batchVar, None, None, opt.BNepsilon)
        else:
            output = tf.nn.batch_normalization(input, globalMean, globalVar, None, None, opt.BNepsilon)
    return output
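The tf.assign_sub form used for the running statistics above is just an exponential moving average written as a decrement: mean - (1 - decay) * (mean - batch_mean) equals decay * mean + (1 - decay) * batch_mean. A quick plain-Python check of that identity (the numbers are made up):

decay, mean, batch_mean = 0.99, 0.0, 2.0
mean -= (1 - decay) * (mean - batch_mean)       # the tf.assign_sub update
print(mean)                                     # 0.02
print(decay * 0.0 + (1 - decay) * batch_mean)   # 0.02, same result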
Example 4: batch_norm
# Required module: import tensorflow [as alias]
# Or: from tensorflow import assign_sub [as alias]
def batch_norm(x, train, name, decay=0.99, epsilon=1e-5):
    shape = x.get_shape().as_list()
    with tf.variable_scope(name):
        beta = tf.get_variable('beta', [shape[-1]], initializer=tf.constant_initializer(0.))
        gamma = tf.get_variable('gamma', [shape[-1]], initializer=tf.random_normal_initializer(1., 0.02))
        pop_mean = tf.get_variable('pop_mean', [shape[-1]], initializer=tf.constant_initializer(0.), trainable=False)
        pop_var = tf.get_variable('pop_var', [shape[-1]], initializer=tf.constant_initializer(1.), trainable=False)
        if pop_mean not in tf.moving_average_variables():
            tf.add_to_collection(tf.GraphKeys.MOVING_AVERAGE_VARIABLES, pop_mean)
            tf.add_to_collection(tf.GraphKeys.MOVING_AVERAGE_VARIABLES, pop_var)

        def func1():
            # Execute at training time
            batch_mean, batch_var = tf.nn.moments(x, list(range(len(shape) - 1)))
            update_mean = tf.assign_sub(pop_mean, (1 - decay) * (pop_mean - batch_mean))
            update_var = tf.assign_sub(pop_var, (1 - decay) * (pop_var - batch_var))
            with tf.control_dependencies([update_mean, update_var]):
                return tf.nn.batch_normalization(x, batch_mean, batch_var, beta, gamma, epsilon)

        def func2():
            # Execute at test time
            return tf.nn.batch_normalization(x, pop_mean, pop_var, beta, gamma, epsilon)

        return tf.cond(train, func1, func2)
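Unlike Example 3, which branches on a Python flag at graph-construction time, this version defers the choice to runtime with tf.cond, so `train` must be a scalar boolean tensor rather than a Python bool. A sketch of how it might be driven (the placeholder name is illustrative):

is_training = tf.placeholder(tf.bool, shape=[], name='is_training')
y = batch_norm(x, is_training, 'bn1')
# sess.run(y, feed_dict={is_training: True})   # batch statistics + EMA update
# sess.run(y, feed_dict={is_training: False})  # population statistics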
Example 5: apply_updates
# Required module: import tensorflow [as alias]
# Or: from tensorflow import assign_sub [as alias]
def apply_updates(self, model, grads):
    """
    Updates the model parameters based on the given gradients, using momentum.
    """
    update_ops = []
    mom_ops = []
    if isinstance(self._learning_rate, list):
        lrs = self._learning_rate
    else:
        lrs = [self._learning_rate for p in model.model_params]
    with tf.name_scope('CDLearning/updates'):
        for param, grad, mv, lr in zip(model.model_params, grads, self._momentum_vector, lrs):
            mv = tf.assign(mv, self._momentum * mv + grad * lr)
            update_ops.append(tf.assign_sub(param, mv))
    return update_ops, mom_ops
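This is classical momentum written by hand: the momentum vector accumulates self._momentum * mv + grad * lr, and tf.assign_sub applies it as the step. One step in plain Python, with illustrative numbers:

mu, lr = 0.9, 0.1
mv, param, grad = 0.5, 3.0, 2.0
mv = mu * mv + grad * lr   # mirrors tf.assign(mv, ...)
param -= mv                # mirrors tf.assign_sub(param, mv)
print(mv, param)           # 0.65 2.35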
Example 6: batch_norm
# Required module: import tensorflow [as alias]
# Or: from tensorflow import assign_sub [as alias]
def batch_norm(x, train, name, decay=0.99, epsilon=1e-5):
    shape = x.get_shape().as_list()
    with tf.variable_scope(name):
        beta = tf.get_variable('beta', [shape[-1]], initializer=tf.constant_initializer(0.))
        gamma = tf.get_variable('gamma', [shape[-1]], initializer=tf.random_normal_initializer(1., 0.02))
        pop_mean = tf.get_variable('pop_mean', [shape[-1]], initializer=tf.constant_initializer(0.), trainable=False)
        pop_var = tf.get_variable('pop_var', [shape[-1]], initializer=tf.constant_initializer(1.), trainable=False)
        if pop_mean not in tf.moving_average_variables():
            tf.add_to_collection(tf.GraphKeys.MOVING_AVERAGE_VARIABLES, pop_mean)
            tf.add_to_collection(tf.GraphKeys.MOVING_AVERAGE_VARIABLES, pop_var)

        def func1():
            # execute at training time
            batch_mean, batch_var = tf.nn.moments(x, list(range(len(shape) - 1)))
            update_mean = tf.assign_sub(pop_mean, (1 - decay) * (pop_mean - batch_mean))
            update_var = tf.assign_sub(pop_var, (1 - decay) * (pop_var - batch_var))
            with tf.control_dependencies([update_mean, update_var]):
                return tf.nn.batch_normalization(x, batch_mean, batch_var, beta, gamma, epsilon)

        def func2():
            # execute at test time
            return tf.nn.batch_normalization(x, pop_mean, pop_var, beta, gamma, epsilon)

        return tf.cond(train, func1, func2)
Example 7: build_trainer
# Required module: import tensorflow [as alias]
# Or: from tensorflow import assign_sub [as alias]
def build_trainer(self, child_model):
    # actor
    self.valid_loss = tf.to_float(child_model.rl_loss)
    self.valid_loss = tf.stop_gradient(self.valid_loss)
    self.valid_ppl = tf.exp(self.valid_loss)
    self.reward = 80.0 / self.valid_ppl

    if self.entropy_weight is not None:
        self.reward += self.entropy_weight * self.sample_entropy

    # or baseline
    self.sample_log_probs = tf.reduce_sum(self.sample_log_probs)
    self.baseline = tf.Variable(0.0, dtype=tf.float32, trainable=False)
    baseline_update = tf.assign_sub(
        self.baseline, (1 - self.bl_dec) * (self.baseline - self.reward))

    with tf.control_dependencies([baseline_update]):
        self.reward = tf.identity(self.reward)
    self.loss = self.sample_log_probs * (self.reward - self.baseline)

    self.train_step = tf.Variable(
        0, dtype=tf.int32, trainable=False, name="train_step")
    tf_variables = [var
                    for var in tf.trainable_variables() if var.name.startswith(self.name)]

    self.train_op, self.lr, self.grad_norm, self.optimizer = get_train_ops(
        self.loss,
        tf_variables,
        self.train_step,
        clip_mode=self.clip_mode,
        grad_bound=self.grad_bound,
        l2_reg=self.l2_reg,
        lr_init=self.lr_init,
        lr_dec_start=self.lr_dec_start,
        lr_dec_every=self.lr_dec_every,
        lr_dec_rate=self.lr_dec_rate,
        optim_algo=self.optim_algo,
        sync_replicas=self.sync_replicas,
        num_aggregate=self.num_aggregate,
        num_replicas=self.num_replicas)
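The control_dependencies block above guarantees the REINFORCE baseline is refreshed before the reward is consumed downstream: tf.identity creates a new tensor whose evaluation forces baseline_update to run first. A stripped-down sketch of that pattern (constants and names are illustrative):

import tensorflow as tf

baseline = tf.Variable(0.0, trainable=False)
reward = tf.constant(1.0)
baseline_update = tf.assign_sub(baseline, (1 - 0.99) * (baseline - reward))
with tf.control_dependencies([baseline_update]):
    reward = tf.identity(reward)  # fetching reward now also updates baseline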
Example 8: minimize
# Required module: import tensorflow [as alias]
# Or: from tensorflow import assign_sub [as alias]
def minimize(self, loss, variables=None):
    """Computes gradients of `loss` and applies one update step to `variables`."""
    variables = variables or tf.trainable_variables()
    gradients = tf.gradients(loss, variables,
                             colocate_gradients_with_ops=True,
                             gate_gradients=True,
                             aggregation_method=2)
    gradients = {variable: gradient for variable, gradient in zip(variables, gradients) if gradient is not None}

    variable_steps = {}
    variable_indices = {}
    updates = [tf.assign_add(self.global_step, 1)]
    for variable, gradient in six.iteritems(gradients):
        if isinstance(gradient, tf.Tensor):
            step, update = self.dense_update(gradient, variable)
            variable_steps[variable] = step
            updates.extend(update)
        else:
            step, indices, update = self.sparse_update(gradient, variable)
            variable_steps[variable] = step
            variable_indices[variable] = indices
            updates.extend(update)

    variable_steps = self.clip_by_global_norm(variable_steps)

    for variable, step in six.iteritems(variable_steps):
        if variable in variable_indices:
            indices = variable_indices[variable]
            updates.append(tf.scatter_sub(variable, indices, step))
        else:
            updates.append(tf.assign_sub(variable, step))
    return tf.tuple(updates)[0]
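This optimizer dispatches on the gradient type: dense tf.Tensor gradients go through tf.assign_sub, while sparse gradients (e.g. IndexedSlices from embedding lookups) go through tf.scatter_sub, which subtracts rows only at the given indices. A minimal sketch of the sparse path (shape and values are made up):

import tensorflow as tf

table = tf.Variable([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]])
# subtract a step from rows 0 and 2 only; row 1 is untouched
sparse_step = tf.scatter_sub(table, [0, 2], [[0.1, 0.1], [0.3, 0.3]])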
Example 9: update_sub
# Required module: import tensorflow [as alias]
# Or: from tensorflow import assign_sub [as alias]
def update_sub(x, decrement):
    """Update the value of `x` by subtracting `decrement`.

    # Arguments
        x: A `Variable`.
        decrement: A tensor of same shape as `x`.

    # Returns
        The variable `x` updated.
    """
    return tf.assign_sub(x, decrement)
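Example 9 is the Keras backend's thin wrapper over tf.assign_sub; in user code it is reached through the backend module. A hypothetical usage (K is the conventional backend alias; the counter is illustrative):

from keras import backend as K

counter = K.variable(10.0)
decrement_op = K.update_sub(counter, 1.0)  # graph op computing counter -= 1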
Example 10: testAssignUpdate
# Required module: import tensorflow [as alias]
# Or: from tensorflow import assign_sub [as alias]
def testAssignUpdate(self):
    var = state_ops.variable_op([1, 2], tf.float32)
    added = tf.assign_add(var, [[2.0, 3.0]])
    self.assertEqual([1, 2], added.get_shape())
    subbed = tf.assign_sub(var, [[12.0, 13.0]])
    self.assertEqual([1, 2], subbed.get_shape())
Example 11: testAssignUpdateNoVarShape
# Required module: import tensorflow [as alias]
# Or: from tensorflow import assign_sub [as alias]
def testAssignUpdateNoVarShape(self):
    var = state_ops.variable_op([1, 2], tf.float32, set_shape=False)
    added = tf.assign_add(var, [[2.0, 3.0]])
    self.assertEqual([1, 2], added.get_shape())
    subbed = tf.assign_sub(var, [[12.0, 13.0]])
    self.assertEqual([1, 2], subbed.get_shape())
Example 12: testAssignUpdateNoValueShape
# Required module: import tensorflow [as alias]
# Or: from tensorflow import assign_sub [as alias]
def testAssignUpdateNoValueShape(self):
    var = state_ops.variable_op([1, 2], tf.float32)
    added = tf.assign_add(var, self._NewShapelessTensor())
    self.assertEqual([1, 2], added.get_shape())
    subbed = tf.assign_sub(var, self._NewShapelessTensor())
    self.assertEqual([1, 2], subbed.get_shape())
Example 13: testAssignUpdateNoShape
# Required module: import tensorflow [as alias]
# Or: from tensorflow import assign_sub [as alias]
def testAssignUpdateNoShape(self):
    var = state_ops.variable_op([1, 2], tf.float32, set_shape=False)
    added = tf.assign_add(var, self._NewShapelessTensor())
    self.assertEqual(tensor_shape.unknown_shape(), added.get_shape())
    subbed = tf.assign_sub(var, self._NewShapelessTensor())
    self.assertEqual(tensor_shape.unknown_shape(), subbed.get_shape())
Example 14: _initAssignSubFetch
# Required module: import tensorflow [as alias]
# Or: from tensorflow import assign_sub [as alias]
def _initAssignSubFetch(self, x, y, use_gpu=False):
    """Initialize a variable to x, then compute and fetch p -= y."""
    with self.test_session(use_gpu=use_gpu):
        p = tf.Variable(x)
        sub = tf.assign_sub(p, y)
        p.initializer.run()
        new_value = sub.eval()
        return p.eval(), new_value
Example 15: _make_train_op
# Required module: import tensorflow [as alias]
# Or: from tensorflow import assign_sub [as alias]
def _make_train_op(self, item_logits, item_idxs, scores):
    batch_size = self._batch_size
    base_decay = 0.99
    learning_rate = 0.0035
    adam_beta1 = 0
    adam_epsilon = 1e-3
    entropy_weight = 0.0001

    # Compute log probs & entropy
    sample_log_probs = self._compute_sample_log_probs(item_idxs, item_logits)
    sample_entropy = self._compute_sample_entropy(item_logits)

    # Compute rewards in a batch; adding entropy encourages exploration
    rewards = scores
    rewards += entropy_weight * sample_entropy

    # Baseline reward for REINFORCE
    reward_base = tf.Variable(0., name='reward_base', dtype=tf.float32, trainable=False)

    # Update baseline whenever reward updates
    base_update = tf.assign_sub(reward_base, (1 - base_decay) * (reward_base - tf.reduce_mean(rewards)))
    with tf.control_dependencies([base_update]):
        rewards = tf.identity(rewards)

    # Compute losses in a batch
    losses = sample_log_probs * (rewards - reward_base)

    # Add optimizer
    tf_vars = self._get_all_variables()
    steps = tf.Variable(0, name='steps', dtype=tf.int32, trainable=False)
    grads = tf.gradients(losses, tf_vars)
    grads = [x / tf.constant(batch_size, dtype=tf.float32) for x in grads]  # Average all gradients
    opt = tf.train.AdamOptimizer(learning_rate, beta1=adam_beta1, epsilon=adam_epsilon,
                                 use_locking=True)
    train_op = opt.apply_gradients(zip(grads, tf_vars), global_step=steps)

    return (train_op, losses, rewards)