本文整理汇总了Python中tensorflow.clip_by_global_norm函数的典型用法代码示例。如果您正苦于以下问题:Python clip_by_global_norm函数的具体用法?Python clip_by_global_norm怎么用?Python clip_by_global_norm使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了clip_by_global_norm函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _add_shared_train_op
def _add_shared_train_op(self):
    """Sets self._train_op, the op to run for training."""
    # Choose which loss to minimize for the current training regime
    # (RL / actor-critic vs. plain pointer-generator), optionally with
    # the coverage penalty folded in.
    if self._hps.rl_training or self._hps.ac_training:
        loss_to_minimize = self._reinforce_shared_loss
        if self._hps.coverage:
            loss_to_minimize = self._reinforce_cov_total_loss
    else:
        loss_to_minimize = self._pgen_loss
        if self._hps.coverage:
            loss_to_minimize = self._pointer_cov_total_loss

    # Gradients of the trainable variables w.r.t. the selected loss.
    tvars = tf.trainable_variables()
    gradients = tf.gradients(
        loss_to_minimize, tvars,
        aggregation_method=tf.AggregationMethod.EXPERIMENTAL_TREE)

    # Clip on the training GPU and record the pre-clip global norm.
    with tf.device("/gpu:{}".format(self._hps.gpu_num)):
        grads, global_norm = tf.clip_by_global_norm(gradients, self._hps.max_grad_norm)
    tf.summary.scalar('global_norm', global_norm)

    # Apply adagrad optimizer.
    optimizer = tf.train.AdagradOptimizer(
        self._hps.lr, initial_accumulator_value=self._hps.adagrad_init_acc)
    with tf.device("/gpu:{}".format(self._hps.gpu_num)):
        self._shared_train_op = optimizer.apply_gradients(
            zip(grads, tvars), global_step=self.global_step, name='train_step')
示例2: optimizer
def optimizer(someloss):
    """Build an Adam train op for `someloss` with global-norm gradient clipping.

    Returns the op that applies the clipped gradients and increments the
    step counter.
    """
    # BUGFIX: the step counter must not be trainable — tf.Variable defaults
    # to trainable=True, so it was handed to compute_gradients() along with
    # the real model weights.
    global_step = tf.Variable(0, trainable=False)
    opt = tf.train.AdamOptimizer(learning_rate=0.001)
    gradients, variables = zip(*opt.compute_gradients(someloss))
    # Rescale so the global norm of all gradients is at most 1.25.
    gradients, _ = tf.clip_by_global_norm(gradients, 1.25)
    return opt.apply_gradients(zip(gradients, variables), global_step=global_step)
示例3: __init__
def __init__(self, is_training, config):
    """Bag-of-embeddings binary classifier.

    Sums token embeddings over the sequence and feeds the result through a
    single softmax layer. When `is_training` is true, also builds a
    gradient-clipped SGD train op.
    """
    self.batch_size = batch_size = config.batch_size
    size = config.hidden_size
    self.max_len = max_len = config.max_len
    vocab_size = config.vocab_size

    self._input_data = tf.placeholder(tf.int32, [batch_size, config.max_len])
    self._targets = tf.placeholder(tf.int32, [batch_size])

    embedding = tf.get_variable("embedding", [vocab_size, size])
    inputs = tf.nn.embedding_lookup(embedding, self._input_data)
    # Order-insensitive sentence representation: sum over the time axis.
    output = tf.reduce_sum(inputs, 1)

    softmax_w = tf.get_variable("softmax_w", [size, 2])
    softmax_b = tf.get_variable("softmax_b", [2])
    logits = tf.matmul(output, softmax_w) + softmax_b
    self._prediction = tf.nn.softmax(logits)

    # BUGFIX: sparse_softmax_cross_entropy_with_logits must be called with
    # named arguments — the positional form raises in TF >= 1.0 and the
    # original order (logits first) would have been wrong anyway.
    loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=self._targets, logits=logits)
    self._cost = cost = tf.reduce_sum(loss) / batch_size

    if not is_training:
        return

    self._lr = tf.Variable(0.0, trainable=False)
    tvars = tf.trainable_variables()
    grads, _ = tf.clip_by_global_norm(tf.gradients(cost, tvars),
                                      config.max_grad_norm)
    # NOTE(review): self.lr is presumably a property exposing self._lr
    # defined elsewhere in the class — confirm.
    optimizer = tf.train.GradientDescentOptimizer(self.lr)
    self._train_op = optimizer.apply_gradients(zip(grads, tvars))
示例4: __init__
def __init__(self, loss, global_step, optimizer, learning_rate, clip_gradients=5.0):
    """Build a trainer part of graph.

    Args:
        loss: Tensor that evaluates to model's loss.
        global_step: Tensor with global step of the model.
        optimizer: Name of the optimizer class (SGD, Adam, Adagrad) or class.
        learning_rate: Initial value for the learning-rate variable.
        clip_gradients: Global-norm clip threshold; <= 0 disables clipping.
    """
    self.loss = loss
    self.global_step = global_step
    self._learning_rate = tf.get_variable(
        "learning_rate",
        [],
        initializer=tf.constant_initializer(learning_rate))
    params = tf.trainable_variables()
    self.gradients = tf.gradients(loss, params)
    if clip_gradients > 0.0:
        self.gradients, self.gradients_norm = tf.clip_by_global_norm(
            self.gradients, clip_gradients)
    grads_and_vars = zip(self.gradients, params)
    if isinstance(optimizer, str):
        self._optimizer = OPTIMIZER_CLS_NAMES[
            optimizer](self._learning_rate)
    else:
        # BUGFIX: was `optimizer(self.learning_rate)` — this object defines
        # only `_learning_rate`, so the else branch raised AttributeError.
        self._optimizer = optimizer(self._learning_rate)
    self.trainer = self._optimizer.apply_gradients(grads_and_vars,
                                                   global_step=global_step,
                                                   name="train")
    # Get all initializers for all trainable variables.
    self._initializers = tf.initialize_all_variables()
示例5: create_gen_train_op
def create_gen_train_op(hparams, learning_rate, gen_loss, global_step, mode):
    """Create Generator train op."""
    del hparams  # unused
    with tf.name_scope('train_generator'):
        # Select the optimizer from the command-line flag.
        if FLAGS.generator_optimizer == 'sgd':
            gen_optimizer = tf.train.GradientDescentOptimizer(learning_rate)
        elif FLAGS.generator_optimizer == 'adam':
            gen_optimizer = tf.train.AdamOptimizer(learning_rate)
        else:
            raise NotImplementedError

        # Only variables under the 'gen' scope belong to the generator.
        gen_vars = [v for v in tf.trainable_variables()
                    if v.op.name.startswith('gen')]
        print('Optimizing Generator vars.')
        for v in gen_vars:
            print(v)

        # MAXIMIZE is implemented by differentiating the negated loss.
        if mode == 'MINIMIZE':
            gen_grads = tf.gradients(gen_loss, gen_vars)
        elif mode == 'MAXIMIZE':
            gen_grads = tf.gradients(-gen_loss, gen_vars)
        else:
            raise ValueError("Must be one of 'MINIMIZE' or 'MAXIMIZE'")

        gen_grads_clipped, _ = tf.clip_by_global_norm(gen_grads,
                                                      FLAGS.grad_clipping)
        gen_train_op = gen_optimizer.apply_gradients(
            zip(gen_grads_clipped, gen_vars), global_step=global_step)
        return gen_train_op, gen_grads_clipped, gen_vars
示例6: apply_gradients
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    """Applying gradients and tune hyperparams with YellowFin.

    Args:
        grads_and_vars: List of (gradient, variable) pairs as returned by
            compute_gradients().
        global_step: Optional Variable to increment by one after the
            variables have been updated.
        name: Optional name for the returned operation. Default to the
            name passed to the Optimizer constructor.

    Returns:
        (A group of operations)
        Variable Update with Momentum ops,
        YellowFin ops(Curvature, Variance, Distance) ops,
        SingleStep and lr_mu tuning ops,
        Step increment ops.
    """
    # Drop pairs whose gradient is None.
    self._grad, self._vars = zip(*[(g, t)
                                   for g, t in grads_and_vars if g is not None])

    # Var update with Momentum.
    with tf.variable_scope("apply_updates"):
        # Optional gradient clipping. The momentum update is identical with
        # or without clipping, so build apply_gradients once instead of
        # duplicating the same call in both branches (as the original did).
        if self._clip_thresh_var is not None:
            self._grad, _ = tf.clip_by_global_norm(
                self._grad, self._clip_thresh_var)
        apply_grad_op = self._momentum_optimizer.apply_gradients(
            zip(self._grad, self._vars),
            global_step=global_step,
            name=name)

    # Begin lr and mu tuning.
    with tf.variable_scope("prepare_yellowFin_variables"):
        # the dependencies ideally only need to be after clip is done,
        # i.e. depends on self._grads. However, the control_dependencies
        # does not support indexed slice for sparse gradients.
        # The alternative dependencies here might be slightly slower due
        # to less parallelization.
        with tf.control_dependencies([apply_grad_op, ]):
            prepare_variables_op = self._prepare_variables()

    with tf.variable_scope("yellowfin"):
        with tf.control_dependencies([prepare_variables_op]):
            yellowfin_op = self._yellowfin()

    # Update YellowFin step variable.
    with tf.control_dependencies([yellowfin_op]):
        self._increment_step_op = tf.assign_add(self._step, 1).op

    return tf.group(apply_grad_op,
                    prepare_variables_op,
                    yellowfin_op,
                    self._increment_step_op)
示例7: build_rmsprop_optimizer
def build_rmsprop_optimizer(self, learning_rate, rmsprop_decay, rmsprop_constant, gradient_clip, version):
    """Build a plain or Graves-style RMSProp train op for self.loss.

    Args:
        learning_rate: Step size for the underlying gradient-descent update.
        rmsprop_decay: EMA decay for the running grad / squared-grad stats.
        rmsprop_constant: Epsilon added inside the sqrt for stability.
        gradient_clip: Global-norm clip threshold; <= 0 disables clipping.
        version: 'rmsprop' (SGD on the clipped grads) or 'graves_rmsprop'.
    """
    with tf.name_scope('rmsprop'):
        optimizer = tf.train.GradientDescentOptimizer(learning_rate)
        grads_and_vars = optimizer.compute_gradients(self.loss)
        grads = [gv[0] for gv in grads_and_vars]
        params = [gv[1] for gv in grads_and_vars]
        if gradient_clip > 0:
            # BUGFIX: tf.clip_by_global_norm returns (clipped_list, norm);
            # the original assigned the whole tuple to `grads`, breaking
            # every downstream use of the gradient list.
            grads, _ = tf.clip_by_global_norm(grads, gradient_clip)

        if version == 'rmsprop':
            return optimizer.apply_gradients(zip(grads, params))
        elif version == 'graves_rmsprop':
            # Running estimates of E[g] and E[g^2], one variable per param.
            # (Removed the unused `square_grads` list from the original.)
            avg_grads = [tf.Variable(tf.ones(var.get_shape())) for var in params]
            avg_square_grads = [tf.Variable(tf.ones(var.get_shape())) for var in params]
            update_avg_grads = [
                grad_pair[0].assign((rmsprop_decay * grad_pair[0])
                                    + ((1 - rmsprop_decay) * grad_pair[1]))
                for grad_pair in zip(avg_grads, grads)]
            update_avg_square_grads = [
                grad_pair[0].assign((rmsprop_decay * grad_pair[0])
                                    + ((1 - rmsprop_decay) * tf.square(grad_pair[1])))
                for grad_pair in zip(avg_square_grads, grads)]
            avg_grad_updates = update_avg_grads + update_avg_square_grads
            # RMS normalizer: sqrt(E[g^2] - E[g]^2 + eps).
            rms = [tf.sqrt(avg_grad_pair[1] - tf.square(avg_grad_pair[0]) + rmsprop_constant)
                   for avg_grad_pair in zip(avg_grads, avg_square_grads)]
            rms_updates = [grad_rms_pair[0] / grad_rms_pair[1]
                           for grad_rms_pair in zip(grads, rms)]
            train = optimizer.apply_gradients(zip(rms_updates, params))
            return tf.group(train, tf.group(*avg_grad_updates))
示例8: make_train_op
def make_train_op(local_net, global_net):
    """
    Use gradients from local network to update the global network
    """
    # The optimizer wants matched (grad, var) pairs. We deliberately pair
    # the *local* network's gradients with the *global* network's
    # variables so each worker updates the shared parameters.
    local_grads, _ = zip(*local_net.grads_and_vars)
    # Clip gradients to avoid large values.
    local_grads, _ = tf.clip_by_global_norm(local_grads, 5.0)
    _, global_vars = zip(*global_net.grads_and_vars)
    pairs = list(zip(local_grads, global_vars))
    # One gradient-descent step: var = var - learning_rate * grad.
    return global_net.optimizer.apply_gradients(
        pairs,
        global_step=tf.train.get_global_step())
示例9: __init__
def __init__(self, model, optimizer, learning_rate, clip_gradients=5.0):
    """Build a trainer part of graph.

    Args:
        model: Model object, that has loss and global_step attributes.
        optimizer: Name of the optimizer class (SGD, Adam, Adagrad) or class.
        learning_rate: Initial value for the learning-rate variable.
        clip_gradients: Global-norm clip threshold; <= 0 disables clipping.
    """
    self.model = model
    self._learning_rate = tf.get_variable(
        "learning_rate",
        [],
        initializer=tf.constant_initializer(learning_rate))
    params = tf.trainable_variables()
    self.gradients = tf.gradients(model.loss, params)
    if clip_gradients > 0.0:
        self.gradients, self.gradients_norm = tf.clip_by_global_norm(
            self.gradients, clip_gradients)
    grads_and_vars = zip(self.gradients, params)
    if isinstance(optimizer, str):
        self._optimizer = OPTIMIZER_CLS_NAMES[optimizer](self._learning_rate)
    else:
        # BUGFIX: was `optimizer(self.learning_rate)` — this object defines
        # only `_learning_rate`, so the else branch raised AttributeError.
        self._optimizer = optimizer(self._learning_rate)
    self.trainer = self._optimizer.apply_gradients(grads_and_vars,
                                                   global_step=model.global_step,
                                                   name="train")
    # Get all initializers for all trainable variables.
    self._initializers = tf.initialize_all_variables()
示例10: defineTensorGradientDescent
def defineTensorGradientDescent(self):
    """Create the Adam train op: clip gradients of self.cost by global norm
    and apply them to all trainable variables."""
    self._learningRate = tf.Variable(0.0, trainable=False)
    train_vars = tf.trainable_variables()
    clipped, _ = tf.clip_by_global_norm(
        tf.gradients(self.cost, train_vars), self.config.max_grad_norm)
    # NOTE(review): self.learningRate is presumably a property exposing
    # self._learningRate defined elsewhere in the class — confirm.
    adam = tf.train.AdamOptimizer(self.learningRate)
    self._tensorGradientDescentTrainingOperation = adam.apply_gradients(
        zip(clipped, train_vars))
示例11: __init__
def __init__(self, vocab_size, size, num_layers, max_gradient_norm, batch_size, learning_rate,
             learning_rate_decay_factor, dropout, forward_only=False):
    """Sequence-to-sequence model: builds placeholders, encoder/decoder,
    loss, and (unless forward_only) an Adam train op with clipping."""
    self.size = size
    self.vocab_size = vocab_size
    self.batch_size = batch_size
    self.num_layers = num_layers
    self.keep_prob = 1.0 - dropout

    # Learning-rate variable plus an op that decays it in place.
    self.learning_rate = tf.Variable(float(learning_rate), trainable=False)
    self.learning_rate_decay_op = self.learning_rate.assign(
        self.learning_rate * learning_rate_decay_factor)
    self.global_step = tf.Variable(0, trainable=False)

    # Time-major [time, batch] token and mask placeholders.
    self.source_tokens = tf.placeholder(tf.int32, shape=[None, self.batch_size], name="source_tokens")
    self.target_tokens = tf.placeholder(tf.int32, shape=[None, self.batch_size], name="target_tokens")
    self.source_mask = tf.placeholder(tf.int32, shape=[None, self.batch_size], name="source_mask")
    self.target_mask = tf.placeholder(tf.int32, shape=[None, self.batch_size], name="target_mask")
    # Per-example lengths derived by summing the masks over time.
    self.source_length = tf.reduce_sum(self.source_mask, reduction_indices=0)
    self.target_length = tf.reduce_sum(self.target_mask, reduction_indices=0)

    self.setup_embeddings()
    self.setup_encoder()
    self.setup_decoder()
    self.setup_loss()

    params = tf.trainable_variables()
    if not forward_only:
        opt = tf.train.AdamOptimizer(self.learning_rate)
        gradients = tf.gradients(self.losses, params)
        clipped_gradients, norm = tf.clip_by_global_norm(gradients, max_gradient_norm)
        self.gradient_norms = norm
        self.updates = opt.apply_gradients(
            zip(clipped_gradients, params), global_step=self.global_step)
    self.saver = tf.train.Saver(tf.all_variables())
示例12: training
def training(hypes, loss, global_step, learning_rate, opt=None):
    """Sets up the training Ops.

    Creates a summarizer to track the loss over time in TensorBoard.
    Creates an optimizer and applies the gradients to all trainable variables.
    The Op returned by this function is what must be passed to the
    `sess.run()` call to cause the model to train.

    Args:
        hypes: Hyperparameter dict; reads hypes['solver'] and
            hypes['clip_norm'], writes hypes['tensors'] and hypes['opt'].
        loss: Dict of loss tensors, from loss(); uses loss['total_loss'].
        global_step: Integer Variable counting the number of training steps
            processed.
        learning_rate: The learning rate to use for gradient descent.
        opt: Optional pre-built optimizer; one is created from
            hypes['solver'] when None.

    Returns:
        train_op: The Op for training.
    """
    sol = hypes["solver"]
    hypes['tensors'] = {}
    hypes['tensors']['global_step'] = global_step
    total_loss = loss['total_loss']
    with tf.name_scope('training'):
        if opt is None:
            if sol['opt'] == 'RMS':
                opt = tf.train.RMSPropOptimizer(learning_rate=learning_rate,
                                                decay=0.9,
                                                epsilon=sol['epsilon'])
            elif sol['opt'] == 'Adam':
                opt = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                             epsilon=sol['adam_eps'])
            elif sol['opt'] == 'SGD':
                lr = learning_rate
                opt = tf.train.GradientDescentOptimizer(learning_rate=lr)
            else:
                raise ValueError('Unrecognized opt type')
        hypes['opt'] = opt

        grads_and_vars = opt.compute_gradients(total_loss)
        if hypes['clip_norm'] > 0:
            grads, tvars = zip(*grads_and_vars)
            clip_norm = hypes["clip_norm"]
            clipped_grads, norm = tf.clip_by_global_norm(grads, clip_norm)
            # BUGFIX: materialize as a list. In Python 3 `zip` is a one-shot
            # iterator; the original built a first, discarded train op that
            # consumed it, leaving the real apply_gradients call below with
            # an empty pair list.
            grads_and_vars = list(zip(clipped_grads, tvars))

        # Run UPDATE_OPS (e.g. batch-norm moving averages) before the
        # gradient step; build apply_gradients exactly once.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            train_op = opt.apply_gradients(grads_and_vars,
                                           global_step=global_step)
    return train_op
示例13: fit
def fit(self, data_function):
    """Train an LSTM classifier on data_function.train for self.n_step batches,
    logging the loss every 100 steps."""
    with tf.Graph().as_default(), tf.Session() as sess:
        n, s, p = data_function.train.X.shape
        X_pl = tf.placeholder(tf.float32, [self.batch_size, s, p])
        Y_pl = tf.placeholder(tf.float32, [self.batch_size, p])

        # BUGFIX: construct a distinct cell per layer. `[cell] * num_layers`
        # hands MultiRNNCell the *same* cell object for every layer, so the
        # layers share weights (and newer TF versions reject it outright).
        cell = tf.nn.rnn_cell.MultiRNNCell(
            [rnn_cell.BasicLSTMCell(self.hidden_size)
             for _ in xrange(self.num_layers)])

        # The legacy rnn() API takes a Python list of per-timestep inputs.
        outputs, _ = rnn.rnn(cell, [X_pl[:, i, :] for i in xrange(s)],
                             dtype=tf.float32)

        softmax_w = tf.get_variable("softmax_w", [self.hidden_size, p])
        softmax_b = tf.get_variable("softmax_b", [p])
        # Classify from the final timestep's output.
        logits = tf.matmul(outputs[-1], softmax_w) + softmax_b
        loss = loss_dict['ce'](logits, Y_pl)

        tvars = tf.trainable_variables()
        print([i.get_shape() for i in tvars])
        grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars),
                                          self.max_grad_norm)
        optimizer = tf.train.AdamOptimizer()
        train_op = optimizer.apply_gradients(zip(grads, tvars))

        # (Removed an unused random_uniform_initializer local — it was
        # never passed to any variable scope.)
        tf.initialize_all_variables().run()

        for i in xrange(self.n_step):
            batch_xs, batch_ys = data_function.train.next_batch(
                self.batch_size)
            feed_dict = {X_pl: batch_xs, Y_pl: batch_ys}
            _, loss_value = sess.run([train_op, loss],
                                     feed_dict=feed_dict)
            if i % 100 == 0:
                PrintMessage(data_function.train.epochs_completed,
                             loss_value, 0, 0)
示例14: create_optimizer
def create_optimizer(loss, init_lr, num_train_steps, num_warmup_steps, use_tpu):
    """Creates an optimizer training op."""
    global_step = tf.train.get_or_create_global_step()

    # Linearly decay the learning rate from init_lr down to 0 over training.
    learning_rate = tf.train.polynomial_decay(
        tf.constant(value=init_lr, shape=[], dtype=tf.float32),
        global_step,
        num_train_steps,
        end_learning_rate=0.0,
        power=1.0,
        cycle=False)

    # Linear warmup: while global_step < num_warmup_steps the effective
    # rate is global_step / num_warmup_steps * init_lr.
    if num_warmup_steps:
        step_int = tf.cast(global_step, tf.int32)
        warmup_int = tf.constant(num_warmup_steps, dtype=tf.int32)
        warmup_frac = tf.cast(step_int, tf.float32) / tf.cast(warmup_int, tf.float32)
        warmup_lr = init_lr * warmup_frac
        in_warmup = tf.cast(step_int < warmup_int, tf.float32)
        learning_rate = (1.0 - in_warmup) * learning_rate + in_warmup * warmup_lr

    # It is recommended that you use this optimizer for fine tuning, since
    # this is how the model was trained (note that the Adam m/v variables
    # are NOT loaded from init_checkpoint.)
    optimizer = AdamWeightDecayOptimizer(
        learning_rate=learning_rate,
        weight_decay_rate=0.01,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-6,
        exclude_from_weight_decay=["LayerNorm", "layer_norm", "bias"])
    if use_tpu:
        optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)

    tvars = tf.trainable_variables()
    grads = tf.gradients(loss, tvars)
    # This is how the model was pre-trained.
    (grads, _) = tf.clip_by_global_norm(grads, clip_norm=1.0)
    train_op = optimizer.apply_gradients(
        zip(grads, tvars), global_step=global_step)

    # Normally the global step update is done inside of `apply_gradients`.
    # However, `AdamWeightDecayOptimizer` doesn't do this. But if you use
    # a different optimizer, you should probably take this line out.
    new_global_step = global_step + 1
    train_op = tf.group(train_op, [global_step.assign(new_global_step)])
    return train_op
示例15: training_ops
def training_ops(self, loss, max_grad_norm=5.0):
    """Return the train op: gradients of `loss`, clipped by global norm,
    applied by this object's optimizer.

    Args:
        loss: Scalar loss tensor to differentiate.
        max_grad_norm: Global-norm clipping threshold. Defaults to 5.0,
            the value previously hard-coded, so existing callers are
            unaffected.
    """
    opt = self.get_optimizer()
    params = tf.trainable_variables()
    gradients = tf.gradients(loss, params)
    clipped_gradients, _ = tf.clip_by_global_norm(gradients, max_grad_norm)
    return opt.apply_gradients(zip(clipped_gradients, params),
                               global_step=self.global_step)