This article collects typical usage examples of tensorflow.global_norm in Python. If you are wondering what tensorflow.global_norm does and how to use it, the curated examples below may help. You can also explore further usage examples from the tensorflow module to which the method belongs.
The following shows 9 code examples of tensorflow.global_norm, ordered by popularity by default.
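Before the examples, a minimal self-contained sketch (the tensor values are made up for illustration) of what tf.global_norm computes: the square root of the sum of squares of every element across all tensors in the list, which is why it is usually paired with tf.clip_by_global_norm for gradient clipping.

import tensorflow as tf  # TF 1.x API, as used in the examples below

t1 = tf.constant([[1.0, 2.0], [3.0, 4.0]])
t2 = tf.constant([5.0, 6.0])

# global_norm = sqrt(1 + 4 + 9 + 16 + 25 + 36) = sqrt(91) ≈ 9.539
norm = tf.global_norm([t1, t2])

with tf.Session() as sess:
    print(sess.run(norm))  # prints roughly 9.539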
Example 1: _add_gradients_summaries
# Required import: import tensorflow [as alias]
# Or: from tensorflow import global_norm [as alias]
def _add_gradients_summaries(grads_and_vars):
  """Add histogram summaries to gradients.

  Note: The summaries are also added to the SUMMARIES collection.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).

  Returns:
    The _list_ of the added summaries for grads_and_vars.
  """
  summaries = []
  for grad, var in grads_and_vars:
    if grad is not None:
      if isinstance(grad, tf.IndexedSlices):
        grad_values = grad.values
      else:
        grad_values = grad
      summaries.append(tf.summary.histogram(var.op.name + ':gradient',
                                            grad_values))
      summaries.append(tf.summary.histogram(var.op.name + ':gradient_norm',
                                            tf.global_norm([grad_values])))
    else:
      tf.logging.info('Var %s has no gradient', var.op.name)
  return summaries
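For context, a minimal usage sketch of the helper above; the variable, loss, optimizer, and log directory are assumptions made for illustration, not part of the source.

import tensorflow as tf

w = tf.get_variable("w", shape=[3], initializer=tf.zeros_initializer())
loss = tf.reduce_sum(tf.square(w - 1.0))
optimizer = tf.train.GradientDescentOptimizer(0.1)
grads_and_vars = optimizer.compute_gradients(loss)

# Merge the returned histogram/norm summaries and write them once.
merged = tf.summary.merge(_add_gradients_summaries(grads_and_vars))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter("/tmp/grad_summaries", sess.graph)
    writer.add_summary(sess.run(merged), global_step=0)
    writer.close()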
Example 2: _update_value_step
# Required import: import tensorflow [as alias]
# Or: from tensorflow import global_norm [as alias]
def _update_value_step(self, observ, reward, length):
  """Compute the current value loss and perform a gradient update step.

  Args:
    observ: Sequences of observations.
    reward: Sequences of reward.
    length: Batch of sequence lengths.

  Returns:
    Tuple of loss tensor and summary tensor.
  """
  loss, summary = self._value_loss(observ, reward, length)
  gradients, variables = (
      zip(*self._value_optimizer.compute_gradients(loss)))
  optimize = self._value_optimizer.apply_gradients(
      zip(gradients, variables))
  summary = tf.summary.merge([
      summary,
      tf.summary.scalar('gradient_norm', tf.global_norm(gradients)),
      utility.gradient_summaries(
          zip(gradients, variables), dict(value=r'.*'))])
  with tf.control_dependencies([optimize]):
    return [tf.identity(loss), tf.identity(summary)]
Example 3: __init__
# Required import: import tensorflow [as alias]
# Or: from tensorflow import global_norm [as alias]
def __init__(self, state_size, action_size, lr,
             name, n_h1=400, n_h2=300, global_name='global'):
    self.state_size = state_size
    self.action_size = action_size
    self.name = name
    self.n_h1 = n_h1
    self.n_h2 = n_h2
    self.optimizer = tf.train.AdamOptimizer(lr)
    self.input_s, self.input_a, self.advantage, self.target_v, self.policy, self.value, self.action_est, self.model_variables = self._build_network(
        name)
    # loss coefficients: 0.5, 0.2, 1.0
    self.value_loss = 0.5 * tf.reduce_sum(tf.square(self.target_v - tf.reshape(self.value, [-1])))
    self.entropy_loss = 1.0 * tf.reduce_sum(self.policy * tf.log(self.policy))
    self.policy_loss = 1.0 * tf.reduce_sum(-tf.log(self.action_est) * self.advantage)
    self.l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in self.model_variables])
    # self.loss = 0.5 * self.value_loss + self.policy_loss + 0.2 * self.entropy_loss
    self.loss = self.value_loss + self.policy_loss + self.entropy_loss
    self.gradients = tf.gradients(self.loss, self.model_variables)
    if name != global_name:
        self.var_norms = tf.global_norm(self.model_variables)
        global_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, global_name)
        self.apply_gradients = self.optimizer.apply_gradients(zip(self.gradients, global_variables))
Example 4: grad_clip_fn
# Required import: import tensorflow [as alias]
# Or: from tensorflow import global_norm [as alias]
def grad_clip_fn(self, loss, tvars, **kargs):
    grads = tf.gradients(loss, tvars)
    grad_clip = self.config.get("grad_clip", "global_norm")
    tf.logging.info(" gradient clip method {}".format(grad_clip))
    if grad_clip == "global_norm":
        clip_norm = self.config.get("clip_norm", 1.0)
        [grads, _] = tf.clip_by_global_norm(grads,
                                            clip_norm=clip_norm)
    elif grad_clip == "norm":
        clip_norm = self.config.get("clip_norm", 1.0)
        grads = [tf.clip_by_norm(grad, clip_norm) for grad in grads]
    elif grad_clip == "value":
        clip_min_value = self.config.get("clip_min_value", -1.0)
        clip_max_value = self.config.get("clip_max_value", 1.0)
        grads = [tf.clip_by_value(grad, clip_min_value, clip_max_value)
                 for grad in grads]
    else:
        grads = grads
    return grads
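To make the three branches concrete, a standalone sketch follows; the gradient values and clip thresholds are assumptions for illustration.

import tensorflow as tf

grads = [tf.constant([3.0, 4.0]), tf.constant([6.0, 8.0])]  # global norm = sqrt(125) ≈ 11.18

# "global_norm": rescale all gradients jointly so their global norm is at most clip_norm
clipped_joint, global_norm = tf.clip_by_global_norm(grads, clip_norm=1.0)

# "norm": rescale each gradient independently so its own norm is at most clip_norm
clipped_per_tensor = [tf.clip_by_norm(g, 1.0) for g in grads]

# "value": clamp every element into [clip_min_value, clip_max_value]
clipped_values = [tf.clip_by_value(g, -1.0, 1.0) for g in grads]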
Example 5: _add_grads_and_vars_to_summaries
# Required import: import tensorflow [as alias]
# Or: from tensorflow import global_norm [as alias]
def _add_grads_and_vars_to_summaries(model_results: ModelResults):
    if model_results.grads_and_vars is not None:
        for grad, var in model_results.grads_and_vars:
            grad_name = ('gradient/' + var.name).replace(':', '_')
            model_utils.add_histogram_summary(grad_name, grad)
            grad_norm = tf.norm(grad)
            grad_norm_name = "gradient_l2_norms/scalar_" + grad_name
            model_utils.add_summary_by_name(grad_norm_name, grad_norm)
        all_grads = list(zip(*model_results.grads_and_vars))[0]
        global_grad_norm = tf.global_norm(all_grads)
        global_norm_name = "_".join(["scalar", "global_gradient_l2_norm"])
        model_utils.add_summary_by_name(global_norm_name, global_grad_norm)
    if model_results.regularization_grads_and_vars is not None:
        for grad, var in model_results.regularization_grads_and_vars:
            grad_name = ('reg_gradient/' + var.name).replace(':', '_')
            model_utils.add_histogram_summary(grad_name, grad)
Example 6: create_train_op
# Required import: import tensorflow [as alias]
# Or: from tensorflow import global_norm [as alias]
def create_train_op(loss, optimizer, global_step, params):
    with tf.name_scope("create_train_op"):
        grads_and_vars = optimizer.compute_gradients(
            loss, colocate_gradients_with_ops=True)
        gradients = [item[0] for item in grads_and_vars]
        variables = [item[1] for item in grads_and_vars]

        # Add summaries
        tf.summary.scalar("loss", loss)
        tf.summary.scalar("global_norm/gradient_norm",
                          tf.global_norm(gradients))

        # Gradient clipping
        if isinstance(params.clip_grad_norm or None, float) and params.clip_grad_norm > 0:
            gradients, _ = tf.clip_by_global_norm(gradients,
                                                  params.clip_grad_norm)

        # Update variables
        grads_and_vars = list(zip(gradients, variables))
        train_op = optimizer.apply_gradients(grads_and_vars, global_step)

        return loss, train_op
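A hypothetical call sketch for create_train_op; the Params container, model, and hyper-parameter values are assumptions, not part of the source.

import collections
import tensorflow as tf

# Hypothetical hyper-parameter container standing in for the original `params` object.
Params = collections.namedtuple("Params", ["clip_grad_norm"])

w = tf.get_variable("w", shape=[10], initializer=tf.zeros_initializer())
loss = tf.reduce_mean(tf.square(w - 1.0))
global_step = tf.train.get_or_create_global_step()
optimizer = tf.train.AdamOptimizer(1e-3)

loss, train_op = create_train_op(loss, optimizer, global_step, Params(clip_grad_norm=5.0))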
Example 7: clip_by_global_norm_summary
# Required import: import tensorflow [as alias]
# Or: from tensorflow import global_norm [as alias]
def clip_by_global_norm_summary(t_list, clip_norm, norm_name, variables):
    # wrapper around tf.clip_by_global_norm that also does summary ops of norms

    # compute norms
    # use global_norm with one element to handle IndexedSlices vs dense
    norms = [tf.global_norm([t]) for t in t_list]

    # summary ops before clipping
    summary_ops = []
    for ns, v in zip(norms, variables):
        name = 'norm_pre_clip/' + v.name.replace(":", "_")
        summary_ops.append(tf.summary.scalar(name, ns))

    # clip
    clipped_t_list, tf_norm = tf.clip_by_global_norm(t_list, clip_norm)

    # summary ops after clipping
    norms_post = [tf.global_norm([t]) for t in clipped_t_list]
    for ns, v in zip(norms_post, variables):
        name = 'norm_post_clip/' + v.name.replace(":", "_")
        summary_ops.append(tf.summary.scalar(name, ns))

    summary_ops.append(tf.summary.scalar(norm_name, tf_norm))
    return clipped_t_list, tf_norm, summary_ops
Example 8: _add_gradients_summaries
# Required import: import tensorflow [as alias]
# Or: from tensorflow import global_norm [as alias]
def _add_gradients_summaries(grads_and_vars):
  """Add histogram summaries to gradients.

  Note: The summaries are also added to the SUMMARIES collection.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).

  Returns:
    The _list_ of the added summaries for grads_and_vars.
  """
  summaries = []
  for grad, var in grads_and_vars:
    if grad is not None:
      if isinstance(grad, tf.IndexedSlices):
        grad_values = grad.values
      else:
        grad_values = grad
      summaries.append(tf.histogram_summary(var.op.name + ':gradient',
                                            grad_values))
      summaries.append(tf.histogram_summary(var.op.name + ':gradient_norm',
                                            tf.global_norm([grad_values])))
    else:
      tf.logging.info('Var %s has no gradient', var.op.name)
  return summaries
Example 9: clip_by_global_norm_summary
# Required import: import tensorflow [as alias]
# Or: from tensorflow import global_norm [as alias]
def clip_by_global_norm_summary(t_list, clip_norm, norm_name, variables):
    # wrapper around tf.clip_by_global_norm that also does summary ops of norms

    # compute norms
    # use global_norm with one element to handle IndexedSlices vs dense
    norms = [tf.global_norm([t]) for t in t_list]

    # summary ops before clipping
    summary_ops = []
    for ns, v in zip(norms, variables):
        name = 'norm_pre_clip/' + v.name
        summary_ops.append(tf.summary.scalar(name, ns))

    # clip
    clipped_t_list, tf_norm = tf.clip_by_global_norm(t_list, clip_norm)

    # summary ops after clipping
    norms_post = [tf.global_norm([t]) for t in clipped_t_list]
    for ns, v in zip(norms_post, variables):
        name = 'norm_post_clip/' + v.name
        summary_ops.append(tf.summary.scalar(name, ns))

    summary_ops.append(tf.summary.scalar(norm_name, tf_norm))
    return clipped_t_list, tf_norm, summary_ops