

Python tensorflow.global_norm Method Code Examples

This article collects typical usage examples of the Python method tensorflow.global_norm. If you are wondering what tensorflow.global_norm does or how to use it, the curated code examples below may help. You can also explore further usage examples of this method within tensorflow.


The following presents 9 code examples of the tensorflow.global_norm method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
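
As background for the examples (a minimal sketch, assuming the TensorFlow 1.x API used throughout this article): tf.global_norm treats a list of tensors as one flattened vector and returns its L2 norm, i.e. sqrt(sum_i ||t_i||^2).

import tensorflow as tf  # TensorFlow 1.x

# tf.global_norm([t1, t2, ...]) == sqrt(||t1||^2 + ||t2||^2 + ...)
a = tf.constant([3.0, 4.0])       # ||a|| = 5
b = tf.constant([[5.0], [12.0]])  # ||b|| = 13
norm = tf.global_norm([a, b])     # sqrt(25 + 169) = sqrt(194)

with tf.Session() as sess:
    print(sess.run(norm))  # ~13.928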

Example 1: _add_gradients_summaries

# Required module: import tensorflow [as alias]
# Or: from tensorflow import global_norm [as alias]
def _add_gradients_summaries(grads_and_vars):
  """Add histogram summaries to gradients.

  Note: The summaries are also added to the SUMMARIES collection.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).

  Returns:
    The _list_ of the added summaries for grads_and_vars.
  """
  summaries = []
  for grad, var in grads_and_vars:
    if grad is not None:
      if isinstance(grad, tf.IndexedSlices):
        grad_values = grad.values
      else:
        grad_values = grad
      summaries.append(tf.summary.histogram(var.op.name + ':gradient',
                                            grad_values))
      summaries.append(tf.summary.histogram(var.op.name + ':gradient_norm',
                                            tf.global_norm([grad_values])))
    else:
      tf.logging.info('Var %s has no gradient', var.op.name)
  return summaries 
Developer ID: ringringyi, Project: DOTA_models, Lines of code: 27, Source: model_deploy.py
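
A hypothetical usage sketch (the variable x, loss, and optimizer below are illustrative, not from the source project): _add_gradients_summaries takes the (gradient, variable) pairs produced by an optimizer's compute_gradients, so it can be wired into a TF 1.x training graph like this:

import tensorflow as tf

x = tf.Variable([1.0, 2.0])
loss = tf.reduce_sum(tf.square(x))
optimizer = tf.train.GradientDescentOptimizer(0.1)
grads_and_vars = optimizer.compute_gradients(loss)
summaries = _add_gradients_summaries(grads_and_vars)  # histogram + norm per gradient
train_op = optimizer.apply_gradients(grads_and_vars)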

Example 2: _update_value_step

# Required module: import tensorflow [as alias]
# Or: from tensorflow import global_norm [as alias]
def _update_value_step(self, observ, reward, length):
    """Compute the current value loss and perform a gradient update step.

    Args:
      observ: Sequences of observations.
      reward: Sequences of reward.
      length: Batch of sequence lengths.

    Returns:
      Tuple of loss tensor and summary tensor.
    """
    loss, summary = self._value_loss(observ, reward, length)
    gradients, variables = (
        zip(*self._value_optimizer.compute_gradients(loss)))
    optimize = self._value_optimizer.apply_gradients(
        zip(gradients, variables))
    summary = tf.summary.merge([
        summary,
        tf.summary.scalar('gradient_norm', tf.global_norm(gradients)),
        utility.gradient_summaries(
            zip(gradients, variables), dict(value=r'.*'))])
    with tf.control_dependencies([optimize]):
      return [tf.identity(loss), tf.identity(summary)] 
Developer ID: utra-robosoccer, Project: soccer-matlab, Lines of code: 25, Source: algorithm.py

Example 3: __init__

# Required module: import tensorflow [as alias]
# Or: from tensorflow import global_norm [as alias]
def __init__(self, state_size, action_size, lr,
               name, n_h1=400, n_h2=300, global_name='global'):

    self.state_size = state_size
    self.action_size = action_size
    self.name = name
    self.n_h1 = n_h1
    self.n_h2 = n_h2

    self.optimizer = tf.train.AdamOptimizer(lr)
    self.input_s, self.input_a, self.advantage, self.target_v, self.policy, self.value, self.action_est, self.model_variables = self._build_network(
        name)

    # Loss term weights (an alternative 0.5 / 0.2 weighting is commented out below)
    self.value_loss = 0.5 * tf.reduce_sum(tf.square(self.target_v - tf.reshape(self.value, [-1])))
    # sum(p * log p) is the negative policy entropy; minimizing it encourages exploration
    self.entropy_loss = 1.0 * tf.reduce_sum(self.policy * tf.log(self.policy))
    self.policy_loss = 1.0 * tf.reduce_sum(-tf.log(self.action_est) * self.advantage)
    self.l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in self.model_variables])
    # self.loss = 0.5 * self.value_loss + self.policy_loss + 0.2 * self.entropy_loss
    self.loss = self.value_loss + self.policy_loss + self.entropy_loss
    self.gradients = tf.gradients(self.loss, self.model_variables)
    if name != global_name:
      self.var_norms = tf.global_norm(self.model_variables)
      global_variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, global_name)
      self.apply_gradients = self.optimizer.apply_gradients(zip(self.gradients, global_variables)) 
Developer ID: yrlu, Project: reinforcement_learning, Lines of code: 27, Source: ac_net.py

Example 4: grad_clip_fn

# Required module: import tensorflow [as alias]
# Or: from tensorflow import global_norm [as alias]
def grad_clip_fn(self, loss, tvars, **kargs):
		grads = tf.gradients(loss, tvars)
		grad_clip = self.config.get("grad_clip", "global_norm")
		tf.logging.info(" gradient clip method {}".format(grad_clip))
		if grad_clip == "global_norm":
			clip_norm = self.config.get("clip_norm", 1.0)
			[grads, _] = tf.clip_by_global_norm(grads, 
								clip_norm=clip_norm)
		elif grad_clip == "norm":
			clip_norm = self.config.get("clip_norm", 1.0)
			grads = [tf.clip_by_norm(grad, clip_norm) for grad in grads]
		elif grad_clip == "value":
			clip_min_value = self.config.get("clip_min_value", -1.0)
			clip_max_value = self.config.get("clip_max_value", 1.0)
			grads = [tf.clip_by_value(grad, clip_min_value, clip_max_value) for grad in grads]
		else:
			grads = grads
		return grads 
Developer ID: yyht, Project: BERT, Lines of code: 20, Source: optimizer.py
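
A side note on the strategies above (a sketch with hypothetical values, TF 1.x): "global_norm" clipping rescales all gradients by one shared factor, preserving their relative directions, whereas "norm" clipping rescales each gradient independently:

import tensorflow as tf

g1 = tf.constant([3.0, 4.0])   # norm 5
g2 = tf.constant([6.0, 8.0])   # norm 10, so global norm = sqrt(125)
clipped_global, _ = tf.clip_by_global_norm([g1, g2], clip_norm=1.0)
clipped_each = [tf.clip_by_norm(g, 1.0) for g in (g1, g2)]

with tf.Session() as sess:
    print(sess.run(clipped_global))  # both scaled by 1/sqrt(125); ratio preserved
    print(sess.run(clipped_each))    # each scaled to unit norm; ratio lost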

Example 5: _add_grads_and_vars_to_summaries

# Required module: import tensorflow [as alias]
# Or: from tensorflow import global_norm [as alias]
def _add_grads_and_vars_to_summaries(model_results: ModelResults):
        if model_results.grads_and_vars is not None:
            for grad, var in model_results.grads_and_vars:
                grad_name = ('gradient/' + var.name).replace(':', '_')
                model_utils.add_histogram_summary(grad_name, grad)
                grad_norm = tf.norm(grad)
                grad_norm_name = "gradient_l2_norms/scalar_" + grad_name
                model_utils.add_summary_by_name(grad_norm_name, grad_norm)
            all_grads = list(zip(*model_results.grads_and_vars))[0]
            global_grad_norm = tf.global_norm(all_grads)
            global_norm_name = "_".join(["scalar", "global_gradient_l2_norm"])
            model_utils.add_summary_by_name(global_norm_name, global_grad_norm)

        if model_results.regularization_grads_and_vars is not None:
            for grad, var in model_results.regularization_grads_and_vars:
                grad_name = ('reg_gradient/' + var.name).replace(':', '_')
                model_utils.add_histogram_summary(grad_name, grad) 
Developer ID: audi, Project: nucleus7, Lines of code: 19, Source: model_handler.py
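
One caveat with the per-gradient tf.norm(grad) above, hinted at in Examples 7 and 9: tf.norm expects a dense Tensor, while tf.global_norm also accepts tf.IndexedSlices (typical for embedding gradients). A minimal sketch with made-up values:

import tensorflow as tf

dense = tf.constant([3.0, 4.0])
sparse = tf.IndexedSlices(values=tf.constant([[3.0, 4.0]]),
                          indices=tf.constant([0]),
                          dense_shape=tf.constant([2, 2]))
n1 = tf.global_norm([dense])   # 5.0
n2 = tf.global_norm([sparse])  # 5.0, computed directly on the slice values
# tf.norm(sparse) would require densifying the slices first, which can be
# costly for large embedding matrices.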

Example 6: create_train_op

# Required module: import tensorflow [as alias]
# Or: from tensorflow import global_norm [as alias]
def create_train_op(loss, optimizer, global_step, params):
    with tf.name_scope("create_train_op"):
        grads_and_vars = optimizer.compute_gradients(
            loss, colocate_gradients_with_ops=True)
        gradients = [item[0] for item in grads_and_vars]
        variables = [item[1] for item in grads_and_vars]

        # Add summaries
        tf.summary.scalar("loss", loss)
        tf.summary.scalar("global_norm/gradient_norm",
                          tf.global_norm(gradients))

        # Gradient clipping
        if isinstance(params.clip_grad_norm or None, float) and params.clip_grad_norm > 0:
            gradients, _ = tf.clip_by_global_norm(gradients,
                                                  params.clip_grad_norm)

        # Update variables
        grads_and_vars = list(zip(gradients, variables))
        train_op = optimizer.apply_gradients(grads_and_vars, global_step)

        return loss, train_op 
Developer ID: Imagist-Shuo, Project: UNMT-SPR, Lines of code: 24, Source: optimize.py

Example 7: clip_by_global_norm_summary

# Required module: import tensorflow [as alias]
# Or: from tensorflow import global_norm [as alias]
def clip_by_global_norm_summary(t_list, clip_norm, norm_name, variables):
    # wrapper around tf.clip_by_global_norm that also does summary ops of norms

    # compute norms
    # use global_norm with one element to handle IndexedSlices vs dense
    norms = [tf.global_norm([t]) for t in t_list]

    # summary ops before clipping
    summary_ops = []
    for ns, v in zip(norms, variables):
        name = 'norm_pre_clip/' + v.name.replace(":", "_")
        summary_ops.append(tf.summary.scalar(name, ns))

    # clip 
    clipped_t_list, tf_norm = tf.clip_by_global_norm(t_list, clip_norm)

    # summary ops after clipping
    norms_post = [tf.global_norm([t]) for t in clipped_t_list]
    for ns, v in zip(norms_post, variables):
        name = 'norm_post_clip/' + v.name.replace(":", "_")
        summary_ops.append(tf.summary.scalar(name, ns))

    summary_ops.append(tf.summary.scalar(norm_name, tf_norm))

    return clipped_t_list, tf_norm, summary_ops 
Developer ID: searobbersduck, Project: ELMo_Chin, Lines of code: 27, Source: training.py
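
A hypothetical usage sketch (the loss tensor and Adam learning rate are assumed, not from the source project): the wrapper slots in wherever plain tf.clip_by_global_norm would be called, with the extra summary ops merged into the training summaries:

import tensorflow as tf

# assumes a scalar `loss` tensor already exists in the graph
variables = tf.trainable_variables()
grads = tf.gradients(loss, variables)
clipped, global_norm, summary_ops = clip_by_global_norm_summary(
    grads, clip_norm=10.0, norm_name='global_grad_norm', variables=variables)
train_op = tf.train.AdamOptimizer(1e-3).apply_gradients(zip(clipped, variables))
summary_op = tf.summary.merge(summary_ops)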

Example 8: _add_gradients_summaries

# Required module: import tensorflow [as alias]
# Or: from tensorflow import global_norm [as alias]
def _add_gradients_summaries(grads_and_vars):
  """Add histogram summaries to gradients.

  Note: The summaries are also added to the SUMMARIES collection.

  Args:
    grads_and_vars: A list of gradient to variable pairs (tuples).

  Returns:
    The _list_ of the added summaries for grads_and_vars.
  """
  summaries = []
  for grad, var in grads_and_vars:
    if grad is not None:
      if isinstance(grad, tf.IndexedSlices):
        grad_values = grad.values
      else:
        grad_values = grad
      # tf.histogram_summary is the pre-1.0 summary API,
      # renamed tf.summary.histogram in later releases (compare Example 1)
      summaries.append(tf.histogram_summary(var.op.name + ':gradient',
                                            grad_values))
      summaries.append(tf.histogram_summary(var.op.name + ':gradient_norm',
                                            tf.global_norm([grad_values])))
    else:
      tf.logging.info('Var %s has no gradient', var.op.name)
  return summaries 
Developer ID: shiyemin, Project: shuttleNet, Lines of code: 27, Source: model_deploy.py

Example 9: clip_by_global_norm_summary

# Required module: import tensorflow [as alias]
# Or: from tensorflow import global_norm [as alias]
def clip_by_global_norm_summary(t_list, clip_norm, norm_name, variables):
    # wrapper around tf.clip_by_global_norm that also does summary ops of norms

    # compute norms
    # use global_norm with one element to handle IndexedSlices vs dense
    norms = [tf.global_norm([t]) for t in t_list]

    # summary ops before clipping
    summary_ops = []
    for ns, v in zip(norms, variables):
        name = 'norm_pre_clip/' + v.name
        summary_ops.append(tf.summary.scalar(name, ns))

    # clip 
    clipped_t_list, tf_norm = tf.clip_by_global_norm(t_list, clip_norm)

    # summary ops after clipping
    norms_post = [tf.global_norm([t]) for t in clipped_t_list]
    for ns, v in zip(norms_post, variables):
        name = 'norm_post_clip/' + v.name
        summary_ops.append(tf.summary.scalar(name, ns))

    summary_ops.append(tf.summary.scalar(norm_name, tf_norm))

    return clipped_t_list, tf_norm, summary_ops 
Developer ID: kermitt2, Project: delft, Lines of code: 27, Source: training.py


Note: The tensorflow.global_norm examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright of the source code belongs to the original authors. Refer to each project's License before redistributing or using the code; do not republish without permission.