

Python v1.clip_by_global_norm Method Code Examples

This article collects typical usage examples of the Python method tensorflow.compat.v1.clip_by_global_norm. If you are wondering how exactly v1.clip_by_global_norm is used in Python, or what it looks like in practice, the curated code examples below may help. You can also explore further usage examples from its containing module, tensorflow.compat.v1.


The following presents 9 code examples of the v1.clip_by_global_norm method, sorted by popularity by default.
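Before the individual examples, the minimal sketch below (not taken from any of the projects credited later; names and values are made up for demonstration) shows the core behavior, assuming TensorFlow 2.x with the v1 compatibility layer: tf.clip_by_global_norm takes a list of tensors and a clipping threshold clip_norm, and returns the rescaled list together with the global norm computed before clipping.

# Minimal illustrative sketch; not from any of the example projects below.
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

# Two "gradient" tensors; their combined (global) l2 norm is sqrt(9 + 16 + 0 + 144) = 13.
grads = [tf.constant([3.0, 4.0]), tf.constant([0.0, 12.0])]
clipped, global_norm = tf.clip_by_global_norm(grads, clip_norm=5.0)

with tf.Session() as sess:
  norm_val, clipped_vals = sess.run([global_norm, clipped])
  print(norm_val)      # 13.0 -- the norm before clipping
  print(clipped_vals)  # each tensor scaled by 5/13, so the clipped global norm is 5.0

If the global norm is already at or below clip_norm, the tensors are returned unchanged.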

Example 1: clip_gradients_in_scope

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import clip_by_global_norm [as alias]
def clip_gradients_in_scope(grads_and_vars, scope, max_grad_norm):
  """DOC."""
  if max_grad_norm == 0:
    return grads_and_vars
  else:
    grads_in_scope = []
    vars_in_scope = []
    for grad, var in grads_and_vars:
      if is_var_in_scope(var, scope):
        grads_in_scope.append(grad)
        vars_in_scope.append(var)
    clipped_grads_in_scope, _ = tf.clip_by_global_norm(
        grads_in_scope, max_grad_norm)
    new_grads_and_vars = []
    for grad, var in grads_and_vars:
      if vars_in_scope and var is vars_in_scope[0]:
        new_grads_and_vars.append((clipped_grads_in_scope.pop(0),
                                   vars_in_scope.pop(0)))
      else:
        new_grads_and_vars.append((grad, var))
    return new_grads_and_vars 
Developer: deepmind, Project: lamb, Lines of code: 23, Source file: utils.py

Example 2: preprocess_record_impl

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import clip_by_global_norm [as alias]
def preprocess_record_impl(self, params, record):
    """Clips the l2 norm, returning the clipped record and the l2 norm.

    Args:
      params: The parameters for the sample.
      record: The record to be processed.

    Returns:
      A tuple (preprocessed_records, l2_norm) where `preprocessed_records` is
        the structure of preprocessed tensors, and l2_norm is the total l2 norm
        before clipping.
    """
    l2_norm_clip = params
    record_as_list = tf.nest.flatten(record)
    clipped_as_list, norm = tf.clip_by_global_norm(record_as_list, l2_norm_clip)
    return tf.nest.pack_sequence_as(record, clipped_as_list), norm 
Developer: tensorflow, Project: privacy, Lines of code: 18, Source file: gaussian_query.py
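As a purely illustrative follow-up to the example above (the record contents below are made up and are not part of the privacy project), the same flatten / clip / repack pattern can be exercised on a small nested record:

# Illustrative sketch of the flatten / clip / repack pattern, assuming the v1 compat layer.
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

record = {'weights': tf.constant([3.0, 4.0]), 'bias': tf.constant([12.0])}
flat = tf.nest.flatten(record)                            # list of leaf tensors
clipped_flat, norm = tf.clip_by_global_norm(flat, 1.0)    # clip total l2 norm to 1.0
clipped = tf.nest.pack_sequence_as(record, clipped_flat)  # same dict structure back

with tf.Session() as sess:
  print(sess.run(norm))     # 13.0 -- total l2 norm before clipping
  print(sess.run(clipped))  # every leaf scaled by 1/13, so the record's norm is 1.0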

Example 3: _make_training_step

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import clip_by_global_norm [as alias]
def _make_training_step(self, loss: tf.Tensor) -> tf.Tensor:
        """
        Constructs a training step from the loss parameter and hyperparameters.
        """
        optimizer_name = self.hyperparameters["optimizer"].lower()
        if optimizer_name == "sgd":
            optimizer = tf.train.GradientDescentOptimizer(
                learning_rate=self.hyperparameters["learning_rate"]
            )
        elif optimizer_name == "rmsprop":
            optimizer = tf.train.RMSPropOptimizer(
                learning_rate=self.hyperparameters["learning_rate"],
                decay=self.hyperparameters["learning_rate_decay"],
                momentum=self.hyperparameters["momentum"],
            )
        elif optimizer_name == "adam":
            optimizer = tf.train.AdamOptimizer(
                learning_rate=self.hyperparameters["learning_rate"]
            )
        else:
            raise Exception(
                'Unknown optimizer "%s".' % (self.hyperparameters["optimizer"])
            )

        # Calculate and clip gradients
        trainable_vars = self._sess.graph.get_collection(
            tf.GraphKeys.TRAINABLE_VARIABLES
        )
        gradients = tf.gradients(loss, trainable_vars)
        clipped_gradients, _ = tf.clip_by_global_norm(
            gradients, self.hyperparameters["gradient_clip_value"]
        )
        pruned_clipped_gradients = []
        for (gradient, trainable_var) in zip(clipped_gradients, trainable_vars):
            if gradient is None:
                continue
            pruned_clipped_gradients.append((gradient, trainable_var))
        return optimizer.apply_gradients(pruned_clipped_gradients) 
Developer: microsoft, Project: machine-learning-for-programming-samples, Lines of code: 40, Source file: model_tf1.py

Example 4: _add_optimize_op

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import clip_by_global_norm [as alias]
def _add_optimize_op(self, loss):
    """Add ops for training."""
    global_step = tf.Variable(0, trainable=False)
    learning_rate = tf.Variable(self.learning_rate, trainable=False)
    tvars = tf.trainable_variables()
    grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars),
                                      self.max_grad_norm)
    opt = tf.train.AdamOptimizer(learning_rate)
    opt_step = opt.apply_gradients(zip(grads, tvars),
                                   global_step=global_step)
    return opt_step 
Developer: deepmind, Project: interval-bound-propagation, Lines of code: 13, Source file: robust_model.py

Example 5: config_model_training

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import clip_by_global_norm [as alias]
def config_model_training(self, model, labels_ph, params=None):
    model.loss = nql.nonneg_crossentropy(model.predicted_y, labels_ph)
    logging.info('learning rate %f', FLAGS.learning_rate)
    optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
    if FLAGS.gradient_clip > 0:
      logging.info('clipping gradients to %f', FLAGS.gradient_clip)
      gradients, variables = zip(*optimizer.compute_gradients(loss=model.loss))
      gradients, _ = tf.clip_by_global_norm(gradients, 5.0)
      model.train_op = optimizer.apply_gradients(
          zip(gradients, variables), global_step=tf.train.get_global_step())
    else:
      logging.info('no gradient clipping')
      model.train_op = optimizer.minimize(
          loss=model.loss, global_step=tf.train.get_global_step()) 
Developer: google-research, Project: language, Lines of code: 16, Source file: nell995.py

Example 6: config_model_training

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import clip_by_global_norm [as alias]
def config_model_training(self, model, labels_ph, params=None):
    model.loss = nql.nonneg_crossentropy(model.predicted_y, labels_ph)
    logging.info('learning rate %f', FLAGS.learning_rate)
    optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
    # clip gradients
    if FLAGS.gradient_clip > 0:
      logging.info('clipping gradients to %f', FLAGS.gradient_clip)
      gradients, variables = zip(*optimizer.compute_gradients(loss=model.loss))
      gradients, _ = tf.clip_by_global_norm(gradients, 5.0)
      model.train_op = optimizer.apply_gradients(
          zip(gradients, variables), global_step=tf.train.get_global_step())
    else:
      logging.info('no gradient clipping')
      model.train_op = optimizer.minimize(
          loss=model.loss, global_step=tf.train.get_global_step()) 
Developer: google-research, Project: language, Lines of code: 17, Source file: metaqa.py

Example 7: apply_gradients

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import clip_by_global_norm [as alias]
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    """Applying gradients and tune hyperparams with YellowFin.

    Args:
      grads_and_vars: List of (gradient, variable) pairs as returned by
        compute_gradients().
      global_step: Optional Variable to increment by one after the
        variables have been updated.
      name:  Optional name for the returned operation. Default to the
        name passed to the Optimizer constructor.

    Returns:
        (A group of operations)
        Variable Update with Momentum ops,
        YellowFin ops (Curvature, Variance, Distance),
        SingleStep and lr_mu tuning ops,
        Step increment ops.
    """
    self._grad, self._vars = zip(*[(g, t)
                                   for g, t in grads_and_vars if g is not None])

    # Var update with Momentum.
    with tf.variable_scope("apply_updates"):
      # Gradient Clipping?
      if self._clip_thresh_var is not None:
        self._grad, _ = tf.clip_by_global_norm(
            self._grad, self._clip_thresh_var)

        apply_grad_op = self._momentum_optimizer.apply_gradients(
            zip(self._grad, self._vars),
            global_step=global_step,
            name=name)
      else:
        apply_grad_op = self._momentum_optimizer.apply_gradients(
            zip(self._grad, self._vars),
            global_step=global_step,
            name=name)

    # Begin lr and mu tuning.
    with tf.variable_scope("prepare_yellowFin_variables"):
      # the dependencies ideally only need to be after clip is done,
      # i.e. depends on self._grads. However, the control_dependencies
      # does not support indexed slice for sparse gradients.
      # The alternative dependencies here might be slightly slower due
      # to less parallelization.
      with tf.control_dependencies([apply_grad_op,]):
        prepare_variables_op = self._prepare_variables()

    with tf.variable_scope("yellowfin"):
      with tf.control_dependencies([prepare_variables_op]):
        yellowfin_op = self._yellowfin()

    # Update YellowFin step variable.
    with tf.control_dependencies([yellowfin_op]):
      self._increment_step_op = tf.assign_add(self._step, 1).op

    return tf.group(apply_grad_op,
                    prepare_variables_op,
                    yellowfin_op,
                    self._increment_step_op) 
Developer: tensorflow, Project: tensor2tensor, Lines of code: 62, Source file: yellowfin.py

Example 8: adam

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import clip_by_global_norm [as alias]
def adam(params, grads, lr, schedule, t_total,
         b1=0.9,
         b2=0.999,
         e=1e-8,
         weight_decay=1e-2,
         bias_l2=True,
         max_grad_norm=1.):
  """Custom Adam optimzizer for weight decay and learning rate schedule.

  Implementation adapted from https://github.com/openai/finetune-transformer-lm.

  Args:
    params: Parameters to be optimized.
    grads: Gradients.
    lr: Learning rate.
    schedule: Type of learning rate schedule.
    t_total: Total training steps.
    b1: beta_1.
    b2: beta_2.
    e: epsilon.
    weight_decay: Weight decay coefficient.
    bias_l2: Whether to impose an l2 penalty on bias parameters.
    max_grad_norm: Norm that gradients will be clipped to.

  Returns:
    A list of update operations.
  """
  t = tf.train.get_global_step()
  tt = t + 1
  updates = [t.assign(tt)]
  if max_grad_norm > 0:
    grads, _ = tf.clip_by_global_norm(grads, max_grad_norm)
  for p, g in zip(params, grads):
    if p is None or g is None:
      print("can't train", p.name, g)
    else:
      if isinstance(g, tf.IndexedSlices):
        g = tf.convert_to_tensor(g)

      # past 1st moment vector; same shape as p.
      m = tf.Variable(p * 0., dtype=tf.float32, trainable=False)

      # past 2nd moment vector; same shape as p.
      v = tf.Variable(p * 0., dtype=tf.float32, trainable=False)
      lrt = lr * tf.sqrt(1 - b2**(tf.cast(tt, tf.float32))) / \
          (1 - b1**(tf.cast(tt, tf.float32)))
      lrt *= schedule(tf.cast(t, tf.float32)/t_total)

      # new 1st moment vector; same shape as p.
      mt = b1 * m + (1 - b1) * g

      # new 2nd moment vector; same shape as p.
      vt = b2 * v + (1 - b2) * g * g

      if (len(p.get_shape()) > 1 or bias_l2) and weight_decay > 0:
        pt = p - lrt * (mt / (tf.sqrt(vt) + e) + weight_decay * p)
      else:
        pt = p - lrt * (mt / (tf.sqrt(vt) + e))
      updates.extend([m.assign(mt), v.assign(vt), p.assign(pt)])
  return tf.group(*updates) 
Developer: google-research, Project: language, Lines of code: 62, Source file: adam.py
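For reference, the per-parameter update implemented by the loop above can be restated mathematically (this is only a restatement of the code, using the names from the signature: b1, b2, e, weight_decay):

$$
\begin{aligned}
m_t &= b_1\, m_{t-1} + (1 - b_1)\, g_t \\
v_t &= b_2\, v_{t-1} + (1 - b_2)\, g_t^2 \\
\eta_t &= \mathrm{lr} \cdot \frac{\sqrt{1 - b_2^{\,t}}}{1 - b_1^{\,t}} \cdot \mathrm{schedule}(t / t_{\mathrm{total}}) \\
p_t &= p_{t-1} - \eta_t \left( \frac{m_t}{\sqrt{v_t} + e} + \mathrm{weight\_decay} \cdot p_{t-1} \right)
\end{aligned}
$$

where the weight-decay term is dropped for one-dimensional (bias) parameters when bias_l2 is False, and all gradients are jointly clipped to max_grad_norm before the update.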

Example 9: __init__

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import clip_by_global_norm [as alias]
def __init__(
        self,
        obs_spec: Spec,
        act_spec: Spec,
        model_fn: ModelBuilder=None,
        policy_cls: PolicyType=None,
        sess_mgr: SessionManager=None,
        optimizer: tf.train.Optimizer=None,
        value_coef=DEFAULTS['value_coef'],
        entropy_coef=DEFAULTS['entropy_coef'],
        traj_len=DEFAULTS['traj_len'],
        batch_sz=DEFAULTS['batch_sz'],
        discount=DEFAULTS['discount'],
        gae_lambda=DEFAULTS['gae_lambda'],
        clip_rewards=DEFAULTS['clip_rewards'],
        clip_grads_norm=DEFAULTS['clip_grads_norm'],
        normalize_returns=DEFAULTS['normalize_returns'],
        normalize_advantages=DEFAULTS['normalize_advantages'],
    ):
        MemoryAgent.__init__(self, obs_spec, act_spec, traj_len, batch_sz)

        if not sess_mgr:
            sess_mgr = SessionManager()

        if not optimizer:
            optimizer = tf.train.AdamOptimizer(learning_rate=DEFAULTS['learning_rate'])

        self.sess_mgr = sess_mgr
        self.value_coef = value_coef
        self.entropy_coef = entropy_coef
        self.discount = discount
        self.gae_lambda = gae_lambda
        self.clip_rewards = clip_rewards
        self.normalize_returns = normalize_returns
        self.normalize_advantages = normalize_advantages

        self.model = model_fn(obs_spec, act_spec)
        self.value = self.model.outputs[-1]
        self.policy = policy_cls(act_spec, self.model.outputs[:-1])
        self.loss_op, self.loss_terms, self.loss_inputs = self.loss_fn()

        grads, vars = zip(*optimizer.compute_gradients(self.loss_op))
        self.grads_norm = tf.global_norm(grads)
        if clip_grads_norm > 0.:
            grads, _ = tf.clip_by_global_norm(grads, clip_grads_norm, self.grads_norm)
        self.train_op = optimizer.apply_gradients(zip(grads, vars), global_step=sess_mgr.global_step)
        self.minimize_ops = self.make_minimize_ops()

        sess_mgr.restore_or_init()
        self.n_batches = sess_mgr.start_step
        self.start_step = sess_mgr.start_step * traj_len

        self.logger = Logger() 
Developer: inoryy, Project: reaver, Lines of code: 55, Source file: actor_critic.py


Note: The tensorflow.compat.v1.clip_by_global_norm method examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors. Please refer to the corresponding project's license before distributing or using the code. Do not reproduce without permission.