

Python v1.clip_by_global_norm Method Code Examples

This article collects typical usage examples of the Python method tensorflow.compat.v1.clip_by_global_norm. If you are wondering what v1.clip_by_global_norm does, how to call it, or what working examples look like, the hand-picked code samples below should help. You can also browse further usage examples for the tensorflow.compat.v1 module that the method belongs to.


Nine code examples of the v1.clip_by_global_norm method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
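Before diving into the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the toy tensors are made up) of the basic contract of tf.clip_by_global_norm: given a list of tensors and a clip norm, it returns the list rescaled by a common factor whenever the combined l2 norm exceeds the limit, together with that global norm.

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

# Two toy "gradient" tensors; their global l2 norm is sqrt(3^2 + 4^2 + 12^2) = 13.
grads = [tf.constant([3.0, 4.0]), tf.constant([0.0, 12.0])]

# If the global norm exceeds clip_norm, every tensor is scaled by clip_norm / global_norm.
clipped, global_norm = tf.clip_by_global_norm(grads, clip_norm=5.0)

with tf.Session() as sess:
    print(sess.run(global_norm))  # 13.0, the norm before clipping
    print(sess.run(clipped))      # each tensor multiplied by 5.0 / 13.0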

Example 1: clip_gradients_in_scope

# Module to import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import clip_by_global_norm [as alias]
def clip_gradients_in_scope(grads_and_vars, scope, max_grad_norm):
  """DOC."""
  if max_grad_norm == 0:
    return grads_and_vars
  else:
    grads_in_scope = []
    vars_in_scope = []
    for grad, var in grads_and_vars:
      if is_var_in_scope(var, scope):
        grads_in_scope.append(grad)
        vars_in_scope.append(var)
    clipped_grads_in_scope, _ = tf.clip_by_global_norm(
        grads_in_scope, max_grad_norm)
    new_grads_and_vars = []
    for grad, var in grads_and_vars:
      if vars_in_scope and var is vars_in_scope[0]:
        new_grads_and_vars.append((clipped_grads_in_scope.pop(0),
                                   vars_in_scope.pop(0)))
      else:
        new_grads_and_vars.append((grad, var))
    return new_grads_and_vars 
Developer: deepmind | Project: lamb | Lines: 23 | Source file: utils.py

Example 2: preprocess_record_impl

# Module to import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import clip_by_global_norm [as alias]
def preprocess_record_impl(self, params, record):
    """Clips the l2 norm, returning the clipped record and the l2 norm.

    Args:
      params: The parameters for the sample.
      record: The record to be processed.

    Returns:
      A tuple (preprocessed_records, l2_norm) where `preprocessed_records` is
        the structure of preprocessed tensors, and l2_norm is the total l2 norm
        before clipping.
    """
    l2_norm_clip = params
    record_as_list = tf.nest.flatten(record)
    clipped_as_list, norm = tf.clip_by_global_norm(record_as_list, l2_norm_clip)
    return tf.nest.pack_sequence_as(record, clipped_as_list), norm 
Developer: tensorflow | Project: privacy | Lines: 18 | Source file: gaussian_query.py
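As a quick illustration of the flatten/clip/repack round trip that preprocess_record_impl performs, here is a stand-alone sketch with a made-up nested record (not taken from the privacy project):

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

# A made-up nested record; tf.nest.flatten orders dict entries by key ('b' before 'w').
record = {'w': tf.constant([3.0, 4.0]), 'b': tf.constant([12.0])}
flat = tf.nest.flatten(record)
clipped_flat, norm = tf.clip_by_global_norm(flat, 5.0)
clipped = tf.nest.pack_sequence_as(record, clipped_flat)

with tf.Session() as sess:
    print(sess.run(norm))  # 13.0, the total l2 norm before clipping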

Example 3: _make_training_step

# Module to import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import clip_by_global_norm [as alias]
def _make_training_step(self, loss: tf.Tensor) -> tf.Tensor:
        """
        Constructs a training step from the loss parameter and hyperparameters.
        """
        optimizer_name = self.hyperparameters["optimizer"].lower()
        if optimizer_name == "sgd":
            optimizer = tf.train.GradientDescentOptimizer(
                learning_rate=self.hyperparameters["learning_rate"]
            )
        elif optimizer_name == "rmsprop":
            optimizer = tf.train.RMSPropOptimizer(
                learning_rate=self.hyperparameters["learning_rate"],
                decay=self.hyperparameters["learning_rate_decay"],
                momentum=self.hyperparameters["momentum"],
            )
        elif optimizer_name == "adam":
            optimizer = tf.train.AdamOptimizer(
                learning_rate=self.hyperparameters["learning_rate"]
            )
        else:
            raise Exception(
                'Unknown optimizer "%s".' % (self.hyperparameters["optimizer"])
            )

        # Calculate and clip gradients
        trainable_vars = self._sess.graph.get_collection(
            tf.GraphKeys.TRAINABLE_VARIABLES
        )
        gradients = tf.gradients(loss, trainable_vars)
        clipped_gradients, _ = tf.clip_by_global_norm(
            gradients, self.hyperparameters["gradient_clip_value"]
        )
        pruned_clipped_gradients = []
        for (gradient, trainable_var) in zip(clipped_gradients, trainable_vars):
            if gradient is None:
                continue
            pruned_clipped_gradients.append((gradient, trainable_var))
        return optimizer.apply_gradients(pruned_clipped_gradients) 
Developer: microsoft | Project: machine-learning-for-programming-samples | Lines: 40 | Source file: model_tf1.py

Example 4: _add_optimize_op

# Module to import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import clip_by_global_norm [as alias]
def _add_optimize_op(self, loss):
    """Add ops for training."""
    global_step = tf.Variable(0, trainable=False)
    learning_rate = tf.Variable(self.learning_rate, trainable=False)
    tvars = tf.trainable_variables()
    grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars),
                                      self.max_grad_norm)
    opt = tf.train.AdamOptimizer(learning_rate)
    opt_step = opt.apply_gradients(zip(grads, tvars),
                                   global_step=global_step)
    return opt_step 
Developer: deepmind | Project: interval-bound-propagation | Lines: 13 | Source file: robust_model.py

Example 5: config_model_training

# Module to import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import clip_by_global_norm [as alias]
def config_model_training(self, model, labels_ph, params=None):
    model.loss = nql.nonneg_crossentropy(model.predicted_y, labels_ph)
    logging.info('learning rate %f', FLAGS.learning_rate)
    optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
    if FLAGS.gradient_clip > 0:
      logging.info('clipping gradients to %f', FLAGS.gradient_clip)
      gradients, variables = zip(*optimizer.compute_gradients(loss=model.loss))
      gradients, _ = tf.clip_by_global_norm(gradients, FLAGS.gradient_clip)
      model.train_op = optimizer.apply_gradients(
          zip(gradients, variables), global_step=tf.train.get_global_step())
    else:
      logging.info('no gradient clipping')
      model.train_op = optimizer.minimize(
          loss=model.loss, global_step=tf.train.get_global_step()) 
Developer: google-research | Project: language | Lines: 16 | Source file: nell995.py

Example 6: config_model_training

# Module to import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import clip_by_global_norm [as alias]
def config_model_training(self, model, labels_ph, params=None):
    model.loss = nql.nonneg_crossentropy(model.predicted_y, labels_ph)
    logging.info('learning rate %f', FLAGS.learning_rate)
    optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
    # clip gradients
    if FLAGS.gradient_clip > 0:
      logging.info('clipping gradients to %f', FLAGS.gradient_clip)
      gradients, variables = zip(*optimizer.compute_gradients(loss=model.loss))
      gradients, _ = tf.clip_by_global_norm(gradients, FLAGS.gradient_clip)
      model.train_op = optimizer.apply_gradients(
          zip(gradients, variables), global_step=tf.train.get_global_step())
    else:
      logging.info('no gradient clipping')
      model.train_op = optimizer.minimize(
          loss=model.loss, global_step=tf.train.get_global_step()) 
Developer: google-research | Project: language | Lines: 17 | Source file: metaqa.py

Example 7: apply_gradients

# Module to import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import clip_by_global_norm [as alias]
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    """Applying gradients and tune hyperparams with YellowFin.

    Args:
      grads_and_vars: List of (gradient, variable) pairs as returned by
        compute_gradients().
      global_step: Optional Variable to increment by one after the
        variables have been updated.
      name: Optional name for the returned operation. Defaults to the name
        passed to the Optimizer constructor.

    Returns:
      A group of operations: the momentum variable-update op, the YellowFin
      (curvature, variance, distance) ops, the lr and mu tuning ops, and the
      step-increment op.
    """
    self._grad, self._vars = zip(*[(g, t)
                                   for g, t in grads_and_vars if g is not None])

    # Var update with Momentum.
    with tf.variable_scope("apply_updates"):
      # Gradient Clipping?
      if self._clip_thresh_var is not None:
        self._grad, _ = tf.clip_by_global_norm(
            self._grad, self._clip_thresh_var)

        apply_grad_op = self._momentum_optimizer.apply_gradients(
            zip(self._grad, self._vars),
            global_step=global_step,
            name=name)
      else:
        apply_grad_op = self._momentum_optimizer.apply_gradients(
            zip(self._grad, self._vars),
            global_step=global_step,
            name=name)

    # Begin lr and mu tuning.
    with tf.variable_scope("prepare_yellowFin_variables"):
      # The dependencies ideally only need to come after clipping is done,
      # i.e. depend on self._grad. However, control_dependencies does not
      # support IndexedSlices for sparse gradients.
      # The alternative dependencies here might be slightly slower due
      # to less parallelization.
      with tf.control_dependencies([apply_grad_op,]):
        prepare_variables_op = self._prepare_variables()

    with tf.variable_scope("yellowfin"):
      with tf.control_dependencies([prepare_variables_op]):
        yellowfin_op = self._yellowfin()

    # Update YellowFin step variable.
    with tf.control_dependencies([yellowfin_op]):
      self._increment_step_op = tf.assign_add(self._step, 1).op

    return tf.group(apply_grad_op,
                    prepare_variables_op,
                    yellowfin_op,
                    self._increment_step_op) 
Developer: tensorflow | Project: tensor2tensor | Lines: 62 | Source file: yellowfin.py

Example 8: adam

# Module to import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import clip_by_global_norm [as alias]
def adam(params, grads, lr, schedule, t_total,
         b1=0.9,
         b2=0.999,
         e=1e-8,
         weight_decay=1e-2,
         bias_l2=True,
         max_grad_norm=1.):
  """Custom Adam optimzizer for weight decay and learning rate schedule.

  Implementation adapted from https://github.com/openai/finetune-transformer-lm.

  Args:
    params: Parameters to be optimized.
    grads: Gradients.
    lr: learning rate.
    schedule: Learning rate schedule; a callable applied to the training
      progress t / t_total.
    t_total: Total training steps.
    b1: beta_1.
    b2: beta_2.
    e: epsilon.
    weight_decay: Weight decay coefficient.
    bias_l2: Whether to apply the l2 penalty to bias parameters as well.
    max_grad_norm: Norm that gradients are clipped to.

  Returns:
    A list of update operations.
  """
  t = tf.train.get_global_step()
  tt = t + 1
  updates = [t.assign(tt)]
  if max_grad_norm > 0:
    grads, _ = tf.clip_by_global_norm(grads, max_grad_norm)
  for p, g in zip(params, grads):
    if p is None or g is None:
      print("can't train", p.name, g)
    else:
      if isinstance(g, tf.IndexedSlices):
        g = tf.convert_to_tensor(g)

      # past 1st moment vector; same shape as p.
      m = tf.Variable(p * 0., dtype=tf.float32, trainable=False)

      # past 2nd moment vector; same shape as p.
      v = tf.Variable(p * 0., dtype=tf.float32, trainable=False)
      lrt = lr * tf.sqrt(1 - b2**(tf.cast(tt, tf.float32))) / \
          (1 - b1**(tf.cast(tt, tf.float32)))
      lrt *= schedule(tf.cast(t, tf.float32)/t_total)

      # new 1st moment vector; same shape as p.
      mt = b1 * m + (1 - b1) * g

      # new 2nd moment vector; same shape as p.
      vt = b2 * v + (1 - b2) * g * g

      if (len(p.get_shape()) > 1 or bias_l2) and weight_decay > 0:
        pt = p - lrt * (mt / (tf.sqrt(vt) + e) + weight_decay * p)
      else:
        pt = p - lrt * (mt / (tf.sqrt(vt) + e))
      updates.extend([m.assign(mt), v.assign(vt), p.assign(pt)])
  return tf.group(*updates) 
Developer: google-research | Project: language | Lines: 62 | Source file: adam.py
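The step size lrt above is the standard Adam bias correction, lr * sqrt(1 - b2^t) / (1 - b1^t), multiplied by the schedule value. A small stand-alone numeric check (not part of adam.py; the constants are just illustrative defaults) shows that the corrected step size settles toward lr as t grows:

import math

lr, b1, b2 = 1e-3, 0.9, 0.999
for t in (1, 10, 1000):
    lrt = lr * math.sqrt(1 - b2 ** t) / (1 - b1 ** t)
    print(t, lrt)  # approaches lr = 1e-3 for large t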

Example 9: __init__

# Module to import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import clip_by_global_norm [as alias]
def __init__(
        self,
        obs_spec: Spec,
        act_spec: Spec,
        model_fn: ModelBuilder=None,
        policy_cls: PolicyType=None,
        sess_mgr: SessionManager=None,
        optimizer: tf.train.Optimizer=None,
        value_coef=DEFAULTS['value_coef'],
        entropy_coef=DEFAULTS['entropy_coef'],
        traj_len=DEFAULTS['traj_len'],
        batch_sz=DEFAULTS['batch_sz'],
        discount=DEFAULTS['discount'],
        gae_lambda=DEFAULTS['gae_lambda'],
        clip_rewards=DEFAULTS['clip_rewards'],
        clip_grads_norm=DEFAULTS['clip_grads_norm'],
        normalize_returns=DEFAULTS['normalize_returns'],
        normalize_advantages=DEFAULTS['normalize_advantages'],
    ):
        MemoryAgent.__init__(self, obs_spec, act_spec, traj_len, batch_sz)

        if not sess_mgr:
            sess_mgr = SessionManager()

        if not optimizer:
            optimizer = tf.train.AdamOptimizer(learning_rate=DEFAULTS['learning_rate'])

        self.sess_mgr = sess_mgr
        self.value_coef = value_coef
        self.entropy_coef = entropy_coef
        self.discount = discount
        self.gae_lambda = gae_lambda
        self.clip_rewards = clip_rewards
        self.normalize_returns = normalize_returns
        self.normalize_advantages = normalize_advantages

        self.model = model_fn(obs_spec, act_spec)
        self.value = self.model.outputs[-1]
        self.policy = policy_cls(act_spec, self.model.outputs[:-1])
        self.loss_op, self.loss_terms, self.loss_inputs = self.loss_fn()

        grads, vars = zip(*optimizer.compute_gradients(self.loss_op))
        self.grads_norm = tf.global_norm(grads)
        if clip_grads_norm > 0.:
            grads, _ = tf.clip_by_global_norm(grads, clip_grads_norm, self.grads_norm)
        self.train_op = optimizer.apply_gradients(zip(grads, vars), global_step=sess_mgr.global_step)
        self.minimize_ops = self.make_minimize_ops()

        sess_mgr.restore_or_init()
        self.n_batches = sess_mgr.start_step
        self.start_step = sess_mgr.start_step * traj_len

        self.logger = Logger() 
Developer: inoryy | Project: reaver | Lines: 55 | Source file: actor_critic.py


Note: The tensorflow.compat.v1.clip_by_global_norm examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please consult the corresponding project's license before redistributing or reusing the code; do not reproduce this article without permission.