

Python tensorflow.clip_by_norm Code Examples

This article collects and summarizes typical usage examples of the tensorflow.clip_by_norm function in Python. If you are wondering what tensorflow.clip_by_norm does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the tensorflow module.


Below are 15 code examples of tensorflow.clip_by_norm, sorted by popularity by default. You can vote up the examples you like or find useful; your feedback helps the system recommend better Python code examples.
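Before the examples, here is a minimal, self-contained sketch of what tf.clip_by_norm does: it rescales a tensor so that its L2 norm does not exceed the given clip_norm, and returns it unchanged otherwise. The sketch assumes the TF 1.x session-based API used throughout the examples below; the tensor values are made up purely for illustration.

import tensorflow as tf

# A tensor whose L2 norm is sqrt(3^2 + 4^2) = 5.0 (values made up for illustration).
x = tf.constant([-3.0, 0.0, 4.0])

# clip_by_norm rescales x by clip_norm / norm(x) whenever norm(x) > clip_norm.
clipped = tf.clip_by_norm(x, clip_norm=4.0)

with tf.Session() as sess:
    print(sess.run(clipped))  # approximately [-2.4, 0.0, 3.2]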

Example 1: _clip_gradients

# Required import: import tensorflow [as alias]
# Or: from tensorflow import clip_by_norm [as alias]
def _clip_gradients(self, grad):
    """Clips gradients if the hyperparameter `gradient_clip_norm` requires it.

    Sparse tensors, in the form of IndexedSlices returned for the
    gradients of embeddings, require special handling.

    Args:
      grad: Gradient Tensor, IndexedSlices, or None.

    Returns:
      Optionally clipped gradient.
    """
    if grad is not None and self.hyperparams.gradient_clip_norm > 0:
      logging.info('Clipping gradient %s', grad)
      if isinstance(grad, tf.IndexedSlices):
        tmp = tf.clip_by_norm(grad.values, self.hyperparams.gradient_clip_norm)
        return tf.IndexedSlices(tmp, grad.indices, grad.dense_shape)
      else:
        return tf.clip_by_norm(grad, self.hyperparams.gradient_clip_norm)
    else:
      return grad 
Developer: ringringyi, Project: DOTA_models, Lines: 23, Source: graph_builder.py
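The IndexedSlices branch above matters because, in TF 1.x, the gradient of tf.nn.embedding_lookup with respect to the embedding variable comes back as a tf.IndexedSlices rather than a dense Tensor, so only its .values field can be passed to tf.clip_by_norm. The following is a minimal sketch of that pattern; the embedding shape and lookup ids are made-up illustration values, not taken from the DOTA_models project.

import tensorflow as tf

# Made-up embedding table and lookup ids, purely for illustration.
embeddings = tf.Variable(tf.random_normal([10, 4]))
ids = tf.constant([1, 3, 3])
loss = tf.reduce_sum(tf.nn.embedding_lookup(embeddings, ids))

# The gradient w.r.t. the embedding table is a tf.IndexedSlices.
grad = tf.gradients(loss, [embeddings])[0]

# Clip only the .values and re-wrap, as _clip_gradients does above.
clipped = tf.IndexedSlices(
    tf.clip_by_norm(grad.values, 1.0), grad.indices, grad.dense_shape)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(clipped.values))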

Example 2: __init__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import clip_by_norm [as alias]
def __init__(self, model, args):
        self.args = args

        with tf.variable_scope('supervisor_loss'):
            optimizer = tf.train.AdamOptimizer(
                args.ml_lr, beta1=0.9, beta2=0.98, epsilon=1e-8)

            loss = self.compute_loss(model)

            gradients = optimizer.compute_gradients(loss)
            for i, (grad, var) in enumerate(gradients):
                if grad is not None:
                    gradients[i] = (
                        tf.clip_by_norm(grad, args.clip_norm), var)
            self.train_op = optimizer.apply_gradients(
                gradients, global_step=model.global_step)

            tf.summary.scalar('loss', loss)
            self.merged = tf.summary.merge_all() 
Developer: ne7ermore, Project: deeping-flow, Lines: 21, Source: model.py

Example 3: _graph_fn_calculate_gradients

# Required import: import tensorflow [as alias]
# Or: from tensorflow import clip_by_norm [as alias]
def _graph_fn_calculate_gradients(self, variables, loss, time_percentage):
        """
        Args:
            variables (DataOpTuple): A list of variables to calculate gradients for.
            loss (SingleDataOp): The total loss over a batch to be minimized.
        """
        if get_backend() == "tf":
            var_list = list(variables.values()) if isinstance(variables, dict) else force_list(variables)
            grads_and_vars = self.optimizer.compute_gradients(
                loss=loss,
                var_list=var_list
            )
            if self.clip_grad_norm is not None:
                for i, (grad, var) in enumerate(grads_and_vars):
                    if grad is not None:
                        grads_and_vars[i] = (tf.clip_by_norm(t=grad, clip_norm=self.clip_grad_norm), var)
            return DataOpTuple(grads_and_vars) 
Developer: rlgraph, Project: rlgraph, Lines: 19, Source: local_optimizers.py

Example 4: flatgrad

# Required import: import tensorflow [as alias]
# Or: from tensorflow import clip_by_norm [as alias]
def flatgrad(loss, var_list, clip_norm=None):
    """
    calculates the gradient and flattens it

    :param loss: (float) the loss value
    :param var_list: ([TensorFlow Tensor]) the variables
    :param clip_norm: (float) clip the gradients (disabled if None)
    :return: ([TensorFlow Tensor]) flattened gradient
    """
    grads = tf.gradients(loss, var_list)
    if clip_norm is not None:
        grads = [tf.clip_by_norm(grad, clip_norm=clip_norm) for grad in grads]
    return tf.concat(axis=0, values=[
        tf.reshape(grad if grad is not None else tf.zeros_like(v), [numel(v)])
        for (v, grad) in zip(var_list, grads)
    ]) 
Developer: Stable-Baselines-Team, Project: stable-baselines, Lines: 18, Source: tf_util.py

Example 5: grad_clip_fn

# Required import: import tensorflow [as alias]
# Or: from tensorflow import clip_by_norm [as alias]
def grad_clip_fn(self, loss, tvars, **kargs):
		grads = tf.gradients(loss, tvars)
		grad_clip = self.config.get("grad_clip", "global_norm")
		tf.logging.info(" gradient clip method {}".format(grad_clip))
		if grad_clip == "global_norm":
			clip_norm = self.config.get("clip_norm", 1.0)
			[grads, _] = tf.clip_by_global_norm(grads, 
								clip_norm=clip_norm)
		elif grad_clip == "norm":
			clip_norm = self.config.get("clip_norm", 1.0)
			grads = [tf.clip_by_norm(grad, clip_norm) for grad in grads]
		elif grad_clip == "value":
			clip_min_value = self.config.get("clip_min_value", -1.0)
			clip_max_value = self.config.get("clip_max_value", 1.0)
			grads = [tf.clip_by_value(grad, clip_min_value, clip_max_value) for grad in grads]
		else:
			grads = grads
		return grads 
Developer: yyht, Project: BERT, Lines: 20, Source: optimizer.py

Example 6: grad_clip_fn

# Required import: import tensorflow [as alias]
# Or: from tensorflow import clip_by_norm [as alias]
def grad_clip_fn(self, opt, loss, tvars, **kargs):
		grads_and_vars = opt.compute_gradients(loss, tvars)
		grads = [grad for grad, _ in grads_and_vars]
		grad_clip = self.config.get("grad_clip", "global_norm")
		tf.logging.info(" gradient clip method {}".format(grad_clip))
		if grad_clip == "global_norm":
			clip_norm = self.config.get("clip_norm", 1.0)
			[grads, _] = tf.clip_by_global_norm(grads, 
								clip_norm=clip_norm)
		elif grad_clip == "norm":
			clip_norm = self.config.get("clip_norm", 1.0)
			grads = [tf.clip_by_norm(grad, clip_norm) for grad in grads]
		elif grad_clip == "value":
			clip_min_value = self.config.get("clip_min_value", -1.0)
			clip_max_value = self.config.get("clip_max_value", 1.0)
			grads = [tf.clip_by_value(grad, clip_min_value, clip_max_value) for grad in grads]
		else:
			grads = grads
		return grads 
Developer: yyht, Project: BERT, Lines: 21, Source: hvd_distributed_optimizer.py
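The two grad_clip_fn variants above switch between three clipping modes that behave quite differently: tf.clip_by_global_norm rescales all gradients jointly by one factor computed from their combined norm, tf.clip_by_norm rescales each gradient tensor independently, and tf.clip_by_value clamps individual elements. A minimal sketch with made-up gradient values (not taken from the BERT project) illustrating the difference:

import tensorflow as tf

# Made-up "gradients", purely for illustration.
g1 = tf.constant([3.0, 4.0])   # norm 5
g2 = tf.constant([6.0, 8.0])   # norm 10

# Per-tensor clipping: each tensor is rescaled independently to norm <= 5.
per_tensor = [tf.clip_by_norm(g, 5.0) for g in [g1, g2]]

# Global clipping: both tensors are rescaled by the same factor
# 5 / global_norm, where global_norm = sqrt(5**2 + 10**2) ~= 11.18.
global_clipped, global_norm = tf.clip_by_global_norm([g1, g2], clip_norm=5.0)

# Element-wise clipping: every element is clamped into [-5, 5];
# the resulting tensor norm can still exceed 5.
by_value = [tf.clip_by_value(g, -5.0, 5.0) for g in [g1, g2]]

with tf.Session() as sess:
    print(sess.run([per_tensor, global_clipped, by_value]))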

Example 7: add_train_op

# Required import: import tensorflow [as alias]
# Or: from tensorflow import clip_by_norm [as alias]
def add_train_op(self, loss):
        """
        训练节点
        """
        with tf.name_scope('train_op'):
            # 记录训练步骤
            self.global_step = tf.Variable(0, 
                    name='global_step', trainable=False)
            opt = tf.train.AdamOptimizer(self.config.lr)
            # train_op = opt.minimize(loss, self.global_step)
            train_variables = tf.trainable_variables()
            grads_vars = opt.compute_gradients(loss, train_variables)
            for i, (grad, var) in enumerate(grads_vars):
                grads_vars[i] = (
                    tf.clip_by_norm(grad, self.config.grad_clip), var)
            train_op = opt.apply_gradients(
                grads_vars, global_step=self.global_step)
            return train_op 
Developer: l11x0m7, Project: Question_Answering_Models, Lines: 20, Source: models.py

Example 8: minimize_and_clip

# Required import: import tensorflow [as alias]
# Or: from tensorflow import clip_by_norm [as alias]
def minimize_and_clip(optimizer, objective, var_list, clip_val=10):
    """Minimized `objective` using `optimizer` w.r.t. variables in
    `var_list` while ensure the norm of the gradients for each
    variable is clipped to `clip_val`
    """    
    if clip_val is None:
        return optimizer.minimize(objective, var_list=var_list)
    else:
        gradients = optimizer.compute_gradients(objective, var_list=var_list)
        for i, (grad, var) in enumerate(gradients):
            if grad is not None:
                gradients[i] = (tf.clip_by_norm(grad, clip_val), var)
        return optimizer.apply_gradients(gradients)


# ================================================================
# Global session
# ================================================================ 
Developer: dadadidodi, Project: m3ddpg, Lines: 20, Source: tf_util.py

Example 9: build_model

# Required import: import tensorflow [as alias]
# Or: from tensorflow import clip_by_norm [as alias]
def build_model(self):
        self.build_memory()

        self.W = tf.Variable(tf.random_normal([self.edim, 3], stddev=self.init_std))
        z = tf.matmul(self.hid[-1], self.W)

        self.loss = tf.nn.softmax_cross_entropy_with_logits(logits=z, labels=self.target)

        self.lr = tf.Variable(self.current_lr)
        self.opt = tf.train.GradientDescentOptimizer(self.lr)

        params = [self.A, self.B, self.C, self.T_A, self.T_B, self.W, self.ASP, self.BL_W, self.BL_B]
        grads_and_vars = self.opt.compute_gradients(self.loss, params)
        clipped_grads_and_vars = [(tf.clip_by_norm(gv[0], self.max_grad_norm), gv[1]) \
                                  for gv in grads_and_vars]

        inc = self.global_step.assign_add(1)
        with tf.control_dependencies([inc]):
            self.optim = self.opt.apply_gradients(clipped_grads_and_vars)

        tf.global_variables_initializer().run()

        self.correct_prediction = tf.argmax(z, 1) 
Developer: koala-ai, Project: tensorflow_nlp, Lines: 25, Source: model.py

Example 10: set_train_op

# Required import: import tensorflow [as alias]
# Or: from tensorflow import clip_by_norm [as alias]
def set_train_op(loss, tvars):
    if FLAGS.optimizer_type == "sgd":
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=FLAGS.learning_rate)
    elif FLAGS.optimizer_type == "rmsprop":
        optimizer = tf.train.RMSPropOptimizer(learning_rate=FLAGS.learning_rate)
    elif FLAGS.optimizer_type == "adam":
        optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
    else:
        raise ValueError("Wrong optimizer_type.")

    gradients = optimizer.compute_gradients(loss, var_list=tvars)
    clipped_gradients = [(grad if grad is None else tf.clip_by_norm(grad, FLAGS.max_grads), var)
                         for grad, var in gradients]

    train_op = optimizer.apply_gradients(clipped_gradients)
    return train_op 
Developer: tokestermw, Project: text-gan-tensorflow, Lines: 18, Source: train.py

Example 11: build_model

# Required import: import tensorflow [as alias]
# Or: from tensorflow import clip_by_norm [as alias]
def build_model(self):
        self.build_memory()

        self.W = tf.Variable(tf.random_normal([self.edim, self.nwords], stddev=self.init_std))
        z = tf.matmul(self.hid[-1], self.W)

        self.loss = tf.nn.softmax_cross_entropy_with_logits(logits=z, labels=self.target)

        self.lr = tf.Variable(self.current_lr)
        self.opt = tf.train.GradientDescentOptimizer(self.lr)

        params = [self.A, self.B, self.C, self.T_A, self.T_B, self.W]
        grads_and_vars = self.opt.compute_gradients(self.loss,params)
        clipped_grads_and_vars = [(tf.clip_by_norm(gv[0], self.max_grad_norm), gv[1]) \
                                   for gv in grads_and_vars]

        inc = self.global_step.assign_add(1)
        with tf.control_dependencies([inc]):
            self.optim = self.opt.apply_gradients(clipped_grads_and_vars)

        tf.initialize_all_variables().run()
        self.saver = tf.train.Saver() 
Developer: keon, Project: Seq2Seq-Tensorflow, Lines: 24, Source: model.py

Example 12: testClipByNormClipped

# Required import: import tensorflow [as alias]
# Or: from tensorflow import clip_by_norm [as alias]
def testClipByNormClipped(self):
    # Norm clipping when clip_norm < 5
    with self.test_session():
      x = tf.constant([-3.0, 0.0, 0.0, 4.0, 0.0, 0.0], shape=[2, 3])
      # Norm of x = sqrt(3^2 + 4^2) = 5
      np_ans = [[-2.4, 0.0, 0.0],
                [3.2, 0.0, 0.0]]
      clip_norm = 4.0
      ans = tf.clip_by_norm(x, clip_norm)
      tf_ans = ans.eval()

      clip_tensor = tf.constant(4.0)
      ans = tf.clip_by_norm(x, clip_tensor)
      tf_ans_tensor = ans.eval()

    self.assertAllClose(np_ans, tf_ans)
    self.assertAllClose(np_ans, tf_ans_tensor) 
Developer: tobegit3hub, Project: deep_image_model, Lines: 19, Source: clip_ops_test.py

Example 13: clip_gradient_norms

# Required import: import tensorflow [as alias]
# Or: from tensorflow import clip_by_norm [as alias]
def clip_gradient_norms(gradients_to_variables, max_norm):
  """Clips the gradients by the given value.

  Args:
    gradients_to_variables: A list of gradient to variable pairs (tuples).
    max_norm: the maximum norm value.

  Returns:
    A list of clipped gradient to variable pairs.
  """
  clipped_grads_and_vars = []
  for grad, var in gradients_to_variables:
    if grad is not None:
      if isinstance(grad, tf.IndexedSlices):
        tmp = tf.clip_by_norm(grad.values, max_norm)
        grad = tf.IndexedSlices(tmp, grad.indices, grad.dense_shape)
      else:
        grad = tf.clip_by_norm(grad, max_norm)
    clipped_grads_and_vars.append((grad, var))
  return clipped_grads_and_vars 
Developer: google, Project: youtube-8m, Lines: 22, Source: utils.py
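A hedged usage sketch of the clip_gradient_norms helper above; the variable, loss, learning rate, and max_norm are toy values invented for illustration and do not reflect how the youtube-8m project wires it into its training graph.

import tensorflow as tf

# Toy variable and loss, invented for illustration only.
w = tf.Variable([3.0, 4.0])
loss = tf.reduce_sum(tf.square(w))   # gradient is 2*w = [6, 8], norm 10

optimizer = tf.train.GradientDescentOptimizer(0.1)
grads_and_vars = optimizer.compute_gradients(loss, var_list=[w])
clipped = clip_gradient_norms(grads_and_vars, max_norm=1.0)  # rescaled to [0.6, 0.8]
train_op = optimizer.apply_gradients(clipped)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train_op)
    print(sess.run(w))   # approximately [2.94, 3.92]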

Example 14: _optimize

# Required import: import tensorflow [as alias]
# Or: from tensorflow import clip_by_norm [as alias]
def _optimize(self, loss, step, **knobs):
        opt_momentum = knobs['opt_momentum'] # Momentum optimizer momentum
        grad_clip_norm = knobs['grad_clip_norm'] # L2 norm to clip gradients by

        # Compute learning rate, gradients
        tf_trainable_vars = tf.trainable_variables()
        lr = self._get_learning_rate(step, **knobs)
        grads = tf.gradients(loss, tf_trainable_vars)
        self._mark_for_monitoring('lr', lr)

        # Clip gradients
        if grad_clip_norm > 0:
            grads = [tf.clip_by_norm(x, grad_clip_norm) for x in grads]

        # Init optimizer
        opt = tf.train.MomentumOptimizer(lr, opt_momentum, use_locking=True, use_nesterov=True)
        train_op = opt.apply_gradients(zip(grads, tf_trainable_vars), global_step=step)

        return train_op 
Developer: nginyc, Project: rafiki, Lines: 21, Source: TfEnas.py

Example 15: build_optim

# Required import: import tensorflow [as alias]
# Or: from tensorflow import clip_by_norm [as alias]
def build_optim(self):
        # Update moving_mean and moving_variance for batch normalization layers
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):

            with tf.name_scope('reinforce'):
                # Actor learning rate
                self.lr1 = tf.train.exponential_decay(self.lr1_start, self.global_step, self.lr1_decay_step,self.lr1_decay_rate, staircase=False, name="learning_rate1")
                # Optimizer
                self.opt1 = tf.train.AdamOptimizer(learning_rate=self.lr1,beta1=0.9,beta2=0.99, epsilon=0.0000001)
                # Discounted reward
                self.reward_baseline = tf.stop_gradient(self.reward - self.critic.predictions) # [Batch size, 1]
                variable_summaries('reward_baseline',self.reward_baseline, with_max_min = True)
                # Loss
                self.loss1 = tf.reduce_mean(self.reward_baseline*self.log_softmax,0)
                tf.summary.scalar('loss1', self.loss1)
                # Minimize step
                gvs = self.opt1.compute_gradients(self.loss1)
                capped_gvs = [(tf.clip_by_norm(grad, 1.), var) for grad, var in gvs if grad is not None] # L2 clip
                self.train_step1 = self.opt1.apply_gradients(capped_gvs, global_step=self.global_step)

            with tf.name_scope('state_value'):
                # Critic learning rate
                self.lr2 = tf.train.exponential_decay(self.lr2_start, self.global_step2, self.lr2_decay_step, self.lr2_decay_rate, staircase=False, name="learning_rate2")
                # Optimizer
                self.opt2 = tf.train.AdamOptimizer(learning_rate=self.lr2,beta1=0.9,beta2=0.99, epsilon=0.0000001)
                # Loss
                self.loss2 = tf.losses.mean_squared_error(self.reward, self.critic.predictions, weights = 1.0)
                tf.summary.scalar('loss2', self.loss2)
                # Minimize step
                gvs2 = self.opt2.compute_gradients(self.loss2)
                capped_gvs2 = [(tf.clip_by_norm(grad, 1.), var) for grad, var in gvs2 if grad is not None] # L2 clip
                self.train_step2 = self.opt2.apply_gradients(capped_gvs2, global_step=self.global_step2)
Developer: MichelDeudon, Project: neural-combinatorial-optimization-rl-tensorflow, Lines: 35, Source: actor.py


Note: The tensorflow.clip_by_norm examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright of the source code remains with those authors; please consult each project's license before distributing or reusing the code. Do not reproduce this article without permission.