

Python tensorflow.clip_by_global_norm Method Code Examples

This article collects typical usage examples of the tensorflow.clip_by_global_norm method in Python. If you are wondering how to use tensorflow.clip_by_global_norm, what it does, or how to call it in practice, the selected code examples below should help. You can also browse further usage examples from the tensorflow module that this method belongs to.


The sections below present 15 code examples of tensorflow.clip_by_global_norm, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
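Before diving into the collected examples, here is a minimal, self-contained sketch of the pattern they all share: compute gradients, clip them jointly by their global L2 norm, then apply them. The toy variable w, the clip_norm of 5.0, and the learning rate of 0.1 below are illustrative assumptions only and are not taken from any of the projects listed here.

import tensorflow as tf

# A toy variable and loss, just so there are gradients to clip.
w = tf.Variable([3.0, 4.0], name='w')
loss = tf.reduce_sum(tf.square(w))

tvars = tf.trainable_variables()
grads = tf.gradients(loss, tvars)

# Rescale all gradients together so that their combined L2 norm does not
# exceed clip_norm; global_norm is the norm measured before clipping.
clipped_grads, global_norm = tf.clip_by_global_norm(grads, clip_norm=5.0)

optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
train_op = optimizer.apply_gradients(zip(clipped_grads, tvars))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    _, norm = sess.run([train_op, global_norm])
    print(norm)  # 10.0 = sqrt(6.0**2 + 8.0**2), the pre-clipping norm

For w = [3.0, 4.0] the gradients of the loss are [6.0, 8.0], so the global norm is 10.0 and every gradient is rescaled by 5.0 / 10.0 before being applied.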

Example 1: _add_train_op

# Required import: import tensorflow [as alias]
# Or: from tensorflow import clip_by_global_norm [as alias]
def _add_train_op(self):
    """Sets self._train_op, op to run for training."""
    hps = self._hps

    self._lr_rate = tf.maximum(
        hps.min_lr,  # min_lr_rate.
        tf.train.exponential_decay(hps.lr, self.global_step, 30000, 0.98))

    tvars = tf.trainable_variables()
    with tf.device(self._get_gpu(self._num_gpus-1)):
      grads, global_norm = tf.clip_by_global_norm(
          tf.gradients(self._loss, tvars), hps.max_grad_norm)
    tf.summary.scalar('global_norm', global_norm)
    optimizer = tf.train.GradientDescentOptimizer(self._lr_rate)
    tf.summary.scalar('learning rate', self._lr_rate)
    self._train_op = optimizer.apply_gradients(
        zip(grads, tvars), global_step=self.global_step, name='train_step') 
Author: ringringyi | Project: DOTA_models | Lines: 19 | Source: seq2seq_attention_model.py

Example 2: _add_train_op

# Required import: import tensorflow [as alias]
# Or: from tensorflow import clip_by_global_norm [as alias]
def _add_train_op(self):
        # In regression, the objective loss is Mean Squared Error (MSE).
        self.loss = tf.losses.mean_squared_error(labels = self._y, predictions = self.output)

        tvars = tf.trainable_variables()
        gradients = tf.gradients(self.loss, tvars, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_TREE)

        # Clip the gradients
        with tf.device("/gpu:{}".format(self._hps.dqn_gpu_num)):
            grads, global_norm = tf.clip_by_global_norm(gradients, self._hps.max_grad_norm)

        # Add a summary
        tf.summary.scalar('global_norm', global_norm)

        # Apply adagrad optimizer
        optimizer = tf.train.AdamOptimizer(self._hps.lr)
        with tf.device("/gpu:{}".format(self._hps.dqn_gpu_num)):
            self.train_op = optimizer.apply_gradients(zip(grads, tvars), global_step=self.global_step, name='train_step')

        self.variable_summaries('dqn_loss',self.loss) 
Author: yaserkl | Project: TransferRL | Lines: 22 | Source: dqn.py

Example 3: get_train_op

# Required import: import tensorflow [as alias]
# Or: from tensorflow import clip_by_global_norm [as alias]
def get_train_op(self, loss, clip_factor, clip, step):
        import tensorflow as tf
        optimizer = tf.train.AdamOptimizer(learning_rate=step)
        gradients, variables = zip(*optimizer.compute_gradients(loss))
        filtered_grads = []
        filtered_vars = []
        for i in range(len(gradients)):
            if gradients[i] is not None:
                filtered_grads.append(gradients[i])
                filtered_vars.append(variables[i])
        gradients = filtered_grads
        variables = filtered_vars
        if clip:
            gradients, _ = tf.clip_by_global_norm(gradients, clip_factor)
        grad_norm = tf.reduce_sum([tf.norm(grad) for grad in gradients])
        train_op = optimizer.apply_gradients(zip(gradients, variables))
        return optimizer, train_op, grad_norm 
Author: jet-black | Project: ppo-lstm-parallel | Lines: 19 | Source: agent.py

Example 4: adem

# Required import: import tensorflow [as alias]
# Or: from tensorflow import clip_by_global_norm [as alias]
def adem(context_vector, model_response_vector, reference_response_vector,
         context_dim, model_response_dim, reference_response_dim,
         human_score_place, lr, max_grad_norm):
    model_score, M, N = tf_dynamic_adem_score(
        context=context_vector,
        model_response=model_response_vector,
        reference_response=reference_response_vector,
        shape_info={'batch_size': None,
                    'ct_dim': context_dim,
                    'mr_dim': model_response_dim,
                    'rr_dim': reference_response_dim})

    loss = compute_adem_l1_loss(human_score_place, model_score, M, N)

    tvars = tf.trainable_variables()
    grads, _ = tf.clip_by_global_norm(
        tf.gradients(loss, tvars), max_grad_norm)
    optimizer = tf.train.AdamOptimizer(lr)
    train_op = optimizer.apply_gradients(
        zip(grads, tvars),
        global_step=tf.contrib.framework.get_or_create_global_step()
    )
    return train_op, loss, model_score 
Author: Yoctol | Project: ADEM | Lines: 25 | Source: adem_graphs.py

Example 5: train_fn

# Required import: import tensorflow [as alias]
# Or: from tensorflow import clip_by_global_norm [as alias]
def train_fn(loss):
  trained_vars = tf.trainable_variables()
  count_parameters(trained_vars)

  # Gradient clipping
  gradients = tf.gradients(loss, trained_vars)

  clipped_grads, global_norm = tf.clip_by_global_norm(gradients, FLAGS.max_grad_norm)
  tf.summary.scalar('global_grad_norm', global_norm)

  # Add gradients and vars to summary
  # for gradient, var in list(zip(clipped_grads, trained_vars)):
  #   if 'attention' in var.name:
  #     tf.summary.histogram(var.name + '/gradient', gradient)
  #     tf.summary.histogram(var.name, var)

  # Define optimizer
  global_step = tf.train.get_or_create_global_step()
  optimizer = tf.train.RMSPropOptimizer(FLAGS.learning_rate)
  train_op = optimizer.apply_gradients(zip(clipped_grads, trained_vars),
                                       name='train_op',
                                       global_step=global_step)
  return train_op, global_step 
Author: tqtg | Project: hierarchical-attention-networks | Lines: 25 | Source: train.py

Example 6: _build_train

# Required import: import tensorflow [as alias]
# Or: from tensorflow import clip_by_global_norm [as alias]
def _build_train(self, loss, optimizer, vars=None, global_step=None):

        grads_and_vars = optimizer.compute_gradients(loss=loss, var_list=vars)
        grads_and_vars = [(grad, var) for grad, var in grads_and_vars
                          if grad is not None]

        # apply grad clipping
        grads, vars = zip(*grads_and_vars)
        clipped_grads, _ = tf.clip_by_global_norm(
            grads, clip_norm=self.config.get('global_norm_clip', 40))
        grads_and_vars = list(zip(clipped_grads, vars))

        train_op = optimizer.apply_gradients(
            grads_and_vars, global_step=global_step)

        return train_op 
Author: alibaba | Project: EasyRL | Lines: 18 | Source: policy_gradient.py

Example 7: _build_train

# Required import: import tensorflow [as alias]
# Or: from tensorflow import clip_by_global_norm [as alias]
def _build_train(self, loss, optimizer, vars, global_step=None):
        """
        construct the operation for optimization.

        Arguments:
            loss: the objective loss function to minimize
            optimizer: the optimizer used to perform the optimization
            vars: the variables available for optimization
            global_step: counter tracking the total number of optimization steps
        """

        # compute gradients
        grads_and_vars = optimizer.compute_gradients(loss=loss, var_list=vars)
        grads_and_vars = [(grad, var) for grad, var in grads_and_vars
                          if grad is not None]

        # apply grad clipping
        grads, vars = zip(*grads_and_vars)
        clipped_grads, _ = tf.clip_by_global_norm(
            grads, clip_norm=self.config.get('global_norm_clip', 40))
        grads_and_vars = list(zip(clipped_grads, vars))

        train_op = optimizer.apply_gradients(
            grads_and_vars, global_step=global_step)
        return train_op 
Author: alibaba | Project: EasyRL | Lines: 27 | Source: batch_dqn.py

Example 8: __create_optimizer

# Required import: import tensorflow [as alias]
# Or: from tensorflow import clip_by_global_norm [as alias]
def __create_optimizer(self):
        print('creating optimizer...')
        start = time.time()

        learning_rate = tf.train.exponential_decay(self.config.LR, self.global_step, 200, 0.97, staircase=True)
        self.opt = tf.train.RMSPropOptimizer(learning_rate=learning_rate)
        # self.opt = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)

        # Normalize the gradients of the parameter vector when their L2 norm exceeds a certain threshold.
        trainable_params = tf.trainable_variables()

        # calculate gradients of the loss given all the trainable parameters
        gradients = tf.gradients(self.loss, trainable_params)

        # Gradient clipping: new_gradients = gradients * threshold / l2_norm(gradients)
        clip_gradients, _ = tf.clip_by_global_norm(gradients, self.config.MAX_GRAD_NORM)

        self.updates = self.opt.apply_gradients(zip(clip_gradients, trainable_params), global_step=self.global_step)

        print('Building optimizer in: ', time.time() - start, ' secs') 
Author: hadyelsahar | Project: Zeroshot-QuestionGeneration | Lines: 22 | Source: tripletext2seq.py

Example 9: __create_optimizer

# Required import: import tensorflow [as alias]
# Or: from tensorflow import clip_by_global_norm [as alias]
def __create_optimizer(self):
        print('creating optimizer...')
        start = time.time()

        learning_rate = tf.train.exponential_decay(self.config.LR, self.global_step, 200, 0.97, staircase=True)
        self.opt = tf.train.RMSPropOptimizer(learning_rate=learning_rate)

        # learning_rate = tf.train.exponential_decay(self.config.LR, self.global_step, 100, 0.96, staircase=True)

        # self.opt = tf.train.RMSPropOptimizer(learning_rate=learning_rate)
        # self.opt = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)

        # Normalize the gradients of the parameter vector when their L2 norm exceeds a certain threshold.
        trainable_params = tf.trainable_variables()

        # calculate gradients of the loss given all the trainable parameters
        gradients = tf.gradients(self.loss, trainable_params)

        # Gradient clipping: new_gradients = gradients * threshold / l2_norm(gradients)
        clip_gradients, _ = tf.clip_by_global_norm(gradients, self.config.MAX_GRAD_NORM)

        self.updates = self.opt.apply_gradients(zip(clip_gradients, trainable_params), global_step=self.global_step)

        print('Building optimizer in: ', time.time() - start, ' secs') 
Author: hadyelsahar | Project: Zeroshot-QuestionGeneration | Lines: 26 | Source: triples2seq.py

Example 10: add_optimizer

# Required import: import tensorflow [as alias]
# Or: from tensorflow import clip_by_global_norm [as alias]
def add_optimizer(self, global_step):
    '''Adds optimizer. Sets "gradients" and "optimize" fields. add_loss must have been called.

    Args:
      global_step: int32 scalar Tensor representing current global step in training
    '''
    with tf.variable_scope('optimizer') as scope:
      hp = self._hparams
      if hp.decay_learning_rate:
        self.learning_rate = _learning_rate_decay(hp.initial_learning_rate, global_step)
      else:
        self.learning_rate = tf.convert_to_tensor(hp.initial_learning_rate)
      optimizer = tf.train.AdamOptimizer(self.learning_rate, hp.adam_beta1, hp.adam_beta2)
      gradients, variables = zip(*optimizer.compute_gradients(self.loss))
      self.gradients = gradients
      clipped_gradients, _ = tf.clip_by_global_norm(gradients, 1.0)

      # Add dependency on UPDATE_OPS; otherwise batchnorm won't work correctly. See:
      # https://github.com/tensorflow/tensorflow/issues/1122
      with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        self.optimize = optimizer.apply_gradients(zip(clipped_gradients, variables),
          global_step=global_step) 
Author: yanggeng1995 | Project: vae_tacotron | Lines: 24 | Source: tacotron.py

Example 11: grad_clip_fn

# Required import: import tensorflow [as alias]
# Or: from tensorflow import clip_by_global_norm [as alias]
def grad_clip_fn(self, loss, tvars, **kargs):
		grads = tf.gradients(loss, tvars)
		grad_clip = self.config.get("grad_clip", "global_norm")
		tf.logging.info(" gradient clip method {}".format(grad_clip))
		if grad_clip == "global_norm":
			clip_norm = self.config.get("clip_norm", 1.0)
			[grads, _] = tf.clip_by_global_norm(grads, 
								clip_norm=clip_norm)
		elif grad_clip == "norm":
			clip_norm = self.config.get("clip_norm", 1.0)
			grads = [tf.clip_by_norm(grad, clip_norm) for grad in grads]
		elif grad_clip == "value":
			clip_min_value = self.config.get("clip_min_value", -1.0)
			clip_max_value = self.config.get("clip_max_value", 1.0)
			grads = [tf.clip_by_value(grad, clip_min_value, clip_max_value) for grad in grads]
		else:
			grads = grads
		return grads 
Author: yyht | Project: BERT | Lines: 20 | Source: optimizer.py

Example 12: grad_clip_fn

# Required import: import tensorflow [as alias]
# Or: from tensorflow import clip_by_global_norm [as alias]
def grad_clip_fn(self, opt, loss, tvars, **kargs):
		grads_and_vars = opt.compute_gradients(loss, tvars)
		grads = [grad for grad, _ in grads_and_vars]
		grad_clip = self.config.get("grad_clip", "global_norm")
		tf.logging.info(" gradient clip method {}".format(grad_clip))
		if grad_clip == "global_norm":
			clip_norm = self.config.get("clip_norm", 1.0)
			[grads, _] = tf.clip_by_global_norm(grads, 
								clip_norm=clip_norm)
		elif grad_clip == "norm":
			clip_norm = self.config.get("clip_norm", 1.0)
			grads = [tf.clip_by_norm(grad, clip_norm) for grad in grads]
		elif grad_clip == "value":
			clip_min_value = self.config.get("clip_min_value", -1.0)
			clip_max_value = self.config.get("clip_max_value", 1.0)
			grads = [tf.clip_by_value(grad, clip_min_value, clip_max_value) for grad in grads]
		else:
			grads = grads
		return grads 
Author: yyht | Project: BERT | Lines: 21 | Source: hvd_distributed_optimizer.py

Example 13: _define_apply_ops

# Required import: import tensorflow [as alias]
# Or: from tensorflow import clip_by_global_norm [as alias]
def _define_apply_ops(self):
        """Defines the graph nodes for applying the accumulated gradients."""

        final_loss = self._accumulated_loss

        final_grad_vars = [(self._accumulated_gradients[key],
                            self._trainables[key])
                           for key in self._trainables.keys()]

        if self._config.clip_c > 0.0:
            grads, varss = list(zip(*final_grad_vars))
            clipped_grads, global_norm = tf.clip_by_global_norm(
                grads, clip_norm=self._config.clip_c)
            # Might be interesting to see how the global norm changes over
            # time, attach a summary?
            final_grad_vars = list(zip(clipped_grads, varss))

        apply_grads = self._optimizer.apply_gradients(
            final_grad_vars,
            global_step=self._global_step)

        self._apply_ops = [self._global_step, apply_grads, final_loss] 
Author: EdinburghNLP | Project: nematus | Lines: 24 | Source: model_updater.py

Example 14: training_ops

# Required import: import tensorflow [as alias]
# Or: from tensorflow import clip_by_global_norm [as alias]
def training_ops(self, loss):
    opt = self.get_optimizer()
    params = tf.trainable_variables()
    gradients = tf.gradients(loss, params)
    clipped_gradients, _ = tf.clip_by_global_norm(gradients, 5.0)
    return opt.apply_gradients(zip(clipped_gradients, params),
                               global_step=self.global_step) 
Author: ringringyi | Project: DOTA_models | Lines: 9 | Source: model.py

Example 15: training_ops

# Required import: import tensorflow [as alias]
# Or: from tensorflow import clip_by_global_norm [as alias]
def training_ops(self, loss, learning_rate=None):
    """Gradient ops."""
    opt = self.get_optimizer(learning_rate)
    params = tf.trainable_variables()
    grads = tf.gradients(loss, params)

    if self.clip_norm:
      grads, global_norm = tf.clip_by_global_norm(grads, self.clip_norm)
      tf.summary.scalar('grad_global_norm', global_norm)

    return opt.apply_gradients(zip(grads, params)) 
Author: ringringyi | Project: DOTA_models | Lines: 13 | Source: objective.py


Note: The tensorflow.clip_by_global_norm examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please consult the corresponding project's license before distributing or using the code; do not reproduce without permission.