Python tensorflow.GradientTape Method Code Examples

This article collects and summarizes typical usage examples of the tensorflow.GradientTape method in Python. If you are wondering what tensorflow.GradientTape does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from the tensorflow package.


The following presents 15 code examples of the tensorflow.GradientTape method, sorted by popularity by default.
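Before diving in, here is a minimal sketch of the pattern every example below shares. The model, optimizer, and data are placeholders invented for illustration, not taken from any example on this page: operations run inside the with tf.GradientTape() block are recorded on the tape, tape.gradient differentiates the loss with respect to the trainable variables, and the optimizer applies the result.

import tensorflow as tf

# Placeholder model and optimizer; any tf.keras model works the same way.
model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)

x = tf.random.normal((8, 4))  # dummy batch of inputs
y = tf.random.normal((8, 1))  # dummy targets

with tf.GradientTape() as tape:
    predictions = model(x, training=True)              # forward pass, recorded on the tape
    loss = tf.reduce_mean(tf.square(y - predictions))  # mean squared error
grads = tape.gradient(loss, model.trainable_variables)  # reverse-mode differentiation
optimizer.apply_gradients(zip(grads, model.trainable_variables))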

Example 1: pretrain_step

# Required import: import tensorflow [as alias]
# Or: from tensorflow import GradientTape [as alias]
def pretrain_step(model, x, y):
    """
    Single step of generator pre-training.
    Args:
        model: A model object with a tf keras compiled generator.
        x: The low resolution image tensor.
        y: The high resolution image tensor.
    """
    with tf.GradientTape() as tape:
        fake_hr = model.generator(x)
        loss_mse = tf.keras.losses.MeanSquaredError()(y, fake_hr)

    grads = tape.gradient(loss_mse, model.generator.trainable_variables)
    model.gen_optimizer.apply_gradients(zip(grads, model.generator.trainable_variables))

    return loss_mse 
Author: HasnainRaz, Project: Fast-SRGAN, Lines: 18, Source: main.py
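A hypothetical call site for the step above (the dataset of (low-res, high-res) image pairs and the model object are assumptions for illustration; the surrounding loop is not shown in the main.py excerpt):

for lr_batch, hr_batch in dataset:  # assumed tf.data.Dataset of (low-res, high-res) pairs
    loss = pretrain_step(model, lr_batch, hr_batch)
    print(f"pretrain MSE: {loss.numpy():.4f}")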

Example 2: _compute_meta_loss

# Required import: import tensorflow [as alias]
# Or: from tensorflow import GradientTape [as alias]
def _compute_meta_loss(self, inputs, inputs2, variables):
    """This is called during fitting to compute the meta-loss (the loss after a
    few steps of optimization), and its gradient.
    """
    updated_variables = variables
    with tf.GradientTape() as meta_tape:
      for k in range(self.optimization_steps):
        with tf.GradientTape() as tape:
          loss, _ = self.learner.compute_model(inputs, updated_variables, True)
        gradients = tape.gradient(loss, updated_variables)
        updated_variables = [
            v if g is None else v - self.learning_rate * g
            for v, g in zip(updated_variables, gradients)
        ]
      meta_loss, _ = self.learner.compute_model(inputs2, updated_variables,
                                                True)
    meta_gradients = meta_tape.gradient(meta_loss, variables)
    return meta_loss, meta_gradients 
Author: deepchem, Project: deepchem, Lines: 20, Source: maml.py
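For context, a hedged sketch of the outer loop that might consume these meta-gradients. The method name and the _tf_optimizer attribute are assumptions for illustration, not taken from maml.py:

def meta_train_step(self, inputs, inputs2):
    # Hypothetical outer step: apply the meta-gradients to the *original*
    # variables, so the model improves at adapting quickly, not at one task.
    variables = self.learner.variables
    meta_loss, meta_gradients = self._compute_meta_loss(inputs, inputs2, variables)
    self._tf_optimizer.apply_gradients(zip(meta_gradients, variables))  # assumed optimizer attribute
    return meta_loss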

Example 3: train_on_current_task

# Required import: import tensorflow [as alias]
# Or: from tensorflow import GradientTape [as alias]
def train_on_current_task(self, optimization_steps=1, restore=True):
    """Perform a few steps of gradient descent to fine tune the model on the current task.

    Parameters
    ----------
    optimization_steps: int
      the number of steps of gradient descent to perform
    restore: bool
      if True, restore the model from the most recent checkpoint before optimizing
    """
    if restore:
      self.restore()
    variables = self.learner.variables
    for i in range(optimization_steps):
      inputs = self.learner.get_batch()
      with tf.GradientTape() as tape:
        loss, _ = self.learner.compute_model(inputs, variables, True)
      gradients = tape.gradient(loss, variables)
      self._tf_task_optimizer.apply_gradients(zip(gradients, variables)) 
Author: deepchem, Project: deepchem, Lines: 21, Source: maml.py

Example 4: _create_gradient_fn

# Required import: import tensorflow [as alias]
# Or: from tensorflow import GradientTape [as alias]
def _create_gradient_fn(self, variables):
    """Create a function that computes gradients and applies them to the model.
    Because of the way TensorFlow function tracing works, we need to create a
    separate function for each new set of variables.
    """

    @tf.function(experimental_relax_shapes=True)
    def apply_gradient_for_batch(inputs, labels, weights, loss):
      with tf.GradientTape() as tape:
        outputs = self.model(inputs, training=True)
        if isinstance(outputs, tf.Tensor):
          outputs = [outputs]
        if self._loss_outputs is not None:
          outputs = [outputs[i] for i in self._loss_outputs]
        batch_loss = loss(outputs, labels, weights)
      if variables is None:
        vars = self.model.trainable_variables
      else:
        vars = variables
      grads = tape.gradient(batch_loss, vars)
      self._tf_optimizer.apply_gradients(zip(grads, vars))
      self._global_step.assign_add(1)
      return batch_loss

    return apply_gradient_for_batch 
Author: deepchem, Project: deepchem, Lines: 27, Source: keras_model.py
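A short sketch of why a factory is used at all: tf.function traces a new concrete graph whenever it sees a new set of Python objects, so caching one compiled function per variable set avoids retracing on every batch. The cache below is hypothetical and not part of keras_model.py:

def _get_gradient_fn(self, variables):
    # Hypothetical cache keyed by the identity of the variable set.
    key = None if variables is None else tuple(id(v) for v in variables)
    if not hasattr(self, '_gradient_fn_cache'):
        self._gradient_fn_cache = {}
    if key not in self._gradient_fn_cache:
        self._gradient_fn_cache[key] = self._create_gradient_fn(variables)
    return self._gradient_fn_cache[key]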

Example 5: call

# Required import: import tensorflow [as alias]
# Or: from tensorflow import GradientTape [as alias]
def call(self, inputs, conditional_inputs):
    with tf.GradientTape() as tape:
      for layer in inputs:
        tape.watch(layer)
      output = self.discriminator(_list_or_tensor(inputs + conditional_inputs))
    gradients = tape.gradient(output, inputs)
    gradients = [g for g in gradients if g is not None]
    if len(gradients) > 0:
      norm2 = 0.0
      for g in gradients:
        g2 = tf.square(g)
        dims = len(g.shape)
        if dims > 1:
          g2 = tf.reduce_sum(g2, axis=list(range(1, dims)))
        norm2 += g2
      penalty = tf.square(tf.sqrt(norm2) - 1.0)
      penalty = self.gan.gradient_penalty * tf.reduce_mean(penalty)
    else:
      penalty = 0.0
    return [output, penalty] 
Author: deepchem, Project: deepchem, Lines: 22, Source: gan.py
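What this example computes is the two-sided gradient penalty familiar from WGAN-GP. With discriminator D and weight \lambda corresponding to self.gan.gradient_penalty, the penalty is

    penalty = \lambda \, \mathbb{E}\left[ \left( \lVert \nabla_x D(x) \rVert_2 - 1 \right)^2 \right]

where the expectation is the batch mean and the squared norm is summed over all non-batch dimensions of every watched input tensor before the square root, so samples whose gradient norm deviates from 1 in either direction are penalized.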

Example 6: _train_body

# Required import: import tensorflow [as alias]
# Or: from tensorflow import GradientTape [as alias]
def _train_body(self, agent_states, agent_next_states, expert_states, expert_next_states):
        epsilon = 1e-8
        with tf.device(self.device):
            with tf.GradientTape() as tape:
                real_logits = self.disc([expert_states, expert_next_states])
                fake_logits = self.disc([agent_states, agent_next_states])
                loss = -(tf.reduce_mean(tf.math.log(real_logits + epsilon)) +
                         tf.reduce_mean(tf.math.log(1. - fake_logits + epsilon)))
            grads = tape.gradient(loss, self.disc.trainable_variables)
            self.optimizer.apply_gradients(
                zip(grads, self.disc.trainable_variables))

        accuracy = \
            tf.reduce_mean(tf.cast(real_logits >= 0.5, tf.float32)) / 2. + \
            tf.reduce_mean(tf.cast(fake_logits < 0.5, tf.float32)) / 2.
        js_divergence = self._compute_js_divergence(
            fake_logits, real_logits)
        return loss, accuracy, js_divergence 
Author: keiohta, Project: tf2rl, Lines: 20, Source: gaifo.py

Example 7: _train_body

# Required import: import tensorflow [as alias]
# Or: from tensorflow import GradientTape [as alias]
def _train_body(self, states, actions, next_states, rewards, done, weights):
        with tf.device(self.device):
            with tf.GradientTape() as tape:
                if self._enable_categorical_dqn:
                    td_errors = self._compute_td_error_body_distributional(
                        states, actions, next_states, rewards, done)
                    q_func_loss = tf.reduce_mean(
                        huber_loss(tf.negative(td_errors),
                                   delta=self.max_grad) * weights)
                else:
                    td_errors = self._compute_td_error_body(
                        states, actions, next_states, rewards, done)
                    q_func_loss = tf.reduce_mean(
                        huber_loss(td_errors,
                                   delta=self.max_grad) * weights)

            q_func_grad = tape.gradient(
                q_func_loss, self.q_func.trainable_variables)
            self.q_func_optimizer.apply_gradients(
                zip(q_func_grad, self.q_func.trainable_variables))

            return td_errors, q_func_loss 
Author: keiohta, Project: tf2rl, Lines: 24, Source: dqn.py

Example 8: _train_body

# Required import: import tensorflow [as alias]
# Or: from tensorflow import GradientTape [as alias]
def _train_body(self, agent_states, agent_acts, expert_states, expert_acts):
        epsilon = 1e-8
        with tf.device(self.device):
            with tf.GradientTape() as tape:
                real_logits = self.disc([expert_states, expert_acts])
                fake_logits = self.disc([agent_states, agent_acts])
                loss = -(tf.reduce_mean(tf.math.log(real_logits + epsilon)) +
                         tf.reduce_mean(tf.math.log(1. - fake_logits + epsilon)))
            grads = tape.gradient(loss, self.disc.trainable_variables)
            self.optimizer.apply_gradients(
                zip(grads, self.disc.trainable_variables))

        accuracy = \
            tf.reduce_mean(tf.cast(real_logits >= 0.5, tf.float32)) / 2. + \
            tf.reduce_mean(tf.cast(fake_logits < 0.5, tf.float32)) / 2.
        js_divergence = self._compute_js_divergence(
            fake_logits, real_logits)
        return loss, accuracy, js_divergence 
Author: keiohta, Project: tf2rl, Lines: 20, Source: gail.py

Example 9: test_swish_grad

# Required import: import tensorflow [as alias]
# Or: from tensorflow import GradientTape [as alias]
def test_swish_grad(self):
        def swish_grad(x, beta):
            return (
                beta * (2 - beta * x * np.tanh(beta * x / 2)) / (1 + np.cosh(beta * x))
            )

        x = testing_utils.generate_real_values_with_zeros(shape=(8, 3, 3, 16))
        tf_x = tf.Variable(x)
        with tf.GradientTape() as tape:
            activation = lq.quantizers.SwishSign()(tf_x)
        grad = tape.gradient(activation, tf_x)
        np.testing.assert_allclose(grad.numpy(), swish_grad(x, beta=5.0))

        with tf.GradientTape() as tape:
            activation = lq.quantizers.SwishSign(beta=10.0)(tf_x)
        grad = tape.gradient(activation, tf_x)
        np.testing.assert_allclose(grad.numpy(), swish_grad(x, beta=10.0)) 
Author: larq, Project: larq, Lines: 19, Source: quantizers_test.py

Example 10: testSparseGaussianProcess

# Required import: import tensorflow [as alias]
# Or: from tensorflow import GradientTape [as alias]
def testSparseGaussianProcess(self):
    dataset_size = 10
    batch_size = 3
    input_dim = 4
    output_dim = 5
    features = tf.to_float(np.random.rand(batch_size, input_dim))
    labels = tf.to_float(np.random.rand(batch_size, output_dim))
    model = gaussian_process.SparseGaussianProcess(output_dim, num_inducing=2)
    with tf.GradientTape() as tape:
      predictions = model(features)
      nll = -tf.reduce_mean(predictions.distribution.log_prob(labels))
      kl = sum(model.losses) / dataset_size
      loss = nll + kl

    self.evaluate(tf.global_variables_initializer())
    grads = tape.gradient(nll, model.variables)
    for grad in grads:
      self.assertIsNotNone(grad)

    loss_val, predictions_val = self.evaluate([loss, predictions])
    self.assertEqual(loss_val.shape, ())
    self.assertGreaterEqual(loss_val, 0.)
    self.assertEqual(predictions_val.shape, (batch_size, output_dim)) 
Author: yyht, Project: BERT, Lines: 25, Source: gaussian_process_test.py

Example 11: wrapped_tf_function

# Required import: import tensorflow [as alias]
# Or: from tensorflow import GradientTape [as alias]
def wrapped_tf_function(points, label):
  """Performs one step of minimization of the loss."""
  # --- subsampling (order DOES matter)
  points = points[0:FLAGS.num_points, ...]

  # --- augmentation
  if FLAGS.augment:
    points = tf.map_fn(augment.rotate, points)
    points = augment.jitter(points)

  # --- training
  with tf.GradientTape() as tape:
    logits = model(points, training=True)
    loss = model.loss(label, logits)
  variables = model.trainable_variables
  gradients = tape.gradient(loss, variables)
  optimizer.apply_gradients(zip(gradients, variables))
  return loss 
Author: tensorflow, Project: graphics, Lines: 20, Source: train.py

Example 12: train_step

# Required import: import tensorflow [as alias]
# Or: from tensorflow import GradientTape [as alias]
def train_step(self, inputs):
    """One train step.
    Args:
      inputs: one batch input.
    Returns:
      loss: Scaled loss.
    """

    image, label = inputs
    with tf.GradientTape() as tape:
      predictions = self.model(image, training=True)

      loss = self.compute_loss(predictions, label, training=True)

    gradients = tape.gradient(loss, self.model.trainable_variables)
    gradients = [tf.clip_by_value(grad, -5.0, 5.0) for grad in gradients]
    self.optimizer.apply_gradients(zip(gradients,
                                       self.model.trainable_variables))

    return loss 
Author: 610265158, Project: face_landmark, Lines: 23, Source: net_work.py
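One note on the clipping step above: tf.clip_by_value bounds every gradient element independently, which can change the gradient's direction. When direction should be preserved, global-norm clipping is the usual alternative; a sketch of the swap, not taken from net_work.py:

    # Alternative: rescale all gradients together so their global norm is at most 5.0.
    gradients, _ = tf.clip_by_global_norm(gradients, clip_norm=5.0)
    self.optimizer.apply_gradients(zip(gradients, self.model.trainable_variables))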

Example 13: train_step

# Required import: import tensorflow [as alias]
# Or: from tensorflow import GradientTape [as alias]
def train_step(self, inp, tar):
        tar_inp = tar[:, :-1]
        tar_real = tar[:, 1:]

        enc_padding_mask, combined_mask, dec_padding_mask = self.mask_encoder.create_masks(inp, tar_inp)

        with tf.GradientTape() as tape:
            predictions, _ = self.transformer(inp, tar_inp,
                                              True,
                                              enc_padding_mask,
                                              combined_mask,
                                              dec_padding_mask)
            loss = self.loss_function(tar_real, predictions)

        gradients = tape.gradient(loss, self.transformer.trainable_variables)
        self.optimizer.apply_gradients(zip(gradients, self.transformer.trainable_variables))

        self.train_loss(loss)
        self.train_accuracy(tar_real, predictions) 
Author: msgi, Project: nlp-journey, Lines: 21, Source: training.py

Example 14: verify_funcs_are_equivalent

# Required import: import tensorflow [as alias]
# Or: from tensorflow import GradientTape [as alias]
def verify_funcs_are_equivalent(dtype):
    x_np = np.random.uniform(-10, 10, size=(4, 4)).astype(dtype)
    x = tf.convert_to_tensor(x_np)
    lower = np.random.uniform(-10, 10)
    upper = lower + np.random.uniform(0, 10)

    with tf.GradientTape(persistent=True) as t:
        t.watch(x)
        y_native = _hardshrink_custom_op(x, lower, upper)
        y_py = _hardshrink_py(x, lower, upper)

    test_utils.assert_allclose_according_to_type(y_native, y_py)

    grad_native = t.gradient(y_native, x)
    grad_py = t.gradient(y_py, x)

    test_utils.assert_allclose_according_to_type(grad_native, grad_py) 
Author: tensorflow, Project: addons, Lines: 19, Source: hardshrink_test.py

Example 15: verify_funcs_are_equivalent

# Required import: import tensorflow [as alias]
# Or: from tensorflow import GradientTape [as alias]
def verify_funcs_are_equivalent(dtype):
    x_np = np.random.uniform(-10, 10, size=(4, 4)).astype(dtype)
    x = tf.convert_to_tensor(x_np)
    lower = np.random.uniform(-10, 10)
    upper = lower + np.random.uniform(0, 10)

    with tf.GradientTape(persistent=True) as t:
        t.watch(x)
        y_native = softshrink(x, lower, upper)
        y_py = _softshrink_py(x, lower, upper)

    test_utils.assert_allclose_according_to_type(y_native, y_py)

    grad_native = t.gradient(y_native, x)
    grad_py = t.gradient(y_py, x)

    test_utils.assert_allclose_according_to_type(grad_native, grad_py) 
Author: tensorflow, Project: addons, Lines: 19, Source: softshrink_test.py
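Note the persistent=True flag in Examples 14 and 15 above: a non-persistent tape releases its recorded operations after the first tape.gradient call, so taking two gradients from one tape requires persistent mode, and the tape should be deleted when no longer needed. A minimal self-contained sketch:

import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0])
with tf.GradientTape(persistent=True) as t:
    t.watch(x)       # x is a plain tensor, not a Variable, so watch it explicitly
    y1 = x * x
    y2 = x * x * x
dy1 = t.gradient(y1, x)  # first gradient call
dy2 = t.gradient(y2, x)  # second call: only legal because persistent=True
del t                    # release the tape's recorded operations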


Note: The tensorflow.GradientTape method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their authors; copyright of the source code belongs to the original authors, and distribution and use are governed by each project's License. Do not reproduce without permission.