

Python v2.one_hot Method Code Examples

This article collects typical usage examples of the tensorflow.compat.v2.one_hot method in Python. If you are unsure what v2.one_hot does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also browse further usage examples from the containing module, tensorflow.compat.v2.


Six code examples of the v2.one_hot method are shown below, ordered by popularity.
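Before the examples, here is a minimal standalone sketch of what tf.one_hot itself does (assuming TensorFlow 2.x with eager execution; the tensors and values in the comments are illustrative):

import tensorflow.compat.v2 as tf

labels = tf.constant([0, 2, 1])
# Shape [3, 4]: each row has 1.0 at the label index and 0.0 elsewhere.
basic = tf.one_hot(labels, depth=4)
# on_value/off_value and dtype customize the encoding, e.g. +1/-1 integer codes.
signed = tf.one_hot(labels, depth=4, on_value=1, off_value=-1, dtype=tf.int32)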

Example 1: _batch_jacobian

# Required imports: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import one_hot [as alias]
def _batch_jacobian(y, x, tape):
  """Computes a Jacobian w.r.t. last dimensions of y and x."""
  # y and x must have the same batch dimensions.
  # For input shapes (b, dy), (b, dx) yields shape (b, dy, dx).
  d = y.shape.as_list()[-1]
  if d is None:
    raise ValueError("Last dimension of state Tensors must be known.")
  grads = []
  for i in range(d):
    w = tf.broadcast_to(tf.one_hot(i, d, dtype=y.dtype), y.shape)
    # We must use tf.UnconnectedGradients.ZERO here and below, because some
    # state components may legitimately not depend on each other or some of the
    # params.
    grad = tape.gradient(y, x, output_gradients=w,
                         unconnected_gradients=tf.UnconnectedGradients.ZERO)
    grads.append(grad)
  return tf.stack(grads, axis=-2) 
Developer: google, Project: tf-quant-finance, Lines of code: 19, Source file: custom_loops.py
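A hypothetical driver for _batch_jacobian is sketched below (not part of custom_loops.py): the tape must be persistent because tape.gradient is called once per output component, and x must be watched explicitly since it is a constant.

import tensorflow.compat.v2 as tf

x = tf.constant([[1.0, 2.0], [3.0, 4.0]])   # shape (batch=2, dx=2)
with tf.GradientTape(persistent=True) as tape:
  tape.watch(x)
  y = x ** 2                                # shape (batch=2, dy=2)
# Shape (2, 2, 2); the diagonal of each batch entry equals 2 * x.
jac = _batch_jacobian(y, x, tape)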

Example 2: train_step

# Required imports: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import one_hot [as alias]
def train_step(model, loss_fn, optimizer_fn, metric, image, label):
  """Perform one training step for the model.

  Args:
    model: Keras model to train.
    loss_fn: Loss function to use.
    optimizer_fn: Optimizer function to use.
    metric: keras.metric to use.
    image: Tensor of training images of shape [batch_size, 28, 28, 1].
    label: Tensor of class labels of shape [batch_size].
  """
  with tf.GradientTape() as tape:
    preds = model(image)
    label_onehot = tf.one_hot(label, 10)
    loss_ = loss_fn(label_onehot, preds)
  grads = tape.gradient(loss_, model.trainable_variables)
  optimizer_fn.apply_gradients(zip(grads, model.trainable_variables))
  metric(loss_) 
Developer: tensorflow, Project: hub, Lines of code: 20, Source file: export.py
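The following is one hypothetical way to drive train_step; the tiny Sequential model, the random batch, and the Mean metric are stand-ins of my own, not taken from export.py.

import tensorflow.compat.v2 as tf

model = tf.keras.Sequential([
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(10, activation='softmax'),
])
loss_fn = tf.keras.losses.CategoricalCrossentropy()
optimizer = tf.keras.optimizers.Adam()
mean_loss = tf.keras.metrics.Mean(name='train_loss')

images = tf.random.uniform([32, 28, 28, 1])                  # fake image batch
labels = tf.random.uniform([32], maxval=10, dtype=tf.int32)  # fake class labels
train_step(model, loss_fn, optimizer, mean_loss, images, labels)
print(mean_loss.result())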

Example 3: _write_to_accumulators

# Required imports: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import one_hot [as alias]
def _write_to_accumulators(nested_acc, nested_tensor, index):
  if isinstance(nested_tensor, tf.Tensor):
    assert isinstance(nested_acc, tf.Tensor)
    acc_size = nested_acc.shape.as_list()[0]
    one_hot = tf.one_hot(index, depth=acc_size)
    one_hot = tf.reshape(one_hot, [acc_size] + [1] * len(nested_tensor.shape))
    return tf.where(one_hot > 0, nested_tensor, nested_acc)

  return [_write_to_accumulators(acc, t, index)
          for acc, t in zip(nested_acc, nested_tensor)] 
Developer: google, Project: tf-quant-finance, Lines of code: 12, Source file: custom_loops.py
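A hypothetical usage sketch (not from custom_loops.py): the helper writes the state of loop step index into a pre-allocated accumulator using a one-hot mask instead of a scatter op.

import tensorflow.compat.v2 as tf

acc = tf.zeros([3, 2])             # accumulator for 3 steps of a 2-vector state
state = tf.constant([5.0, 7.0])    # state produced at the current step
acc = _write_to_accumulators(acc, state, index=1)
# acc is now [[0., 0.], [5., 7.], [0., 0.]]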

Example 4: convert_to_one_hot

# Required imports: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import one_hot [as alias]
def convert_to_one_hot(self, samples):
    return tf.one_hot(
        tf.argmax(samples, axis=-1),
        self.distribution.event_size, dtype=self._output_dtype) 
Developer: tensorflow, Project: agents, Lines of code: 6, Source file: gumbel_softmax.py
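The core pattern behind convert_to_one_hot can be shown standalone; the event size and dtype below are illustrative stand-ins rather than values from gumbel_softmax.py.

import tensorflow.compat.v2 as tf

soft_samples = tf.constant([[0.1, 0.7, 0.2],
                            [0.5, 0.3, 0.2]])  # relaxed (Gumbel-Softmax) samples
# argmax picks the most likely category; one_hot re-encodes it as a hard sample.
hard_samples = tf.one_hot(tf.argmax(soft_samples, axis=-1),
                          depth=3, dtype=tf.float32)
# hard_samples == [[0., 1., 0.], [1., 0., 0.]]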

Example 5: _sample_channels

# Required imports: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import one_hot [as alias]
def _sample_channels(self, component_logits, locs, scales, coeffs=None,
                     seed=None):
  """Sample a single pixel-iteration and apply channel conditioning.

  Args:
    component_logits: 4D `Tensor` of logits for the Categorical distribution
      over Quantized Logistic mixture components. Dimensions are `[batch_size,
      height, width, num_logistic_mix]`.
    locs: 5D `Tensor` of location parameters for the Quantized Logistic
      mixture components. Dimensions are `[batch_size, height, width,
      num_logistic_mix, num_channels]`.
    scales: 5D `Tensor` of scale parameters for the Quantized Logistic
      mixture components. Dimensions are `[batch_size, height, width,
      num_logistic_mix, num_channels]`.
    coeffs: 5D `Tensor` of coefficients for the linear dependence among color
      channels, or `None` if there is only one channel. Dimensions are
      `[batch_size, height, width, num_logistic_mix, num_coeffs]`, where
      `num_coeffs = num_channels * (num_channels - 1) // 2`.
    seed: `int`, random seed.

  Returns:
    samples: 4D `Tensor` of sampled image data with autoregression among
      channels. Dimensions are `[batch_size, height, width, num_channels]`.
  """
  num_channels = self.event_shape[-1]

  # Sample the mixture component once for the entire pixel.
  component_dist = categorical.Categorical(logits=component_logits)
  mask = tf.one_hot(indices=component_dist.sample(seed=seed),
                    depth=self._num_logistic_mix)
  mask = tf.cast(mask[..., tf.newaxis], self.dtype)

  # Apply the mixture component mask and separate out the RGB parameters.
  masked_locs = tf.reduce_sum(locs * mask, axis=-2)
  loc_tensors = tf.split(masked_locs, num_channels, axis=-1)
  masked_scales = tf.reduce_sum(scales * mask, axis=-2)
  scale_tensors = tf.split(masked_scales, num_channels, axis=-1)

  if coeffs is not None:
    num_coeffs = num_channels * (num_channels - 1) // 2
    masked_coeffs = tf.reduce_sum(coeffs * mask, axis=-2)
    coef_tensors = tf.split(masked_coeffs, num_coeffs, axis=-1)

  channel_samples = []
  coef_count = 0
  for i in range(num_channels):
    loc = loc_tensors[i]
    for c in channel_samples:
      loc += c * coef_tensors[coef_count]
      coef_count += 1

    logistic_samp = logistic.Logistic(
        loc=loc, scale=scale_tensors[i]).sample(seed=seed)
    logistic_samp = tf.clip_by_value(logistic_samp, -1., 1.)
    channel_samples.append(logistic_samp)

  return tf.concat(channel_samples, axis=-1)
Developer: SeldonIO, Project: alibi-detect, Lines of code: 54, Source file: pixelcnn.py
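The masking idiom above can be demonstrated standalone. The sketch below uses tensorflow_probability's Categorical as a stand-in for the module-local categorical import, and the shapes (batch=1, height=width=2, num_logistic_mix=3, num_channels=2) are illustrative only.

import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp

component_logits = tf.zeros([1, 2, 2, 3])         # uniform mixture logits
locs = tf.random.normal([1, 2, 2, 3, 2])          # per-component locations

component_dist = tfp.distributions.Categorical(logits=component_logits)
mask = tf.one_hot(component_dist.sample(), depth=3)   # [1, 2, 2, 3]
mask = tf.cast(mask[..., tf.newaxis], locs.dtype)     # [1, 2, 2, 3, 1]
# Zero out every component except the sampled one, then sum it out.
masked_locs = tf.reduce_sum(locs * mask, axis=-2)     # [1, 2, 2, 2]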

Example 6: _sample_paths

# Required imports: from tensorflow.compat import v2 [as alias]
# Or: from tensorflow.compat.v2 import one_hot [as alias]
def _sample_paths(self, times, grid_step, keep_mask, num_requested_times,
                    num_samples, initial_state, random_type, seed, swap_memory):
    """Returns a sample of paths from the process."""
    dt = times[1:] - times[:-1]
    sqrt_dt = tf.sqrt(dt)
    current_state = initial_state + tf.zeros(
        [num_samples, self.dim()], dtype=initial_state.dtype)
    steps_num = tf.shape(dt)[-1]
    wiener_mean = tf.zeros((self.dim(), 1), dtype=self._dtype)

    cond_fn = lambda i, *args: i < steps_num

    def step_fn(i, written_count, current_state, result):
      """Performs one step of Euler scheme."""
      current_time = times[i + 1]
      dw = random_ops.mv_normal_sample((num_samples,),
                                       mean=wiener_mean,
                                       random_type=random_type,
                                       seed=seed)
      dw = dw * sqrt_dt[i]
      dt_inc = dt[i] * self.drift_fn()(current_time, current_state)  # pylint: disable=not-callable
      dw_inc = tf.squeeze(
          tf.matmul(self.volatility_fn()(current_time, current_state), dw), -1)  # pylint: disable=not-callable
      next_state = current_state + dt_inc + dw_inc

      def write_next_state_to_result():
        # Replace result[:, written_count, :] with next_state.
        one_hot = tf.one_hot(written_count, depth=num_requested_times)
        mask = tf.expand_dims(one_hot > 0, axis=-1)
        return tf.where(mask, tf.expand_dims(next_state, axis=1), result)

      # Keep only states for times requested by user.
      result = tf.cond(keep_mask[i + 1],
                       write_next_state_to_result,
                       lambda: result)
      written_count += tf.cast(keep_mask[i + 1], dtype=tf.int32)
      return i + 1, written_count, next_state, result

    # The maximum number of iterations is passed to the while loop below. It
    # improves performance of the while loop on a GPU and is needed for
    # XLA compilation compatibility.
    maximum_iterations = (
        tf.cast(1. / grid_step, dtype=tf.int32) + tf.size(times))
    result = tf.zeros((num_samples, num_requested_times, self.dim()))
    _, _, _, result = tf.compat.v1.while_loop(
        cond_fn,
        step_fn, (0, 0, current_state, result),
        maximum_iterations=maximum_iterations,
        swap_memory=swap_memory)

    return result 
Developer: google, Project: tf-quant-finance, Lines of code: 53, Source file: ito_process.py
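The write_next_state_to_result trick is worth isolating: a one-hot mask over the time axis lets the loop overwrite a single slice of result with tf.where rather than a scatter op, which keeps the loop body XLA-friendly. The shapes below are illustrative, not taken from ito_process.py.

import tensorflow.compat.v2 as tf

num_samples, num_requested_times, dim = 4, 5, 2
result = tf.zeros([num_samples, num_requested_times, dim])
next_state = tf.ones([num_samples, dim])
written_count = 2

one_hot = tf.one_hot(written_count, depth=num_requested_times)  # shape [5]
mask = tf.expand_dims(one_hot > 0, axis=-1)                     # shape [5, 1]
result = tf.where(mask, tf.expand_dims(next_state, axis=1), result)
# result[:, 2, :] now equals next_state; all other time slices stay zero.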


Note: The tensorflow.compat.v2.one_hot method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and any redistribution or use should follow the corresponding project's license. Do not reproduce this article without permission.