

Python v1.stack Method Code Examples

This article collects typical usage examples of the tensorflow.compat.v1.stack method in Python. If you are unsure what v1.stack does, how to call it, or what real-world usage looks like, the curated examples below may help. You can also explore further usage examples from the tensorflow.compat.v1 module.


The following presents 15 code examples of the v1.stack method, sorted by popularity by default.
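Before the project examples, here is a minimal, self-contained sketch of what tf.stack does: it joins a list of equal-shaped tensors along a new axis. The tensor names and values below are invented for illustration.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

a = tf.constant([1, 2])
b = tf.constant([3, 4])
stacked_rows = tf.stack([a, b], axis=0)  # shape [2, 2]: [[1, 2], [3, 4]]
stacked_cols = tf.stack([a, b], axis=1)  # shape [2, 2]: [[1, 3], [2, 4]]

with tf.Session() as sess:
    print(sess.run([stacked_rows, stacked_cols]))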

Example 1: two_class_log_likelihood

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import stack [as alias]
def two_class_log_likelihood(predictions, labels, weights_fn=None):
  """Log-likelihood for two class classification with 0/1 labels.

  Args:
    predictions: A float valued tensor of shape [`batch_size`].  Each
      component should be between 0 and 1.
    labels: An int valued tensor of shape [`batch_size`].  Each component
      should either be 0 or 1.
    weights_fn: unused.

  Returns:
    A pair, with the average log likelihood in the first component.
  """
  del weights_fn
  float_predictions = tf.cast(tf.squeeze(predictions), dtype=tf.float64)
  batch_probs = tf.stack([1. - float_predictions, float_predictions], axis=-1)
  int_labels = tf.cast(tf.squeeze(labels), dtype=tf.int32)
  onehot_targets = tf.cast(tf.one_hot(int_labels, 2), dtype=tf.float64)
  chosen_probs = tf.einsum(
      "ij,ij->i", batch_probs, onehot_targets, name="chosen_probs")
  avg_log_likelihood = tf.reduce_mean(tf.log(chosen_probs))
  return avg_log_likelihood, tf.constant(1.0) 
Developer: tensorflow, Project: tensor2tensor, Lines of code: 24, Source file: metrics.py
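A hedged usage sketch for the function above; the prediction and label values are invented for illustration, and the function is assumed to be in scope as defined in metrics.py.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

preds = tf.constant([0.9, 0.2, 0.7, 0.4])  # P(label == 1) per example
labels = tf.constant([1, 0, 1, 0])
avg_ll, weight = two_class_log_likelihood(preds, labels)

with tf.Session() as sess:
    # Mean of log(0.9), log(0.8), log(0.7), log(0.6); weight is always 1.0.
    print(sess.run([avg_ll, weight]))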

Example 2: combine

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import stack [as alias]
def combine(self, expert_out, multiply_by_gates=True):
    """Sum together the expert output, multiplied by the corresponding gates.

    Args:
      expert_out: a list of `num_experts` `Tensor`s, each with shape
        `[expert_batch_size_i, <extra_output_dims>]`.
      multiply_by_gates: a boolean.

    Returns:
      a list of num_datashards `Tensor`s with shapes
        `[batch_size[d], <extra_output_dims>]`.
    """
    expert_part_sizes = tf.unstack(
        tf.stack([d.part_sizes for d in self._dispatchers]),
        num=self._ep.n,
        axis=1)
    # list of lists of shape [num_experts][num_datashards]
    expert_output_parts = self._ep(tf.split, expert_out, expert_part_sizes)
    expert_output_parts_t = transpose_list_of_lists(expert_output_parts)
    def my_combine(dispatcher, parts):
      return dispatcher.combine(
          common_layers.convert_gradient_to_tensor(tf.concat(parts, 0)),
          multiply_by_gates=multiply_by_gates)
    return self._dp(my_combine, self._dispatchers, expert_output_parts_t) 
Developer: tensorflow, Project: tensor2tensor, Lines of code: 26, Source file: expert_utils.py

Example 3: actnorm_3d

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import stack [as alias]
def actnorm_3d(name, x, logscale_factor=3.):
  """Applies actnorm to each time-step independently.

  There are a total of 2*n_channels*n_steps parameters learnt.

  Args:
    name: variable scope.
    x: 5-D Tensor, (NTHWC)
    logscale_factor: Increases the learning rate of the scale by
                     logscale_factor.
  Returns:
    x: 5-D Tensor, (NTHWC) with the per-timestep, per-channel normalization.
  """
  with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
    x = tf.unstack(x, axis=1)
    x_normed = []
    for ind, x_step in enumerate(x):
      x_step, _ = actnorm("actnorm_%d" % ind, x_step,
                          logscale_factor=logscale_factor)
      x_normed.append(x_step)
    return tf.stack(x_normed, axis=1), None 
Developer: tensorflow, Project: tensor2tensor, Lines of code: 23, Source file: glow_ops.py
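The unstack/process/stack pattern above generalizes beyond actnorm. A standalone sketch, with a simple mean-subtraction stand-in for the project's actnorm helper:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

x = tf.random_uniform([2, 3, 4, 4, 8])              # (N, T, H, W, C)
steps = tf.unstack(x, axis=1)                       # T tensors of (N, H, W, C)
processed = [s - tf.reduce_mean(s) for s in steps]  # stand-in for actnorm
y = tf.stack(processed, axis=1)                     # back to (N, T, H, W, C)

with tf.Session() as sess:
    print(sess.run(y).shape)                        # (2, 3, 4, 4, 8)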

Example 4: inject_latent

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import stack [as alias]
def inject_latent(self, layer, inputs, target, action):
    """Inject a VAE-style latent."""
    del action
    # Latent for stochastic model
    filters = 128
    full_video = tf.stack(inputs + [target], axis=1)
    latent_mean, latent_std = self.construct_latent_tower(
        full_video, time_axis=1)
    latent = common_video.get_gaussian_tensor(latent_mean, latent_std)
    latent = tfl.flatten(latent)
    latent = tf.expand_dims(latent, axis=1)
    latent = tf.expand_dims(latent, axis=1)
    latent_mask = tfl.dense(latent, filters, name="latent_mask")
    zeros_mask = tf.zeros(
        common_layers.shape_list(layer)[:-1] + [filters], dtype=tf.float32)
    layer = tf.concat([layer, latent_mask + zeros_mask], axis=-1)
    extra_loss = self.get_kl_loss([latent_mean], [latent_std])
    return layer, extra_loss 
Developer: tensorflow, Project: tensor2tensor, Lines of code: 20, Source file: basic_stochastic.py

Example 5: reward_prediction_mid

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import stack [as alias]
def reward_prediction_mid(
      self, input_images, input_reward, action, latent, mid_outputs):
    """Builds a reward prediction network from intermediate layers."""
    encoded = []
    for i, output in enumerate(mid_outputs):
      enc = output
      enc = tfl.conv2d(enc, 64, [3, 3], strides=(1, 1), activation=tf.nn.relu)
      enc = tfl.conv2d(enc, 32, [3, 3], strides=(2, 2), activation=tf.nn.relu)
      enc = tfl.conv2d(enc, 16, [3, 3], strides=(2, 2), activation=tf.nn.relu)
      enc = tfl.flatten(enc)
      enc = tfl.dense(enc, 64, activation=tf.nn.relu, name="rew_enc_%d" % i)
      encoded.append(enc)
    x = encoded
    x = tf.stack(x, axis=1)
    x = tfl.flatten(x)
    x = tfl.dense(x, 256, activation=tf.nn.relu, name="rew_dense1")
    x = tfl.dense(x, 128, activation=tf.nn.relu, name="rew_dense2")
    return x 
Developer: tensorflow, Project: tensor2tensor, Lines of code: 20, Source file: sv2p.py

Example 6: argmax_with_score

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import stack [as alias]
def argmax_with_score(logits, axis=None):
  """Argmax along with the value."""
  axis = axis or len(logits.get_shape()) - 1
  predictions = tf.argmax(logits, axis=axis)

  logits_shape = shape_list(logits)
  prefix_shape, vocab_size = logits_shape[:-1], logits_shape[-1]
  prefix_size = 1
  for d in prefix_shape:
    prefix_size *= d

  # Flatten to extract scores
  flat_logits = tf.reshape(logits, [prefix_size, vocab_size])
  flat_predictions = tf.reshape(predictions, [prefix_size])
  flat_indices = tf.stack(
      [tf.range(tf.to_int64(prefix_size)),
       tf.to_int64(flat_predictions)],
      axis=1)
  flat_scores = tf.gather_nd(flat_logits, flat_indices)

  # Unflatten
  scores = tf.reshape(flat_scores, prefix_shape)

  return predictions, scores 
Developer: tensorflow, Project: tensor2tensor, Lines of code: 26, Source file: common_layers.py
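A usage sketch for argmax_with_score; it assumes shape_list (defined alongside this function in common_layers.py) is in scope, and the logits values are invented for illustration.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

logits = tf.constant([[0.1, 2.0, 0.3],
                      [1.5, 0.2, 0.1]])
preds, scores = argmax_with_score(logits)

with tf.Session() as sess:
    print(sess.run([preds, scores]))  # [1, 0] and [2.0, 1.5]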

Example 7: _scanning_pack

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import stack [as alias]
def _scanning_pack(self, dataset):
    """Apply scan based pack to a dataset."""
    if self._chop_long_sequences:
      dataset = dataset.map(lambda x: (x[:self._packed_length],))
    else:
      dataset = dataset.filter(lambda *x: tf.reduce_max(  # pylint: disable=g-long-lambda
          tf.stack([tf.shape(i)[0] for i in x]), axis=0) <= self._packed_length)

    # In order to retrieve the sequences which are still in the queue when the
    # dataset is exhausted, we feed dummy sequences which are guaranteed to
    # displace the remaining elements.
    dataset = dataset.concatenate(
        tf.data.Dataset.range(self._queue_size).map(self._eviction_fn))

    initial_state = self._scan_initial_state()
    step_fn = functools.partial(
        tf.autograph.to_graph(_scan_step_fn), packed_length=self._packed_length,
        queue_size=self._queue_size, spacing=self._spacing,
        num_sequences=self._num_sequences, token_dtype=self._token_dtype)

    dataset = dataset.apply(tf.data.experimental.scan(initial_state, step_fn))

    is_real_sample = lambda valid_sample, _: valid_sample
    return dataset.filter(is_real_sample) 
Developer: tensorflow, Project: tensor2tensor, Lines of code: 26, Source file: generator_utils.py

Example 8: _merge_decode_results

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import stack [as alias]
def _merge_decode_results(self, decode_results):
    """Merge across time."""
    assert decode_results
    time_axis = 1
    zipped_results = lstm_utils.LstmDecodeResults(*list(zip(*decode_results)))
    if zipped_results.rnn_output[0] is None:
      rnn_output = None
      rnn_input = None
    else:
      rnn_output = tf.concat(zipped_results.rnn_output, axis=time_axis)
      rnn_input = tf.concat(zipped_results.rnn_input, axis=time_axis)
    return lstm_utils.LstmDecodeResults(
        rnn_output=rnn_output,
        rnn_input=rnn_input,
        samples=tf.concat(zipped_results.samples, axis=time_axis),
        final_state=zipped_results.final_state[-1],
        final_sequence_lengths=tf.stack(
            zipped_results.final_sequence_lengths, axis=time_axis)) 
Developer: magenta, Project: magenta, Lines of code: 20, Source file: lstm_models.py

Example 9: resize_and_crop_boxes

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import stack [as alias]
def resize_and_crop_boxes(self):
    """Resize boxes and crop it to the self._output dimension."""
    boxlist = preprocessor.box_list.BoxList(self._boxes)
    boxes = preprocessor.box_list_scale(
        boxlist, self._scaled_height, self._scaled_width).get()
    # Adjust box coordinates based on the offset.
    box_offset = tf.stack([self._crop_offset_y, self._crop_offset_x,
                           self._crop_offset_y, self._crop_offset_x,])
    boxes -= tf.cast(tf.reshape(box_offset, [1, 4]), tf.float32)
    # Clip the boxes.
    boxes = self.clip_boxes(boxes)
    # Filter out ground truth boxes that are all zeros.
    indices = tf.where(tf.not_equal(tf.reduce_sum(boxes, axis=1), 0))
    boxes = tf.gather_nd(boxes, indices)
    classes = tf.gather_nd(self._classes, indices)
    return boxes, classes 
Developer: JunweiLiang, Project: Object_Detection_Tracking, Lines of code: 18, Source file: dataloader.py

Example 10: _build_train_op

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import stack [as alias]
def _build_train_op(self):
    """Builds a training op.

    Returns:
      train_op: An op performing one step of training from replay data.
    """
    actions = self._replay.actions
    indices = tf.stack([tf.range(actions.shape[0]), actions], axis=-1)
    replay_chosen_q = tf.gather_nd(
        self._replay_net_outputs.q_heads, indices=indices)
    target = tf.stop_gradient(self._build_target_q_op())
    loss = tf.losses.huber_loss(
        target, replay_chosen_q, reduction=tf.losses.Reduction.NONE)
    q_head_losses = tf.reduce_mean(loss, axis=0)
    final_loss = tf.reduce_mean(q_head_losses)
    if self.summary_writer is not None:
      with tf.variable_scope('Losses'):
        tf.summary.scalar('HuberLoss', final_loss)
    return self.optimizer.minimize(final_loss) 
Developer: google-research, Project: batch_rl, Lines of code: 21, Source file: multi_head_dqn_agent.py
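The tf.stack call above implements a common indexing idiom: pair each batch row with its chosen action, then pull out the chosen Q-values with tf.gather_nd. A standalone sketch of just that idiom, with made-up values:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

q_values = tf.constant([[1.0, 2.0],
                        [3.0, 4.0],
                        [5.0, 6.0]])           # [batch, num_actions]
actions = tf.constant([1, 0, 1])               # chosen action per row
indices = tf.stack([tf.range(3), actions], axis=-1)
chosen_q = tf.gather_nd(q_values, indices)     # -> [2.0, 3.0, 6.0]

with tf.Session() as sess:
    print(sess.run(chosen_q))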

Example 11: call

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import stack [as alias]
def call(self, state):
    """Creates the output tensor/op given the input state tensor.

    See https://www.tensorflow.org/api_docs/python/tf/keras/Model for more
    information on this. Note that tf.keras.Model implements `call` which is
    wrapped by `__call__` function by tf.keras.Model.

    Args:
      state: Tensor, input tensor.
    Returns:
      collections.namedtuple, output ops (graph mode) or output tensors (eager).
    """
    unordered_q_networks = [
        network(state).q_values for network in self._q_networks]
    unordered_q_networks = tf.stack(unordered_q_networks, axis=-1)
    q_networks, q_values = combine_q_functions(unordered_q_networks,
                                               self._transform_strategy,
                                               **self._kwargs)
    return MultiNetworkNetworkType(q_networks, unordered_q_networks, q_values) 
Developer: google-research, Project: batch_rl, Lines of code: 21, Source file: atari_helpers.py

Example 12: apply_piecewise_monotonic_fn

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import stack [as alias]
def apply_piecewise_monotonic_fn(self, wrapper, fn, boundaries, *args):
    valid_values = []
    for a in [self] + list(args):
      vs = []
      vs.append(a.lower)
      vs.append(a.upper)
      for b in boundaries:
        vs.append(
            tf.maximum(a.lower, tf.minimum(a.upper, b * tf.ones_like(a.lower))))
      valid_values.append(vs)
    outputs = []
    for inputs in itertools.product(*valid_values):
      outputs.append(fn(*inputs))
    outputs = tf.stack(outputs, axis=-1)
    return IntervalBounds(tf.reduce_min(outputs, axis=-1),
                          tf.reduce_max(outputs, axis=-1)) 
Developer: deepmind, Project: interval-bound-propagation, Lines of code: 18, Source file: bounds.py
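The core trick above — evaluating the function at interval endpoints plus clamped interior boundary points, then stacking and reducing — works without the project's IntervalBounds machinery. A standalone sketch bounding tf.sin elementwise, using its stationary points as boundaries:

import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

lower = tf.constant([0.0, 1.0])
upper = tf.constant([2.0, 4.0])
boundaries = [np.pi / 2, 3 * np.pi / 2]  # extrema candidates for sin
vs = [lower, upper]
for b in boundaries:
    # Clamp each boundary into the interval, as in the method above.
    vs.append(tf.maximum(lower, tf.minimum(upper, b * tf.ones_like(lower))))
outputs = tf.stack([tf.sin(v) for v in vs], axis=-1)
lb = tf.reduce_min(outputs, axis=-1)
ub = tf.reduce_max(outputs, axis=-1)

with tf.Session() as sess:
    print(sess.run([lb, ub]))  # elementwise bounds of sin over each interval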

Example 13: stack_intra_task_episodes

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import stack [as alias]
def stack_intra_task_episodes(
    in_tensors,
    num_samples_per_task,
):
  """Stacks together tensors from different episodes of the same task.

  Args:
    in_tensors: The input tensors, stored with key names of the form
      "<name>/i", where i is an int in [0, (num_samples_per_task - 1)].
    num_samples_per_task: Number of episodes in the task.

  Returns:
    A structure of tensors that matches out_tensor_spec.
  """
  out_tensors = TSpecStructure()
  # Strip the "/i" postfix from all keys, then get the set of unique keys.
  key_set = set(['/'.join(key.split('/')[:-1]) for key in in_tensors.keys()])
  for key in key_set:
    data = []
    for i in range(num_samples_per_task):
      data.append(in_tensors['{:s}/{:d}'.format(key, i)])
    out_tensors[key] = tf.stack(data, axis=1)
  return out_tensors 
Developer: google-research, Project: tensor2robot, Lines of code: 25, Source file: preprocessors.py

Example 14: ApplyPhotometricImageDistortionsCheap

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import stack [as alias]
def ApplyPhotometricImageDistortionsCheap(
    images):
  """Apply photometric distortions to the input images.

  Args:
    images: Tensor of shape [batch_size, h, w, 3] containing a batch of images
      to apply the random photometric distortions to. Assumed to be normalized
      to range (0, 1), float32 encoding.
  Returns:
    images: Tensor of shape [batch_size, h, w, 3] containing a batch of images
      resulting from applying random photometric distortions to the inputs.
  """
  with tf.name_scope('photometric_distortion'):
    channels = tf.unstack(images, axis=-1)
    # Per-channel random gamma correction.
    # Lower gamma = brighter image, decreased contrast.
    # Higher gamma = dark image, increased contrast.
    gamma_corrected = [c**tf.random_uniform([], 0.5, 1.5) for c in channels]
    images = tf.stack(gamma_corrected, axis=-1)
    return images 
Developer: google-research, Project: tensor2robot, Lines of code: 22, Source file: image_transformations.py
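A usage sketch applying the distortion to a random batch, assuming the function is in scope as defined in image_transformations.py; inputs must be float32 in (0, 1) per the docstring.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

images = tf.random_uniform([2, 8, 8, 3], minval=0.01, maxval=1.0)
distorted = ApplyPhotometricImageDistortionsCheap(images)

with tf.Session() as sess:
    print(sess.run(distorted).shape)  # (2, 8, 8, 3)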

Example 15: _decode_and_center_crop

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import stack [as alias]
def _decode_and_center_crop(image_bytes, image_size):
  """Crops to center of image with padding then scales image_size."""
  shape = tf.image.extract_jpeg_shape(image_bytes)
  image_height = shape[0]
  image_width = shape[1]

  padded_center_crop_size = tf.cast(
      ((image_size / (image_size + CROP_PADDING)) *
       tf.cast(tf.minimum(image_height, image_width), tf.float32)),
      tf.int32)

  offset_height = ((image_height - padded_center_crop_size) + 1) // 2
  offset_width = ((image_width - padded_center_crop_size) + 1) // 2
  crop_window = tf.stack([offset_height, offset_width,
                          padded_center_crop_size, padded_center_crop_size])
  image = tf.image.decode_and_crop_jpeg(image_bytes, crop_window, channels=3)
  image = tf.image.resize_bicubic([image], [image_size, image_size])[0]
  return image 
Developer: lukemelas, Project: EfficientNet-PyTorch, Lines of code: 20, Source file: preprocessing.py
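A usage sketch round-tripping a synthetic image through JPEG encoding and the crop. CROP_PADDING is a module-level constant in the source file (32 in the original project); the image shape and target size below are invented for illustration.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

CROP_PADDING = 32
raw = tf.random_uniform([64, 80, 3], maxval=255, dtype=tf.float32)
image_bytes = tf.image.encode_jpeg(tf.cast(raw, tf.uint8))
image = _decode_and_center_crop(image_bytes, image_size=32)

with tf.Session() as sess:
    print(sess.run(image).shape)  # (32, 32, 3)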


Note: The tensorflow.compat.v1.stack method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. Please consult the corresponding project's License before distributing or using the code; do not republish without permission.