

Python v1.expand_dims Method Code Examples

This article collects and summarizes typical usage examples of the tensorflow.compat.v1.expand_dims method in Python. If you are wondering exactly what v1.expand_dims does, how to call it, or what it looks like in real code, the curated method examples below may help. You can also explore further usage examples from tensorflow.compat.v1, the module this method belongs to.


The text below presents 15 code examples of the v1.expand_dims method, sorted by popularity by default.
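Before the examples, here is a minimal, hypothetical sketch (not taken from any of the projects below) of what tf.compat.v1.expand_dims itself does: it inserts a new dimension of size 1 at the given axis. It assumes a TF1-style graph/session environment.

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

x = tf.constant([[1, 2, 3], [4, 5, 6]])   # static shape [2, 3]
batched = tf.expand_dims(x, axis=0)       # static shape [1, 2, 3]
channels = tf.expand_dims(x, axis=-1)     # static shape [2, 3, 1]

with tf.Session() as sess:
  print(sess.run(tf.shape(batched)))      # [1 2 3]
  print(sess.run(tf.shape(channels)))     # [2 3 1]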

Example 1: _distort_image

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import expand_dims [as alias]
def _distort_image(self, image):
    """Distort one image for training a network.

    Adopted the standard data augmentation scheme that is widely used for
    this dataset: the images are first zero-padded with 4 pixels on each side,
    then randomly cropped back to the original size; half of the images
    are then horizontally mirrored.

    Args:
      image: input image.
    Returns:
      distorted image.
    """
    image = tf.image.resize_image_with_crop_or_pad(
        image, self.height + 8, self.width + 8)
    distorted_image = tf.random_crop(image,
                                     [self.height, self.width, self.depth])
    # Randomly flip the image horizontally.
    distorted_image = tf.image.random_flip_left_right(distorted_image)
    if self.summary_verbosity >= 3:
      tf.summary.image('distorted_image', tf.expand_dims(distorted_image, 0))
    return distorted_image 
Developer: tensorflow | Project: benchmarks | Code lines: 24 | Source: preprocessing.py

Example 2: mask_from_lengths

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import expand_dims [as alias]
def mask_from_lengths(lengths, max_length=None, dtype=None, name=None):
  """Convert a length scalar to a vector of binary masks.

  This function will convert a vector of lengths to a matrix of binary masks.
  E.g. [2, 4, 3] will become [[1, 1, 0, 0], [1, 1, 1, 1], [1, 1, 1, 0]]

  Args:
    lengths: a d-dimensional vector of integers corresponding to lengths.
    max_length: an optional (default: None) scalar-like or 0-dimensional tensor
      indicating the maximum length of the masks. If not provided, the maximum
      length will be inferred from the lengths vector.
    dtype: the dtype of the returned mask, if specified. If None, the dtype of
      the lengths will be used.
    name: a name for the operation (optional).

  Returns:
    A d x max_length tensor of binary masks (int32).
  """
  with tf.name_scope(name, 'mask_from_lengths'):
    dtype = lengths.dtype if dtype is None else dtype
    max_length = tf.reduce_max(lengths) if max_length is None else max_length
    indexes = tf.range(max_length, dtype=lengths.dtype)
    mask = tf.less(tf.expand_dims(indexes, 0), tf.expand_dims(lengths, 1))
    cast_mask = tf.cast(mask, dtype)
  return tf.stop_gradient(cast_mask) 
Developer: deepmind | Project: lamb | Code lines: 27 | Source: utils.py
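As a hypothetical usage sketch (not part of the deepmind/lamb sources), assuming mask_from_lengths above is in scope and a TF1-style session is used, the docstring example can be reproduced like this:

lengths = tf.constant([2, 4, 3])
mask = mask_from_lengths(lengths, max_length=4, dtype=tf.int32)
with tf.Session() as sess:
  print(sess.run(mask))
  # [[1 1 0 0]
  #  [1 1 1 1]
  #  [1 1 1 0]]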

Example 3: simulate

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import expand_dims [as alias]
def simulate(self, action):
    reward, done = self._batch_env.simulate(action)
    with tf.control_dependencies([reward, done]):
      new_observ = tf.expand_dims(self._batch_env.observ, axis=1)

      # If we shouldn't stack, i.e. self.history == 1, then just assign
      # new_observ to self._observ and return from here.
      if self.history == 1:
        with tf.control_dependencies([self._observ.assign(new_observ)]):
          return tf.identity(reward), tf.identity(done)

      # If we should stack, then do the required work.
      old_observ = tf.gather(
          self._observ.read_value(),
          list(range(1, self.history)),
          axis=1)
      with tf.control_dependencies([new_observ, old_observ]):
        with tf.control_dependencies([self._observ.assign(
            tf.concat([old_observ, new_observ], axis=1))]):
          return tf.identity(reward), tf.identity(done) 
Developer: tensorflow | Project: tensor2tensor | Code lines: 22 | Source: tf_atari_wrappers.py

Example 4: testPrefixAccuracy

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import expand_dims [as alias]
def testPrefixAccuracy(self):
    vocab_size = 10
    predictions = tf.one_hot(
        tf.constant([[[1], [2], [3], [4], [9], [6], [7], [8]],
                     [[1], [2], [3], [4], [5], [9], [7], [8]],
                     [[1], [2], [3], [4], [5], [9], [7], [0]]]),
        vocab_size)
    labels = tf.expand_dims(
        tf.constant([[[1], [2], [3], [4], [5], [6], [7], [8]],
                     [[1], [2], [3], [4], [5], [6], [7], [8]],
                     [[1], [2], [3], [4], [5], [6], [7], [0]]]),
        axis=-1)
    expected_accuracy = np.average([4.0 / 8.0,
                                    5.0 / 8.0,
                                    5.0 / 7.0])
    accuracy, _ = metrics.prefix_accuracy(predictions, labels)
    with self.test_session() as session:
      accuracy_value = session.run(accuracy)
      self.assertAlmostEqual(expected_accuracy, accuracy_value) 
Developer: tensorflow | Project: tensor2tensor | Code lines: 21 | Source: metrics_test.py

Example 5: testSigmoidAccuracyOneHot

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import expand_dims [as alias]
def testSigmoidAccuracyOneHot(self):
    logits = np.array([
        [-1., 1.],
        [1., -1.],
        [-1., 1.],
        [1., -1.]
    ])
    labels = np.array([
        [0, 1],
        [1, 0],
        [1, 0],
        [0, 1]
    ])
    logits = np.expand_dims(np.expand_dims(logits, 1), 1)
    labels = np.expand_dims(np.expand_dims(labels, 1), 1)

    with self.test_session() as session:
      score, _ = metrics.sigmoid_accuracy_one_hot(logits, labels)
      session.run(tf.global_variables_initializer())
      session.run(tf.local_variables_initializer())
      s = session.run(score)
    self.assertEqual(s, 0.5) 
Developer: tensorflow | Project: tensor2tensor | Code lines: 24 | Source: metrics_test.py

Example 6: testSigmoidPrecisionOneHot

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import expand_dims [as alias]
def testSigmoidPrecisionOneHot(self):
    logits = np.array([
        [-1., 1.],
        [1., -1.],
        [1., -1.],
        [1., -1.]
    ])
    labels = np.array([
        [0, 1],
        [0, 1],
        [0, 1],
        [0, 1]
    ])
    logits = np.expand_dims(np.expand_dims(logits, 1), 1)
    labels = np.expand_dims(np.expand_dims(labels, 1), 1)

    with self.test_session() as session:
      score, _ = metrics.sigmoid_precision_one_hot(logits, labels)
      session.run(tf.global_variables_initializer())
      session.run(tf.local_variables_initializer())
      s = session.run(score)
    self.assertEqual(s, 0.25) 
Developer: tensorflow | Project: tensor2tensor | Code lines: 24 | Source: metrics_test.py

Example 7: testSigmoidRecallOneHot

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import expand_dims [as alias]
def testSigmoidRecallOneHot(self):
    logits = np.array([
        [-1., 1.],
        [1., -1.],
        [1., -1.],
        [1., -1.]
    ])
    labels = np.array([
        [0, 1],
        [0, 1],
        [0, 1],
        [0, 1]
    ])
    logits = np.expand_dims(np.expand_dims(logits, 1), 1)
    labels = np.expand_dims(np.expand_dims(labels, 1), 1)

    with self.test_session() as session:
      score, _ = metrics.sigmoid_recall_one_hot(logits, labels)
      session.run(tf.global_variables_initializer())
      session.run(tf.local_variables_initializer())
      s = session.run(score)
    self.assertEqual(s, 0.25) 
Developer: tensorflow | Project: tensor2tensor | Code lines: 24 | Source: metrics_test.py

Example 8: testSigmoidCrossEntropyOneHot

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import expand_dims [as alias]
def testSigmoidCrossEntropyOneHot(self):
    logits = np.array([
        [-1., 1.],
        [1., -1.],
        [1., -1.],
        [1., -1.]
    ])
    labels = np.array([
        [0, 1],
        [1, 0],
        [0, 0],
        [0, 1]
    ])
    logits = np.expand_dims(np.expand_dims(logits, 1), 1)
    labels = np.expand_dims(np.expand_dims(labels, 1), 1)

    with self.test_session() as session:
      score, _ = metrics.sigmoid_cross_entropy_one_hot(logits, labels)
      session.run(tf.global_variables_initializer())
      session.run(tf.local_variables_initializer())
      s = session.run(score)
    self.assertAlmostEqual(s, 0.688, places=3) 
Developer: tensorflow | Project: tensor2tensor | Code lines: 24 | Source: metrics_test.py

Example 9: combine

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import expand_dims [as alias]
def combine(self, expert_out, multiply_by_gates=True):
    """Sum together the expert output, weighted by the gates.

    The slice corresponding to a particular batch element `b` is computed
    as the sum over all experts `i` of the expert output, weighted by the
    corresponding gate values.  If `multiply_by_gates` is set to False, the
    gate values are ignored.

    Args:
      expert_out: a list of `num_experts` `Tensor`s, each with shape
        `[expert_batch_size_i, <extra_output_dims>]`.
      multiply_by_gates: a boolean

    Returns:
      a `Tensor` with shape `[batch_size, <extra_output_dims>]`.
    """
    # see comments on convert_gradient_to_tensor
    stitched = common_layers.convert_gradient_to_tensor(
        tf.concat(expert_out, 0))
    if multiply_by_gates:
      stitched *= tf.expand_dims(self._nonzero_gates, 1)
    combined = tf.unsorted_segment_sum(stitched, self._batch_index,
                                       tf.shape(self._gates)[0])
    return combined 
Developer: tensorflow | Project: tensor2tensor | Code lines: 26 | Source: expert_utils.py

Example 10: standardize_shapes

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import expand_dims [as alias]
def standardize_shapes(features, batch_size=None):
  """Set the right shapes for the features."""
  for fname in ["inputs", "targets"]:
    if fname not in features:
      continue
    f = features[fname]
    while len(f.get_shape()) < 4:
      f = tf.expand_dims(f, axis=-1)
    features[fname] = f

  if batch_size:
    # Ensure batch size is set on all features
    for _, t in six.iteritems(features):
      shape = t.get_shape().as_list()
      shape[0] = batch_size
      t.set_shape(t.get_shape().merge_with(shape))
      # Assert shapes are fully known
      t.get_shape().assert_is_fully_defined()

  return features 
Developer: tensorflow | Project: tensor2tensor | Code lines: 22 | Source: data_reader.py
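As a hypothetical illustration (not part of tensor2tensor's data_reader.py), assuming standardize_shapes above is in scope, a 2-D [batch, length] targets tensor is padded out to the 4-D shape that tensor2tensor expects:

features = {"targets": tf.zeros([8, 50], dtype=tf.int32)}   # shape [8, 50]
features = standardize_shapes(features, batch_size=8)
print(features["targets"].get_shape())                      # prints (8, 50, 1, 1)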

Example 11: body

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import expand_dims [as alias]
def body(self, features):
    hparams = self._hparams
    inputs = features["inputs"]
    target_space = features["target_space_id"]

    inputs = common_layers.flatten4d3d(inputs)

    (encoder_input, encoder_self_attention_bias, _) = (
        transformer_prepare_encoder(inputs, target_space, hparams))

    encoder_input = tf.nn.dropout(encoder_input,
                                  1.0 - hparams.layer_prepostprocess_dropout)
    encoder_output = transformer_encoder(
        encoder_input,
        encoder_self_attention_bias,
        hparams,
        nonpadding=features_to_nonpadding(features, "inputs"))
    encoder_output = tf.expand_dims(encoder_output, 2)

    return encoder_output 
Developer: tensorflow | Project: tensor2tensor | Code lines: 22 | Source: transformer.py

Example 12: initialize_write_strengths

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import expand_dims [as alias]
def initialize_write_strengths(self, batch_size):
    """Initialize write strengths to write to the first memory address.

    This is exposed as its own function so that it can be overridden to provide
    alternate write addressing schemes.

    Args:
      batch_size: The size of the current batch.

    Returns:
      A tf.float32 tensor of shape [batch_size, num_write_heads, memory_size, 1]
      where the first element along the memory_size dimension is set to 1.0.
    """
    return tf.expand_dims(
        tf.one_hot([[0] * self._num_write_heads] * batch_size,
                   depth=self._memory_size, dtype=tf.float32), axis=3) 
Developer: tensorflow | Project: tensor2tensor | Code lines: 18 | Source: neural_stack.py
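A hypothetical shape check (not part of neural_stack.py), assuming batch_size=2, self._num_write_heads=1 and self._memory_size=4, shows why the result is 4-D:

strengths = tf.one_hot([[0] * 1] * 2, depth=4, dtype=tf.float32)  # shape [2, 1, 4]
strengths = tf.expand_dims(strengths, axis=3)                     # shape [2, 1, 4, 1]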

Example 13: get_read_mask

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import expand_dims [as alias]
def get_read_mask(self, read_head_index):
    """Creates a mask which allows us to attenuate subsequent read strengths.

    This is exposed as its own function so that it can be overridden to provide
    alternate read addressing schemes.

    Args:
      read_head_index: Identifies which read head we're getting the mask for.

    Returns:
      A tf.float32 tensor of shape [1, 1, memory_size, memory_size]
    """
    if read_head_index == 0:
      return tf.expand_dims(
          common_layers.mask_pos_lt(self._memory_size, self._memory_size),
          axis=0)
    else:
      raise ValueError("Read head index must be 0 for stack.") 
Developer: tensorflow | Project: tensor2tensor | Code lines: 20 | Source: neural_stack.py

Example 14: decode_transformer

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import expand_dims [as alias]
def decode_transformer(encoder_output, encoder_decoder_attention_bias, targets,
                       hparams, name):
  """Original Transformer decoder."""
  with tf.variable_scope(name):
    targets = common_layers.flatten4d3d(targets)

    decoder_input, decoder_self_bias = (
        transformer.transformer_prepare_decoder(targets, hparams))

    decoder_input = tf.nn.dropout(decoder_input,
                                  1.0 - hparams.layer_prepostprocess_dropout)

    decoder_output = transformer.transformer_decoder(
        decoder_input, encoder_output, decoder_self_bias,
        encoder_decoder_attention_bias, hparams)
    decoder_output = tf.expand_dims(decoder_output, axis=2)
    decoder_output_shape = common_layers.shape_list(decoder_output)
    decoder_output = tf.reshape(
        decoder_output, [decoder_output_shape[0], -1, 1, hparams.hidden_size])
    # Expand since t2t expects 4d tensors.
    return decoder_output 
Developer: tensorflow | Project: tensor2tensor | Code lines: 23 | Source: transformer_nat.py

Example 15: body

# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import expand_dims [as alias]
def body(self, features):
    observations = features["inputs_raw"]
    observations = tf.cast(observations, tf.float32)
    flat_observations = tf.layers.flatten(observations)
    with tf.variable_scope("policy"):
      x = flat_observations
      for size in self.hparams.policy_layers:
        x = tf.layers.dense(x, size, activation=tf.nn.relu)
      logits = tf.layers.dense(x, self.hparams.problem.num_actions)
      logits = tf.expand_dims(logits, axis=1)
    with tf.variable_scope("value"):
      x = flat_observations
      for size in self.hparams.value_layers:
        x = tf.layers.dense(x, size, activation=tf.nn.relu)
      value = tf.layers.dense(x, 1)
    logits = clip_logits(logits, self.hparams)
    return {"target_policy": logits, "target_value": value} 
Developer: tensorflow | Project: tensor2tensor | Code lines: 19 | Source: rl.py


Note: The tensorflow.compat.v1.expand_dims method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce this article without permission.