

Python v1.ceil Method Code Examples

This article collects and summarizes typical usage examples of the tensorflow.compat.v1.ceil method in Python. If you are wondering what v1.ceil does, how to call it, or what real uses of it look like, the curated examples below should help. You can also explore further usage examples from its containing module, tensorflow.compat.v1.


The following presents 7 code examples of the v1.ceil method, sorted by popularity by default.

Example 1: predict_target_lengths

# Required module: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import ceil [as alias]
def predict_target_lengths(
    encoder_output, inputs_mask, hparams, length_diff=None):
  """Predict target lengths."""
  bound = hparams.lendiff_bound
  inputs_length = tf.cast(tf.reduce_sum(inputs_mask, 1), tf.int32)
  targets_length = inputs_length
  loss = None
  if hparams.predict_target_length:
    encoder_output = gops.reduce_mean_over_l(encoder_output, inputs_mask)
    logits = tf.stop_gradient(encoder_output)
    logits = lenpred_mlp("lenpred", logits, hparams.hidden_size, bound)
    if length_diff is not None:
      labels = tf.maximum(tf.minimum(length_diff, bound), -bound)
      labels = tf.cast(labels + bound, tf.int32)
      labels = tf.stop_gradient(labels)
      loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
          labels=labels, logits=logits)
      loss = tf.reduce_mean(loss)
    diff_pred = tf.argmax(logits, 1)
    diff_pred = tf.cast(diff_pred - bound, tf.int32)
    targets_length = inputs_length + diff_pred
    targets_length = tf.maximum(targets_length, 1)
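  # Round predicted lengths up to the next multiple of divi
  # (int32 / python int promotes to float, so tf.ceil applies directly).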
  divi = 4
  targets_length = tf.ceil(targets_length / divi) * divi
  targets_length = tf.cast(targets_length, tf.int32)
  return targets_length, loss 
Developer ID: tensorflow, Project: tensor2tensor, Lines of code: 28, Source file: transformer_vae_flow_prior_ops.py
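
Only the closing rounding step uses v1.ceil; here is a minimal standalone sketch of just that step (made-up lengths, same divisor of 4), runnable on its own:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

lengths = tf.constant([5, 8, 13], dtype=tf.int32)  # hypothetical predicted lengths
divi = 4
# int32 / python int promotes to float, so tf.ceil applies directly.
rounded = tf.cast(tf.ceil(lengths / divi) * divi, tf.int32)

with tf.Session() as sess:
    print(sess.run(rounded))  # [ 8  8 16]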

Example 2: crop_or_pad

# Required module: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import ceil [as alias]
def crop_or_pad(waves, length, channels):
  """Crop or pad wave to have shape [N, length, channels].

  Args:
    waves: A 3D `Tensor` of NLC format.
    length: A Python scalar. The output wave size.
    channels: Number of output waves channels.

  Returns:
    A 3D `Tensor` of NLC format with shape [N, length, channels].
  """
  waves = tf.convert_to_tensor(waves)
  batch_size = int(waves.shape[0])
  waves_shape = tf.shape(waves)

  # Force audio length.
  pad = tf.maximum(0, length - waves_shape[1])
  right_pad = tf.to_int32(tf.to_float(pad) / 2.0)
  left_pad = pad - right_pad
  waves = tf.pad(waves, [[0, 0], [left_pad, right_pad], [0, 0]])
  waves = waves[:, :length, :]

  # Force number of channels.
  num_repeats = tf.to_int32(
      tf.ceil(tf.to_float(channels) / tf.to_float(waves_shape[2])))
  waves = tf.tile(waves, [1, 1, num_repeats])[:, :, :channels]

  waves.set_shape([batch_size, length, channels])
  return waves 
Developer ID: magenta, Project: magenta, Lines of code: 31, Source file: spectral_ops.py
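
A hedged usage sketch (batch size, wave length, and channel count are made up): pad a 2-example batch of 12000-sample mono waves out to 16000 samples and tile the single channel to stereo.

import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

# crop_or_pad as defined above; input is NLC-format audio.
waves = tf.constant(np.random.randn(2, 12000, 1), dtype=tf.float32)
out = crop_or_pad(waves, length=16000, channels=2)

with tf.Session() as sess:
    print(sess.run(tf.shape(out)))  # [    2 16000     2]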

Example 3: select_random_chunk

# Required module: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import ceil [as alias]
def select_random_chunk(dataset,
                        max_length=gin.REQUIRED,
                        feature_key='targets',
                        **unused_kwargs):
  """Token-preprocessor to extract one span of at most `max_length` tokens.

  If the token sequence is longer than `max_length`, then we return a random
  subsequence.  Otherwise, we return the full sequence.

  This is generally followed by split_tokens.

  Args:
    dataset: a tf.data.Dataset with dictionaries containing the key feature_key.
    max_length: an integer
    feature_key: a string

  Returns:
    a dataset
  """
  def _my_fn(x):
    """Select a random chunk of tokens.

    Args:
      x: a 1d Tensor
    Returns:
      a 1d Tensor
    """
    tokens = x[feature_key]
    n_tokens = tf.size(tokens)
    num_segments = tf.cast(
        tf.ceil(tf.cast(n_tokens, tf.float32)
                / tf.cast(max_length, tf.float32)),
        tf.int32)
    start = max_length * tf.random_uniform(
        [], maxval=num_segments, dtype=tf.int32)
    end = tf.minimum(start + max_length, n_tokens)
    return {feature_key: tokens[start:end]}
  # Filter empty examples.
  dataset = dataset.filter(lambda x: tf.not_equal(tf.size(x[feature_key]), 0))
  return dataset.map(_my_fn, num_parallel_calls=num_parallel_calls()) 
Developer ID: google-research, Project: text-to-text-transfer-transformer, Lines of code: 42, Source file: preprocessors.py
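
A usage sketch with a toy dataset. num_parallel_calls is a helper from the original preprocessors module that is not shown above; the stand-in below assumes it resolves to an autotune setting, and the call assumes the module's other imports (gin) are in place.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

def num_parallel_calls():  # stand-in for the module's helper (assumption)
    return tf.data.experimental.AUTOTUNE

ds = tf.data.Dataset.from_tensor_slices({'targets': [list(range(25))]})
ds = select_random_chunk(ds, max_length=10)

it = tf.data.make_one_shot_iterator(ds)
chunk = it.get_next()['targets']
with tf.Session() as sess:
    print(sess.run(chunk))  # a random span of at most 10 tokens, e.g. [20 21 22 23 24]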

Example 4: get_per_pixel_weights

# Required module: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import ceil [as alias]
def get_per_pixel_weights(self, true_image_shapes, input_height, input_width,
                            stride):
    output_height, output_width = (input_height // stride,
                                   input_width // stride)

    # TODO(vighneshb) Explore whether using floor here is safe.
    output_true_image_shapes = tf.ceil(tf.to_float(true_image_shapes) / stride)
    per_pixel_weights = cnma.get_valid_anchor_weights_in_flattened_image(
        output_true_image_shapes, output_height, output_width)
    per_pixel_weights = tf.expand_dims(per_pixel_weights, 2)
    return per_pixel_weights 
Developer ID: tensorflow, Project: models, Lines of code: 13, Source file: center_net_meta_arch_tf2_test.py
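
The ceil here projects the true image extents onto the stride-reduced output grid, rounding up so a partially covered output cell still counts as valid. A toy computation under assumed values (stride 8, one true image shape of height 388, width 520, 3 channels):

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

stride = 8
true_image_shapes = tf.constant([[388, 520, 3]])  # [height, width, channels]
output_true_image_shapes = tf.ceil(tf.to_float(true_image_shapes) / stride)

with tf.Session() as sess:
    print(sess.run(output_true_image_shapes))  # [[49. 65.  1.]]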

Example 5: _get_crop_border

# Required module: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import ceil [as alias]
def _get_crop_border(border, size):
  border = tf.cast(border, tf.float32)
  size = tf.cast(size, tf.float32)

  i = tf.ceil(tf.log(2.0 * border / size) / tf.log(2.0))
  divisor = tf.pow(2.0, i)
  divisor = tf.clip_by_value(divisor, 1, border)
  divisor = tf.cast(divisor, tf.int32)

  return tf.cast(border, tf.int32) // divisor 
Developer ID: tensorflow, Project: models, Lines of code: 12, Source file: preprocessor.py
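
Here i = ceil(log2(2 * border / size)), so divisor = 2**i is the smallest power of two for which border / divisor <= size / 2, clamped to [1, border]. A quick check with assumed values border=128, size=100: 2*128/100 = 2.56, log2(2.56) ≈ 1.36, so i = 2, divisor = 4, and the returned crop border is 128 // 4 = 32.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

with tf.Session() as sess:
    # _get_crop_border as defined above.
    print(sess.run(_get_crop_border(border=128, size=100)))  # 32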

Example 6: test_forward_ceil

# Required module: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import ceil [as alias]
def test_forward_ceil():
    ishape = (1, 3, 10, 10)
    inp_array = np.random.uniform(size=ishape).astype(np.float32)
    with tf.Graph().as_default():
        in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
        tf.ceil(in1)
        compare_tf_with_tvm(inp_array, 'Placeholder:0', 'Ceil:0') 
Developer ID: apache, Project: incubator-tvm, Lines of code: 9, Source file: test_forward.py
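
compare_tf_with_tvm is the TVM test harness (it runs the graph on both TensorFlow and TVM and compares results). Outside that harness, the same graph can be exercised directly in a session; a sketch follows, and since the inputs are drawn from [0, 1), practically every element rounds up to 1.0:

import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

inp_array = np.random.uniform(size=(1, 3, 10, 10)).astype(np.float32)
with tf.Graph().as_default():
    in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
    out = tf.ceil(in1)
    with tf.Session() as sess:
        result = sess.run(out, feed_dict={in1: inp_array})
print(result.min(), result.max())  # 1.0 1.0 (almost surely)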

Example 7: split_tokens

# Required module: from tensorflow.compat import v1 [as alias]
# Or alternatively: from tensorflow.compat.v1 import ceil [as alias]
def split_tokens(dataset,
                 min_tokens_per_segment=None,
                 max_tokens_per_segment=gin.REQUIRED,
                 feature_key='targets',
                 **unused_kwargs):
  """Split examples into multiple examples each.

  The intended use case is to break up long examples for use in unsupervised
  transfer-learning.

  This function is generally preceded by select_random_chunk.

  If min_tokens_per_segment is provided, the segment length is chosen randomly
  per document from a log-uniform distribution.  If min_tokens_per_segment is
  None, then the segment length is max_tokens_per_segment (except for a possibly
  shorter last segment in each document).

  Args:
    dataset: a tf.data.Dataset with dictionaries containing the key feature_key.
    min_tokens_per_segment: an optional integer
    max_tokens_per_segment: an integer, the maximum number of tokens in each
      segment. Only the final segment may be shorter.
    feature_key: a string, the feature to split

  Returns:
    a dataset
  """
  def _split_tokens(x):
    """Split one token sequence into multiple multiple."""
    tokens = x[feature_key]
    n_tokens = tf.size(tokens)
    if min_tokens_per_segment is None:
      length = max_tokens_per_segment
    else:
      # pick a length - log-uniformly distributed
      length = tf.cast(tf.exp(tf.random_uniform(
          [],
          minval=math.log(min_tokens_per_segment),
          maxval=math.log(max_tokens_per_segment))), tf.int32)

    # Pad to a multiple of length, then use tf.reshape to split up the tokens
    # into num_segments segments each of the given length.
    num_segments = tf.cast(
        tf.ceil(tf.cast(n_tokens, tf.float32) / tf.cast(length, tf.float32)),
        tf.int32)
    padding = num_segments * length - tf.size(tokens)
    tokens = tf.pad(tokens, [[0, padding]])
    return tf.reshape(tokens, [-1, length])

  def _strip_padding(x):
    return {feature_key: tf.boolean_mask(x, tf.cast(x, tf.bool))}

  # Filter empty examples.
  dataset = dataset.filter(lambda x: tf.not_equal(tf.size(x[feature_key]), 0))
  dataset = dataset.map(_split_tokens, num_parallel_calls=num_parallel_calls())
  dataset = dataset.unbatch()
  return dataset.map(
      _strip_padding, num_parallel_calls=tf.data.experimental.AUTOTUNE) 
Developer ID: google-research, Project: text-to-text-transfer-transformer, Lines of code: 60, Source file: preprocessors.py
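
A usage sketch mirroring the select_random_chunk example above (same num_parallel_calls stand-in, and split_tokens as defined above together with its module imports such as gin and math): splitting a 25-token document into fixed segments of length 10 yields segments of 10, 10, and 5 tokens. The tokens start at 1 because _strip_padding drops zeros.

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

def num_parallel_calls():  # stand-in for the module's helper (assumption)
    return tf.data.experimental.AUTOTUNE

ds = tf.data.Dataset.from_tensor_slices({'targets': [list(range(1, 26))]})
ds = split_tokens(ds, max_tokens_per_segment=10)

it = tf.data.make_one_shot_iterator(ds)
seg = it.get_next()['targets']
with tf.Session() as sess:
    try:
        while True:
            print(sess.run(seg))  # [1..10], then [11..20], then [21..25]
    except tf.errors.OutOfRangeError:
        pass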


Note: The tensorflow.compat.v1.ceil examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their authors; copyright in the source code remains with the original authors, and distribution and use must follow the corresponding project's license. Do not reproduce without permission.