This article collects typical usage examples of the tensorflow.compat.v1.ceil method in Python. If you have been wondering what v1.ceil does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore other members of the containing module, tensorflow.compat.v1.
Seven code examples of v1.ceil are shown below, sorted by popularity by default. Upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
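Before the examples, a minimal sketch of what v1.ceil itself does: an element-wise ceiling that keeps the floating-point dtype (the input values here are arbitrary).

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()   # classic graph mode

x = tf.constant([1.2, 2.0, -0.5])
y = tf.ceil(x)                 # element-wise ceiling, still float32
with tf.Session() as sess:
    print(sess.run(y))         # [ 2.  2. -0.]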
Example 1: predict_target_lengths
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import ceil [as alias]
def predict_target_lengths(
    encoder_output, inputs_mask, hparams, length_diff=None):
  """Predict target lengths."""
  bound = hparams.lendiff_bound
  inputs_length = tf.cast(tf.reduce_sum(inputs_mask, 1), tf.int32)
  targets_length = inputs_length
  loss = None
  if hparams.predict_target_length:
    encoder_output = gops.reduce_mean_over_l(encoder_output, inputs_mask)
    logits = tf.stop_gradient(encoder_output)
    logits = lenpred_mlp("lenpred", logits, hparams.hidden_size, bound)
    if length_diff is not None:
      labels = tf.maximum(tf.minimum(length_diff, bound), -bound)
      labels = tf.cast(labels + bound, tf.int32)
      labels = tf.stop_gradient(labels)
      loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
          labels=labels, logits=logits)
      loss = tf.reduce_mean(loss)
    diff_pred = tf.argmax(logits, 1)
    diff_pred = tf.cast(diff_pred - bound, tf.int32)
    targets_length = inputs_length + diff_pred
    targets_length = tf.maximum(targets_length, 1)
  divi = 4
  targets_length = tf.ceil(targets_length / divi) * divi
  targets_length = tf.cast(targets_length, tf.int32)
  return targets_length, loss
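The last three lines round the (possibly predicted) length up to a multiple of divi. A standalone sketch of just that rounding step, with arbitrary lengths:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

lengths = tf.constant([1, 5, 8, 13])                    # int32 lengths
rounded = tf.cast(tf.ceil(lengths / 4) * 4, tf.int32)   # round up to a multiple of 4
with tf.Session() as sess:
    print(sess.run(rounded))                            # [ 4  8  8 16]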
Example 2: crop_or_pad
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import ceil [as alias]
def crop_or_pad(waves, length, channels):
  """Crop or pad wave to have shape [N, length, channels].

  Args:
    waves: A 3D `Tensor` of NLC format.
    length: A Python scalar. The output wave size.
    channels: Number of output waves channels.

  Returns:
    A 3D `Tensor` of NLC format with shape [N, length, channels].
  """
  waves = tf.convert_to_tensor(waves)
  batch_size = int(waves.shape[0])
  waves_shape = tf.shape(waves)
  # Force audio length.
  pad = tf.maximum(0, length - waves_shape[1])
  right_pad = tf.to_int32(tf.to_float(pad) / 2.0)
  left_pad = pad - right_pad
  waves = tf.pad(waves, [[0, 0], [left_pad, right_pad], [0, 0]])
  waves = waves[:, :length, :]
  # Force number of channels.
  num_repeats = tf.to_int32(
      tf.ceil(tf.to_float(channels) / tf.to_float(waves_shape[2])))
  waves = tf.tile(waves, [1, 1, num_repeats])[:, :, :channels]
  waves.set_shape([batch_size, length, channels])
  return waves
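A hypothetical usage sketch, assuming the crop_or_pad definition above is in scope: a batch of four 1-second mono clips is padded to 32000 samples and tiled to stereo.

waves = tf.random_uniform([4, 16000, 1])            # N=4 mono clips
out = crop_or_pad(waves, length=32000, channels=2)
# out has static shape (4, 32000, 2): 8000 samples of zero padding on each
# side, and the single channel tiled ceil(2 / 1) = 2 times, then sliced to 2.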
Example 3: select_random_chunk
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import ceil [as alias]
def select_random_chunk(dataset,
                        max_length=gin.REQUIRED,
                        feature_key='targets',
                        **unused_kwargs):
  """Token-preprocessor to extract one span of at most `max_length` tokens.

  If the token sequence is longer than `max_length`, then we return a random
  subsequence. Otherwise, we return the full sequence.

  This is generally followed by split_tokens.

  Args:
    dataset: a tf.data.Dataset with dictionaries containing the key
      feature_key.
    max_length: an integer
    feature_key: a string

  Returns:
    a dataset
  """
  def _my_fn(x):
    """Select a random chunk of tokens.

    Args:
      x: a 1d Tensor

    Returns:
      a 1d Tensor
    """
    tokens = x[feature_key]
    n_tokens = tf.size(tokens)
    num_segments = tf.cast(
        tf.ceil(tf.cast(n_tokens, tf.float32)
                / tf.cast(max_length, tf.float32)),
        tf.int32)
    start = max_length * tf.random_uniform(
        [], maxval=num_segments, dtype=tf.int32)
    end = tf.minimum(start + max_length, n_tokens)
    return {feature_key: tokens[start:end]}

  # Filter empty examples.
  dataset = dataset.filter(lambda x: tf.not_equal(tf.size(x[feature_key]), 0))
  return dataset.map(_my_fn, num_parallel_calls=num_parallel_calls())
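A hypothetical usage sketch, assuming the select_random_chunk definition above and the module-level num_parallel_calls() helper it references are both in scope, and passing max_length explicitly rather than through gin:

ds = tf.data.Dataset.from_tensors({'targets': tf.range(1, 100)})
ds = select_random_chunk(ds, max_length=32)
# Each element now holds one random span of at most 32 tokens.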
Example 4: get_per_pixel_weights
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import ceil [as alias]
def get_per_pixel_weights(self, true_image_shapes, input_height, input_width,
                          stride):
  output_height, output_width = (input_height // stride,
                                 input_width // stride)

  # TODO(vighneshb) Explore whether using floor here is safe.
  output_true_image_shapes = tf.ceil(tf.to_float(true_image_shapes) / stride)
  per_pixel_weights = cnma.get_valid_anchor_weights_in_flattened_image(
      output_true_image_shapes, output_height, output_width)
  per_pixel_weights = tf.expand_dims(per_pixel_weights, 2)
  return per_pixel_weights
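The ceil matters at the boundary: when the valid image height or width is not an exact multiple of the stride, the partially covered output row or column is still counted as valid. A quick hypothetical illustration:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

true_image_shapes = tf.constant([[511, 640, 3]])   # [height, width, channels]
with tf.Session() as sess:
    print(sess.run(tf.ceil(tf.to_float(true_image_shapes) / 4)))
    # [[128. 160.   1.]] -- floor would have dropped the 128th output row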
Example 5: _get_crop_border
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import ceil [as alias]
def _get_crop_border(border, size):
  border = tf.cast(border, tf.float32)
  size = tf.cast(size, tf.float32)

  i = tf.ceil(tf.log(2.0 * border / size) / tf.log(2.0))
  divisor = tf.pow(2.0, i)
  divisor = tf.clip_by_value(divisor, 1, border)
  divisor = tf.cast(divisor, tf.int32)

  return tf.cast(border, tf.int32) // divisor
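A hypothetical usage sketch, assuming the _get_crop_border definition above is in scope: for a small image the divisor grows so the effective border shrinks, while a large image keeps the full border.

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

with tf.Session() as sess:
    print(sess.run(_get_crop_border(128, 100)))   # 32  (divisor 2**2 = 4)
    print(sess.run(_get_crop_border(128, 512)))   # 128 (divisor clipped to 1)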
Example 6: test_forward_ceil
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import ceil [as alias]
def test_forward_ceil():
    ishape = (1, 3, 10, 10)
    inp_array = np.random.uniform(size=ishape).astype(np.float32)
    with tf.Graph().as_default():
        in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
        tf.ceil(in1)
        compare_tf_with_tvm(inp_array, 'Placeholder:0', 'Ceil:0')
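A standalone version of the same check without the TVM harness (compare_tf_with_tvm is assumed to come from TVM's TensorFlow frontend test utilities), verifying the graph output against np.ceil directly:

import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

inp_array = np.random.uniform(size=(1, 3, 10, 10)).astype(np.float32)
with tf.Graph().as_default():
    in1 = tf.placeholder(shape=inp_array.shape, dtype=inp_array.dtype)
    out = tf.ceil(in1)
    with tf.Session() as sess:
        np.testing.assert_array_equal(sess.run(out, {in1: inp_array}),
                                      np.ceil(inp_array))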
Example 7: split_tokens
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import ceil [as alias]
def split_tokens(dataset,
                 min_tokens_per_segment=None,
                 max_tokens_per_segment=gin.REQUIRED,
                 feature_key='targets',
                 **unused_kwargs):
  """Split examples into multiple examples each.

  The intended use case is to break up long examples for use in unsupervised
  transfer-learning.

  This function is generally preceded by select_random_chunk.

  If min_tokens_per_segment is provided, the segment length is chosen randomly
  per document from a log-uniform distribution. If min_tokens_per_segment is
  None, then the segment length is max_tokens_per_segment (except for a
  possibly shorter last segment in each document).

  Args:
    dataset: a tf.data.Dataset with dictionaries containing the key
      feature_key.
    min_tokens_per_segment: an optional integer
    max_tokens_per_segment: an integer, the maximum number of tokens in each
      segment. Only the final segment may be shorter.
    feature_key: a string, the feature to split

  Returns:
    a dataset
  """
  def _split_tokens(x):
    """Split one token sequence into multiple sequences."""
    tokens = x[feature_key]
    n_tokens = tf.size(tokens)
    if min_tokens_per_segment is None:
      length = max_tokens_per_segment
    else:
      # Pick a length -- log-uniformly distributed.
      length = tf.cast(tf.exp(tf.random_uniform(
          [],
          minval=math.log(min_tokens_per_segment),
          maxval=math.log(max_tokens_per_segment))), tf.int32)

    # Pad to a multiple of length, then use tf.reshape to split up the tokens
    # into num_segments segments each of the given length.
    num_segments = tf.cast(
        tf.ceil(tf.cast(n_tokens, tf.float32) / tf.cast(length, tf.float32)),
        tf.int32)
    padding = num_segments * length - tf.size(tokens)
    tokens = tf.pad(tokens, [[0, padding]])
    return tf.reshape(tokens, [-1, length])

  def _strip_padding(x):
    return {feature_key: tf.boolean_mask(x, tf.cast(x, tf.bool))}

  # Filter empty examples.
  dataset = dataset.filter(lambda x: tf.not_equal(tf.size(x[feature_key]), 0))
  dataset = dataset.map(_split_tokens, num_parallel_calls=num_parallel_calls())
  dataset = dataset.unbatch()
  return dataset.map(
      _strip_padding, num_parallel_calls=tf.data.experimental.AUTOTUNE)
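A self-contained sketch of the pad-and-reshape step above, with a fixed segment length and none of the gin or random-length machinery (the token values are arbitrary):

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

tokens = tf.range(1, 24)              # 23 non-zero tokens
length = 10
num_segments = tf.cast(
    tf.ceil(tf.cast(tf.size(tokens), tf.float32) / float(length)), tf.int32)
padded = tf.pad(tokens, [[0, num_segments * length - tf.size(tokens)]])
segments = tf.reshape(padded, [-1, length])
with tf.Session() as sess:
    print(sess.run(segments).shape)   # (3, 10); the last row ends in 7 zeros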