This article collects typical usage examples of the `tensorflow.compat.v1.floordiv` method in Python. If you are unsure what `v1.floordiv` does or how to call it, the curated code samples below may help; you can also explore other methods of the `tensorflow.compat.v1` module.
The following shows 6 code examples of `v1.floordiv`, sorted by popularity by default. The snippets refer to the module via the alias `tf`, i.e. `import tensorflow.compat.v1 as tf`.
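Before the examples, a minimal sketch of the method itself may help. This is an illustration only, not taken from the examples below; it assumes TensorFlow 2.x with eager execution and the v1 compatibility module:

import tensorflow.compat.v1 as tf

a = tf.constant([7, -7, 9])
b = tf.constant([2, 2, 4])
# Element-wise floor division; results are rounded toward negative infinity,
# so -7 divided by 2 gives -4 rather than -3.
q = tf.floordiv(a, b)
print(q.numpy())  # [ 3 -4  2]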
Example 1: int_to_bit
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import floordiv [as alias]
def int_to_bit(self, x_int, num_bits, base=2):
  """Turn x_int representing numbers into a bitwise (lower-endian) tensor.

  Args:
    x_int: Tensor containing integer to be converted into base notation.
    num_bits: Number of bits in the representation.
    base: Base of the representation.

  Returns:
    Corresponding number expressed in base.
  """
  x_l = tf.to_int32(tf.expand_dims(x_int, axis=-1))
  # Digit i is floor(x / base**i) mod base, so the least significant digit
  # comes first (lower-endian).
  # pylint: disable=g-complex-comprehension
  x_labels = [
      tf.floormod(
          tf.floordiv(tf.to_int32(x_l),
                      tf.to_int32(base)**i), tf.to_int32(base))
      for i in range(num_bits)]
  res = tf.concat(x_labels, axis=-1)
  return tf.to_float(res)
Example 2: int_to_bit
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import floordiv [as alias]
def int_to_bit(x_int, num_bits, base=2):
  """Turn x_int representing numbers into a bitwise (lower-endian) tensor.

  Args:
    x_int: Tensor containing integer to be converted into base notation.
    num_bits: Number of bits in the representation.
    base: Base of the representation.

  Returns:
    Corresponding number expressed in base.
  """
  x_l = tf.to_int32(tf.expand_dims(x_int, axis=-1))
  x_labels = [tf.floormod(
      tf.floordiv(tf.to_int32(x_l), tf.to_int32(base)**i), tf.to_int32(base))
      for i in range(num_bits)]
  res = tf.concat(x_labels, axis=-1)
  return tf.to_float(res)
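As a rough usage sketch for the standalone variant above (not part of the original source; the input values are illustrative, and `tf.to_int32`/`tf.to_float` require the v1 compatibility API):

x = tf.constant([5, 6], dtype=tf.int32)   # 5 = 0b101, 6 = 0b110
bits = int_to_bit(x, num_bits=3, base=2)
# Lower-endian output, least significant digit first:
# [[1., 0., 1.],
#  [0., 1., 1.]]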
Example 3: loss_function
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import floordiv [as alias]
def loss_function(self, inputs, build_network_result):
  """Computes the CTC loss for the current batch of predictions.

  Args:
    inputs: the input list of the model.
    build_network_result: a BuildNetworkResult returned by build_network().

  Returns:
    The loss tensor of the model.
  """
  logits = build_network_result.logits
  actual_time_steps = inputs[2]
  probs = tf.nn.softmax(logits)
  ctc_time_steps = tf.shape(probs)[1]
  # Rescale each utterance's length from input frames to logits time steps:
  # floor(actual_time_steps * ctc_time_steps / max_time_steps).
  ctc_input_length = tf.to_float(
      tf.multiply(actual_time_steps, ctc_time_steps))
  ctc_input_length = tf.to_int32(
      tf.floordiv(ctc_input_length, tf.to_float(self.max_time_steps)))

  label_length = inputs[3]
  label_length = tf.to_int32(tf.squeeze(label_length))
  ctc_input_length = tf.to_int32(tf.squeeze(ctc_input_length))
  labels = inputs[1]
  sparse_labels = tf.to_int32(
      tf.keras.backend.ctc_label_dense_to_sparse(labels, label_length))
  y_pred = tf.log(
      tf.transpose(probs, perm=[1, 0, 2]) + tf.keras.backend.epsilon())

  losses = tf.expand_dims(
      tf.nn.ctc_loss(
          labels=sparse_labels,
          inputs=y_pred,
          sequence_length=ctc_input_length,
          ignore_longer_outputs_than_inputs=True),
      axis=1)
  loss = tf.reduce_mean(losses)
  return loss
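The `tf.floordiv` call above rescales each utterance's frame count to the time resolution of the logits. A standalone sketch of just that step, with made-up numbers and `max_time_steps` treated as a plain constant rather than a model attribute:

max_time_steps = 600                         # assumed maximum input length
actual_time_steps = tf.constant([600, 450])  # frames per utterance
ctc_time_steps = 150                         # time dimension of the logits

ctc_input_length = tf.to_float(tf.multiply(actual_time_steps, ctc_time_steps))
ctc_input_length = tf.to_int32(
    tf.floordiv(ctc_input_length, tf.to_float(max_time_steps)))
# ctc_input_length == [150, 112]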
Example 4: aggregate_sparse_indices
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import floordiv [as alias]
def aggregate_sparse_indices(indices, values, shape, agg_fn="sum"):
  """Sums values corresponding to repeated indices.

  Returns the unique indices and their summed values.

  Args:
    indices: [num_nnz, rank] Tensor.
    values: [num_nnz] Tensor.
    shape: [rank] Tensor.
    agg_fn: Method to use for aggregation - `sum` or `max`.

  Returns:
    indices: [num_uniq, rank] Tensor.
    values: [num_uniq] Tensor.
  """
  # Linearize the indices.
  scaling_vec = tf.cumprod(tf.cast(shape, indices.dtype), exclusive=True)
  linearized = tf.linalg.matvec(indices, scaling_vec)
  # Get the unique indices, and their positions in the array.
  y, idx = tf.unique(linearized)
  # Use the positions of the unique values as the segment ids to
  # get the unique values.
  idx.set_shape([None])
  if agg_fn == "sum":
    values = tf.unsorted_segment_sum(values, idx, tf.shape(y)[0])
  elif agg_fn == "max":
    values = tf.unsorted_segment_max(values, idx, tf.shape(y)[0])
  # Go back to ND indices.
  y = tf.expand_dims(y, 1)
  indices = tf.floormod(
      tf.floordiv(y, tf.expand_dims(scaling_vec, 0)),
      tf.cast(tf.expand_dims(shape, 0), indices.dtype))
  return indices, values
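A small usage sketch (not from the original source) that deduplicates a repeated index; the expected output assumes the default `sum` aggregation:

indices = tf.constant([[0, 1], [2, 0], [0, 1]], dtype=tf.int32)
values = tf.constant([1.0, 2.0, 3.0])
shape = tf.constant([3, 4], dtype=tf.int32)

uniq_indices, uniq_values = aggregate_sparse_indices(indices, values, shape)
# uniq_indices == [[0, 1], [2, 0]], uniq_values == [4.0, 2.0]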
Example 5: fpn_feature_levels
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import floordiv [as alias]
def fpn_feature_levels(num_levels, unit_scale_index, image_ratio, boxes):
  """Returns fpn feature level for each box based on its area.

  See section 4.2 of https://arxiv.org/pdf/1612.03144.pdf for details.

  Args:
    num_levels: An integer indicating the number of feature levels to crop
      boxes from.
    unit_scale_index: A 0-based integer indicating the index of the feature map
      which most closely matches the resolution of the pretrained model.
    image_ratio: A float indicating the ratio of input image area to
      pretraining image area.
    boxes: A float tensor of shape [batch, num_boxes, 4] containing boxes of
      the form [ymin, xmin, ymax, xmax] in normalized coordinates.

  Returns:
    An int32 tensor of shape [batch_size, num_boxes] containing feature
    indices.
  """
  assert num_levels > 0, (
      '`num_levels` must be > 0. Found {}'.format(num_levels))
  assert unit_scale_index < num_levels and unit_scale_index >= 0, (
      '`unit_scale_index` must be in [0, {}). Found {}.'.format(
          num_levels, unit_scale_index))
  box_height_width = boxes[:, :, 2:4] - boxes[:, :, 0:2]
  areas_sqrt = tf.sqrt(tf.reduce_prod(box_height_width, axis=2))
  # Level = floor(log2(sqrt(area) * image_ratio)) + unit_scale_index,
  # clipped to [0, num_levels - 1].
  log_2 = tf.cast(tf.log(2.0), dtype=boxes.dtype)
  levels = tf.cast(
      tf.floordiv(tf.log(areas_sqrt * image_ratio), log_2)
      + unit_scale_index,
      dtype=tf.int32)
  levels = tf.maximum(0, tf.minimum(num_levels - 1, levels))
  return levels
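A rough usage sketch (illustrative values, not from the original source); larger boxes are routed to coarser, higher-index feature levels:

# One image, two boxes in normalized [ymin, xmin, ymax, xmax] form.
boxes = tf.constant([[[0.0, 0.0, 0.6, 0.6],     # sqrt(area) = 0.6
                      [0.0, 0.0, 0.1, 0.1]]])   # sqrt(area) = 0.1
levels = fpn_feature_levels(num_levels=4, unit_scale_index=2,
                            image_ratio=1.0, boxes=boxes)
# levels == [[1, 0]]  (floor(log2(0.6)) + 2 = 1; floor(log2(0.1)) + 2 clips to 0)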
Example 6: compute_progress
# Required import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import floordiv [as alias]
def compute_progress(current_image_id, stable_stage_num_images,
                     transition_stage_num_images, num_blocks):
  """Computes the training progress.

  The training alternates between stable phase and transition phase.
  The `progress` indicates the training progress, i.e. the training is at
  - a stable phase p if progress = p
  - a transition stage between p and p + 1 if progress = p + fraction
  where p = 0, 1, 2, ...

  Note the max value of progress is `num_blocks` - 1.

  In terms of LOD (of the original implementation):
  progress = `num_blocks` - 1 - LOD

  Args:
    current_image_id: A scalar integer `Tensor` of the current image id,
      counting from 0.
    stable_stage_num_images: An integer representing the number of images in
      each stable stage.
    transition_stage_num_images: An integer representing the number of images
      in each transition stage.
    num_blocks: Number of network blocks.

  Returns:
    A scalar float `Tensor` of the training progress.
  """
  # Note when current_image_id >= min_total_num_images - 1 (which means we
  # are already at the highest resolution), we want to keep progress constant.
  # Therefore, cap current_image_id here.
  capped_current_image_id = tf.minimum(
      current_image_id,
      min_total_num_images(stable_stage_num_images, transition_stage_num_images,
                           num_blocks) - 1)

  stage_num_images = stable_stage_num_images + transition_stage_num_images
  progress_integer = tf.floordiv(capped_current_image_id, stage_num_images)
  progress_fraction = tf.maximum(
      0.0,
      tf.to_float(
          tf.mod(capped_current_image_id, stage_num_images) -
          stable_stage_num_images) / tf.to_float(transition_stage_num_images))
  return tf.to_float(progress_integer) + progress_fraction
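For intuition, a numeric sketch of the per-stage arithmetic above (made-up sizes; `min_total_num_images` is a helper defined elsewhere in the original module, so the capping step is omitted here):

stable, transition = 1000, 500      # images per stable / transition phase
stage = stable + transition         # 1500 images per full stage
image_id = tf.constant(1800)

progress_integer = tf.floordiv(image_id, stage)   # 1800 // 1500 = 1
in_stage = tf.mod(image_id, stage)                # 300 images into stage 1
fraction = tf.maximum(
    0.0, tf.to_float(in_stage - stable) / float(transition))
progress = tf.to_float(progress_integer) + fraction  # 1.0 (still in stable phase)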