This article collects typical usage examples of the Python method object_detection.utils.shape_utils.pad_or_clip_nd. If you have been wondering what exactly shape_utils.pad_or_clip_nd does, how to call it, or what real uses of it look like, the hand-picked code examples below may help. You can also explore further usage examples from the module it lives in, object_detection.utils.shape_utils.
A total of 4 code examples of the shape_utils.pad_or_clip_nd method are shown below, sorted by popularity by default.
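Before the examples, here is a minimal sketch of what pad_or_clip_nd does: each dimension of the input is zero-padded up to the requested size if it is too small, clipped if it is too large, and left untouched when the requested size is None. The toy tensor and the TF1-style session below are illustrative assumptions of mine, kept consistent with the TF1 idioms (tf.placeholder, tf.sparse_tensor_to_dense) used in the examples that follow.

import numpy as np
import tensorflow as tf
from object_detection.utils import shape_utils

# Toy 2x3 input: pad dim 0 from 2 -> 4 with zeros, clip dim 1 from 3 -> 2.
tensor = tf.constant(np.arange(6).reshape(2, 3), dtype=tf.float32)
padded_and_clipped = shape_utils.pad_or_clip_nd(tensor, output_shape=[4, 2])
with tf.Session() as sess:
  print(sess.run(padded_and_clipped))  # shape (4, 2); rows 2-3 are all zeros

Because dimensions requested as None (or given as a tensor such as tf.constant(6)) have no static size, the static shape asserted in Example 1 below is [None, 3, 5, None], while the concrete run resolves those dimensions to the fed batch size 2 and to 6.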
Example 1: test_pad_or_clip_nd_tensor
# Required import: from object_detection.utils import shape_utils [as alias]
# Or: from object_detection.utils.shape_utils import pad_or_clip_nd [as alias]
def test_pad_or_clip_nd_tensor(self):
  tensor_placeholder = tf.placeholder(tf.float32, [None, 5, 4, 7])
  output_tensor = shape_utils.pad_or_clip_nd(
      tensor_placeholder, [None, 3, 5, tf.constant(6)])
  self.assertAllEqual(output_tensor.shape.as_list(), [None, 3, 5, None])
  with self.test_session() as sess:
    output_tensor_np = sess.run(
        output_tensor,
        feed_dict={
            tensor_placeholder: np.random.rand(2, 5, 4, 7),
        })
  self.assertAllEqual(output_tensor_np.shape, [2, 3, 5, 6])
Example 2: _dense_pose_part_indices
# Required import: from object_detection.utils import shape_utils [as alias]
# Or: from object_detection.utils.shape_utils import pad_or_clip_nd [as alias]
def _dense_pose_part_indices(self, keys_to_tensors):
  """Creates a tensor that contains part indices for each DensePose point.

  Args:
    keys_to_tensors: a dictionary from keys to tensors.

  Returns:
    A 2-D int32 tensor of shape [num_instances, num_points] where each element
    contains the DensePose part index (0-23). The value `num_points`
    corresponds to the maximum number of sampled points across all instances
    in the image. Note that instances with fewer sampled points will be padded
    with zeros in the last dimension.
  """
  num_points_per_instances = keys_to_tensors['image/object/densepose/num']
  part_index = keys_to_tensors['image/object/densepose/part_index']
  if isinstance(num_points_per_instances, tf.SparseTensor):
    num_points_per_instances = tf.sparse_tensor_to_dense(
        num_points_per_instances)
  if isinstance(part_index, tf.SparseTensor):
    part_index = tf.sparse_tensor_to_dense(part_index)
  part_index = tf.cast(part_index, dtype=tf.int32)
  max_points_per_instance = tf.cast(
      tf.math.reduce_max(num_points_per_instances), dtype=tf.int32)
  num_points_cumulative = tf.concat([
      [0], tf.math.cumsum(num_points_per_instances)], axis=0)

  def pad_parts_tensor(instance_ind):
    points_range_start = num_points_cumulative[instance_ind]
    points_range_end = num_points_cumulative[instance_ind + 1]
    part_inds = part_index[points_range_start:points_range_end]
    return shape_utils.pad_or_clip_nd(part_inds,
                                      output_shape=[max_points_per_instance])

  return tf.map_fn(pad_parts_tensor,
                   tf.range(tf.size(num_points_per_instances)),
                   dtype=tf.int32)
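Example 2 above illustrates a recurring pattern: the per-point data arrives as one flat tensor across all instances, a cumulative sum of the per-instance counts gives the slice boundaries, and pad_or_clip_nd brings each slice to a common length so tf.map_fn can stack the results. The following is only a hedged, self-contained sketch of the same pattern; the values of num_points and part_index are made up for illustration and are not from the source.

import tensorflow as tf
from object_detection.utils import shape_utils

# Two instances with 2 and 3 sampled points, stored as one flat tensor.
num_points = tf.constant([2, 3], dtype=tf.int32)
part_index = tf.constant([5, 7, 1, 1, 2], dtype=tf.int32)
max_points = tf.reduce_max(num_points)
cumulative = tf.concat([[0], tf.cumsum(num_points)], axis=0)

def pad_parts(instance_ind):
  start = cumulative[instance_ind]
  end = cumulative[instance_ind + 1]
  return shape_utils.pad_or_clip_nd(part_index[start:end],
                                    output_shape=[max_points])

padded = tf.map_fn(pad_parts, tf.range(tf.size(num_points)), dtype=tf.int32)
with tf.Session() as sess:
  print(sess.run(padded))  # [[5 7 0]
                           #  [1 1 2]] -- instance 0 is padded with one zero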
Example 3: test_pad_or_clip_nd_tensor
# Required import: from object_detection.utils import shape_utils [as alias]
# Or: from object_detection.utils.shape_utils import pad_or_clip_nd [as alias]
def test_pad_or_clip_nd_tensor(self):

  def graph_fn(input_tensor):
    output_tensor = shape_utils.pad_or_clip_nd(
        input_tensor, [None, 3, 5, tf.constant(6)])
    return output_tensor

  for n in [2, 3, 4, 5]:
    input_np = np.zeros((n, 5, 4, 7))
    output_tensor_np = self.execute(graph_fn, [input_np])
    self.assertAllEqual(output_tensor_np.shape[1:], [3, 5, 6])
Example 4: _dense_pose_surface_coordinates
# Required import: from object_detection.utils import shape_utils [as alias]
# Or: from object_detection.utils.shape_utils import pad_or_clip_nd [as alias]
def _dense_pose_surface_coordinates(self, keys_to_tensors):
  """Creates a tensor that contains surface coords for each DensePose point.

  Args:
    keys_to_tensors: a dictionary from keys to tensors.

  Returns:
    A 3-D float32 tensor of shape [num_instances, num_points, 4] where each
    point contains (y, x, v, u) data for each sampled DensePose point. The
    (y, x) coordinate has normalized image locations for the point, and (v, u)
    contains the surface coordinate (also normalized) for the part. The value
    `num_points` corresponds to the maximum number of sampled points across
    all instances in the image. Note that instances with fewer sampled points
    will be padded with zeros in dim=1.
  """
  num_points_per_instances = keys_to_tensors['image/object/densepose/num']
  dp_y = keys_to_tensors['image/object/densepose/y']
  dp_x = keys_to_tensors['image/object/densepose/x']
  dp_v = keys_to_tensors['image/object/densepose/v']
  dp_u = keys_to_tensors['image/object/densepose/u']
  if isinstance(num_points_per_instances, tf.SparseTensor):
    num_points_per_instances = tf.sparse_tensor_to_dense(
        num_points_per_instances)
  if isinstance(dp_y, tf.SparseTensor):
    dp_y = tf.sparse_tensor_to_dense(dp_y)
  if isinstance(dp_x, tf.SparseTensor):
    dp_x = tf.sparse_tensor_to_dense(dp_x)
  if isinstance(dp_v, tf.SparseTensor):
    dp_v = tf.sparse_tensor_to_dense(dp_v)
  if isinstance(dp_u, tf.SparseTensor):
    dp_u = tf.sparse_tensor_to_dense(dp_u)
  max_points_per_instance = tf.cast(
      tf.math.reduce_max(num_points_per_instances), dtype=tf.int32)
  num_points_cumulative = tf.concat([
      [0], tf.math.cumsum(num_points_per_instances)], axis=0)

  def pad_surface_coordinates_tensor(instance_ind):
    """Pads DensePose surface coordinates for each instance."""
    points_range_start = num_points_cumulative[instance_ind]
    points_range_end = num_points_cumulative[instance_ind + 1]
    y = dp_y[points_range_start:points_range_end]
    x = dp_x[points_range_start:points_range_end]
    v = dp_v[points_range_start:points_range_end]
    u = dp_u[points_range_start:points_range_end]
    # Create [num_points_i, 4] tensor, where num_points_i is the number of
    # sampled points for instance i.
    unpadded_tensor = tf.stack([y, x, v, u], axis=1)
    return shape_utils.pad_or_clip_nd(
        unpadded_tensor, output_shape=[max_points_per_instance, 4])

  return tf.map_fn(pad_surface_coordinates_tensor,
                   tf.range(tf.size(num_points_per_instances)),
                   dtype=tf.float32)
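Example 4 follows the same slicing-and-padding design as Example 2, except that each instance's (y, x, v, u) columns are first stacked into a [num_points_i, 4] tensor before being padded to [max_points_per_instance, 4]. Because the padding consists of zeros, downstream consumers typically need the per-instance counts to tell real points from padding. The following is only a hedged sketch of one way to do that, with made-up values rather than anything taken from the source.

import tensorflow as tf

# Toy padded output shaped like the decoder's result:
# [num_instances=2, max_points=3, 4]. Instance 0 has 2 real points,
# so its last row is zero padding.
coords = tf.constant([[[.1, .2, .3, .4], [.5, .6, .7, .8], [0., 0., 0., 0.]],
                      [[.1, .1, .1, .1], [.2, .2, .2, .2], [.3, .3, .3, .3]]])
num_points = tf.constant([2, 3])
mask = tf.sequence_mask(num_points, maxlen=tf.shape(coords)[1])  # [2, 3] bool
valid = tf.boolean_mask(coords, mask)  # [5, 4]: padded rows dropped
with tf.Session() as sess:
  print(sess.run(valid).shape)  # (5, 4)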