This page collects typical usage examples of the Python method object_detection.core.batcher.BatchQueue. If you have been wondering what batcher.BatchQueue does, how to call it, or what real code that uses it looks like, the curated samples below may help. You can also explore the wider usage of the module that provides it, object_detection.core.batcher.
Below are 9 code examples of batcher.BatchQueue, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
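For orientation before the individual examples, here is a minimal, self-contained sketch of the BatchQueue API as it is used throughout this page (TF 1.x graph mode; the tensor contents and capacity values are illustrative, not taken from any particular codebase). The excerpts below additionally assume the usual imports of that era: import tensorflow as tf, import numpy as np, slim = tf.contrib.slim, plus the object_detection modules referenced by name (fields, preprocessor, util_ops).

import tensorflow as tf
from object_detection.core import batcher

# An illustrative tensor_dict; in the Object Detection API this normally
# comes from a data decoder. Element shapes may differ between examples:
# BatchQueue pads them to a common size when batching and records the
# original shapes so dequeue() can strip the padding off again.
tensor_dict = {'image': tf.random_uniform([480, 640, 3], dtype=tf.float32)}

batch_queue = batcher.BatchQueue(
    tensor_dict,
    batch_size=8,
    batch_queue_capacity=150,
    num_batch_queue_threads=4,
    prefetch_queue_capacity=10)

# dequeue() returns a list of batch_size tensor_dicts with padding removed.
batched_tensor_dicts = batch_queue.dequeue()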
Example 1: _create_losses
# Required import: from object_detection.core import batcher [as alias]
# Or: from object_detection.core.batcher import BatchQueue [as alias]
def _create_losses(input_queue, create_model_fn):
"""Creates loss function for a DetectionModel.
Args:
input_queue: BatchQueue object holding enqueued tensor_dicts.
create_model_fn: A function to create the DetectionModel.
"""
detection_model = create_model_fn()
(images, groundtruth_boxes_list, groundtruth_classes_list,
groundtruth_masks_list
) = _get_inputs(input_queue, detection_model.num_classes)
images = [detection_model.preprocess(image) for image in images]
images = tf.concat(images, 0)
if any(mask is None for mask in groundtruth_masks_list):
groundtruth_masks_list = None
detection_model.provide_groundtruth(groundtruth_boxes_list,
groundtruth_classes_list,
groundtruth_masks_list)
prediction_dict = detection_model.predict(images)
losses_dict = detection_model.loss(prediction_dict)
for loss_tensor in losses_dict.values():
tf.losses.add_loss(loss_tensor)
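Note that _create_losses returns nothing: it registers each component loss in the graph's standard loss collection via tf.losses.add_loss. A caller, typically the training loop, can then recover a single scalar objective; a minimal sketch (the regularization flag is an assumption about the caller, not part of the example above):

# Sum every loss registered through tf.losses.add_loss, optionally folding
# in regularization losses, into one scalar training objective.
total_loss = tf.losses.get_total_loss(add_regularization_losses=True)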
Example 2: _create_input_queue
# Required import: from object_detection.core import batcher [as alias]
# Or: from object_detection.core.batcher import BatchQueue [as alias]
def _create_input_queue(batch_size_per_clone, create_tensor_dict_fn,
batch_queue_capacity, num_batch_queue_threads,
prefetch_queue_capacity, data_augmentation_options):
"""Sets up reader, prefetcher and returns input queue.
Args:
batch_size_per_clone: batch size to use per clone.
create_tensor_dict_fn: function to create tensor dictionary.
batch_queue_capacity: maximum number of elements to store within a queue.
num_batch_queue_threads: number of threads to use for batching.
prefetch_queue_capacity: maximum capacity of the queue used to prefetch
assembled batches.
data_augmentation_options: a list of tuples, where each tuple contains a
data augmentation function and a dictionary containing arguments and their
values (see preprocessor.py).
Returns:
    input queue: a batcher.BatchQueue object holding enqueued tensor_dicts
      (which hold images, boxes and targets). To get a batch of tensor_dicts,
      call input_queue.dequeue().
"""
tensor_dict = create_tensor_dict_fn()
tensor_dict[fields.InputDataFields.image] = tf.expand_dims(
tensor_dict[fields.InputDataFields.image], 0)
images = tensor_dict[fields.InputDataFields.image]
float_images = tf.to_float(images)
tensor_dict[fields.InputDataFields.image] = float_images
if data_augmentation_options:
tensor_dict = preprocessor.preprocess(tensor_dict,
data_augmentation_options)
input_queue = batcher.BatchQueue(
tensor_dict,
batch_size=batch_size_per_clone,
batch_queue_capacity=batch_queue_capacity,
num_batch_queue_threads=num_batch_queue_threads,
prefetch_queue_capacity=prefetch_queue_capacity)
return input_queue
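A typical call site wires this helper to a dataset-specific reader. The sketch below is hypothetical: create_tensor_dict_fn stands in for whatever closure builds the per-example tensor_dict, and the capacity values are illustrative rather than mandated by the API.

input_queue = _create_input_queue(
    batch_size_per_clone=24,
    create_tensor_dict_fn=create_tensor_dict_fn,  # hypothetical reader closure
    batch_queue_capacity=150,
    num_batch_queue_threads=8,
    prefetch_queue_capacity=10,
    # e.g. [(preprocessor.random_horizontal_flip, {})], or [] for no augmentation
    data_augmentation_options=[])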
Example 3: _get_inputs
# Required import: from object_detection.core import batcher [as alias]
# Or: from object_detection.core.batcher import BatchQueue [as alias]
def _get_inputs(input_queue, num_classes):
"""Dequeue batch and construct inputs to object detection model.
Args:
input_queue: BatchQueue object holding enqueued tensor_dicts.
num_classes: Number of classes.
Returns:
    images: a list of 3-D float tensors of images.
locations_list: a list of tensors of shape [num_boxes, 4]
containing the corners of the groundtruth boxes.
classes_list: a list of padded one-hot tensors containing target classes.
    masks_list: a list of 3-D float tensors of shape [num_boxes, image_height,
      image_width] containing instance masks for the objects, if present in the
      input_queue; otherwise None.
"""
read_data_list = input_queue.dequeue()
label_id_offset = 1
def extract_images_and_targets(read_data):
image = read_data[fields.InputDataFields.image]
location_gt = read_data[fields.InputDataFields.groundtruth_boxes]
classes_gt = tf.cast(read_data[fields.InputDataFields.groundtruth_classes],
tf.int32)
classes_gt -= label_id_offset
classes_gt = util_ops.padded_one_hot_encoding(indices=classes_gt,
depth=num_classes, left_pad=0)
masks_gt = read_data.get(fields.InputDataFields.groundtruth_instance_masks)
return image, location_gt, classes_gt, masks_gt
return zip(*map(extract_images_and_targets, read_data_list))
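The label_id_offset of 1 encodes the Object Detection API convention that class id 0 is reserved for background, so groundtruth labels arrive 1-based and must be shifted before one-hot encoding. A small standalone illustration of that step (values chosen for clarity):

import tensorflow as tf
from object_detection.utils import ops as util_ops

# Groundtruth classes [1, 3] with num_classes = 3 become [0, 2] after the
# offset, and padded_one_hot_encoding turns them into one-hot rows:
# [[1., 0., 0.], [0., 0., 1.]]
classes_gt = tf.constant([1, 3], dtype=tf.int32) - 1
one_hot = util_ops.padded_one_hot_encoding(indices=classes_gt, depth=3, left_pad=0)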
Example 4: test_batch_and_unpad_2d_tensors_of_different_sizes_in_1st_dimension
# Required import: from object_detection.core import batcher [as alias]
# Or: from object_detection.core.batcher import BatchQueue [as alias]
def test_batch_and_unpad_2d_tensors_of_different_sizes_in_1st_dimension(self):
with self.test_session() as sess:
batch_size = 3
num_batches = 2
examples = tf.Variable(tf.constant(2, dtype=tf.int32))
counter = examples.count_up_to(num_batches * batch_size + 2)
boxes = tf.tile(
tf.reshape(tf.range(4), [1, 4]), tf.stack([counter, tf.constant(1)]))
batch_queue = batcher.BatchQueue(
tensor_dict={'boxes': boxes},
batch_size=batch_size,
batch_queue_capacity=100,
num_batch_queue_threads=1,
prefetch_queue_capacity=100)
batch = batch_queue.dequeue()
for tensor_dict in batch:
for tensor in tensor_dict.values():
self.assertAllEqual([None, 4], tensor.get_shape().as_list())
tf.initialize_all_variables().run()
with slim.queues.QueueRunners(sess):
i = 2
for _ in range(num_batches):
batch_np = sess.run(batch)
for tensor_dict in batch_np:
for tensor in tensor_dict.values():
self.assertAllEqual(tensor, np.tile(np.arange(4), (i, 1)))
i += 1
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(batch)
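The count_up_to pattern in this and the following tests is what produces tensors of different sizes and eventually ends the run: each session-level evaluation of counter returns the current value and increments the variable, raising tf.errors.OutOfRangeError once the limit is reached, which is exactly what the final sess.run(batch) asserts. A stripped-down sketch of just that mechanism (tf.global_variables_initializer() is the non-deprecated alias of the tf.initialize_all_variables() call used in these tests):

import tensorflow as tf

v = tf.Variable(2, dtype=tf.int32)
counter = v.count_up_to(5)  # yields 2, 3, 4, then raises OutOfRangeError
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(counter))  # 2
    print(sess.run(counter))  # 3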
Example 5: test_batch_and_unpad_2d_tensors_of_different_sizes_in_all_dimensions
# Required import: from object_detection.core import batcher [as alias]
# Or: from object_detection.core.batcher import BatchQueue [as alias]
def test_batch_and_unpad_2d_tensors_of_different_sizes_in_all_dimensions(
self):
with self.test_session() as sess:
batch_size = 3
num_batches = 2
examples = tf.Variable(tf.constant(2, dtype=tf.int32))
counter = examples.count_up_to(num_batches * batch_size + 2)
image = tf.reshape(
tf.range(counter * counter), tf.stack([counter, counter]))
batch_queue = batcher.BatchQueue(
tensor_dict={'image': image},
batch_size=batch_size,
batch_queue_capacity=100,
num_batch_queue_threads=1,
prefetch_queue_capacity=100)
batch = batch_queue.dequeue()
for tensor_dict in batch:
for tensor in tensor_dict.values():
self.assertAllEqual([None, None], tensor.get_shape().as_list())
tf.initialize_all_variables().run()
with slim.queues.QueueRunners(sess):
i = 2
for _ in range(num_batches):
batch_np = sess.run(batch)
for tensor_dict in batch_np:
for tensor in tensor_dict.values():
self.assertAllEqual(tensor, np.arange(i * i).reshape((i, i)))
i += 1
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(batch)
Example 6: test_batcher_when_batch_size_is_one
# Required import: from object_detection.core import batcher [as alias]
# Or: from object_detection.core.batcher import BatchQueue [as alias]
def test_batcher_when_batch_size_is_one(self):
with self.test_session() as sess:
batch_size = 1
num_batches = 2
examples = tf.Variable(tf.constant(2, dtype=tf.int32))
counter = examples.count_up_to(num_batches * batch_size + 2)
image = tf.reshape(
tf.range(counter * counter), tf.stack([counter, counter]))
batch_queue = batcher.BatchQueue(
tensor_dict={'image': image},
batch_size=batch_size,
batch_queue_capacity=100,
num_batch_queue_threads=1,
prefetch_queue_capacity=100)
batch = batch_queue.dequeue()
for tensor_dict in batch:
for tensor in tensor_dict.values():
self.assertAllEqual([None, None], tensor.get_shape().as_list())
tf.initialize_all_variables().run()
with slim.queues.QueueRunners(sess):
i = 2
for _ in range(num_batches):
batch_np = sess.run(batch)
for tensor_dict in batch_np:
for tensor in tensor_dict.values():
self.assertAllEqual(tensor, np.arange(i * i).reshape((i, i)))
i += 1
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(batch)
Example 7: test_batch_and_unpad_2d_tensors_of_same_size_in_all_dimensions
# Required import: from object_detection.core import batcher [as alias]
# Or: from object_detection.core.batcher import BatchQueue [as alias]
def test_batch_and_unpad_2d_tensors_of_same_size_in_all_dimensions(self):
with self.test_session() as sess:
batch_size = 3
num_batches = 2
examples = tf.Variable(tf.constant(1, dtype=tf.int32))
counter = examples.count_up_to(num_batches * batch_size + 1)
image = tf.reshape(tf.range(1, 13), [4, 3]) * counter
batch_queue = batcher.BatchQueue(
tensor_dict={'image': image},
batch_size=batch_size,
batch_queue_capacity=100,
num_batch_queue_threads=1,
prefetch_queue_capacity=100)
batch = batch_queue.dequeue()
for tensor_dict in batch:
for tensor in tensor_dict.values():
self.assertAllEqual([4, 3], tensor.get_shape().as_list())
tf.initialize_all_variables().run()
with slim.queues.QueueRunners(sess):
i = 1
for _ in range(num_batches):
batch_np = sess.run(batch)
for tensor_dict in batch_np:
for tensor in tensor_dict.values():
self.assertAllEqual(tensor, np.arange(1, 13).reshape((4, 3)) * i)
i += 1
with self.assertRaises(tf.errors.OutOfRangeError):
sess.run(batch)
Example 8: _create_losses
# Required import: from object_detection.core import batcher [as alias]
# Or: from object_detection.core.batcher import BatchQueue [as alias]
def _create_losses(input_queue, create_model_fn, train_config):
"""Creates loss function for a DetectionModel.
Args:
input_queue: BatchQueue object holding enqueued tensor_dicts.
create_model_fn: A function to create the DetectionModel.
train_config: a train_pb2.TrainConfig protobuf.
"""
detection_model = create_model_fn()
(images, _, groundtruth_boxes_list, groundtruth_classes_list,
groundtruth_masks_list, groundtruth_keypoints_list, _) = get_inputs(
input_queue,
detection_model.num_classes,
train_config.merge_multiple_label_boxes)
preprocessed_images = []
true_image_shapes = []
for image in images:
resized_image, true_image_shape = detection_model.preprocess(image)
preprocessed_images.append(resized_image)
true_image_shapes.append(true_image_shape)
images = tf.concat(preprocessed_images, 0)
true_image_shapes = tf.concat(true_image_shapes, 0)
if any(mask is None for mask in groundtruth_masks_list):
groundtruth_masks_list = None
if any(keypoints is None for keypoints in groundtruth_keypoints_list):
groundtruth_keypoints_list = None
detection_model.provide_groundtruth(groundtruth_boxes_list,
groundtruth_classes_list,
groundtruth_masks_list,
groundtruth_keypoints_list)
prediction_dict = detection_model.predict(images, true_image_shapes)
losses_dict = detection_model.loss(prediction_dict, true_image_shapes)
for loss_tensor in losses_dict.values():
tf.losses.add_loss(loss_tensor)
Example 9: _create_losses
# Required import: from object_detection.core import batcher [as alias]
# Or: from object_detection.core.batcher import BatchQueue [as alias]
def _create_losses(input_queue, create_model_fn, train_config):
"""Creates loss function for a DetectionModel.
Args:
input_queue: BatchQueue object holding enqueued tensor_dicts.
create_model_fn: A function to create the DetectionModel.
train_config: a train_pb2.TrainConfig protobuf.
"""
detection_model = create_model_fn()
(images, _, groundtruth_boxes_list, groundtruth_classes_list,
groundtruth_masks_list, groundtruth_keypoints_list, _) = get_inputs(
input_queue,
detection_model.num_classes,
train_config.merge_multiple_label_boxes,
train_config.use_multiclass_scores)
preprocessed_images = []
true_image_shapes = []
for image in images:
resized_image, true_image_shape = detection_model.preprocess(image)
preprocessed_images.append(resized_image)
true_image_shapes.append(true_image_shape)
images = tf.concat(preprocessed_images, 0)
true_image_shapes = tf.concat(true_image_shapes, 0)
if any(mask is None for mask in groundtruth_masks_list):
groundtruth_masks_list = None
if any(keypoints is None for keypoints in groundtruth_keypoints_list):
groundtruth_keypoints_list = None
detection_model.provide_groundtruth(groundtruth_boxes_list,
groundtruth_classes_list,
groundtruth_masks_list,
groundtruth_keypoints_list)
prediction_dict = detection_model.predict(images, true_image_shapes)
losses_dict = detection_model.loss(prediction_dict, true_image_shapes)
for loss_tensor in losses_dict.values():
tf.losses.add_loss(loss_tensor)
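Examples 8 and 9 differ only in whether use_multiclass_scores is forwarded to get_inputs alongside merge_multiple_label_boxes. Both flags live on the TrainConfig proto; a hedged sketch of configuring them programmatically (field names taken from the examples above, assuming they match the train.proto definition):

from object_detection.protos import train_pb2

train_config = train_pb2.TrainConfig()
train_config.merge_multiple_label_boxes = True  # collapse duplicate boxes into multi-label targets
train_config.use_multiclass_scores = False      # keep one-hot groundtruth class targets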