This page collects typical usage examples of the Python method object_detection.utils.dataset_util.int64_feature. If you are wondering what dataset_util.int64_feature does, how to call it, or where to find usage examples, the curated code samples below should help. You can also explore the containing module, object_detection.utils.dataset_util, for related helpers.
The following lists 11 code examples of dataset_util.int64_feature, ordered by popularity by default.
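Before the examples, a quick orientation: these helpers are thin wrappers that pack Python values into tf.train.Feature protos. The sketch below mirrors how the dataset_util module defines them (reproduced here only for illustration; consult the module itself for the authoritative definitions):

import tensorflow as tf

def int64_feature(value):
  # Single integer -> Feature wrapping a length-1 Int64List.
  return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))

def int64_list_feature(value):
  # List of integers -> Feature wrapping an Int64List.
  return tf.train.Feature(int64_list=tf.train.Int64List(value=value))

def bytes_feature(value):
  # Single bytes object (e.g. an encoded image) -> Feature wrapping a BytesList.
  return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

def float_list_feature(value):
  # List of floats -> Feature wrapping a FloatList.
  return tf.train.Feature(float_list=tf.train.FloatList(value=value))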
Example 1: create_tf_record
# Required module import: from object_detection.utils import dataset_util [as alias]
# Or: from object_detection.utils.dataset_util import int64_feature [as alias]
def create_tf_record(self):
  path = os.path.join(self.get_temp_dir(), 'tfrecord')
  writer = tf.python_io.TFRecordWriter(path)
  image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8)
  flat_mask = (4 * 5) * [1.0]
  with self.test_session():
    encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).eval()
  example = tf.train.Example(features=tf.train.Features(feature={
      'image/encoded': dataset_util.bytes_feature(encoded_jpeg),
      'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),
      'image/height': dataset_util.int64_feature(4),
      'image/width': dataset_util.int64_feature(5),
      'image/object/bbox/xmin': dataset_util.float_list_feature([0.0]),
      'image/object/bbox/xmax': dataset_util.float_list_feature([1.0]),
      'image/object/bbox/ymin': dataset_util.float_list_feature([0.0]),
      'image/object/bbox/ymax': dataset_util.float_list_feature([1.0]),
      'image/object/class/label': dataset_util.int64_list_feature([2]),
      'image/object/mask': dataset_util.float_list_feature(flat_mask),
  }))
  writer.write(example.SerializeToString())
  writer.close()
  return path
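For context, the record written above can be read back and inspected like this — a minimal sketch (the read_first_example helper is not part of the original test) using the TF1-style record iterator that matches the tf.python_io.TFRecordWriter used above:

import tensorflow as tf

def read_first_example(path):
  # Iterate the raw records and parse the features written by create_tf_record.
  for record in tf.python_io.tf_record_iterator(path):
    example = tf.train.Example()
    example.ParseFromString(record)
    height = example.features.feature['image/height'].int64_list.value[0]
    width = example.features.feature['image/width'].int64_list.value[0]
    label = example.features.feature['image/object/class/label'].int64_list.value[0]
    return height, width, label  # -> (4, 5, 2) for the record above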
Example 2: _create_tfexample
# Required module import: from object_detection.utils import dataset_util [as alias]
# Or: from object_detection.utils.dataset_util import int64_feature [as alias]
def _create_tfexample(label_map_dict,
                      image_id, encoded_image, encoded_next_image,
                      disparity_image, next_disparity_image, flow):
  # camera_intrinsics = np.array([982.529, 690.0, 233.1966])
  camera_intrinsics = np.array([725.0, 620.5, 187.0], dtype=np.float32)
  f, x0, y0 = camera_intrinsics
  depth = _depth_from_disparity_image(disparity_image, f)
  next_depth = _depth_from_disparity_image(next_disparity_image, f)
  # The original excerpt referenced undefined `height`/`width` variables;
  # taking the dimensions from the disparity map is an assumption.
  height, width = disparity_image.shape[:2]
  # The original excerpt also referenced an undefined `example_flow`;
  # the `flow` argument is used directly instead (assumption).
  example_flow = flow
  key = hashlib.sha256(encoded_image).hexdigest()
  example = tf.train.Example(features=tf.train.Features(feature={
      'image/height': dataset_util.int64_feature(height),
      'image/width': dataset_util.int64_feature(width),
      'image/filename': dataset_util.bytes_feature(image_id.encode('utf8')),
      'image/source_id': dataset_util.bytes_feature(image_id.encode('utf8')),
      'image/encoded': dataset_util.bytes_feature(encoded_image),
      'next_image/encoded': dataset_util.bytes_feature(encoded_next_image),
      'image/format': dataset_util.bytes_feature('png'.encode('utf8')),
      'image/key/sha256': dataset_util.bytes_feature(key.encode('utf8')),
      'image/depth': dataset_util.float_list_feature(depth.ravel().tolist()),
      'next_image/depth': dataset_util.float_list_feature(next_depth.ravel().tolist()),
      'image/flow': dataset_util.float_list_feature(example_flow.ravel().tolist()),
      'image/camera/intrinsics': dataset_util.float_list_feature(camera_intrinsics.tolist())
  }))
  # The original excerpt returned an undefined `num_instances`; this trimmed
  # example writes no per-object annotations, so zero is returned (assumption).
  num_instances = 0
  return example, num_instances
Example 3: testDecodeEmptyPngInstanceMasks
# Required module import: from object_detection.utils import dataset_util [as alias]
# Or: from object_detection.utils.dataset_util import int64_feature [as alias]
def testDecodeEmptyPngInstanceMasks(self):
  image_tensor = np.random.randint(256, size=(10, 10, 3)).astype(np.uint8)
  encoded_jpeg = self._EncodeImage(image_tensor)
  encoded_masks = []
  example = tf.train.Example(
      features=tf.train.Features(
          feature={
              'image/encoded':
                  dataset_util.bytes_feature(encoded_jpeg),
              'image/format':
                  dataset_util.bytes_feature('jpeg'.encode('utf8')),
              'image/object/mask':
                  dataset_util.bytes_list_feature(encoded_masks),
              'image/height':
                  dataset_util.int64_feature(10),
              'image/width':
                  dataset_util.int64_feature(10),
          })).SerializeToString()
  example_decoder = tf_example_decoder.TfExampleDecoder(
      load_instance_masks=True, instance_mask_type=input_reader_pb2.PNG_MASKS)
  tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
  with self.test_session() as sess:
    tensor_dict = sess.run(tensor_dict)
    self.assertAllEqual(
        tensor_dict[fields.InputDataFields.groundtruth_instance_masks].shape,
        [0, 10, 10])
Example 4: testInstancesNotAvailableByDefault
# Required module import: from object_detection.utils import dataset_util [as alias]
# Or: from object_detection.utils.dataset_util import int64_feature [as alias]
def testInstancesNotAvailableByDefault(self):
  num_instances = 4
  image_height = 5
  image_width = 3
  # Randomly generate image.
  image_tensor = np.random.randint(
      256, size=(image_height, image_width, 3)).astype(np.uint8)
  encoded_jpeg = self._EncodeImage(image_tensor)
  # Randomly generate instance segmentation masks.
  instance_masks = (
      np.random.randint(2, size=(num_instances, image_height,
                                 image_width)).astype(np.float32))
  instance_masks_flattened = np.reshape(instance_masks, [-1])
  # Randomly generate class labels for each instance.
  object_classes = np.random.randint(
      100, size=(num_instances)).astype(np.int64)
  example = tf.train.Example(
      features=tf.train.Features(
          feature={
              'image/encoded':
                  dataset_util.bytes_feature(encoded_jpeg),
              'image/format':
                  dataset_util.bytes_feature('jpeg'.encode('utf8')),
              'image/height':
                  dataset_util.int64_feature(image_height),
              'image/width':
                  dataset_util.int64_feature(image_width),
              'image/object/mask':
                  dataset_util.float_list_feature(instance_masks_flattened),
              'image/object/class/label':
                  dataset_util.int64_list_feature(object_classes)
          })).SerializeToString()
  example_decoder = tf_example_decoder.TfExampleDecoder()
  tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
  self.assertTrue(
      fields.InputDataFields.groundtruth_instance_masks not in tensor_dict)
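By contrast, the masks serialized above do become available when the decoder is told to load them. A short sketch, assuming the serialized example and imports from the test above are in scope; NUMERICAL_MASKS is used here because the masks were written with float_list_feature (PNG_MASKS, as in Examples 3 and 9, applies to PNG-encoded masks):

from object_detection.protos import input_reader_pb2

example_decoder = tf_example_decoder.TfExampleDecoder(
    load_instance_masks=True,
    instance_mask_type=input_reader_pb2.NUMERICAL_MASKS)
tensor_dict = example_decoder.decode(tf.convert_to_tensor(example))
# tensor_dict now contains fields.InputDataFields.groundtruth_instance_masks,
# a [num_instances, image_height, image_width] float tensor.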
Example 5: create_tf_record
# Required module import: from object_detection.utils import dataset_util [as alias]
# Or: from object_detection.utils.dataset_util import int64_feature [as alias]
def create_tf_record(self, has_additional_channels=False, num_examples=1):
  path = os.path.join(self.get_temp_dir(), 'tfrecord')
  writer = tf.python_io.TFRecordWriter(path)
  image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8)
  additional_channels_tensor = np.random.randint(
      255, size=(4, 5, 1)).astype(np.uint8)
  flat_mask = (4 * 5) * [1.0]
  with self.test_session():
    encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).eval()
    encoded_additional_channels_jpeg = tf.image.encode_jpeg(
        tf.constant(additional_channels_tensor)).eval()
  for i in range(num_examples):
    features = {
        # Encode the source id so the feature holds bytes under Python 3.
        'image/source_id': dataset_util.bytes_feature(str(i).encode('utf8')),
        'image/encoded': dataset_util.bytes_feature(encoded_jpeg),
        'image/format': dataset_util.bytes_feature('jpeg'.encode('utf8')),
        'image/height': dataset_util.int64_feature(4),
        'image/width': dataset_util.int64_feature(5),
        'image/object/bbox/xmin': dataset_util.float_list_feature([0.0]),
        'image/object/bbox/xmax': dataset_util.float_list_feature([1.0]),
        'image/object/bbox/ymin': dataset_util.float_list_feature([0.0]),
        'image/object/bbox/ymax': dataset_util.float_list_feature([1.0]),
        'image/object/class/label': dataset_util.int64_list_feature([2]),
        'image/object/mask': dataset_util.float_list_feature(flat_mask),
    }
    if has_additional_channels:
      additional_channels_key = 'image/additional_channels/encoded'
      features[additional_channels_key] = dataset_util.bytes_list_feature(
          [encoded_additional_channels_jpeg] * 2)
    example = tf.train.Example(features=tf.train.Features(feature=features))
    writer.write(example.SerializeToString())
  writer.close()
  return path
Example 6: create_tf_example
# Required module import: from object_detection.utils import dataset_util [as alias]
# Or: from object_detection.utils.dataset_util import int64_feature [as alias]
def create_tf_example(group, path):
  with tf.gfile.GFile(os.path.join(path, '{}'.format(group.filename)), 'rb') as fid:
    encoded_jpg = fid.read()
  encoded_jpg_io = io.BytesIO(encoded_jpg)
  image = Image.open(encoded_jpg_io)
  width, height = image.size
  filename = group.filename.encode('utf8')
  image_format = b'jpg'
  xmins = []
  xmaxs = []
  ymins = []
  ymaxs = []
  classes_text = []
  classes = []
  for index, row in group.object.iterrows():
    xmins.append(row['xmin'] / width)
    xmaxs.append(row['xmax'] / width)
    ymins.append(row['ymin'] / height)
    ymaxs.append(row['ymax'] / height)
    classes_text.append(row['class'].encode('utf8'))
    classes.append(class_text_to_int(row['class']))
  tf_example = tf.train.Example(features=tf.train.Features(feature={
      'image/height': dataset_util.int64_feature(height),
      'image/width': dataset_util.int64_feature(width),
      'image/filename': dataset_util.bytes_feature(filename),
      'image/source_id': dataset_util.bytes_feature(filename),
      'image/encoded': dataset_util.bytes_feature(encoded_jpg),
      'image/format': dataset_util.bytes_feature(image_format),
      'image/object/bbox/xmin': dataset_util.float_list_feature(xmins),
      'image/object/bbox/xmax': dataset_util.float_list_feature(xmaxs),
      'image/object/bbox/ymin': dataset_util.float_list_feature(ymins),
      'image/object/bbox/ymax': dataset_util.float_list_feature(ymaxs),
      'image/object/class/text': dataset_util.bytes_list_feature(classes_text),
      'image/object/class/label': dataset_util.int64_list_feature(classes),
  }))
  return tf_example
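A function like the one above is usually driven by a small script that groups CSV annotations by image and streams the resulting examples into a TFRecord file. The sketch below is a hypothetical driver, not part of the example itself; the split helper, the column names, and the paths are assumptions:

from collections import namedtuple

import pandas as pd
import tensorflow as tf

def split(df, group):
  # One entry per image: (filename, all annotation rows for that image).
  data = namedtuple('data', ['filename', 'object'])
  gb = df.groupby(group)
  return [data(filename, gb.get_group(filename)) for filename in gb.groups]

def main(csv_input, image_dir, output_path):
  writer = tf.python_io.TFRecordWriter(output_path)
  examples = pd.read_csv(csv_input)  # columns: filename, xmin, ymin, xmax, ymax, class
  for group in split(examples, 'filename'):
    tf_example = create_tf_example(group, image_dir)
    writer.write(tf_example.SerializeToString())
  writer.close()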
Example 7: create_tf_example
# Required module import: from object_detection.utils import dataset_util [as alias]
# Or: from object_detection.utils.dataset_util import int64_feature [as alias]
def create_tf_example(img_fname, logo_name, bbox, img_dir, logo_names):
  x1, y1, w, h = list(map(int, bbox))
  x2, y2 = x1 + w, y1 + h
  cls_idx = logo_names[logo_name]
  cls_text = logo_name.encode('utf8')
  with tf.gfile.GFile(os.path.join(img_dir, img_fname), 'rb') as fid:
    encoded_jpg = fid.read()
  encoded_jpg_io = io.BytesIO(encoded_jpg)
  image = Image.open(encoded_jpg_io)
  width, height = image.size
  xmin = [x1 / width]
  xmax = [x2 / width]
  ymin = [y1 / height]
  ymax = [y2 / height]
  cls_text = [cls_text]
  cls_idx = [cls_idx]
  filename = img_fname.encode('utf8')
  image_format = b'jpg'
  tf_example = tf.train.Example(features=tf.train.Features(feature={
      'image/height': dataset_util.int64_feature(height),
      'image/width': dataset_util.int64_feature(width),
      'image/filename': dataset_util.bytes_feature(filename),
      'image/source_id': dataset_util.bytes_feature(filename),
      'image/encoded': dataset_util.bytes_feature(encoded_jpg),
      'image/format': dataset_util.bytes_feature(image_format),
      'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),
      'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),
      'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),
      'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),
      'image/object/class/text': dataset_util.bytes_list_feature(cls_text),
      'image/object/class/label': dataset_util.int64_list_feature(cls_idx),
  }))
  return tf_example
Example 8: create_tf_example
# Required module import: from object_detection.utils import dataset_util [as alias]
# Or: from object_detection.utils.dataset_util import int64_feature [as alias]
def create_tf_example(csv, img_dir):
  img_fname = csv[0]
  x1, y1, x2, y2 = list(map(int, csv[1:-1]))
  cls_idx = int(csv[-1])
  cls_text = config.CLASS_NAMES[cls_idx].encode('utf8')
  with tf.gfile.GFile(os.path.join(img_dir, img_fname), 'rb') as fid:
    encoded_jpg = fid.read()
  encoded_jpg_io = io.BytesIO(encoded_jpg)
  image = Image.open(encoded_jpg_io)
  width, height = image.size
  xmin = [x1 / width]
  xmax = [x2 / width]
  ymin = [y1 / height]
  ymax = [y2 / height]
  cls_text = [cls_text]
  cls_idx = [cls_idx]
  filename = img_fname.encode('utf8')
  image_format = b'jpg'
  tf_example = tf.train.Example(features=tf.train.Features(feature={
      'image/height': dataset_util.int64_feature(height),
      'image/width': dataset_util.int64_feature(width),
      'image/filename': dataset_util.bytes_feature(filename),
      'image/source_id': dataset_util.bytes_feature(filename),
      'image/encoded': dataset_util.bytes_feature(encoded_jpg),
      'image/format': dataset_util.bytes_feature(image_format),
      'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),
      'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),
      'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),
      'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),
      'image/object/class/text': dataset_util.bytes_list_feature(cls_text),
      'image/object/class/label': dataset_util.int64_list_feature(cls_idx),
  }))
  return tf_example
Example 9: testDecodeEmptyPngInstanceMasks
# Required module import: from object_detection.utils import dataset_util [as alias]
# Or: from object_detection.utils.dataset_util import int64_feature [as alias]
def testDecodeEmptyPngInstanceMasks(self):
  image_tensor = np.random.randint(256, size=(10, 10, 3)).astype(np.uint8)
  encoded_png, _ = self._create_encoded_and_decoded_data(image_tensor, 'png')
  encoded_masks = []

  def graph_fn():
    example = tf.train.Example(
        features=tf.train.Features(
            feature={
                'image/encoded':
                    dataset_util.bytes_feature(encoded_png),
                'image/format':
                    dataset_util.bytes_feature(six.b('png')),
                'image/object/mask':
                    dataset_util.bytes_list_feature(encoded_masks),
                'image/height':
                    dataset_util.int64_feature(10),
                'image/width':
                    dataset_util.int64_feature(10),
            })).SerializeToString()
    example_decoder = tf_example_decoder.TfExampleDecoder(
        load_instance_masks=True,
        instance_mask_type=input_reader_pb2.PNG_MASKS)
    return example_decoder.decode(tf.convert_to_tensor(example))

  tensor_dict = self.execute_cpu(graph_fn, [])
  self.assertAllEqual(
      tensor_dict[fields.InputDataFields.groundtruth_instance_masks].shape,
      [0, 10, 10])
Example 10: testContextFeaturesNotAvailableByDefault
# Required module import: from object_detection.utils import dataset_util [as alias]
# Or: from object_detection.utils.dataset_util import int64_feature [as alias]
def testContextFeaturesNotAvailableByDefault(self):
  image_tensor = np.random.randint(256, size=(4, 5, 3)).astype(np.uint8)
  encoded_jpeg, _ = self._create_encoded_and_decoded_data(
      image_tensor, 'jpeg')
  bbox_ymins = [0.0, 4.0]
  bbox_xmins = [1.0, 5.0]
  bbox_ymaxs = [2.0, 6.0]
  bbox_xmaxs = [3.0, 7.0]
  num_features = 10
  context_feature_length = 10
  context_features = np.random.random(num_features * context_feature_length)

  def graph_fn():
    example = tf.train.Example(
        features=tf.train.Features(
            feature={
                'image/encoded':
                    dataset_util.bytes_feature(encoded_jpeg),
                'image/format':
                    dataset_util.bytes_feature(six.b('jpeg')),
                'image/context_features':
                    dataset_util.float_list_feature(context_features),
                'image/context_feature_length':
                    dataset_util.int64_feature(context_feature_length),
                'image/object/bbox/ymin':
                    dataset_util.float_list_feature(bbox_ymins),
                'image/object/bbox/xmin':
                    dataset_util.float_list_feature(bbox_xmins),
                'image/object/bbox/ymax':
                    dataset_util.float_list_feature(bbox_ymaxs),
                'image/object/bbox/xmax':
                    dataset_util.float_list_feature(bbox_xmaxs),
            })).SerializeToString()
    example_decoder = tf_example_decoder.TfExampleDecoder()
    return example_decoder.decode(tf.convert_to_tensor(example))

  tensor_dict = self.execute_cpu(graph_fn, [])
  self.assertNotIn(fields.InputDataFields.context_features, tensor_dict)
Example 11: create_tf_record_with_context
# Required module import: from object_detection.utils import dataset_util [as alias]
# Or: from object_detection.utils.dataset_util import int64_feature [as alias]
def create_tf_record_with_context(self):
  path = os.path.join(self.get_temp_dir(), 'tfrecord')
  writer = tf.python_io.TFRecordWriter(path)
  image_tensor = np.random.randint(255, size=(4, 5, 3)).astype(np.uint8)
  flat_mask = (4 * 5) * [1.0]
  context_features = (10 * 3) * [1.0]
  with self.test_session():
    encoded_jpeg = tf.image.encode_jpeg(tf.constant(image_tensor)).eval()
  example = tf.train.Example(
      features=tf.train.Features(
          feature={
              'image/encoded':
                  dataset_util.bytes_feature(encoded_jpeg),
              'image/format':
                  dataset_util.bytes_feature('jpeg'.encode('utf8')),
              'image/height':
                  dataset_util.int64_feature(4),
              'image/width':
                  dataset_util.int64_feature(5),
              'image/object/bbox/xmin':
                  dataset_util.float_list_feature([0.0]),
              'image/object/bbox/xmax':
                  dataset_util.float_list_feature([1.0]),
              'image/object/bbox/ymin':
                  dataset_util.float_list_feature([0.0]),
              'image/object/bbox/ymax':
                  dataset_util.float_list_feature([1.0]),
              'image/object/class/label':
                  dataset_util.int64_list_feature([2]),
              'image/object/mask':
                  dataset_util.float_list_feature(flat_mask),
              'image/context_features':
                  dataset_util.float_list_feature(context_features),
              'image/context_feature_length':
                  dataset_util.int64_list_feature([10]),
          }))
  writer.write(example.SerializeToString())
  writer.close()
  return path
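One small detail worth noting: Example 10 writes 'image/context_feature_length' with int64_feature, while Example 11 uses int64_list_feature([10]). For a single value the two helpers should produce the same Feature proto, as this quick check (a sketch, not part of the examples above) suggests:

from object_detection.utils import dataset_util

single = dataset_util.int64_feature(10)
listed = dataset_util.int64_list_feature([10])
assert single == listed  # both wrap the value in a length-1 Int64List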