本文整理汇总了Python中chainercv.utils.read_image方法的典型用法代码示例。如果您正苦于以下问题：Python utils.read_image方法的具体用法？Python utils.read_image怎么用？Python utils.read_image使用的例子？那么，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类chainercv.utils的用法示例。
在下文中一共展示了utils.read_image方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: export_onnx
# 需要导入模块: from chainercv import utils [as 别名]
# 或者: from chainercv.utils import read_image [as 别名]
def export_onnx(input_image_path, output_path, gpu, only_output=True):
    """Export a pretrained ResNet50 model to an ONNX graph.

    When ``only_output`` is True, a single 'model.onnx' file is written
    under ``output_path``; otherwise a full ONNX test case (graph plus
    the Chainer-computed input/output tensors) is emitted there.
    """
    model = C.ResNet50(pretrained_model='imagenet', arch='fb')

    # Standard ImageNet preprocessing: scale, center-crop, mean-subtract,
    # then add a leading batch axis.
    x = read_image(input_image_path)
    x = center_crop(scale(x, 256), (224, 224))
    x -= model.mean
    x = x[None, :]

    if gpu >= 0:
        model.to_gpu()
        x = chainer.cuda.to_gpu(x)

    if not only_output:
        # Also emit the input and output produced by Chainer so the
        # directory can be used directly as an ONNX test dataset.
        export_testcase(model, x, output_path)
    else:
        os.makedirs(output_path, exist_ok=True)
        export(model, x, filename=os.path.join(output_path, 'model.onnx'))
示例2: main
# 需要导入模块: from chainercv import utils [as 别名]
# 或者: from chainercv.utils import read_image [as 别名]
def main():
    """Run SSPYOLOv2 point prediction on one image and display the result."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--pretrained-model')
    parser.add_argument('--gpu', type=int, default=-1)
    parser.add_argument('image')
    args = parser.parse_args()

    img = read_image(args.image)

    model = SSPYOLOv2()
    chainer.serializers.load_npz(args.pretrained_model, model)
    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    points, labels, scores = model.predict([img])
    point, label, score = points[0], labels[0], scores[0]

    # Only the first detected point set is visualized.
    vis_point(img, point[:1])
    plt.show()
示例3: _get_example
# 需要导入模块: from chainercv import utils [as 别名]
# 或者: from chainercv.utils import read_image [as 别名]
def _get_example(self, i):
    """Return the ``i``-th example as ``(img, point, label)``.

    ``img`` is the CHW image, ``point`` a float32 array of shape
    (R, 9, 2) in pixel coordinates, and ``label`` an int32 array of
    shape (R,).
    """
    img_path = os.path.join(self.base_dir, self.img_paths[i].rstrip())
    img = read_image(img_path)

    # FIX: the original read H/W only inside the non-empty-annotation
    # branch, but used them unconditionally below, raising NameError
    # whenever the annotation file was empty. Read the shape up front.
    _, H, W = img.shape

    # The annotation path mirrors the image path with directory and
    # extension substitutions.
    anno_path = img_path.replace(
        'images', 'labels').replace(
        'JPEGImages', 'labels').replace(
        '.jpg', '.txt').replace('.png', '.txt')

    max_vals = 50 * 21  # at most 50 objects, 21 values each
    anno = np.zeros(max_vals)
    if os.path.getsize(anno_path):
        tmp = read_truths_args(anno_path, 8.0 / W)
        if tmp.size > max_vals:
            anno = tmp[0:max_vals]
        elif tmp.size > 0:
            anno[0:tmp.size] = tmp

    anno = anno.reshape(-1, 21)
    anno = anno[:truths_length(anno)]

    # Columns 1..18 are 9 (x, y) pairs normalized to [0, 1]; scale back
    # to pixel coordinates. Column 0 is the class label.
    point = anno[:, 1:19].reshape(-1, 9, 2).astype(np.float32)
    point[:, :, 0] *= W
    point[:, :, 1] *= H
    label = anno[:, 0].astype(np.int32)
    return img, point, label
示例4: get_example
# 需要导入模块: from chainercv import utils [as 别名]
# 或者: from chainercv.utils import read_image [as 别名]
def get_example(self, i):
    """Return the ``i``-th example, optionally filtering crowded regions."""
    img_id = self.ids[i]
    img_fn = os.path.join(
        self.data_dir, 'images', self.img_dirs[img_id],
        self.img_props[img_id]['file_name'])
    img = utils.read_image(img_fn, dtype=np.float32, color=True)
    _, H, W = img.shape

    bbox, whole_mask, label, crowded, area = self._get_annotations(i)
    if not self.use_crowded:
        # Keep only non-crowded annotations; compute the mask once.
        keep = np.logical_not(crowded)
        bbox = bbox[keep]
        label = label[keep]
        whole_mask = whole_mask[keep]
        area = area[keep]
        crowded = crowded[keep]

    example = [img, bbox, whole_mask, label]
    if self.return_crowded:
        example.append(crowded)
    if self.return_area:
        example.append(area)
    return tuple(example)
示例5: _get_image
# 需要导入模块: from chainercv import utils [as 别名]
# 或者: from chainercv.utils import read_image [as 别名]
def _get_image(self, i):
    """Load the ``i``-th image, applying the optional transform."""
    path = os.path.join(self.images_dir_path, self.image_file_names[i])
    image = read_image(path, color=True)
    return image if self._transform is None else self._transform(image)
示例6: export_onnx
# 需要导入模块: from chainercv import utils [as 别名]
# 或者: from chainercv.utils import read_image [as 别名]
def export_onnx(input_image_path, output_path, gpu, only_output=True):
    """Export a pretrained YOLOv2 Tiny model to an ONNX graph.

    When ``only_output`` is True, a single 'model.onnx' file is written
    under ``output_path``; otherwise a full ONNX test case (graph plus
    the Chainer-computed input/output tensors) is emitted there.
    """
    model = YOLOv2Tiny(pretrained_model='voc0712')

    # Add a leading batch axis; no further preprocessing is applied.
    x = read_image(input_image_path)[None, :]

    if gpu >= 0:
        model.to_gpu()
        x = chainer.cuda.to_gpu(x)

    if not only_output:
        # Also emit the input and output produced by Chainer so the
        # directory can be used directly as an ONNX test dataset.
        export_testcase(
            model, x, output_path,
            output_names=('locs', 'objs', 'confs'))
    else:
        os.makedirs(output_path, exist_ok=True)
        export(
            model, x, filename=os.path.join(output_path, 'model.onnx'),
            output_names=('locs', 'objs', 'confs'))
示例7: _get_msk
# 需要导入模块: from chainercv import utils [as 别名]
# 或者: from chainercv.utils import read_image [as 别名]
def _get_msk(self, i):
    """Return the ``i``-th foreground mask as a boolean (H, W) array."""
    img_path = os.path.join(self.base_dir, self.img_paths[i].rstrip())
    # The mask path mirrors the image path with substituted components.
    msk_path = img_path.replace('JPEGImages', 'mask').replace(
        '/00', '/').replace('.jpg', '.png')
    # read_image with color=False yields (1, H, W); take channel 0 and
    # binarize: any nonzero pixel counts as foreground.
    return read_image(msk_path, color=False)[0] > 0
示例8: _get_img
# 需要导入模块: from chainercv import utils [as 别名]
# 或者: from chainercv.utils import read_image [as 别名]
def _get_img(self, i):
    """Load the video frame image corresponding to index ``i``."""
    video_id, frame_id = self._get_ids(i)
    video_anno = self.annos[self.video_names[video_id]]
    frame_path = os.path.join(self.img_dir, video_anno['img_names'][frame_id])
    return read_image(frame_path)
示例9: main
# 需要导入模块: from chainercv import utils [as 别名]
# 或者: from chainercv.utils import read_image [as 别名]
def main():
    """Predict multi-label VOC classes for an image and display it."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', type=int, default=-1)
    parser.add_argument('--pretrained-model')
    parser.add_argument('image')
    args = parser.parse_args()

    model = ResNet50(
        pretrained_model=args.pretrained_model,
        n_class=len(voc_bbox_label_names))
    # Use the 'fc6' layer output as the prediction head.
    model.pick = 'fc6'
    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    img = utils.read_image(args.image, color=True)
    predict_func = PredictFunc(model, thresh=0.5)
    labels, scores = predict_func([img])
    label, score = labels[0], scores[0]

    print('predicted labels')
    for lb, sc in zip(label, score):
        print('names={} score={:.4f}'.format(voc_bbox_label_names[lb], sc))

    vis_image(img)
    plt.show()
示例10: main
# 需要导入模块: from chainercv import utils [as 别名]
# 或者: from chainercv.utils import read_image [as 别名]
def main():
    """Visualize PSPNet semantic segmentation over SBD-augmented VOC images.

    Iterates over dataset indices 1..99, writes each predicted label map
    to ``<i>.png`` and shows the input next to the colorized segmentation.
    """
    # FIX: the original re-executed this import on every loop iteration;
    # import once at function scope instead.
    from chainercv.utils import write_image

    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', '-g', type=int, default=-1)
    parser.add_argument('--pretrained-model')
    parser.add_argument('--input-size', type=int, default=448)
    args = parser.parse_args()

    label_names = voc_semantic_segmentation_label_names
    colors = voc_semantic_segmentation_label_colors
    n_class = len(label_names)
    # NOTE(review): input_size is computed but never used below — confirm
    # whether it should be passed to the model.
    input_size = (args.input_size, args.input_size)

    model = get_pspnet_resnet50(n_class)
    chainer.serializers.load_npz(args.pretrained_model, model)
    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu(args.gpu)

    dataset = get_sbd_augmented_voc()
    for i in range(1, 100):
        img = dataset[i][0]
        # img = read_image(args.image)
        labels = model.predict([img])
        label = labels[0]

        # Persist the raw label map (with a leading channel axis) as PNG.
        write_image(label[None], '{}.png'.format(i))

        fig = plt.figure()
        ax1 = fig.add_subplot(1, 2, 1)
        vis_image(img, ax=ax1)
        ax2 = fig.add_subplot(1, 2, 2)
        ax2, legend_handles = vis_semantic_segmentation(
            img, label, label_names, colors, ax=ax2)
        ax2.legend(handles=legend_handles, bbox_to_anchor=(1, 1), loc=2)
        plt.show()
示例11: _get_image
# 需要导入模块: from chainercv import utils [as 别名]
# 或者: from chainercv.utils import read_image [as 别名]
def _get_image(self, i):
    """Load the JPEG image for the ``i``-th example as a color image."""
    path = os.path.join(self.data_dir, 'JPEGImages', self.ids[i] + '.jpg')
    return read_image(path, color=True)
示例12: _get_label
# 需要导入模块: from chainercv import utils [as 别名]
# 或者: from chainercv.utils import read_image [as 别名]
def _get_label(self, i):
    """Load the ``i``-th segmentation label map as an int32 (H, W) array.

    Pixels with value 255 (presumably the VOC 'ignore' border — confirm)
    are remapped to -1.
    """
    label_path = os.path.join(
        self.data_dir, 'SegmentationClass', self.ids[i] + '.png')
    # read_image returns (1, H, W); channel 0 is a view, so the in-place
    # remapping below behaves exactly as operating on the full array.
    label = read_image(label_path, dtype=np.int32, color=False)[0]
    label[label == 255] = -1
    return label
示例13: main
# 需要导入模块: from chainercv import utils [as 别名]
# 或者: from chainercv.utils import read_image [as 别名]
def main():
    """Run SSD object detection on an image and display the boxes."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model', choices=('ssd300', 'ssd512'), default='ssd300')
    parser.add_argument('--gpu', type=int, default=-1)
    parser.add_argument('--pretrained-model')
    parser.add_argument(
        '--dataset', choices=('voc',), default='voc')
    parser.add_argument('image')
    args = parser.parse_args()

    # argparse's `choices` guarantees args.model is one of these keys.
    cls = {'ssd300': SSD300, 'ssd512': SSD512}[args.model]

    if args.dataset == 'voc':
        if args.pretrained_model is None:
            args.pretrained_model = 'voc0712'
        label_names = voc_bbox_label_names

    model = cls(n_fg_class=len(label_names),
                pretrained_model=args.pretrained_model)
    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    img = utils.read_image(args.image, color=True)
    bboxes, labels, scores = model.predict([img])
    bbox, label, score = bboxes[0], labels[0], scores[0]

    vis_bbox(
        img, bbox, label, score, label_names=label_names)
    plt.show()
示例14: main
# 需要导入模块: from chainercv import utils [as 别名]
# 或者: from chainercv.utils import read_image [as 别名]
def main():
    """Run SegNetBasic semantic segmentation on an image and display it."""
    chainer.config.train = False

    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', type=int, default=-1)
    parser.add_argument('--pretrained-model')
    parser.add_argument('--dataset', choices=('camvid',), default='camvid')
    parser.add_argument('image')
    args = parser.parse_args()

    if args.dataset == 'camvid':
        if args.pretrained_model is None:
            args.pretrained_model = 'camvid'
        label_names = camvid_label_names
        colors = camvid_label_colors

    model = SegNetBasic(
        n_class=len(label_names),
        pretrained_model=args.pretrained_model)
    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    img = utils.read_image(args.image, color=True)
    label = model.predict([img])[0]

    fig = plt.figure()
    ax1 = fig.add_subplot(1, 2, 1)
    vis_image(img, ax=ax1)
    ax2 = fig.add_subplot(1, 2, 2)
    # Do not overlay the label image on the color image
    vis_semantic_segmentation(None, label, label_names, colors, ax=ax2)
    plt.show()
示例15: main
# 需要导入模块: from chainercv import utils [as 别名]
# 或者: from chainercv.utils import read_image [as 别名]
def main():
    """Run a YOLO detector on an image and display the bounding boxes."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model', choices=('yolo_v2', 'yolo_v2_tiny', 'yolo_v3'),
        default='yolo_v2')
    parser.add_argument('--gpu', type=int, default=-1)
    parser.add_argument('--pretrained-model')
    parser.add_argument(
        '--dataset', choices=('voc',), default='voc')
    parser.add_argument('image')
    args = parser.parse_args()

    # argparse's `choices` guarantees args.model is one of these keys.
    cls = {
        'yolo_v2': YOLOv2,
        'yolo_v2_tiny': YOLOv2Tiny,
        'yolo_v3': YOLOv3,
    }[args.model]

    if args.dataset == 'voc':
        if args.pretrained_model is None:
            args.pretrained_model = 'voc0712'
        label_names = voc_bbox_label_names

    model = cls(n_fg_class=len(label_names),
                pretrained_model=args.pretrained_model)
    if args.gpu >= 0:
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()

    img = utils.read_image(args.image, color=True)
    bboxes, labels, scores = model.predict([img])
    bbox, label, score = bboxes[0], labels[0], scores[0]

    vis_bbox(
        img, bbox, label, score, label_names=label_names)
    plt.show()