This article collects typical usage examples of the Python method PIL.Image.fromarray. If you are wondering how to use Image.fromarray, how to call it in practice, or what real-world examples look like, the curated code samples below may help. You can also explore further usage examples from the PIL.Image module.
The following sections show 15 code examples of Image.fromarray, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
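Before the examples, here is a minimal sketch of the basic round trip between NumPy arrays and PIL images. The array shapes and values are illustrative assumptions, not taken from any of the projects below.

import numpy as np
from PIL import Image

# A uint8 array of shape (height, width, 3) is interpreted as an RGB image.
rgb = np.zeros((64, 64, 3), dtype=np.uint8)
img = Image.fromarray(rgb)

# A 2-D uint8 array is interpreted as a grayscale ('L') image.
gray = Image.fromarray(np.zeros((64, 64), dtype=np.uint8))

# Float arrays usually need to be rescaled and converted to uint8 first.
floats = np.random.rand(64, 64, 3)
img2 = Image.fromarray((floats * 255).astype(np.uint8))

# Converting back to NumPy is simply np.array() or np.asarray().
arr = np.array(img)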
Example 1: draw_bounding_boxes
# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import fromarray [as alias]
def draw_bounding_boxes(image, gt_boxes, im_info):
    num_boxes = gt_boxes.shape[0]
    gt_boxes_new = gt_boxes.copy()
    gt_boxes_new[:, :4] = np.round(gt_boxes_new[:, :4].copy() / im_info[2])
    disp_image = Image.fromarray(np.uint8(image[0]))

    for i in range(num_boxes):
        this_class = int(gt_boxes_new[i, 4])
        disp_image = _draw_single_box(disp_image,
                                      gt_boxes_new[i, 0],
                                      gt_boxes_new[i, 1],
                                      gt_boxes_new[i, 2],
                                      gt_boxes_new[i, 3],
                                      'N%02d-C%02d' % (i, this_class),
                                      FONT,
                                      color=STANDARD_COLORS[this_class % NUM_COLORS])

    image[0, :] = np.array(disp_image)
    return image
Author: Sunarker | Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection | Lines: 21 | Source: visualization.py
Example 2: _prepare_sample_data
# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import fromarray [as alias]
def _prepare_sample_data(self, submission_type):
    """Prepares sample data for the submission.

    Args:
        submission_type: type of the submission.
    """
    # write images
    images = np.random.randint(0, 256,
                               size=[BATCH_SIZE, 299, 299, 3], dtype=np.uint8)
    for i in range(BATCH_SIZE):
        Image.fromarray(images[i, :, :, :]).save(
            os.path.join(self._sample_input_dir, IMAGE_NAME_PATTERN.format(i)))
    # write target class for targeted attacks
    if submission_type == 'targeted_attack':
        target_classes = np.random.randint(1, 1001, size=[BATCH_SIZE])
        target_class_filename = os.path.join(self._sample_input_dir,
                                             'target_class.csv')
        with open(target_class_filename, 'w') as f:
            for i in range(BATCH_SIZE):
                f.write((IMAGE_NAME_PATTERN + ',{1}\n').format(i, target_classes[i]))
Example 3: draw_keypoints_on_image_array
# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import fromarray [as alias]
def draw_keypoints_on_image_array(image,
                                  keypoints,
                                  color='red',
                                  radius=2,
                                  use_normalized_coordinates=True):
    """Draws keypoints on an image (numpy array).

    Args:
        image: a numpy array with shape [height, width, 3].
        keypoints: a numpy array with shape [num_keypoints, 2].
        color: color to draw the keypoints with. Default is red.
        radius: keypoint radius. Default value is 2.
        use_normalized_coordinates: if True (default), treat keypoint values as
            relative to the image. Otherwise treat them as absolute.
    """
    image_pil = Image.fromarray(np.uint8(image)).convert('RGB')
    draw_keypoints_on_image(image_pil, keypoints, color, radius,
                            use_normalized_coordinates)
    np.copyto(image, np.array(image_pil))
Example 4: __call__
# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import fromarray [as alias]
def __call__(self, video):
    """
    Args:
        video (numpy array): Input video, shape (... x H x W x C), dtype uint8.

    Returns:
        numpy array: Color-jittered video with the same shape and dtype.
    """
    transforms = self.get_params(self.brightness, self.contrast, self.saturation, self.hue)

    reshaped_video = video.reshape((-1, *video.shape[-3:]))
    n_channels = video.shape[-1]
    for i in range(reshaped_video.shape[0]):
        img = reshaped_video[i]
        if n_channels == 1:
            img = img.squeeze(axis=2)
        img = Image.fromarray(img)
        for t in transforms:
            img = t(img)
        img = np.array(img)
        if n_channels == 1:
            img = img[..., np.newaxis]
        reshaped_video[i] = img

    video = reshaped_video.reshape(video.shape)
    return video
Example 5: save_images
# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import fromarray [as alias]
def save_images(images, filenames, output_dir):
    """Saves images to the output directory.

    Args:
        images: array with minibatch of images
        filenames: list of filenames without path. If this list has fewer
            entries than the minibatch, only the first len(filenames) images
            are saved.
        output_dir: directory where to save images
    """
    for i, filename in enumerate(filenames):
        # Images for the inception classifier are normalized to the [-1, 1]
        # interval, so rescale them back to [0, 255] before saving.
        with tf.gfile.Open(os.path.join(output_dir, filename), 'w') as f:
            img = (((images[i, :, :, :] + 1.0) * 0.5) * 255.0).astype(np.uint8)
            Image.fromarray(img).save(f, format='PNG')
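As a complement to Example 5, the short sketch below shows how an image saved this way could be loaded back into the [-1, 1] range that the comment refers to. The file name is a hypothetical placeholder.

from PIL import Image
import numpy as np

# Hypothetical file produced by save_images above.
img = np.array(Image.open('adversarial_0.png')).astype(np.float32)

# Invert the save-time mapping: [0, 255] -> [0, 1] -> [-1, 1].
img = img / 255.0 * 2.0 - 1.0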
Example 6: Chainer2PIL
# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import fromarray [as alias]
def Chainer2PIL(data, rescale=True):
    data = np.array(data)
    if rescale:
        data *= 256
        # data += 128
    if data.dtype != np.uint8:
        data = np.clip(data, 0, 255)
        data = data.astype(np.uint8)
    if data.shape[0] == 1:
        buf = data.astype(np.uint8).reshape((data.shape[1], data.shape[2]))
    else:
        buf = np.zeros((data.shape[1], data.shape[2], data.shape[0]), dtype=np.uint8)
        for i in range(3):
            a = data[i, :, :]
            buf[:, :, i] = a
    img = Image.fromarray(buf)
    return img
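The channel loop in Example 6 copies a CHW array into HWC order one plane at a time. For reference, a minimal sketch of the same reordering with np.transpose, assuming a 3-channel uint8 array already in CHW layout:

import numpy as np
from PIL import Image

chw = np.zeros((3, 240, 320), dtype=np.uint8)  # illustrative CHW array
hwc = np.transpose(chw, (1, 2, 0))             # reorder axes to (H, W, C)
img = Image.fromarray(hwc)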
Example 7: main
# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import fromarray [as alias]
def main():
    """Module main execution"""
    # Initialization variables - update to change your model and execution context
    model_prefix = "FCN8s_VGG16"
    epoch = 19

    # By default, MXNet will run on the CPU. Change to ctx = mx.gpu() to run on GPU.
    ctx = mx.cpu()

    fcnxs, fcnxs_args, fcnxs_auxs = mx.model.load_checkpoint(model_prefix, epoch)

    fcnxs_args["data"] = mx.nd.array(get_data(args.input), ctx)
    data_shape = fcnxs_args["data"].shape
    label_shape = (1, data_shape[2] * data_shape[3])
    fcnxs_args["softmax_label"] = mx.nd.empty(label_shape, ctx)
    executor = fcnxs.bind(ctx, fcnxs_args, args_grad=None, grad_req="null", aux_states=fcnxs_auxs)
    executor.forward(is_train=False)
    output = executor.outputs[0]
    out_img = np.uint8(np.squeeze(output.asnumpy().argmax(axis=1)))
    out_img = Image.fromarray(out_img)
    out_img.putpalette(get_palette())
    out_img.save(args.output)
Example 8: resolve
# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import fromarray [as alias]
def resolve(ctx):
    from PIL import Image
    if isinstance(ctx, list):
        ctx = [ctx[0]]
    net.load_parameters('superres.params', ctx=ctx)
    img = Image.open(opt.resolve_img).convert('YCbCr')
    y, cb, cr = img.split()
    data = mx.nd.expand_dims(mx.nd.expand_dims(mx.nd.array(y), axis=0), axis=0)
    out_img_y = mx.nd.reshape(net(data), shape=(-3, -2)).asnumpy()
    out_img_y = out_img_y.clip(0, 255)
    out_img_y = Image.fromarray(np.uint8(out_img_y[0]), mode='L')
    out_img_cb = cb.resize(out_img_y.size, Image.BICUBIC)
    out_img_cr = cr.resize(out_img_y.size, Image.BICUBIC)
    out_img = Image.merge('YCbCr', [out_img_y, out_img_cb, out_img_cr]).convert('RGB')
    out_img.save('resolved.png')
Example 9: main
# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import fromarray [as alias]
def main():
    width = 512
    height = 384

    context = Context()
    context.set_ray_type_count(1)
    context['result_buffer'] = Buffer.empty((height, width, 4), buffer_type='o', dtype=np.float32, drop_last_dim=True)

    ray_gen_program = Program('draw_color.cu', 'draw_solid_color')
    ray_gen_program['draw_color'] = np.array([0.462, 0.725, 0.0], dtype=np.float32)

    entry_point = EntryPoint(ray_gen_program)
    entry_point.launch(size=(width, height))

    result_array = context['result_buffer'].to_array()
    result_array *= 255
    result_image = Image.fromarray(result_array.astype(np.uint8)[:, :, :3])
    ImageWindow(result_image)
Example 10: addNoiseAndGray
# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import fromarray [as alias]
def addNoiseAndGray(surf):
    # https://stackoverflow.com/questions/34673424/how-to-get-numpy-array-of-rgb-colors-from-pygame-surface
    imgdata = pygame.surfarray.array3d(surf)
    imgdata = imgdata.swapaxes(0, 1)
    # print('imgdata shape %s' % imgdata.shape)  # shall be IMG_HEIGHT * IMG_WIDTH
    imgdata2 = noise_generator('s&p', imgdata)

    img2 = Image.fromarray(np.uint8(imgdata2))
    # img2.save('/home/zhichyu/Downloads/2sp.jpg')
    grayscale2 = ImageOps.grayscale(img2)
    # grayscale2.save('/home/zhichyu/Downloads/2bw2.jpg')
    # return grayscale2
    array = np.asarray(np.uint8(grayscale2))
    # print('array.shape %s' % array.shape)

    selem = disk(random.randint(0, 1))
    eroded = erosion(array, selem)
    return eroded
Example 11: main
# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import fromarray [as alias]
def main():
    # location of depth module, config and parameters
    module_fn = 'models/depth.py'
    config_fn = 'models/depth.conf'  # network structure
    params_dir = 'weights/depth'  # network parameters

    # load depth network
    machine = net.create_machine(module_fn, config_fn, params_dir)

    # demo image
    rgb = Image.open('demo_nyud_rgb.jpg')
    rgb = rgb.resize((320, 240), Image.BICUBIC)

    # build depth inference function and run
    rgb_imgs = np.asarray(rgb).reshape((1, 240, 320, 3))
    pred_depths = machine.infer_depth(rgb_imgs)

    # save prediction
    (m, M) = (pred_depths.min(), pred_depths.max())
    depth_img_np = (pred_depths[0] - m) / (M - m)
    depth_img = Image.fromarray((255 * depth_img_np).astype(np.uint8))
    depth_img.save('demo_nyud_depth_prediction.png')
Example 12: save_image
# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import fromarray [as alias]
def save_image(fn, img, **kwargs):
    '''
    Save an image img to filename fn in the current output dir.
    kwargs are the same as for PIL Image.save().
    '''
    if not isinstance(img, np.ndarray):
        img = np.array(img)
    (h, w, c) = img.shape
    if c == 1:
        img = np.concatenate((img,) * 3, axis=2)
    if img.dtype.kind == 'f':
        # float images are assumed to be in [0, 1]
        img = (img * 255).astype('uint8')
    elif img.dtype.kind in ('u', 'i'):
        # integer images are assumed to already be in [0, 255]
        img = img.astype('uint8')
    else:
        raise ValueError('bad dtype: %s' % img.dtype)
    i = Image.fromarray(img)
    with open(fn, 'wb') as f:
        i.save(f, **kwargs)
Example 13: __call__
# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import fromarray [as alias]
def __call__(self, np_image):
    """
    Args:
        np_image (numpy array): Image to be cropped and resized.

    Returns:
        numpy array: Randomly cropped and resized image.
    """
    if self.size is None:
        size = np_image.shape
    else:
        size = self.size
    image = Image.fromarray(np_image)
    i, j, h, w = self.get_params(image, self.scale, self.ratio)
    image = resized_crop(image, i, j, h, w, size, self.interpolation)
    np_image = np.array(image)
    return np_image
Example 14: save_annotation
# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import fromarray [as alias]
def save_annotation(label, filename, add_colormap=True):
    '''
    Saves the given label to an image on disk.

    Args:
        label: The numpy array to be saved. The data will be converted to
            uint8 and saved as a png image.
        filename: The output image filename, including its path.
        add_colormap: Whether to apply a color map to the label before saving.
    '''
    # Add colormap for visualizing the prediction.
    colored_label = label_to_color_image(label) if add_colormap else label
    image = Image.fromarray(colored_label.astype(dtype=np.uint8))
    image.save(filename)
Example 15: __getitem__
# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import fromarray [as alias]
def __getitem__(self, index):
    img_path = self.files[self.split][index].rstrip()
    lbl_path = os.path.join(self.annotations_base,
                            img_path.split(os.sep)[-2],
                            os.path.basename(img_path)[:-15] + 'gtFine_labelIds.png')

    _img = Image.open(img_path).convert('RGB')
    _tmp = np.array(Image.open(lbl_path), dtype=np.uint8)
    _tmp = self.encode_segmap(_tmp)
    _target = Image.fromarray(_tmp)

    sample = {'image': _img, 'label': _target}

    if self.split == 'train':
        return self.transform_tr(sample)
    elif self.split == 'val':
        return self.transform_val(sample)
    elif self.split == 'test':
        return self.transform_ts(sample)