This article collects typical usage examples of the PIL.Image.BILINEAR attribute in Python. If you are wondering what Image.BILINEAR is for, how to use it, or simply want to see it in working code, the curated attribute examples below may help. You can also explore further usage examples of the class it belongs to, PIL.Image.
The following presents 15 code examples of Image.BILINEAR, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
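Before the collected examples, here is a minimal standalone sketch of the attribute: Image.BILINEAR simply selects bilinear interpolation as the resampling filter for operations such as resize. The file names below are placeholders.

from PIL import Image

# Open an image and resize it to 224x224 using bilinear interpolation.
# 'example.jpg' is a placeholder path.
img = Image.open('example.jpg')
resized = img.resize((224, 224), resample=Image.BILINEAR)
resized.save('example_224.jpg')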
Example 1: __call__
# Required imports: from PIL import Image [as alias]
# Or: from PIL.Image import BILINEAR [as alias]
def __call__(self, sample):
    img = sample['image']
    mask = sample['label']
    assert img.size == mask.size
    w, h = img.size
    # if the longer side already matches the target size (e.g. 512)
    if (w >= h and w == self.size[1]) or (h >= w and h == self.size[0]):
        return {'image': img,
                'label': mask}
    # otherwise resize both image and mask to the target size (e.g. 512 x 512)
    oh, ow = self.size
    img = img.resize((ow, oh), Image.BILINEAR)
    mask = mask.resize((ow, oh), Image.NEAREST)
    return {'image': img,
            'label': mask}
Example 2: __call__
# Required imports: from PIL import Image [as alias]
# Or: from PIL.Image import BILINEAR [as alias]
def __call__(self, sample):
    img = sample['image']
    mask = sample['label']
    w, h = img.size
    # resize so that the shorter side equals crop_size
    if w > h:
        oh = self.crop_size
        ow = int(1.0 * w * oh / h)
    else:
        ow = self.crop_size
        oh = int(1.0 * h * ow / w)
    img = img.resize((ow, oh), Image.BILINEAR)
    mask = mask.resize((ow, oh), Image.NEAREST)
    # center crop
    w, h = img.size
    x1 = int(round((w - self.crop_size) / 2.))
    y1 = int(round((h - self.crop_size) / 2.))
    img = img.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))
    mask = mask.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))
    return {'image': img,
            'label': mask}
Example 3: resized_crop
# Required imports: from PIL import Image [as alias]
# Or: from PIL.Image import BILINEAR [as alias]
def resized_crop(img, i, j, h, w, size, interpolation=Image.BILINEAR):
    """Crop the given PIL Image and resize it to the desired size.

    Notably used in :class:`~torchvision.transforms.RandomResizedCrop`.

    Args:
        img (PIL Image): Image to be cropped.
        i (int): Vertical coordinate of the upper-left corner of the crop.
        j (int): Horizontal coordinate of the upper-left corner of the crop.
        h (int): Height of the cropped image.
        w (int): Width of the cropped image.
        size (sequence or int): Desired output size. Same semantics as ``resize``.
        interpolation (int, optional): Desired interpolation. Default is
            ``PIL.Image.BILINEAR``.

    Returns:
        PIL Image: Cropped and resized image.
    """
    img = crop(img, i, j, h, w)
    img = resize(img, size, interpolation)
    return img
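A possible usage sketch for the helper above, assuming the torchvision-style functional helpers crop and resize it relies on are in scope; the image path and coordinates are placeholders:

from PIL import Image

img = Image.open('photo.jpg')  # placeholder path
# Take a 100x100 crop whose upper-left corner is at row i=10, column j=20,
# then resize that crop to 224x224 with bilinear interpolation.
patch = resized_crop(img, 10, 20, 100, 100, (224, 224), interpolation=Image.BILINEAR)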
Example 4: _load_img
# Required imports: from PIL import Image [as alias]
# Or: from PIL.Image import BILINEAR [as alias]
def _load_img(self, image_path):
    image = Image.open(image_path)
    model_input_width = model_utils.ModelData.get_input_width()
    model_input_height = model_utils.ModelData.get_input_height()
    # Note: The bilinear interpolation used by Pillow is slightly
    # different from the one used by Tensorflow, so if the network receives
    # an image that is not 300x300, the network output may differ
    # from the output produced by Tensorflow
    image_resized = image.resize(
        size=(model_input_width, model_input_height),
        resample=Image.BILINEAR
    )
    img_np = self._load_image_into_numpy_array(image_resized)
    # HWC -> CHW
    img_np = img_np.transpose((2, 0, 1))
    # Normalize to the [-1.0, 1.0] interval (expected by the model)
    img_np = (2.0 / 255.0) * img_np - 1.0
    img_np = img_np.ravel()
    return img_np

# This class is similar to TRTInference, but it manages Tensorflow
Example 5: changeScale
# Required imports: from PIL import Image [as alias]
# Or: from PIL.Image import BILINEAR [as alias]
def changeScale(self, img, size, interpolation=Image.BILINEAR):
    if not _is_pil_image(img):
        raise TypeError(
            'img should be PIL Image. Got {}'.format(type(img)))
    # collections.Iterable is collections.abc.Iterable on Python 3.10+
    if not (isinstance(size, int) or (isinstance(size, collections.Iterable) and len(size) == 2)):
        raise TypeError('Got inappropriate size arg: {}'.format(size))

    if isinstance(size, int):
        # scale the shorter side to `size`, keeping the aspect ratio
        w, h = img.size
        if (w <= h and w == size) or (h <= w and h == size):
            return img
        if w < h:
            ow = size
            oh = int(size * h / w)
            return img.resize((ow, oh), interpolation)
        else:
            oh = size
            ow = int(size * w / h)
            return img.resize((ow, oh), interpolation)
    else:
        # reverse the size pair: PIL's resize expects (width, height)
        return img.resize(size[::-1], interpolation)
Example 6: __call__
# Required imports: from PIL import Image [as alias]
# Or: from PIL.Image import BILINEAR [as alias]
def __call__(self, img, mask):
    if self.padding > 0:
        img = ImageOps.expand(img, border=self.padding, fill=0)
        mask = ImageOps.expand(mask, border=self.padding, fill=0)

    assert img.size == mask.size
    w, h = img.size
    th, tw = self.size
    if w == tw and h == th:
        return img, mask
    # if the image is smaller than the crop size, resize instead of cropping
    if w < tw or h < th:
        return img.resize((tw, th), Image.BILINEAR), mask.resize((tw, th), Image.NEAREST)

    # random crop
    x1 = random.randint(0, w - tw)
    y1 = random.randint(0, h - th)
    return img.crop((x1, y1, x1 + tw, y1 + th)), mask.crop((x1, y1, x1 + tw, y1 + th))
Example 7: resize_image
# Required imports: from PIL import Image [as alias]
# Or: from PIL.Image import BILINEAR [as alias]
def resize_image(data, sz=(256, 256)):
    """
    Resize an image. Please use this resize logic for best results instead of the
    caffe one, since it was used to generate the training dataset
    :param str data:
        The image data
    :param tuple sz:
        The resized image dimensions
    :returns bytearray:
        A byte array with the resized image
    """
    # Python 2 style I/O; on Python 3, io.BytesIO would be used instead of StringIO
    img_data = str(data)
    im = Image.open(StringIO(img_data))
    if im.mode != "RGB":
        im = im.convert('RGB')
    imr = im.resize(sz, resample=Image.BILINEAR)
    fh_im = StringIO()
    imr.save(fh_im, format='JPEG')
    fh_im.seek(0)
    return bytearray(fh_im.read())
Example 8: __getitem__
# Required imports: from PIL import Image [as alias]
# Or: from PIL.Image import BILINEAR [as alias]
def __getitem__(self, index):
    id = self.ids[index]
    filename = '{:05d}.png'.format(id)
    img_path = os.path.join(self.root, 'images', filename)
    label_path = os.path.join(self.root, 'labels', filename)

    img = Image.open(img_path).convert('RGB')
    target = Image.open(label_path)
    # match the image resolution to the label map
    img = img.resize(target.size, resample=Image.BILINEAR)
    if self.transform is not None:
        img = self.transform(img)
    if self.remap_labels:
        target = np.asarray(target)
        target = remap_labels_to_train_ids(target)
        #target = self.label2train(target)
        target = Image.fromarray(target, 'L')
    if self.target_transform is not None:
        target = self.target_transform(target)
    return img, target
Example 9: __call__
# Required imports: from PIL import Image [as alias]
# Or: from PIL.Image import BILINEAR [as alias]
def __call__(self, input, target):
    # apply the same transforms to both the image and its label
    if self.reshape_size is not None:
        input = input.resize(self.reshape_size, Image.BILINEAR)
        target = target.resize(self.reshape_size, Image.NEAREST)

    if self.augment:
        input, target = RandomCrop(self.crop_size)(input, target)  # RandomCrop image and label over the same area
        input, target = self.flip(input, target)                   # RandomFlip for both the cropped image and label
        input, target = self.rotate(input, target)
    else:
        input, target = CenterCrop(self.crop_size)(input, target)  # CenterCrop for the validation data

    input = ToTensor()(input)
    Normalize([.485, .456, .406], [.229, .224, .225])(input)  # normalize with the ImageNet statistics
    target = torch.from_numpy(np.array(target)).long().unsqueeze(0)
    return input, target
Example 10: _val_sync_transform
# Required imports: from PIL import Image [as alias]
# Or: from PIL.Image import BILINEAR [as alias]
def _val_sync_transform(self, img, mask):
    outsize = self.crop_size
    short_size = outsize
    # resize so that the shorter side equals short_size
    w, h = img.size
    if w > h:
        oh = short_size
        ow = int(1.0 * w * oh / h)
    else:
        ow = short_size
        oh = int(1.0 * h * ow / w)
    img = img.resize((ow, oh), Image.BILINEAR)
    mask = mask.resize((ow, oh), Image.NEAREST)
    # center crop
    w, h = img.size
    x1 = int(round((w - outsize) / 2.))
    y1 = int(round((h - outsize) / 2.))
    img = img.crop((x1, y1, x1 + outsize, y1 + outsize))
    mask = mask.crop((x1, y1, x1 + outsize, y1 + outsize))
    # final transform
    img, mask = self._img_transform(img), self._mask_transform(mask)
    return img, mask
Example 11: _val_sync_transform
# Required imports: from PIL import Image [as alias]
# Or: from PIL.Image import BILINEAR [as alias]
def _val_sync_transform(self, img, mask):
    outsize = self.crop_size
    short_size = min(outsize)
    # resize so that the shorter side equals the smaller crop dimension
    w, h = img.size
    if w > h:
        oh = short_size
        ow = int(1.0 * w * oh / h)
    else:
        ow = short_size
        oh = int(1.0 * h * ow / w)
    img = img.resize((ow, oh), Image.BILINEAR)
    mask = mask.resize((ow, oh), Image.NEAREST)
    # center crop
    w, h = img.size
    x1 = int(round((w - outsize[1]) / 2.))
    y1 = int(round((h - outsize[0]) / 2.))
    img = img.crop((x1, y1, x1 + outsize[1], y1 + outsize[0]))
    mask = mask.crop((x1, y1, x1 + outsize[1], y1 + outsize[0]))
    # final transform
    img, mask = self._img_transform(img), self._mask_transform(mask)
    return img, mask
Example 12: open_base_img
# Required imports: from PIL import Image [as alias]
# Or: from PIL.Image import BILINEAR [as alias]
def open_base_img(full_profile, res, base_color, color):
    # get base image according to profile and perceptual gray of key color
    base_num = str([0xE0, 0xB0, 0x80, 0x50, 0x20].index(base_color) + 1)

    # open image and convert to Lab
    with Image.open('images/{0}_{1}{2}.png'.format(*full_profile, base_num)) as img:
        key_img = img.resize((int(s * res / 200) for s in img.size), resample=Image.BILINEAR).convert('RGBA')
    if full_profile[1] in ('ISO', 'BIGENTER'):
        alpha = key_img.split()[-1]
    l, a, b = ImageCms.applyTransform(key_img, rgb2lab_transform).split()

    # convert key color to Lab
    # a and b should be scaled by 128/100, but desaturation looks more natural
    rgb_color = color_objects.sRGBColor(*ImageColor.getrgb(color), is_upscaled=True)
    lab_color = color_conversions.convert_color(rgb_color, color_objects.LabColor)
    l1, a1, b1 = lab_color.get_value_tuple()
    l1, a1, b1 = int(l1 * 256 / 100), int(a1 + 128), int(b1 + 128)

    # change Lab of base image to match that of key color
    l = ImageMath.eval('convert(l + l1 - l_avg, "L")', l=l, l1=l1, l_avg=base_color)
    a = ImageMath.eval('convert(a + a1 - a, "L")', a=a, a1=a1)
    b = ImageMath.eval('convert(b + b1 - b, "L")', b=b, b1=b1)
    key_img = ImageCms.applyTransform(Image.merge('LAB', (l, a, b)), lab2rgb_transform).convert('RGBA')

    if full_profile[1] in ('ISO', 'BIGENTER'):
        key_img.putalpha(alpha)
    return key_img
Example 13: make_request_json
# Required imports: from PIL import Image [as alias]
# Or: from PIL.Image import BILINEAR [as alias]
def make_request_json(self, uri, output_json):
    """Produces a JSON request suitable to send to the CloudML Prediction API.

    Args:
      uri: The input image URI.
      output_json: Path of the output json file where the request will be written.
    """
    def _open_file_read_binary(uri):
        try:
            return file_io.FileIO(uri, mode='rb')
        except errors.InvalidArgumentError:
            return file_io.FileIO(uri, mode='r')

    with open(output_json, 'w') as outf:
        with _open_file_read_binary(uri) as f:
            image_bytes = f.read()
            image = Image.open(io.BytesIO(image_bytes)).convert('RGB')
            image = image.resize((299, 299), Image.BILINEAR)
            resized_image = io.BytesIO()
            image.save(resized_image, format='JPEG')
            encoded_image = base64.b64encode(resized_image.getvalue())
            row = json.dumps({'key': uri, 'image_bytes': {'b64': encoded_image}})
            outf.write(row)
            outf.write('\n')
Example 14: write
# Required imports: from PIL import Image [as alias]
# Or: from PIL.Image import BILINEAR [as alias]
def write(self, buf):
    # downscale the 64x64 frame buffer to the 16x16 LED matrix
    img = Image.frombytes('RGB', (64, 64), buf)
    img = img.resize((16, 16), Image.BILINEAR)

    for x in range(16):
        for y in range(16):
            r, g, b = img.getpixel((x, y))
            self.hat.set_pixel(x, y, r, g, b)

    self.hat.show()
Example 15: convert_mat_to_images
# Required imports: from PIL import Image [as alias]
# Or: from PIL.Image import BILINEAR [as alias]
def convert_mat_to_images(args):
    '''convert the caltech101 mat file to images
    Examples
    --------
    python convert_data.py --dataset /home/ubuntu/datasets/caltech101/data/caltech101_silhouettes_28.mat --save_path /home/ubuntu/datasets/caltech101/data/ --invert --height 32 --width 32
    '''
    dataset = scipy.io.loadmat("{}/{}".format(args.save_path, args.dataset))

    # image pixel data
    X = dataset['X']
    # image class labels (not used in this project)
    Y = dataset['Y']

    total_image = X.shape[0]
    h = args.height
    w = args.width

    for i in range(total_image):
        img = X[i]
        img = np.reshape(img, (28, 28))
        if args.invert:
            img = (1 - img) * 255
        else:
            img = img * 255
        img = Image.fromarray(img, 'L')
        img = img.rotate(-90)
        img = img.resize([h, w], Image.BILINEAR)
        img.save(args.save_path + '/img' + str(i) + '.png')
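A hypothetical sketch of the command-line wiring this last helper expects, with argument names inferred from the docstring example and the attribute accesses above (the real convert_data.py may differ):

import argparse

# Hypothetical argument parser matching the attributes used by convert_mat_to_images
parser = argparse.ArgumentParser(description='Convert the caltech101 .mat file to images')
parser.add_argument('--dataset', type=str, help='name of the .mat file')
parser.add_argument('--save_path', type=str, help='directory containing the .mat file and the output images')
parser.add_argument('--invert', action='store_true', help='invert foreground and background')
parser.add_argument('--height', type=int, default=32)
parser.add_argument('--width', type=int, default=32)
args = parser.parse_args()

convert_mat_to_images(args)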