

Python Image.BILINEAR Attribute Code Examples

This article collects typical usage examples of the PIL.Image.BILINEAR attribute in Python. If you are wondering what Image.BILINEAR does, how to use it, or what real-world code that uses it looks like, the curated examples below should help. You can also explore further usage examples from the PIL.Image module, where this attribute is defined.


Fifteen code examples of the Image.BILINEAR attribute are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
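Before diving into the project code, here is a minimal usage sketch (not taken from any of the projects below): Image.BILINEAR simply selects bilinear interpolation when resizing a PIL image. The file names are placeholders for illustration.

from PIL import Image

# open an image and resize it to 256x256 with bilinear interpolation
# 'input.jpg' / 'output.jpg' are placeholder paths
img = Image.open('input.jpg')
resized = img.resize((256, 256), Image.BILINEAR)
resized.save('output.jpg')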

Example 1: __call__

# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import BILINEAR [as alias]
def __call__(self, sample):
        img = sample['image']
        mask = sample['label']
        assert img.size == mask.size
        w, h = img.size

        # if one side already matches the target size, return the sample unchanged
        if (w >= h and w == self.size[1]) or (h >= w and h == self.size[0]):
            return {'image': img,
                    'label': mask}
        # otherwise resize both image and mask to the target size (oh, ow)
        oh, ow = self.size
        img = img.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)

        return {'image': img,
                'label': mask} 
Author: songdejia, Project: DeepLab_v3_plus, Lines of code: 19, Source: transform.py
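Example 1 above is a single method lifted out of a transform class, so the surrounding class is not shown. A minimal sketch of what such a wrapper might look like (the class name FixedResize and the sample-dict convention are assumptions for illustration, not code from the DeepLab_v3_plus repository):

from PIL import Image

class FixedResize(object):
    """Resize image and mask in a {'image': ..., 'label': ...} sample to a fixed size."""
    def __init__(self, size):
        self.size = size  # (height, width), matching the (oh, ow) unpacking above

    def __call__(self, sample):
        img, mask = sample['image'], sample['label']
        oh, ow = self.size
        img = img.resize((ow, oh), Image.BILINEAR)   # smooth interpolation for the image
        mask = mask.resize((ow, oh), Image.NEAREST)  # nearest neighbour keeps label ids intact
        return {'image': img, 'label': mask}

# usage sketch:
# sample = {'image': Image.open('img.png'), 'label': Image.open('mask.png')}
# sample = FixedResize((512, 512))(sample)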

Example 2: __call__

# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import BILINEAR [as alias]
def __call__(self, sample):
        img = sample['image']
        mask = sample['label']
        w, h = img.size
        if w > h:
            oh = self.crop_size
            ow = int(1.0 * w * oh / h)
        else:
            ow = self.crop_size
            oh = int(1.0 * h * ow / w)
        img = img.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        # center crop
        w, h = img.size
        x1 = int(round((w - self.crop_size) / 2.))
        y1 = int(round((h - self.crop_size) / 2.))
        img = img.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))
        mask = mask.crop((x1, y1, x1 + self.crop_size, y1 + self.crop_size))

        return {'image': img,
                'label': mask} 
Author: clovaai, Project: overhaul-distillation, Lines of code: 23, Source: custom_transforms.py

Example 3: resized_crop

# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import BILINEAR [as alias]
def resized_crop(img, i, j, h, w, size, interpolation=Image.BILINEAR):
    """Crop the given PIL Image and resize it to desired size.
    Notably used in :class:`~torchvision.transforms.RandomResizedCrop`.
    Args:
        img (PIL Image): Image to be cropped.
        i (int): Vertical component (row) of the top-left corner of the crop box.
        j (int): Horizontal component (column) of the top-left corner of the crop box.
        h (int): Height of the cropped image.
        w (int): Width of the cropped image.
        size (sequence or int): Desired output size. Same semantics as ``resize``.
        interpolation (int, optional): Desired interpolation. Default is
            ``PIL.Image.BILINEAR``.
    Returns:
        PIL Image: Cropped image.
    """
    img = crop(img, i, j, h, w)
    img = resize(img, size, interpolation)
    return img 
Author: lRomul, Project: argus-freesound, Lines of code: 20, Source: random_resized_crop.py
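resized_crop relies on crop and resize helpers defined elsewhere in the same module. A self-contained sketch of plausible PIL-only versions of those helpers (an assumption about their behaviour, not the repository's exact code):

from PIL import Image

def crop(img, i, j, h, w):
    # PIL's crop box is (left, upper, right, lower); i is the top row, j the left column
    return img.crop((j, i, j + w, i + h))

def resize(img, size, interpolation=Image.BILINEAR):
    # int size: scale the shorter side to `size`, keeping the aspect ratio;
    # (h, w) sequence: resize to exactly that size
    if isinstance(size, int):
        w, h = img.size
        if w < h:
            return img.resize((size, int(size * h / w)), interpolation)
        return img.resize((int(size * w / h), size), interpolation)
    return img.resize(size[::-1], interpolation)

# usage sketch: crop a 100x100 patch whose top-left corner is at row 10, column 20,
# then resize it to 224x224 with bilinear interpolation
# patch = resized_crop(Image.open('input.jpg'), i=10, j=20, h=100, w=100, size=224)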

Example 4: _load_img

# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import BILINEAR [as alias]
def _load_img(self, image_path):
        image = Image.open(image_path)
        model_input_width = model_utils.ModelData.get_input_width()
        model_input_height = model_utils.ModelData.get_input_height()
        # Note: Pillow's bilinear interpolation is slightly different from
        # TensorFlow's, so if the network receives an image that is not
        # 300x300, its output may differ from what TensorFlow would produce
        image_resized = image.resize(
            size=(model_input_width, model_input_height),
            resample=Image.BILINEAR
        )
        img_np = self._load_image_into_numpy_array(image_resized)
        # HWC -> CHW
        img_np = img_np.transpose((2, 0, 1))
        # Normalize to [-1.0, 1.0] interval (expected by model)
        img_np = (2.0 / 255.0) * img_np - 1.0
        img_np = img_np.ravel()
        return img_np


# This class is similar to TRTInference, but it manages Tensorflow 
Author: aimuch, Project: iAI, Lines of code: 24, Source: inference.py

Example 5: changeScale

# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import BILINEAR [as alias]
def changeScale(self, img, size, interpolation=Image.BILINEAR):

        if not _is_pil_image(img):
            raise TypeError(
                'img should be PIL Image. Got {}'.format(type(img)))
        if not (isinstance(size, int) or (isinstance(size, collections.abc.Iterable) and len(size) == 2)):
            raise TypeError('Got inappropriate size arg: {}'.format(size))

        if isinstance(size, int):
            w, h = img.size
            if (w <= h and w == size) or (h <= w and h == size):
                return img
            if w < h:
                ow = size
                oh = int(size * h / w)
                return img.resize((ow, oh), interpolation)
            else:
                oh = size
                ow = int(size * w / h)
                return img.resize((ow, oh), interpolation)
        else:
            return img.resize(size[::-1], interpolation) 
Author: JunjH, Project: Visualizing-CNNs-for-monocular-depth-estimation, Lines of code: 24, Source: nyu_transform.py

Example 6: __call__

# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import BILINEAR [as alias]
def __call__(self, img, mask):
        if self.padding > 0:
            img = ImageOps.expand(img, border=self.padding, fill=0)
            mask = ImageOps.expand(mask, border=self.padding, fill=0)

        assert img.size == mask.size
        w, h = img.size
        th, tw = self.size
        if w == tw and h == th:
            return img, mask
        if w < tw or h < th:
            return img.resize((tw, th), Image.BILINEAR), mask.resize((tw, th), Image.NEAREST)

        x1 = random.randint(0, w - tw)
        y1 = random.randint(0, h - th)
        return img.crop((x1, y1, x1 + tw, y1 + th)), mask.crop((x1, y1, x1 + tw, y1 + th)) 
Author: zhechen, Project: PLARD, Lines of code: 18, Source: augmentations.py

Example 7: resize_image

# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import BILINEAR [as alias]
def resize_image(data, sz=(256, 256)):
    """
    Resize image. Use this resize logic (rather than Caffe's) for best results,
    since it was used to generate the training dataset.
    :param str data:
        The image data
    :param tuple sz:
        The resized image dimensions
    :returns bytearray:
        A byte array with the resized image
    """
    img_data = str(data)
    im = Image.open(StringIO(img_data))
    if im.mode != "RGB":
        im = im.convert('RGB')
    imr = im.resize(sz, resample=Image.BILINEAR)
    fh_im = StringIO()
    imr.save(fh_im, format='JPEG')
    fh_im.seek(0)
    return bytearray(fh_im.read()) 
Author: yahoo, Project: open_nsfw, Lines of code: 22, Source: classify_nsfw.py
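The snippet above targets Python 2 (str(data), StringIO). Under Python 3 an equivalent sketch might use io.BytesIO instead (a hypothetical port, not code from the open_nsfw repository):

import io
from PIL import Image

def resize_image_py3(data, sz=(256, 256)):
    """Resize raw image bytes with bilinear interpolation and return JPEG bytes."""
    im = Image.open(io.BytesIO(data))
    if im.mode != 'RGB':
        im = im.convert('RGB')
    imr = im.resize(sz, resample=Image.BILINEAR)
    out = io.BytesIO()
    imr.save(out, format='JPEG')
    return bytearray(out.getvalue())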

Example 8: __getitem__

# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import BILINEAR [as alias]
def __getitem__(self, index):
        id = self.ids[index]
        filename = '{:05d}.png'.format(id)
        img_path = os.path.join(self.root, 'images', filename)
        label_path = os.path.join(self.root, 'labels', filename)
        img = Image.open(img_path).convert('RGB')
        target = Image.open(label_path)
        img = img.resize(target.size, resample=Image.BILINEAR)
        if self.transform is not None:
            img = self.transform(img)
        if self.remap_labels:
            target = np.asarray(target)
            target = remap_labels_to_train_ids(target)
            #target = self.label2train(target)
            target = Image.fromarray(target, 'L')
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target 
Author: jhoffman, Project: cycada_release, Lines of code: 20, Source: cyclegta5.py

Example 9: __call__

# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import BILINEAR [as alias]
def __call__(self, input, target):
        # apply the same spatial transforms to both the image and the label
        if self.reshape_size is not None:
            input = input.resize(self.reshape_size, Image.BILINEAR)
            target = target.resize(self.reshape_size, Image.NEAREST)

        if self.augment:
            input, target = RandomCrop(self.crop_size)(input, target)  # RandomCrop image and label in the same area
            input, target = self.flip(input, target)                   # RandomFlip for both cropped image and label
            input, target = self.rotate(input, target)
        else:
            input, target = CenterCrop(self.crop_size)(input, target)  # CenterCrop for the validation data

        input = ToTensor()(input)
        input = Normalize([.485, .456, .406], [.229, .224, .225])(input)  # normalize with the ImageNet mean/std
        target = torch.from_numpy(np.array(target)).long().unsqueeze(0)

        return input, target 
Author: mapleneverfade, Project: pytorch-semantic-segmentation, Lines of code: 21, Source: transform.py

Example 10: _val_sync_transform

# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import BILINEAR [as alias]
def _val_sync_transform(self, img, mask):
        outsize = self.crop_size
        short_size = outsize
        w, h = img.size
        if w > h:
            oh = short_size
            ow = int(1.0 * w * oh / h)
        else:
            ow = short_size
            oh = int(1.0 * h * ow / w)
        img = img.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        # center crop
        w, h = img.size
        x1 = int(round((w - outsize) / 2.))
        y1 = int(round((h - outsize) / 2.))
        img = img.crop((x1, y1, x1 + outsize, y1 + outsize))
        mask = mask.crop((x1, y1, x1 + outsize, y1 + outsize))
        # final transform
        img, mask = self._img_transform(img), self._mask_transform(mask)
        return img, mask 
Author: AceCoooool, Project: LEDNet, Lines of code: 23, Source: base_seg.py

Example 11: _val_sync_transform

# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import BILINEAR [as alias]
def _val_sync_transform(self, img, mask):
        outsize = self.crop_size
        short_size = min(outsize)
        w, h = img.size
        if w > h:
            oh = short_size
            ow = int(1.0 * w * oh / h)
        else:
            ow = short_size
            oh = int(1.0 * h * ow / w)
        img = img.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        # center crop
        w, h = img.size
        x1 = int(round((w - outsize[1]) / 2.))
        y1 = int(round((h - outsize[0]) / 2.))
        img = img.crop((x1, y1, x1 + outsize[1], y1 + outsize[0]))
        mask = mask.crop((x1, y1, x1 + outsize[1], y1 + outsize[0]))

        # final transform
        img, mask = self._img_transform(img), self._mask_transform(mask)
        return img, mask 
Author: LikeLy-Journey, Project: SegmenTron, Lines of code: 24, Source: seg_data_base.py

Example 12: open_base_img

# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import BILINEAR [as alias]
def open_base_img(full_profile, res, base_color, color):
    # get base image according to profile and perceptual gray of key color
    base_num = str([0xE0, 0xB0, 0x80, 0x50, 0x20].index(base_color) + 1)

    # open image and convert to Lab
    with Image.open('images/{0}_{1}{2}.png'.format(*full_profile, base_num)) as img:
        key_img = img.resize(tuple(int(s * res / 200) for s in img.size), resample=Image.BILINEAR).convert('RGBA')
    if full_profile[1] in ('ISO', 'BIGENTER'): alpha = key_img.split()[-1]
    l, a, b = ImageCms.applyTransform(key_img, rgb2lab_transform).split()

    # convert key color to Lab
    # a and b should be scaled by 128/100, but desaturation looks more natural
    rgb_color = color_objects.sRGBColor(*ImageColor.getrgb(color), is_upscaled=True)
    lab_color = color_conversions.convert_color(rgb_color, color_objects.LabColor)
    l1, a1, b1 = lab_color.get_value_tuple()
    l1, a1, b1 = int(l1 * 256 / 100), int(a1 + 128), int(b1 + 128)

    # change Lab of base image to match that of key color
    l = ImageMath.eval('convert(l + l1 - l_avg, "L")', l=l, l1=l1, l_avg=base_color)
    a = ImageMath.eval('convert(a + a1 - a, "L")', a=a, a1=a1)
    b = ImageMath.eval('convert(b + b1 - b, "L")', b=b, b1=b1)

    key_img = ImageCms.applyTransform(Image.merge('LAB', (l, a, b)), lab2rgb_transform).convert('RGBA')
    if full_profile[1] in ('ISO', 'BIGENTER'): key_img.putalpha(alpha)
    return key_img 
Author: CQCumbers, Project: kle_render, Lines of code: 27, Source: key.py
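Example 12 uses module-level rgb2lab_transform and lab2rgb_transform objects that are created elsewhere in the project. A sketch of how such colour transforms can be built with Pillow's ImageCms module (an assumption about the setup, not the repository's exact code):

from PIL import ImageCms

# build sRGB <-> Lab colour transforms once, then reuse them with ImageCms.applyTransform
srgb_profile = ImageCms.createProfile('sRGB')
lab_profile = ImageCms.createProfile('LAB')
rgb2lab_transform = ImageCms.buildTransformFromOpenProfiles(srgb_profile, lab_profile, 'RGB', 'LAB')
lab2rgb_transform = ImageCms.buildTransformFromOpenProfiles(lab_profile, srgb_profile, 'LAB', 'RGB')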

Example 13: make_request_json

# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import BILINEAR [as alias]
def make_request_json(self, uri, output_json):
    """Produces a JSON request suitable to send to CloudML Prediction API.

    Args:
      uri: The input image URI.
      output_json: Path of the output JSON file where the request will be written.
    """
    def _open_file_read_binary(uri):
      try:
        return file_io.FileIO(uri, mode='rb')
      except errors.InvalidArgumentError:
        return file_io.FileIO(uri, mode='r')

    with open(output_json, 'w') as outf:
      with _open_file_read_binary(uri) as f:
        image_bytes = f.read()
        image = Image.open(io.BytesIO(image_bytes)).convert('RGB')
        image = image.resize((299, 299), Image.BILINEAR)
        resized_image = io.BytesIO()
        image.save(resized_image, format='JPEG')
        encoded_image = base64.b64encode(resized_image.getvalue())
        row = json.dumps({'key': uri, 'image_bytes': {'b64': encoded_image}})
        outf.write(row)
        outf.write('\n') 
Author: GoogleCloudPlatform, Project: cloudml-samples, Lines of code: 26, Source: pipeline.py

Example 14: write

# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import BILINEAR [as alias]
def write(self, buf):
        img = Image.frombytes('RGB', (64, 64), buf)
        img = img.resize((16, 16), Image.BILINEAR)

        for x in range(16):
            for y in range(16):
                r, g, b = img.getpixel((x, y))
                self.hat.set_pixel(x, y, r, g, b)

        self.hat.show() 
Author: pimoroni, Project: unicorn-hat-hd, Lines of code: 12, Source: camera.py

Example 15: convert_mat_to_images

# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import BILINEAR [as alias]
def convert_mat_to_images(args):
    '''convert the caltech101 mat file to images
    Examples
    --------
    python convert_data.py --dataset /home/ubuntu/datasets/caltech101/data/caltech101_silhouettes_28.mat --save_path /home/ubuntu/datasets/caltech101/data/ --invert --height 32 --width 32
    '''
    dataset = scipy.io.loadmat("{}/{}".format(args.save_path, args.dataset))

    # image pixel data
    X = dataset['X']

    # image class labels (not used in this project)
    Y = dataset['Y']

    total_image = X.shape[0]

    h=args.height
    w=args.width

    for i in range(total_image):
        img = X[i]
        img = np.reshape(img, (28, 28))
        if args.invert:
            img = (1-img)*255
        else:
            img = img*255
        img = Image.fromarray(img.astype(np.uint8), 'L')  # cast so the 'L' mode matches the buffer dtype
        img = img.rotate(-90)
        img = img.resize([h, w], Image.BILINEAR)
        img.save(args.save_path + '/img' + str(i) + '.png') 
Author: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines of code: 32, Source: convert_data.py


Note: The PIL.Image.BILINEAR attribute examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many programmers; copyright of the source code belongs to the original authors, and distribution or use should follow the corresponding project's license. Do not reproduce without permission.