

Python Image.Image Code Examples

This article collects typical usage examples of PIL.Image.Image in Python. If you are unsure how PIL.Image.Image is used in practice, or are looking for concrete examples, the curated code samples below may help. You can also explore further usage examples from the module it belongs to, PIL.Image.


The following presents 15 code examples of Image.Image, sorted by popularity by default. Upvoting the examples you like or find useful helps the site recommend better Python code samples.
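Several of the snippets below rely on small type-check helpers, _is_pil_image and _is_numpy_image, that are not shown in the extracts. A minimal sketch of what such helpers typically look like, built directly on the PIL.Image.Image class this article covers (assuming no alternative image backend such as accimage), is:

from PIL import Image
import numpy as np

def _is_pil_image(img):
    # PIL.Image.Image is the base class of every Pillow image object,
    # so an isinstance check against it accepts any loaded or created image.
    return isinstance(img, Image.Image)

def _is_numpy_image(img):
    # The numpy-based snippets expect a 2-D (grayscale) or 3-D (H x W x C) array.
    return isinstance(img, np.ndarray) and img.ndim in (2, 3)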

Example 1: adjust_brightness

# Required module import: from PIL import Image [as alias]
# Or: from PIL.Image import Image [as alias]
def adjust_brightness(img, brightness_factor):
    """Adjust brightness of an Image.

    Args:
        img (PIL Image): PIL Image to be adjusted.
        brightness_factor (float):  How much to adjust the brightness. Can be
            any non negative number. 0 gives a black image, 1 gives the
            original image while 2 increases the brightness by a factor of 2.

    Returns:
        PIL Image: Brightness adjusted image.
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    enhancer = ImageEnhance.Brightness(img)
    img = enhancer.enhance(brightness_factor)
    return img 
Developer: miraiaroha, Project: ACAN, Lines: 20, Source: transforms.py

Example 2: adjust_contrast

# Required module import: from PIL import Image [as alias]
# Or: from PIL.Image import Image [as alias]
def adjust_contrast(img, contrast_factor):
    """Adjust contrast of an Image.

    Args:
        img (PIL Image): PIL Image to be adjusted.
        contrast_factor (float): How much to adjust the contrast. Can be any
            non negative number. 0 gives a solid gray image, 1 gives the
            original image while 2 increases the contrast by a factor of 2.

    Returns:
        PIL Image: Contrast adjusted image.
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    enhancer = ImageEnhance.Contrast(img)
    img = enhancer.enhance(contrast_factor)
    return img 
Developer: miraiaroha, Project: ACAN, Lines: 20, Source: transforms.py

Example 3: adjust_saturation

# Required module import: from PIL import Image [as alias]
# Or: from PIL.Image import Image [as alias]
def adjust_saturation(img, saturation_factor):
    """Adjust color saturation of an image.

    Args:
        img (PIL Image): PIL Image to be adjusted.
        saturation_factor (float):  How much to adjust the saturation. 0 will
            give a black and white image, 1 will give the original image while
            2 will enhance the saturation by a factor of 2.

    Returns:
        PIL Image: Saturation adjusted image.
    """
    if not _is_pil_image(img):
        raise TypeError('img should be PIL Image. Got {}'.format(type(img)))

    enhancer = ImageEnhance.Color(img)
    img = enhancer.enhance(saturation_factor)
    return img 
Developer: miraiaroha, Project: ACAN, Lines: 20, Source: transforms.py
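Examples 1-3 share the same ImageEnhance pattern and differ only in the enhancer class. A small self-contained usage sketch (using a synthetic image so no file is needed, and assuming from PIL import ImageEnhance is also in scope inside the functions above) could be:

from PIL import Image

# Synthetic 64x64 RGB image so the snippet runs without reading a file.
img = Image.new('RGB', (64, 64), color=(120, 60, 200))

brighter = adjust_brightness(img, 1.5)      # 1.0 keeps the original, >1 brightens
flatter = adjust_contrast(img, 0.5)         # 0 would give a solid gray image
desaturated = adjust_saturation(img, 0.0)   # 0 removes all colour

print(brighter.getpixel((0, 0)), flatter.getpixel((0, 0)), desaturated.getpixel((0, 0)))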

Example 4: __call__

# Required module import: from PIL import Image [as alias]
# Or: from PIL.Image import Image [as alias]
def __call__(self, img):
        """Convert a ``numpy.ndarray`` to tensor.

        Args:
            img (numpy.ndarray): Image to be converted to tensor.

        Returns:
            Tensor: Converted image.
        """
        if not(_is_numpy_image(img)):
            raise TypeError('img should be ndarray. Got {}'.format(type(img)))

        if isinstance(img, np.ndarray):
            # handle numpy array
            if img.ndim == 3:
                img = torch.from_numpy(img.transpose((2, 0, 1)).copy())
            elif img.ndim == 2:
                img = torch.from_numpy(img.copy())
            else:
                raise RuntimeError('img should be ndarray with 2 or 3 dimensions. Got {}'.format(img.ndim))

            # backward compatibility
            #return img.float().div(255)
            return img.float() 
Developer: miraiaroha, Project: ACAN, Lines: 26, Source: transforms.py
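The __call__ above belongs to a ToTensor-style transform; a standalone sketch of the same HWC-to-CHW conversion (assuming numpy and torch are available, as in the snippet) is:

import numpy as np
import torch

# An H x W x C uint8 array, as produced by many image readers.
arr = np.random.randint(0, 256, size=(4, 5, 3), dtype=np.uint8)

# Same conversion as the transform: move channels first, then cast to float.
tensor = torch.from_numpy(arr.transpose((2, 0, 1)).copy()).float()
print(tensor.shape)  # torch.Size([3, 4, 5])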

Example 5: get_params

# Required module import: from PIL import Image [as alias]
# Or: from PIL.Image import Image [as alias]
def get_params(img, output_size):
        """Get parameters for ``crop`` for center crop.

        Args:
            img (numpy.ndarray (C x H x W)): Image to be cropped.
            output_size (tuple): Expected output size of the crop.

        Returns:
            tuple: params (i, j, h, w) to be passed to ``crop`` for center crop.
        """
        h = img.shape[0]
        w = img.shape[1]
        th, tw = output_size
        i = int(round((h - th) / 2.))
        j = int(round((w - tw) / 2.))

        # # randomized cropping
        # i = np.random.randint(i-3, i+4)
        # j = np.random.randint(j-3, j+4)

        return i, j, th, tw 
Developer: miraiaroha, Project: ACAN, Lines: 23, Source: transforms.py
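get_params only computes the crop window; the caller is expected to slice the array itself. A hedged usage sketch (with an arbitrary 228x304 output size) could be:

import numpy as np

img = np.zeros((480, 640, 3), dtype=np.uint8)   # H x W x C
i, j, th, tw = get_params(img, (228, 304))      # arbitrary example output size
center = img[i:i + th, j:j + tw]
print(i, j, center.shape)  # 126 168 (228, 304, 3)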

Example 6: __cutoff_right

# Required module import: from PIL import Image [as alias]
# Or: from PIL.Image import Image [as alias]
def __cutoff_right(bmp) -> PILImage:
        first_pix = bmp.getpixel((0, 0))
        width, height = bmp.size
        
        count = 0
        for x in range(8, width):
            dif = False
            for y in range(0, height):
                if not Inputs.rgb_equal(first_pix, bmp.getpixel((x, y))):
                    dif = True
                    break
            
            if dif: count = 0
            else:
                count += 1
                if count > 8:
                    return bmp.crop((0, 0, x, height))
        
        return bmp
    
    # splits the three parts of the resource breakdown (pow, bars, cap) 
Developer: kujan, Project: NGU-scripts, Lines: 23, Source: features.py

Example 7: __call__

# Required module import: from PIL import Image [as alias]
# Or: from PIL.Image import Image [as alias]
def __call__(self, sample):
        image, depth = sample['image'], sample['depth']

        applied_angle = random.uniform(-self.angle, self.angle)
        angle1 = applied_angle
        angle1_rad = angle1 * np.pi / 180

        # print('before rotating:',image.size)

        image = ndimage.interpolation.rotate(
            image, angle1, reshape=self.reshape, order=self.order)
        depth = ndimage.interpolation.rotate(
            depth, angle1, reshape=self.reshape, order=self.order)

        image = Image.fromarray(image)
        depth = Image.fromarray(depth)

        # print('after rotating:',image.shape,depth.shape)

        return {'image': image, 'depth': depth} 
Developer: JunjH, Project: Visualizing-CNNs-for-monocular-depth-estimation, Lines: 22, Source: nyu_transform.py
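Example 7 is the __call__ of a paired random-rotation transform for image/depth training pairs; a standalone sketch of the underlying rotation step on a single array (using a fixed hypothetical angle in place of the class's self.angle, with scipy and PIL as in the snippet) could be:

import numpy as np
from scipy import ndimage
from PIL import Image

arr = np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8)
angle = 5.0  # hypothetical fixed angle; the transform samples it uniformly from [-angle, angle]

# reshape=False keeps the original array size, as in the transform above.
rotated = ndimage.rotate(arr, angle, reshape=False, order=1)
img = Image.fromarray(rotated)
print(img.size)  # (32, 32)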

Example 8: changeScale

# Required module import: from PIL import Image [as alias]
# Or: from PIL.Image import Image [as alias]
def changeScale(self, img, size, interpolation=Image.BILINEAR):

        if not _is_pil_image(img):
            raise TypeError(
                'img should be PIL Image. Got {}'.format(type(img)))
        if not (isinstance(size, int) or (isinstance(size, collections.Iterable) and len(size) == 2)):
            raise TypeError('Got inappropriate size arg: {}'.format(size))

        if isinstance(size, int):
            w, h = img.size
            if (w <= h and w == size) or (h <= w and h == size):
                return img
            if w < h:
                ow = size
                oh = int(size * h / w)
                return img.resize((ow, oh), interpolation)
            else:
                oh = size
                ow = int(size * w / h)
                return img.resize((ow, oh), interpolation)
        else:
            return img.resize(size[::-1], interpolation) 
Developer: JunjH, Project: Visualizing-CNNs-for-monocular-depth-estimation, Lines: 24, Source: nyu_transform.py

Example 9: expect_crop

# Required module import: from PIL import Image [as alias]
# Or: from PIL.Image import Image [as alias]
def expect_crop(self, left_x=None, right_x=None, top_y=None, bottom_y=None):
    """Setup a mox expectation to images_stub._Crop."""
    crop_xform = images_service_pb.Transform()
    if left_x is not None:
      if not isinstance(left_x, float):
        raise self.failureException('Crop argument must be a float.')
      crop_xform.set_crop_left_x(left_x)
    if right_x is not None:
      if not isinstance(right_x, float):
        raise self.failureException('Crop argument must be a float.')
      crop_xform.set_crop_right_x(right_x)
    if top_y is not None:
      if not isinstance(top_y, float):
        raise self.failureException('Crop argument must be a float.')
      crop_xform.set_crop_top_y(top_y)
    if bottom_y is not None:
      if not isinstance(bottom_y, float):
        raise self.failureException('Crop argument must be a float.')
      crop_xform.set_crop_bottom_y(bottom_y)
    self._images_stub._Crop(mox.IsA(Image.Image), crop_xform).AndReturn(
        self._image) 
Developer: elsigh, Project: browserscope, Lines: 23, Source: blob_image_test.py

Example 10: get_params

# Required module import: from PIL import Image [as alias]
# Or: from PIL.Image import Image [as alias]
def get_params(img, output_size):
        """Get parameters for ``crop`` for a random crop.
        Args:
            img (PIL Image): Image to be cropped.
            output_size (tuple): Expected output size of the crop.
        Returns:
            tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.
        """
        w, h = img.size
        tw, th = output_size
        if w == tw and h == th:
            return 0, 0, h, w
            
        i = random.randint(0, h - th)
        j = random.randint(0, w - tw)
        return i, j, th, tw 
Developer: mapleneverfade, Project: pytorch-semantic-segmentation, Lines: 18, Source: functional.py

Example 11: to_tensor

# Required module import: from PIL import Image [as alias]
# Or: from PIL.Image import Image [as alias]
def to_tensor(pic):
    """Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.
    See ``ToTensor`` for more details.
    Args:
        pic (PIL Image or numpy.ndarray): Image to be converted to tensor.
    Returns:
        Tensor: Converted image.
    """
    if not(_is_numpy_image(pic)):
        raise TypeError('pic should be ndarray. Got {}'.format(type(pic)))

    # handle numpy array
    img = torch.from_numpy(pic.transpose((2, 0, 1)))
    # backward compatibility
    if isinstance(img, torch.ByteTensor) or img.dtype==torch.uint8:
        return img.float()
    else:
        return img 
Developer: CMU-CREATE-Lab, Project: deep-smoke-machine, Lines: 20, Source: opencv_functional.py

Example 12: resized_crop

# Required module import: from PIL import Image [as alias]
# Or: from PIL.Image import Image [as alias]
def resized_crop(img, i, j, h, w, size, interpolation=cv2.INTER_LINEAR):
    """Crop the given numpy ndarray and resize it to desired size.
    Notably used in :class:`~torchvision.transforms.RandomResizedCrop`.
    Args:
        img (numpy ndarray): Image to be cropped.
        i: Upper pixel coordinate.
        j: Left pixel coordinate.
        h: Height of the cropped image.
        w: Width of the cropped image.
        size (sequence or int): Desired output size. Same semantics as ``scale``.
        interpolation (int, optional): Desired interpolation. Default is
            ``cv2.INTER_LINEAR``.
    Returns:
        PIL Image: Cropped image.
    """
    assert _is_numpy_image(img), 'img should be numpy image'
    img = crop(img, i, j, h, w)
    img = resize(img, size, interpolation=interpolation)
    return img 
Developer: CMU-CREATE-Lab, Project: deep-smoke-machine, Lines: 21, Source: opencv_functional.py

Example 13: adjust_brightness

# Required module import: from PIL import Image [as alias]
# Or: from PIL.Image import Image [as alias]
def adjust_brightness(img, brightness_factor):
    """Adjust brightness of an Image.
    Args:
        img (numpy ndarray): numpy ndarray to be adjusted.
        brightness_factor (float):  How much to adjust the brightness. Can be
            any non negative number. 0 gives a black image, 1 gives the
            original image while 2 increases the brightness by a factor of 2.
    Returns:
        numpy ndarray: Brightness adjusted image.
    """
    if not _is_numpy_image(img):
        raise TypeError('img should be numpy Image. Got {}'.format(type(img)))
    table = np.array([ i*brightness_factor for i in range (0,256)]).clip(0,255).astype('uint8')
    # same thing but a bit slower
    # cv2.convertScaleAbs(img, alpha=brightness_factor, beta=0)
    if img.shape[2] == 1:
        return cv2.LUT(img, table)[:,:,np.newaxis]
    else:
        return cv2.LUT(img, table) 
Developer: CMU-CREATE-Lab, Project: deep-smoke-machine, Lines: 21, Source: opencv_functional.py
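The look-up-table approach above precomputes the mapping for all 256 possible uint8 values and applies it in one cv2.LUT call; a small check of the idea against direct per-pixel arithmetic (assuming cv2 and numpy are available, as in opencv_functional.py) could be:

import numpy as np
import cv2

img = np.random.randint(0, 256, size=(10, 10, 3), dtype=np.uint8)
factor = 1.5

# Same table construction as adjust_brightness above.
table = np.array([i * factor for i in range(256)]).clip(0, 255).astype('uint8')
via_lut = cv2.LUT(img, table)

# Direct computation, clipped and truncated the same way the table is.
direct = np.clip(img.astype(np.float64) * factor, 0, 255).astype('uint8')
print(np.array_equal(via_lut, direct))  # True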

Example 14: adjust_contrast

# Required module import: from PIL import Image [as alias]
# Or: from PIL.Image import Image [as alias]
def adjust_contrast(img, contrast_factor):
    """Adjust contrast of an mage.
    Args:
        img (numpy ndarray): numpy ndarray to be adjusted.
        contrast_factor (float): How much to adjust the contrast. Can be any
            non negative number. 0 gives a solid gray image, 1 gives the
            original image while 2 increases the contrast by a factor of 2.
    Returns:
        numpy ndarray: Contrast adjusted image.
    """
    # much faster to use the LUT construction than anything else I've tried
    # it's because you have to change dtypes multiple times
    if not _is_numpy_image(img):
        raise TypeError('img should be numpy Image. Got {}'.format(type(img)))
    table = np.array([ (i-74)*contrast_factor+74 for i in range (0,256)]).clip(0,255).astype('uint8')
    # enhancer = ImageEnhance.Contrast(img)
    # img = enhancer.enhance(contrast_factor)
    if img.shape[2] == 1:
        return cv2.LUT(img, table)[:,:,np.newaxis]
    else:
        return cv2.LUT(img, table) 
Developer: CMU-CREATE-Lab, Project: deep-smoke-machine, Lines: 23, Source: opencv_functional.py

Example 15: adjust_saturation

# Required module import: from PIL import Image [as alias]
# Or: from PIL.Image import Image [as alias]
def adjust_saturation(img, saturation_factor):
    """Adjust color saturation of an image.
    Args:
        img (numpy ndarray): numpy ndarray to be adjusted.
        saturation_factor (float):  How much to adjust the saturation. 0 will
            give a black and white image, 1 will give the original image while
            2 will enhance the saturation by a factor of 2.
    Returns:
        numpy ndarray: Saturation adjusted image.
    """
    # ~10ms slower than PIL!
    if not _is_numpy_image(img):
        raise TypeError('img should be numpy Image. Got {}'.format(type(img)))
    img = Image.fromarray(img)
    enhancer = ImageEnhance.Color(img)
    img = enhancer.enhance(saturation_factor)
    return np.array(img) 
Developer: CMU-CREATE-Lab, Project: deep-smoke-machine, Lines: 19, Source: opencv_functional.py


Note: The PIL.Image.Image examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective authors, and copyright remains with those authors. Refer to each project's license before redistributing or reusing the code; do not repost without permission.