This article collects and summarizes typical usage examples of the PIL.Image.merge method in Python. If you are struggling with questions such as: what exactly does Image.merge do, how is it called, and what do working examples look like? Then the curated code samples below should help. You can also explore further usage examples for PIL.Image, the module this method belongs to.
Below are 15 code examples of the Image.merge method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
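Before the collected examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the file names are placeholders) of the basic pattern: Image.merge takes a target mode and one single-band image per channel, and is the inverse of Image.split.

from PIL import Image

# Load an RGB image and split it into three single-band ('L') images.
img = Image.open('input.png').convert('RGB')  # 'input.png' is a placeholder path
r, g, b = img.split()

# Image.merge(mode, bands) rebuilds a multi-band image; the bands must all have
# the same size and their count must match the number of channels of `mode`.
# Passing them back in a different order swaps the red and blue channels.
swapped = Image.merge('RGB', (b, g, r))
swapped.save('swapped_channels.png')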
Example 1: resolve
# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import merge [as alias]
def resolve(ctx):
    from PIL import Image
    if isinstance(ctx, list):
        ctx = [ctx[0]]
    net.load_parameters('superres.params', ctx=ctx)
    img = Image.open(opt.resolve_img).convert('YCbCr')
    y, cb, cr = img.split()
    data = mx.nd.expand_dims(mx.nd.expand_dims(mx.nd.array(y), axis=0), axis=0)
    out_img_y = mx.nd.reshape(net(data), shape=(-3, -2)).asnumpy()
    out_img_y = out_img_y.clip(0, 255)
    out_img_y = Image.fromarray(np.uint8(out_img_y[0]), mode='L')
    out_img_cb = cb.resize(out_img_y.size, Image.BICUBIC)
    out_img_cr = cr.resize(out_img_y.size, Image.BICUBIC)
    out_img = Image.merge('YCbCr', [out_img_y, out_img_cb, out_img_cr]).convert('RGB')
    out_img.save('resolved.png')
Example 2: distort_image
# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import merge [as alias]
def distort_image(im, hue, sat, val):
    im = im.convert('HSV')
    cs = list(im.split())
    cs[1] = cs[1].point(lambda i: i * sat)
    cs[2] = cs[2].point(lambda i: i * val)

    def change_hue(x):
        x += hue * 255
        if x > 255:
            x -= 255
        if x < 0:
            x += 255
        return x

    cs[0] = cs[0].point(change_hue)
    im = Image.merge(im.mode, tuple(cs))

    im = im.convert('RGB')
    return im
Example 3: distort_image
# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import merge [as alias]
def distort_image(im, hue, sat, val):
    im = im.convert('HSV')
    cs = list(im.split())
    cs[1] = cs[1].point(lambda i: i * sat)
    cs[2] = cs[2].point(lambda i: i * val)

    def change_hue(x):
        x += hue * 255
        if x > 255:
            x -= 255
        if x < 0:
            x += 255
        return x

    cs[0] = cs[0].point(change_hue)
    im = Image.merge(im.mode, tuple(cs))

    im = im.convert('RGB')
    # constrain_image(im)
    return im
Example 4: open_base_img
# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import merge [as alias]
def open_base_img(full_profile, res, base_color, color):
    # get base image according to profile and perceptual gray of key color
    base_num = str([0xE0, 0xB0, 0x80, 0x50, 0x20].index(base_color) + 1)

    # open image and convert to Lab
    with Image.open('images/{0}_{1}{2}.png'.format(*full_profile, base_num)) as img:
        key_img = img.resize((int(s * res / 200) for s in img.size), resample=Image.BILINEAR).convert('RGBA')
    if full_profile[1] in ('ISO', 'BIGENTER'): alpha = key_img.split()[-1]
    l, a, b = ImageCms.applyTransform(key_img, rgb2lab_transform).split()

    # convert key color to Lab
    # a and b should be scaled by 128/100, but desaturation looks more natural
    rgb_color = color_objects.sRGBColor(*ImageColor.getrgb(color), is_upscaled=True)
    lab_color = color_conversions.convert_color(rgb_color, color_objects.LabColor)
    l1, a1, b1 = lab_color.get_value_tuple()
    l1, a1, b1 = int(l1 * 256 / 100), int(a1 + 128), int(b1 + 128)

    # change Lab of base image to match that of key color
    l = ImageMath.eval('convert(l + l1 - l_avg, "L")', l=l, l1=l1, l_avg=base_color)
    a = ImageMath.eval('convert(a + a1 - a, "L")', a=a, a1=a1)
    b = ImageMath.eval('convert(b + b1 - b, "L")', b=b, b1=b1)
    key_img = ImageCms.applyTransform(Image.merge('LAB', (l, a, b)), lab2rgb_transform).convert('RGBA')
    if full_profile[1] in ('ISO', 'BIGENTER'): key_img.putalpha(alpha)
    return key_img
Example 5: __call__
# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import merge [as alias]
def __call__(self, video_path, frame_indices):
    with h5py.File(video_path, 'r') as f:
        flow_data = []
        for flow in self.flows:
            flow_data.append(f[f'video_{flow}'])

        video = []
        for i in frame_indices:
            if i < len(flow_data[0]):
                frame = [
                    Image.open(io.BytesIO(video_data[i]))
                    for video_data in flow_data
                ]
                frame.append(frame[-1])  # add dummy data into third channel
                video.append(Image.merge('RGB', frame))

    return video
Example 6: test_consistency_5x5
# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import merge [as alias]
def test_consistency_5x5(self):
    source = Image.open("Tests/images/hopper.bmp")
    reference = Image.open("Tests/images/hopper_emboss_more.bmp")
    kernel = ImageFilter.Kernel((5, 5),  # noqa: E127
                                (-1, -1, -1, -1,  0,
                                 -1, -1, -1,  0,  1,
                                 -1, -1,  0,  1,  1,
                                 -1,  0,  1,  1,  1,
                                  0,  1,  1,  1,  1), 0.3)
    source = source.split() * 2
    reference = reference.split() * 2

    for mode in ['L', 'LA', 'RGB', 'CMYK']:
        self.assert_image_equal(
            Image.merge(mode, source[:len(mode)]).filter(kernel),
            Image.merge(mode, reference[:len(mode)]),
        )
Example 7: test_channels_order
# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import merge [as alias]
def test_channels_order(self):
    g = Image.linear_gradient('L')
    im = Image.merge('RGB', [g, g.transpose(Image.ROTATE_90),
                             g.transpose(Image.ROTATE_180)])

    # Reverse channels by splitting and using table
    self.assert_image_equal(
        Image.merge('RGB', im.split()[::-1]),
        im._new(im.im.color_lut_3d('RGB', Image.LINEAR,
                                   3, 2, 2, 2, [
                                       0, 0, 0,  0, 0, 1,
                                       0, 1, 0,  0, 1, 1,
                                       1, 0, 0,  1, 0, 1,
                                       1, 1, 0,  1, 1, 1,
                                   ])))
Example 8: wedge
# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import merge [as alias]
def wedge(self):
    w = Image._wedge()
    w90 = w.rotate(90)

    (px, h) = w.size

    r = Image.new('L', (px * 3, h))
    g = r.copy()
    b = r.copy()

    r.paste(w, (0, 0))
    r.paste(w90, (px, 0))
    g.paste(w90, (0, 0))
    g.paste(w, (2 * px, 0))
    b.paste(w, (px, 0))
    b.paste(w90, (2 * px, 0))

    img = Image.merge('RGB', (r, g, b))
    return img
Example 9: _fry
# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import merge [as alias]
def _fry(img):
    e = ImageEnhance.Sharpness(img)
    img = e.enhance(100)
    e = ImageEnhance.Contrast(img)
    img = e.enhance(100)
    e = ImageEnhance.Brightness(img)
    img = e.enhance(.27)
    r, b, g = img.split()
    e = ImageEnhance.Brightness(r)
    r = e.enhance(4)
    e = ImageEnhance.Brightness(g)
    g = e.enhance(1.75)
    e = ImageEnhance.Brightness(b)
    b = e.enhance(.6)
    img = Image.merge('RGB', (r, g, b))
    e = ImageEnhance.Brightness(img)
    img = e.enhance(1.5)

    temp = BytesIO()
    temp.name = 'deepfried.png'
    img.save(temp)
    temp.seek(0)
    return temp
Example 10: save_to_disk
# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import merge [as alias]
def save_to_disk(self, filename, format='.png'):
    """Save this image to disk (requires PIL installed)."""
    filename = _append_extension(filename, format)

    try:
        from PIL import Image as PImage
    except ImportError:
        raise RuntimeError(
            'cannot import PIL, make sure pillow package is installed')

    image = PImage.frombytes(
        mode='RGBA',
        size=(self.width, self.height),
        data=self.raw_data,
        decoder_name='raw')
    color = image.split()
    image = PImage.merge("RGB", color[2::-1])

    folder = os.path.dirname(filename)
    if not os.path.isdir(folder):
        os.makedirs(folder)
    image.save(filename, quality=100)
Example 11: from_png_to_bmp
# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import merge [as alias]
def from_png_to_bmp(png_path, output_path=BMP_IMAGE_TEST_TO_PATH):
    """
    Convert a png_path image into a bmp 3-channel one and return the path to the converted image
    :param png_path: path of the image
    :param output_path: path in which we save the image
    :return: the file path
    """
    # convert a .png image file to a .bmp image file using PIL
    file_name = os.path.splitext(png_path)[0] \
        .split("/")[-1]

    file_in = png_path
    img = Image.open(file_in)

    file_out = os.path.join(output_path, str(file_name), str(file_name) + '.bmp')
    len(img.split())  # test
    if len(img.split()) == 4:
        # prevent IOError: cannot write mode RGBA as BMP
        r, g, b, a = img.split()
        img = Image.merge("RGB", (r, g, b))
        img.save(file_out)
    else:
        img.save(file_out)
    return file_out
Example 12: save_to_disk
# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import merge [as alias]
def save_to_disk(self, filename):
    """Save this image to disk (requires PIL installed)."""
    filename = _append_extension(filename, '.png')

    try:
        from PIL import Image as PImage
    except ImportError:
        raise RuntimeError(
            'cannot import PIL, make sure pillow package is installed')

    image = PImage.frombytes(
        mode='RGBA',
        size=(self.width, self.height),
        data=self.raw_data,
        decoder_name='raw')
    color = image.split()
    image = PImage.merge("RGB", color[2::-1])

    folder = os.path.dirname(filename)
    if not os.path.isdir(folder):
        os.makedirs(folder)
    image.save(filename)
Example 13: distort_image
# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import merge [as alias]
def distort_image(im, hue, sat, val):
    im = im.convert('HSV')
    cs = list(im.split())
    cs[1] = cs[1].point(lambda i: i * sat)
    cs[2] = cs[2].point(lambda i: i * val)

    def change_hue(x):
        x += hue * 255
        if x > 255:
            x -= 255
        if x < 0:
            x += 255
        return x

    cs[0] = cs[0].point(change_hue)
    im = Image.merge(im.mode, tuple(cs))

    im = im.convert('RGB')
    return im

# generate random scale.
Example 14: perform_inference
# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import merge [as alias]
def perform_inference(sym, arg_params, aux_params, input_img, img_cb, img_cr):
    """Perform inference on image using mxnet"""
    metadata = onnx_mxnet.get_model_metadata('super_resolution.onnx')
    data_names = [input_name[0] for input_name in metadata.get('input_tensor_data')]

    # create module
    mod = mx.mod.Module(symbol=sym, data_names=data_names, label_names=None)
    mod.bind(for_training=False, data_shapes=[(data_names[0], input_img.shape)])
    mod.set_params(arg_params=arg_params, aux_params=aux_params)

    # run inference
    batch = namedtuple('Batch', ['data'])
    mod.forward(batch([mx.nd.array(input_img)]))

    # Save the result
    img_out_y = Image.fromarray(np.uint8(mod.get_outputs()[0][0][0].
                                         asnumpy().clip(0, 255)), mode='L')

    result_img = Image.merge(
        "YCbCr", [img_out_y,
                  img_cb.resize(img_out_y.size, Image.BICUBIC),
                  img_cr.resize(img_out_y.size, Image.BICUBIC)]).convert("RGB")
    output_img_dim = 672
    assert result_img.size == (output_img_dim, output_img_dim)
    LOGGER.info("Super Resolution example success.")
    result_img.save("super_res_output.jpg")
    return result_img
Example 15: color
# Required import: from PIL import Image [as alias]
# Or: from PIL.Image import merge [as alias]
def color(src, target):
    num_pixels = src.size[0] * src.size[1]
    colors = src.getcolors(num_pixels)
    rgb = sum(c[0] * c[1][0] for c in colors), sum(c[0] * c[1][1] for c in colors), sum(
        c[0] * c[1][2] for c in colors)
    rgb = rgb[0] / num_pixels, rgb[1] / num_pixels, rgb[2] / num_pixels

    bands = target.split()
    for i, v in enumerate(rgb):
        out = bands[i].point(lambda p: int(p * v / 255))
        bands[i].paste(out)
    return Image.merge(target.mode, bands)