This article collects typical usage examples of Python's skimage.color.rgb2lab function. If you are wondering exactly how rgb2lab is called, how it behaves, or what it looks like in real code, the hand-picked examples below may help.
The following shows 15 code examples of rgb2lab, sorted by popularity by default.
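Before diving into the examples, here is a minimal round-trip sketch of the function itself (the file name is only a placeholder): rgb2lab expects an RGB image, either floats in [0, 1] or uint8 (which is converted automatically), and returns a float array with L in [0, 100] and a/b roughly in [-128, 127]; lab2rgb converts back.

import numpy as np
from skimage import io, color

img = io.imread('example.jpg')            # uint8 RGB, shape (H, W, 3)
lab = color.rgb2lab(img)                  # L in [0, 100], a/b roughly in [-128, 127]
L, a, b = lab[..., 0], lab[..., 1], lab[..., 2]
rgb_back = color.lab2rgb(lab)             # back to float RGB in [0, 1]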
Example 1: run_color

def run_color(image, image_out):
    # imports assumed by this snippet: caffe, numpy as np, from skimage import color,
    # scipy.ndimage.interpolation as sni, and scipy.misc (imsave was removed in newer SciPy)
    caffe.set_mode_cpu()
    net = caffe.Net('colorization_deploy_v0.prototxt', 'colorization_release_v0.caffemodel', caffe.TEST)
    (H_in, W_in) = net.blobs['data_l'].data.shape[2:]        # get input shape
    (H_out, W_out) = net.blobs['class8_ab'].data.shape[2:]   # get output shape
    net.blobs['Trecip'].data[...] = 6 / np.log(10)           # 1/T, set annealing temperature
    img_rgb = caffe.io.load_image(image)
    img_lab = color.rgb2lab(img_rgb)                         # convert image to Lab color space
    img_l = img_lab[:, :, 0]                                 # pull out L channel
    (H_orig, W_orig) = img_rgb.shape[:2]                     # original image size
    img_rs = caffe.io.resize_image(img_rgb, (H_in, W_in))    # resize image to network input size
    img_lab_rs = color.rgb2lab(img_rs)
    img_l_rs = img_lab_rs[:, :, 0]
    net.blobs['data_l'].data[0, 0, :, :] = img_l_rs - 50     # subtract 50 for mean-centering
    net.forward()                                            # run network
    ab_dec = net.blobs['class8_ab'].data[0, :, :, :].transpose((1, 2, 0))  # predicted ab channels
    ab_dec_us = sni.zoom(ab_dec, (1. * H_orig / H_out, 1. * W_orig / W_out, 1))  # upsample to match size of original image L
    img_lab_out = np.concatenate((img_l[:, :, np.newaxis], ab_dec_us), axis=2)   # concatenate with original image L
    img_rgb_out = np.clip(color.lab2rgb(img_lab_out), 0, 1)  # convert back to RGB
    scipy.misc.imsave(image_out, img_rgb_out)
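A hypothetical call, assuming the two Caffe model files named in caffe.Net sit in the working directory and the file names below are placeholders:

run_color('input_gray.jpg', 'output_color.png')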
Example 2: applyNailPolish

def applyNailPolish(x, y, r=Rg, g=Gg, b=Bg):
    # x, y are pixel coordinates of the nail region; im, Rg/Gg/Bg and mean()
    # come from module-level context in the original project
    val = color.rgb2lab((im[x, y] / 255.).reshape(len(x), 1, 3)).reshape(len(x), 3)
    L, A, B = mean(val[:, 0]), mean(val[:, 1]), mean(val[:, 2])
    L1, A1, B1 = color.rgb2lab(np.array((r / 255., g / 255., b / 255.)).reshape(1, 1, 3)).reshape(3,)
    ll, aa, bb = L1 - L, A1 - A, B1 - B
    val[:, 0] = np.clip(val[:, 0] + ll, 0, 100)
    val[:, 1] = np.clip(val[:, 1] + aa, -127, 128)
    val[:, 2] = np.clip(val[:, 2] + bb, -127, 128)
    im[x, y] = color.lab2rgb(val.reshape(len(x), 1, 3)).reshape(len(x), 3) * 255
Example 3: convertLAB

def convertLAB(img):
    """Convert an RGB (or RGBA) image into the Lab color space."""
    if img.shape[2] == 4:
        return rgb2lab(img[:, :, 0:3])   # drop the alpha channel
    elif img.shape[2] == 3:
        return rgb2lab(img)
    else:
        print("Image format not supported")
Example 4: process_pair

def process_pair(ref, recons):
    ref_lab = color.rgb2lab(decode_y4m_buffer(ref))
    recons_lab = color.rgb2lab(decode_y4m_buffer(recons))
    # "Color Image Quality Assessment Based on CIEDE2000"
    # Yang Yang, Jun Ming and Nenghai Yu, 2012
    # http://dx.doi.org/10.1155/2012/273723
    dE = color.deltaE_ciede2000(ref_lab, recons_lab, kL=0.65, kC=1.0, kH=4.0)
    scores.append(45. - 20. * np.log10(dE.mean()))   # scores is a module-level list in the original script
    print('%08d: %2.4f' % (ref.count, scores[-1]))
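The appended value is a CIEDE2000-based quality index: score = 45 - 20 * log10(mean dE00). A mean color difference of 1 therefore maps to 45, and every tenfold increase in mean dE00 costs 20 points, much like a dB-style PSNR scale.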
Example 5: get_train_data

def get_train_data(img_file):
    image = img_to_array(load_img(img_file))
    image_shape = image.shape
    image = np.array(image, dtype=float)
    x = rgb2lab(1.0 / 255 * image)[:, :, 0]    # L channel as network input
    y = rgb2lab(1.0 / 255 * image)[:, :, 1:]   # ab channels as target
    y /= 128                                   # scale ab to roughly [-1, 1]
    x = x.reshape(1, image_shape[0], image_shape[1], 1)
    y = y.reshape(1, image_shape[0], image_shape[1], 2)
    return x, y, image_shape
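A hedged usage sketch (the file name is a placeholder; load_img and img_to_array are assumed to come from Keras' image utilities, as the snippet implies):

x, y, shape = get_train_data('photo.jpg')
# x: (1, H, W, 1) lightness input; y: (1, H, W, 2) ab target scaled to roughly [-1, 1]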
Example 6: merge_images

def merge_images(self, img_a, img_b):
    i_a = skic.rgb2lab(img_a)
    i_b = skic.rgb2lab(img_b)
    # take the brighter of the two L channels at every pixel, keep a/b from img_a
    norm_lum = np.max(np.asarray([i_a[..., 0], i_b[..., 0]]), axis=0)
    res_img = i_a.copy()
    res_img[..., 0] = norm_lum
    return skic.lab2rgb(res_img)
Example 7: applyBlushColor

def applyBlushColor(r=Rg, g=Gg, b=Bg):
    global im
    # im, width, height, Rg/Gg/Bg, intensity and mean() are module-level context in the original project
    val = color.rgb2lab((im / 255.)).reshape(width * height, 3)
    L, A, B = mean(val[:, 0]), mean(val[:, 1]), mean(val[:, 2])
    L1, A1, B1 = color.rgb2lab(np.array((r / 255., g / 255., b / 255.)).reshape(1, 1, 3)).reshape(3,)
    ll, aa, bb = (L1 - L) * intensity, (A1 - A) * intensity, (B1 - B) * intensity
    val[:, 0] = np.clip(val[:, 0] + ll, 0, 100)
    val[:, 1] = np.clip(val[:, 1] + aa, -127, 128)
    val[:, 2] = np.clip(val[:, 2] + bb, -127, 128)
    im = color.lab2rgb(val.reshape(height, width, 3)) * 255
Example 8: apply_texture

def apply_texture(x, y):
    # x, y index the target region of the global image im; text is the texture image
    xmin, ymin = amin(x), amin(y)
    X = (x - xmin).astype(int)
    Y = (y - ymin).astype(int)
    val1 = color.rgb2lab((text[X, Y] / 255.).reshape(len(X), 1, 3)).reshape(len(X), 3)
    val2 = color.rgb2lab((im[x, y] / 255.).reshape(len(x), 1, 3)).reshape(len(x), 3)
    L, A, B = mean(val2[:, 0]), mean(val2[:, 1]), mean(val2[:, 2])
    val2[:, 0] = np.clip(val2[:, 0] - L + val1[:, 0], 0, 100)
    val2[:, 1] = np.clip(val2[:, 1] - A + val1[:, 1], -127, 128)
    val2[:, 2] = np.clip(val2[:, 2] - B + val1[:, 2], -127, 128)
    im[x, y] = color.lab2rgb(val2.reshape(len(x), 1, 3)).reshape(len(x), 3) * 255
Example 9: LAB

def LAB(img, k, filename):
    # rescale pixel values into [0, 1] as required by skimage
    img = img * 1.0 / MAX_COLOR_VAL
    # convert RGB to Lab
    pixels_lab = color.rgb2lab(img)
    # pull out the L channel
    L = pixels_lab[:, :, 0]
    # reshape, cluster, and retrieve quantized values
    pixels_l = np.reshape(L, (L.shape[0] * L.shape[1], 1))
    clustered = cluster_pixels(pixels_l, k, (L.shape[0], L.shape[1]))
    pixels_lab[:, :, 0] = clustered[:, :, 0]
    # convert the result back to 0-255 RGB
    quanted_img = color.lab2rgb(pixels_lab) * MAX_COLOR_VAL
    quanted_img = quanted_img.astype('uint8')
    fig = plt.figure(1)
    plt.imshow(quanted_img)
    plt.title("LAB quantization where k is " + str(k))
    plt.savefig('Q2/' + filename + '_LAB.png')
    plt.close(fig)
    return quanted_img
Example 10: b

def b():
    from skimage import io, color
    print('imported')
    rgb = io.imread('window_exp_1_1.jpg')
    print('opened')
    lab = color.rgb2lab(rgb)
    print(lab[0, 0])
Example 11: compute_saliency

def compute_saliency(img):
    """
    Computes Boolean Map Saliency (BMS).
    """
    img_lab = rgb2lab(img)
    # normalize all Lab channels into [0, 1]
    img_lab -= img_lab.min()
    img_lab /= img_lab.max()
    thresholds = np.arange(0, 1, 1.0 / N_THRESHOLDS)[1:]
    # compute boolean maps
    bool_maps = []
    for thresh in thresholds:
        img_lab_T = img_lab.transpose(2, 0, 1)
        img_thresh = (img_lab_T > thresh)
        bool_maps.extend(list(img_thresh))
    # compute mean attention map
    attn_map = np.zeros(img_lab.shape[:2], dtype=float)
    for bool_map in bool_maps:
        attn_map += activate_boolean_map(bool_map)
    attn_map /= N_THRESHOLDS
    # Gaussian smoothing
    attn_map = cv2.GaussianBlur(attn_map, (0, 0), 3)
    # normalize and rescale to 0-255
    norm = np.sqrt((attn_map ** 2).sum())
    attn_map /= norm
    attn_map /= attn_map.max() / 255
    return attn_map.astype(np.uint8)
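activate_boolean_map is not shown in this example. In the BMS paper it keeps the connected regions of a boolean map that do not touch the image border (the "surrounded" regions). A rough sketch under that assumption, using scipy.ndimage:

import numpy as np
from scipy import ndimage

def activate_boolean_map(bool_map):
    # hypothetical helper: activation = regions of bool_map not connected to the image border
    labeled, _ = ndimage.label(bool_map)
    border = np.concatenate([labeled[0, :], labeled[-1, :], labeled[:, 0], labeled[:, -1]])
    touches_border = np.isin(labeled, np.unique(border[border > 0]))
    return (bool_map & ~touches_border).astype(float)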
Example 12: dominant_colors

def dominant_colors(image, num_colors, mask=None):
    """Reduce image colors to a representative set of a given size.

    Args:
        image (ndarray): BGR image of shape n x m x 3.
        num_colors (int): Number of colors to reduce to.
        mask (array_like, optional): Foreground mask. Defaults to None.

    Returns:
        list: Color objects representing the most dominant colors in the image.
    """
    image = rgb2lab(image / 255.0)
    if mask is not None:
        data = image[mask > 250]
    else:
        data = np.reshape(image, (-1, 3))
    # k-means has inherent randomness - the result will not be exactly the same
    # every time, but it is fairly consistent with >= 30 iterations
    centroids, labels = kmeans2(data, num_colors, iter=30)
    counts = np.histogram(labels, bins=range(0, num_colors + 1), density=True)[0]
    centroids_RGB = lab2rgb(centroids.reshape(-1, 1, 3))[:, 0, :] * 255.0
    colors = [Color(centroid, count) for centroid, count in zip(centroids_RGB, counts)]
    colors.sort(key=lambda color: np.mean(color.BGR))
    return colors
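Color is not defined in this snippet; the original project presumably provides it. A minimal hypothetical stand-in that makes the example runnable:

from collections import namedtuple

# hypothetical: first field holds the centroid color, second its relative frequency
Color = namedtuple('Color', ['BGR', 'proportion'])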
Example 13: __init__

def __init__(self, image_path):
    rgb = io.imread(image_path)
    self.lab = color.rgb2lab(rgb)
    self.im_shp = self.lab.shape
    self.r_image = np.zeros((self.im_shp[0], self.im_shp[1] - 1))
Example 14: snap_ab

def snap_ab(input_l, input_rgb, return_type='rgb'):
    ''' given an input lightness and rgb, snap the color into a region where l, a, b is in-gamut
    '''
    T = 20
    warnings.filterwarnings("ignore")
    input_lab = rgb2lab_1d(np.array(input_rgb))  # convert input to Lab
    conv_lab = input_lab.copy()                  # keep ab from input
    for t in range(T):
        conv_lab[0] = input_l                    # overwrite the lightness with the requested L
        old_lab = conv_lab
        # round-trip through RGB; clipping pulls out-of-gamut colors back inside
        tmp_rgb = color.lab2rgb(conv_lab[np.newaxis, np.newaxis, :]).flatten()
        tmp_rgb = np.clip(tmp_rgb, 0, 1)
        conv_lab = color.rgb2lab(tmp_rgb[np.newaxis, np.newaxis, :]).flatten()
        dif_lab = np.sum(np.abs(conv_lab - old_lab))
        if dif_lab < 1:
            break
    conv_rgb_ingamut = lab2rgb_1d(conv_lab, clip=True, dtype='uint8')
    if return_type == 'rgb':
        return conv_rgb_ingamut
    elif return_type == 'lab':
        conv_lab_ingamut = rgb2lab_1d(conv_rgb_ingamut)
        return conv_lab_ingamut
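rgb2lab_1d and lab2rgb_1d are helpers from the surrounding project and are not shown here; a minimal sketch of what they might look like, assuming length-3 color vectors:

def rgb2lab_1d(in_rgb):
    # hypothetical helper: length-3 RGB vector -> length-3 Lab vector
    return color.rgb2lab(in_rgb[np.newaxis, np.newaxis, :]).flatten()

def lab2rgb_1d(in_lab, clip=True, dtype='uint8'):
    # hypothetical helper: length-3 Lab vector -> length-3 RGB vector
    tmp_rgb = color.lab2rgb(in_lab[np.newaxis, np.newaxis, :]).flatten()
    if clip:
        tmp_rgb = np.clip(tmp_rgb, 0, 1)
    if dtype == 'uint8':
        tmp_rgb = np.round(tmp_rgb * 255.).astype('uint8')
    return tmp_rgb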
Example 15: color2gray

def color2gray(image, itns):
    global width
    global height
    global lab
    width = image.shape[1]
    height = image.shape[0]
    # convert RGB to Lab and use the L channel as the initial grayscale guess
    lab = color.rgb2lab(image)
    g0 = lab[:, :, 0]
    g0 = g0.astype(np.uint8)
    g0 = g0.flatten()
    # solve the least-squares optimization
    res = minimize(objective, g0, method='BFGS', jac=objective_der, options={'maxiter': itns, 'disp': True})
    output = res.x.reshape(height, width)
    output = output.astype(np.uint8)
    output += 50
    return output