This article collects and organizes typical usage examples of the Python method skimage.exposure.rescale_intensity. If you are wondering how exactly to use exposure.rescale_intensity, how to call it, or what example code looks like, the curated code samples below may help. You can also explore further usage examples from the containing module, skimage.exposure.
A total of 15 code examples of the exposure.rescale_intensity method are shown below, sorted by popularity by default.
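Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the array and percentile values are purely illustrative) of what rescale_intensity does: values inside in_range are mapped linearly onto out_range, and values outside it are clipped.
import numpy as np
from skimage import exposure

# Illustrative data only: a small uint8 array.
img = np.array([10, 50, 120, 200, 250], dtype=np.uint8)

# Stretch the 5th-95th percentile range onto [0, 1]; values outside it are clipped.
p_low, p_high = np.percentile(img, (5, 95))
scaled = exposure.rescale_intensity(img.astype(float),
                                    in_range=(p_low, p_high),
                                    out_range=(0, 1))
Most of the examples below follow this same pattern: compute percentile bounds, pass them as in_range, and pick an out_range that matches the downstream pipeline.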
Example 1: scale_rgb
# Required module: from skimage import exposure [as alias]
# Or: from skimage.exposure import rescale_intensity [as alias]
def scale_rgb(layers, min_max, lidx):
    layers_c = np.empty(layers.shape, dtype='float32')
    # Rescale and blur.
    for li in range(0, 3):
        layer = layers[li]
        layer = np.float32(rescale_intensity(layer,
                                             in_range=(min_max[li][0],
                                                       min_max[li][1]),
                                             out_range=(0, 1)))
        layers_c[lidx[li]] = rescale_intensity(cv2.GaussianBlur(layer,
                                                                ksize=(3, 3),
                                                                sigmaX=3),
                                               in_range=(0, 1),
                                               out_range=(-1, 1))
    return layers_c
Example 2: project_object_edge
# Required module: from skimage import exposure [as alias]
# Or: from skimage.exposure import rescale_intensity [as alias]
def project_object_edge(img, dimension):
    """ scale the image, binarise with Otsu and project to one dimension

    :param ndarray img:
    :param int dimension: select dimension for projection
    :return list(float):

    >>> img = np.zeros((20, 10, 3))
    >>> img[2:6, 1:7, :] = 1
    >>> img[10:17, 4:6, :] = 1
    >>> project_object_edge(img, 0).tolist()  # doctest: +NORMALIZE_WHITESPACE
    [0.0, 0.0, 0.7, 0.7, 0.7, 0.7, 0.0, 0.0, 0.0, 0.0,
     0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.2, 0.0, 0.0, 0.0]
    """
    assert dimension in (0, 1), 'not supported dimension %i' % dimension
    assert img.ndim == 3, 'unsupported image shape %r' % img.shape
    img_gray = np.mean(img, axis=-1)
    img_gray = GaussianBlur(img_gray, (5, 5), 0)
    p_low, p_high = np.percentile(img_gray, (1, 95))
    img_gray = rescale_intensity(img_gray, in_range=(p_low, p_high))
    img_bin = img_gray > threshold_otsu(img_gray)
    img_edge = np.mean(img_bin, axis=1 - dimension)
    return img_edge
Example 3: _stretch_im
# Required module: from skimage import exposure [as alias]
# Or: from skimage.exposure import rescale_intensity [as alias]
def _stretch_im(arr, str_clip):
    """Stretch an image in numpy ndarray format using a specified clip value.

    Parameters
    ----------
    arr: numpy array
        N-dimensional array in rasterio band order (bands, rows, columns)
    str_clip: int
        The % of clip to apply to the stretch. Default = 2 (2 and 98)

    Returns
    ----------
    arr: numpy array with values stretched to the specified clip %
    """
    s_min = str_clip
    s_max = 100 - str_clip
    arr_rescaled = np.zeros_like(arr)
    for ii, band in enumerate(arr):
        lower, upper = np.percentile(band, (s_min, s_max))
        arr_rescaled[ii] = exposure.rescale_intensity(
            band, in_range=(lower, upper)
        )
    return arr_rescaled.copy()
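A hypothetical call of the helper above, assuming a raster already loaded in (bands, rows, columns) order; the random array and the clip value of 2 are illustrative only.
import numpy as np

# Illustrative 3-band raster in (bands, rows, columns) order, as the docstring expects.
arr = np.random.rand(3, 128, 128) * 10000.0

# Clip each band at its 2nd/98th percentile and stretch to the band's value range.
arr_stretched = _stretch_im(arr, str_clip=2)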
Example 4: rgb2illumination_invariant
# Required module: from skimage import exposure [as alias]
# Or: from skimage.exposure import rescale_intensity [as alias]
def rgb2illumination_invariant(img, alpha, hist_eq=False):
    """
    An implementation of the illuminant-invariant color space published
    by Maddern et al. (2014)
    http://www.robots.ox.ac.uk/~mobile/Papers/2014ICRA_maddern.pdf

    :param img: RGB image
    :param alpha: camera parameter
    :return: illumination-invariant image
    """
    ii_img = 0.5 + np.log(img[:, :, 1] + 1e-8) - \
        alpha * np.log(img[:, :, 2] + 1e-8) - \
        (1 - alpha) * np.log(img[:, :, 0] + 1e-8)
    # ii_img = exposure.rescale_intensity(ii_img, out_range=(0, 1))
    if hist_eq:
        ii_img = exposure.equalize_hist(ii_img)
    print(np.max(ii_img))
    print(np.min(ii_img))
    return ii_img
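A hypothetical invocation of the function above; the random image and the alpha value are placeholders, since the paper derives alpha from the camera's spectral response rather than using a universal constant.
import numpy as np

# Placeholder input: float RGB image with values in (0, 1].
rgb = np.random.rand(480, 640, 3) + 1e-6

# alpha is camera-specific; 0.48 is purely illustrative here.
ii = rgb2illumination_invariant(rgb, alpha=0.48, hist_eq=True)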
Example 5: scale_image_intensity
# Required module: from skimage import exposure [as alias]
# Or: from skimage.exposure import rescale_intensity [as alias]
def scale_image_intensity(img, im_range=1., quantiles=(2, 98)):
    """ scale image values within the given quantile range to filter out some outliers

    :param ndarray img: input image
    :param im_range: range to scale image values (1. or 255)
    :param tuple(int,int) quantiles: scale image values in certain quantile range
    :return ndarray:

    >>> np.random.seed(0)
    >>> img = np.random.randint(10, 255, (25, 30))
    >>> im = scale_image_intensity(img)
    >>> im.min()
    0.0
    >>> im.max()
    1.0
    """
    p_low = np.percentile(img, quantiles[0])
    p_high = np.percentile(img, quantiles[1])
    img = exposure.rescale_intensity(img.astype(float), in_range=(p_low, p_high),
                                     out_range='float')
    if im_range == 255:
        img = np.array(img * im_range).astype(np.uint8)
    return img
Example 6: process_image
# Required module: from skimage import exposure [as alias]
# Or: from skimage.exposure import rescale_intensity [as alias]
def process_image(orig_image_arr):
    ratio = orig_image_arr.shape[0] / 300.0
    display_image_arr = normalize_contrs(orig_image_arr, crop_display(orig_image_arr))
    # display image is now segmented.
    gry_disp_arr = cv2.cvtColor(display_image_arr, cv2.COLOR_BGR2GRAY)
    gry_disp_arr = exposure.rescale_intensity(gry_disp_arr, out_range=(0, 255))
    # thresholding
    ret, thresh = cv2.threshold(gry_disp_arr, 127, 255, cv2.THRESH_BINARY)
    return thresh
Example 7: tensor2im
# Required module: from skimage import exposure [as alias]
# Or: from skimage.exposure import rescale_intensity [as alias]
def tensor2im(image_tensor, imgtype='img', datatype=np.uint8):
    image_numpy = image_tensor[0].cpu().float().numpy()
    if image_numpy.ndim == 4:  # image_numpy (C x W x H x S)
        mid_slice = image_numpy.shape[-1] // 2
        image_numpy = image_numpy[:, :, :, mid_slice]
    if image_numpy.shape[0] == 1:
        image_numpy = np.tile(image_numpy, (3, 1, 1))
    image_numpy = np.transpose(image_numpy, (1, 2, 0))
    if imgtype == 'img':
        image_numpy = (image_numpy + 8) / 16.0 * 255.0
    if np.unique(image_numpy).size == int(1):
        return image_numpy.astype(datatype)
    return rescale_intensity(image_numpy.astype(datatype))
Example 8: save_image_collections
# Required module: from skimage import exposure [as alias]
# Or: from skimage.exposure import rescale_intensity [as alias]
def save_image_collections(x, filename, shape=(10, 10), scale_each=False,
                           transpose=False):
    """
    :param shape: tuple
        The shape of the final big image.
    :param x: numpy array
        Input image collections. (number_of_images, rows, columns, channels) or
        (number_of_images, channels, rows, columns)
    :param scale_each: bool
        If true, rescale intensity for each image.
    :param transpose: bool
        If true, transpose x to (number_of_images, rows, columns, channels),
        i.e., put channels behind.
    :return: `uint8` numpy array
        The output image.
    """
    from skimage import io, img_as_ubyte
    from skimage.exposure import rescale_intensity
    makedirs(filename)
    n = x.shape[0]
    if transpose:
        x = x.transpose(0, 2, 3, 1)
    if scale_each is True:
        for i in range(n):
            x[i] = rescale_intensity(x[i], out_range=(0, 1))
    n_channels = x.shape[3]
    x = img_as_ubyte(x)
    r, c = shape
    if r * c < n:
        print('Shape too small to contain all images')
    h, w = x.shape[1:3]
    ret = np.zeros((h * r, w * c, n_channels), dtype='uint8')
    for i in range(r):
        for j in range(c):
            if i * c + j < n:
                ret[i * h:(i + 1) * h, j * w:(j + 1) * w, :] = x[i * c + j]
    ret = ret.squeeze()
    io.imsave(filename, ret)
Example 9: hist_stretch
# Required module: from skimage import exposure [as alias]
# Or: from skimage.exposure import rescale_intensity [as alias]
def hist_stretch(im, percentiles=(1, 99)):
    im = im * 100000
    # im = np.array(im, np.int64)
    # Use the values at the given percentiles as the input range for the stretch.
    p_low, p_high = np.percentile(im, percentiles)
    return exposure.rescale_intensity(im, in_range=(p_low, p_high))
Example 10: apply_random_intensity_rescale
# Required module: from skimage import exposure [as alias]
# Or: from skimage.exposure import rescale_intensity [as alias]
def apply_random_intensity_rescale(self, image, percent=30):
    """Apply random intensity rescale on an image (not used)"""
    random = np.random.randint(0, 100)
    if random < percent:
        v_min, v_max = np.percentile(image, (0.2, 99.8))
        image = exposure.rescale_intensity(image, in_range=(v_min, v_max))
    return image
Example 11: scale_intensity
# Required module: from skimage import exposure [as alias]
# Or: from skimage.exposure import rescale_intensity [as alias]
def scale_intensity(data, out_min=0, out_max=255):
    """Scale intensity of data in a range defined by [out_min, out_max], based on the 2nd and 98th percentiles."""
    p2, p98 = np.percentile(data, (2, 98))
    return rescale_intensity(data, in_range=(p2, p98), out_range=(out_min, out_max))
Example 12: normalize_volume
# Required module: from skimage import exposure [as alias]
# Or: from skimage.exposure import rescale_intensity [as alias]
def normalize_volume(volume):
    p10 = np.percentile(volume, 10)
    p99 = np.percentile(volume, 99)
    volume = rescale_intensity(volume, in_range=(p10, p99))
    m = np.mean(volume, axis=(0, 1, 2))
    s = np.std(volume, axis=(0, 1, 2))
    volume = (volume - m) / s
    return volume
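A hypothetical call of the function above; the volume shape and value range are arbitrary placeholders, chosen only to show that the clipping and z-score steps compose.
import numpy as np

# Illustrative 3-D volume (e.g. a scan); shape and values are arbitrary.
volume = np.random.rand(64, 64, 32) * 1000.0
volume_norm = normalize_volume(volume)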
Example 13: visualise_overlap
# Required module: from skimage import exposure [as alias]
# Or: from skimage.exposure import rescale_intensity [as alias]
def visualise_overlap(path_img, path_seg, path_out,
                      b_img_scale=BOOL_IMAGE_RESCALE_INTENSITY,
                      b_img_contour=BOOL_SAVE_IMAGE_CONTOUR,
                      b_relabel=BOOL_ANNOT_RELABEL,
                      segm_alpha=MIDDLE_ALPHA_OVERLAP):
    img, _ = tl_data.load_image_2d(path_img)
    seg, _ = tl_data.load_image_2d(path_seg)
    # normalise alpha in range (0, 1)
    segm_alpha = tl_visu.norm_aplha(segm_alpha)
    if b_relabel:
        seg, _, _ = segmentation.relabel_sequential(seg)
    if img.ndim == 2:  # for gray images of ovary
        img = np.rollaxis(np.tile(img, (3, 1, 1)), 0, 3)
    if b_img_scale:
        p_low, p_high = np.percentile(img, q=(3, 98))
        # plt.imshow(255 - img, cmap='Greys')
        img = exposure.rescale_intensity(img, in_range=(p_low, p_high),
                                         out_range='uint8')
    if b_img_contour:
        path_im_visu = os.path.splitext(path_out)[0] + '_contour.png'
        img_contour = segmentation.mark_boundaries(img[:, :, :3], seg,
                                                   color=COLOR_CONTOUR, mode='subpixel')
        plt.imsave(path_im_visu, img_contour)
    # else:  # for colour images of disc
    #     mask = (np.sum(img, axis=2) == 0)
    #     img[mask] = [255, 255, 255]
    fig = tl_visu.figure_image_segm_results(img, seg, SIZE_SUB_FIGURE,
                                            mid_labels_alpha=segm_alpha,
                                            mid_image_gray=MIDDLE_IMAGE_GRAY)
    fig.savefig(path_out)
    plt.close(fig)
Example 14: segment_image
# Required module: from skimage import exposure [as alias]
# Or: from skimage.exposure import rescale_intensity [as alias]
def segment_image(im, parameter_object):
    dims, rows, cols = im.shape
    image2segment = np.dstack((rescale_intensity(im[0],
                                                 in_range=(parameter_object.image_min,
                                                           parameter_object.image_max),
                                                 out_range=(0, 255)),
                               rescale_intensity(im[1],
                                                 in_range=(parameter_object.image_min,
                                                           parameter_object.image_max),
                                                 out_range=(0, 255)),
                               rescale_intensity(im[2],
                                                 in_range=(parameter_object.image_min,
                                                           parameter_object.image_max),
                                                 out_range=(0, 255))))
    felzer = felzenszwalb(np.uint8(image2segment),
                          scale=50,
                          sigma=.01,
                          min_size=5,
                          multichannel=True).reshape(rows, cols)
    props = regionprops(felzer)
    props = np.array([p.area for p in props], dtype='uint64')
    return fill_labels(np.uint64(felzer), props)
Example 15: segmenter_data_transform
# Required module: from skimage import exposure [as alias]
# Or: from skimage.exposure import rescale_intensity [as alias]
def segmenter_data_transform(imb, rotate=None, normalize_pctwise=False):
    if isinstance(imb, tuple) and len(imb) == 2:
        imgs, labels = imb
    else:
        imgs = imb
    # rotate image if training
    if rotate is not None:
        for i in range(imgs.shape[0]):
            degrees = float(np.random.randint(rotate[0], rotate[1])) if \
                isinstance(rotate, tuple) else rotate
            imgs[i, 0] = scipy.misc.imrotate(imgs[i, 0], degrees, interp='bilinear')
            if isinstance(imb, tuple):
                labels[i, 0] = scipy.misc.imrotate(labels[i, 0], degrees, interp='bilinear')
    # assume they are square
    sz = c.fcn_img_size
    x, y = np.random.randint(0, imgs.shape[2] - sz, 2) if imgs.shape[2] > sz else (0, 0)
    imgs = nn.utils.floatX(imgs[:, :, x:x + sz, y:y + sz]) / 255.
    if not normalize_pctwise:
        # standardise each image using statistics of its central crop
        pad = imgs.shape[2] // 5
        cut = imgs[:, 0, pad:-pad, pad:-pad]
        mu = cut.mean(axis=(1, 2)).reshape(imgs.shape[0], 1, 1, 1)
        sigma = cut.std(axis=(1, 2)).reshape(imgs.shape[0], 1, 1, 1)
        imgs = (imgs - mu) / sigma
        imgs = np.minimum(3, np.maximum(-3, imgs))
    else:
        pclow, pchigh = normalize_pctwise if isinstance(normalize_pctwise, tuple) else (20, 70)
        for i in range(imgs.shape[0]):
            pl, ph = np.percentile(imgs[i], (pclow, pchigh))
            imgs[i] = exposure.rescale_intensity(imgs[i], in_range=(pl, ph))
            imgs[i] = 2 * imgs[i] / imgs[i].max() - 1.
            # or other rescaling here to approximate ~ N(0, 1)
    if isinstance(imb, tuple):
        labels = nn.utils.floatX(labels[:, :, x:x + sz, y:y + sz])
        return imgs, labels
    return imgs