This article collects typical usage examples of the Python function skimage.exposure.equalize_adapthist. If you are wondering what exactly equalize_adapthist does, how to call it, or what it looks like in real code, the hand-picked examples below should help.
A total of 15 code examples of equalize_adapthist are shown below, sorted by popularity by default.
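Before the examples, here is a minimal sketch of the basic call pattern, assuming a recent scikit-image release; the sample image and parameter values are illustrative only and do not come from the examples below.

from skimage import data, exposure, img_as_float

# Load a built-in grayscale test image and apply CLAHE.
img = img_as_float(data.camera())
img_clahe = exposure.equalize_adapthist(
    img,
    kernel_size=(64, 64),   # size of the local contextual tiles
    clip_limit=0.02,        # contrast-limiting threshold
    nbins=256,              # number of histogram bins per tile
)
# The result is a float image in [0, 1] with the same shape as the input.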
Example 1: test_adapthist_grayscale
def test_adapthist_grayscale():
    """Test a grayscale float image
    """
    img = skimage.img_as_float(data.astronaut())
    img = rgb2gray(img)
    img = np.dstack((img, img, img))
    with expected_warnings(['precision loss|non-contiguous input',
                            'deprecated']):
        adapted_old = exposure.equalize_adapthist(img, 10, 9, clip_limit=0.01,
                                                  nbins=128)
        adapted = exposure.equalize_adapthist(img, kernel_size=(57, 51),
                                              clip_limit=0.01, nbins=128)
    assert img.shape == adapted.shape
    assert_almost_equal(peak_snr(img, adapted), 102.078, 3)
    assert_almost_equal(norm_brightness_err(img, adapted), 0.0529, 3)
    return data, adapted
Example 2: get_data
def get_data(mypath):
    t0 = time.time()
    print(mypath)
    n = 100000
    data = []
    paths = []
    for i in range(n):
        if i % 100 == 0:
            if i > 0:
                elapsed = time.time() - t0
                left = n - i
                rate = i / elapsed
                ETA = left / rate
                print("ETA: {0}min".format(int(ETA / 60)))
        path = join(mypath, str(i)) + '.fits'
        if not os.path.exists(path):
            continue
        # this line reads the data
        img = pyfits.getdata(path, 0, memmap=False)
        # this line takes the absolute value (negative noise)
        img_adapteq = np.abs(img)
        # this is the preprocessing algorithm; comment this out to remove
        # the preprocessing of the image completely
        img_adapteq = exposure.equalize_adapthist(np.log(img_adapteq + 1.0),
                                                  clip_limit=0.5,
                                                  kernel_size=(4, 4))
        # saving the paths is useful to restore which array belonged to
        # which image on the hard drive
        paths.append(path)
        # add data to list
        data.append(img_adapteq)
Example 3: pre_process
def pre_process(y_dict, train_directories, images, output_shape,
                adaptive_histogram, jobid, arraysize, clip_limit=0.03):
    # Store preprocessed images
    X = []
    y = []
    for train_directory in train_directories:
        # Get valid training images
        filenames = []
        for filename in os.listdir(train_directory):
            if filename.endswith(".jpeg") and filename.split('.')[0] in images:
                filenames.append(filename)
        start = len(filenames) // arraysize * jobid
        end = len(filenames) // arraysize * (jobid + 1)
        if jobid + 1 == arraysize:
            end = len(filenames)
        # preprocess each image
        for filename in filenames[start:end]:
            im = io.imread(train_directory + "/" + filename)
            im = rgb2gray(im)
            im = resize(im, output_shape)
            if adaptive_histogram:
                im = exposure.equalize_adapthist(im, clip_limit=clip_limit)
            X.append(im.flatten())
            y.append(y_dict[filename.split(".jpeg")[0]])
    return X, y
Example 4: find_blobs
def find_blobs(filename):
    feature = ""
    raw_image = io.imread(filename)
    for channel in range(0, 4):
        if channel < 3:
            image = raw_image[:, :, channel]
            image_gray = rgb2gray(image)
            # Smoothing
            image_gray = img_as_ubyte(image_gray)
            image_gray = mean_bilateral(image_gray.astype(numpy.uint16),
                                        disk(20), s0=10, s1=10)
            # Increase contrast
            image_gray = exposure.equalize_adapthist(image_gray, clip_limit=0.03)
            # Find blobs
            blobs_doh = blob_doh(image_gray, min_sigma=1, max_sigma=20,
                                 threshold=.005)
            count = 0
            for blob in blobs_doh:
                y, x, r = blob
                if (x - 400) ** 2 + (y - 400) ** 2 > distance:
                    continue
                count = count + 1
            feature = feature + " " + str(channel + 1) + ":" + str(count)
    return feature
Example 5: equalize_hist_adapt
def equalize_hist_adapt(img=None, window_shape=(10, 10), nbins=256):
    '''
    Contrast Limited Adaptive Histogram Equalization (CLAHE).

    Increases local contrast.

    Parameters
    ----------
    img : array_like
        Single image as numpy array or multiple images as array-like object
    window_shape : tuple of integers
        Specifies the shape of the window as follows (dx, dy)
    nbins : integer
        Number of bins to calculate histogram

    References
    ----------
    .. [1] http://scikit-image.org/docs/dev/auto_examples/color_exposure/plot_local_equalize.html  # noqa
    .. [2] https://en.wikipedia.org/wiki/Histogram_equalization
    '''
    minimum = img.min()
    maximum = img.max()
    img = rescale_intensity(img, 0, 1)
    img = exposure.equalize_adapthist(img, kernel_size=window_shape,
                                      nbins=nbins)
    img_out = rescale_intensity(img, minimum, maximum)
    return img_out
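A brief, hypothetical usage sketch of the helper above; the input array is a placeholder, and it assumes a 2-D float image and that equalize_hist_adapt (and the rescale_intensity it relies on) is importable from the module the excerpt comes from.

import numpy as np

frame = np.random.rand(256, 256)   # placeholder image, not from the source
enhanced = equalize_hist_adapt(frame, window_shape=(16, 16), nbins=256)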
Example 6: Image_ws_tranche
def Image_ws_tranche(image):
    laser = Detect_laser(image)
    laser_tranche = tranche_image(laser, 60)
    image_g = skimage.color.rgb2gray(image)
    image_g = image_g * laser_tranche
    image_med = rank2.median((image_g * 255).astype('uint8'), disk(8))
    image_clahe = exposure.equalize_adapthist(image_med, clip_limit=0.03)
    image_clahe_stretch = exposure.rescale_intensity(image_clahe, out_range=(0, 256))
    image_grad = rank2.gradient(image_clahe_stretch, disk(3))
    image_grad_mark = image_grad < 20
    image_grad_forws = rank2.gradient(image_clahe_stretch, disk(1))
    image_grad_mark_closed = closing(image_grad_mark, disk(1))
    Labelised = (skimage.measure.label(image_grad_mark_closed, 8, 0)) + 1
    Watersheded = watershed(image_grad_forws, Labelised)
    cooc = coocurence_liste(Watersheded, laser, 3)
    x, y = compte_occurences(cooc)
    return x, y
Example 7: returnProcessedImage
def returnProcessedImage(que, folder, img_flist):
    X = []
    for fname in img_flist:
        cur_img = imread(folder + '/' + fname, as_grey=True)
        cur_img = 1 - cur_img
        ######## randomly add samples
        # random add contrast
        r_for_eq = random()
        cur_img = equalize_adapthist(cur_img, ntiles_x=8, ntiles_y=8,
                                     clip_limit=(r_for_eq + 0.5) / 3)
        # random morphological operation
        r_for_mf_1 = random()
        if 0.05 < r_for_mf_1 < 0.25:  # small vessel
            selem1 = disk(0.5 + r_for_mf_1)
            cur_img = dilation(cur_img, selem1)
            cur_img = erosion(cur_img, selem1)
        elif 0.25 < r_for_mf_1 < 0.5:  # large vessel
            selem2 = disk(2.5 + r_for_mf_1 * 3)
            cur_img = dilation(cur_img, selem2)
            cur_img = erosion(cur_img, selem2)
        elif 0.5 < r_for_mf_1 < 0.75:  # exudate
            selem1 = disk(9.21)
            selem2 = disk(7.21)
            dilated1 = dilation(cur_img, selem1)
            dilated2 = dilation(cur_img, selem2)
            cur_img = np.subtract(dilated1, dilated2)
        cur_img = img_as_float(cur_img)
        X.append([cur_img.tolist()])
    # X = np.array(X, dtype=theano.config.floatX)
    que.put(X)
    return X
Example 8: set_roi_images
def set_roi_images(self):
    for i, rois_ in enumerate(self.roi_sets):
        temp_im = rois_[0]['patches'][self.roi_idx][self._show_im[i]]
        temp_im /= np.max(temp_im)
        im_to_set = exposure.equalize_adapthist(temp_im,
                                                clip_limit=.005)
        #im_to_set *= (im_to_set+sobel(im_to_set))
        #im_to_set = temp_im
        self.imgs[i].setImage(im_to_set, autolevels=1)
        #if 'centroid_patches' in rois_[0].keys():
        #    self.centroid_patches[i].setImage(rois_[0]['centroid_patches'][self.roi_idx], autolevels=1)
        m_ = rois_[0]['masks'][self.roi_idx]
        #print np.mean(np.array(np.where(m_)),axis=1)
        #print m_.shape==(100,100)
        #print np.all([iii.shape==(100,100) for iii in rois_[0]['masks']]), i
        m2_ = np.dstack([m_, np.zeros(m_.shape), np.zeros(m_.shape), m_])
        self.masks[i].setImage(m2_)
        self.masks[i].setOpacity(.2)
        if rois_[0]['isPresent'][self.roi_idx] == 0:
            fr = self.redframe
        else:
            fr = self.greenframe
        self.frames[i].setImage(fr)
        if rois_[0]['drawn_onday'][self.roi_idx]:
            self.drawnTexts[i].setText('Drawn On Day', color=[0, 0, 250])
        else:
            self.drawnTexts[i].setText('Copied', color=[250, 0, 0])
        self.confTxts[i].setText(self.confidence_labels[(rois_[0]['confidence'][self.roi_idx])])
Example 9: extract_patches
def extract_patches(path, numPatchesPerImage, patchSize):
    """
    :param path: path to an RGB fundus image
    :param numPatchesPerImage: number of patches to extract per image
    :param patchSize: patch is nxn size
    :return: patches: matrix with an image patch in each row
    """
    img = load(path)
    img = img[:, :, 1]
    # contrast enhancement
    img = equalize_adapthist(img)
    windows = view_as_windows(img, (patchSize, patchSize))
    j = 0
    patches = np.zeros((numPatchesPerImage, patchSize * patchSize))
    while j < numPatchesPerImage:
        sx = np.random.randint(0, windows.shape[0] - 1)
        sy = np.random.randint(0, windows.shape[0] - 1)
        x = (patchSize / 2 - 1) + sx
        y = (patchSize / 2 - 1) + sy
        r = (img.shape[0] / 2) - 1
        if np.sqrt((x - r) ** 2 + (y - r) ** 2) < r:
            patch = windows[sx, sy, :].flatten()
            patches[j, :] = patch
            j += 1
        else:
            if j > 0:
                j -= 1
    return patches
Example 10: imagefile2dat
def imagefile2dat(imageFilename, rotate=False, overwrite=False):
    """Load an image file and save it in the format read by the C code"""
    global m
    global n
    global fringeDatFilename
    global wrappedDatFilename
    # read image file
    orig = io.imread(imageFilename, as_grey=True)
    img = exposure.equalize_adapthist(orig)
    img = exposure.rescale_intensity(img, out_range=(0, 255))
    if rotate:
        img = np.transpose(img)
    n = len(img)
    m = len(img[0])
    fileroot, ext = os.path.splitext(imageFilename)
    fringeDatFilename = fileroot + '.dat'
    wrappedDatFilename = fileroot + 'W.dat'
    if not os.path.isfile(fringeDatFilename) or overwrite:
        print('Writing ' + fringeDatFilename)
        # write in proper binary format
        data = np.reshape(np.transpose(img), (n * m, 1))
        newFile = open(fringeDatFilename, "wb")
        newFile.write(pack(str(n * m) + 'B', *data))
        newFile.close()
    else:
        print('Skipped overwriting ' + fringeDatFilename)
    return img
Example 11: enhance
def enhance(in_file, clip_limit=0.010, in_mask=None, out_file=None):
    import numpy as np
    import nibabel as nb
    import os.path as op
    from skimage import exposure, img_as_int

    if out_file is None:
        fname, fext = op.splitext(op.basename(in_file))
        if fext == '.gz':
            fname, _ = op.splitext(fname)
        out_file = op.abspath('./%s_enh.nii.gz' % fname)

    im = nb.load(in_file)
    imdata = im.get_data()
    imshape = im.get_shape()

    if in_mask is not None:
        msk = nb.load(in_mask).get_data()
        msk[msk > 0] = 1
        msk[msk < 1] = 0
        imdata = imdata * msk

    immin = imdata.min()
    imdata = (imdata - immin).astype(np.uint16)

    adapted = exposure.equalize_adapthist(imdata.reshape(imshape[0], -1),
                                          clip_limit=clip_limit)

    nb.Nifti1Image(adapted.reshape(imshape), im.get_affine(),
                   im.get_header()).to_filename(out_file)
    return out_file
Example 12: main
def main(image):
    matplotlib.rcParams["font.size"] = 10

    def show_img(img, axes):
        """Plot the image as float"""
        # img = img_as_float(img)
        ax_img = axes
        ax_img.imshow(img, cmap=plt.cm.gray)
        ax_img.set_axis_off()
        return ax_img

    # Open and read in the FITS image
    try:
        fits = pyfits.open(image)
        # fits = Image.open(image)
    except IOError:
        print("Can not read the fits image: " + image + " !!")
    # Check the input image
    img = fits[0].data
    # img = np.array(fits)
    if img.ndim != 2:
        raise NameError("Data need to be 2-D image !")
    # Logarithmic scaling of the image
    img_log = np.log10(img)
    img_log = img_as_float(img_log)
    # Contrast stretching
    p5, p95 = np.percentile(img, (2, 98))
    img_rescale = exposure.rescale_intensity(img, in_range=(p5, p95))
    # Adaptive equalization
    img_new = bytescale(img_rescale)
    img_ahe = exposure.equalize_adapthist(img_new, ntiles_x=16, ntiles_y=16,
                                          clip_limit=0.05, nbins=256)
    img_ahe = img_as_float(img_ahe)
    # Display results
    fig, axes = plt.subplots(nrows=1, ncols=3, figsize=(16, 5))
    # Original image
    ax_img = show_img(img_log, axes[0])
    ax_img.set_title("Original")
    # Contrast Enhanced one
    ax_img = show_img(img_rescale, axes[1])
    ax_img.set_title("Rescale")
    # AHE Enhanced one
    ax_img = show_img(img_ahe, axes[2])
    ax_img.set_title("AHE")
    # Prevent overlap of y-axis
    fig.subplots_adjust(bottom=0.1, right=0.9, top=0.9, left=0.1, wspace=0.05)
    # Save a PNG file
    plt.gcf().savefig("ahe_test.png")
Example 13: equalize_adaptive
def equalize_adaptive(image, n_tiles=8, clip_limit=0.01):
    eqproj = equalize_adapthist(image,
                                ntiles_x=n_tiles,
                                ntiles_y=n_tiles,
                                clip_limit=clip_limit)
    return eqproj
Example 14: analyse
def analyse(self, **kwargs):
    image_object = kwargs['image']
    if image_object is None:
        raise RuntimeError()
    # Read the image
    image = cv2.imread(self.image_utils.getOutputFilename(image_object.id))
    if image is None:
        print('File not found')
        return
    # Work on green channel
    gray = image[:, :, 1]
    # Apply Otsu thresholding
    thresh = filters.threshold_otsu(gray)
    gray[gray < thresh] = 0
    # Apply histogram equalization
    gray = exposure.equalize_adapthist(gray) * 255
    # Create elevation map
    elevation_map = filters.sobel(gray)
    gray = gray.astype(int)
    # Create cell markers
    markers = numpy.zeros_like(gray)
    markers[gray < 100] = 2  # seen as white in plot
    markers[gray > 150] = 1  # seen as black in plot
    # Segment with watershed using elevation map
    segmentation = morphology.watershed(elevation_map, markers)
    segmentation = ndi.binary_fill_holes(segmentation - 1)
    # labeled_image, n = ndi.label(segmentation)
    # Watershed with distance transform
    kernel = numpy.ones((5, 5), numpy.uint8)
    distance = ndi.distance_transform_edt(segmentation)
    distance2 = cv2.erode(distance, kernel)
    distance2 = cv2.dilate(distance2, kernel)
    local_max = peak_local_max(distance2, num_peaks=1, indices=False, labels=segmentation)
    markers2 = ndi.label(local_max)[0]
    labels = morphology.watershed(-distance2, markers2, mask=segmentation)
    # Extract regions (caching uses more memory)
    regions = regionprops(labels, cache=True)
    # Filter out big wrong regions
    regions = [region for region in regions if region.area < 2000]
    # Set result
    result = str(len(regions))
    return result
Example 15: segment
def segment(self, image):
    img = equalize_adapthist(image)
    (x, y, c) = img.shape
    points = img.reshape(x * y, c)
    labels = self.logreg.predict(points)
    labeled_img = labels.reshape(x, y)
    (top, right, bottom, left) = self.get_bounding_rect(labeled_img)
    # return img[left-self.margin:right+self.margin, top-self.margin:bottom+self.margin]
    return img[left:right, top:bottom]