This article collects and organizes typical usage examples of the skimage.morphology.skeletonize function in Python. If you are wondering what exactly skeletonize does, how it is called, or what real-world uses look like, the hand-picked examples here should help.
The sections below show 15 code examples of the skeletonize function, drawn from real projects and ordered by popularity.
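As a quick orientation before the excerpts, here is a minimal, self-contained sketch of the basic call (it is not taken from any of the projects below): skeletonize expects a 2D binary image and returns a boolean array of the same shape in which every foreground region has been thinned to a one-pixel-wide skeleton.

import numpy as np
from skimage.morphology import skeletonize

binary = np.zeros((9, 9), dtype=bool)
binary[2:7, 2:7] = True            # a small filled square
skeleton = skeletonize(binary)     # boolean array of the same shape
print(skeleton.astype(np.uint8))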
Example 1: edge_detect
def edge_detect(depth, color):
# Get the gradient direction from the depth image
graddir = grad_dir(depth)
# plt.imshow(graddir)
# plt.show()
# kernel for dilation
kernel = np.ones((5, 5), np.uint8)
# Rescale the gradient direction image to the 0-255 uint8 range
bw2 = (((graddir - graddir.min()) / (graddir.max() - graddir.min())) * 255.9).astype(np.uint8)
# removes the salt and pepper noise
# by replacing pixels with
# the median value of the area
median = cv2.medianBlur(bw2, 9)
# find edges with the canny edge detector
bw2 = auto_canny(median)
dilation2 = cv2.dilate(bw2, kernel, iterations=1)
skel2 = morphology.skeletonize(dilation2 > 0)
# Now run canny edge detector on the colour image
# create a CLAHE object (Arguments are optional).
# this does adaptive histogram equalization on the image
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
cl1 = clahe.apply(color)
# median = cv2.medianBlur(bw2,5)
# bw1 = cv2.GaussianBlur(cl1, (3,3), 0)
# Perform canny edge detection on colour image, twice
# 1. Detect outlines and fill with close function
# 2. Detect outlines of now filled contoured image
bw1 = auto_canny(cl1)
closing = cv2.morphologyEx(bw1, cv2.MORPH_CLOSE, kernel, iterations=6)
# dilation1 = cv2.dilate(bw1,kernel,iterations = 1)
# skel1 = morphology.skeletonize(dilation1 > 0)
bw1 = auto_canny(closing)
# combine the edges from the color image and the depth image
orop = (np.logical_or(bw1, skel2)).astype('uint8')
# display results
plt.subplot(1, 2, 1), plt.imshow(graddir, cmap='jet')
plt.title('gradient dir'), plt.xticks([]), plt.yticks([])
plt.subplot(1, 2, 2), plt.imshow(median, cmap='gray')
plt.title('blurred image'), plt.xticks([]), plt.yticks([])
plt.show()
# dilate and skeletonize on combined image
kernel = np.ones((5, 5), np.uint8)
dilation = cv2.dilate(orop, kernel, iterations=1)
img_out = morphology.skeletonize(dilation > 0)
return img_out
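grad_dir and auto_canny are helper functions that are not part of this excerpt. As a hedged sketch only, a common way to write an auto_canny-style helper (not necessarily the one used in this project) is to derive both Canny thresholds from the median intensity:

import numpy as np
import cv2

def auto_canny(image, sigma=0.33):
    # Hypothetical stand-in for the helper above: pick the Canny thresholds
    # around the median intensity so the detector adapts to image contrast.
    v = np.median(image)
    lower = int(max(0, (1.0 - sigma) * v))
    upper = int(min(255, (1.0 + sigma) * v))
    return cv2.Canny(image, lower, upper)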
Example 2: get_mask
def get_mask(img_org):
noise_ratio = 0.3
img = img_org > noise_ratio * img_org.max()
skeleton = skeletonize(img)
mask = dilation(skeleton > 0, disk(2))
return mask
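A hedged usage sketch with a made-up test image (it assumes the names used inside get_mask, namely skeletonize, dilation and disk, are imported in the surrounding module): the function keeps pixels above 30% of the maximum intensity, thins them to a skeleton, and re-thickens that skeleton with a radius-2 disk.

import numpy as np

img = np.zeros((64, 64), dtype=float)
img[32, 10:54] = 1.0          # a single bright ridge
mask = get_mask(img)          # the ridge, re-thickened to about 5 px
print(mask.shape, mask.sum())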
Example 3: get_descriptor_lvX
def get_descriptor_lvX(self,img):
ori = img
#img = cv2.bitwise_not(numpy.array(img))
#img = threshold_adaptive(numpy.array(img), 40)
#img = cv2.bitwise_not(img*255.)
img = skeletonize(numpy.array(img)/255.)*255.
'''figure()
gray()
subplot(221)
imshow(ori)
subplot(222)
imshow(img)
show()'''
#e = stats.entropy(img.flatten())
#if math.isnan(e) or math.isinf(e):
# return 0
#else:
# return e
descs = hog(numpy.array(img), orientations=4, pixels_per_cell=(10, 10),cells_per_block=(3, 3),visualise=False)
'''figure()
gray()
imshow(img)
figure()
imshow(hpgimg)
show()'''
return descs
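Aside: the visualise keyword passed to hog above was renamed to visualize in newer scikit-image releases. Below is a hedged, self-contained sketch of the same skeletonize-then-HOG idea on a synthetic glyph; the array and parameter values are illustrative, not from the original project.

import numpy as np
from skimage.feature import hog
from skimage.morphology import skeletonize

glyph = np.zeros((40, 40), dtype=float)
glyph[5:35, 17:23] = 255.0                  # a thick vertical stroke
thin = skeletonize(glyph / 255.) * 255.     # same normalisation as above
descs = hog(thin, orientations=4, pixels_per_cell=(10, 10), cells_per_block=(3, 3))
print(descs.shape)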
Example 4: polylinesFromBinImage
def polylinesFromBinImage(img, minimum_cluster_size=6,
remove_small_obj_size=3,
reconnect_size=3,
max_n_contours=None, max_len_contour=None,
copy=True):
'''
return a list of arrays of un-branching contours
img -> (boolean) array
optional:
---------
minimum_cluster_size -> minimum number of pixels connected together to build a contour
##search_kernel_size -> TODO
##min_search_kernel_moment -> TODO
numeric:
-------------
max_n_contours -> maximum number of possible contours in img
max_len_contour -> maximum contour length
'''
assert minimum_cluster_size > 1
assert reconnect_size % 2, 'reconnect_size needs to be odd'
# assert search_kernel_size == 0 or search_kernel_size > 2 and search_kernel_size%2, 'kernel size needs to be odd'
# assume array size parameters, is not given:
if max_n_contours is None:
max_n_contours = max(img.shape)
if max_len_contour is None:
max_len_contour = sum(img.shape[:2])
# array containing coord. of all contours:
contours = np.zeros(shape=(max_n_contours, max_len_contour, 2),
dtype=np.uint16) # if not search_kernel_size else np.float32)
if img.dtype != bool:
img = img.astype(bool)
elif copy:
img = img.copy()
if remove_small_obj_size:
remove_small_objects(img, remove_small_obj_size,
connectivity=2, in_place=True)
if reconnect_size:
# remove gaps
maximum_filter(img, reconnect_size, output=img)
# reduce contour width to 1
img = skeletonize(img)
n_contours = _populateContoursArray(img, contours, minimum_cluster_size)
contours = contours[:n_contours]
l = []
for c in contours:
ind = np.zeros(shape=len(c), dtype=bool)
_getValidInd(c, ind)
# remove all empty spaces:
l.append(c[ind])
return l
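_populateContoursArray and _getValidInd are compiled helpers that are not shown, so the function above cannot be run from this excerpt alone. Its preprocessing chain, remove small objects, bridge gaps, then thin, can however be sketched on its own (synthetic input, illustrative sizes):

import numpy as np
from scipy.ndimage import maximum_filter
from skimage.morphology import remove_small_objects, skeletonize

img = np.zeros((30, 30), dtype=bool)
img[5:8, 5:25] = True               # a short thick stroke
img[20, 20] = True                  # an isolated speck to be discarded
img = remove_small_objects(img, 3, connectivity=2)
img = maximum_filter(img, 3)        # bridge small gaps before thinning
thin = skeletonize(img)             # one-pixel-wide centrelines
print(thin.sum())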
Example 5: bottleneck_distribution
def bottleneck_distribution(image):
"""
Count the distribution of bottlenecks
:param image: data (binary)
:type image: :py:class:`numpy.ndarray`
:return: count of bottlenecks of size 4 and size 2
:rtype: tuple(int)
"""
skel = morphology.skeletonize(image)
# get the distances
dists = ndimage.distance_transform_edt(image)
# ok for all the nonzero in the skeleton, we get the distances
x_nz, y_nz = skel.nonzero() # get all the nonzero indices
width4 = 0
width2 = 0
for i in range(len(x_nz)):
x = x_nz[i]
y = y_nz[i]
dist = dists[x,y]
if dist <= 4:
width4 += 1
if dist <= 2:
width2 += 1
return width4, width2
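A hedged usage sketch with a synthetic input (assuming morphology and ndimage are imported in the surrounding module, as the excerpt implies): on a 3-pixel-wide bar the distance from every skeleton pixel to the background is at most 2, so each skeleton pixel falls into both width bins.

import numpy as np

image = np.zeros((20, 20), dtype=bool)
image[8:11, 2:18] = True                  # a 3-pixel-wide bar
width4, width2 = bottleneck_distribution(image)
print(width4, width2)                     # both counts equal the skeleton length here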
Example 6: skeletonize_mitochondria
def skeletonize_mitochondria(mch_channel):
mch_collector = np.max(mch_channel, axis=0) # TODO: check max projection v.s. sum
skeleton_labels = np.zeros(mch_collector.shape, dtype=np.uint8)
# thresh = np.max(mch_collector)/2.
thresh = threshold_otsu(mch_collector)
# use adaptive threshold? => otsu seems to be sufficient in this case
skeleton_labels[mch_collector > thresh] = 1
skeleton2 = skeletonize(skeleton_labels)
skeleton, distance = medial_axis(skeleton_labels, return_distance=True)
active_threshold = np.mean(mch_collector[skeleton_labels]) * 5
# print active_threshold
transform_filter = np.zeros(mch_collector.shape, dtype=np.uint8)
transform_filter[np.logical_and(skeleton > 0, mch_collector > active_threshold)] = 1
skeleton = transform_filter * distance
skeleton_ma = np.ma.masked_array(skeleton, skeleton > 0)
skeleton_convolve = ndi.convolve(skeleton_ma, np.ones((3, 3)), mode='constant', cval=0.0)
divider_convolve = ndi.convolve(transform_filter, np.ones((3, 3)), mode='constant', cval=0.0)
skeleton_convolve[divider_convolve > 0] = skeleton_convolve[divider_convolve > 0] / \
divider_convolve[divider_convolve > 0]
new_skeleton = np.zeros_like(skeleton)
new_skeleton[skeleton2] = skeleton_convolve[skeleton2]
skeleton = new_skeleton
return skeleton_labels, mch_collector, skeleton, transform_filter
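The core idea of this example, using skeletonize for the topology and medial_axis for the local width along that skeleton, can be isolated in a short, self-contained sketch (synthetic blob, illustrative sizes):

import numpy as np
from skimage.morphology import medial_axis, skeletonize

blob = np.zeros((40, 40), dtype=np.uint8)
blob[10:30, 15:25] = 1
topology = skeletonize(blob)                         # where the centreline runs
_, width = medial_axis(blob, return_distance=True)   # local half-width everywhere
width_along_skeleton = width * topology
print(width_along_skeleton.max())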
Example 7: ruler_scale_factor
def ruler_scale_factor(image, distance):
"""Returns the scale factor to convert from image coordinates to real world coordinates
Args:
image: BGR image of shape n x m x 3.
distance: The real world size of the smallest graduation spacing
Returns:
float: Unitless scale factor from image coordinates to real world coordinates.
"""
height, width = image.shape[:2]
image, mask = find_ruler(image)
binary_image = mask * threshold(image, mask)
if binary_image[mask].mean() > 0.5:
binary_image[mask] = ~binary_image[mask]
remove_large_components(binary_image, max(height, width))
edges = skeletonize(binary_image)
hspace, angles, distances = hough_transform(edges)
features = hspace_features(hspace, splits=16)
angle_index = best_angles(np.array(features))
max_graduation_size = int(max(image.shape))
line_separation_pixels = find_grid(hspace[:, angle_index], max_graduation_size)
logging.info('Line separation: {:.3f}'.format(line_separation_pixels))
return distance / line_separation_pixels
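find_ruler, threshold, hough_transform and the other helpers are not shown in this excerpt. The reason for skeletonizing before the Hough transform, thinning the graduation marks so each produces a single sharp peak, can be sketched with skimage.transform.hough_line, used here as an assumed stand-in for the unshown hough_transform helper:

import numpy as np
from skimage.morphology import skeletonize
from skimage.transform import hough_line

binary = np.zeros((50, 50), dtype=bool)
binary[:, 20:24] = True                    # a 4-pixel-wide vertical stripe
edges = skeletonize(binary)                # thin to 1 px so the Hough peak stays sharp
hspace, angles, dists = hough_line(edges)
print(int(hspace.max()))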
Example 8: skeletonize
def skeletonize(mask):
"""Reduces binary objects to 1 pixel wide representations (skeleton)
Inputs:
mask = Binary image data
Returns:
skeleton = skeleton image
:param mask: numpy.ndarray
:return skeleton: numpy.ndarray
"""
# Store debug
debug = params.debug
params.debug = None
# Convert mask to boolean image, rather than 0 and 255 for skimage to use it
skeleton = skmorph.skeletonize(mask.astype(bool))
skeleton = skeleton.astype(np.uint8) * 255
# Reset debug mode
params.debug = debug
# Auto-increment device
params.device += 1
if params.debug == 'print':
print_image(skeleton, os.path.join(params.debug_outdir, str(params.device) + '_skeleton.png'))
elif params.debug == 'plot':
plot_image(skeleton, cmap='gray')
return skeleton
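Stripped of the debug plumbing, the core of this wrapper is a 0/255 to bool to 0/255 round trip, since skeletonize treats its input as binary. A minimal self-contained sketch:

import numpy as np
from skimage import morphology as skmorph

mask = np.zeros((32, 32), dtype=np.uint8)
mask[10:22, 10:22] = 255                    # OpenCV-style 0/255 binary mask
skeleton = skmorph.skeletonize(mask.astype(bool))
skeleton = skeleton.astype(np.uint8) * 255  # back to 0/255 for display or saving
print(skeleton.max())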
Example 9: skeletonize_mitochondria
def skeletonize_mitochondria(mCh_channel):
mch_collector = np.max(mCh_channel, axis=0)  # TODO: check max projection vs. sum
labels = np.zeros(mch_collector.shape, dtype=np.uint8)
# thresh = np.max(mch_collector)/2.
thresh = threshold_otsu(mch_collector)
# TODO: use adaptive threshold? => otsu seems to be sufficient in this case
# http://scikit-image.org/docs/dev/auto_examples/xx_applications/plot_thresholding.html#sphx-glr-auto-examples-xx-applications-plot-thresholding-py
# log-transform? => Nope, does not work
# TODO: hessian/laplacian of gaussian blob detection?
labels[mch_collector > thresh] = 1
skeleton2 = skeletonize(labels)
skeleton, distance = medial_axis(labels, return_distance=True)
active_threshold = np.mean(mch_collector[labels]) * 5
# print active_threshold
transform_filter = np.zeros(mch_collector.shape, dtype=np.uint8)
transform_filter[np.logical_and(skeleton > 0, mch_collector > active_threshold)] = 1
skeleton = transform_filter * distance
skeleton_ma = np.ma.masked_array(skeleton, skeleton > 0)
skeleton_convolve = ndi.convolve(skeleton_ma, np.ones((3, 3)), mode='constant', cval=0.0)
divider_convolve = ndi.convolve(transform_filter, np.ones((3, 3)), mode='constant', cval=0.0)
skeleton_convolve[divider_convolve > 0] = skeleton_convolve[divider_convolve > 0] \
/ divider_convolve[divider_convolve > 0]
new_skeleton = np.zeros_like(skeleton)
new_skeleton[skeleton2] = skeleton_convolve[skeleton2]
skeleton = new_skeleton
return labels, mch_collector, skeleton, transform_filter
Example 10: get_thinned
def get_thinned(binaryArr):
"""
Return thinned output
Parameters
----------
binaryArr : Numpy array
2D or 3D binary numpy array
Returns
-------
result : boolean Numpy array
2D or 3D binary thinned numpy array of the same shape
"""
assert np.max(binaryArr) in [0, 1], "input must always be a binary array"
voxCount = np.sum(binaryArr)
if binaryArr.sum() == 0:
return binaryArr.astype(bool)
elif len(binaryArr.shape) == 2:
return skeletonize(binaryArr).astype(bool)
else:
start_skeleton = time.time()
zOrig, yOrig, xOrig = np.shape(binaryArr)
orig = np.lib.pad(binaryArr, 1, 'constant')
result = cy_get_thinned3D(np.uint64(orig))
print("thinned %i number of pixels in %0.2f seconds" % (voxCount, time.time() - start_skeleton))
return result[1:zOrig + 1, 1: yOrig + 1, 1: xOrig + 1].astype(bool)
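The 3D branch depends on cy_get_thinned3D, a compiled routine that is not included in this excerpt. For comparison only (this is an alternative, not the routine the example actually calls), scikit-image itself ships a 3D-capable thinning, skeletonize_3d, available since roughly version 0.13:

import numpy as np
from skimage.morphology import skeletonize_3d

volume = np.zeros((20, 20, 20), dtype=np.uint8)
volume[5:15, 9:12, 9:12] = 1                   # a thick vertical column
thinned = skeletonize_3d(volume).astype(bool)  # one-voxel-wide centreline
print(int(thinned.sum()))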
Example 11: calculate
def calculate(self, image: np.ndarray, disk_size: int=9,
mean_threshold: int=100, min_object_size: int=750) -> float:
# Find edges that have a strong vertical direction
vertical_edges = sobel_v(image)
# Separate out the areas where there is a large amount of vertically-oriented stuff
segmentation = self._segment_edge_areas(vertical_edges, disk_size, mean_threshold, min_object_size)
# Draw a line that follows the center of the segments at each point, which should be roughly vertical
# We should expect this to give us four approximately-vertical lines, possibly with many gaps in
# each line
skeletons = skeletonize(segmentation)
# Use the Hough transform to get the closest lines that approximate those four lines
hough = transform.hough_line(skeletons, np.arange(-constants.FIFTEEN_DEGREES_IN_RADIANS,
constants.FIFTEEN_DEGREES_IN_RADIANS,
0.0001))
# Create a list of the angles (in radians) of all of the lines the Hough transform produced, with 0.0
# being completely vertical
# These angles correspond to the angles of the four sides of the channels, which we need to
# correct for
angles = [angle for _, angle, dist in zip(*transform.hough_line_peaks(*hough))]
if not angles:
raise ValueError("Image rotation could not be calculated. Check the images to see if they're weird.")
else:
# Get the average angle and convert it to degrees
offset = sum(angles) / len(angles) * 180.0 / math.pi
if offset > constants.ACCEPTABLE_SKEW_THRESHOLD:
log.warn("Image is heavily skewed. Check that the images are valid.")
return offset
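A quick sanity check of the final conversion, with illustrative numbers only: two peak angles of 0.01 rad and 0.03 rad average to 0.02 rad, which is about 1.15 degrees of skew.

import math

angles = [0.01, 0.03]          # illustrative Hough peak angles in radians
offset = sum(angles) / len(angles) * 180.0 / math.pi
print(round(offset, 2))        # ~1.15 degrees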
Example 12: label_image
def label_image(image):
# Crop the region of interest (rows 50:520, cols 240:640) in one slicing step
ROI = image[50:520, 240:640, :3].astype(np.uint8)
gray_ROI = cv2.cvtColor(ROI,cv2.COLOR_BGR2GRAY)
ROI_flou = cv2.medianBlur((ROI).astype('uint8'),3)
Laser = Detecte_laser.Detect_laser(ROI_flou)
open_laser = cv2.morphologyEx(Laser, cv2.MORPH_DILATE, disk(3))
skel = skeletonize(open_laser > 0)
tranche = Detecte_laser.tranche(skel,90,30)
ret, thresh = cv2.threshold(gray_ROI*tranche.astype('uint8'),0,1,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
thresh01 = thresh<1.0
open_thresh = cv2.morphologyEx(thresh01.astype('uint8'), cv2.MORPH_OPEN, disk(10))
labelised = (label(open_thresh,8,0))+1
return gray_ROI,labelised
Example 13: label_particles_edge
def label_particles_edge(im, sigma=2, closing_size=0, **extra_args):
""" Segment image using Canny edge-finding filter.
parameters
----------
im : image in which to find particles
sigma : size of the Canny filter
closing_size : size of the closing filter
returns
-------
labels : an image array of uniquely labeled segments
"""
from skimage.morphology import square, binary_closing, skeletonize
if skimage_version < StrictVersion('0.11'):
from skimage.filter import canny
else:
from skimage.filters import canny
edges = canny(im, sigma=sigma)
if closing_size > 0:
edges = binary_closing(edges, square(closing_size))
edges = skeletonize(edges)
labels = sklabel(edges)
print "found {} segments".format(labels.max())
# in ma.array mask, False is True, and vice versa
labels = np.ma.array(labels, mask=edges == 0)
return labels
Example 14: enhance_edges
def enhance_edges(self):
""" GNIRS_edge_detector method to enhance the footprint edges using the Sobel
kernel. Generate two binary images, one with the left edges
and the other showing the right edges only. This is because the
MDF information about the footprints location is not well
determined.
"""
sdata=nd.sobel(self.image,axis=self.axis)
std = np.std(sdata)
bdata=np.where(sdata>std,1,0)
# Make the edges one pixel wide
self.left_bin_image = skeletonize(bdata)
bdata=np.where(sdata < -std,1,0)
self.right_bin_image = skeletonize(bdata)
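A self-contained sketch of the same idea, splitting the Sobel response by sign, thresholding at one standard deviation, and thinning each edge map to a single pixel; the synthetic band and the names below are illustrative and not part of the GNIRS class:

import numpy as np
import scipy.ndimage as nd
from skimage.morphology import skeletonize

image = np.zeros((40, 40), dtype=float)
image[:, 15:25] = 1.0                              # a bright vertical band
sdata = nd.sobel(image, axis=1)                    # horizontal gradient
std = np.std(sdata)
left = skeletonize(np.where(sdata > std, 1, 0))    # rising (left-hand) edge
right = skeletonize(np.where(sdata < -std, 1, 0))  # falling (right-hand) edge
print(int(left.sum()), int(right.sum()))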
Example 15: test_skeletonize_already_thinned
def test_skeletonize_already_thinned(self):
im = np.zeros((5, 5), np.uint8)
im[3, 1:-1] = 1
im[2, -1] = 1
im[4, 0] = 1
result = skeletonize(im)
np.testing.assert_array_equal(result, im)