This article collects typical usage examples of the Python method scipy.ndimage.measurements.find_objects. If you are unsure what measurements.find_objects does, how to call it, or are looking for working examples, the curated code samples below should help. You can also read further about the module the method belongs to, scipy.ndimage.measurements.
The 10 code examples of measurements.find_objects shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the site recommend better Python code examples.
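Before the examples, here is a minimal, self-contained sketch (the array values are purely illustrative) of what scipy.ndimage.measurements.find_objects returns for a small labelled image:
import numpy as np
from scipy.ndimage import measurements

# A tiny binary image with two connected components.
binary = np.array([[0, 1, 1, 0, 0],
                   [0, 1, 1, 0, 0],
                   [0, 0, 0, 0, 1],
                   [0, 0, 0, 1, 1]])
labeled, num = measurements.label(binary)     # label connected components 1..num
slices = measurements.find_objects(labeled)   # one tuple of slices per label
for i, sl in enumerate(slices, start=1):
    print(i, sl, labeled[sl].shape)           # bounding box of each labelled object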
Example 1: _filter_grouplen
# Required import: from scipy.ndimage import measurements [as alias]
# Or: from scipy.ndimage.measurements import find_objects [as alias]
def _filter_grouplen(arr, minsize=3):
"""Filter out the groups of grid points smaller than minsize
Parameters
----------
arr : the array to filter (an array of booleans)
minsize : the minimum size of the group
Returns
-------
the array, with small groups removed
"""
# Do it with trues
r, nr = label(arr)
nr = [i+1 for i, o in enumerate(find_objects(r)) if (len(r[o]) >= minsize)]
arr = np.asarray([ri in nr for ri in r])
# and with Falses
r, nr = label(~ arr)
nr = [i+1 for i, o in enumerate(find_objects(r)) if (len(r[o]) >= minsize)]
arr = ~ np.asarray([ri in nr for ri in r])
return arr
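A short usage sketch (input values are illustrative; it assumes _filter_grouplen above has been defined with label and find_objects imported from scipy.ndimage.measurements):
import numpy as np
from scipy.ndimage.measurements import label, find_objects

arr = np.array([True, True, True, False, True, False, False, False])
print(_filter_grouplen(arr, minsize=3))
# -> [ True  True  True False False False False False]
# the isolated True at index 4 is merged into the surrounding False group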
Example 2: find_objects
# Required import: from scipy.ndimage import measurements [as alias]
# Or: from scipy.ndimage.measurements import find_objects [as alias]
def find_objects(image: np.ndarray, **kw) -> np.ndarray:
"""
Redefine the scipy.ndimage.measurements.find_objects function to work with
a wider range of data types. The default function is inconsistent about
the data types it accepts on different platforms.
"""
try:
return measurements.find_objects(image, **kw)
except Exception:
pass
types = ["int32", "uint32", "int64", "uint64", "int16", "uint16"]
for t in types:
try:
return measurements.find_objects(np.array(image, dtype=t), **kw)
except Exception:
pass
# let it raise the same exception as before
return measurements.find_objects(image, **kw)
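A hypothetical call to the wrapper above (array contents are illustrative; it assumes the wrapper and the measurements import above, plus NumPy):
import numpy as np

labels = np.zeros((5, 5), dtype="int8")
labels[1:3, 1:3] = 1
labels[4, 4] = 2
print(find_objects(labels))  # falls back to a supported integer dtype when needed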
Example 3: findVerticalAlternative
# Required import: from scipy.ndimage import measurements [as alias]
# Or: from scipy.ndimage.measurements import find_objects [as alias]
def findVerticalAlternative(self):
# This is an alternative method, a bit more expensive
# than the first version, and is called on failure of
# the previous findVertical. It uses Scipy labelling to segment a strip
# of data from the ROI
self.found = False
cx = self.ROIwh[0]//2
expectedW, expectedH = self.expectedSize
win = (expectedW - (expectedW*self.sizeMargin) )//2
#take a vertical section of pixels from the ROI and threshold it
vROI = self.ROIimg[:,cx-win:cx+win]
#Make a single pixel wide strip, with the median of all the rows
vROI = np.median(vROI,axis=1)
threshVal = int(vROI.max() * self.thresholdVal)
vROIthres = vROI >= threshVal
candidate = None
if vROIthres.min() != vROIthres.max():
# Prevent a divide by zero because roi is all the same value.
# e.g. we have a frame completely white or black
lbl,numLbl = nd.label(vROIthres)
obj = nd.find_objects(lbl)
brightest = 0
for s in obj:
print(s)
# s is an np.slice object
sBright = np.mean(vROI[s])
sHeight = s[0].stop - s[0].start
if (self.heightRange[0] <= sHeight <= self.heightRange[1]) and sBright > brightest:
candidate = s[0]
brightest = sBright
if candidate:
self.setPerfPosition( self.ROIcentrexy[0], self.ROIxy[1]+candidate.start + ((candidate.stop-candidate.start)/2 ))
self.found = True
Example 4: _filter_small_slopes
# Required import: from scipy.ndimage import measurements [as alias]
# Or: from scipy.ndimage.measurements import find_objects [as alias]
def _filter_small_slopes(hgt, dx, min_slope=0):
"""Masks out slopes with NaN until the slope if all valid points is at
least min_slope (in degrees).
"""
min_slope = np.deg2rad(min_slope)
slope = np.arctan(-np.gradient(hgt, dx)) # beware the minus sign
# slope at the end always OK
slope[-1] = min_slope
# Find the locs where it doesn't work and expand till we got everything
slope_mask = np.where(slope >= min_slope, slope, np.NaN)
r, nr = label(~np.isfinite(slope_mask))
for objs in find_objects(r):
obj = objs[0]
i = 0
while True:
i += 1
i0 = objs[0].start-i
if i0 < 0:
break
ngap = obj.stop - i0 - 1
nhgt = hgt[[i0, obj.stop]]
current_slope = np.arctan(-np.gradient(nhgt, ngap * dx))
if i0 <= 0 or current_slope[0] >= min_slope:
break
slope_mask[i0:obj.stop] = np.NaN
out = hgt.copy()
out[~np.isfinite(slope_mask)] = np.NaN
return out
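An illustrative call (the elevation values and grid spacing are made up; it assumes the label and find_objects imports used by the function above):
import numpy as np

hgt = np.array([3000., 2998., 2997., 2996.5, 2996., 2990., 2980.])
dx = 100.0  # grid spacing in metres
print(_filter_small_slopes(hgt, dx, min_slope=1))
# stretches that cannot reach the minimum slope are masked with NaN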
Example 5: select_regions
# Required import: from scipy.ndimage import measurements [as alias]
# Or: from scipy.ndimage.measurements import find_objects [as alias]
def select_regions(binary, f, min=0, nbest=100000):
"""Given a scoring function f over slice tuples (as returned by
find_objects), keeps at most nbest regions whose score is higher
than min."""
labels, n = label(binary)
objects = find_objects(labels)
scores = [f(o) for o in objects]
best = np.argsort(scores)
keep = np.zeros(len(objects) + 1, 'i')
if nbest > 0:
for i in best[-nbest:]:
if scores[i] <= min:
continue
keep[i+1] = 1
return keep[labels]
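A hedged sketch of how select_regions might be called, scoring each slice tuple by its bounding-box area (the image, scoring function and thresholds are illustrative; label and find_objects from scipy.ndimage.measurements are assumed to be imported as above):
import numpy as np

def bbox_area(o):
    return (o[0].stop - o[0].start) * (o[1].stop - o[1].start)

binary = np.zeros((10, 10), dtype=bool)
binary[1:4, 1:4] = True  # a 3x3 blob
binary[7, 7] = True      # a single isolated pixel
mask = select_regions(binary, bbox_area, min=1, nbest=1)
print(mask)              # only the 3x3 blob survives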
Example 6: condense
# Required import: from scipy.ndimage import measurements [as alias]
# Or: from scipy.ndimage.measurements import find_objects [as alias]
def condense(im_label):
"""
Shifts labels in a label image to fill in gaps corresponding to missing
values.
Parameters
----------
im_label : array_like
A label image generated by segmentation methods.
Returns
-------
Condensed : array_like
A label image where all values > 0 are shifted down to fill gaps.
See Also
--------
histomicstk.segmentation.label.shuffle
"""
# initialize output
Condensed = im_label.copy()
# get extent of each object
Locations = ms.find_objects(Condensed)
# initialize counter
Counter = 1
# fill in new values
for i in np.arange(1, len(Locations)+1):
if Locations[i-1] is not None:
Patch = Condensed[Locations[i-1]]
Patch[Patch == i] = Counter
Counter += 1
return Condensed
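An illustrative call (label values are made up; it assumes scipy.ndimage.measurements is imported as ms, as in the snippet above):
import numpy as np

im_label = np.array([[0, 2, 2],
                     [0, 0, 5],
                     [7, 0, 5]])
print(condense(im_label))
# labels 2, 5 and 7 are shifted down to 1, 2 and 3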
Example 7: __distinct_binary_object_correspondences
# Required import: from scipy.ndimage import measurements [as alias]
# Or: from scipy.ndimage.measurements import find_objects [as alias]
def __distinct_binary_object_correspondences(reference, result, connectivity=1):
"""
Determines all distinct (where connectivity is defined by the connectivity parameter
passed to scipy's `generate_binary_structure`) binary objects in both of the input
parameters and returns a 1to1 mapping from the labelled objects in reference to the
corresponding (whereas a one-voxel overlap suffices for correspondence) objects in
result.
This stems from the problem that the relationship between the objects is a non-surjective many-to-many mapping.
@return (labelmap1, labelmap2, n_labels1, n_labels2, labelmapping2to1)
"""
result = numpy.atleast_1d(result.astype(bool))
reference = numpy.atleast_1d(reference.astype(bool))
# binary structure
footprint = generate_binary_structure(result.ndim, connectivity)
# label distinct binary objects
labelmap1, n_obj_result = label(result, footprint)
labelmap2, n_obj_reference = label(reference, footprint)
# find all overlaps from labelmap2 to labelmap1; collect one-to-one relationships and store all one-to-many for later processing
slicers = find_objects(labelmap2) # get windows of labelled objects
mapping = dict() # mappings from labels in labelmap2 to corresponding object labels in labelmap1
used_labels = set() # set to collect all already used labels from labelmap2
one_to_many = list() # list to collect all one-to-many mappings
for l1id, slicer in enumerate(slicers): # iterate over object in labelmap2 and their windows
l1id += 1 # labelled objects have ids starting from 1
bobj = (l1id) == labelmap2[slicer] # find binary object corresponding to the label1 id in the segmentation
l2ids = numpy.unique(labelmap1[slicer][bobj]) # extract all unique object identifiers at the corresponding positions in the reference (i.e. the mapping)
l2ids = l2ids[0 != l2ids] # remove background identifiers (=0)
if 1 == len(l2ids): # one-to-one mapping: if target label not already used, add to final list of object-to-object mappings and mark target label as used
l2id = l2ids[0]
if not l2id in used_labels:
mapping[l1id] = l2id
used_labels.add(l2id)
elif 1 < len(l2ids): # one-to-many mapping: store relationship for later processing
one_to_many.append((l1id, set(l2ids)))
# process one-to-many mappings, always choosing the one with the least labelmap2 correspondences first
while True:
one_to_many = [(l1id, l2ids - used_labels) for l1id, l2ids in one_to_many] # remove already used ids from all sets
one_to_many = [x for x in one_to_many if x[1]] # remove empty sets
one_to_many = sorted(one_to_many, key=lambda x: len(x[1])) # sort by set length
if 0 == len(one_to_many):
break
l2id = one_to_many[0][1].pop() # select an arbitrary target label id from the shortest set
mapping[one_to_many[0][0]] = l2id # add to one-to-one mappings
used_labels.add(l2id) # mark target label as used
one_to_many = one_to_many[1:] # delete the processed set from all sets
return labelmap1, labelmap2, n_obj_result, n_obj_reference, mapping
Example 8: delete
# Required import: from scipy.ndimage import measurements [as alias]
# Or: from scipy.ndimage.measurements import find_objects [as alias]
def delete(im_label, indices):
"""
Deletes objects with values in 'indices' from label image, writing them over
with zeros to assimilate with background.
Parameters
----------
im_label : array_like
A label image generated by segmentation methods.
indices : array_like
An n-length array of strictly positive integer values to delete from
'im_label'.
Returns
-------
Deleted : array_like
A label image where all values in 'indices' are set to zero.
Notes
-----
A call to CondenseLabel can squeeze label image values to fill in gaps from
deleted values.
See Also
--------
histomicstk.segmentation.label.condense
"""
# initialize output
Deleted = im_label.copy()
# get extent of each object
Locations = ms.find_objects(Deleted)
# fill in new values
for i in np.arange(indices.size):
if Locations[indices[i]-1] is not None:
Patch = Deleted[Locations[indices[i] - 1]]
Patch[Patch == indices[i]] = 0
return Deleted
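A short, hedged example (label values are illustrative; it assumes the ms alias used above):
import numpy as np

im_label = np.array([[1, 1, 0],
                     [0, 2, 2],
                     [3, 0, 0]])
print(delete(im_label, np.array([2])))
# object 2 is written over with zeros; objects 1 and 3 are untouched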
Example 9: area_open
# Required import: from scipy.ndimage import measurements [as alias]
# Or: from scipy.ndimage.measurements import find_objects [as alias]
def area_open(im_label, min_area):
"""Removes small objects from label image.
Parameters
----------
im_label : array_like
A uint32 type label image generated by segmentation methods.
min_area : int
minimum area threshold for objects. Objects with fewer than 'min_area'
pixels will be zeroed to merge with background.
Returns
-------
im_open : array_like
A uint32 label where objects with pixels < min_area are removed.
Notes
-----
Objects are assumed to have positive nonzero values. im_label image will be
condensed during processing.
See Also
--------
histomicstk.segmentation.label.condense,
histomicstk.segmentation.label.shuffle,
histomicstk.segmentation.label.split,
histomicstk.segmentation.label.width_open
"""
# copy input image
im_open = im_label.copy()
# condense label image
if np.unique(im_open).size-1 != im_open.max():
im_open = condense(im_open)
# count pixels in each object
Counts, Edges = np.histogram(im_open, bins=im_open.max()+1)
# get locations of objects in initial image
Locations = ms.find_objects(im_open)
# iterate through objects, zeroing where needed
for i in np.arange(1, Counts.size):
if Counts[i] < min_area:
# extract object from label image
Template = im_open[Locations[i-1]]
# label mask of object 'i'
Template[Template == i] = 0
# condense to fill gaps
im_open = condense(im_open)
return im_open
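An illustrative usage sketch (the object sizes are made up; it assumes condense and the ms alias from the snippets above are available):
import numpy as np

im_label = np.zeros((8, 8), dtype=np.uint32)
im_label[0:3, 0:3] = 1  # 9-pixel object, kept
im_label[6, 6] = 2      # 1-pixel object, removed
print(area_open(im_label, min_area=4))
# the single-pixel object is zeroed and the labels are re-condensed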
Example 10: __distinct_binary_object_correspondences
# Required import: from scipy.ndimage import measurements [as alias]
# Or: from scipy.ndimage.measurements import find_objects [as alias]
def __distinct_binary_object_correspondences(reference, result, connectivity=1):
"""
Determines all distinct (where connectivity is defined by the connectivity parameter
passed to scipy's `generate_binary_structure`) binary objects in both of the input
parameters and returns a 1to1 mapping from the labelled objects in reference to the
corresponding (whereas a one-voxel overlap suffices for correspondence) objects in
result.
This stems from the problem that the relationship between the objects is a non-surjective many-to-many mapping.
@return (labelmap1, labelmap2, n_labels1, n_labels2, labelmapping2to1)
"""
result = np.atleast_1d(result.astype(bool))
reference = np.atleast_1d(reference.astype(bool))
# binary structure
footprint = generate_binary_structure(result.ndim, connectivity)
# label distinct binary objects
labelmap1, n_obj_result = label(result, footprint)
labelmap2, n_obj_reference = label(reference, footprint)
# find all overlaps from labelmap2 to labelmap1; collect one-to-one relationships and store all one-to-many for later processing
slicers = find_objects(labelmap2) # get windows of labelled objects
mapping = dict() # mappings from labels in labelmap2 to corresponding object labels in labelmap1
used_labels = set() # set to collect all already used labels from labelmap2
one_to_many = list() # list to collect all one-to-many mappings
for l1id, slicer in enumerate(slicers): # iterate over object in labelmap2 and their windows
l1id += 1 # labelled objects have ids starting from 1
bobj = (l1id) == labelmap2[slicer] # find binary object corresponding to the label1 id in the segmentation
l2ids = np.unique(labelmap1[slicer][bobj]) # extract all unique object identifiers at the corresponding positions in the reference (i.e. the mapping)
l2ids = l2ids[0 != l2ids] # remove background identifiers (=0)
if 1 == len(l2ids): # one-to-one mapping: if target label not already used, add to final list of object-to-object mappings and mark target label as used
l2id = l2ids[0]
if not l2id in used_labels:
mapping[l1id] = l2id
used_labels.add(l2id)
elif 1 < len(l2ids): # one-to-many mapping: store relationship for later processing
one_to_many.append((l1id, set(l2ids)))
# process one-to-many mappings, always choosing the one with the least labelmap2 correspondences first
while True:
one_to_many = [(l1id, l2ids - used_labels) for l1id, l2ids in one_to_many] # remove already used ids from all sets
one_to_many = [x for x in one_to_many if x[1]] # remove empty sets
one_to_many = sorted(one_to_many, key=lambda x: len(x[1])) # sort by set length
if 0 == len(one_to_many):
break
l2id = one_to_many[0][1].pop() # select an arbitrary target label id from the shortest set
mapping[one_to_many[0][0]] = l2id # add to one-to-one mappings
used_labels.add(l2id) # mark target label as used
one_to_many = one_to_many[1:] # delete the processed set from all sets
return labelmap1, labelmap2, n_obj_result, n_obj_reference, mapping