This article collects typical usage examples of the Python method scipy.ndimage.measurements.label. If you are wondering what exactly measurements.label does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the module it belongs to, scipy.ndimage.measurements.
Below, 15 code examples of measurements.label are shown, ordered by popularity by default.
Example 1: connected_components_reference_implementation
# Required import: from scipy.ndimage import measurements [as alias]
# Or: from scipy.ndimage.measurements import label [as alias]
def connected_components_reference_implementation(images):
    try:
        from scipy.ndimage import measurements
    except ImportError:
        logging.exception("Skipping test method because scipy could not be loaded")
        return
    image_or_images = np.asarray(images)
    if len(image_or_images.shape) == 2:
        images = image_or_images[None, :, :]
    elif len(image_or_images.shape) == 3:
        images = image_or_images
    components = np.asarray([measurements.label(image)[0] for image in images])
    # Get the count of nonzero ids for each image, and offset each image's nonzero
    # ids using the cumulative sum.
    num_ids_per_image = components.reshape(
        [-1, components.shape[1] * components.shape[2]]
    ).max(axis=-1)
    positive_id_start_per_image = np.cumsum(num_ids_per_image)
    for i in range(components.shape[0]):
        new_id_start = positive_id_start_per_image[i - 1] if i > 0 else 0
        components[i, components[i] > 0] += new_id_start
    if len(image_or_images.shape) == 2:
        return components[0, :, :]
    else:
        return components
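A minimal usage sketch, assuming numpy and logging are imported at module level as the function above expects; the two small binary images are made-up test data. It shows that the component ids of the second image are offset past those of the first.

import logging
import numpy as np

# Two 2-D binary images stacked into a batch (hypothetical values).
images = np.array([[[1, 0, 1],
                    [0, 0, 1],
                    [1, 1, 1]],
                   [[1, 1, 0],
                    [0, 0, 0],
                    [0, 1, 1]]])
components = connected_components_reference_implementation(images)
# Image 0 gets component ids 1 and 2; image 1's ids continue as 3 and 4.
print(components)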
Example 2: _filter_grouplen
# Required import: from scipy.ndimage import measurements [as alias]
# Or: from scipy.ndimage.measurements import label [as alias]
def _filter_grouplen(arr, minsize=3):
    """Filter out the groups of grid points smaller than minsize.

    Parameters
    ----------
    arr : the array to filter (should contain only False and True values)
    minsize : the minimum size of the group

    Returns
    -------
    the array, with small groups removed
    """
    # Do it with Trues
    r, nr = label(arr)
    nr = [i+1 for i, o in enumerate(find_objects(r)) if (len(r[o]) >= minsize)]
    arr = np.asarray([ri in nr for ri in r])
    # and with Falses
    r, nr = label(~ arr)
    nr = [i+1 for i, o in enumerate(find_objects(r)) if (len(r[o]) >= minsize)]
    arr = ~ np.asarray([ri in nr for ri in r])
    return arr
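A small usage sketch, assuming the function sits in a module with `from scipy.ndimage.measurements import label, find_objects` and `import numpy as np` at the top, as the import hint above suggests; the input array is made up.

import numpy as np
from scipy.ndimage.measurements import label, find_objects

arr = np.array([True, True, False, True, True, True, True, False, False, False])
# The leading run of two Trues is shorter than minsize=3 and is filtered out;
# the run of four Trues survives: [F F F T T T T F F F]
print(_filter_grouplen(arr, minsize=3))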
Example 3: _fix_mirror_padding
# Required import: from scipy.ndimage import measurements [as alias]
# Or: from scipy.ndimage.measurements import label [as alias]
def _fix_mirror_padding(self, ann):
    """
    Deal with duplicated instances due to mirroring in interpolation
    during shape augmentation (scale, rotation etc.)
    """
    current_max_id = np.amax(ann)
    inst_list = list(np.unique(ann))
    inst_list.remove(0)  # 0 is background
    for inst_id in inst_list:
        inst_map = np.array(ann == inst_id, np.uint8)
        remapped_ids = measurements.label(inst_map)[0]
        remapped_ids[remapped_ids > 1] += current_max_id
        ann[remapped_ids > 1] = remapped_ids[remapped_ids > 1]
        current_max_id = np.amax(ann)
    return ann
####
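Since this is a method of an augmentation class, it is not called standalone; the sketch below only illustrates the core relabelling step on a hypothetical toy annotation map (not the original augmentation pipeline): an instance id that appears as two disconnected blobs is split with measurements.label and the extra blob gets a fresh id.

import numpy as np
from scipy.ndimage import measurements

# Toy annotation: instance id 1 appears as two disconnected blobs, as if
# mirror padding had duplicated it during augmentation.
ann = np.array([[1, 1, 0, 0, 1],
                [1, 0, 0, 0, 1],
                [0, 0, 2, 2, 0]])
current_max_id = np.amax(ann)                      # 2
inst_map = np.array(ann == 1, np.uint8)
remapped_ids = measurements.label(inst_map)[0]     # blobs become 1 and 2
remapped_ids[remapped_ids > 1] += current_max_id   # second blob: 2 + 2 = 4
ann[remapped_ids > 1] = remapped_ids[remapped_ids > 1]
print(ann)   # the right-hand copy of instance 1 is now labelled 4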
Example 4: greedy_decoder
# Required import: from scipy.ndimage import measurements [as alias]
# Or: from scipy.ndimage.measurements import label [as alias]
def greedy_decoder(outputs: np.ndarray) -> List[Tuple[int, int, int, float]]:
    """
    Translates back the network output to a label sequence using greedy/best
    path decoding as described in [0].

    [0] Graves, Alex, et al. "Connectionist temporal classification: labelling
    unsegmented sequence data with recurrent neural networks." Proceedings of
    the 23rd international conference on Machine learning. ACM, 2006.

    Args:
        outputs (numpy.ndarray): (C, W) shaped softmax output tensor

    Returns:
        A list with tuples (class, start, end, max). max is the maximum value
        of the softmax layer in the region.
    """
    labels = np.argmax(outputs, 0)
    seq_len = outputs.shape[1]
    mask = np.eye(outputs.shape[0], dtype='bool')[labels].T
    classes = []
    for label, group in groupby(zip(np.arange(seq_len), labels, outputs[mask]), key=lambda x: x[1]):
        lgroup = list(group)
        if label != 0:
            classes.append((label, lgroup[0][0], lgroup[-1][0], max(x[2] for x in lgroup)))
    return classes
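A usage sketch with a tiny hand-made "softmax" matrix (3 classes including the blank at index 0, 5 time steps); it assumes the same module-level imports the function relies on (numpy, itertools.groupby, typing). The values are made up for illustration.

import numpy as np
from itertools import groupby
from typing import List, Tuple

# (C, W) = (3, 5): the column-wise argmax sequence is [0, 1, 1, 2, 2]
outputs = np.array([[0.90, 0.10, 0.20, 0.10, 0.20],
                    [0.05, 0.80, 0.70, 0.10, 0.10],
                    [0.05, 0.10, 0.10, 0.80, 0.70]])
print(greedy_decoder(outputs))
# Two non-blank runs: class 1 spanning steps 1-2 (max 0.8) and
# class 2 spanning steps 3-4 (max 0.8); blanks (class 0) are dropped.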
Example 5: label
# Required import: from scipy.ndimage import measurements [as alias]
# Or: from scipy.ndimage.measurements import label [as alias]
def label(image: np.array, **kw) -> np.array:
    """
    Redefine the scipy.ndimage.measurements.label function to work with a wider
    range of data types. The default function is inconsistent about the data
    types it accepts on different platforms.
    """
    try:
        return measurements.label(image, **kw)
    except Exception:
        pass
    types = ["int32", "uint32", "int64", "uint64", "int16", "uint16"]
    for t in types:
        try:
            return measurements.label(np.array(image, dtype=t), **kw)
        except Exception:
            pass
    # let it raise the same exception as before
    return measurements.label(image, **kw)
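A usage sketch for the wrapper, assuming `from scipy.ndimage import measurements` and `import numpy as np` at module level as in the import hint above. Like measurements.label, the wrapper returns a (labelled array, number of components) tuple; the dtype fallbacks only kick in for inputs that scipy rejects on a given platform.

import numpy as np
from scipy.ndimage import measurements

image = np.array([[0, 1, 1, 0],
                  [0, 0, 0, 0],
                  [1, 0, 0, 1]], dtype=bool)
labelled, n = label(image)
print(n)          # 3 connected components
print(labelled)   # components numbered 1..3, background stays 0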
Example 6: propagate_labels
# Required import: from scipy.ndimage import measurements [as alias]
# Or: from scipy.ndimage.measurements import label [as alias]
def propagate_labels(image, labels, conflict=0):
    """Given an image and a set of labels, apply the labels
    to all the regions in the image that overlap a label.
    Assign the value `conflict` to any labels that have a conflict."""
    rlabels, _ = label(image)
    cors = correspondences(rlabels, labels)
    outputs = np.zeros(np.amax(rlabels) + 1, 'i')
    oops = -(1 << 30)
    for o, i in cors.T:
        if outputs[o] != 0:
            outputs[o] = oops
        else:
            outputs[o] = i
    outputs[outputs == oops] = conflict
    outputs[0] = 0
    return outputs[rlabels]
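propagate_labels depends on a `correspondences` helper from the same library that is not shown in this example. The sketch below substitutes a hypothetical stand-in that returns the unique (region id, provided label) column pairs, which is the shape the loop over `cors.T` expects; it is for illustration only and may differ from the library's actual helper.

import numpy as np
from scipy.ndimage.measurements import label

def correspondences(rlabels, labels):
    # Hypothetical stand-in: unique columns of stacked (region id, label) pairs.
    return np.unique(np.stack([rlabels.ravel(), labels.ravel()]), axis=1)

image = np.array([[1, 1, 0, 1, 1],
                  [1, 1, 0, 1, 1]])
seeds = np.array([[5, 0, 0, 7, 0],
                  [0, 0, 0, 0, 0]])
print(propagate_labels(image, seeds))
# Each connected region takes the seed label it overlaps:
# left block -> 5, right block -> 7, background stays 0.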
Example 7: _remove_blobs
# Required import: from scipy.ndimage import measurements [as alias]
# Or: from scipy.ndimage.measurements import label [as alias]
def _remove_blobs(data):
    """Remove false positive blobs, likely occurring in brain sections."""
    labeled_obj, num_obj = label(data)
    if num_obj > 1:  # If there is more than one connected object
        bigger_obj = (labeled_obj == (np.bincount(labeled_obj.flat)[1:].argmax() + 1))
        data2clean = np.copy(data)
        # remove blobs only above the bigger connected object
        z_max = np.max(np.where(bigger_obj)[2])
        data2clean[:, :, :z_max + 1] = 0
        labeled_obj2clean, num_obj2clean = label(data2clean)
        if num_obj2clean:  # If there are connected objects above the bigger connected one
            for obj_id in range(1, num_obj2clean + 1):
                # if the blob has a volume < 10% of the bigger connected object, then remove it
                if np.sum(labeled_obj2clean == obj_id) < 0.1 * np.sum(bigger_obj):
                    logger.warning('Removing small objects above slice #' + str(z_max))
                    data[np.where(labeled_obj2clean == obj_id)] = 0
    return data
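A 3-D toy sketch, assuming `from scipy.ndimage.measurements import label`, numpy, and a module-level `logger` (which the function references but does not define here). A large blob plus a single-voxel blob higher up along the third axis; the tiny one is zeroed out in place.

import logging
import numpy as np
from scipy.ndimage.measurements import label

logger = logging.getLogger(__name__)   # assumed module-level logger

data = np.zeros((5, 5, 10), dtype=np.uint8)
data[1:4, 1:4, 1:4] = 1    # main blob (27 voxels), top slice z = 3
data[2, 2, 8] = 1          # single-voxel blob above it
cleaned = _remove_blobs(data)
print(cleaned[2, 2, 8])    # 0 -- the small blob above the main object was removed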
Example 8: decompose_vol2cube_brain
# Required import: from scipy.ndimage import measurements [as alias]
# Or: from scipy.ndimage.measurements import label [as alias]
def decompose_vol2cube_brain(vol_data, cube_size, n_chn, ita):
    cube_list = []
    fold, ovlap = fit_cube_param(vol_data.shape[0:3], cube_size, ita)
    dim = np.asarray(vol_data.shape[0:3])  # e.g. [307, 307, 143]
    # decompose
    for R in range(0, fold[0]):
        r_s = R * cube_size - R * ovlap[0]
        r_e = r_s + cube_size
        if r_e >= dim[0]:  # check whether it exceeds the boundary
            r_s = dim[0] - cube_size
            r_e = r_s + cube_size
        for C in range(0, fold[1]):
            c_s = C * cube_size - C * ovlap[1]
            c_e = c_s + cube_size
            if c_e >= dim[1]:
                c_s = dim[1] - cube_size
                c_e = c_s + cube_size
            for H in range(0, fold[2]):
                h_s = H * cube_size - H * ovlap[2]
                h_e = h_s + cube_size
                if h_e >= dim[2]:
                    h_s = dim[2] - cube_size
                    h_e = h_s + cube_size
                # partition multiple channels
                cube_temp = vol_data[r_s:r_e, c_s:c_e, h_s:h_e, :]
                # By default batch_size = 1
                cube_batch = np.zeros(
                    [1, cube_size, cube_size, cube_size, n_chn]).astype('float32')
                cube_batch[0, :, :, :, :] = copy.deepcopy(cube_temp)
                # save
                cube_list.append(cube_batch)
    return cube_list

# compose list of label cubes into a label volume
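The helper fit_cube_param is not included in this example, so a full call cannot be reproduced here. The sketch below only illustrates the start/end index arithmetic with boundary clamping that each of the three loops performs, using hypothetical numbers (axis length 143, cube size 64, overlap 16, 3 cubes along the axis).

# Hypothetical values, for illustrating the index arithmetic only.
dim, cube_size, ovlap = 143, 64, 16
fold = 3   # assumed number of cubes along this axis

for R in range(fold):
    r_s = R * cube_size - R * ovlap
    r_e = r_s + cube_size
    if r_e >= dim:                # clamp the last cube to the volume boundary
        r_s = dim - cube_size
        r_e = r_s + cube_size
    print(R, r_s, r_e)
# prints: 0 0 64 / 1 48 112 / 2 79 143 -- overlapping windows covering the axis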
Example 9: get_n_largest_components
# Required import: from scipy.ndimage import measurements [as alias]
# Or: from scipy.ndimage.measurements import label [as alias]
def get_n_largest_components(vol, se, n, return_sizes=False):
    """Get the n largest components from a volume.

    PARAMETERS
    ----------
    vol : ndarray
        Image volume. A dimX x dimY x dimZ array containing the image data.
    se : ndarray
        Structuring element to use when detecting components (i.e. setting the
        connectivity which defines a component).
    n : int
        Number of (largest) components to retain.
    return_sizes : bool, optional
        Whether or not to also return the sizes (in voxels) of each component
        that was retained (default = False).

    RETURNS
    ----------
    components : ndarray
        Binary dimX x dimY x dimZ array where entries corresponding to retained
        components are True and the remaining entries are False.
    """
    vol_lbl = label(vol, se)[0]
    labels, region_size = np.unique(vol_lbl, return_counts=True)
    labels = labels[1:]              # disregard background (label=0)
    region_size = region_size[1:]
    labels = labels[np.argsort(region_size)[::-1]]
    components = np.any(np.array([vol_lbl == i for i in labels[:n]]),
                        axis=0)
    # if no components are found, components will be reduced to false. Replace
    # by array of appropriate size
    if components.sum() == 0:
        components = np.zeros_like(vol, dtype=bool)
    if return_sizes:
        return components, region_size[labels[:n]]
    else:
        return components
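A usage sketch with made-up blobs and a 6-connectivity structuring element from scipy; the se argument is passed straight through to label as the structure.

import numpy as np
from scipy.ndimage import generate_binary_structure
from scipy.ndimage.measurements import label

vol = np.zeros((10, 10, 10), dtype=bool)
vol[1:4, 1:4, 1:4] = True      # 27-voxel component
vol[6:8, 6:8, 6:8] = True      # 8-voxel component
vol[9, 9, 9] = True            # 1-voxel component

se = generate_binary_structure(3, 1)      # 6-connectivity
largest_two = get_n_largest_components(vol, se, n=2)
print(largest_two.sum())                  # 35 voxels: the 27- and 8-voxel components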
Example 10: get_large_components
# Required import: from scipy.ndimage import measurements [as alias]
# Or: from scipy.ndimage.measurements import label [as alias]
def get_large_components(vol, se, threshold):
    """Get the components larger than a given threshold from a volume.

    PARAMETERS
    ----------
    vol : ndarray
        Image volume. A dimX x dimY x dimZ array containing the image data.
    se : ndarray
        Structuring element to use when detecting components (i.e. setting the
        connectivity which defines a component).
    threshold : float
        Only components (strictly) larger than this value (# of voxels) are
        retained.

    RETURNS
    ----------
    components : ndarray
        Binary dimX x dimY x dimZ array where entries corresponding to retained
        components are True and the remaining entries are False.
    """
    vol_lbl = label(vol, se)[0]
    labels, region_size = np.unique(vol_lbl, return_counts=True)
    labels = labels[1:]              # disregard background (label=0)
    region_size = region_size[1:]
    components = np.any(np.array([vol_lbl == i for i in labels[region_size > threshold]]),
                        axis=0)
    # if no components are found, components will be reduced to false. Replace
    # by array of appropriate size
    if components.sum() == 0:
        components = np.zeros_like(vol, dtype=bool)
    return components
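The threshold-based variant can be exercised with the same kind of toy volume (values made up); components strictly larger than the threshold are kept.

import numpy as np
from scipy.ndimage import generate_binary_structure
from scipy.ndimage.measurements import label

vol = np.zeros((10, 10, 10), dtype=bool)
vol[1:4, 1:4, 1:4] = True      # 27 voxels
vol[6:8, 6:8, 6:8] = True      # 8 voxels
se = generate_binary_structure(3, 1)

kept = get_large_components(vol, se, threshold=10)
print(kept.sum())              # 27 -- only the component with more than 10 voxels survives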
Example 11: relabel_compartments
# Required import: from scipy.ndimage import measurements [as alias]
# Or: from scipy.ndimage.measurements import label [as alias]
def relabel_compartments(mesh, old, new):
    """Relabel elements belonging to compartment 'old' (as defined by tag1 and
    tag2 fields) to compartment 'new' of a .msh file. If 'old' and 'new' are
    lists of indices then they are zipped such that relabelling is from old[0]
    to new[0], from old[1] to new[1] etc.

    PARAMETERS
    ----------
    mesh : str or mesh_io msh object
        The mesh whose elements are relabelled.
    old : int or list of ints
        Old label.
    new : int or list of ints
        New label.

    RETURNS
    ----------
    mesh : mesh_io msh object
        The modified input object.
    """
    if type(mesh) == str:
        mesh = mesh_io.read_msh(mesh)
    # ensure iterable
    try:
        next(iter(old))
    except TypeError:
        old = [old]
    try:
        next(iter(new))
    except TypeError:
        new = [new]
    # renumber
    for iold, inew in zip(old, new):
        mesh.elm.tag1[mesh.elm.tag1 == iold] = inew
        mesh.elm.tag2[mesh.elm.tag2 == iold] = inew
    return mesh
Example 12: vmesh2nifti
# Required import: from scipy.ndimage import measurements [as alias]
# Or: from scipy.ndimage.measurements import label [as alias]
def vmesh2nifti(mesh, vol, filename):
    """Given a volume mesh and an image file (e.g., a .nii file), determine which
    voxels in vol are inside which elements of the mesh and label them according
    to the values found in mesh.elm.tag1.

    PARAMETERS
    ----------
    mesh : str or mesh_io volume mesh object
        Object describing the volume mesh.
    vol : str or nibabel image object
        Reference volume. The output volume is similar to this with respect to
        image dimensions, voxel size etc., only with voxel values being replaced
        by the corresponding labels of the tetrahedra in the mesh.
    filename : str
        Name of the saved volume.

    RETURNS
    ----------
    Nothing, saves a nifti file by the name of filename to disk.
    """
    if type(mesh) == str:
        mesh = mesh_io.read_msh(mesh)
    if type(vol) == str:
        vol = nib.load(vol)
    img_dims = vol.shape
    points = np.array(np.meshgrid(*tuple(map(np.arange, img_dims)),
                                  indexing="ij")).reshape((3, -1)).T
    tetrahedra = mesh.nodes.node_coord[mesh.elm.node_number_list[mesh.elm.elm_type == 4] - 1]
    tetrahedra_regions = mesh.elm.tag1[mesh.elm.elm_type == 4]
    tetrahedra = apply_affine(tetrahedra, np.linalg.inv(vol.affine))
    labels = label_points(tetrahedra, points, tetrahedra_regions)
    write_nifti(labels.reshape(img_dims), filename, vol, dtype=np.uint8,
                return_volume=False)
Example 13: tf_categorical_dice
# Required import: from scipy.ndimage import measurements [as alias]
# Or: from scipy.ndimage.measurements import label [as alias]
def tf_categorical_dice(pred, truth, k):
    """ Dice overlap metric for label k """
    A = tf.cast(tf.equal(pred, k), dtype=tf.float32)
    B = tf.cast(tf.equal(truth, k), dtype=tf.float32)
    return 2 * tf.reduce_sum(tf.multiply(A, B)) / (tf.reduce_sum(A) + tf.reduce_sum(B))
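A quick sanity check, assuming TensorFlow 2.x eager execution; the tiny pred/truth tensors are made up.

import tensorflow as tf

pred = tf.constant([[1, 1, 0],
                    [0, 2, 2]])
truth = tf.constant([[1, 0, 0],
                     [0, 2, 2]])
print(float(tf_categorical_dice(pred, truth, k=1)))   # 2*1 / (2+1) = 0.666...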
Example 14: data_augmenter
# Required import: from scipy.ndimage import measurements [as alias]
# Or: from scipy.ndimage.measurements import label [as alias]
def data_augmenter(image, label, shift, rotate, scale, intensity, flip):
    """
    Online data augmentation.
    Perform affine transformation on image and label, which are a 4D tensor of
    shape (N, H, W, C) and a 3D tensor of shape (N, H, W) respectively.
    """
    image2 = np.zeros(image.shape, dtype=np.float32)
    label2 = np.zeros(label.shape, dtype=np.int32)
    for i in range(image.shape[0]):
        # For each image slice, generate random affine transformation parameters
        # using the Gaussian distribution
        shift_val = [np.clip(np.random.normal(), -3, 3) * shift,
                     np.clip(np.random.normal(), -3, 3) * shift]
        rotate_val = np.clip(np.random.normal(), -3, 3) * rotate
        scale_val = 1 + np.clip(np.random.normal(), -3, 3) * scale
        intensity_val = 1 + np.clip(np.random.normal(), -3, 3) * intensity
        # Apply the affine transformation (rotation + scale + shift) to the image
        row, col = image.shape[1:3]
        M = cv2.getRotationMatrix2D((row / 2, col / 2), rotate_val, 1.0 / scale_val)
        M[:, 2] += shift_val
        for c in range(image.shape[3]):
            image2[i, :, :, c] = ndimage.interpolation.affine_transform(image[i, :, :, c],
                                                                        M[:, :2], M[:, 2], order=1)
        # Apply the affine transformation (rotation + scale + shift) to the label map
        label2[i, :, :] = ndimage.interpolation.affine_transform(label[i, :, :],
                                                                 M[:, :2], M[:, 2], order=0)
        # Apply intensity variation
        image2[i] *= intensity_val
        # Apply random horizontal or vertical flipping
        if flip:
            if np.random.uniform() >= 0.5:
                image2[i] = image2[i, ::-1, :, :]
                label2[i] = label2[i, ::-1, :]
            else:
                image2[i] = image2[i, :, ::-1, :]
                label2[i] = label2[i, :, ::-1]
    return image2, label2
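A shape-level sketch with random made-up data, assuming cv2, numpy, and a SciPy version that still exposes scipy.ndimage.interpolation (as the source above uses); it only checks that shapes and dtypes come out as expected.

import numpy as np
import cv2
from scipy import ndimage

image = np.random.rand(4, 64, 64, 1).astype(np.float32)   # (N, H, W, C)
labelmap = np.random.randint(0, 4, (4, 64, 64))            # (N, H, W)
img_aug, lab_aug = data_augmenter(image, labelmap, shift=10, rotate=10,
                                  scale=0.1, intensity=0.1, flip=True)
print(img_aug.shape, lab_aug.shape, lab_aug.dtype)          # (4, 64, 64, 1) (4, 64, 64) int32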
Example 15: np_categorical_dice
# Required import: from scipy.ndimage import measurements [as alias]
# Or: from scipy.ndimage.measurements import label [as alias]
def np_categorical_dice(pred, truth, k):
    """ Dice overlap metric for label k """
    A = (pred == k).astype(np.float32)
    B = (truth == k).astype(np.float32)
    return 2 * np.sum(A * B) / (np.sum(A) + np.sum(B))
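The NumPy version can be checked against the same tiny, made-up arrays used for the TensorFlow variant above.

import numpy as np

pred = np.array([[1, 1, 0],
                 [0, 2, 2]])
truth = np.array([[1, 0, 0],
                  [0, 2, 2]])
print(np_categorical_dice(pred, truth, k=2))   # 1.0 -- label 2 matches exactly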