This article collects typical usage examples of the Python method skimage.morphology.label. If you have been wondering what exactly morphology.label does, how to call it, or where to find real code that uses it, the curated examples below should help. You can also look further into the containing module, skimage.morphology, for related usage examples.
Below, 15 code examples of morphology.label are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python examples.
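For reference, here is a minimal self-contained sketch of what morphology.label does (in current scikit-image it is re-exported from skimage.measure.label): it assigns a distinct positive integer to every connected component of the input and leaves the background at 0.

import numpy as np
from skimage import morphology

# Two separate foreground regions in a small binary mask.
mask = np.array([[1, 1, 0, 0],
                 [1, 0, 0, 1],
                 [0, 0, 1, 1]], dtype=bool)

# Every connected component gets its own integer id; the background stays 0.
labeled, num = morphology.label(mask, return_num=True)
print(num)      # 2
print(labeled)  # the two components are marked with ids 1 and 2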
Example 1: compute_fp_loss
# Required import: from skimage import morphology [as alias]
# Or: from skimage.morphology import label [as alias]
def compute_fp_loss(probs_log, blob_dict, reduction='sum'):
    if blob_dict["n_fp"] == 0:
        return 0.

    blobs = blob_dict["blobs"]

    loss = 0.
    n_fp = 0.
    for b in blob_dict["blobList"]:
        if b["n_points"] != 0:
            continue

        T = np.ones(blobs.shape[-2:])
        T[blobs[b["class"]] == b["label"]] = 0

        loss += F.nll_loss(probs_log, torch.LongTensor(T).to(device=probs_log.device)[None],
                           ignore_index=1, reduction='mean')
        n_fp += 1

    if reduction == 'mean':
        loss = loss / max(n_fp, 1)

    return loss
Example 2: get_blobs
# Required import: from skimage import morphology [as alias]
# Or: from skimage.morphology import label [as alias]
def get_blobs(logits, roi_mask=None):
    n, k, _, _ = logits.shape
    pred_mask = logits.max(1)[1].squeeze().cpu().numpy()

    h, w = pred_mask.shape
    blobs = np.zeros((k - 1, h, w), int)

    for category_id in np.unique(pred_mask):
        if category_id == 0:
            continue
        blobs[category_id - 1] = morph.label(pred_mask == category_id)

    if roi_mask is not None:
        blobs = (blobs * roi_mask[None]).astype(int)

    return blobs
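A hypothetical call, only to make the expected shapes concrete (the tensor shape and class count below are made up):

import torch

logits = torch.randn(1, 3, 4, 4)   # 1 image, 3 classes (background + 2 foreground classes)
blobs = get_blobs(logits)          # shape (2, 4, 4); blobs[c - 1] holds the labeled blobs of class c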
Example 3: _heatmap_to_rects
# Required import: from skimage import morphology [as alias]
# Or: from skimage.morphology import label [as alias]
def _heatmap_to_rects(self, grid_pred, bb_img):
    """Convert a heatmap to rectangles / bounding box candidates."""
    grid_pred = np.squeeze(grid_pred)  # (1, H, W) => (H, W)

    # remove low activations
    grid_thresh = grid_pred >= self.heatmap_activation_threshold

    # find connected components
    grid_labeled, num_labels = morphology.label(
        grid_thresh, background=0, connectivity=1, return_num=True
    )

    # for each connected component:
    # - draw a bounding box around it,
    # - shrink the bounding box to its optimal size,
    # - estimate a score/confidence value
    bbs = []
    for label in range(1, num_labels + 1):
        (yy, xx) = np.nonzero(grid_labeled == label)
        min_y, max_y = np.min(yy), np.max(yy)
        min_x, max_x = np.min(xx), np.max(xx)
        rect = RectangleOnImage(x1=min_x, x2=max_x + 1, y1=min_y, y2=max_y + 1, shape=grid_labeled)
        activation = self._rect_to_score(rect, grid_pred)
        rect_shrunk, activation_shrunk = self._shrink(grid_pred, rect)
        rect_rs_shrunk = rect_shrunk.on(bb_img)
        bbs.append((rect_rs_shrunk, activation_shrunk))
    return bbs
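As a side note on the connectivity=1 argument used above: it joins pixels only across shared edges (4-connectivity), so diagonally touching pixels fall into different components, whereas connectivity=2 would merge them. A tiny illustration, reusing the imports shown earlier:

diag = np.array([[1, 0],
                 [0, 1]], dtype=bool)
print(morphology.label(diag, connectivity=1, return_num=True)[1])  # 2 components
print(morphology.label(diag, connectivity=2, return_num=True)[1])  # 1 component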
Example 4: multi_rle_encode
# Required import: from skimage import morphology [as alias]
# Or: from skimage.morphology import label [as alias]
def multi_rle_encode(img):
    labels = label(img[:, :, 0])
    return [rle_encode(labels == k) for k in np.unique(labels[labels > 0])]

# ref: https://www.kaggle.com/paulorzp/run-length-encode-and-decode
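The rle_encode helper used above is not shown on this page; a minimal sketch in the spirit of the referenced Kaggle kernel (the project's own helper may differ, e.g. in flattening order) could look like this:

def rle_encode(mask):
    """Run-length encode a binary mask as a 'start length start length ...' string."""
    pixels = mask.flatten(order='F')             # column-major order, as the Kaggle RLE format expects
    pixels = np.concatenate([[0], pixels, [0]])  # pad so runs touching the borders are closed
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
    runs[1::2] -= runs[::2]                      # turn (start, end) positions into (start, length) pairs
    return ' '.join(str(x) for x in runs)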
Example 5: make_train_char_db
# Required import: from skimage import morphology [as alias]
# Or: from skimage.morphology import label [as alias]
def make_train_char_db(ori_img_dir):
    """
    Split each image evenly into four parts and build the training set.
    :param ori_img_dir: directory containing the original images
    :return:
    """
    import pandas as pd
    import h5py

    even_split_train_path = os.path.join(os.getcwd(), 'evensplit_train_im')
    if not os.path.exists(even_split_train_path):
        os.makedirs(even_split_train_path)
    train_imgs = os.listdir(ori_img_dir)
    letters = list('02345678abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ')
    answer_data = pd.read_table(os.path.join(os.getcwd(), 'lvy_ans.txt'), sep=':', names=['Index', 'Answer'])
    # arrays that will be saved
    img = np.zeros((len(train_imgs) * 4, 1, 35, 35), dtype=np.uint8)
    label = np.zeros((len(train_imgs) * 4), dtype=np.uint32)
    index = 0
    for train_img in train_imgs:
        ori_train_img = os.path.join(ori_img_dir, train_img)
        binary_train_img = binary_img(ori_train_img)   # binarized image
        dingge_train_img = ding_ge(binary_train_img)   # image trimmed flush to its content
        # split the image evenly into four parts
        step_train = dingge_train_img.shape[1] / float(4)
        start_train = [j for j in np.arange(0, dingge_train_img.shape[1], step_train).tolist()]
        for p, k in enumerate(start_train):
            print(train_img + '_' + str(p + 1))
            split_train_img = dingge_train_img[:, int(k):int(k + step_train)]
            small_img = ding_ge(split_train_img)
            split_train_resize_img = cv2.resize(small_img, (35, 35))
            img[index, 0, :, :] = split_train_resize_img
            label[index] = letters.index(answer_data['Answer'][int(train_img.split('.')[0]) - 1][p])
            index += 1
            cv2.imwrite(os.path.join(even_split_train_path,
                                     train_img.split('.')[0] + '_' + str(p + 1) + '.png'),
                        split_train_resize_img * 255)
    f = h5py.File(os.path.join(os.getcwd(), 'train_chars_data.h5'), 'w')
    f.create_dataset('img', data=img)
    f.create_dataset('label', data=label)
    f.close()
Example 6: skeleton_transform_volume
# Required import: from skimage import morphology [as alias]
# Or: from skimage.morphology import label [as alias]
def skeleton_transform_volume(label):
    vol_distance = np.zeros_like(label, dtype=np.float32)
    vol_skeleton = np.zeros_like(label, dtype=np.uint8)
    for i in range(label.shape[0]):
        label_img = label[i].copy()
        distance, skeleton = skeleton_transform(label_img)
        vol_distance[i] = distance
        vol_skeleton[i] = skeleton
    return vol_distance, vol_skeleton
Example 7: postprocess_prediction
# Required import: from skimage import morphology [as alias]
# Or: from skimage.morphology import label [as alias]
def postprocess_prediction(seg):
    # basically look for connected components and choose the largest one, delete everything else
    print("running postprocessing... ")
    mask = seg != 0
    lbls = label(mask, 8)
    lbls_sizes = [np.sum(lbls == i) for i in np.unique(lbls)]
    largest_region = np.argmax(lbls_sizes[1:]) + 1
    seg[lbls != largest_region] = 0
    return seg
Example 8: postprocess_prediction
# Required import: from skimage import morphology [as alias]
# Or: from skimage.morphology import label [as alias]
def postprocess_prediction(seg):
    # basically look for connected components and choose the largest one, delete everything else
    mask = seg != 0
    lbls = label(mask, 8)
    lbls_sizes = [np.sum(lbls == i) for i in np.unique(lbls)]
    largest_region = np.argmax(lbls_sizes[1:]) + 1
    seg[lbls != largest_region] = 0
    return seg
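Note that label(mask, 8) in Examples 7 and 8 relies on an older scikit-image signature in which the second positional argument was neighbors. In recent releases that argument is gone and the second positional slot is background, so the same call would silently change meaning. A version-safe equivalent, assuming 2-D input and that 8-connectivity was intended, is:

lbls = label(mask, connectivity=2)   # connectivity=2 in 2-D corresponds to the old neighbors=8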
Example 9: hard_dice
# Required import: from skimage import morphology [as alias]
# Or: from skimage.morphology import label [as alias]
def hard_dice(y_pred, y_true, n_classes):
    # y_true must be label map, not one hot encoding
    y_true = T.flatten(y_true)
    y_pred = T.argmax(y_pred, axis=1)
    dice = T.zeros(n_classes)
    for i in range(n_classes):
        i_val = T.constant(i)
        y_true_i = T.eq(y_true, i_val)
        y_pred_i = T.eq(y_pred, i_val)
        dice = T.set_subtensor(dice[i], (T.constant(2.) * T.sum(y_true_i * y_pred_i) + T.constant(1e-7)) /
                                        (T.sum(y_true_i) + T.sum(y_pred_i) + T.constant(1e-7)))
    return dice
Example 10: prob_to_rles
# Required import: from skimage import morphology [as alias]
# Or: from skimage.morphology import label [as alias]
def prob_to_rles(x, cutoff=0.5):
    lab_img = label(x > cutoff)
    for i in range(1, lab_img.max() + 1):
        yield rle_encoding(lab_img == i)
Example 11: __call__
# Required import: from skimage import morphology [as alias]
# Or: from skimage.morphology import label [as alias]
def __call__(self, **data_dict):
    data = data_dict.get(self.key)
    for b in range(data.shape[0]):
        if np.random.uniform() < self.p_per_sample:
            for c in self.channel_idx:
                if np.random.uniform() < self.p_per_label:
                    workon = np.copy(data[b, c])
                    num_voxels = np.prod(workon.shape, dtype=np.uint64)
                    lab, num_comp = label(workon, return_num=True)
                    if num_comp > 0:
                        component_ids = []
                        component_sizes = []
                        for i in range(1, num_comp + 1):
                            component_ids.append(i)
                            component_sizes.append(np.sum(lab == i))
                        component_ids = [i for i, j in zip(component_ids, component_sizes)
                                         if j < num_voxels * self.dont_do_if_covers_more_than_X_percent]
                        # _ = component_ids.pop(np.argmax(component_sizes))
                        # else:
                        #     component_ids = list(range(1, num_comp + 1))
                        if len(component_ids) > 0:
                            random_component = np.random.choice(component_ids)
                            data[b, c][lab == random_component] = 0
                            if np.random.uniform() < self.fill_with_other_class_p:
                                other_ch = [i for i in self.channel_idx if i != c]
                                if len(other_ch) > 0:
                                    other_class = np.random.choice(other_ch)
                                    data[b, other_class][lab == random_component] = 1
    data_dict[self.key] = data
    return data_dict
Example 12: _check_if_all_in_one_region
# Required import: from skimage import morphology [as alias]
# Or: from skimage.morphology import label [as alias]
def _check_if_all_in_one_region(seg, regions):
    res = OrderedDict()
    for r in regions:
        new_seg = np.zeros(seg.shape)
        for c in r:
            new_seg[seg == c] = 1
        labelmap, numlabels = label(new_seg, return_num=True)
        if numlabels != 1:
            res[tuple(r)] = False
        else:
            res[tuple(r)] = True
    return res
Example 13: _collect_class_and_region_sizes
# Required import: from skimage import morphology [as alias]
# Or: from skimage.morphology import label [as alias]
def _collect_class_and_region_sizes(seg, all_classes, vol_per_voxel):
    volume_per_class = OrderedDict()
    region_volume_per_class = OrderedDict()
    for c in all_classes:
        region_volume_per_class[c] = []
        volume_per_class[c] = np.sum(seg == c) * vol_per_voxel
        labelmap, numregions = label(seg == c, return_num=True)
        for l in range(1, numregions + 1):
            region_volume_per_class[c].append(np.sum(labelmap == l) * vol_per_voxel)
    return volume_per_class, region_volume_per_class
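A hypothetical call on a toy segmentation, just to illustrate the two returned dictionaries (the array and the voxel volume are made up):

seg = np.array([[1, 1, 0],
                [0, 2, 2],
                [1, 0, 2]])
per_class, per_region = _collect_class_and_region_sizes(seg, all_classes=[1, 2], vol_per_voxel=1.0)
# per_class[1] == 3.0 and per_region[1] == [2.0, 1.0]: class 1 splits into two separate blobs
# per_class[2] == 3.0 and per_region[2] == [3.0]:      class 2 forms a single blob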
Example 14: split_label_clusters
# Required import: from skimage import morphology [as alias]
# Or: from skimage.morphology import label [as alias]
def split_label_clusters(self, neighbors=4):
    """ Expand the set of labels by looking at each connected component in
    the labels. Assign each component a new label number, and copy its old
    intensity value to its new label. This typically expands the number of
    labels from ~30 to ~3000, so you should only really do it on the last
    iteration. """
    if self.params.logging:
        prev_r_s = self.decomposition.get_r_s()

    rows, cols = self.input.shape[0:2]
    labels = self.decomposition.get_labels()
    intensities = self.decomposition.intensities
    chromaticities = self.decomposition.chromaticities

    # split labels
    new_labels = morphology.label(labels, neighbors=neighbors)

    # map labels
    self.decomposition.labels_nz = new_labels[self.input.mask_nz]

    # map intensities
    _, indices = np.unique(new_labels.ravel(), return_index=True)
    new_to_old = labels.ravel()[indices]
    new_to_old = new_to_old[new_to_old != -1]
    self.decomposition.intensities = intensities[new_to_old]
    self.decomposition.chromaticities = chromaticities[new_to_old]

    if self.params.logging:
        print('split_label_clusters: %s --> %s' % (
            intensities.shape[0], self.decomposition.intensities.shape[0]))

    self.remove_unused_intensities()

    if self.params.logging:
        np.testing.assert_equal(self.decomposition.get_r_s(), prev_r_s)
        assert (self.decomposition.chromaticities.shape[0] ==
                self.decomposition.intensities.shape[0])
Example 15: postprocess
# Required import: from skimage import morphology [as alias]
# Or: from skimage.morphology import label [as alias]
def postprocess(preds, config):
    assert preds.shape[2] == 5
    ldelta = delta(preds[:, :, 1:])
    # ldelta = delta0(preds[:, :, 5:])
    connected = np.all(ldelta > config.GRADIENT_THRES, 2)
    base = connected * (preds[:, :, 0] > config.MASK_THRES)
    wall = np.sum(np.abs(preds[:, :, 1:]), axis=-1)
    base_label = label(base)
    vals, counts = np.unique(base_label[base_label > 0], return_counts=True)
    for val in vals[counts < config.CLIP_AREA_LOW]:
        base_label[base_label == val] = 0
    vals = vals[counts >= config.CLIP_AREA_LOW]
    for val in vals:
        label_mask = base_label == val
        if np.sum(label_mask) == 0:
            continue
        label_mask = remove_small_holes(label_mask)
        label_mask = basin(label_mask, wall)
        label_mask = remove_small_holes(label_mask)
        '''
        label_bdr = label_mask ^ binary_erosion(label_mask)
        min_wall = np.min(wall[label_mask])
        ave_bdr_wall = np.mean(wall[label_bdr])
        if ave_bdr_wall < min_wall + config.WALL_DEPTH:
            label_mask = 0
        '''
        base_label[label_mask] = val
    vals, counts = np.unique(base_label[base_label > 0], return_counts=True)
    for val in vals[counts < config.CLIP_AREA_LOW]:
        base_label[base_label == val] = 0
    return base_label