This page collects typical usage examples of the Python method maskrcnn_benchmark.utils.cv2_util.findContours. If you are wondering what cv2_util.findContours does, how to call it, or want to see it in real code, the hand-picked examples below may help. You can also browse the module it lives in, maskrcnn_benchmark.utils.cv2_util, for more usage examples.
Five code examples of cv2_util.findContours are shown below, ordered by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
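Before the examples, it helps to know what cv2_util.findContours is: in maskrcnn_benchmark it is a thin compatibility wrapper around cv2.findContours, which returns three values in OpenCV 3 but only two in OpenCV 4. A minimal sketch of such a wrapper, assuming only that it normalizes the return value to (contours, hierarchy), could look like this:

import cv2

def findContours(*args, **kwargs):
    # Always return (contours, hierarchy), whichever OpenCV major version is installed
    if cv2.__version__.startswith('4'):
        contours, hierarchy = cv2.findContours(*args, **kwargs)
    elif cv2.__version__.startswith('3'):
        # OpenCV 3 returns (image, contours, hierarchy)
        _, contours, hierarchy = cv2.findContours(*args, **kwargs)
    else:
        raise AssertionError('cv2 must be version 3 or 4')
    return contours, hierarchy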
Example 1: overlay_mask
# Required import: from maskrcnn_benchmark.utils import cv2_util [as alias]
# Or: from maskrcnn_benchmark.utils.cv2_util import findContours [as alias]
def overlay_mask(self, image, predictions):
    """
    Adds the instance contours for each predicted object.
    Each label has a different color.

    Arguments:
        image (np.ndarray): an image as returned by OpenCV
        predictions (BoxList): the result of the computation by the model.
            It should contain the fields `mask` and `labels`.
    """
    masks = predictions.get_field("mask").numpy()
    labels = predictions.get_field("labels")
    colors = self.compute_colors_for_labels(labels).tolist()

    for mask, color in zip(masks, colors):
        thresh = mask[0, :, :, None]
        contours, hierarchy = cv2_util.findContours(
            thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
        )
        image = cv2.drawContours(image, contours, -1, color, 3)

    composite = image
    return composite
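To see the same findContours/drawContours pattern outside the demo class, here is a self-contained sketch on a synthetic binary mask; it uses plain cv2.findContours (OpenCV 4 signature) and made-up shapes rather than model predictions:

import cv2
import numpy as np

image = np.zeros((200, 200, 3), dtype=np.uint8)   # blank canvas
mask = np.zeros((200, 200), dtype=np.uint8)       # hypothetical binary mask
mask[50:150, 60:140] = 1

contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
image = cv2.drawContours(image, contours, -1, (0, 255, 0), 3)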
Example 2: _findContours
# Required import: from maskrcnn_benchmark.utils import cv2_util [as alias]
# Or: from maskrcnn_benchmark.utils.cv2_util import findContours [as alias]
def _findContours(self):
    contours = []
    masks = self.masks.detach().numpy()
    for mask in masks:
        mask = cv2.UMat(mask)
        contour, hierarchy = cv2_util.findContours(
            mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1
        )

        reshaped_contour = []
        for entity in contour:
            assert len(entity.shape) == 3
            assert (
                entity.shape[1] == 1
            ), "Hierarchical contours are not allowed"
            # Flatten the (N, 1, 2) point array into [x0, y0, x1, y1, ...]
            reshaped_contour.append(entity.reshape(-1).tolist())
        contours.append(reshaped_contour)
    return contours
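Each reshaped_contour entry is a flat [x0, y0, x1, y1, ...] list, the polygon format used by COCO-style annotations. A tiny sketch with made-up points shows what the reshape does:

import numpy as np

# findContours returns each contour as an (N, 1, 2) array of (x, y) points
contour = np.array([[[10, 20]], [[30, 20]], [[30, 40]]], dtype=np.int32)
polygon = contour.reshape(-1).tolist()
print(polygon)  # [10, 20, 30, 20, 30, 40]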
Example 3: overlay_mask
# Required import: from maskrcnn_benchmark.utils import cv2_util [as alias]
# Or: from maskrcnn_benchmark.utils.cv2_util import findContours [as alias]
def overlay_mask(self, image, predictions):
    """
    Adds the instance contours for each predicted object.
    Each label has a different color.

    Arguments:
        image (np.ndarray): an image as returned by OpenCV
        predictions (BoxList): the result of the computation by the model.
            It should contain the fields `mask` and `labels`.
    """
    masks = predictions.get_field("mask").numpy()
    labels = predictions.get_field("labels")
    colors = self.compute_colors_for_labels(labels).tolist()

    for mask, color in zip(masks, colors):
        thresh = mask[0, :, :, None].astype(np.uint8)
        contours, hierarchy = cv2_util.findContours(
            thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
        )
        image = cv2.drawContours(image, contours, -1, color, 3)

    composite = image
    return composite
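The only difference from Example 1 is the .astype(np.uint8) cast: cv2.findContours expects an 8-bit single-channel image, so float or boolean masks have to be converted first. A quick sketch with a hypothetical float mask:

import cv2
import numpy as np

float_mask = (np.random.rand(64, 64) > 0.5).astype(np.float32)

# Passing float_mask directly would raise a cv2.error; cast to uint8 first
thresh = float_mask.astype(np.uint8)
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)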
Example 4: overlay_mask
# Required import: from maskrcnn_benchmark.utils import cv2_util [as alias]
# Or: from maskrcnn_benchmark.utils.cv2_util import findContours [as alias]
def overlay_mask(self, image, predictions):
    """
    Alpha-blends a per-label color over each predicted object's mask.
    Each label has a different color.

    Arguments:
        image (np.ndarray): an image as returned by OpenCV
        predictions (BoxList): the result of the computation by the model.
            It should contain the fields `mask` and `labels`.
    """
    masks = predictions.get_field("mask").numpy()
    labels = predictions.get_field("labels")
    colors = self.compute_colors_for_labels(labels)

    image = image.astype(float)  # np.float is an alias removed in NumPy >= 1.24
    masks = masks.squeeze(1)
    for mask, color in zip(masks, colors):
        # Blend the label color into the pixels covered by the mask
        idx = np.nonzero(mask)
        alpha = 0.4
        image[idx[0], idx[1], :] *= 1.0 - alpha
        image[idx[0], idx[1], :] += alpha * color

    # Contour-drawing alternative kept from the original example:
    # for mask, color in zip(masks, colors):
    #     thresh = mask[0, :, :, None]
    #     contours, hierarchy = cv2_util.findContours(
    #         thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
    #     )
    #     image = cv2.drawContours(image, contours, -1, color, 3)

    composite = image
    return composite
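Example 4 keeps the findContours call only as a commented-out alternative and instead alpha-blends a per-label color into the masked pixels. A minimal standalone sketch of that blend, with hypothetical inputs:

import numpy as np

image = np.full((100, 100, 3), 255, dtype=np.uint8).astype(float)  # white canvas
mask = np.zeros((100, 100), dtype=np.uint8)
mask[20:80, 20:80] = 1
color = np.array([0.0, 0.0, 255.0])  # hypothetical per-label color (BGR)
alpha = 0.4

idx = np.nonzero(mask)
image[idx[0], idx[1], :] = (1.0 - alpha) * image[idx[0], idx[1], :] + alpha * color
image = image.astype(np.uint8)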
Example 5: instances2dict_with_polygons
# Required import: from maskrcnn_benchmark.utils import cv2_util [as alias]
# Or: from maskrcnn_benchmark.utils.cv2_util import findContours [as alias]
def instances2dict_with_polygons(imageFileList, verbose=False):
    # `labels`, `id2label` and `Instance` are expected to be provided by the
    # cityscapesscripts helpers imported elsewhere in this script.
    imgCount = 0
    instanceDict = {}

    if not isinstance(imageFileList, list):
        imageFileList = [imageFileList]

    if verbose:
        print("Processing {} images...".format(len(imageFileList)))

    for imageFileName in imageFileList:
        # Load image
        img = Image.open(imageFileName)

        # Image as numpy array
        imgNp = np.array(img)

        # Initialize label categories
        instances = {}
        for label in labels:
            instances[label.name] = []

        # Loop through all instance ids in instance image
        for instanceId in np.unique(imgNp):
            # Cityscapes encodes instance ids as labelId * 1000 + index,
            # so values below 1000 are plain semantic labels without instances.
            if instanceId < 1000:
                continue

            instanceObj = Instance(imgNp, instanceId)
            instanceObj_dict = instanceObj.toDict()

            # instances[id2label[instanceObj.labelID].name].append(instanceObj.toDict())
            if id2label[instanceObj.labelID].hasInstances:
                mask = (imgNp == instanceId).astype(np.uint8)
                contour, hier = cv2_util.findContours(
                    mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

                polygons = [c.reshape(-1).tolist() for c in contour]
                instanceObj_dict['contours'] = polygons

            instances[id2label[instanceObj.labelID].name].append(instanceObj_dict)

        imgKey = os.path.abspath(imageFileName)
        instanceDict[imgKey] = instances
        imgCount += 1

        if verbose:
            print("\rImages Processed: {}".format(imgCount), end=' ')
            sys.stdout.flush()

    if verbose:
        print("")

    return instanceDict
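A hedged usage sketch for Example 5, assuming the cityscapesscripts helpers are importable and using a hypothetical Cityscapes directory layout:

import glob

# Hypothetical path pattern; point it at the actual gtFine instance-id images
instance_files = glob.glob("cityscapes/gtFine/train/*/*_gtFine_instanceIds.png")

result = instances2dict_with_polygons(instance_files, verbose=True)
for image_path, per_label in result.items():
    print(image_path, "car instances:", len(per_label.get("car", [])))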