This page collects typical usage examples of the Python method maskrcnn_benchmark.utils.cv2_util.findContours. If you are unsure what cv2_util.findContours does or how to use it, the curated code samples below may help. You can also explore further usage of its containing module, maskrcnn_benchmark.utils.cv2_util.
Five code examples of cv2_util.findContours are shown below, sorted by popularity by default.
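For context, cv2_util.findContours in maskrcnn_benchmark exists to smooth over the difference between OpenCV 3.x (whose cv2.findContours returns image, contours, hierarchy) and OpenCV 4.x (which returns contours, hierarchy). The sketch below shows what such a compatibility wrapper typically looks like; it is a minimal reconstruction, not a verbatim copy of the repository code.

# Minimal sketch of a version-compatibility wrapper in the spirit of cv2_util.findContours.
import cv2

def findContours(*args, **kwargs):
    """Call cv2.findContours and always return (contours, hierarchy)."""
    if cv2.__version__.startswith("4"):
        contours, hierarchy = cv2.findContours(*args, **kwargs)
    elif cv2.__version__.startswith("3"):
        # OpenCV 3.x also returns the (possibly modified) input image as the first value.
        _, contours, hierarchy = cv2.findContours(*args, **kwargs)
    else:
        raise AssertionError("cv2 must be version 3 or 4 to use this wrapper")
    return contours, hierarchy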
Example 1: overlay_mask
# Required module: from maskrcnn_benchmark.utils import cv2_util [as alias]
# Or: from maskrcnn_benchmark.utils.cv2_util import findContours [as alias]
def overlay_mask(self, image, predictions):
    """
    Adds the instance contours for each predicted object.
    Each label has a different color.

    Arguments:
        image (np.ndarray): an image as returned by OpenCV
        predictions (BoxList): the result of the computation by the model.
            It should contain the fields `mask` and `labels`.
    """
    masks = predictions.get_field("mask").numpy()
    labels = predictions.get_field("labels")
    colors = self.compute_colors_for_labels(labels).tolist()

    for mask, color in zip(masks, colors):
        # Each mask has shape (1, H, W); take the single channel and add a trailing axis.
        thresh = mask[0, :, :, None]
        contours, hierarchy = cv2_util.findContours(
            thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
        )
        image = cv2.drawContours(image, contours, -1, color, 3)

    composite = image
    return composite
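As a rough illustration of how a method like this is usually invoked from a demo script, the sketch below assumes a COCODemo-style predictor object named `coco_demo`; the object, its helper methods, and the file names are assumptions for illustration, not part of the example above.

# Hypothetical usage sketch; `coco_demo`, its helper methods, and the file names are assumed.
import cv2

image = cv2.imread("input.jpg")                                   # BGR image, as OpenCV returns it
predictions = coco_demo.compute_prediction(image)                 # assumed: model inference -> BoxList
top_predictions = coco_demo.select_top_predictions(predictions)   # assumed: confidence filtering
result = coco_demo.overlay_mask(image.copy(), top_predictions)
cv2.imwrite("masks_overlaid.jpg", result)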
Example 2: _findContours
# Required module: from maskrcnn_benchmark.utils import cv2_util [as alias]
# Or: from maskrcnn_benchmark.utils.cv2_util import findContours [as alias]
def _findContours(self):
    contours = []
    masks = self.masks.detach().numpy()
    for mask in masks:
        mask = cv2.UMat(mask)
        contour, hierarchy = cv2_util.findContours(
            mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_TC89_L1
        )

        reshaped_contour = []
        for entity in contour:
            assert len(entity.shape) == 3
            assert entity.shape[1] == 1, "Hierarchical contours are not allowed"
            reshaped_contour.append(entity.reshape(-1).tolist())
        contours.append(reshaped_contour)
    return contours
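The reshaping step above flattens each OpenCV contour, an array of shape (N, 1, 2), into an interleaved [x1, y1, x2, y2, ...] coordinate list. A self-contained illustration with a synthetic contour:

# Self-contained illustration of the reshape step, using a synthetic contour.
import numpy as np

entity = np.array([[[10, 20]], [[30, 40]], [[50, 60]]])  # OpenCV-style contour, shape (3, 1, 2)
assert entity.shape[1] == 1                               # same check as in _findContours above
flat = entity.reshape(-1).tolist()                        # interleaved x/y coordinates
print(flat)                                               # [10, 20, 30, 40, 50, 60]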
Example 3: overlay_mask
# Required module: from maskrcnn_benchmark.utils import cv2_util [as alias]
# Or: from maskrcnn_benchmark.utils.cv2_util import findContours [as alias]
def overlay_mask(self, image, predictions):
    """
    Adds the instance contours for each predicted object.
    Each label has a different color.

    Arguments:
        image (np.ndarray): an image as returned by OpenCV
        predictions (BoxList): the result of the computation by the model.
            It should contain the fields `mask` and `labels`.
    """
    masks = predictions.get_field("mask").numpy()
    labels = predictions.get_field("labels")
    colors = self.compute_colors_for_labels(labels).tolist()

    for mask, color in zip(masks, colors):
        # cv2.findContours expects an 8-bit, single-channel image, hence the uint8 cast.
        thresh = mask[0, :, :, None].astype(np.uint8)
        contours, hierarchy = cv2_util.findContours(
            thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
        )
        image = cv2.drawContours(image, contours, -1, color, 3)

    composite = image
    return composite
Example 4: overlay_mask
# Required module: from maskrcnn_benchmark.utils import cv2_util [as alias]
# Or: from maskrcnn_benchmark.utils.cv2_util import findContours [as alias]
def overlay_mask(self, image, predictions):
    """
    Blends the instance mask for each predicted object into the image.
    Each label has a different color.

    Arguments:
        image (np.ndarray): an image as returned by OpenCV
        predictions (BoxList): the result of the computation by the model.
            It should contain the fields `mask` and `labels`.
    """
    masks = predictions.get_field("mask").numpy()
    labels = predictions.get_field("labels")
    colors = self.compute_colors_for_labels(labels)

    image = image.astype(np.float64)  # np.float is removed in recent NumPy releases
    masks = masks.squeeze(1)
    alpha = 0.4
    for mask, color in zip(masks, colors):
        # Alpha-blend the label color into the pixels covered by this mask.
        idx = np.nonzero(mask)
        image[idx[0], idx[1], :] *= 1.0 - alpha
        image[idx[0], idx[1], :] += alpha * color

    # Contour drawing with cv2_util.findContours is kept commented out as an alternative:
    # for mask, color in zip(masks, colors):
    #     thresh = mask[0, :, :, None]
    #     contours, hierarchy = cv2_util.findContours(
    #         thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
    #     )
    #     image = cv2.drawContours(image, contours, -1, color, 3)

    composite = image
    return composite
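This variant replaces contour drawing with per-pixel alpha blending. The standalone sketch below reproduces just the blending step on synthetic data; the array sizes, gray background, and color are illustrative choices.

# Standalone sketch of the alpha blending above, on synthetic data.
import numpy as np

alpha = 0.4
image = np.full((4, 4, 3), 200.0)            # small float image, uniformly gray
mask = np.zeros((4, 4), dtype=np.uint8)
mask[1:3, 1:3] = 1                           # a 2x2 square "instance"
color = np.array([0.0, 0.0, 255.0])          # BGR red, as an example label color

idx = np.nonzero(mask)
image[idx[0], idx[1], :] *= 1.0 - alpha
image[idx[0], idx[1], :] += alpha * color    # masked pixels become a mix of gray and red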
Example 5: instances2dict_with_polygons
# Required module: from maskrcnn_benchmark.utils import cv2_util [as alias]
# Or: from maskrcnn_benchmark.utils.cv2_util import findContours [as alias]
def instances2dict_with_polygons(imageFileList, verbose=False):
    # `labels`, `id2label`, and `Instance` come from the Cityscapes helper scripts.
    imgCount = 0
    instanceDict = {}

    if not isinstance(imageFileList, list):
        imageFileList = [imageFileList]

    if verbose:
        print("Processing {} images...".format(len(imageFileList)))

    for imageFileName in imageFileList:
        # Load image
        img = Image.open(imageFileName)

        # Image as numpy array
        imgNp = np.array(img)

        # Initialize label categories
        instances = {}
        for label in labels:
            instances[label.name] = []

        # Loop through all instance ids in the instance image
        for instanceId in np.unique(imgNp):
            # In the Cityscapes encoding, ids below 1000 are plain label ids, not instances.
            if instanceId < 1000:
                continue

            instanceObj = Instance(imgNp, instanceId)
            instanceObj_dict = instanceObj.toDict()

            # instances[id2label[instanceObj.labelID].name].append(instanceObj.toDict())
            if id2label[instanceObj.labelID].hasInstances:
                mask = (imgNp == instanceId).astype(np.uint8)
                contour, hier = cv2_util.findContours(
                    mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE
                )

                polygons = [c.reshape(-1).tolist() for c in contour]
                instanceObj_dict['contours'] = polygons

            instances[id2label[instanceObj.labelID].name].append(instanceObj_dict)

        imgKey = os.path.abspath(imageFileName)
        instanceDict[imgKey] = instances
        imgCount += 1

        if verbose:
            print("\rImages Processed: {}".format(imgCount), end=' ')
            sys.stdout.flush()

    if verbose:
        print("")

    return instanceDict
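A rough usage sketch follows; the file path is illustrative, and the function only works if the Cityscapes helper definitions (`labels`, `id2label`, `Instance`) are importable in the calling module.

# Hypothetical usage; the path is illustrative and the Cityscapes helpers must be available.
instance_files = ["gtFine/val/frankfurt/frankfurt_000000_000294_gtFine_instanceIds.png"]
result = instances2dict_with_polygons(instance_files, verbose=True)

for image_path, per_label in result.items():
    for label_name, objects in per_label.items():
        for obj in objects:
            # Only labels with hasInstances carry a 'contours' entry.
            print(label_name, len(obj.get("contours", [])), "polygon(s)")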