

Python visualization_utils.visualize_boxes_and_labels_on_image_array code examples

This article collects typical usage examples of the Python method object_detection.utils.visualization_utils.visualize_boxes_and_labels_on_image_array. If you are wondering how to call visualization_utils.visualize_boxes_and_labels_on_image_array, or what it can be used for, the hand-picked examples below should help. You can also explore further usage examples from the containing module, object_detection.utils.visualization_utils.


The following shows 15 code examples of visualization_utils.visualize_boxes_and_labels_on_image_array, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
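
Before the examples, here is a minimal, self-contained sketch of the basic call on dummy data. It is not taken from any of the examples below; the category_index entries, dummy image, box, class id and score are illustrative placeholders, and it assumes the TensorFlow Object Detection API (the object_detection package) is installed.

import numpy as np
from object_detection.utils import visualization_utils as vis_util

# category_index maps class id -> {'id', 'name'}; in real code it is usually built with
# label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS), as in Example 11 below.
category_index = {1: {'id': 1, 'name': 'cat'}, 2: {'id': 2, 'name': 'dog'}}

image_np = np.full((360, 480, 3), 255, dtype=np.uint8)      # blank white image
boxes = np.array([[0.1, 0.1, 0.5, 0.4]], dtype=np.float32)  # [ymin, xmin, ymax, xmax], normalized to [0, 1]
classes = np.array([1], dtype=np.int32)
scores = np.array([0.9], dtype=np.float32)

# Draws boxes, labels and scores directly onto image_np (the array is modified
# in place and also returned).
vis_util.visualize_boxes_and_labels_on_image_array(
    image_np,
    boxes,
    classes,
    scores,
    category_index,
    use_normalized_coordinates=True,
    min_score_thresh=0.5,
    line_thickness=4)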

Example 1: visualize

# Required import: from object_detection.utils import visualization_utils [as alias]
# Or: from object_detection.utils.visualization_utils import visualize_boxes_and_labels_on_image_array [as alias]
def visualize(self, image, output_dict):
        """
        Draws the bounding boxes, labels and scores of each detection

        Args:
        image: (numpy array) input image
        output_dict: (dictionary) output of the object detection model

        Returns:
        image: (numpy array) image with drawings
        """
        # Draw the bounding boxes
        vis_util.visualize_boxes_and_labels_on_image_array(
            image,
            output_dict["detection_boxes"],
            output_dict["detection_classes"],
            output_dict["detection_scores"],
            self.category_index,
            instance_masks=output_dict.get('detection_masks'),
            use_normalized_coordinates=True,
            line_thickness=5)

        return image 
Author: cagbal, Project: ros_people_object_detection_tensorflow, Lines: 25, Source: detector.py

Example 2: test_visualize_boxes_and_labels_on_image_array

# Required import: from object_detection.utils import visualization_utils [as alias]
# Or: from object_detection.utils.visualization_utils import visualize_boxes_and_labels_on_image_array [as alias]
def test_visualize_boxes_and_labels_on_image_array(self):
    ori_image = np.ones([360, 480, 3], dtype=np.int32) * 255
    test_image = np.ones([360, 480, 3], dtype=np.int32) * 255
    detections = np.array([[0.8, 0.1, 0.9, 0.1, 1., 0.1],
                           [0.1, 0.3, 0.8, 0.7, 1., 0.6]])
    keypoints = np.array(np.random.rand(2, 5, 2), dtype=np.float32)
    labelmap = {1: {'id': 1, 'name': 'cat'}, 2: {'id': 2, 'name': 'dog'}}
    visualization_utils.visualize_boxes_and_labels_on_image_array(
        test_image,
        detections[:, :4],
        detections[:, 4].astype(np.int32),
        detections[:, 5],
        labelmap,
        keypoints=keypoints,
        track_ids=None,
        use_normalized_coordinates=True,
        max_boxes_to_draw=1,
        min_score_thresh=0.2,
        agnostic_mode=False,
        line_thickness=8)
    self.assertGreater(np.abs(np.sum(test_image - ori_image)), 0) 
Author: tensorflow, Project: models, Lines: 23, Source: visualization_utils_test.py

Example 3: visualize_inference_for_single_image_from_path

# Required import: from object_detection.utils import visualization_utils [as alias]
# Or: from object_detection.utils.visualization_utils import visualize_boxes_and_labels_on_image_array [as alias]
def visualize_inference_for_single_image_from_path(self, image_path,
                                                       min_score_thresh=.3,
                                                       line_thickness=4,
                                                       output_image_size=(12, 8),
                                                       image_size=300):
        image_np = load_image_into_numpy_array_from_path(image_path, image_size)

        # Actual detection.
        output_dict = self._run_inference_for_single_image(image_np)
        # Visualization of the results of a detection.
        vis_util.visualize_boxes_and_labels_on_image_array(
            image_np,
            output_dict['detection_boxes'],
            output_dict['detection_classes'],
            output_dict['detection_scores'],
            self.category_index,
            instance_masks=output_dict.get('detection_masks'),
            use_normalized_coordinates=True,
            min_score_thresh=min_score_thresh,
            line_thickness=line_thickness)
        plt.figure(figsize=output_image_size)
        plt.imshow(image_np)
        plt.show() 
Author: isobar-us, Project: multilabel-image-classification-tensorflow, Lines: 25, Source: tf_graph_util.py

Example 4: callback

# Required import: from object_detection.utils import visualization_utils [as alias]
# Or: from object_detection.utils.visualization_utils import visualize_boxes_and_labels_on_image_array [as alias]
def callback(self, image_msg):
            
        cv_image = self._cv_bridge.imgmsg_to_cv2(image_msg, "bgr8")
        
        with detection_graph.as_default():
            with tf.Session(graph=detection_graph) as sess:
                image_np = cv_image
                # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
                image_np_expanded = np.expand_dims(image_np, axis=0)
                image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
                # Each box represents a part of the image where a particular object was detected.
                boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
                # Each score represents the level of confidence for each of the objects.
                # Score is shown on the result image, together with the class label.
                scores = detection_graph.get_tensor_by_name('detection_scores:0')
                classes = detection_graph.get_tensor_by_name('detection_classes:0')
                num_detections = detection_graph.get_tensor_by_name('num_detections:0')
                # Actual detection.
                (boxes, scores, classes, num_detections) = sess.run(
                    [boxes, scores, classes, num_detections],
                    feed_dict={image_tensor: image_np_expanded})
                # Visualization of the results of a detection.
                vis_util.visualize_boxes_and_labels_on_image_array(
                    image_np,
                    np.squeeze(boxes),
                    np.squeeze(classes).astype(np.int32),
                    np.squeeze(scores),
                    category_index,
                    use_normalized_coordinates=True,
                    line_thickness=8)
        
        try:
            self._pub.publish(self._cv_bridge.cv2_to_imgmsg(image_np, "bgr8"))
        except CvBridgeError as e:
            print(e) 
Author: cong, Project: ros_tensorflow, Lines: 37, Source: ros_tensorflow_detect.py

Example 5: detect_objects

# Required import: from object_detection.utils import visualization_utils [as alias]
# Or: from object_detection.utils.visualization_utils import visualize_boxes_and_labels_on_image_array [as alias]
def detect_objects(image_np, sess, detection_graph):
    # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
    image_np_expanded = np.expand_dims(image_np, axis=0)
    image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')

    # Each box represents a part of the image where a particular object was detected.
    boxes = detection_graph.get_tensor_by_name('detection_boxes:0')

    # Each score represents the level of confidence for each of the objects.
    # Score is shown on the result image, together with the class label.
    scores = detection_graph.get_tensor_by_name('detection_scores:0')
    classes = detection_graph.get_tensor_by_name('detection_classes:0')
    num_detections = detection_graph.get_tensor_by_name('num_detections:0')

    # Actual detection.
    (boxes, scores, classes, num_detections) = sess.run(
        [boxes, scores, classes, num_detections],
        feed_dict={image_tensor: image_np_expanded})

    # Visualization of the results of a detection.
    vis_util.visualize_boxes_and_labels_on_image_array(
        image_np,
        np.squeeze(boxes),
        np.squeeze(classes).astype(np.int32),
        np.squeeze(scores),
        category_index,
        use_normalized_coordinates=True,
        line_thickness=8)
    return image_np 
Author: datitran, Project: object_detector_app, Lines: 31, Source: object_detection_app.py

Example 6: detect_objects

# Required import: from object_detection.utils import visualization_utils [as alias]
# Or: from object_detection.utils.visualization_utils import visualize_boxes_and_labels_on_image_array [as alias]
def detect_objects(image_np, sess, detection_graph):
    # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
    image_np_expanded = np.expand_dims(image_np, axis=0)
    image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')

    # Each box represents a part of the image where a particular object was detected.
    boxes = detection_graph.get_tensor_by_name('detection_boxes:0')

    # Each score represents the level of confidence for each of the objects.
    # Score is shown on the result image, together with the class label.
    scores = detection_graph.get_tensor_by_name('detection_scores:0')
    classes = detection_graph.get_tensor_by_name('detection_classes:0')
    num_detections = detection_graph.get_tensor_by_name('num_detections:0')

    # Actual detection.
    (boxes, scores, classes, num_detections) = sess.run(
        [boxes, scores, classes, num_detections],
        feed_dict={image_tensor: image_np_expanded})

    # Visualization of the results of a detection.
    vis_util.visualize_boxes_and_labels_on_image_array(
        image_np,
        np.squeeze(boxes),
        np.squeeze(classes).astype(np.int32),
        np.squeeze(scores),
        category_index,
        use_normalized_coordinates=True,
        line_thickness=4)

    return image_np 
Author: lbeaucourt, Project: Object-detection, Lines: 32, Source: objDet_utils.py

Example 7: detect_objects

# Required import: from object_detection.utils import visualization_utils [as alias]
# Or: from object_detection.utils.visualization_utils import visualize_boxes_and_labels_on_image_array [as alias]
def detect_objects(image_np, sess, detection_graph):
        # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
        image_np_expanded = np.expand_dims(image_np, axis=0)
        image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')

        # Each box represents a part of the image where a particular object was detected.
        boxes = detection_graph.get_tensor_by_name('detection_boxes:0')

        # Each score represents the level of confidence for each of the objects.
        # Score is shown on the result image, together with the class label.
        scores = detection_graph.get_tensor_by_name('detection_scores:0')
        classes = detection_graph.get_tensor_by_name('detection_classes:0')
        num_detections = detection_graph.get_tensor_by_name('num_detections:0')

        # Actual detection.
        (boxes, scores, classes, num_detections) = sess.run(
            [boxes, scores, classes, num_detections],
            feed_dict={image_tensor: image_np_expanded})

        # Visualization of the results of a detection.
        vis_util.visualize_boxes_and_labels_on_image_array(
            image_np,
            np.squeeze(boxes),
            np.squeeze(classes).astype(np.int32),
            np.squeeze(scores),
            category_index,
            use_normalized_coordinates=True,
            line_thickness=8)
        return image_np 
Author: cong, Project: ros_tensorflow, Lines: 31, Source: ros_tensorflow_detect.py

Example 8: detection

# Required import: from object_detection.utils import visualization_utils [as alias]
# Or: from object_detection.utils.visualization_utils import visualize_boxes_and_labels_on_image_array [as alias]
def detection():
  image_label =[]
  for image_path in TEST_IMAGE_PATHS:
    image_org = Image.open(image_path, 'r')
    # the array based representation of the image will be used later in order to prepare the
    # result image with boxes and labels on it.
    image_np = load_image_into_numpy_array(image_org)
    # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
    # image_np_expanded = np.expand_dims(image_np, axis=0)
    image_name = os.path.basename(os.path.join(image_path))
    # Actual detection.
    output_dict = run_inference_for_single_image(image_np, detection_graph)
  
    output_path = os.path.join(PATH_TO_TEST_IMAGES_DIR)
  
    # Visualization of the results of a detection.
    image, box_to_color_map, box_to_display_str_map = vis_util.visualize_boxes_and_labels_on_image_array(
        image_np,
        output_dict['detection_boxes'],
        output_dict['detection_classes'],
        output_dict['detection_scores'],
        category_index,
        instance_masks=output_dict.get('detection_masks'),
        use_normalized_coordinates=True,
        max_boxes_to_draw=200,
        min_score_thresh=.75,
        line_thickness=2)
  
    # Crop bounding boxes to split the image.
    lang = 'cont41'
    img_label = img_ocr(image_name, output_path, image_org, box_to_color_map, box_to_display_str_map, lang)
    # save visualize_boxes_and_labels_on_image_array output image.
    image_name = os.path.basename(os.path.join(image_path))
    output_image_name = image_name[:-4] + '_out' + image_name[-4:]
    image_out = Image.fromarray(image_np)
    image_out.save(os.path.join(PATH_TO_TEST_IMAGES_DIR) + '/'+ output_image_name)
    image_label.append({str(image_name[:-4]): img_label})
  return image_label 
Author: lonelygo, Project: container_detection, Lines: 40, Source: detection_var_image.py

Example 9: detect_object

# Required import: from object_detection.utils import visualization_utils [as alias]
# Or: from object_detection.utils.visualization_utils import visualize_boxes_and_labels_on_image_array [as alias]
def detect_object(detection_graph, sess, image, category_index):
    with detection_graph.as_default():
        with sess.as_default() as sess:
            # Define input and output tensors for detection_graph
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
            # Each box represents a part of the image where a particular object was detected.
            detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
            # Each score represents the level of confidence for each of the objects.
            # Score is shown on the result image, together with the class label.
            detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
            detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
            num_detections = detection_graph.get_tensor_by_name('num_detections:0')
            # image = Image.open(image_path)
            # The array-based representation of the image will be used later in order to prepare the
            # result image with boxes and labels on it.
            # image_np = load_image_into_numpy_array(image)
            image_np = image
            # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
            image_np_expanded = np.expand_dims(image_np, axis=0)
            test_var = tf.placeholder(dtype=tf.int8, shape=[])
            # Actual detection.
            (boxes, scores, classes, num) = sess.run(
              [detection_boxes, detection_scores, detection_classes, num_detections],
              feed_dict={image_tensor: image_np_expanded})
            # Visualization of the results of a detection.
            vis_util.visualize_boxes_and_labels_on_image_array(
              image_np,
              np.squeeze(boxes),
              np.squeeze(classes).astype(np.int32),
              np.squeeze(scores),
              category_index,
              use_normalized_coordinates=True,
              line_thickness=8,
              min_score_thresh = 0.7)
            return image_np 
Author: scotthuang1989, Project: object_detection_with_tensorflow, Lines: 37, Source: object_detection_tf_multiprocessing.py

Example 10: detect_object

# Required import: from object_detection.utils import visualization_utils [as alias]
# Or: from object_detection.utils.visualization_utils import visualize_boxes_and_labels_on_image_array [as alias]
def detect_object(detection_graph, sess, image, image_list, category_index):
    with detection_graph.as_default():
        with sess.as_default() as sess:
            # Define input and output tensors for detection_graph
            image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
            # Each box represents a part of the image where a particular object was detected.
            detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
            # Each score represents the level of confidence for each of the objects.
            # Score is shown on the result image, together with the class label.
            detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
            detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
            num_detections = detection_graph.get_tensor_by_name('num_detections:0')

            # build feed_dict
            feed_dict = {}
            for i in range(image_per_run):
              feed_dict.update({"image_ph%d:0" % i: image_list[i]})

            # Actual detection.
            feed_image = sess.run(image,
                                  feed_dict=feed_dict)

            (boxes, scores, classes, num) = sess.run(
              [detection_boxes, detection_scores, detection_classes, num_detections],
              feed_dict={image_tensor: feed_image})

            # Visualization of the results of a detection.
            for i in range(feed_image.shape[0]):
              vis_util.visualize_boxes_and_labels_on_image_array(
                feed_image[i],
                np.squeeze(boxes[i]),
                np.squeeze(classes[i]).astype(np.int32),
                np.squeeze(scores[i]),
                category_index,
                use_normalized_coordinates=True,
                line_thickness=8,
                min_score_thresh=0.20)
            return feed_image 
Author: scotthuang1989, Project: object_detection_with_tensorflow, Lines: 40, Source: object_detection_tf_vectorization_thread.py

Example 11: detect_image

# Required import: from object_detection.utils import visualization_utils [as alias]
# Or: from object_detection.utils.visualization_utils import visualize_boxes_and_labels_on_image_array [as alias]
def detect_image(image_path):
    # load label map
    category_index = label_map_util.create_category_index_from_labelmap(
        PATH_TO_LABELS)

    # load detection graph
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_FROZEN_GRAPH, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

    # define input/output tensors
    image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
    detection_boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
    detection_scores = detection_graph.get_tensor_by_name('detection_scores:0')
    detection_classes = detection_graph.get_tensor_by_name('detection_classes:0')
    num_detections = detection_graph.get_tensor_by_name('num_detections:0')

    # load input image
    img = cv2.imread(image_path)
    if img is None:
        sys.exit('failed to load image: %s' % image_path)
    img = img[..., ::-1]  # BGR to RGB

    # run inference
    with detection_graph.as_default():
        with tf.Session() as sess:
            boxes, scores, classes, _ = sess.run(
                [detection_boxes, detection_scores, detection_classes, num_detections],
                feed_dict={image_tensor: np.expand_dims(img, 0)})

    # draw the results of the detection
    vis_util.visualize_boxes_and_labels_on_image_array(
        img,
        np.squeeze(boxes),
        np.squeeze(classes).astype(np.int32),
        np.squeeze(scores),
        category_index,
        use_normalized_coordinates=True,
        line_thickness=6,
        min_score_thresh=0.3)

    # save the output image
    img = img[..., ::-1]  # RGB to BGR
    cv2.imwrite(OUTPUT_PATH, img)

    print('Output has been written to %s\n' % OUTPUT_PATH) 
Author: jkjung-avt, Project: hand-detection-tutorial, Lines: 52, Source: detect_image.py

Example 12: vis_detection_result

# Required import: from object_detection.utils import visualization_utils [as alias]
# Or: from object_detection.utils.visualization_utils import visualize_boxes_and_labels_on_image_array [as alias]
def vis_detection_result(graph, image_path, output_image_path):
    with graph.as_default():
        ops = tf.get_default_graph().get_operations()
        all_tensor_names = {output.name for op in ops for output in op.outputs}
        tensor_dict = {}
        for key in [
            'num_detections', 'detection_boxes', 'detection_scores',
            'detection_classes', 'detection_masks'
        ]:
            tensor_name = key + ':0'
            if tensor_name in all_tensor_names:
                tensor_dict[key] = tf.get_default_graph().get_tensor_by_name(tensor_name)

        image_tensor = tf.get_default_graph().get_tensor_by_name('image_tensor:0')

        with tf.Session() as sess:
            print('get in the session')
            image = util.data_preprocessing(image_path, target_size=640)
            image_np = np.expand_dims(image, axis=0)
            output_dict = sess.run(tensor_dict, feed_dict={image_tensor: image_np})
            # print(output_dict)
            # all outputs are float32 numpy arrays, so convert types as appropriate
            output_dict['num_detections'] = int(output_dict['num_detections'][0])
            output_dict['detection_classes'] = output_dict[
                'detection_classes'][0].astype(np.int64)
            output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
            output_dict['detection_scores'] = output_dict['detection_scores'][0]
            #print(output_dict)
            # return output_dict
            print('output_dict[\'detection_boxes\'] shape is {}'.format(output_dict['detection_boxes'].shape))
            print('output_dict[\'detection_scores\'] shape is {}'.format(output_dict['detection_scores'].shape))

            category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)

            image = vis_util.visualize_boxes_and_labels_on_image_array(
                image,
                output_dict['detection_boxes'],
                output_dict['detection_classes'],
                output_dict['detection_scores'],
                category_index,
                instance_masks=output_dict.get('detection_masks'),
                use_normalized_coordinates=True,
                line_thickness=3,
                min_score_thresh=0.3)

            plt.imsave(output_image_path,image)

            sess.close() 
Author: fjchange, Project: object_centric_VAD, Lines: 49, Source: inference.py

Example 13: detect

# Required import: from object_detection.utils import visualization_utils [as alias]
# Or: from object_detection.utils.visualization_utils import visualize_boxes_and_labels_on_image_array [as alias]
def detect(model, category_index, image_np, i, confidence, min_detections=10, min_confidence=0.7):
    """Detection loop main method

    Runs actual detection
    
    Args:
        model (model): Model to use
        category_index (category_index): category_index
        image_np (np.ndarray): Numpy image array
        i (int): Iterator
        confidence (float): Previous confidence
        min_detections (int, optional): Minimum detections required to yield a positive result. Defaults to 10.
        min_confidence (float, optional): Minimum average confidence required to yield a positive result. Defaults to 0.7.
    
    Returns:
        (bool, int, float, np.ndarray): Tuple with detection threshold, iterator, confidence, image with labels
    """
    # Actual detection.
    output_dict = run_inference_for_single_image(model, image_np)
    # Visualization of the results of a detection.
    np_det_img = vis_util.visualize_boxes_and_labels_on_image_array(
        image_np,
        output_dict['detection_boxes'],
        output_dict['detection_classes'],
        output_dict['detection_scores'],
        category_index,
        instance_masks=output_dict.get('detection_masks_reframed', None),
        use_normalized_coordinates=True,
        line_thickness=8)

    cv2.imshow('object_detection', cv2.resize(image_np, (800, 600)))
    # print the most likely
    if 'detection_scores' not in output_dict or len(category_index) < 1 or len(output_dict['detection_scores']) <= 0:
        return (False, i, confidence, np_det_img)
    max_label = category_index[1]
    max_score = output_dict['detection_scores'][0]
    # Initialize avg_confidence so the debug log below is defined even when no match was counted yet.
    avg_confidence = confidence / i if i else 0.0
    if max_label['name'] == 'person':
        i += 1
        confidence += max_score
        avg_confidence = confidence / i
    logger.debug('Count: {}, avg_confidence: {}'.format(i, avg_confidence))
    if i >= min_detections and avg_confidence >= min_confidence:
        logger.debug('HUMAN DETECTED! DEPLOY BORK BORK NOM NOM! {} {}'.format(
            i, avg_confidence))
        i = 0
        confidence = 0
        avg_confidence = 0
        return (True, i, confidence, np_det_img)
    else:
        return (False, i, confidence, np_det_img) 
Author: chollinger93, Project: scarecrow, Lines: 52, Source: detector.py

Example 14: img_ocr

# Required import: from object_detection.utils import visualization_utils [as alias]
# Or: from object_detection.utils.visualization_utils import visualize_boxes_and_labels_on_image_array [as alias]
def img_ocr(image_name, output_path, image_org, box_to_color_map, box_to_display_str_map, lang = 'cont41'):
  cont_num_find = 0
  img_label = []
  # Convert coordinates to raw pixels.
  for box, color in box_to_color_map.items():
    ymin, xmin, ymax, xmax = box  
    # Load the original image; the image returned by visualize_boxes_and_labels_on_image_array already has bounding boxes drawn on it.
    image_corp_org = Image.fromarray(np.uint8(image_org))
    img_width, img_height = image_corp_org.size
    new_xmin = int(xmin * img_width)
    new_xmax = int(xmax * img_width)
    new_ymin = int(ymin * img_height)
    new_ymax = int(ymax * img_height)   
    # Increase cropping security boundary(px).
    offset = 5
    if new_xmin - offset >= 0:
      new_xmin = new_xmin - offset
    if new_xmax + offset <= img_width:
      new_xmax = new_xmax + offset
    if new_ymin - offset >= 0:
      new_ymin = new_ymin - offset
    if new_ymax + offset <= img_height:
      new_ymax = new_ymax + offset
    # Get the display string of each bounding box and split 'xxx: 90%' into label and score.
    img_label_name = box_to_display_str_map[box][0].split(': ')
    # Crop the image. Note that PIL and NumPy coordinate orders are reversed!
    image_corp_org = load_image_into_numpy_array(image_org)[new_ymin:new_ymax,new_xmin:new_xmax]       
    image_corp_org = Image.fromarray(np.uint8(image_corp_org))   
    # Tesseract OCR
    lang_use = 'eng+'+lang+'+letsgodigital+snum+eng_f'
    if re.match('container_number+', img_label_name[0]):
      cont_num_find += 1
      image_corp_gray = image_preprocessing(image_corp_org)
      if re.match('container_number_v+', img_label_name[0]):
        cont_num = pytesseract.image_to_string(image_corp_gray, lang=lang_use, config='--psm 6')
      elif re.match('container_number_e+', img_label_name[0]):
        cont_num = pytesseract.image_to_string(image_corp_gray, lang=lang_use, config='--psm 6')
      else :
        cont_num = pytesseract.image_to_string(image_corp_gray, lang=lang_use, config='--psm 4')
      # Save the cropped image to output_path, with the label joined into the name.
      # image_corp_name is composed as: 'image_name(input)'_'cont_num_find'_'img_label_name'
      image_corp_name = image_name[:-4]+ '_'+ str(cont_num_find)+ '_'+ img_label_name[0]
      # img_label: list of {lable, actual, cont_num, image_corp_name} dicts
      img_label.append({'lable':img_label_name[0], 'actual':img_label_name[1], 'cont_num':cont_num, 'image_corp_name':image_corp_name})
      image_corp_org.save(os.path.join(output_path) + '/' + image_corp_name + '_org_'+ image_name[-4:])
      cv2.imwrite(os.path.join(output_path) + '/' + image_corp_name + '_gray_'+ image_name[-4:], image_corp_gray)
      file = open(os.path.join(PATH_TO_TEST_IMAGES_DIR, 'cont_num.txt'), 'a')
      file.write(img_label[cont_num_find - 1]['image_corp_name']+ '_' + img_label[cont_num_find - 1]['actual'] + '\n' + img_label[cont_num_find - 1]['cont_num']+ '\n')
      file.close()
  return img_label # image_corp_org, image_corp_gray 
Author: lonelygo, Project: container_detection, Lines: 52, Source: detection_var_image.py

Example 15: predict

# Required import: from object_detection.utils import visualization_utils [as alias]
# Or: from object_detection.utils.visualization_utils import visualize_boxes_and_labels_on_image_array [as alias]
def predict(self, img):
        """ # Arguments
                img: a numpy array

            # Returns
                The url to an image with the bounding boxes
            """

        def load_image_into_numpy_array(image):
            (im_width, im_height) = image.size
            return np.array(image.getdata()).reshape(
                (im_height, im_width, 3)).astype(np.uint8)

        with self.graph.as_default():
            with tf.Session(graph=self.graph) as sess:
                # Define input and output tensors for detection_graph
                image_tensor = self.graph.get_tensor_by_name('image_tensor:0')
                # Each box represents a part of the image where a particular object was detected.
                detection_boxes = self.graph.get_tensor_by_name('detection_boxes:0')
                # Each score represents the level of confidence for each of the objects.
                # Score is shown on the result image, together with the class label.
                detection_scores = self.graph.get_tensor_by_name('detection_scores:0')
                detection_classes = self.graph.get_tensor_by_name('detection_classes:0')
                num_detections = self.graph.get_tensor_by_name('num_detections:0')
                image = Image.fromarray(img)
                # the array based representation of the image will be used later in order to prepare the
                # result image with boxes and labels on it.
                image_np = load_image_into_numpy_array(image)
                # Expand dimensions since the model expects images to have shape: [1, None, None, 3]
                image_np_expanded = np.expand_dims(image_np, axis=0)
                # Actual detection.
                (boxes, scores, classes, num) = sess.run(
                    [detection_boxes, detection_scores, detection_classes, num_detections],
                    feed_dict={image_tensor: image_np_expanded})
                # Visualization of the results of a detection.
                vis_util.visualize_boxes_and_labels_on_image_array(
                    image_np,
                    np.squeeze(boxes),
                    np.squeeze(classes).astype(np.int32),
                    np.squeeze(scores),
                    self.category_index,
                    use_normalized_coordinates=True,
                    line_thickness=8)
                im = Image.fromarray(image_np)
                filename = str(uuid.uuid4()) + '.jpg'
                save_dir = './outputs'
                if not os.path.exists(save_dir):
                    os.makedirs(save_dir)
                save_path = os.path.join(save_dir, filename)
                im.save(save_path)

                return json.dumps({'output': filename}) 
Author: EliotAndres, Project: pretrained.ml, Lines: 54, Source: models.py


Note: The object_detection.utils.visualization_utils.visualize_boxes_and_labels_on_image_array examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective developers, and copyright remains with the original authors; please refer to each project's license before distributing or using the code. Do not reproduce without permission.