

Python cv2.COLOR_RGB2BGR Attribute Code Examples

This article collects typical usage examples of the cv2.COLOR_RGB2BGR attribute in Python. If you are wondering what cv2.COLOR_RGB2BGR is for, how to use it, or what real code that uses it looks like, the curated examples below may help. You can also explore further usage examples from the cv2 module to which this attribute belongs.


Fifteen code examples of the cv2.COLOR_RGB2BGR attribute are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
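Before the individual examples, here is a minimal self-contained sketch of what the flag does: cv2.COLOR_RGB2BGR is a conversion code passed to cv2.cvtColor that swaps the red and blue channels, turning an RGB array (the order used by matplotlib, PIL, and most ML pipelines) into the BGR order that cv2.imwrite and cv2.imshow expect. The array name, size, and output filename below are illustrative only and do not come from any of the projects listed here.

import numpy as np
import cv2

# Build a dummy RGB image (height x width x 3, uint8): a solid red frame.
rgb = np.zeros((480, 640, 3), dtype=np.uint8)
rgb[:, :, 0] = 255  # red channel first, i.e. RGB channel order

# Swap the red and blue channels so OpenCV interprets the colors correctly.
bgr = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)

# cv2.imwrite assumes BGR input, so the saved file shows red rather than blue.
cv2.imwrite("red.png", bgr)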

Example 1: __get_annotation__

# Required import: import cv2 [as alias]
# Or: from cv2 import COLOR_RGB2BGR [as alias]
def __get_annotation__(self, mask, image=None):

        # OpenCV 3.x returns (image, contours, hierarchy); in OpenCV 4.x
        # findContours returns only (contours, hierarchy), so drop the leading underscore there.
        _, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

        segmentation = []
        for contour in contours:
            # Valid polygons have >= 6 coordinates (3 points)
            if contour.size >= 6:
                segmentation.append(contour.flatten().tolist())
        RLEs = cocomask.frPyObjects(segmentation, mask.shape[0], mask.shape[1])
        RLE = cocomask.merge(RLEs)
        # RLE = cocomask.encode(np.asfortranarray(mask))
        area = cocomask.area(RLE)
        [x, y, w, h] = cv2.boundingRect(mask)

        if image is not None:
            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            cv2.drawContours(image, contours, -1, (0,255,0), 1)
            cv2.rectangle(image,(x,y),(x+w,y+h), (255,0,0), 2)
            cv2.imshow("", image)
            cv2.waitKey(1)

        return segmentation, [x, y, w, h], area 
Developer: hazirbas, Project: coco-json-converter, Lines: 25, Source file: generate_coco_json.py

Example 2: undistort_images

# Required import: import cv2 [as alias]
# Or: from cv2 import COLOR_RGB2BGR [as alias]
def undistort_images(src, dst):
	"""
	undistort the images in src folder to dst folder
	"""
	# load dist (distortion coefficients) and mtx (camera matrix) from the calibration pickle
	pickle_file = open("../camera_cal/camera_cal.p", "rb")
	dist_pickle = pickle.load(pickle_file)
	mtx = dist_pickle["mtx"]  
	dist = dist_pickle["dist"]
	pickle_file.close()
	
	# loop the image folder
	image_files = glob.glob(src+"*.jpg")
	for idx, file in enumerate(image_files):
		print(file)
		img = mpimg.imread(file)
		image_dist = cv2.undistort(img, mtx, dist, None, mtx)
		file_name = file.split("\\")[-1]
		print(file_name)
		out_image = dst+file_name
		print(out_image)
		image_dist = cv2.cvtColor(image_dist, cv2.COLOR_RGB2BGR)
		cv2.imwrite(out_image, image_dist) 
Developer: ChengZhongShen, Project: Advanced_Lane_Lines, Lines: 25, Source file: helpers.py

Example 3: wrap_images

# Required import: import cv2 [as alias]
# Or: from cv2 import COLOR_RGB2BGR [as alias]
def wrap_images(src, dst):
	"""
	apply the perspective warp to the images in the src folder and save them to dst
	"""
	# load M, Minv
	img_size = (1280, 720)
	pickle_file = open("../helper/trans_pickle.p", "rb")
	trans_pickle = pickle.load(pickle_file)
	M = trans_pickle["M"]
	Minv = trans_pickle["Minv"]
	# loop the file folder
	image_files = glob.glob(src+"*.jpg")
	for idx, file in enumerate(image_files):
		print(file)
		img = mpimg.imread(file)
		image_wraped = cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_LINEAR)
		file_name = file.split("\\")[-1]
		print(file_name)
		out_image = dst+file_name
		print(out_image)
		# convert RGB (from mpimg.imread) to BGR so cv2.imwrite stores the channels in the expected order
		image_wraped = cv2.cvtColor(image_wraped, cv2.COLOR_RGB2BGR)
		cv2.imwrite(out_image, image_wraped) 
Developer: ChengZhongShen, Project: Advanced_Lane_Lines, Lines: 25, Source file: helpers.py

Example 4: test

# Required import: import cv2 [as alias]
# Or: from cv2 import COLOR_RGB2BGR [as alias]
def test():
	pickle_file = open("trans_pickle.p", "rb")
	trans_pickle = pickle.load(pickle_file)
	M = trans_pickle["M"]  
	Minv = trans_pickle["Minv"]

	img_size = (1280, 720)

	image_files = glob.glob("../output_images/undistort/*.jpg")
	for idx, file in enumerate(image_files):
		print(file)
		img = mpimg.imread(file)
		warped = cv2.warpPerspective(img, M, img_size, flags=cv2.INTER_LINEAR)
		file_name = file.split("\\")[-1]
		print(file_name)
		out_image = "../output_images/perspect_trans/"+file_name
		print(out_image)
		# convert to opencv BGR format
		warped = cv2.cvtColor(warped, cv2.COLOR_RGB2BGR)
		cv2.imwrite(out_image, warped) 
Developer: ChengZhongShen, Project: Advanced_Lane_Lines, Lines: 22, Source file: view_perspective.py

Example 5: save_result

# Required import: import cv2 [as alias]
# Or: from cv2 import COLOR_RGB2BGR [as alias]
def save_result(self):
        path = os.path.abspath(self.image_file)
        path, ext = os.path.splitext(path)

        suffix = datetime.datetime.now().strftime("%y%m%d_%H%M%S")
        save_path = "_".join([path, self.method, suffix])

        print('saving result to <%s>\n' % save_path)
        if not os.path.exists(save_path):
            os.mkdir(save_path)

        np.save(os.path.join(save_path, 'im_l.npy'), self.model.img_l)
        np.save(os.path.join(save_path, 'im_ab.npy'), self.im_ab0)
        np.save(os.path.join(save_path, 'im_mask.npy'), self.im_mask0)

        result_bgr = cv2.cvtColor(self.result, cv2.COLOR_RGB2BGR)
        mask = self.im_mask0.transpose((1, 2, 0)).astype(np.uint8) * 255
        cv2.imwrite(os.path.join(save_path, 'input_mask.png'), mask)
        cv2.imwrite(os.path.join(save_path, 'ours.png'), result_bgr)
        cv2.imwrite(os.path.join(save_path, 'ours_fullres.png'), self.model.get_img_fullres()[:, :, ::-1])
        cv2.imwrite(os.path.join(save_path, 'input_fullres.png'), self.model.get_input_img_fullres()[:, :, ::-1])
        cv2.imwrite(os.path.join(save_path, 'input.png'), self.model.get_input_img()[:, :, ::-1])
        cv2.imwrite(os.path.join(save_path, 'input_ab.png'), self.model.get_sup_img()[:, :, ::-1]) 
Developer: junyanz, Project: interactive-deep-colorization, Lines: 25, Source file: gui_draw.py

Example 6: plotResults

# Required import: import cv2 [as alias]
# Or: from cv2 import COLOR_RGB2BGR [as alias]
def plotResults(fname, result_list):
    columm = []
    for fig in result_list:
        shape = fig.shape
        fig = fig.numpy()
        row = []
        for idx in range(shape[0]):
            item = fig[idx, :, :, :]
            if item.shape[2] == 1:
                item = np.concatenate([item, item, item], axis=2)
            item = cv2.cvtColor(cv2.resize(item, (128, 128)), cv2.COLOR_RGB2BGR)
            row.append(item)
        row = np.concatenate(row, axis=1)
        columm.append(row)
    columm = np.concatenate(columm, axis=0)
    img = np.uint8(columm * 255)
    cv2.imwrite(fname, img)


############################################################
#  Deep Tree Network
############################################################ 
Developer: yaojieliu, Project: CVPR2019-DeepTreeLearningForZeroShotFaceAntispoofing, Lines: 24, Source file: model.py

Example 7: start

# Required import: import cv2 [as alias]
# Or: from cv2 import COLOR_RGB2BGR [as alias]
def start(self):
        """
        Start the program.
        :return:
        """
        self.console("Program started successfully.")
        self.init_mask()
        while self.listener:
            frame = self.read_data()
            frame = resize(frame, width=self.max_width)
            img_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            rects = self.detector(img_gray, 0)
            faces = self.orientation(rects, img_gray)
            draw_img = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            if self.doing:
                self.drawing(draw_img, faces)
                self.animation_time += self.speed
                self.save_data(draw_img)
                if self.animation_time > self.duration:
                    self.doing = False
                    self.animation_time = 0
                else:
                    frame = cv2.cvtColor(np.asarray(draw_img), cv2.COLOR_RGB2BGR)
            cv2.imshow("hello mask", frame)
            self.listener_keys() 
Developer: tomoncle, Project: face-detection-induction-course, Lines: 27, Source file: input_video_stream_paste_mask.py

Example 8: train_generator

# Required import: import cv2 [as alias]
# Or: from cv2 import COLOR_RGB2BGR [as alias]
def train_generator(self, image_generator, mask_generator):
        # cv2.namedWindow('show', 0)
        # cv2.resizeWindow('show', 1280, 640)
        while True:
            image = next(image_generator)
            mask = next(mask_generator)
            label = self.make_regressor_label(mask).astype(np.float32)
            # print (image.dtype, label.dtype)
            # print (image.shape, label.shape)
            # exit()
            # cv2.imshow('show', image[0].astype(np.uint8))
            # cv2.imshow('label', label[0].astype(np.uint8))
            # mask = self.select_labels(mask)
            # print (image.shape)
            # print (mask.shape)
            # image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            # mask = (mask.astype(np.float32)*255/33).astype(np.uint8)
            # mask_color = cv2.applyColorMap(mask, cv2.COLORMAP_JET)
            # print (mask_color.shape)
            # show = cv2.addWeighted(image, 0.5, mask_color, 0.5, 0.0)
            # cv2.imshow("show", show)
            # key = cv2.waitKey()
            # if key == 27:
            #     exit()
            yield (image, label) 
Developer: dhkim0225, Project: keras-image-segmentation, Lines: 27, Source file: train.py

Example 9: return_left_camera_image

# Required import: import cv2 [as alias]
# Or: from cv2 import COLOR_RGB2BGR [as alias]
def return_left_camera_image(self, mode='RGB'):
        """Return a numpy array with the LEFT camera image

        @param mode the image to return (default RGB)
            RGB: Red Green Blue image
            BGR: Blue Green Red (OpenCV)
            GRAY: Grayscale image
        """
        self.port_left_camera.read(self.yarp_image)
        if(mode=='BGR'):
            return cv2.cvtColor(self.img_array, cv2.COLOR_RGB2BGR)
        elif(mode=='RGB'):
            return self.img_array
        elif(mode=='GRAY'):
            return cv2.cvtColor(self.img_array, cv2.COLOR_BGR2GRAY)
        else:
            return self.img_array 
Developer: mpatacchiola, Project: pyERA, Lines: 19, Source file: icub.py

Example 10: return_right_camera_image

# Required import: import cv2 [as alias]
# Or: from cv2 import COLOR_RGB2BGR [as alias]
def return_right_camera_image(self, mode='RGB'):
        """Return a numpy array with the RIGHT camera image

        @param mode the image to return (default RGB)
            RGB: Red Green Blue image
            BGR: Blue Green Red (OpenCV)
            GRAY: Grayscale image
        """
        self.port_right_camera.read(self.yarp_image)
        if(mode=='BGR'):
            return cv2.cvtColor(self.img_array, cv2.COLOR_RGB2BGR)
        elif(mode=='RGB'):
            return self.img_array
        elif(mode=='GRAY'):
            return cv2.cvtColor(self.img_array, cv2.COLOR_BGR2GRAY)
        else:
            return self.img_array 
Developer: mpatacchiola, Project: pyERA, Lines: 19, Source file: icub.py

Example 11: mx2tfrecords_old

# Required import: import cv2 [as alias]
# Or: from cv2 import COLOR_RGB2BGR [as alias]
def mx2tfrecords_old(imgidx, imgrec, args):
    output_path = os.path.join(args.tfrecords_file_path, 'tran.tfrecords')
    writer = tf.python_io.TFRecordWriter(output_path)
    for i in imgidx:
        img_info = imgrec.read_idx(i)
        header, img = mx.recordio.unpack(img_info)
        encoded_jpg_io = io.BytesIO(img)
        image = PIL.Image.open(encoded_jpg_io)
        np_img = np.array(image)
        img = cv2.cvtColor(np_img, cv2.COLOR_RGB2BGR)
        img_raw = img.tobytes()
        label = int(header.label)
        example = tf.train.Example(features=tf.train.Features(feature={
            'image_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),
            "label": tf.train.Feature(int64_list=tf.train.Int64List(value=[label]))
        }))
        writer.write(example.SerializeToString())  # Serialize To String
        if i % 10000 == 0:
            print('%d num image processed' % i)
    writer.close() 
Developer: auroua, Project: InsightFace_TF, Lines: 22, Source file: mx2tfrecords.py

Example 12: mx2tfrecords

# Required import: import cv2 [as alias]
# Or: from cv2 import COLOR_RGB2BGR [as alias]
def mx2tfrecords(imgidx, imgrec, args):
    output_path = os.path.join(args.tfrecords_file_path, 'tran.tfrecords')
    writer = tf.python_io.TFRecordWriter(output_path)
    for i in imgidx:
        img_info = imgrec.read_idx(i)
        header, img = mx.recordio.unpack(img_info)
        encoded_jpg_io = io.BytesIO(img)
        image = PIL.Image.open(encoded_jpg_io)
        np_img = np.array(image)
        img = cv2.cvtColor(np_img, cv2.COLOR_RGB2BGR)
        img_raw = img.tobytes()
        label = int(header.label)
        example = tf.train.Example(features=tf.train.Features(feature={
            'image_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),
            "label": tf.train.Feature(int64_list=tf.train.Int64List(value=[label]))
        }))
        writer.write(example.SerializeToString())  # Serialize To String
        if i % 10000 == 0:
            print('%d num image processed' % i)
    writer.close() 
Developer: auroua, Project: InsightFace_TF, Lines: 22, Source file: eval_data_reader.py

Example 13: load_bin

# Required import: import cv2 [as alias]
# Or: from cv2 import COLOR_RGB2BGR [as alias]
def load_bin(db_name, image_size, args):
    bins, issame_list = pickle.load(open(os.path.join(args.eval_db_path, db_name+'.bin'), 'rb'), encoding='bytes')
    data_list = []
    for _ in [0,1]:
        data = np.empty((len(issame_list)*2, image_size[0], image_size[1], 3))
        data_list.append(data)
    for i in range(len(issame_list)*2):
        _bin = bins[i]
        img = mx.image.imdecode(_bin).asnumpy()
        img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        for flip in [0,1]:
            if flip == 1:
                img = np.fliplr(img)
            data_list[flip][i, ...] = img
        i += 1
        if i % 1000 == 0:
            print('loading bin', i)
    print(data_list[0].shape)
    return data_list, issame_list 
Developer: auroua, Project: InsightFace_TF, Lines: 21, Source file: eval_data_reader.py

Example 14: colorize

# Required import: import cv2 [as alias]
# Or: from cv2 import COLOR_RGB2BGR [as alias]
def colorize(self, label_map, image_canvas=None):
        height, width = label_map.shape
        color_dst = np.zeros((height, width, 3), dtype=np.uint8)
        color_list = self.configer.get('details', 'color_list')
        for i in range(self.configer.get('data', 'num_classes')):
            color_dst[label_map == i] = color_list[i % len(color_list)]

        color_img_rgb = np.array(color_dst, dtype=np.uint8)
        color_img_bgr = cv2.cvtColor(color_img_rgb, cv2.COLOR_RGB2BGR)

        if image_canvas is not None:
            image_canvas = cv2.addWeighted(image_canvas, 0.6, color_img_bgr, 0.4, 0)
            return image_canvas

        else:
            return color_img_bgr 
Developer: openseg-group, Project: openseg.pytorch, Lines: 18, Source file: seg_parser.py

Example 15: drawrect

# Required import: import cv2 [as alias]
# Or: from cv2 import COLOR_RGB2BGR [as alias]
def drawrect(img, rect, text):
    cv2.rectangle(img, tuple(rect[:2]), tuple(rect[2:]), (10,250,10), 2, 1)
    x, y = rect[:2]
    def cv2ImgAddText(img, text, left, top, textColor=(0, 255, 0), textSize=20):
        from PIL import Image, ImageDraw, ImageFont
        img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
        draw = ImageDraw.Draw(img)
        fontText = ImageFont.truetype( "font/simsun.ttc", textSize, encoding="utf-8")
        draw.text((left, top), text, textColor, font=fontText)
        return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)
    import re
    if re.findall('[\u4e00-\u9fa5]', text):
        img = cv2ImgAddText(img, text, x, y-12, (10,10,250), 12)  # draw via PIL when the text contains Chinese characters, since cv2.putText cannot render them
    else:
        cv2.putText(img, text, (x,y), cv2.FONT_HERSHEY_SIMPLEX, 0.4, (10,10,250), 1)
    return img 
Developer: cilame, Project: vrequest, Lines: 18, Source file: pymini_yolo.py


Note: The cv2.COLOR_RGB2BGR attribute examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by their respective authors, and copyright remains with the original authors. Please refer to each project's License before distributing or using the code; do not reproduce this article without permission.