Python cv2.imshow Method Code Examples

This article collects typical usage examples of the Python cv2.imshow method. If you are wondering what cv2.imshow does, how to call it, or what real-world uses look like, the curated code examples below should help. You can also explore further usage examples of the cv2 module to which this method belongs.


The following presents 15 code examples of the cv2.imshow method, sorted by popularity by default.
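Before the project examples below, here is a minimal, self-contained sketch of the typical cv2.imshow call pattern (read, show, wait for a key press, clean up). The file name "example.jpg" is only a placeholder, not taken from any of the projects:

import cv2

img = cv2.imread("example.jpg")   # load an image from disk (returns None if the file cannot be read)
if img is not None:
    cv2.imshow("preview", img)    # display the image in a window titled "preview"
    cv2.waitKey(0)                # block until any key is pressed (needed for the window to render)
    cv2.destroyAllWindows()       # close all OpenCV windows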

Example 1: _lapulaseDetection

# Required module: import cv2 [as alias]
# Or: from cv2 import imshow [as alias]
def _lapulaseDetection(self, imgName):
        """
        :param strdir: 文件所在的目錄
        :param name: 文件名稱
        :return: 檢測模糊後的分數
        """
        # step1: 預處理
        img2gray, reImg = self.preImgOps(imgName)
        # step2: laplacian算子 獲取評分
        resLap = cv2.Laplacian(img2gray, cv2.CV_64F)
        score = resLap.var()
        print("Laplacian %s score of given image is %s", str(score))
        # strp3: 繪製圖片並保存  不應該寫在這裏  抽象出來   這是共有的部分
        newImg = self._drawImgFonts(reImg, str(score))
        newDir = self.strDir + "/_lapulaseDetection_/"
        if not os.path.exists(newDir):
            os.makedirs(newDir)
        newPath = newDir + imgName
        # save and display
        cv2.imwrite(newPath, newImg)  # save the image
        cv2.imshow(imgName, newImg)
        cv2.waitKey(0)

        # step4: return the score
        return score 
Author: Leezhen2014, Project: python--, Lines: 27, Source: BlurDetection.py

Example 2: run

# Required module: import cv2 [as alias]
# Or: from cv2 import imshow [as alias]
def run(self):
        print("VEDIO server starts...")
        self.sock.bind(self.ADDR)
        self.sock.listen(1)
        conn, addr = self.sock.accept()
        print("remote VEDIO client success connected...")
        data = "".encode("utf-8")
        payload_size = struct.calcsize("L")
        cv2.namedWindow('Remote', cv2.WINDOW_AUTOSIZE)
        while True:
            while len(data) < payload_size:
                data += conn.recv(81920)
            packed_size = data[:payload_size]
            data = data[payload_size:]
            msg_size = struct.unpack("L", packed_size)[0]
            while len(data) < msg_size:
                data += conn.recv(81920)
            zframe_data = data[:msg_size]
            data = data[msg_size:]
            frame_data = zlib.decompress(zframe_data)
            frame = pickle.loads(frame_data)
            cv2.imshow('Remote', frame)
            if cv2.waitKey(1) & 0xFF == 27:
                break 
Author: 11ze, Project: The-chat-room, Lines: 26, Source: vachat.py

Example 3: __get_annotation__

# Required module: import cv2 [as alias]
# Or: from cv2 import imshow [as alias]
def __get_annotation__(self, mask, image=None):

        _, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

        segmentation = []
        for contour in contours:
            # Valid polygons have >= 6 coordinates (3 points)
            if contour.size >= 6:
                segmentation.append(contour.flatten().tolist())
        RLEs = cocomask.frPyObjects(segmentation, mask.shape[0], mask.shape[1])
        RLE = cocomask.merge(RLEs)
        # RLE = cocomask.encode(np.asfortranarray(mask))
        area = cocomask.area(RLE)
        [x, y, w, h] = cv2.boundingRect(mask)

        if image is not None:
            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            cv2.drawContours(image, contours, -1, (0,255,0), 1)
            cv2.rectangle(image,(x,y),(x+w,y+h), (255,0,0), 2)
            cv2.imshow("", image)
            cv2.waitKey(1)

        return segmentation, [x, y, w, h], area 
Author: hazirbas, Project: coco-json-converter, Lines: 25, Source: generate_coco_json.py

Example 4: _SMDDetection

# Required module: import cv2 [as alias]
# Or: from cv2 import imshow [as alias]
def _SMDDetection(self, imgName):

        # step1: image preprocessing
        img2gray, reImg = self.preImgOps(imgName)
        f=self._imageToMatrix(img2gray)/255.0
        x, y = f.shape
        score = 0
        for i in range(x - 1):
            for j in range(y - 1):
                # SMD: |f(i+1,j) - f(i,j)| + |f(i,j) - f(i,j+1)|
                score += np.abs(f[i+1,j]-f[i,j])+np.abs(f[i,j]-f[i,j+1])
        # step3: draw the score on the image and save it (shared logic; should be factored out)
        score=score/100
        newImg = self._drawImgFonts(reImg, str(score))
        newDir = self.strDir + "/_SMDDetection_/"
        if not os.path.exists(newDir):
            os.makedirs(newDir)
        newPath = newDir + imgName
        cv2.imwrite(newPath, newImg)  # save the image
        cv2.imshow(imgName, newImg)
        cv2.waitKey(0)
        return score 
Author: Leezhen2014, Project: python--, Lines: 23, Source: BlurDetection.py

Example 5: processFrames

# Required module: import cv2 [as alias]
# Or: from cv2 import imshow [as alias]
def processFrames(self):
        try:
            for img in self.anotations_list:
                img = img.split(';')
                # print(img)
                # ret,imgcv = cap.read()
                if self.video:
                    ret,imgcv = self.cap.read()
                else:
                    imgcv = cv2.imread(os.path.join('../',self.config["dataset"],img[0]))
                result = self.tfnet.return_predict(imgcv)
                print(result)
                imgcv = self.drawBoundingBox(imgcv,result)        
                cv2.imshow('detected objects',imgcv)
                if cv2.waitKey(10) == ord('q'):
                    print('exiting loop')
                    break
        except KeyboardInterrupt:
            cv2.destroyAllWindows()
            print('exiting program')
Author: AmeyaWagh, Project: Traffic_sign_detection_YOLO, Lines: 22, Source: objectDetectorYOLO.py

Example 6: show

# Required module: import cv2 [as alias]
# Or: from cv2 import imshow [as alias]
def show(im, allobj, S, w, h, cellx, celly):
    for obj in allobj:
        a = obj[5] % S
        b = obj[5] // S
        cx = a + obj[1]
        cy = b + obj[2]
        centerx = cx * cellx
        centery = cy * celly
        ww = obj[3]**2 * w
        hh = obj[4]**2 * h
        cv2.rectangle(im,
            (int(centerx - ww/2), int(centery - hh/2)),
            (int(centerx + ww/2), int(centery + hh/2)),
            (0,0,255), 2)
    cv2.imshow('result', im)
    cv2.waitKey()
    cv2.destroyAllWindows() 
Author: AmeyaWagh, Project: Traffic_sign_detection_YOLO, Lines: 19, Source: misc.py

Example 7: _blurDetection

# Required module: import cv2 [as alias]
# Or: from cv2 import imshow [as alias]
def _blurDetection(self, imgName):

        # step1: image preprocessing
        img2gray, reImg = self.preImgOps(imgName)
        imgMat=self._imageToMatrix(img2gray)/255.0
        x, y = imgMat.shape
        score = 0
        for i in range(x - 2):
            for j in range(y - 2):
                score += (imgMat[i + 2, j] - imgMat[i, j]) ** 2
        # step3: draw the score on the image and save it (shared logic; should be factored out)
        score=score/10
        newImg = self._drawImgFonts(reImg, str(score))
        newDir = self.strDir + "/_blurDetection_/"
        if not os.path.exists(newDir):
            os.makedirs(newDir)
        newPath = newDir + imgName
        cv2.imwrite(newPath, newImg)  # save the image
        cv2.imshow(imgName, newImg)
        cv2.waitKey(0)
        return score 
Author: Leezhen2014, Project: python--, Lines: 23, Source: BlurDetection.py

Example 8: _SMD2Detection

# Required module: import cv2 [as alias]
# Or: from cv2 import imshow [as alias]
def _SMD2Detection(self, imgName):
        """
        灰度方差乘積
        :param imgName:
        :return:
        """
        # step 1 圖像的預處理
        img2gray, reImg = self.preImgOps(imgName)
        f=self._imageToMatrix(img2gray)/255.0
        x, y = f.shape
        score = 0
        for i in range(x - 1):
            for j in range(y - 1):
                score += np.abs(f[i+1,j]-f[i,j])*np.abs(f[i,j]-f[i,j+1])
        # step3: draw the score on the image and save it (shared logic; should be factored out)
        newImg = self._drawImgFonts(reImg, str(score))
        newDir = self.strDir + "/_SMD2Detection_/"
        if not os.path.exists(newDir):
            os.makedirs(newDir)
        newPath = newDir + imgName
        cv2.imwrite(newPath, newImg)  # save the image
        cv2.imshow(imgName, newImg)
        cv2.waitKey(0)
        return score 
Author: Leezhen2014, Project: python--, Lines: 27, Source: BlurDetection.py

Example 9: _Variance

# Required module: import cv2 [as alias]
# Or: from cv2 import imshow [as alias]
def _Variance(self, imgName):
        """
               灰度方差乘積
               :param imgName:
               :return:
               """
        # step 1 圖像的預處理
        img2gray, reImg = self.preImgOps(imgName)
        f = self._imageToMatrix(img2gray)

        # step2: compute the score, draw it on the image, and save it (shared logic; should be factored out)
        score = np.var(f)
        newImg = self._drawImgFonts(reImg, str(score))
        newDir = self.strDir + "/_Variance_/"
        if not os.path.exists(newDir):
            os.makedirs(newDir)
        newPath = newDir + imgName
        cv2.imwrite(newPath, newImg)  # save the image
        cv2.imshow(imgName, newImg)
        cv2.waitKey(0)
        return score 
Author: Leezhen2014, Project: python--, Lines: 23, Source: BlurDetection.py

Example 10: detect

# Required module: import cv2 [as alias]
# Or: from cv2 import imshow [as alias]
def detect(imgfile):
    origimg = cv2.imread(imgfile)
    img = preprocess(origimg)
    
    img = img.astype(np.float32)
    img = img.transpose((2, 0, 1))

    net.blobs['data'].data[...] = img
    out = net.forward() 
    box, conf, cls = postprocess(origimg, out)

    for i in range(len(box)):
       p1 = (box[i][0], box[i][1])
       p2 = (box[i][2], box[i][3])
       cv2.rectangle(origimg, p1, p2, (0,255,0))
       p3 = (max(p1[0], 15), max(p1[1], 15))
       title = "%s:%.2f" % (COCO_CLASSES[int(cls[i])], conf[i])
       cv2.putText(origimg, title, p3, cv2.FONT_ITALIC, 0.6, (0, 255, 0), 1)
    cv2.imshow("SSD", origimg)
 
    k = cv2.waitKey(0) & 0xff
    # exit if ESC pressed
    if k == 27:
        return False
    return True 
Author: PINTO0309, Project: MobileNetv2-SSDLite, Lines: 26, Source: demo_caffe.py

Example 11: detect

# Required module: import cv2 [as alias]
# Or: from cv2 import imshow [as alias]
def detect(imgfile):
    origimg = cv2.imread(imgfile)
    img = preprocess(origimg)
    
    img = img.astype(np.float32)
    img = img.transpose((2, 0, 1))

    net.blobs['data'].data[...] = img
    out = net.forward() 
    box, conf, cls = postprocess(origimg, out)

    for i in range(len(box)):
       p1 = (box[i][0], box[i][1])
       p2 = (box[i][2], box[i][3])
       cv2.rectangle(origimg, p1, p2, (0,255,0))
       p3 = (max(p1[0], 15), max(p1[1], 15))
       title = "%s:%.2f" % (CLASSES[int(cls[i])], conf[i])
       cv2.putText(origimg, title, p3, cv2.FONT_ITALIC, 0.6, (0, 255, 0), 1)
    cv2.imshow("SSD", origimg)
 
    k = cv2.waitKey(0) & 0xff
    # exit if ESC pressed
    if k == 27:
        return False
    return True 
Author: PINTO0309, Project: MobileNetv2-SSDLite, Lines: 26, Source: demo_caffe_voc.py

Example 12: live_undistort

# Required module: import cv2 [as alias]
# Or: from cv2 import imshow [as alias]
def live_undistort(camera, camera_matrix, distortion_coefficients):
    """ Using a given calibration matrix, display the distorted, undistorted, and cropped frame"""
    scaled_camera_matrix, roi = cv2.getOptimalNewCameraMatrix(
        camera_matrix, distortion_coefficients, camera.size, 1, camera.size
    )
    while True:
        ret, frame = camera.cap.read()
        assert ret
        distorted_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        undistorted_frame = cv2.undistort(
            distorted_frame, camera_matrix, distortion_coefficients, None, scaled_camera_matrix,
        )
        roi_x, roi_y, roi_w, roi_h = roi
        cropped_frame = undistorted_frame[roi_y : roi_y + roi_h, roi_x : roi_x + roi_w]
        cv2.imshow("distorted %s" % (distorted_frame.shape,), distorted_frame)
        cv2.imshow("undistorted %s" % (undistorted_frame.shape,), undistorted_frame)
        cv2.imshow("cropped %s" % (cropped_frame.shape,), cropped_frame)
        cv2.waitKey(10) 
Author: notkarol, Project: derplearning, Lines: 20, Source: calibrate_camera.py

Example 13: show_yuv_frame

# Required module: import cv2 [as alias]
# Or: from cv2 import imshow [as alias]
def show_yuv_frame(self, window_name, yuv_frame):
        # the VideoFrame.info() dictionary contains some useful information
        # such as the video resolution
        info = yuv_frame.info()
        height, width = info["yuv"]["height"], info["yuv"]["width"]

        # yuv_frame.vmeta() returns a dictionary that contains additional
        # metadata from the drone (GPS coordinates, battery percentage, ...)

        # convert pdraw YUV flag to OpenCV YUV flag
        cv2_cvt_color_flag = {
            olympe.PDRAW_YUV_FORMAT_I420: cv2.COLOR_YUV2BGR_I420,
            olympe.PDRAW_YUV_FORMAT_NV12: cv2.COLOR_YUV2BGR_NV12,
        }[info["yuv"]["format"]]

        # yuv_frame.as_ndarray() is a 2D numpy array with the proper "shape"
        # i.e (3 * height / 2, width) because it's a YUV I420 or NV12 frame

        # Use OpenCV to convert the yuv frame to RGB
        cv2frame = cv2.cvtColor(yuv_frame.as_ndarray(), cv2_cvt_color_flag)
        # Use OpenCV to show this frame
        cv2.imshow(window_name, cv2frame)
        cv2.waitKey(1)  # please OpenCV for 1 ms... 
Author: Parrot-Developers, Project: olympe, Lines: 25, Source: streaming.py

Example 14: update

# Required module: import cv2 [as alias]
# Or: from cv2 import imshow [as alias]
def update(self, radarData):
        self.img = np.zeros((self.height, self.width, self.channels), np.uint8)
        cv2.line(self.img, (10, 0), (self.width // 2 - 5, self.height), (100, 255, 255))
        cv2.line(self.img, (self.width - 10, 0), (self.width // 2 + 5, self.height), (100, 255, 255))

        for track_number in range(1, 65):
            if str(track_number)+'_track_range' in radarData:
                track_range = radarData[str(track_number)+'_track_range']
                track_angle = (float(radarData[str(track_number)+'_track_angle'])+90.0)*math.pi/180

                x_pos = math.cos(track_angle)*track_range*4
                y_pos = math.sin(track_angle)*track_range*4

                cv2.circle(self.img, (self.width // 2 + int(x_pos), self.height - int(y_pos) - 10), 5, (255, 255, 255))
                #cv2.putText(self.img, str(track_number), 
                #    (self.width/2 + int(x_pos)-2, self.height - int(y_pos) - 10), self.font, 1, (255,255,255), 2)

        cv2.imshow("Radar", self.img)
        cv2.waitKey(2) 
Author: diyjac, Project: Udacity-SDC-Radar-Driver-Micro-Challenge, Lines: 21, Source: esr_visualizer.py

Example 15: test

# Required module: import cv2 [as alias]
# Or: from cv2 import imshow [as alias]
def test():
	"""
	read the pickle file on disk and implement undistor on image
	show the oringal/undistort image
	"""
	print("Reading the pickle file...")
	pickle_file = open("./camera_cal.p", "rb")
	dist_pickle = pickle.load(pickle_file)
	mtx = dist_pickle["mtx"]  
	dist = dist_pickle["dist"]
	pickle_file.close()

	print("Reading the sample image...")
	img = cv2.imread('corners_founded/corners_found13.jpg')
	img_size = (img.shape[1],img.shape[0])
	dst = cv2.undistort(img, mtx, dist, None, mtx)

	# dst = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)
	# Visualize undistortion
	print("Visulize the result...")
	f, (ax1,ax2) = plt.subplots(1,2, figsize=(20,10))
	ax1.imshow(img), ax1.set_title('Original Image', fontsize=15)
	ax2.imshow(dst), ax2.set_title('Undistorted Image', fontsize=15)
	plt.show() 
Author: ChengZhongShen, Project: Advanced_Lane_Lines, Lines: 26, Source: camera_calibration.py


Note: The cv2.imshow examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their original authors, and copyright remains with those authors; for distribution and use, please refer to each project's license. Do not reproduce without permission.