当前位置: 首页>>代码示例>>Python>>正文


Python cv.ShowImage方法代码示例

本文整理汇总了Python中cv.ShowImage方法的典型用法代码示例。如果您正苦于以下问题：Python cv.ShowImage方法的具体用法？Python cv.ShowImage怎么用？Python cv.ShowImage使用的例子？那么恭喜您，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在cv模块的用法示例。


在下文中一共展示了cv.ShowImage方法的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: detect_and_draw

# 需要导入模块: import cv [as 别名]
# 或者: from cv import ShowImage [as 别名]
def detect_and_draw(img, cascade):
    """Detect faces in *img* with a Haar cascade and draw red boxes on it.

    Relies on module globals ``image_scale``, ``haar_scale``,
    ``min_neighbors``, ``haar_flags`` and ``min_size``.  The annotated
    image is shown in the "Face detection" window.
    """
    # Allocate temporary single-channel 8-bit images.
    # (Original mixed tabs and spaces in this continuation — normalized.)
    gray = cv.CreateImage((img.width, img.height), 8, 1)
    small_img = cv.CreateImage((cv.Round(img.width / image_scale),
                                cv.Round(img.height / image_scale)), 8, 1)

    # convert color input image to grayscale
    cv.CvtColor(img, gray, cv.CV_BGR2GRAY)

    # scale input image down for faster processing
    cv.Resize(gray, small_img, cv.CV_INTER_LINEAR)

    cv.EqualizeHist(small_img, small_img)

    if cascade:
        t = cv.GetTickCount()
        faces = cv.HaarDetectObjects(small_img, cascade, cv.CreateMemStorage(0),
                                     haar_scale, min_neighbors, haar_flags, min_size)
        t = cv.GetTickCount() - t
        # parenthesized single-argument print works under Python 2 and 3
        print("detection time = %gms" % (t / (cv.GetTickFrequency() * 1000.)))
        if faces:
            for ((x, y, w, h), n) in faces:
                # detection ran on the downscaled image, so scale each
                # bounding box back up and convert it to two CvPoints
                pt1 = (int(x * image_scale), int(y * image_scale))
                pt2 = (int((x + w) * image_scale), int((y + h) * image_scale))
                cv.Rectangle(img, pt1, pt2, cv.RGB(255, 0, 0), 3, 8, 0)
                print("x= %s y= %s w= %s h= %s" % (x, y, w, h))

    cv.ShowImage("Face detection", img)
开发者ID:alduxvm,项目名称:rpi-opencv,代码行数:32,代码来源:face-detection.py

示例2: disp_thresh

# 需要导入模块: import cv [as 别名]
# 或者: from cv import ShowImage [as 别名]
def disp_thresh(lower, upper):
    """Grab one Kinect depth frame and display pixels inside (lower, upper)."""
    depth, timestamp = freenect.sync_get_depth()
    # Build a binary mask: 255 where depth lies strictly inside the band.
    in_band = np.logical_and(depth > lower, depth < upper)
    depth = (255 * in_band).astype(np.uint8)
    # Wrap the numpy buffer in an IplImage header without copying.
    header = cv.CreateImageHeader((depth.shape[1], depth.shape[0]),
                                  cv.IPL_DEPTH_8U,
                                  1)
    row_bytes = depth.dtype.itemsize * depth.shape[1]
    cv.SetData(header, depth.tostring(), row_bytes)
    cv.ShowImage('Depth', header)
    cv.WaitKey(10)
开发者ID:sightmachine,项目名称:SimpleCV2,代码行数:13,代码来源:demo_cv_thresh_sweep.py

示例3: display_depth

# 需要导入模块: import cv [as 别名]
# 或者: from cv import ShowImage [as 别名]
def display_depth(dev, data, timestamp):
    """Freenect callback: render a depth frame; ESC stops the capture loop."""
    global keep_running
    cv.ShowImage('Depth', frame_convert.pretty_depth_cv(data))
    key = cv.WaitKey(10)
    if key == 27:  # ESC
        keep_running = False
开发者ID:sightmachine,项目名称:SimpleCV2,代码行数:7,代码来源:demo_cv_async.py

示例4: display_rgb

# 需要导入模块: import cv [as 别名]
# 或者: from cv import ShowImage [as 别名]
def display_rgb(dev, data, timestamp):
    """Freenect callback: render an RGB frame; ESC stops the capture loop."""
    global keep_running
    cv.ShowImage('RGB', frame_convert.video_cv(data))
    key = cv.WaitKey(10)
    if key == 27:  # ESC
        keep_running = False
开发者ID:sightmachine,项目名称:SimpleCV2,代码行数:7,代码来源:demo_cv_async.py

示例5: show_depth

# 需要导入模块: import cv [as 别名]
# 或者: from cv import ShowImage [as 别名]
def show_depth():
    """Display Kinect depth pixels within +/- threshold of current_depth."""
    global threshold
    global current_depth

    depth, timestamp = freenect.sync_get_depth()
    # Keep only pixels inside the [current_depth - threshold,
    # current_depth + threshold] band, as a 0/255 8-bit image.
    lo = current_depth - threshold
    hi = current_depth + threshold
    band = np.logical_and(depth >= lo, depth <= hi)
    depth = (255 * band).astype(np.uint8)
    # Wrap the numpy buffer in an IplImage header without copying.
    header = cv.CreateImageHeader((depth.shape[1], depth.shape[0]),
                                  cv.IPL_DEPTH_8U,
                                  1)
    row_bytes = depth.dtype.itemsize * depth.shape[1]
    cv.SetData(header, depth.tostring(), row_bytes)
    cv.ShowImage('Depth', header)
开发者ID:sightmachine,项目名称:SimpleCV2,代码行数:16,代码来源:demo_cv_threshold.py

示例6: show_video

# 需要导入模块: import cv [as 别名]
# 或者: from cv import ShowImage [as 别名]
def show_video():
    """Fetch one RGB frame from the Kinect and display it."""
    rgb_frame = freenect.sync_get_video()[0]
    cv.ShowImage('Video', frame_convert.video_cv(rgb_frame))
开发者ID:sightmachine,项目名称:SimpleCV2,代码行数:4,代码来源:demo_cv_threshold.py

示例7: detect_and_draw

# 需要导入模块: import cv [as 别名]
# 或者: from cv import ShowImage [as 别名]
def detect_and_draw(img):
    """Track the largest white region in *img* and mark its centroid.

    Thresholds the frame in HSV space, computes image moments of the
    resulting mask, and — if the blob area exceeds 5000 px — draws a dot
    at its centroid and prints the detection time and coordinates.
    The annotated image is shown in the "Color detection" window.

    Note: the original also allocated gray/small_img and resized an
    uninitialized grayscale image; that leftover face-detection code was
    unused by this color pipeline and has been removed.
    """
    t1 = time.time()

    # blur the source image in place to reduce color noise
    cv.Smooth(img, img, cv.CV_BLUR, 3)

    # HSV makes color thresholding simpler (hue isolates the color)
    hsv_img = cv.CreateImage(cv.GetSize(img), 8, 3)
    cv.CvtColor(img, hsv_img, cv.CV_BGR2HSV)
    thresholded_img = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)

    # White: any hue, low saturation, high value
    sensitivity = 15
    cv.InRangeS(hsv_img, (0, 0, 255 - sensitivity), (255, sensitivity, 255),
                thresholded_img)

    # Alternative color bands (OpenCV uses a 0-180 hue range):
    # Red:   cv.InRangeS(hsv_img, (0, 150, 0), (5, 255, 255), thresholded_img)
    # Blue:  cv.InRangeS(hsv_img, (100, 50, 50), (140, 255, 255), thresholded_img)
    # Green: cv.InRangeS(hsv_img, (40, 50, 50), (80, 255, 255), thresholded_img)

    # zeroth central moment == area (pixel count) of the thresholded blob
    mat = cv.GetMat(thresholded_img)
    moments = cv.Moments(mat, 0)
    area = cv.GetCentralMoment(moments, 0, 0)

    # ignore small blobs — they are likely noise
    if area > 5000:
        # centroid = first-order spatial moments divided by the area
        x = int(round(cv.GetSpatialMoment(moments, 1, 0) / area))
        y = int(round(cv.GetSpatialMoment(moments, 0, 1) / area))

        # overlay a dot marking the center of the tracked object
        overlay = cv.CreateImage(cv.GetSize(img), 8, 3)
        cv.Circle(overlay, (x, y), 2, (0, 0, 0), 20)
        cv.Add(img, overlay, img)
        # (optionally merge the threshold mask back for debugging:)
        # cv.Merge(thresholded_img, None, None, None, img)
        t2 = time.time()
        # parenthesized single-argument print works under Python 2 and 3
        print("detection time = %gs x=%d,y=%d" % (round(t2 - t1, 3), x, y))

    cv.ShowImage("Color detection", img)
开发者ID:alduxvm,项目名称:rpi-opencv,代码行数:60,代码来源:color-2.py

示例8: run

# 需要导入模块: import cv [as 别名]
# 或者: from cv import ShowImage [as 别名]
def run(self):
    """Main capture loop: track a bright white blob in the camera feed.

    Grabs frames from ``self.capture``, thresholds them in HSV space,
    and when a large enough region is found marks its centroid and
    prints the detection time.  Pressing ESC (key 27) exits the loop.
    """
    while True:
        img = cv.QueryFrame(self.capture)
        t1 = time.time()
        # blur the source image in place to reduce color noise
        cv.Smooth(img, img, cv.CV_BLUR, 3)

        # convert to HSV so the color to track can be isolated via hue
        hsv_img = cv.CreateImage(cv.GetSize(img), 8, 3)
        cv.CvtColor(img, hsv_img, cv.CV_BGR2HSV)

        # limit all pixels that don't match our criteria; OpenCV uses a
        # 0-180 hue range for the HSV color model
        thresholded_img = cv.CreateImage(cv.GetSize(hsv_img), 8, 1)

        # White: any hue, low saturation, high value
        sensitivity = 10
        cv.InRangeS(hsv_img, (0, 0, 255 - sensitivity), (255, sensitivity, 255),
                    thresholded_img)

        # Alternative color bands:
        # Red:   cv.InRangeS(hsv_img, (0, 150, 0), (5, 255, 255), thresholded_img)
        # Blue:  cv.InRangeS(hsv_img, (100, 50, 50), (140, 255, 255), thresholded_img)
        # Green: cv.InRangeS(hsv_img, (40, 50, 50), (80, 255, 255), thresholded_img)

        # moments of the mask; zeroth central moment == blob area
        mat = cv.GetMat(thresholded_img)
        moments = cv.Moments(mat, 0)
        area = cv.GetCentralMoment(moments, 0, 0)

        # there can be noise in the video, so ignore small areas
        if area > 10000:
            # centroid = first-order spatial moments divided by the area
            x = int(round(cv.GetSpatialMoment(moments, 1, 0) / area))
            y = int(round(cv.GetSpatialMoment(moments, 0, 1) / area))

            # overlay a dot marking the center of the tracked object
            overlay = cv.CreateImage(cv.GetSize(img), 8, 3)
            cv.Circle(overlay, (x, y), 2, (255, 255, 255), 20)
            cv.Add(img, overlay, img)
            t2 = time.time()
            # NOTE(review): merges the 1-channel mask into the 3-channel
            # img so the applied threshold is visible; kept as in the
            # original — confirm old-style cv accepts this channel mix.
            cv.Merge(thresholded_img, None, None, None, img)
            # parenthesized single-argument print works under Python 2 and 3
            print("detection time = %gs x=%d,y=%d" % (round(t2 - t1, 3), x, y))

        # display the (possibly annotated) frame
        cv.ShowImage(color_tracker_window, img)

        if cv.WaitKey(10) == 27:  # ESC
            break
开发者ID:alduxvm,项目名称:rpi-opencv,代码行数:63,代码来源:color-1.py

示例9: processVideo

# 需要导入模块: import cv [as 别名]
# 或者: from cv import ShowImage [as 别名]
def processVideo(self, jpegbytes, timestamp_10msec):
    """Handle one rover video frame and the current controller state.

    Polls the PS3 controller (lights, stealth, camera tilt, treads),
    then — if the old-style ``cv`` module is available — round-trips the
    JPEG bytes through a temp file and displays the frame.  ESC sets
    ``self.quit``.  Display failures are deliberately non-fatal so the
    rover keeps driving.
    """
    # Update controller events
    pygame.event.pump()

    # Toggle lights
    self.lightsAreOn = self.checkButton(self.lightsAreOn, BUTTON_LIGHTS,
                                        self.turnLightsOn, self.turnLightsOff)

    # Toggle night vision (infrared camera)
    self.stealthIsOn = self.checkButton(self.stealthIsOn, BUTTON_STEALTH,
                                        self.turnStealthOn, self.turnStealthOff)
    # Move camera up/down
    if self.controller.get_button(BUTTON_CAMERA_UP):
        self.moveCameraVertical(1)
    elif self.controller.get_button(BUTTON_CAMERA_DOWN):
        self.moveCameraVertical(-1)
    else:
        self.moveCameraVertical(0)

    # Set treads based on axes
    self.setTreads(self.axis(1), self.axis(3))

    # Display video image if possible
    try:
        if cv:
            # Save image to file on disk and load as OpenCV image.
            # BUG FIX: JPEG data is binary — open with 'wb', not 'w'
            # (text mode corrupts the bytes on platforms that translate
            # line endings).  Context manager guarantees the close.
            fname = 'tmp.jpg'
            with open(fname, 'wb') as fd:
                fd.write(jpegbytes)
            image = cv.LoadImage(fname)

            # Show image; ESC requests shutdown
            cv.ShowImage(self.wname, image)
            if cv.WaitKey(1) & 0xFF == 27:  # ESC
                self.quit = True
    except Exception:
        # best-effort display: ignore frame errors, keep driving
        pass


# Converts Y coordinate of specified axis to +/-1 or 0
开发者ID:simondlevy,项目名称:RoverPylot,代码行数:45,代码来源:ps3rover20.py

示例10: FPV_thread

# 需要导入模块: import cv [as 别名]
# 或者: from cv import ShowImage [as 别名]
def FPV_thread():
    """First-person-view loop: capture, mirror, buffer and display frames.

    Shares state with the application through module globals: the latest
    (resized) frame is published under ``frame_lock`` in ``latest_frame``,
    while ``overlay_message`` and ``face_position`` are read back as
    results to draw on screen.  Pressing 'q' ends the loop and sets
    ``FPV_thread_stop``.
    """
    global camera_index
    global capture
    global WINDOW_NAME
    global latest_frame
    global FPV_thread_stop
    global overlay_message  # shared with application return results
    global face_position    # shared with application return results

    FPV_init()

    cv.NamedWindow(WINDOW_NAME, cv.CV_WINDOW_NORMAL)
    cv.MoveWindow(WINDOW_NAME, 0, 0)

    width_scale = 1.0
    height_scale = 1.0
    while True:
        frame = cv.QueryFrame(capture)
        cv.Flip(frame, None, 1)  # mirror horizontally

        # copy the frame into the shared buffer under the lock
        frame_lock.acquire()
        original_imagesize = (0, 0)
        resized_imagesize = (0, 0)
        if not latest_frame:
            # first frame: allocate the shared 640x480 buffer and compute
            # the scale factors used to map face coordinates back up
            latest_frame = cv.CreateImage((640, 480), frame.depth, frame.nChannels)
            original_imagesize = cv.GetSize(frame)
            resized_imagesize = cv.GetSize(latest_frame)
            width_scale = original_imagesize[0] * 1.0 / resized_imagesize[0]
            height_scale = original_imagesize[1] * 1.0 / resized_imagesize[1]
        cv.Resize(frame, latest_frame)
        frame_lock.release()

        # Display result: overlay message banner and face box (if any)
        text_start_point = (10, 50)
        cv.PutText(frame, overlay_message, text_start_point, font, cv.Scalar(255, 255, 255))
        cv.Rectangle(frame, text_start_point, (original_imagesize[0], 100), cv.Scalar(0, 0, 0), thickness=cv.CV_FILLED)
        if face_position[0] > 0.0:
            # face_position is in resized-frame coordinates; scale back up
            point1 = (int(face_position[0] * width_scale), int(face_position[1] * height_scale))
            point2 = (int((face_position[0] + face_position[2]) * width_scale),
                      int((face_position[1] + face_position[3]) * height_scale))
            cv.Rectangle(frame, point1, point2,
                         cv.Scalar(255, 255, 255), thickness=2)
        cv.ShowImage(WINDOW_NAME, frame)
        cv.ResizeWindow(WINDOW_NAME, 200, 100)
        cv.NamedWindow(WINDOW_NAME, cv.CV_WINDOW_NORMAL)
        cv.SetWindowProperty(WINDOW_NAME, 0, cv.CV_WINDOW_FULLSCREEN)
        c = cv.WaitKey(10)
        if c == ord('q'):
            break

    # parenthesized single-argument print works under Python 2 and 3
    print("[INFO] FPV Thread is finished")
    FPV_thread_stop = True
    FPV_close()
开发者ID:cmusatyalab,项目名称:elijah-provisioning,代码行数:57,代码来源:FPV_client.py


注:本文中的cv.ShowImage方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。