

Python cv2.VideoCapture Method Code Examples

This article collects typical usage examples of the cv2.VideoCapture method in Python. If you are wondering what cv2.VideoCapture does, how to call it, or what real-world uses of it look like, the selected code samples below may help. You can also browse further usage examples from the cv2 module to which the method belongs.


Below are 15 code examples of the cv2.VideoCapture method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
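
As a quick orientation before the examples, here is a minimal sketch of the typical cv2.VideoCapture read loop (the file name "example.mp4" is only a placeholder; an integer index such as 0 selects a local camera):

import cv2

cap = cv2.VideoCapture("example.mp4")  # or cv2.VideoCapture(0) for the default camera
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:  # end of stream or failed read
        break
    cv2.imshow("frame", frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):  # press q to quit
        break
cap.release()
cv2.destroyAllWindows()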

Example 1: __init__

# Required import: import cv2 [as alias]
# Or: from cv2 import VideoCapture [as alias]
def __init__(self, pipe=0, img_size=416, half=False):
        self.img_size = img_size
        self.half = half  # half precision fp16 images

        if pipe == '0':
            pipe = 0  # local camera
        # pipe = 'rtsp://192.168.1.64/1'  # IP camera
        # pipe = 'rtsp://username:password@192.168.1.64/1'  # IP camera with login
        # pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa'  # IP traffic camera
        # pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg'  # IP golf camera

        # https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/
        # pipe = '"rtspsrc location="rtsp://username:password@192.168.1.64/1" latency=10 ! appsink'  # GStreamer

        # https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/
        # https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package  # install help
        # pipe = "rtspsrc location=rtsp://root:root@192.168.0.91:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink"  # GStreamer

        self.pipe = pipe
        self.cap = cv2.VideoCapture(pipe)  # video capture object
        self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3)  # set buffer size 
Author: zbyuan, Project: pruning_yolov3, Lines: 23, Source: datasets.py
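
Regarding the commented-out GStreamer pipelines above: when a full pipeline string is passed, it can help to request the GStreamer backend explicitly (a sketch, assuming OpenCV was built with GStreamer support; the RTSP URL is a placeholder):

pipe = 'rtspsrc location=rtsp://user:pass@192.168.1.64/1 latency=10 ! decodebin ! videoconvert ! appsink'
cap = cv2.VideoCapture(pipe, cv2.CAP_GSTREAMER)  # explicitly request the GStreamer backend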

Example 2: main

# Required import: import cv2 [as alias]
# Or: from cv2 import VideoCapture [as alias]
def main():
    device = cv2.CAP_OPENNI
    capture = cv2.VideoCapture(device)
    if not(capture.isOpened()):
        capture.open(device)

    capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

    app = wx.App()
    frame = MyFrame(None, -1, 'chapter2.py', capture)
    frame.Show(True)
#   self.SetTopWindow(frame)
    app.MainLoop()

    # When everything done, release the capture
    capture.release()
    cv2.destroyAllWindows() 
Author: PacktPublishing, Project: OpenCV-Computer-Vision-Projects-with-Python, Lines: 20, Source: chapter2.py

Example 3: main

# Required import: import cv2 [as alias]
# Or: from cv2 import VideoCapture [as alias]
def main():
    args = parse_args()

    device = torch.device(args.device)

    model = init_detector(args.config, args.checkpoint, device=device)

    camera = cv2.VideoCapture(args.camera_id)

    print('Press "Esc", "q" or "Q" to exit.')
    while True:
        ret_val, img = camera.read()
        result = inference_detector(model, img)

        ch = cv2.waitKey(1)
        if ch == 27 or ch == ord('q') or ch == ord('Q'):
            break

        model.show_result(
            img, result, score_thr=args.score_thr, wait_time=1, show=True) 
Author: open-mmlab, Project: mmdetection, Lines: 22, Source: webcam_demo.py
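
Example 3 never checks ret_val; if the camera read fails, img is None and inference_detector will raise. A small, hedged addition inside the loop guards against that:

ret_val, img = camera.read()
if not ret_val:  # frame not grabbed (camera unplugged or stream ended)
    break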

Example 4: __init__

# Required import: import cv2 [as alias]
# Or: from cv2 import VideoCapture [as alias]
def __init__(self, ip, port, showme, level, version):
        threading.Thread.__init__(self)
        self.setDaemon(True)
        self.ADDR = (ip, port)
        self.showme = showme
        if int(level) < 3:
            self.interval = int(level)
        else:
            self.interval = 3
        self.fx = 1 / (self.interval + 1)
        if self.fx < 0.3:
            self.fx = 0.3
        if version == 4:
            self.sock = socket(AF_INET, SOCK_STREAM)    # socket, AF_INET, AF_INET6, SOCK_STREAM come from the socket module
        else:
            self.sock = socket(AF_INET6, SOCK_STREAM)
        self.cap = cv2.VideoCapture(0)
        print("VIDEO client starts...")
Author: 11ze, Project: The-chat-room, Lines: 20, Source: vachat.py

Example 5: __init__

# Required import: import cv2 [as alias]
# Or: from cv2 import VideoCapture [as alias]
def __init__(self, capture=cv2.VideoCapture(0), frame_resize=None):
        self._capture = capture
        self._frame_resize = None
        if frame_resize:
            if isinstance(frame_resize, (tuple, list)) and (len(frame_resize) == 2):
                self._frame_resize = tuple(map(int, frame_resize))
                self._frame_shape = (1, 3, self._frame_resize[0], self._frame_resize[1])
            elif isinstance(frame_resize, float):
                width = int(self._capture.get(cv2.CAP_PROP_FRAME_WIDTH)*frame_resize)
                height = int(self._capture.get(cv2.CAP_PROP_FRAME_HEIGHT)*frame_resize)
                self._frame_shape = (1, 3, width, height)
                self._frame_resize = (width, height)
            else:
                assert False, ("frame_resize should be a tuple of (x,y) pixels "
                               "or a float setting the scaling factor")
        else:
            self._frame_shape = (1, 3,
                int(self._capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(self._capture.get(cv2.CAP_PROP_FRAME_HEIGHT))) 
Author: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 21, Source: cv2Iterator.py
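
A caveat on Example 5: a default argument such as capture=cv2.VideoCapture(0) is evaluated once, when the def statement runs, so the camera is opened at definition time even if the caller supplies its own capture object. A common workaround (a sketch, not from the original project) is a None default:

def __init__(self, capture=None, frame_resize=None):
    # open the default camera only when no capture object is supplied
    self._capture = capture if capture is not None else cv2.VideoCapture(0)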

Example 6: _initCamera

# Required import: import cv2 [as alias]
# Or: from cv2 import VideoCapture [as alias]
def _initCamera(self, callSleep = True):
        """
        Initializes the camera. If a connection can't be established, writes an error message to the
        log and sleeps for a short interval.

        :return: the capture object when the camera is successfully opened, otherwise None
        """
        self.cap = cv.VideoCapture(self.camConnectionString)

        if self.cap is None:
            self.setError("can't connect to camera")
            if callSleep:
                time.sleep(5)
            return None

        if not self.cap.isOpened():  # did we get a connection at all ?
            self.setError("can't connect to camera")
            if callSleep:
                time.sleep(5)

            return None

        return self.cap 
Author: JFF-Bohdan, Project: pynvr, Lines: 25, Source: camera_support.py

Example 7: initfrom

# Required import: import cv2 [as alias]
# Or: from cv2 import VideoCapture [as alias]
def initfrom(self):
        global update1
        update1 = 0
        global update2
        update2 = 0
        self.maxcap = 0
        for i in range(10):  # probe the first 10 camera indices
            cap = cv2.VideoCapture(i)
            if cap.isOpened():
                self.maxcap += 1
            cap.release()
        self.selecamera1.clear()
        self.selecamera2.clear()

        self.selecamera1.addItems([str(i) for i in range(self.maxcap)])
        self.selecamera2.addItems([str(i) for i in range(self.maxcap)]) 
Author: anonymouslycn, Project: bjtu_BinocularCameraRecord, Lines: 19, Source: Main.py

Example 8: loop2

# Required import: import cv2 [as alias]
# Or: from cv2 import VideoCapture [as alias]
def loop2(self,text,w=1280,h=720):
        cap = cv2.VideoCapture(int(text))
        cap.set(6, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'))  # 6 = cv2.CAP_PROP_FOURCC
        global capnum2
        capnum2 = int(text)
        cap.set(3, w)  # 3 = cv2.CAP_PROP_FRAME_WIDTH
        cap.set(4, h)  # 4 = cv2.CAP_PROP_FRAME_HEIGHT
        global update2
        update2 = 1
        global shotmark2

        while (update2 == 1):
            ret, frame = cap.read() 
            if shotmark2 == 1:
                fn = self.lineEdit.text()
                name = "photo/2_"+fn + "video.jpg"
                if os.path.exists(name):
                    name = "photo/2_" + fn + "video"+str(int(time.time()))+".jpg"
                cv2.imwrite(name, frame)
                shotmark2 = 0
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            self.original2_image.updateImage(frame)
        # cap.release()
        cv_img_rgb = np.zeros((700,700,3))
        self.original2_image.updateImage(cv_img_rgb) 
Author: anonymouslycn, Project: bjtu_BinocularCameraRecord, Lines: 27, Source: Main.py

Example 9: CaptureContinous

# Required import: import cv2 [as alias]
# Or: from cv2 import VideoCapture [as alias]
def CaptureContinous(self, detector):
    cap = cv2.VideoCapture(0)
    _, image = cap.read()
    cap.release()
    output = detector.prediction(image)
    df = detector.filter_prediction(output, image)
    if len(df) > 0:
        if (df['class_name']
                .str
                .contains('person|bird|cat|wine glass|cup|sandwich')
                .any()):
            day = datetime.now().strftime("%Y%m%d")
            directory = os.path.join(IMAGE_FOLDER, 'webcam', day)
            if not os.path.exists(directory):
                os.makedirs(directory)
            image = detector.draw_boxes(image, df)
            classes = df['class_name'].unique().tolist()
            hour = datetime.now().strftime("%H%M%S")
            filename_output = os.path.join(
                    directory, "{}_{}_.jpg".format(hour, "-".join(classes))
                    )
            cv2.imwrite(filename_output, image) 
Author: cristianpb, Project: object-detection, Lines: 24, Source: camera_opencv.py

Example 10: counter

# Required import: import cv2 [as alias]
# Or: from cv2 import VideoCapture [as alias]
def counter(func):
    @wraps(func)
    def tmp(*args, **kwargs):
        tmp.count += 1
        global lastsave
        if time.time() - lastsave > 3:
            # time.time() is in seconds, so the count resets once more than 3 seconds have passed
            lastsave = time.time()
            tmp.count = 0
        return func(*args, **kwargs)
    tmp.count = 0
    return tmp




#cap = cv2.VideoCapture(0) 
Author: dark-archerx, Project: Traffic-Signs-and-Object-Detection, Lines: 19, Source: fatigue.py

Example 11: extract_allframescommand

# Required import: import cv2 [as alias]
# Or: from cv2 import VideoCapture [as alias]
def extract_allframescommand(filename):
    if filename:

        pathDir = str(filename[:-4])
        if not os.path.exists(pathDir):
            os.makedirs(pathDir)

        picFname = '%d.png'

        saveDirFilenames = os.path.join(pathDir, picFname)
        print(saveDirFilenames)

        fname = str(filename)
        cap = cv2.VideoCapture(fname)
        fps = cap.get(cv2.CAP_PROP_FPS)
        amount_of_frames = cap.get(7)  # 7 = cv2.CAP_PROP_FRAME_COUNT
        print('The number of frames in this video = ', amount_of_frames)
        print('Extracting frames... (Might take a while)')
        command = str('ffmpeg -i ' +'"'+ str(fname)+'"' + ' ' + '-q:v 1' + ' ' + '-start_number 0' + ' '+'"'+ str(saveDirFilenames)+'"')
        print(command)
        subprocess.call(command, shell=True)
        print('All frames are extracted!')
    else:
        print('Please select a video to convert') 
Author: sgoldenlab, Project: simba, Lines: 26, Source: tkinter_functions.py
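
The ffmpeg command in Example 11 is built by string concatenation and executed with shell=True, which is fragile for paths containing quotes or spaces. Passing an argument list keeps the same flags without shell quoting (a sketch under that assumption):

command = ['ffmpeg', '-i', fname, '-q:v', '1', '-start_number', '0', saveDirFilenames]
subprocess.call(command)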

Example 12: extractspecificframe

# Required import: import cv2 [as alias]
# Or: from cv2 import VideoCapture [as alias]
def extractspecificframe(filename,startframe1,endframe1):

    cap = cv2.VideoCapture(filename)
    amount_of_frames = cap.get(7)  # 7 = cv2.CAP_PROP_FRAME_COUNT
    pathDir = str(filename[:-4]+'\\frames')
    if not os.path.exists(pathDir):
        os.makedirs(pathDir)

    frames_OI = list(range(int(startframe1),int(endframe1)+1))
    #frames_OI.extend(range(7000,7200))
    #frames_OI.extend(range(9200,9350))

    for i in frames_OI:
        currentFrame = i
        cap.set(1, currentFrame)  # 1 = cv2.CAP_PROP_POS_FRAMES
        ret, frame = cap.read()
        fileName = str(currentFrame) + str('.png')
        filePath = os.path.join(pathDir, fileName)
        cv2.imwrite(filePath,frame) 
Author: sgoldenlab, Project: simba, Lines: 21, Source: tkinter_functions.py
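
Calling cap.set(1, currentFrame) for every frame in Example 12 re-seeks each time; with some codecs seeking by frame number is slow or lands on the nearest keyframe. Since the requested range is contiguous, seeking once and then reading sequentially is a common alternative (a sketch under that assumption):

cap.set(cv2.CAP_PROP_POS_FRAMES, int(startframe1))  # seek once to the start of the range
for currentFrame in frames_OI:
    ret, frame = cap.read()
    if not ret:
        break
    cv2.imwrite(os.path.join(pathDir, str(currentFrame) + '.png'), frame)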

Example 13: add_single_video_yaml

# Required import: import cv2 [as alias]
# Or: from cv2 import VideoCapture [as alias]
def add_single_video_yaml(yamlfile,videofile):
    yamlPath = yamlfile
    cap = cv2.VideoCapture(videofile)
    width = int(cap.get(3))   # 3 = cv2.CAP_PROP_FRAME_WIDTH
    height = int(cap.get(4))  # 4 = cv2.CAP_PROP_FRAME_HEIGHT
    cropLine = [0, width, 0, height]
    cropLine = str(cropLine)
    currCropLinePath = cropLine.strip("[]")
    currCropLinePath = currCropLinePath.replace("'", "")
    with open(yamlPath) as f:
        read_yaml = yaml.load(f, Loader=yaml.FullLoader)

    read_yaml["video_sets"].update({videofile: {'crop': currCropLinePath}})

    with open(yamlPath, 'w') as outfile:
        yaml.dump(read_yaml, outfile, default_flow_style=False) 
Author: sgoldenlab, Project: simba, Lines: 18, Source: dlc_change_yamlfile.py

Example 14: main

# Required import: import cv2 [as alias]
# Or: from cv2 import VideoCapture [as alias]
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', default="YOLO_small.ckpt", type=str)
    parser.add_argument('--weight_dir', default='weights', type=str)
    parser.add_argument('--data_dir', default="data", type=str)
    parser.add_argument('--gpu', default='', type=str)
    args = parser.parse_args()

    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    yolo = YOLONet(False)
    weight_file = os.path.join(args.data_dir, args.weight_dir, args.weights)
    detector = Detector(yolo, weight_file)

    # detect from camera
    # cap = cv2.VideoCapture(-1)
    # detector.camera_detector(cap)

    # detect from image file
    imname = 'test/person.jpg'
    detector.image_detector(imname) 
Author: hizhangp, Project: yolo_tensorflow, Lines: 23, Source: test.py

Example 15: main

# Required import: import cv2 [as alias]
# Or: from cv2 import VideoCapture [as alias]
def main():
    total_pics = 1000
    cap = cv2.VideoCapture(0)
    x, y, w, h = 300, 50, 350, 350

    pic_no = 0
    flag_start_capturing = False
    frames = 0

    while True:
        ret, frame = cap.read()
        frame = cv2.flip(frame, 1)
        #frame = cv2.resize(frame, (image_x, image_y))
        cv2.imwrite("hand_images/" + str(pic_no) + ".jpg", frame)
        cv2.imshow("Capturing gesture", frame)
        pic_no += 1
        if pic_no == total_pics:
            break 
Author: akshaybahadur21, Project: Emojinator, Lines: 20, Source: get_hand_images.py
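
Example 15 leaves the capture open when the loop ends; releasing it and closing the preview window afterwards avoids keeping the camera locked:

cap.release()
cv2.destroyAllWindows()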


Note: The cv2.VideoCapture method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the License of the corresponding project. Do not reproduce without permission.