

Python cv2.CAP_PROP_FRAME_WIDTH Attribute Code Examples

This article collects typical usage examples of the cv2.CAP_PROP_FRAME_WIDTH attribute in Python. If you have been wondering what exactly cv2.CAP_PROP_FRAME_WIDTH does, how to use it, or what real-world code that uses it looks like, the curated examples here should help. You can also browse further usage examples from the cv2 module that this attribute belongs to.


The following presents 15 code examples of the cv2.CAP_PROP_FRAME_WIDTH attribute, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
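
Before the project examples, a minimal sketch of the attribute's two basic operations may be useful: reading the current capture width with VideoCapture.get() and requesting a new width with VideoCapture.set(). This sketch is not taken from any of the projects below; the device index 0 and the 1280x720 request are assumptions for illustration, and set() can be silently ignored or rounded by backends that do not support the requested size.

import cv2

cap = cv2.VideoCapture(0)  # assumption: default webcam at index 0
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))    # current frame width in pixels
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))  # current frame height in pixels
print('native size: %dx%d' % (width, height))

# Request a new size; read the properties back to see what was actually applied.
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
print('applied size: %dx%d' % (cap.get(cv2.CAP_PROP_FRAME_WIDTH),
                               cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
cap.release()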

Example 1: encode

# Required module import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_WIDTH [as alias]
def encode(self, video_in, data, video_out):
        assert len(data) == self.data_dim

        video_in = cv2.VideoCapture(video_in)
        width = int(video_in.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = int(video_in.get(cv2.CAP_PROP_FRAME_HEIGHT))
        length = int(video_in.get(cv2.CAP_PROP_FRAME_COUNT))

        data = torch.FloatTensor([data]).cuda()
        video_out = cv2.VideoWriter(
            video_out, cv2.VideoWriter_fourcc(*'mp4v'), 20.0, (width, height))

        for i in tqdm(range(length)):
            ok, frame = video_in.read()
            if not ok:
                break  # the reported frame count can exceed the number of readable frames
            frame = torch.FloatTensor([frame]) / 127.5 - 1.0      # (L, H, W, 3)
            frame = frame.permute(3, 0, 1, 2).unsqueeze(0).cuda()  # (1, 3, L, H, W)
            wm_frame = self.encoder(frame, data)                       # (1, 3, L, H, W)
            wm_frame = torch.clamp(wm_frame, min=-1.0, max=1.0)
            wm_frame = (
                (wm_frame[0, :, 0, :, :].permute(1, 2, 0) + 1.0) * 127.5
            ).detach().cpu().numpy().astype("uint8")
            video_out.write(wm_frame)

        video_out.release() 
Developer: DAI-Lab, Project: RivaGAN, Lines: 26, Source: rivagan.py

Example 2: __init__

# Required module import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_WIDTH [as alias]
def __init__(self, video_source, video_width, video_height, video_fps, queue_size=1):
        self.video_fps = video_fps

        vc = cv2.VideoCapture(video_source)

        if hasattr(cv2, 'cv'):
            vc.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, video_width)
            vc.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, video_height)
            vc.set(cv2.cv.CV_CAP_PROP_FPS, video_fps)
        else:
            vc.set(cv2.CAP_PROP_FRAME_WIDTH, video_width)
            vc.set(cv2.CAP_PROP_FRAME_HEIGHT, video_height)
            vc.set(cv2.CAP_PROP_FPS, video_fps)

        self.stream = vc
        self.stopped = False
        self.queue = Queue(maxsize=queue_size)
        self.thread = Thread(target=self.update, args=())
        self.thread.daemon = True
        self.thread.start() 
Developer: blue-oil, Project: blueoil, Lines: 22, Source: demo.py

Example 3: __init__

# Required module import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_WIDTH [as alias]
def __init__(self, capture=cv2.VideoCapture(0), frame_resize=None):
        self._capture = capture
        self._frame_resize = None
        if frame_resize:
            if isinstance(frame_resize, (tuple, list)) and (len(frame_resize) == 2):
                self._frame_resize = tuple(map(int, frame_resize))
                self._frame_shape = (1, 3, self._frame_resize[0], self._frame_resize[1])
            elif isinstance(frame_resize, float):
                width = int(self._capture.get(cv2.CAP_PROP_FRAME_WIDTH)*frame_resize)
                height = int(self._capture.get(cv2.CAP_PROP_FRAME_HEIGHT)*frame_resize)
                self._frame_shape = (1, 3, width, height)
                self._frame_resize = (width, height)
            else:
                assert False, ("frame_resize should be a tuple of (x,y) pixels "
                               "or a float setting the scaling factor")
        else:
            self._frame_shape = (1, 3,
                int(self._capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
                int(self._capture.get(cv2.CAP_PROP_FRAME_HEIGHT))) 
Developer: awslabs, Project: dynamic-training-with-apache-mxnet-on-aws, Lines: 21, Source: cv2Iterator.py

Example 4: main

# Required module import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_WIDTH [as alias]
def main():
    device = cv2.CAP_OPENNI
    capture = cv2.VideoCapture(device)
    if not(capture.isOpened()):
        capture.open(device)

    capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

    app = wx.App()
    frame = MyFrame(None, -1, 'chapter2.py', capture)
    frame.Show(True)
#   self.SetTopWindow(frame)
    app.MainLoop()

    # When everything done, release the capture
    capture.release()
    cv2.destroyAllWindows() 
Developer: PacktPublishing, Project: OpenCV-Computer-Vision-Projects-with-Python, Lines: 20, Source: chapter2.py

Example 5: _run

# Required module import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_WIDTH [as alias]
def _run(self, n_frames=500, width=1280, height=720, with_threading=False):
        if with_threading:
            cap = VideoCaptureTreading(0)
        else:
            cap = cv2.VideoCapture(0)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
        if with_threading:
            cap.start()
        t0 = time.time()
        i = 0
        while i < n_frames:
            _, frame = cap.read()
            cv2.imshow('Frame', frame)
            cv2.waitKey(1) & 0xFF
            i += 1
        print('[i] Frames per second: {:.2f}, with_threading={}'.format(n_frames / (time.time() - t0), with_threading))
        if with_threading:
            cap.stop()
        cv2.destroyAllWindows() 
Developer: gilbertfrancois, Project: video-capture-async, Lines: 22, Source: videocapturethreading.py

Example 6: __init__

# Required module import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_WIDTH [as alias]
def __init__(self, src, width, height):
        # initialize the video camera stream and read the first frame
        # from the stream
        self.frame_counter = 1
        self.width = width
        self.height = height
        self.stream = cv2.VideoCapture(src)
        self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, self.width)
        self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, self.height)
        (self.grabbed, self.frame) = self.stream.read()
        # initialize the variable used to indicate if the thread should
        # be stopped
        self.stopped = False
        # Debug: report the stream's actual shape (3 and 4 are the numeric values of
        # cv2.CAP_PROP_FRAME_WIDTH and cv2.CAP_PROP_FRAME_HEIGHT)
        self.real_width = int(self.stream.get(3))
        self.real_height = int(self.stream.get(4))
        print("> Start video stream with shape: {},{}".format(self.real_width, self.real_height))
Developer: DLR-RM, Project: AugmentedAutoencoder, Lines: 19, Source: helper.py

Example 7: __init__

# Required module import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_WIDTH [as alias]
def __init__(self, device=None, size=None, fps=None, sync=False):
        self.device = device or 0
        self.size = size or (480, 640)
        fps = fps or 30

        self.cap = cv2.VideoCapture(self.device)
        cap_height, cap_width = self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT), self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)
        if cap_height != self.size[0]:
            self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.size[0])
        if cap_width != self.size[1]:
            self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.size[1])
        cap_fps = self.cap.get(cv2.CAP_PROP_FPS)
        if cap_fps != fps:
            self.cap.set(cv2.CAP_PROP_FPS, fps)
        if sync:
            raise ValueError("sync not supported") 
Developer: alexlee-gk, Project: visual_dynamics, Lines: 18, Source: video.py

Example 8: getInfo

# Required module import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_WIDTH [as alias]
def getInfo(sourcePath):
    cap = cv2.VideoCapture(sourcePath)
    info = {
        "framecount": cap.get(cv2.CAP_PROP_FRAME_COUNT),
        "fps": cap.get(cv2.CAP_PROP_FPS),
        "width": int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
        "height": int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
        "codec": int(cap.get(cv2.CAP_PROP_FOURCC))
    }
    cap.release()
    return info

#
# Extracts one frame for every second of video.
# Effectively compresses a video down into much less data.
#
Developer: tafsiri, Project: filmstrip, Lines: 18, Source: preprocess.py
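
The comment block above describes the next step in preprocess.py: extracting roughly one frame for every second of video. That function itself is not part of this snippet; the sketch below is a hypothetical reconstruction of the idea that reuses the getInfo() helper shown above (the sample_every_second name and the frame-skipping strategy are assumptions, not the project's actual code).

def sample_every_second(sourcePath):
    """Yield roughly one frame per second of video (hypothetical sketch)."""
    info = getInfo(sourcePath)
    step = max(int(round(info["fps"])), 1)  # frames between two kept samples
    cap = cv2.VideoCapture(sourcePath)
    index = 0
    while True:
        ok, frame = cap.read()
        if not ok:
            break
        if index % step == 0:  # keep one frame per ~second of video
            yield index, frame
        index += 1
    cap.release()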

Example 9: init

# Required module import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_WIDTH [as alias]
def init():
    # initialize VNect estimator
    global estimator
    estimator = VNectEstimator()
    # catch the video stream
    global camera_capture
    camera_capture = cv2.VideoCapture(video)
    assert camera_capture.isOpened(), 'Video stream not opened: %s' % str(video)
    global W_img, H_img
    W_img, H_img = (int(camera_capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
                    int(camera_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))


################
### Box Loop ###
################
# use a simple HOG method to initialize bounding box 
Developer: XinArkh, Project: VNect, Lines: 19, Source: run_estimator_ps.py

Example 10: __init__

# Required module import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_WIDTH [as alias]
def __init__(self, src, width, height):
        # initialize the video camera stream and read the first frame
        # from the stream
        self.stream = cv2.VideoCapture(src)
        self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
        (self.grabbed, self.frame) = self.stream.read()

        # initialize the variable used to indicate if the thread should
        # be stopped
        self.stopped = False 
Developer: matiji66, Project: face-attendance-machine, Lines: 13, Source: app_utils.py

Example 11: __init__

# Required module import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_WIDTH [as alias]
def __init__(self, sources='streams.txt', img_size=416, half=False):
        self.mode = 'images'
        self.img_size = img_size
        self.half = half  # half precision fp16 images

        if os.path.isfile(sources):
            with open(sources, 'r') as f:
                sources = [x.strip() for x in f.read().splitlines() if len(x.strip())]
        else:
            sources = [sources]

        n = len(sources)
        self.imgs = [None] * n
        self.sources = sources
        for i, s in enumerate(sources):
            # Start the thread to read frames from the video stream
            print('%g/%g: %s... ' % (i + 1, n, s), end='')
            cap = cv2.VideoCapture(0 if s == '0' else s)
            assert cap.isOpened(), 'Failed to open %s' % s
            w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fps = cap.get(cv2.CAP_PROP_FPS) % 100
            _, self.imgs[i] = cap.read()  # guarantee first frame
            thread = Thread(target=self.update, args=([i, cap]), daemon=True)
            print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
            thread.start()
        print('')  # newline 
Developer: zbyuan, Project: pruning_yolov3, Lines: 29, Source: datasets.py

Example 12: __init__

# Required module import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_WIDTH [as alias]
def __init__(self, src, width, height):
		# initialize the video camera stream and read the first frame
		# from the stream
		self.stream = cv2.VideoCapture(src)
		self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, width)
		self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
		(self.grabbed, self.frame) = self.stream.read()

		# initialize the variable used to indicate if the thread should
		# be stopped
		self.stopped = False 
Developer: datitran, Project: object_detector_app, Lines: 13, Source: app_utils.py

Example 13: __init__

# Required module import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_WIDTH [as alias]
def __init__(self, infile=0, fps=30.0):
        self.isFile = not str(infile).isdecimal()
        self.ts = time.time()
        self.infile = infile
        self.cam = cv2.VideoCapture(self.infile)
        if not self.isFile:
            self.cam.set(cv2.CAP_PROP_FPS, fps)
            self.fps = fps
            # TODO: some cameras don't respect the fps directive
            self.cam.set(cv2.CAP_PROP_FRAME_WIDTH, 800)
            self.cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 600)
        else:
            self.fps = self.cam.get(cv2.CAP_PROP_FPS)
            self.sma = SimpleMovingAverage(value=0.1, count=19) 
Developer: RedisGears, Project: EdgeRealtimeVideoAnalytics, Lines: 16, Source: capture.py
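
The TODO in the snippet above notes that some cameras do not respect the FPS directive. One way to detect that, sketched below under the assumption of a webcam at device index 0, is to read cv2.CAP_PROP_FPS back after setting it and compare the values (some backends also report 0 when the property is unsupported).

cam = cv2.VideoCapture(0)  # assumption: default webcam
cam.set(cv2.CAP_PROP_FPS, 30.0)
applied = cam.get(cv2.CAP_PROP_FPS)
if applied and abs(applied - 30.0) > 1e-3:
    print('camera reports %.1f FPS instead of the requested 30.0' % applied)
cam.release()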

Example 14: extract_video_opencv

# Required module import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_WIDTH [as alias]
def extract_video_opencv(v_path, f_root, dim=240):
    '''v_path: single video path;
       f_root: root to store frames'''
    v_class = v_path.split('/')[-2]
    v_name = os.path.basename(v_path)[0:-4]
    out_dir = os.path.join(f_root, v_class, v_name)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    vidcap = cv2.VideoCapture(v_path)
    nb_frames = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
    width = vidcap.get(cv2.CAP_PROP_FRAME_WIDTH)   # float
    height = vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT) # float
    if (width == 0) or (height==0): 
        print(v_path, 'not successfully loaded, drop ..'); return
    new_dim = resize_dim(width, height, dim)

    success, image = vidcap.read()
    count = 1
    while success:
        image = cv2.resize(image, new_dim, interpolation = cv2.INTER_LINEAR)
        cv2.imwrite(os.path.join(out_dir, 'image_%05d.jpg' % count), image,
                    [cv2.IMWRITE_JPEG_QUALITY, 80])  # JPEG quality 0-100 (default 95); higher means better quality
        success, image = vidcap.read()
        count += 1
    if nb_frames > count:
        print('/'.join(out_dir.split('/')[-2::]), 'NOT extracted successfully: %df/%df' % (count, nb_frames))
    vidcap.release() 
Developer: TengdaHan, Project: DPC, Lines: 30, Source: extract_frame.py

Example 15: __init__

# Required module import: import cv2 [as alias]
# Or: from cv2 import CAP_PROP_FRAME_WIDTH [as alias]
def __init__(self, src=0, width=640, height=480):
        self.src = src
        self.cap = cv2.VideoCapture(self.src)
        self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
        self.grabbed, self.frame = self.cap.read()
        self.started = False
        self.read_lock = threading.Lock() 
Developer: gilbertfrancois, Project: video-capture-async, Lines: 10, Source: capture.py


Note: The cv2.CAP_PROP_FRAME_WIDTH attribute examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please refer to each project's license before distributing or using the code, and do not reproduce this article without permission.