本文整理匯總了Python中cv2.VideoCapture方法的典型用法代碼示例。如果您正苦於以下問題:Python cv2.VideoCapture方法的具體用法?Python cv2.VideoCapture怎麽用?Python cv2.VideoCapture使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類cv2
的用法示例。
在下文中一共展示了cv2.VideoCapture方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: __init__
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import VideoCapture [as 別名]
def __init__(self, pipe=0, img_size=416, half=False):
    """Set up a webcam / network-stream source.

    pipe: camera index, or a stream URL / GStreamer pipeline string; the
          string '0' is normalized to the local camera index 0.
    img_size: target image size used by downstream processing.
    half: whether frames are intended for fp16 (half precision) inference.
    """
    self.img_size = img_size
    self.half = half  # half precision fp16 images
    if pipe == '0':
        pipe = 0  # local camera
        # Alternative sources, kept for reference:
        # pipe = 'rtsp://192.168.1.64/1'  # IP camera
        # pipe = 'rtsp://username:password@192.168.1.64/1'  # IP camera with login
        # pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa'  # IP traffic camera
        # pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg'  # IP golf camera
        # https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/
        # pipe = '"rtspsrc location="rtsp://username:password@192.168.1.64/1" latency=10 ! appsink'  # GStreamer
        # https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/
        # https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package
        # pipe = "rtspsrc location=rtsp://root:root@192.168.0.91:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink"  # GStreamer
    self.pipe = pipe
    self.cap = cv2.VideoCapture(pipe)  # underlying OpenCV capture object
    self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3)  # keep the internal frame buffer small
示例2: main
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import VideoCapture [as 別名]
def main():
    """Open an OpenNI depth device at 640x480 and run the wx GUI loop."""
    device = cv2.CAP_OPENNI
    capture = cv2.VideoCapture(device)
    if not capture.isOpened():
        # First constructor attempt failed; try an explicit open.
        capture.open(device)
    capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

    app = wx.App()
    frame = MyFrame(None, -1, 'chapter2.py', capture)
    frame.Show(True)
    # self.SetTopWindow(frame)
    app.MainLoop()

    # When everything is done, release the capture.
    capture.release()
    cv2.destroyAllWindows()
示例3: main
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import VideoCapture [as 別名]
def main():
    """Run live object detection on a camera feed until Esc/q/Q is pressed.

    Builds a detector from the CLI config/checkpoint, then loops: grab a
    frame, run inference, and display the result.
    """
    args = parse_args()
    device = torch.device(args.device)
    model = init_detector(args.config, args.checkpoint, device=device)

    camera = cv2.VideoCapture(args.camera_id)
    print('Press "Esc", "q" or "Q" to exit.')
    try:
        while True:
            ret_val, img = camera.read()
            if not ret_val:
                # Camera disconnected or stream ended: stop cleanly instead
                # of feeding a None frame to the detector (was unchecked).
                break
            result = inference_detector(model, img)
            ch = cv2.waitKey(1)
            if ch == 27 or ch == ord('q') or ch == ord('Q'):
                break
            model.show_result(
                img, result, score_thr=args.score_thr, wait_time=1, show=True)
    finally:
        camera.release()  # free the device even on an exception
示例4: __init__
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import VideoCapture [as 別名]
def __init__(self, ip, port, showme, level, version):
    """Video-sending client thread.

    ip/port: server address; version == 4 selects IPv4, anything else IPv6.
    showme: whether a local preview is shown (used elsewhere in the class).
    level: quality level; clamped to at most 3 and used as the frame-skip
           interval, which in turn determines the resize factor ``fx``.
    """
    threading.Thread.__init__(self)
    self.daemon = True  # setDaemon() is deprecated; set the attribute instead
    self.ADDR = (ip, port)
    self.showme = showme
    # Clamp the frame interval to the range [.., 3].
    self.interval = min(int(level), 3)
    # Resize factor shrinks as the interval grows, but never below 0.3.
    self.fx = max(1 / (self.interval + 1), 0.3)
    if version == 4:
        self.sock = socket(AF_INET, SOCK_STREAM)
    else:
        self.sock = socket(AF_INET6, SOCK_STREAM)
    self.cap = cv2.VideoCapture(0)  # local camera
    print("VIDEO client starts...")  # message typo fixed: was "VEDIO"
示例5: __init__
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import VideoCapture [as 別名]
def __init__(self, capture=None, frame_resize=None):
    """Wrap an OpenCV capture source, optionally resizing its frames.

    capture: an opened cv2.VideoCapture; defaults to the local camera.
             The default is now created lazily per call — the original
             ``capture=cv2.VideoCapture(0)`` default was evaluated once at
             function-definition time, opening the camera as a side effect
             of merely importing the module.
    frame_resize: either a (width, height) tuple/list of target pixels, or
                  a float scale factor applied to the native resolution.
    """
    if capture is None:
        capture = cv2.VideoCapture(0)  # local camera, opened on demand
    self._capture = capture
    self._frame_resize = None
    if frame_resize:
        if isinstance(frame_resize, (tuple, list)) and (len(frame_resize) == 2):
            self._frame_resize = tuple(map(int, frame_resize))
            self._frame_shape = (1, 3, self._frame_resize[0], self._frame_resize[1])
        elif isinstance(frame_resize, float):
            width = int(self._capture.get(cv2.CAP_PROP_FRAME_WIDTH) * frame_resize)
            height = int(self._capture.get(cv2.CAP_PROP_FRAME_HEIGHT) * frame_resize)
            self._frame_shape = (1, 3, width, height)
            self._frame_resize = (width, height)
        else:
            # The original split this message over two lines without
            # parentheses, leaving the second half as a dead statement and
            # truncating the assertion message.
            assert False, ("frame_resize should be a tuple of (x,y) pixels "
                           "or a float setting the scaling factor")
    else:
        # No resize requested: report the capture's native resolution.
        self._frame_shape = (1, 3,
                             int(self._capture.get(cv2.CAP_PROP_FRAME_WIDTH)),
                             int(self._capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))
示例6: _initCamera
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import VideoCapture [as 別名]
def _initCamera(self, callSleep=True):
    """
    Open the camera given by ``self.camConnectionString``.

    On failure the error is recorded via ``setError`` and, when
    ``callSleep`` is true, the thread sleeps for a few seconds so the
    caller can retry without hammering the device.

    :return: the opened capture object on success, otherwise None
             (note: the capture object itself, not a boolean).
    """
    self.cap = cv.VideoCapture(self.camConnectionString)
    # Two failure modes collapse to one path: no object at all, or an
    # object that never established a connection.
    if self.cap is None or not self.cap.isOpened():
        self.setError("can't connect to camera")
        if callSleep:
            time.sleep(5)  # back off before the caller retries
        return None
    return self.cap
示例7: initfrom
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import VideoCapture [as 別名]
def initfrom(self):
    """Probe camera indices 0-9 and populate both camera selector widgets.

    Resets the global update flags, counts how many of the first ten
    camera indices can be opened, and fills both combo boxes with the
    resulting index strings.
    """
    global update1, update2
    update1 = 0
    update2 = 0

    max_probe = 10  # was an unused local 'testmax = 10'; now actually used
    self.maxcap = 0
    for index in range(max_probe):
        cap = cv2.VideoCapture(index)
        if cap.isOpened():
            self.maxcap += 1
        # Release unconditionally so failed probes don't leak handles.
        cap.release()

    self.selecamera1.clear()
    self.selecamera2.clear()
    items = [str(i) for i in range(self.maxcap)]
    self.selecamera1.addItems(items)
    self.selecamera2.addItems(items)
示例8: loop2
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import VideoCapture [as 別名]
def loop2(self, text, w=1280, h=720):
    """Grab-and-display loop for camera 2; runs until ``update2`` is cleared.

    text: camera index as a string; w/h: requested capture resolution.
    Saves a snapshot to photo/ whenever the global ``shotmark2`` is set.
    """
    cap = cv2.VideoCapture(int(text))
    # Named property ids instead of the magic numbers 6 / 3 / 4.
    cap.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'))
    global capnum2
    capnum2 = int(text)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, w)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, h)
    global update2
    update2 = 1
    global shotmark2
    while update2 == 1:
        ret, frame = cap.read()
        if not ret:
            # Failed grab (camera unplugged, etc.): skip instead of passing
            # a None frame to imwrite/cvtColor (was unchecked).
            continue
        if shotmark2 == 1:
            fn = self.lineEdit.text()
            name = "photo/2_" + fn + "video.jpg"
            if os.path.exists(name):
                # Avoid clobbering an existing snapshot: add a timestamp.
                name = "photo/2_" + fn + "video" + str(int(time.time())) + ".jpg"
            cv2.imwrite(name, frame)
            shotmark2 = 0
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        self.original2_image.updateImage(frame)
    cap.release()  # was commented out, leaking the device after the loop
    # Blank the preview widget once the loop exits.
    cv_img_rgb = np.zeros((700, 700, 3))
    self.original2_image.updateImage(cv_img_rgb)
示例9: CaptureContinous
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import VideoCapture [as 別名]
def CaptureContinous(self, detector):
    """Take one webcam snapshot, run detection, and archive interesting hits.

    The frame is saved under IMAGE_FOLDER/webcam/<YYYYMMDD>/ whenever one
    of the watched classes appears among the filtered detections.
    """
    cap = cv2.VideoCapture(0)
    ret, image = cap.read()
    cap.release()
    if not ret:
        # Camera unavailable this round: nothing to analyse (the read
        # result was previously ignored, passing None downstream).
        return
    output = detector.prediction(image)
    df = detector.filter_prediction(output, image)
    if len(df) > 0:
        if (df['class_name']
                .str
                .contains('person|bird|cat|wine glass|cup|sandwich')
                .any()):
            day = datetime.now().strftime("%Y%m%d")
            directory = os.path.join(IMAGE_FOLDER, 'webcam', day)
            # exist_ok avoids the check-then-create race of the original.
            os.makedirs(directory, exist_ok=True)
            image = detector.draw_boxes(image, df)
            classes = df['class_name'].unique().tolist()
            hour = datetime.now().strftime("%H%M%S")
            filename_output = os.path.join(
                directory, "{}_{}_.jpg".format(hour, "-".join(classes))
            )
            cv2.imwrite(filename_output, image)
示例10: counter
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import VideoCapture [as 別名]
def counter(func):
    """Decorator that counts calls to *func* on the wrapper's ``count`` attribute.

    The count resets whenever more than 3 seconds have passed since the
    module-level global ``lastsave`` was last updated.
    """
    @wraps(func)
    def tmp(*args, **kwargs):
        tmp.count += 1
        global lastsave
        if time.time() - lastsave > 3:
            # Reset window is 3 seconds — the original comment claiming
            # "5 minutes = 300 seconds" did not match the code.
            lastsave = time.time()
            tmp.count = 0
        return func(*args, **kwargs)
    tmp.count = 0
    return tmp
#cap = cv2.VideoCapture(0)
示例11: extract_allframescommand
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import VideoCapture [as 別名]
def extract_allframescommand(filename):
    """Extract every frame of *filename* into a sibling directory as PNGs.

    Frames are written as 0.png, 1.png, ... into a directory named after
    the video (extension stripped), using ffmpeg.
    """
    if not filename:
        print('Please select a video to convert')
        return

    pathDir = str(filename[:-4])  # strip the 4-character extension
    if not os.path.exists(pathDir):
        os.makedirs(pathDir)
    saveDirFilenames = os.path.join(pathDir, '%d.png')  # ffmpeg numbering pattern
    print(saveDirFilenames)

    fname = str(filename)
    cap = cv2.VideoCapture(fname)
    amount_of_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)  # was magic index 7
    cap.release()  # the capture was only needed for the frame count
    print('The number of frames in this video = ', amount_of_frames)

    print('Extracting frames... (Might take awhile)')
    # Argument list with shell=False avoids the quoting problems and shell
    # injection possible when concatenating the filename into a shell string.
    command = ['ffmpeg', '-i', fname, '-q:v', '1', '-start_number', '0',
               saveDirFilenames]
    print(command)
    subprocess.call(command)
    print('All frames are extracted!')
示例12: extractspecificframe
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import VideoCapture [as 別名]
def extractspecificframe(filename, startframe1, endframe1):
    """Save frames startframe1..endframe1 (inclusive) of *filename* as PNGs.

    Frames are written to a 'frames' directory next to the video, one file
    per frame named <index>.png.
    """
    cap = cv2.VideoCapture(filename)
    # os.path.join instead of the hard-coded '\\frames' keeps this portable
    # beyond Windows.
    pathDir = os.path.join(str(filename[:-4]), 'frames')
    if not os.path.exists(pathDir):
        os.makedirs(pathDir)
    for currentFrame in range(int(startframe1), int(endframe1) + 1):
        cap.set(cv2.CAP_PROP_POS_FRAMES, currentFrame)  # was magic index 1
        ret, frame = cap.read()
        if not ret:
            # Seek past end of stream / unreadable frame: skip it instead
            # of writing a None frame (was unchecked).
            continue
        filePath = os.path.join(pathDir, str(currentFrame) + '.png')
        cv2.imwrite(filePath, frame)
    cap.release()  # free the video handle (was leaked)
示例13: add_single_video_yaml
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import VideoCapture [as 別名]
def add_single_video_yaml(yamlfile, videofile):
    """Register *videofile* with a full-frame crop in a yaml config's video_sets.

    The crop string "0, W, 0, H" covers the whole frame, using the video's
    native width/height.
    """
    yamlPath = yamlfile
    cap = cv2.VideoCapture(videofile)
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))    # was magic index 3
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))  # was magic index 4
    cap.release()  # only the dimensions were needed (was leaked)

    # Same text str([0, W, 0, H]).strip("[]") produced, built directly.
    # (The original .replace("'", "") was a no-op on an int list.)
    currCropLinePath = '{}, {}, {}, {}'.format(0, width, 0, height)

    with open(yamlPath) as f:
        read_yaml = yaml.load(f, Loader=yaml.FullLoader)
    read_yaml["video_sets"].update({videofile: {'crop': currCropLinePath}})
    with open(yamlPath, 'w') as outfile:
        yaml.dump(read_yaml, outfile, default_flow_style=False)
示例14: main
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import VideoCapture [as 別名]
def main():
    """Build a YOLO detector from saved weights and run it on a test image."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', default="YOLO_small.ckpt", type=str)
    parser.add_argument('--weight_dir', default='weights', type=str)
    parser.add_argument('--data_dir', default="data", type=str)
    parser.add_argument('--gpu', default='', type=str)
    args = parser.parse_args()

    # Expose only the requested GPU(s) to the framework; '' hides them all.
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    yolo = YOLONet(False)
    weight_file = os.path.join(args.data_dir, args.weight_dir, args.weights)
    detector = Detector(yolo, weight_file)

    # Camera mode (disabled):
    # cap = cv2.VideoCapture(-1)
    # detector.camera_detector(cap)

    # Single-image mode:
    imname = 'test/person.jpg'
    detector.image_detector(imname)
示例15: main
# 需要導入模塊: import cv2 [as 別名]
# 或者: from cv2 import VideoCapture [as 別名]
def main():
total_pics = 1000
cap = cv2.VideoCapture(0)
x, y, w, h = 300, 50, 350, 350
pic_no = 0
flag_start_capturing = False
frames = 0
while True:
ret, frame = cap.read()
frame = cv2.flip(frame, 1)
#frame = cv2.resize(frame, (image_x, image_y))
cv2.imwrite("hand_images/" + str(pic_no) + ".jpg", frame)
cv2.imshow("Capturing gesture", frame)
pic_no += 1
if pic_no == total_pics:
break