This article collects typical usage examples of the cv2.CAP_GSTREAMER attribute in Python. If you are wondering what cv2.CAP_GSTREAMER does, how to use it, or want to see it in real code, the selected examples below may help. You can also explore further usage examples from the cv2 module that this attribute belongs to.
A total of 13 code examples of cv2.CAP_GSTREAMER are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
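All of the examples below follow the same pattern: build a GStreamer pipeline description string that ends in appsink, then pass it to cv2.VideoCapture together with cv2.CAP_GSTREAMER so OpenCV uses its GStreamer backend (this requires an OpenCV build compiled with GStreamer support). A minimal sketch of that pattern, using an illustrative test pipeline rather than a real camera:

import cv2

# videotestsrc generates a synthetic test pattern; any pipeline ending in appsink works
gst_str = ('videotestsrc ! video/x-raw, width=(int)640, height=(int)480 ! '
           'videoconvert ! appsink')
cap = cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
ret, frame = cap.read()  # ret is False if the pipeline could not be opened
cap.release()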
Example 1: open_cam_rtsp
# Required imports: import cv2 [as alias], import subprocess
# Or: from cv2 import CAP_GSTREAMER [as alias]
def open_cam_rtsp(uri, width, height, latency):
    """Open an RTSP URI (IP CAM)."""
    gst_elements = str(subprocess.check_output('gst-inspect-1.0'))
    if 'omxh264dec' in gst_elements:
        # Use hardware H.264 decoder on Jetson platforms
        gst_str = ('rtspsrc location={} latency={} ! '
                   'rtph264depay ! h264parse ! omxh264dec ! '
                   'nvvidconv ! '
                   'video/x-raw, width=(int){}, height=(int){}, '
                   'format=(string)BGRx ! videoconvert ! '
                   'appsink').format(uri, latency, width, height)
    elif 'avdec_h264' in gst_elements:
        # Otherwise try to use the software decoder 'avdec_h264'
        # NOTE: in case resizing images is necessary, try adding
        # a 'videoscale' into the pipeline
        gst_str = ('rtspsrc location={} latency={} ! '
                   'rtph264depay ! h264parse ! avdec_h264 ! '
                   'videoconvert ! appsink').format(uri, latency)
    else:
        raise RuntimeError('H.264 decoder not found!')
    return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
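A hypothetical call to the function above (the RTSP URI, credentials, and latency value are placeholders, not taken from the original source):

cap = open_cam_rtsp('rtsp://admin:password@192.168.1.64:554/stream1', 640, 480, 200)
if not cap.isOpened():
    raise SystemExit('Failed to open RTSP stream')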
Example 2: show_camera
# Required import: import cv2 [as alias]
# Or: from cv2 import CAP_GSTREAMER [as alias]
def show_camera():
    # To flip the image, modify the flip_method parameter (0 and 2 are the most common)
    print(gstreamer_pipeline(flip_method=0))
    cap = cv2.VideoCapture(gstreamer_pipeline(flip_method=0), cv2.CAP_GSTREAMER)
    if cap.isOpened():
        window_handle = cv2.namedWindow('CSI Camera', cv2.WINDOW_AUTOSIZE)
        # Window
        while cv2.getWindowProperty('CSI Camera', 0) >= 0:
            ret_val, img = cap.read()
            cv2.imshow('CSI Camera', img)
            # This also acts as a 30 ms frame delay while checking for a key press
            keyCode = cv2.waitKey(30) & 0xff
            # Stop the program on the ESC key
            if keyCode == 27:
                break
        cap.release()
        cv2.destroyAllWindows()
    else:
        print('Unable to open camera')
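Examples 2, 4, 11 and 12 call a gstreamer_pipeline() helper that is not shown on this page. Below is a minimal sketch of what such a helper typically looks like on Jetson boards; the default resolutions and frame rate are assumptions, not values taken from the original projects:

def gstreamer_pipeline(capture_width=1280, capture_height=720,
                       display_width=1280, display_height=720,
                       framerate=30, flip_method=0):
    # Capture from the CSI camera via nvarguscamerasrc, flip/scale with nvvidconv,
    # and convert to BGR so OpenCV receives frames in its native format.
    return (
        'nvarguscamerasrc ! '
        'video/x-raw(memory:NVMM), width=(int){}, height=(int){}, '
        'format=(string)NV12, framerate=(fraction){}/1 ! '
        'nvvidconv flip-method={} ! '
        'video/x-raw, width=(int){}, height=(int){}, format=(string)BGRx ! '
        'videoconvert ! video/x-raw, format=(string)BGR ! appsink'
    ).format(capture_width, capture_height, framerate, flip_method,
             display_width, display_height)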
Example 3: open_cam_rtsp
# Required import: import cv2 [as alias]
# Or: from cv2 import CAP_GSTREAMER [as alias]
def open_cam_rtsp(uri, width, height, latency):
    """Open an RTSP URI (IP CAM)."""
    gst_str = ('rtspsrc location={} latency={} ! '
               'rtph264depay ! h264parse ! omxh264dec ! '
               'nvvidconv ! '
               'video/x-raw, width=(int){}, height=(int){}, '
               'format=(string)BGRx ! videoconvert ! '
               'appsink').format(uri, latency, width, height)
    return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
Example 4: frames
# Required import: import cv2 [as alias]
# Or: from cv2 import CAP_GSTREAMER [as alias]
def frames():
    camera = cv2.VideoCapture(gstreamer_pipeline(flip_method=0), cv2.CAP_GSTREAMER)
    if not camera.isOpened():
        raise RuntimeError('Could not start camera.')
    while True:
        # read current frame
        _, img = camera.read()
        yield img
Example 5: open_cam_usb
# Required import: import cv2 [as alias]
# Or: from cv2 import CAP_GSTREAMER [as alias]
def open_cam_usb(dev, width, height):
    """Open a USB webcam."""
    if USB_GSTREAMER:
        gst_str = ('v4l2src device=/dev/video{} ! '
                   'video/x-raw, width=(int){}, height=(int){} ! '
                   'videoconvert ! appsink').format(dev, width, height)
        return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
    else:
        return cv2.VideoCapture(dev)
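USB_GSTREAMER is a module-level flag defined elsewhere in that project; a plausible definition (an assumption, not from the original file):

USB_GSTREAMER = True  # set to False to fall back to cv2.VideoCapture(dev) without GStreamer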
Example 6: open_cam_onboard
# Required imports: import cv2 [as alias], import subprocess
# Or: from cv2 import CAP_GSTREAMER [as alias]
def open_cam_onboard(width, height):
    """Open the Jetson onboard camera."""
    gst_elements = str(subprocess.check_output('gst-inspect-1.0'))
    if 'nvcamerasrc' in gst_elements:
        # On versions of L4T prior to 28.1, you might need to add
        # 'flip-method=2' into gst_str below.
        gst_str = ('nvcamerasrc ! '
                   'video/x-raw(memory:NVMM), '
                   'width=(int)2592, height=(int)1458, '
                   'format=(string)I420, framerate=(fraction)30/1 ! '
                   'nvvidconv ! '
                   'video/x-raw, width=(int){}, height=(int){}, '
                   'format=(string)BGRx ! '
                   'videoconvert ! appsink').format(width, height)
    elif 'nvarguscamerasrc' in gst_elements:
        gst_str = ('nvarguscamerasrc ! '
                   'video/x-raw(memory:NVMM), '
                   'width=(int)1920, height=(int)1080, '
                   'format=(string)NV12, framerate=(fraction)30/1 ! '
                   'nvvidconv flip-method=2 ! '
                   'video/x-raw, width=(int){}, height=(int){}, '
                   'format=(string)BGRx ! '
                   'videoconvert ! appsink').format(width, height)
    else:
        raise RuntimeError('onboard camera source not found!')
    return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
Example 7: open_cam_usb
# Required import: import cv2 [as alias]
# Or: from cv2 import CAP_GSTREAMER [as alias]
def open_cam_usb(dev, width, height):
    """Open a USB webcam.
    We want to set width and height here, otherwise we could just do:
        return cv2.VideoCapture(dev)
    """
    gst_str = ('v4l2src device=/dev/video{} ! '
               'video/x-raw, width=(int){}, height=(int){} ! '
               'videoconvert ! appsink').format(dev, width, height)
    return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
Example 8: open_cam_onboard
# Required import: import cv2 [as alias]
# Or: from cv2 import CAP_GSTREAMER [as alias]
def open_cam_onboard(width, height):
    """Open the Jetson onboard camera.
    On versions of L4T prior to 28.1, you might need to add
    'flip-method=2' into gst_str.
    """
    gst_str = ('nvcamerasrc ! '
               'video/x-raw(memory:NVMM), '
               'width=(int)2592, height=(int)1458, '
               'format=(string)I420, framerate=(fraction)30/1 ! '
               'nvvidconv ! '
               'video/x-raw, width=(int){}, height=(int){}, '
               'format=(string)BGRx ! videoconvert ! '
               'appsink').format(width, height)
    return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
Example 9: open_usb_camera
# Required import: import cv2 [as alias]
# Or: from cv2 import CAP_GSTREAMER [as alias]
def open_usb_camera():
    gst_str = ("v4l2src device={} ! "
               "video/x-raw, width=(int){}, height=(int){}, framerate=(fraction)30/1 ! "
               "videoconvert ! video/x-raw, format=(string)BGR ! appsink"
               ).format(VIDEO_DEVICE, VIDEO_WIDTH, VIDEO_HEIGHT)
    return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
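VIDEO_DEVICE, VIDEO_WIDTH and VIDEO_HEIGHT are module-level constants defined elsewhere in that project; illustrative values (assumptions, not from the original source):

VIDEO_DEVICE = '/dev/video0'
VIDEO_WIDTH = 640
VIDEO_HEIGHT = 480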
Example 10: open_cam_usb
# Required import: import cv2 [as alias]
# Or: from cv2 import CAP_GSTREAMER [as alias]
def open_cam_usb():
    # We want to set width and height here, otherwise we could just do:
    # return cv2.VideoCapture(dev)
    gst_str = ("v4l2src device=/dev/video0 ! "
               "video/x-raw, width=(int){}, height=(int){}, framerate=(fraction)30/1 ! "
               "videoconvert ! video/x-raw, format=(string)BGR ! appsink"
               ).format(WIDTH, HEIGHT)
    return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
Example 11: __init__
# Required import: import cv2 [as alias]
# Or: from cv2 import CAP_GSTREAMER [as alias]
def __init__(self, mqttBroker, camera=None, topic="ha/camera/mqtt"):
    self.show = True
    self.broker = mqttBroker
    self.topic = topic
    self.send_frames = 0
    # To flip the image, modify the flip_method parameter (0 and 2 are the most common)
    print(gstreamer_pipeline(flip_method=0))
    self.cap = cv2.VideoCapture(gstreamer_pipeline(flip_method=0), cv2.CAP_GSTREAMER)
    # First frame is used as the average frame
    ret, self.avgframe = self.cap.read()
    self.connect("Jetson MQTT Camera")
Example 12: ObjectTracking
# Required import: import cv2 [as alias]
# Or: from cv2 import CAP_GSTREAMER [as alias]
def ObjectTracking(self):
    detector = Detector()
    myiter = glob.iglob(os.path.join(IMAGE_FOLDER, '**', '*.jpg'),
                        recursive=True)
    newdict = reduce(lambda a, b: reduce_tracking(a, b), myiter, dict())
    startID = max(map(int, newdict.keys()), default=0) + 1
    ct = CentroidTracker(startID=startID)
    camera = cv2.VideoCapture(gstreamer_pipeline(flip_method=0), cv2.CAP_GSTREAMER)
    if not camera.isOpened():
        raise RuntimeError('Could not start camera.')
    try:
        while True:
            _, img = camera.read()
            boxes, confs, clss = detector.prediction(img, conf_th=0.8, conf_class=[1])
            img = detector.draw_boxes(img, boxes, confs, clss)
            previous_object_ID = ct.nextObjectID
            objects = ct.update(boxes)
            if len(boxes) > 0 and 1 in clss and previous_object_ID in list(objects.keys()):
                print("detected {} {} {} {}".format(ct.nextObjectID, confs, objects, boxes))
                # loop over the tracked objects
                for (objectID, centroid) in objects.items():
                    text = "ID {}".format(objectID)
                    cv2.putText(img, text, (centroid[0] - 10, centroid[1] - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                    cv2.circle(img, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)
                day = datetime.now().strftime("%Y%m%d")
                directory = os.path.join(IMAGE_FOLDER, 'pi', day)
                if not os.path.exists(directory):
                    os.makedirs(directory)
                ids = "-".join(list([str(i) for i in objects.keys()]))
                hour = datetime.now().strftime("%H%M%S")
                filename_output = os.path.join(
                    directory, "{}_person_{}_.jpg".format(hour, ids)
                )
                cv2.imwrite(filename_output, img)
            time.sleep(0.100)
    except KeyboardInterrupt:
        print('interrupted!')
        camera.release()
        print(type(objects))
        print(objects)
    except Exception as e:
        print('interrupted! by:')
        print(e)
        camera.release()
        print(type(objects))
        print(objects)
Example 13: __connect
# Required import: import cv2 [as alias]
# Or: from cv2 import CAP_GSTREAMER [as alias]
def __connect(self):
    if self._cap is not None:
        self._cap.release()
        del self._cap
        self._cap = None
        time.sleep(1)
    device = "device=/dev/video%i" % self._config["index"]
    gst = None
    if self._config["mode"] == "video":
        gst = (
            "v4l2src %s"
            " ! video/x-raw,format=YUY2,width=%i,height=%i,framerate=%i/1 "
            " ! videoconvert ! appsink"
            % (device, self._config["width"], self._config["height"], self._config["fps"])
        )
    elif self._config["mode"] == "image":
        gst = (
            "v4l2src %s"
            " ! image/jpeg,width=%i,height=%i,framerate=%i/1"
            " ! jpegparse ! jpegdec ! videoconvert ! appsink"
            % (device, self._config["width"], self._config["height"], self._config["fps"])
        )
    elif self._config["mode"] == "csi":
        gst = (
            "nvarguscamerasrc sensor-id=%i"
            " ! video/x-raw(memory:NVMM),width=%i,height=%i,framerate=(fraction)%i/1,format=(string)NV12"
            " ! nvtee ! nvvidconv flip-method=%i"
            " ! video/x-raw,width=%i,height=%i,format=BGRx"
            " ! videoconvert ! appsink"
            % (
                self._config["index"],
                self._config["capture_width"],
                self._config["capture_height"],
                self._config["fps"],
                self._config["flip_method"],
                self._config["width"],
                self._config["height"],
            )
        )
    print(gst)
    if gst is not None:
        self._cap = cv2.VideoCapture(gst, cv2.CAP_GSTREAMER)
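The method above reads everything from a self._config dictionary. A hypothetical configuration that would satisfy the keys it accesses (the values are illustrative assumptions, not from the original project):

config = {
    'index': 0,             # /dev/video index or CSI sensor-id
    'mode': 'csi',          # one of 'video', 'image', 'csi'
    'capture_width': 1920,  # sensor capture size (csi mode only)
    'capture_height': 1080,
    'width': 960,           # output size delivered to OpenCV
    'height': 540,
    'fps': 30,
    'flip_method': 0,
}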