This article collects typical usage examples of the Python attribute imutils.video.FPS. If you have been wondering what video.FPS is for, how to use it, or what real code that uses it looks like, the curated attribute examples below may help. You can also explore other uses of the imutils.video module that this attribute belongs to.
The following shows 4 code examples of the video.FPS attribute, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
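For readers new to the class: imutils.video.FPS is a simple stopwatch-style frame counter. You start() it, call update() once per processed frame, then stop() it and read elapsed() (seconds) and fps() (frames divided by elapsed time). A minimal standalone sketch of that pattern; the camera index src=0 and the 100-frame count are arbitrary choices for illustration:

from imutils.video import FPS, WebcamVideoStream

stream = WebcamVideoStream(src=0).start()  # src=0: default camera (illustrative choice)
fps = FPS().start()

for _ in range(100):           # process a fixed number of frames
    frame = stream.read()      # grab the latest frame from the threaded stream
    # ... per-frame processing would go here ...
    fps.update()               # count this frame

fps.stop()
print("elapsed: {:.2f}s".format(fps.elapsed()))
print("approx. FPS: {:.2f}".format(fps.fps()))

stream.stop()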
Example 1: calculate_fps
# Required module import: from imutils import video [as alias]
# Or: from imutils.video import FPS [as alias]
def calculate_fps(self, frames_no=100):
    fps = FPS().start()

    # Don't wanna display window
    if self.debug:
        self.debug = not self.debug

    for i in range(0, frames_no):
        self.where_lane_be()
        fps.update()

    fps.stop()

    # Don't wanna display window
    if not self.debug:
        self.debug = not self.debug

    print('Time taken: {:.2f}'.format(fps.elapsed()))
    print('~ FPS : {:.2f}'.format(fps.fps()))

# Use this to save images to a location
Example 2: cv2_demo
# Required module import: from imutils import video [as alias]
# Or: from imutils.video import FPS [as alias]
def cv2_demo(net, transform):
    def predict(frame):
        height, width = frame.shape[:2]
        x = torch.from_numpy(transform(frame)[0]).permute(2, 0, 1)
        x = Variable(x.unsqueeze(0))
        y = net(x)  # forward pass
        detections = y.data
        # scale each detection back up to the image
        scale = torch.Tensor([width, height, width, height])
        for i in range(detections.size(1)):
            j = 0
            while detections[0, i, j, 0] >= 0.6:
                pt = (detections[0, i, j, 1:] * scale).cpu().numpy()
                cv2.rectangle(frame,
                              (int(pt[0]), int(pt[1])),
                              (int(pt[2]), int(pt[3])),
                              COLORS[i % 3], 2)
                cv2.putText(frame, labelmap[i - 1], (int(pt[0]), int(pt[1])),
                            FONT, 2, (255, 255, 255), 2, cv2.LINE_AA)
                j += 1
        return frame

    # start video stream thread, allow buffer to fill
    print("[INFO] starting threaded video stream...")
    stream = WebcamVideoStream(src=0).start()  # default camera
    time.sleep(1.0)

    # start fps timer
    fps = FPS().start()  # assumed: the loop below calls fps.update(), but the excerpt never defines fps

    # loop over frames from the video file stream
    while True:
        # grab next frame
        frame = stream.read()
        key = cv2.waitKey(1) & 0xFF

        # update FPS counter
        fps.update()
        frame = predict(frame)

        # keybindings for display
        if key == ord('p'):  # pause
            while True:
                key2 = cv2.waitKey(1) & 0xFF
                cv2.imshow('frame', frame)
                if key2 == ord('p'):  # resume
                    break
        cv2.imshow('frame', frame)
        if key == 27:  # exit
            break
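The excerpt cuts off inside the display loop. In the usual imutils FPS workflow the counter is stopped after the loop exits and the threaded stream is released; a minimal sketch of that wrap-up, reusing the fps and stream names from the example above (where exactly the original script does this is not shown in the excerpt):

# after the while-loop exits, report timing and clean up
fps.stop()
print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

cv2.destroyAllWindows()  # close the display window
stream.stop()            # release the threaded video stream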
Example 3: cv2_demo
# Required module import: from imutils import video [as alias]
# Or: from imutils.video import FPS [as alias]
def cv2_demo(net, transform):
    def predict(frame):
        height, width = frame.shape[:2]
        x = torch.from_numpy(transform(frame)[0]).permute(2, 0, 1)
        x = Variable(x.unsqueeze(0))
        y = net(x)  # forward pass
        detections = y.data
        # scale each detection back up to the image
        scale = torch.Tensor([width, height, width, height])
        for i in range(detections.size(1)):
            j = 0
            while detections[0, i, j, 0] >= 0.6:
                pt = (detections[0, i, j, 1:] * scale).cpu().numpy()
                cv2.rectangle(frame, (int(pt[0]), int(pt[1])), (int(pt[2]),
                              int(pt[3])), COLORS[i % 3], 2)
                cv2.putText(frame, labelmap[i - 1], (int(pt[0]), int(pt[1])), FONT,
                            2, (255, 255, 255), 2, cv2.LINE_AA)
                j += 1
        return frame

    # start video stream thread, allow buffer to fill
    print("[INFO] starting threaded video stream...")
    stream = WebcamVideoStream(src=0).start()  # default camera
    time.sleep(1.0)

    # start fps timer
    fps = FPS().start()  # assumed: the loop below calls fps.update(), but the excerpt never defines fps

    # loop over frames from the video file stream
    while True:
        # grab next frame
        frame = stream.read()
        key = cv2.waitKey(1) & 0xFF

        # update FPS counter
        fps.update()
        frame = predict(frame)

        # keybindings for display
        if key == ord('p'):  # pause
            while True:
                key2 = cv2.waitKey(1) & 0xFF
                cv2.imshow('frame', frame)
                if key2 == ord('p'):  # resume
                    break
        cv2.imshow('frame', frame)
        if key == 27:  # exit
            break
Example 4: __init__
# Required module import: from imutils import video [as alias]
# Or: from imutils.video import FPS [as alias]
def __init__(self, center=int(cvsettings.CAMERA_WIDTH / 2), debug=False, is_usb_webcam=True, period_s=0.025):
    # Our video stream
    # If it's not a usb webcam then get the pi camera
    if not is_usb_webcam:
        self.vs = PiVideoStream(resolution=(cvsettings.CAMERA_WIDTH, cvsettings.CAMERA_HEIGHT))
        # Camera cvsettings
        self.vs.camera.shutter_speed = cvsettings.SHUTTER
        self.vs.camera.exposure_mode = cvsettings.EXPOSURE_MODE
        self.vs.camera.exposure_compensation = cvsettings.EXPOSURE_COMPENSATION
        self.vs.camera.awb_gains = cvsettings.AWB_GAINS
        self.vs.camera.awb_mode = cvsettings.AWB_MODE
        self.vs.camera.saturation = cvsettings.SATURATION
        self.vs.camera.rotation = cvsettings.ROTATION
        self.vs.camera.video_stabilization = cvsettings.VIDEO_STABALIZATION
        self.vs.camera.iso = cvsettings.ISO
        self.vs.camera.brightness = cvsettings.BRIGHTNESS
        self.vs.camera.contrast = cvsettings.CONTRAST

    # Else get the usb camera
    else:
        self.vs = WebcamVideoStream(src=0)
        self.vs.stream.set(cv2.CAP_PROP_FRAME_WIDTH, cvsettings.CAMERA_WIDTH)
        self.vs.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, cvsettings.CAMERA_HEIGHT)

    # Has camera started
    self.camera_started = False
    self.start_camera()  # Starts our camera

    # To calculate our error in positioning
    self.center = center

    # To determine if we actually detected lane or not
    self.detected_lane = False

    # debug mode on? (to display processed images)
    self.debug = debug

    # Time interval between updates (in seconds); FPS = 1 / period_s
    self.period_s = period_s

    # Starting time
    self.start_time = time.time()

# Mouse event handler for get_hsv
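Examples 1 and 4 appear to come from the same lane-detection camera class (both use self.debug and the cvsettings module). A minimal usage sketch, assuming a hypothetical class name LaneCamera for the class whose __init__ and calculate_fps are excerpted above:

# LaneCamera is a hypothetical name; the excerpt does not show the real class name
camera = LaneCamera(debug=False, is_usb_webcam=True, period_s=0.025)

# benchmark the lane-detection pipeline over 100 frames using imutils.video.FPS
camera.calculate_fps(frames_no=100)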