This page collects typical usage examples of imutils.video.FPS in Python. If you are wondering what video.FPS is for, how to use it, or what working code looks like, the curated examples below should help. You can also explore the other members of the imutils.video module.
A total of 4 code examples of video.FPS are shown below, sorted by popularity by default.
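Before digging into the examples, here is a minimal, self-contained sketch of the usual pattern: start the counter, call update() once per processed frame, stop it, then report. The process helper and the synthetic frames are placeholders for illustration only; the sketch assumes imutils and numpy are installed.

from imutils.video import FPS
import numpy as np

def process(frame):
    # Stand-in for real per-frame work (detection, filtering, ...)
    return frame.mean()

fps = FPS().start()                                  # begin timing
for _ in range(100):
    frame = np.zeros((480, 640, 3), dtype=np.uint8)  # dummy frame
    process(frame)
    fps.update()                                     # count one processed frame
fps.stop()                                           # stop timing

print('Time taken: {:.2f}s'.format(fps.elapsed()))
print('Approx. FPS: {:.2f}'.format(fps.fps()))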
Example 1: calculate_fps
# Required import: from imutils import video [as alias]
# Or: from imutils.video import FPS [as alias]
def calculate_fps(self, frames_no=100):
    fps = FPS().start()

    # Don't wanna display window
    if self.debug:
        self.debug = not self.debug

    for i in range(0, frames_no):
        self.where_lane_be()
        fps.update()
    fps.stop()

    # Don't wanna display window
    if not self.debug:
        self.debug = not self.debug

    print('Time taken: {:.2f}'.format(fps.elapsed()))
    print('~ FPS : {:.2f}'.format(fps.fps()))

# Use this to save images to a location
Example 2: cv2_demo
# Required import: from imutils import video [as alias]
# Or: from imutils.video import FPS [as alias]
def cv2_demo(net, transform):
    def predict(frame):
        height, width = frame.shape[:2]
        x = torch.from_numpy(transform(frame)[0]).permute(2, 0, 1)
        x = Variable(x.unsqueeze(0))
        y = net(x)  # forward pass
        detections = y.data
        # scale each detection back up to the image
        scale = torch.Tensor([width, height, width, height])
        for i in range(detections.size(1)):
            j = 0
            while detections[0, i, j, 0] >= 0.6:
                pt = (detections[0, i, j, 1:] * scale).cpu().numpy()
                cv2.rectangle(frame,
                              (int(pt[0]), int(pt[1])),
                              (int(pt[2]), int(pt[3])),
                              COLORS[i % 3], 2)
                cv2.putText(frame, labelmap[i - 1], (int(pt[0]), int(pt[1])),
                            FONT, 2, (255, 255, 255), 2, cv2.LINE_AA)
                j += 1
        return frame

    # start video stream thread, allow buffer to fill
    print("[INFO] starting threaded video stream...")
    stream = WebcamVideoStream(src=0).start()  # default camera
    time.sleep(1.0)

    # start fps timer
    fps = FPS().start()

    # loop over frames from the video file stream
    while True:
        # grab next frame
        frame = stream.read()
        key = cv2.waitKey(1) & 0xFF

        # update FPS counter
        fps.update()
        frame = predict(frame)

        # keybindings for display
        if key == ord('p'):  # pause
            while True:
                key2 = cv2.waitKey(1) or 0xff
                cv2.imshow('frame', frame)
                if key2 == ord('p'):  # resume
                    break
        cv2.imshow('frame', frame)
        if key == 27:  # exit
            break
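The excerpt ends inside the display loop, so the counter is never reported. A plausible wrap-up after the loop breaks (not part of the quoted example) would stop the counter, print the timing, and release the window and the stream, using only the FPS calls shown in Example 1 plus standard OpenCV/imutils cleanup:

# after the display loop exits
fps.stop()
print('[INFO] elapsed time: {:.2f}'.format(fps.elapsed()))
print('[INFO] approx. FPS: {:.2f}'.format(fps.fps()))

cv2.destroyAllWindows()
stream.stop()  # stop the WebcamVideoStream thread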
Example 3: cv2_demo
# Required import: from imutils import video [as alias]
# Or: from imutils.video import FPS [as alias]
def cv2_demo(net, transform):
    def predict(frame):
        height, width = frame.shape[:2]
        x = torch.from_numpy(transform(frame)[0]).permute(2, 0, 1)
        x = Variable(x.unsqueeze(0))
        y = net(x)  # forward pass
        detections = y.data
        # scale each detection back up to the image
        scale = torch.Tensor([width, height, width, height])
        for i in range(detections.size(1)):
            j = 0
            while detections[0, i, j, 0] >= 0.6:
                pt = (detections[0, i, j, 1:] * scale).cpu().numpy()
                cv2.rectangle(frame, (int(pt[0]), int(pt[1])), (int(pt[2]),
                              int(pt[3])), COLORS[i % 3], 2)
                cv2.putText(frame, labelmap[i - 1], (int(pt[0]), int(pt[1])), FONT,
                            2, (255, 255, 255), 2, cv2.LINE_AA)
                j += 1
        return frame

    # start video stream thread, allow buffer to fill
    print("[INFO] starting threaded video stream...")
    stream = WebcamVideoStream(src=0).start()  # default camera
    time.sleep(1.0)

    # start fps timer
    fps = FPS().start()

    # loop over frames from the video file stream
    while True:
        # grab next frame
        frame = stream.read()
        key = cv2.waitKey(1) & 0xFF

        # update FPS counter
        fps.update()
        frame = predict(frame)

        # keybindings for display
        if key == ord('p'):  # pause
            while True:
                key2 = cv2.waitKey(1) or 0xff
                cv2.imshow('frame', frame)
                if key2 == ord('p'):  # resume
                    break
        cv2.imshow('frame', frame)
        if key == 27:  # exit
            break
Example 4: __init__
# Required import: from imutils import video [as alias]
# Or: from imutils.video import FPS [as alias]
def __init__(self, center=int(cvsettings.CAMERA_WIDTH / 2), debug=False, is_usb_webcam=True, period_s=0.025):
    # Our video stream
    # If it's not a USB webcam, use the Pi camera
    if not is_usb_webcam:
        self.vs = PiVideoStream(resolution=(cvsettings.CAMERA_WIDTH, cvsettings.CAMERA_HEIGHT))
        # Camera cvsettings
        self.vs.camera.shutter_speed = cvsettings.SHUTTER
        self.vs.camera.exposure_mode = cvsettings.EXPOSURE_MODE
        self.vs.camera.exposure_compensation = cvsettings.EXPOSURE_COMPENSATION
        self.vs.camera.awb_gains = cvsettings.AWB_GAINS
        self.vs.camera.awb_mode = cvsettings.AWB_MODE
        self.vs.camera.saturation = cvsettings.SATURATION
        self.vs.camera.rotation = cvsettings.ROTATION
        self.vs.camera.video_stabilization = cvsettings.VIDEO_STABALIZATION
        self.vs.camera.iso = cvsettings.ISO
        self.vs.camera.brightness = cvsettings.BRIGHTNESS
        self.vs.camera.contrast = cvsettings.CONTRAST
    # Otherwise use the USB camera
    else:
        self.vs = WebcamVideoStream(src=0)
        self.vs.stream.set(cv2.CAP_PROP_FRAME_WIDTH, cvsettings.CAMERA_WIDTH)
        self.vs.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, cvsettings.CAMERA_HEIGHT)

    # Has the camera started?
    self.camera_started = False
    self.start_camera()  # Starts our camera

    # To calculate our error in positioning
    self.center = center

    # To determine whether we actually detected a lane or not
    self.detected_lane = False

    # Debug mode on? (to display processed images)
    self.debug = debug

    # Time interval between updates (in seconds)
    # FPS = 1/period_s
    self.period_s = period_s

    # Starting time
    self.start_time = time.time()

# Mouse event handler for get_hsv
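As the comment notes, the target update rate is simply the reciprocal of the period; for the default period_s=0.025 that works out to 40 updates per second:

period_s = 0.025             # default update period from __init__
target_fps = 1 / period_s    # FPS = 1/period_s
print(target_fps)            # 40.0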