This article collects typical usage examples of the Python method cv2.getTickFrequency. If you are wondering how to use cv2.getTickFrequency, or are looking for concrete examples of it, the curated code samples below may help. You can also explore the other usage examples for the cv2 module.
The following 14 code examples demonstrate cv2.getTickFrequency, sorted by popularity by default.
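As background for all of the examples below: cv2.getTickFrequency() returns the number of clock ticks per second, and cv2.getTickCount() returns the current tick count, so a tick difference divided by the frequency gives elapsed seconds. A minimal sketch of that pattern:

import cv2

start = cv2.getTickCount()
# ... work to be timed ...
elapsed_s = (cv2.getTickCount() - start) / cv2.getTickFrequency()
print('elapsed: %.3f s' % elapsed_s)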
Example 1: make_h5py
# Required import: import cv2 [as alias]
# Or: from cv2 import getTickFrequency [as alias]
def make_h5py():
    x_train_paths, y_train_paths = get_data('train')
    x_val_paths, y_val_paths = get_data('val')
    x_test_paths, y_test_paths = get_data('test')
    h5py_file = h5py.File(os.path.join(dir_path, 'data.h5'), 'w')

    start = cv2.getTickCount()
    write_data(h5py_file, 'train', x_train_paths, y_train_paths)
    time = (cv2.getTickCount() - start) / cv2.getTickFrequency()
    print('parsing train data, Time:%.3fs' % time)

    start = cv2.getTickCount()
    write_data(h5py_file, 'val', x_val_paths, y_val_paths)
    time = (cv2.getTickCount() - start) / cv2.getTickFrequency()
    print('parsing val data, Time:%.3fs' % time)

    start = cv2.getTickCount()
    write_data(h5py_file, 'test', x_test_paths, y_test_paths)
    time = (cv2.getTickCount() - start) / cv2.getTickFrequency()
    print('parsing test data, Time:%.3fs' % time)
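The three timing blocks above repeat the same measure-and-print pattern. A small helper could factor it out; this is a sketch, not part of the original code, and the name timed_write is hypothetical:

def timed_write(h5py_file, split, x_paths, y_paths):
    start = cv2.getTickCount()
    write_data(h5py_file, split, x_paths, y_paths)
    elapsed = (cv2.getTickCount() - start) / cv2.getTickFrequency()
    print('parsing %s data, Time:%.3fs' % (split, elapsed))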
Example 2: predict
# Required import: import cv2 [as alias]
# Or: from cv2 import getTickFrequency [as alias]
def predict(name):
    frame = cv.imread(name)
    blob = cv.dnn.blobFromImage(frame, 1/255, (inpWidth, inpHeight), [0, 0, 0], 1, crop=False)
    # Set the input to the network
    net.setInput(blob)
    # Run the forward pass to get the output of the output layers
    outs = net.forward(getOutputsNames(net))
    # Remove the bounding boxes with low confidence
    boxes1 = postprocess(frame, outs)
    # Put efficiency information. getPerfProfile returns the overall time
    # for inference (t) and the timings for each of the layers (in layersTimes).
    t, _ = net.getPerfProfile()
    # label = 'Inference time: %.2f ms' % (t * 1000.0 / cv.getTickFrequency())
    # cv.putText(frame, label, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))
    cv.imwrite("prediction.jpg", frame)
    return boxes1
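Note the commented-out label above: net.getPerfProfile() reports the overall inference time in ticks, so it has to be divided by the tick frequency to get seconds. A standalone sketch of that conversion, using the cv alias from this example:

t, _ = net.getPerfProfile()                        # overall time in ticks (per-layer timings discarded)
inference_ms = t * 1000.0 / cv.getTickFrequency()  # ticks -> milliseconds
print('Inference time: %.2f ms' % inference_ms)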
Example 3: detect
# Required import: import cv2 [as alias]
# Or: from cv2 import getTickFrequency [as alias]
def detect(self, frame):
    fconv = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    image = Image.fromarray(fconv)
    # Create a 4D blob from a frame.
    blob = cv2.dnn.blobFromImage(frame, 1/255, (YoloV3.inpWidth, YoloV3.inpHeight), [0, 0, 0], 1, crop=False)
    # Set the input to the network
    self.net.setInput(blob)
    # Run the forward pass to get the output of the output layers
    outs = self.net.forward(self.getOutputsNames())
    # Remove the bounding boxes with low confidence
    detection = self.postprocess(frame, outs, self.colors)
    # Put efficiency information. getPerfProfile returns the overall time
    # for inference (t) and the timings for each of the layers (in layersTimes).
    if self.drawPerformance:
        t, _ = self.net.getPerfProfile()
        label = 'Inference time: %.2f ms' % (t * 1000.0 / cv2.getTickFrequency())
        cv2.putText(frame, label, (0, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))
    return detection
Example 4: webcam_gui
# Required import: import cv2 [as alias]
# Or: from cv2 import getTickFrequency [as alias]
def webcam_gui(filter_func, video_src=0):
    cap = cv2.VideoCapture(video_src)
    key_code = -1
    while key_code == -1:
        t = cv2.getTickCount()
        # read a frame
        ret, frame = cap.read()
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - t)
        print("Frame rate: " + str(fps))
        # run the filter on the frame
        frame_out = filter_func(frame)
        # show the image
        cv2.imshow('Press any key to exit', frame_out)
        # wait for a key press
        key_code = cv2.waitKey(10)
    cap.release()
    cv2.destroyAllWindows()
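A usage sketch for webcam_gui, passing an edge-detection filter; the filter itself is illustrative and not from the original code:

def canny_filter(frame):
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    return cv2.Canny(gray, 50, 150)

webcam_gui(canny_filter)  # opens the default camera (video_src=0)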
Example 5: clock
# Required import: import cv2 [as alias]
# Or: from cv2 import getTickFrequency [as alias]
def clock():
    return cv2.getTickCount() / cv2.getTickFrequency()
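clock() converts the current tick count to seconds, so interval timing becomes a plain subtraction. For instance:

import time

t0 = clock()
time.sleep(0.5)  # stand-in for the work being timed
print('elapsed: %.3f s' % (clock() - t0))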
Example 6: train_visualization_seg
# Required import: import cv2 [as alias]
# Or: from cv2 import getTickFrequency [as alias]
def train_visualization_seg(self, model, epoch, path):
    # image_name_list = sorted(glob(os.path.join(self.flag.data_path, 'val/IMAGE/*/frankfurt_000000_014480_leftImg8bit.png')))
    # print(image_name_list)
    image_name = path  # './result/frankfurt_000000_014480_leftImg8bit.png'
    image_height = self.flag.image_height
    image_width = self.flag.image_width
    imgInput = cv2.imread(image_name, self.flag.color_mode)
    imgInput = cv2.cvtColor(imgInput, cv2.COLOR_BGR2RGB)
    output_path = self.flag.output_dir
    input_data = imgInput.reshape((1, image_height, image_width, self.flag.color_mode*2+1))

    t_start = cv2.getTickCount()
    result = model.predict(input_data, 1)
    t_total = (cv2.getTickCount() - t_start) / cv2.getTickFrequency() * 1000
    print("[*] Predict Time: %.3f ms" % t_total)

    imgMask = (result[0]*255).astype(np.uint8)
    imgShow = cv2.cvtColor(imgInput, cv2.COLOR_RGB2BGR).copy()
    # cv2.cvtColor(imgInput, cv2.COLOR_GRAY2BGR)
    # imgMaskColor = cv2.applyColorMap(imgMask, cv2.COLORMAP_JET)
    imgMaskColor = imgMask
    imgShow = cv2.addWeighted(imgShow, 0.5, imgMaskColor, 0.6, 0.0)
    output_path = os.path.join(self.flag.output_dir, '%04d_' % epoch + os.path.basename(image_name))
    mask_path = os.path.join(self.flag.output_dir, 'mask_%04d_' % epoch + os.path.basename(image_name))
    cv2.imwrite(output_path, imgShow)
    cv2.imwrite(mask_path, imgMaskColor)
    # print("SAVE:[%s]" % output_path)
    # cv2.imwrite(os.path.join(output_path, 'img%04d.png' % epoch), imgShow)
    # cv2.namedWindow("show", 0)
    # cv2.resizeWindow("show", 800, 800)
    # cv2.imshow("show", imgShow)
    # cv2.waitKey(1)
Example 7: tracker
# Required import: import cv2 [as alias]
# Or: from cv2 import getTickFrequency [as alias]
def tracker(cam, frame, bbox):
    tracker = KCFTracker(True, True, True)  # (hog, fixed_window, multi_scale)
    tracker.init(bbox, frame)
    while True:
        ok, frame = cam.read()
        timer = cv2.getTickCount()
        bbox = tracker.update(frame)
        bbox = list(map(int, bbox))
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)
        # Tracking success
        p1 = (int(bbox[0]), int(bbox[1]))
        p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
        cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
        # Put FPS
        cv2.putText(frame, "FPS : " + str(int(fps)), (100, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
        cv2.imshow("Tracking", frame)
        # Exit if ESC pressed
        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break
    cam.release()
    cv2.destroyAllWindows()
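A hedged usage sketch for tracker(): grab the first frame, let the user pick the initial bounding box with cv2.selectROI, then hand off. KCFTracker itself comes from an external module that is assumed to be importable:

cam = cv2.VideoCapture(0)
ok, first_frame = cam.read()
if ok:
    bbox = cv2.selectROI('Tracking', first_frame, False)  # returns (x, y, w, h)
    tracker(cam, first_frame, bbox)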
Example 8: service_capture
# Required import: import cv2 [as alias]
# Or: from cv2 import getTickFrequency [as alias]
def service_capture(self):
    while True:
        t_start = cv2.getTickCount()
        self.capture_mutex.acquire()
        self.capture_state()
        self.capture_image()
        self.capture_mutex.release()
        self.cost_time = ((cv2.getTickCount() - t_start) * 1000.0 / cv2.getTickFrequency())
        # self.cost_time_vector.append(cost_time)
        # self.state_vector.append(self.raw_state)
        # print("Query raw_state cost time = %.2f" % cost_time)
        # self.update["Get_data_cost_time": cost_time]
Example 9: capture_state
# Required import: import cv2 [as alias]
# Or: from cv2 import getTickFrequency [as alias]
def capture_state(self):
    t_start = cv2.getTickCount()
    self.state = self.client.getMultirotorState()
    # self.collision_info = self.client.simGetCollisionInfo()
    # print("Query raw_state cost time = %.2f" % ((cv2.getTickCount() - t_start) * 1000.0 / cv2.getTickFrequency()))
Example 10: capture_image
# Required import: import cv2 [as alias]
# Or: from cv2 import getTickFrequency [as alias]
def capture_image(self):
    t_start = cv2.getTickCount()
    responses = self.client.simGetImages([
        airsim.ImageRequest("0", airsim.ImageType.DepthVis),                 # depth visualization image
        airsim.ImageRequest("1", airsim.ImageType.DepthPerspective, True),   # depth in perspective projection
        airsim.ImageRequest("1", airsim.ImageType.Scene)])                   # scene vision image in png format
    # print('Retrieved images: %d', len(responses))
    for response in responses:
        if response.pixels_as_float:
            self.img_f_raw = img_tools.process_float_img(response)
            self.img_f = img_tools.float_img_to_display(self.img_f_raw)
            # img_f = img_tools.displat_float_img(img_tools.process_float_img(response))
            # cv2.imshow("img_float", img_tools.displat_float_img(img_f))
        elif response.compress:  # png format
            self.img_png = img_tools.process_compress_img(response)
            # cv2.imshow("img_png", img_png)
        else:  # uncompressed array
            self.img_rgba = img_tools.process_rgba_img(response)
            # cv2.imshow("img_rgba", img_rgba)
    try:
        self.img_f = np.uint8(self.img_f)
        self.img_f_rgb = cv2.cvtColor(self.img_f, cv2.COLOR_GRAY2RGB)
        self.img_combi = np.concatenate((self.img_png, 255 - self.img_f_rgb), axis=0)
        # print(vis.shape)
        # cv2.imshow("image", self.img_combi)
    except Exception as e:
        print(e)
        # print(img_f_rgb.shape, img_png.shape)
    # print("Query image cost time = %.2f" % ((cv2.getTickCount() - t_start) * 1000.0 / cv2.getTickFrequency()))
    return self.img_combi
    # cv2.waitKey(1)
Example 11: load_random_data
# Required import: import cv2 [as alias]
# Or: from cv2 import getTickFrequency [as alias]
def load_random_data(save_dir, file_size):
    sample_set = np.random.choice(range(0, file_size), file_size, replace=False)
    sample_set = np.sort(sample_set)
    # sample_set = range(file_size)
    # print(random_set)
    file_name_vec = []
    for idx in sample_set:
        name = "%s\\traj_%d.pkl" % (save_dir, idx)
        file_name_vec.append(name)
    rapid_trajectory = Rapid_trajectory_generator()
    # t_start = cv2.getTickCount()
    in_data, out_data = rapid_trajectory.load_from_file_vector(file_name_vec)
    # print("cost time = %.2f " % ((cv2.getTickCount() - t_start) * 1000.0 / cv2.getTickFrequency()))
    return in_data, out_data
Example 12: pause
# Required import: import cv2 [as alias]
# Or: from cv2 import getTickFrequency [as alias]
def pause(self):
    now_time = cv2.getTickCount()
    self._accumulated += (now_time - self._start_time) / cv2.getTickFrequency()
    self._is_paused = True
Example 13: elapsed
# Required import: import cv2 [as alias]
# Or: from cv2 import getTickFrequency [as alias]
def elapsed(self):
    if self._is_paused:
        self._elapsed = self._accumulated
    else:
        now = cv2.getTickCount()
        self._elapsed = self._accumulated + (now - self._start_time) / cv2.getTickFrequency()
    if self._is_verbose is True:
        name = self._name
        if self._is_paused:
            name += ' [paused]'
        message = 'Timer::' + name + ' - elapsed: ' + str(self._elapsed)
        timer_print(message)
    return self._elapsed
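Examples 12 and 13 are methods of the same timer class. A minimal self-contained sketch of how they fit together; the class name, constructor, and resume() logic are assumptions reconstructed from the two methods:

import cv2

class Timer:
    def __init__(self, name='timer', is_verbose=False):
        self._name = name
        self._is_verbose = is_verbose
        self._accumulated = 0.0
        self._is_paused = False
        self._start_time = cv2.getTickCount()

    def pause(self):
        now_time = cv2.getTickCount()
        self._accumulated += (now_time - self._start_time) / cv2.getTickFrequency()
        self._is_paused = True

    def resume(self):
        self._is_paused = False
        self._start_time = cv2.getTickCount()

    def elapsed(self):
        if self._is_paused:
            return self._accumulated
        now = cv2.getTickCount()
        return self._accumulated + (now - self._start_time) / cv2.getTickFrequency()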
Example 14: clock
# Required import: import cv2 [as alias]
# Or: from cv2 import getTickFrequency [as alias]
def clock():
    return cv.getTickCount() / cv.getTickFrequency()