

Python cv2.getTickFrequency Method: Code Examples

This article collects typical usage examples of the cv2.getTickFrequency method in Python. If you are wondering what cv2.getTickFrequency does, how to call it, or what real-world uses look like, the curated code examples below may help. You can also explore further usage examples from the cv2 module.


The following presents 14 code examples of the cv2.getTickFrequency method, sorted by popularity by default.
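All of the examples below rely on the same pattern: cv2.getTickCount() returns the current tick count and cv2.getTickFrequency() returns the number of ticks per second, so the elapsed time of any operation, in seconds, is the tick difference divided by the frequency. Here is a minimal sketch of that pattern; the random image and the GaussianBlur call are only placeholders for whatever operation you want to time.

import cv2
import numpy as np

# Placeholder input image; any operation can be timed the same way.
img = np.random.randint(0, 256, (720, 1280, 3), dtype=np.uint8)

start = cv2.getTickCount()
blurred = cv2.GaussianBlur(img, (15, 15), 0)  # operation being timed (placeholder)
elapsed = (cv2.getTickCount() - start) / cv2.getTickFrequency()  # seconds

print('GaussianBlur took %.3f s (%.1f FPS)' % (elapsed, 1.0 / elapsed))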

Example 1: make_h5py

# Required import: import cv2 [as alias]
# Or: from cv2 import getTickFrequency [as alias]
def make_h5py():
    x_train_paths, y_train_paths = get_data('train')
    x_val_paths, y_val_paths = get_data('val')
    x_test_paths, y_test_paths = get_data('test')

    h5py_file = h5py.File(os.path.join(dir_path, 'data.h5'), 'w')
    
    start = cv2.getTickCount()
    write_data(h5py_file, 'train', x_train_paths, y_train_paths)
    time = (cv2.getTickCount()-start)/cv2.getTickFrequency()
    print ('parsing train data, Time:%.3fs'%time)

    start = cv2.getTickCount()
    write_data(h5py_file, 'val', x_val_paths, y_val_paths)
    time = (cv2.getTickCount()-start)/cv2.getTickFrequency()
    print ('parsing val data, Time:%.3fs'%time)

    start = cv2.getTickCount()
    write_data(h5py_file, 'test', x_test_paths, y_test_paths)
    time = (cv2.getTickCount()-start)/cv2.getTickFrequency()
    print ('parsing test data, Time:%.3fs'%time) 
Author: dhkim0225, Project: keras-image-segmentation, Lines: 23, Source: h5_test.py

Example 2: predict

# Required import: import cv2 [as alias]
# Or: from cv2 import getTickFrequency [as alias]
def predict(name):
    frame = cv.imread(name)
    blob = cv.dnn.blobFromImage(frame, 1/255, (inpWidth, inpHeight), [0,0,0], 1, crop=False)

    # Sets the input to the network
    net.setInput(blob)

    # Runs the forward pass to get output of the output layers
    outs = net.forward(getOutputsNames(net))

    # Remove the bounding boxes with low confidence
    boxes1=postprocess(frame, outs)
    # Put efficiency information. The function getPerfProfile returns the
    # overall time for inference (t) and the timings for each layer (in layersTimes).
    t, _ = net.getPerfProfile()
    #label = 'Inference time: %.2f ms' % (t * 1000.0 / cv.getTickFrequency())
    #cv.putText(frame, label, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))
    cv.imwrite("prediction.jpg",frame)
    return boxes1


# In[ ]: 
Author: holms-ur, Project: fine-tuning, Lines: 24, Source: predict.py

Example 3: detect

# Required import: import cv2 [as alias]
# Or: from cv2 import getTickFrequency [as alias]
def detect(self, frame):
        fconv = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        image = Image.fromarray(fconv)
        # Create a 4D blob from a frame.
        blob = cv2.dnn.blobFromImage(frame, 1/255, (YoloV3.inpWidth, YoloV3.inpHeight), [0,0,0], 1, crop=False)
        # Sets the input to the network
        self.net.setInput(blob)
        # Runs the forward pass to get output of the output layers
        outs = self.net.forward(self.getOutputsNames())
        # Remove the bounding boxes with low confidence
        detection = self.postprocess(frame, outs, self.colors)

        # Put efficiency information. The function getPerfProfile returns the
        # overall time for inference (t) and the timings for each layer (in layersTimes).
        if self.drawPerformance:
            t, _ = self.net.getPerfProfile()
            label = 'Inference time: %.2f ms' % (t * 1000.0 / cv2.getTickFrequency())
            cv2.putText(frame, label, (0, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))

        return detection 
Author: joakimeriksson, Project: ai-smarthome, Lines: 22, Source: yolo3.py

Example 4: webcam_gui

# Required import: import cv2 [as alias]
# Or: from cv2 import getTickFrequency [as alias]
def webcam_gui(filter_func, video_src=0):

    cap = cv2.VideoCapture(video_src)
    key_code = -1
    
    while(key_code == -1):
        t = cv2.getTickCount()
        # read a frame
        ret, frame = cap.read()
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - t) 
        print("Frame rate: " + str(fps))
        
        # run filter with the arguments
        frame_out = filter_func(frame)
        
        # show the image
        cv2.imshow('Press any key to exit', frame_out)
        
        # wait for the key
        key_code = cv2.waitKey(10)

    cap.release()
    cv2.destroyAllWindows() 
Author: fatcloud, Project: PyCV-time, Lines: 25, Source: webcam_gui.py

Example 5: clock

# Required import: import cv2 [as alias]
# Or: from cv2 import getTickFrequency [as alias]
def clock():
    return cv2.getTickCount() / cv2.getTickFrequency() 
Author: makelove, Project: OpenCV-Python-Tutorial, Lines: 4, Source: common.py

Example 6: train_visualization_seg

# Required import: import cv2 [as alias]
# Or: from cv2 import getTickFrequency [as alias]
def train_visualization_seg(self, model, epoch, path):
        # image_name_list = sorted(glob(os.path.join(self.flag.data_path,'val/IMAGE/*/frankfurt_000000_014480_leftImg8bit.png')))
        # print (image_name_list)

        image_name = path #'./result/frankfurt_000000_014480_leftImg8bit.png'
        image_height = self.flag.image_height
        image_width = self.flag.image_width
        
        imgInput = cv2.imread(image_name, self.flag.color_mode)
        imgInput = cv2.cvtColor(imgInput, cv2.COLOR_BGR2RGB)
        output_path = self.flag.output_dir
        input_data = imgInput.reshape((1,image_height,image_width,self.flag.color_mode*2+1))

        t_start = cv2.getTickCount()
        result = model.predict(input_data, 1)
        t_total = (cv2.getTickCount() - t_start) / cv2.getTickFrequency() * 1000
        print ("[*] Predict Time: %.3f ms"%t_total)
        
        imgMask = (result[0]*255).astype(np.uint8)
        imgShow = cv2.cvtColor(imgInput, cv2.COLOR_RGB2BGR).copy()
        #cv2.cvtColor(imgInput, cv2.COLOR_GRAY2BGR)
        # imgMaskColor = cv2.applyColorMap(imgMask, cv2.COLORMAP_JET)
        imgMaskColor = imgMask
        imgShow = cv2.addWeighted(imgShow, 0.5, imgMaskColor, 0.6, 0.0)
        output_path = os.path.join(self.flag.output_dir, '%04d_'%epoch+os.path.basename(image_name))
        mask_path = os.path.join(self.flag.output_dir, 'mask_%04d_'%epoch+os.path.basename(image_name))
        cv2.imwrite(output_path, imgShow)
        cv2.imwrite(mask_path, imgMaskColor)
        # print "SAVE:[%s]"%output_path
        # cv2.imwrite(os.path.join(output_path, 'img%04d.png'%epoch), imgShow)
        # cv2.namedWindow("show", 0)
        # cv2.resizeWindow("show", 800, 800)
        # cv2.imshow("show", imgShow)
        # cv2.waitKey(1) 
Author: dhkim0225, Project: keras-image-segmentation, Lines: 36, Source: callbacks.py

Example 7: tracker

# Required import: import cv2 [as alias]
# Or: from cv2 import getTickFrequency [as alias]
def tracker(cam, frame, bbox):
    tracker = KCFTracker(True, True, True) # (hog, fixed_Window, multi_scale)
    tracker.init(bbox, frame)
    
    while True:
        ok, frame = cam.read()

        timer = cv2.getTickCount()
        bbox = tracker.update(frame)
        bbox = list(map(int, bbox))
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

        # Tracking success
        p1 = (int(bbox[0]), int(bbox[1]))
        p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
        cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)

        # Put FPS
        cv2.putText(frame, "FPS : " + str(int(fps)), (100, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

        cv2.imshow("Tracking", frame)

        # Exit if ESC pressed
        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break

    cam.release()
    cv2.destroyAllWindows() 
Author: ryanfwy, Project: KCF-DSST-py, Lines: 31, Source: run.py

Example 8: service_capture

# Required import: import cv2 [as alias]
# Or: from cv2 import getTickFrequency [as alias]
def service_capture(self):
        while(1):
            t_start = cv2.getTickCount()
            self.capture_mutex.acquire()
            self.capture_state()
            self.capture_image()
            self.capture_mutex.release()


            self.cost_time = ((cv2.getTickCount() - t_start) * 1000.0 / cv2.getTickFrequency())
            # self.cost_time_vector.append(cost_time)
            # self.state_vector.append(self.raw_state)
            # print("Query raw_state cost time = %.2f" % cost_time)
            # self.update["Get_data_cost_time": cost_time] 
Author: hku-mars, Project: crossgap_il_rl, Lines: 16, Source: query_aimsim_images.py

Example 9: capture_state

# Required import: import cv2 [as alias]
# Or: from cv2 import getTickFrequency [as alias]
def capture_state(self):
        t_start = cv2.getTickCount()
        self.state = self.client.getMultirotorState()
        # self.collision_info = self.client.simGetCollisionInfo()
        # print("Query raw_state cost time = %.2f" % ((cv2.getTickCount() - t_start) * 1000.0 / cv2.getTickFrequency())) 
Author: hku-mars, Project: crossgap_il_rl, Lines: 7, Source: query_aimsim_images.py

Example 10: capture_image

# Required import: import cv2 [as alias]
# Or: from cv2 import getTickFrequency [as alias]
def capture_image(self):
        t_start = cv2.getTickCount()
        responses = self.client.simGetImages([
            airsim.ImageRequest("0", airsim.ImageType.DepthVis),  # depth visualization image
            airsim.ImageRequest("1", airsim.ImageType.DepthPerspective, True),  # depth in perspective projection
            airsim.ImageRequest("1", airsim.ImageType.Scene)])  # scene vision image in png format
        # print('Retrieved images: %d', len(responses))

        for response in responses:
            if response.pixels_as_float:
                self.img_f_raw = img_tools.process_float_img(response)
                self.img_f = img_tools.float_img_to_display(self.img_f_raw)
                # img_f = img_tools.displat_float_img( img_tools.process_float_img(response))
                # cv2.imshow("img_float", img_tools.displat_float_img(img_f))
            elif response.compress:  # png format
                self.img_png = img_tools.process_compress_img(response)
                # cv2.imshow("img_png", img_png)
                pass
            else:  # uncompressed array
                self.img_rgba = img_tools.process_rgba_img(response)
                # cv2.imshow("img_rgba", img_rgba)
        try:
            self.img_f = np.uint8(self.img_f)
            self.img_f_rgb = cv2.cvtColor(self.img_f, cv2.COLOR_GRAY2RGB)
            self.img_combi = np.concatenate((self.img_png, 255 - self.img_f_rgb), axis=0)
            # print(vis.shape)
            # cv2.imshow("image", self.img_combi)

        except Exception as e:
            print(e)
            # print(img_f_rgb.shape, img_png.shape)
            pass
        # print("Query image cost time = %.2f" % ((cv2.getTickCount() - t_start) * 1000.0 / cv2.getTickFrequency()))
        return self.img_combi
        # cv2.waitKey(1) 
Author: hku-mars, Project: crossgap_il_rl, Lines: 37, Source: query_aimsim_images.py

Example 11: load_random_data

# Required import: import cv2 [as alias]
# Or: from cv2 import getTickFrequency [as alias]
def load_random_data(save_dir, file_size):
    sample_set = np.random.choice(range(0, file_size), file_size, replace=False)
    sample_set = np.sort(sample_set)
    # sample_set = range(file_size)
    # print(random_set)
    file_name_vec = []
    for idx in sample_set:
        name = "%s\\traj_%d.pkl" % (save_dir, idx)
        file_name_vec.append(name)

    rapid_trajectory = Rapid_trajectory_generator()
    # t_start = cv2.getTickCount()
    in_data, out_data = rapid_trajectory.load_from_file_vector(file_name_vec)
    # print("cost time  = %.2f " % ((cv2.getTickCount() - t_start) * 1000.0 / cv2.getTickFrequency()))
    return in_data, out_data 
Author: hku-mars, Project: crossgap_il_rl, Lines: 17, Source: Rapid_trajectory_generator.py

Example 12: pause

# Required import: import cv2 [as alias]
# Or: from cv2 import getTickFrequency [as alias]
def pause(self): 
        now_time = cv2.getTickCount()
        self._accumulated += (now_time - self._start_time)/cv2.getTickFrequency() 
        self._is_paused = True 
Author: luigifreda, Project: pyslam, Lines: 6, Source: timer.py

Example 13: elapsed

# Required import: import cv2 [as alias]
# Or: from cv2 import getTickFrequency [as alias]
def elapsed(self):
        if self._is_paused:
            self._elapsed = self._accumulated
        else:
            now = cv2.getTickCount()
            self._elapsed = self._accumulated + (now - self._start_time)/cv2.getTickFrequency()        
        if self._is_verbose is True:      
            name =  self._name
            if self._is_paused:
                name += ' [paused]'
            message = 'Timer::' + name + ' - elapsed: ' + str(self._elapsed) 
            timer_print(message)
        return self._elapsed 
Author: luigifreda, Project: pyslam, Lines: 15, Source: timer.py

Example 14: clock

# Required import: import cv2 [as alias]
# Or: from cv2 import getTickFrequency [as alias]
def clock():
    return cv.getTickCount() / cv.getTickFrequency() 
Author: thunil, Project: TecoGAN, Lines: 4, Source: common.py


Note: The cv2.getTickFrequency examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license; do not reproduce without permission.