

Python Timer.toc Method Code Examples

This article collects typical usage examples of the timer.Timer.toc method in Python. If you are wondering how Timer.toc is used in practice, what it does, or are looking for working examples, the curated code samples below may help. You can also explore further usage examples of timer.Timer, the class this method belongs to.


The nine code examples of Timer.toc shown below are ordered by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
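
All of the examples below rely on a small Timer helper that exposes tic() and toc() together with total_time and average_time attributes. The class itself is not shown on this page; the sketch below is a minimal reconstruction, assuming the conventional tic/toc stopwatch semantics these projects use (the actual timer.Timer implementation may differ):

import time

class Timer(object):
    """Minimal tic/toc stopwatch sketch (assumed interface, not the original source)."""

    def __init__(self):
        self.total_time = 0.0    # accumulated time over all completed tic/toc pairs
        self.calls = 0           # number of completed toc() calls
        self.start_time = 0.0
        self.diff = 0.0          # duration of the most recent tic/toc pair
        self.average_time = 0.0  # total_time / calls

    def tic(self):
        # Start (or restart) the stopwatch.
        self.start_time = time.time()

    def toc(self, average=True):
        # Stop the stopwatch and update the running statistics.
        self.diff = time.time() - self.start_time
        self.total_time += self.diff
        self.calls += 1
        self.average_time = self.total_time / self.calls
        return self.average_time if average else self.diff

Each example wraps the timed region between timer.tic() and timer.toc() and then reads timer.average_time or timer.total_time for reporting.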

Example 1: _get_feature_scale

# Required import: from timer import Timer [as alias]
# Or: from timer.Timer import toc [as alias]
    def _get_feature_scale(self, num_images=100):
        _t = Timer()
        roidb = self.imdb.roidb
        total_norm = 0.0
        total_sum = 0.0
        count = 0.0
        num_images = min(num_images, self.imdb.num_images)
        inds = np.random.choice(range(self.imdb.num_images), size=num_images, replace=False)

        for i_, i in enumerate(inds):
            #im = cv2.imread(self.imdb.image_path_at(i))
            #if roidb[i]['flipped']:
            #    im = im[:, ::-1, :]
            #im = self.imdb.image_path_at(i)
            _t.tic()
            scores, boxes, feat = self.im_detect(self.net, i, roidb[i]['boxes'], boReturnClassifierScore = False)
            _t.toc()
            #feat = self.net.blobs[self.layer].data
            total_norm += np.sqrt((feat ** 2).sum(axis=1)).sum()
            total_sum += 1.0 * sum(sum(feat)) / len(feat)
            count += feat.shape[0]
            print('{}/{}: avg feature norm: {:.3f}, average value: {:.3f}'.format(i_ + 1, num_images,
                                                           total_norm / count, total_sum / count))

        return self.svm_targetNorm * 1.0 / (total_norm / count)
Developer: AllanYiin, Project: CNTK, Lines of code: 27, Source file: train_svms.py

Example 2: get_pos_examples

# Required import: from timer import Timer [as alias]
# Or: from timer.Timer import toc [as alias]
    def get_pos_examples(self):
        counts = self._get_pos_counts()
        for i in range(len(counts)):
            self.trainers[i].alloc_pos(counts[i])

        _t = Timer()
        roidb = self.imdb.roidb
        num_images = len(roidb)
        for i in range(num_images):
            #im = cv2.imread(self.imdb.image_path_at(i))
            #if roidb[i]['flipped']:
            #    im = im[:, ::-1, :]
            #im = self.imdb.image_path_at(i)
            gt_inds = np.where(roidb[i]['gt_classes'] > 0)[0]
            gt_boxes = roidb[i]['boxes'][gt_inds]
            _t.tic()
            scores, boxes, feat = self.im_detect(self.net, i, gt_boxes, self.feature_scale, gt_inds, boReturnClassifierScore = False)
            _t.toc()
            #feat = self.net.blobs[self.layer].data
            for j in range(1, self.imdb.num_classes):
                cls_inds = np.where(roidb[i]['gt_classes'][gt_inds] == j)[0]
                if len(cls_inds) > 0:
                    cls_feat = feat[cls_inds, :]
                    self.trainers[j].append_pos(cls_feat)
            if i % 50 == 0:
                print('get_pos_examples: {:d}/{:d} {:.3f}s' \
                      .format(i + 1, len(roidb), _t.average_time))
Developer: AllanYiin, Project: CNTK, Lines of code: 29, Source file: train_svms.py

Example 3: train_model

# Required import: from timer import Timer [as alias]
# Or: from timer.Timer import toc [as alias]
    def train_model(self, max_iters):
        """Network training loop."""
        last_snapshot_iter = -1
        timer = Timer()
        while self.solver.iter < max_iters:
            timer.tic()
            self.solver.step(1)
            timer.toc()
            if self.solver.iter % (10 * self.solver_param.display) == 0:
                print 'speed: {:.3f}s / iter'.format(timer.average_time)

            if self.solver.iter % self.SNAPSHOT_ITERS == 0:
                last_snapshot_iter = self.solver.iter
                self.snapshot()
Developer: PHPerWu, Project: tripletloss, Lines of code: 16, Source file: train.py

Example 4: get_required_obj

# Required import: from timer import Timer [as alias]
# Or: from timer.Timer import toc [as alias]
def get_required_obj(net, args):
    '''Run object detection on the image given by args.img and return (scores, boxes).'''
    img_file = args.img
    im = cv2.imread(img_file)
    
    timer = Timer()
    timer.tic()
    scores, boxes = im_detect(net, im)
    timer.toc()
    print ('Detection took {:.3f}s for '
           '{:d} object proposals').format(timer.total_time, boxes.shape[0])
#    print scores, boxes
    return scores, boxes
Developer: deanSunny, Project: caffeDL, Lines of code: 16, Source file: demo_test1.py

Example 5: train_model

# Required import: from timer import Timer [as alias]
# Or: from timer.Timer import toc [as alias]
 def train_model(self, max_iters):
     """Network training loop."""
     last_snapshot_iter = -1
     timer = Timer()
     while self.solver.iter < max_iters:
         timer.tic()
         self.solver.step(1)	    
         print 'fc9_1:',sorted(self.solver.net.params['fc9_1'][0].data[0])[-1]
         #print 'fc9:',sorted(self.solver.net.params['fc9'][0].data[0])[-1]
         #print 'fc7:',sorted(self.solver.net.params['fc7'][0].data[0])[-1]
         #print 'fc6:',sorted(self.solver.net.params['fc6'][0].data[0])[-1]
         #print 'fc9:',(self.solver.net.params['fc9'][0].data[0])[0]
         #print 'fc7:',(self.solver.net.params['fc7'][0].data[0])[0]
         #print 'fc6:',(self.solver.net.params['fc6'][0].data[0])[0]
         #print 'conv5_3:',self.solver.net.params['conv5_3'][0].data[0][0][0]
         #print 'conv5_2:',self.solver.net.params['conv5_2'][0].data[0][0][0]
         #print 'conv5_1:',self.solver.net.params['conv5_1'][0].data[0][0][0]
         #print 'conv4_3:',self.solver.net.params['conv4_3'][0].data[0][0][0]
         #print 'fc9:',self.solver.net.params['fc9'][0].data[0][0]
         timer.toc()
         if self.solver.iter % (10 * self.solver_param.display) == 0:
             print 'speed: {:.3f}s / iter'.format(timer.average_time)          
Developer: Ambier, Project: tripletloss, Lines of code: 24, Source file: train.py

Example 6: train_with_hard_negatives

# Required import: from timer import Timer [as alias]
# Or: from timer.Timer import toc [as alias]
    def train_with_hard_negatives(self):
        _t = Timer()
        roidb = self.imdb.roidb
        num_images = len(roidb)

        for epoch in range(0,self.svm_nrEpochs):

            # num_images = 100
            for i in range(num_images):
                print("*** EPOCH = %d, IMAGE = %d *** " % (epoch, i))
                #im = cv2.imread(self.imdb.image_path_at(i))
                #if roidb[i]['flipped']:
                #    im = im[:, ::-1, :]
                #im = self.imdb.image_path_at(i)
                _t.tic()
                scores, boxes, feat = self.im_detect(self.net, i, roidb[i]['boxes'], self.feature_scale)
                _t.toc()
                #feat = self.net.blobs[self.layer].data
                for j in range(1, self.imdb.num_classes):
                    hard_inds = \
                        np.where((scores[:, j] > self.hard_thresh) &
                                 (roidb[i]['gt_overlaps'][:, j].toarray().ravel() <
                                  self.neg_iou_thresh))[0]
                    if len(hard_inds) > 0:
                        hard_feat = feat[hard_inds, :].copy()
                        new_w_b = \
                            self.trainers[j].append_neg_and_retrain(feat=hard_feat)
                        if new_w_b is not None:
                            self.update_net(j, new_w_b[0], new_w_b[1])
                            np.savetxt(self.svmWeightsPath[:-4]   + "_epoch" + str(epoch) + ".txt", self.net.params['cls_score'][0].data)
                            np.savetxt(self.svmBiasPath[:-4]      + "_epoch" + str(epoch) + ".txt", self.net.params['cls_score'][1].data)
                            np.savetxt(self.svmFeatScalePath[:-4] + "_epoch" + str(epoch) + ".txt", [self.feature_scale])

            print(('train_with_hard_negatives: '
                   '{:d}/{:d} {:.3f}s').format(i + 1, len(roidb),
                                               _t.average_time))
Developer: AllanYiin, Project: CNTK, Lines of code: 38, Source file: train_svms.py

Example 7: train_model

# Required import: from timer import Timer [as alias]
# Or: from timer.Timer import toc [as alias]
	def train_model(self, max_iters, TRAIN_SNAPSHOT_ITERS=2000):
		print 'test_iter: {}'.format(self.solver_param.test_iter[0])
		last_snapshot_iter = -1
		timer = Timer()
		model_paths = []
		self.solver.net.forward()
		self.solver.test_nets[0].forward()
		while self.solver.iter < max_iters:
			timer.tic()
			self.solver.step(1)
			timer.toc()
			if self.solver.iter % (10 * int(self.solver_param.display)) == 0:
				print 'speed: {:.3f}s / iter'.format(timer.average_time)
			timer.tic()
			self.test_model()
			timer.toc()
			if self.solver.iter % TRAIN_SNAPSHOT_ITERS == 0:
				last_snapshot_iter = self.solver.iter
				model_paths.append(self.snapshot())

		if last_snapshot_iter != self.solver.iter:
			model_paths.append(self.snapshot())

		return model_paths
Developer: deanSunny, Project: caffeDL, Lines of code: 26, Source file: caffe_train_wrapper.py

Example 8: build_tsv

# Required import: from timer import Timer [as alias]
# Or: from timer.Timer import toc [as alias]
def build_tsv():
    # Set up the simulator
    sim = MatterSim.Simulator()
    sim.setCameraResolution(WIDTH, HEIGHT)
    sim.setCameraVFOV(math.radians(VFOV))
    sim.setDiscretizedViewingAngles(True)
    sim.init()

    # Set up Caffe resnet
    caffe.set_device(GPU_ID)
    caffe.set_mode_gpu()
    net = caffe.Net(PROTO, MODEL, caffe.TEST)
    net.blobs['data'].reshape(BATCH_SIZE, 3, HEIGHT, WIDTH)

    count = 0
    t_render = Timer()
    t_net = Timer()
    with open(OUTFILE, 'wb') as tsvfile:
        writer = csv.DictWriter(tsvfile, delimiter = '\t', fieldnames = TSV_FIELDNAMES)          

        # Loop all the viewpoints in the simulator
        viewpointIds = load_viewpointids()
        for scanId,viewpointId in viewpointIds:
            t_render.tic()
            # Loop all discretized views from this location
            blobs = []
            features = np.empty([VIEWPOINT_SIZE, FEATURE_SIZE], dtype=np.float32)
            for ix in range(VIEWPOINT_SIZE):
                if ix == 0:
                    sim.newEpisode(scanId, viewpointId, 0, math.radians(-30))
                elif ix % 12 == 0:
                    sim.makeAction(0, 1.0, 1.0)
                else:
                    sim.makeAction(0, 1.0, 0)

                state = sim.getState()
                assert state.viewIndex == ix
                
                # Transform and save generated image
                blobs.append(transform_img(state.rgb))

            t_render.toc()
            t_net.tic()
            # Run as many forward passes as necessary
            assert VIEWPOINT_SIZE % BATCH_SIZE == 0
            forward_passes = VIEWPOINT_SIZE / BATCH_SIZE            
            ix = 0
            for f in range(forward_passes):
                for n in range(BATCH_SIZE):
                    # Copy image blob to the net
                    net.blobs['data'].data[n, :, :, :] = blobs[ix]
                    ix += 1
                # Forward pass
                output = net.forward()
                features[f*BATCH_SIZE:(f+1)*BATCH_SIZE, :] = net.blobs['pool5'].data[:,:,0,0]

            writer.writerow({
                'scanId': scanId,
                'viewpointId': viewpointId,
                'image_w': WIDTH,
                'image_h': HEIGHT,
                'vfov' : VFOV,
                'features': base64.b64encode(features)
            })
            count += 1
            t_net.toc()
            if count % 100 == 0:
                print 'Processed %d / %d viewpoints, %.1fs avg render time, %.1fs avg net time, projected %.1f hours' %\
                  (count,len(viewpointIds), t_render.average_time, t_net.average_time, 
                  (t_render.average_time+t_net.average_time)*len(viewpointIds)/3600)
Developer: volkancirik, Project: Matterport3DSimulator, Lines of code: 72, Source file: precompute_img_features.py

Example 9: Fast_RCNN_C_Interface

# Required import: from timer import Timer [as alias]
# Or: from timer.Timer import toc [as alias]
        sys.exit()
    
    #Create Fast_RCNN C Interface
    fast_rcnn = Fast_RCNN_C_Interface()

    #Get Camera Image
    while True:
        #Read Image
        ret, img = cap.read()
        #Detect Object and Show FPS
        timer = Timer()
        timer.tic()
        # Run Fast-RCNN
        hand5_max_detection = fast_rcnn.detect_object(img)
        print 'HAND5 MAX DETECTION IS: {}'.format(hand5_max_detection)
        timer.toc()
        print 'Detection took {:.3f}s for ONE IMAGE !! '.format(timer.total_time)

        #Draw the Biggest Rectangle
        if not (hand5_max_detection[0] == 0 and hand5_max_detection[1] == 0
                and hand5_max_detection[2] == 0 and hand5_max_detection[3] == 0):
            #Draw the Rectangle
            cv2.rectangle(img, (hand5_max_detection[0], hand5_max_detection[1]), (hand5_max_detection[2], hand5_max_detection[3]), (0, 255, 0), 3)
 
        #Show Image
        cv2.imshow('Detect Result', img)
        cv2.waitKey(30)

    # Release OpenCV resource
    if cap is not None and cap.isOpened():
        cap.release()
Developer: joeking11829, Project: fast-rcnn, Lines of code: 33, Source file: call_fast_rcnn.py


Note: The timer.Timer.toc examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Please refer to the corresponding project's License for distribution and use; do not reproduce without permission.