

Python Timer.reset Method Code Examples

This article collects and summarizes typical usage examples of the Python method utils.timer.Timer.reset. If you are unsure what utils.timer.Timer.reset does or how to use it, the curated examples below may help. You can also explore further usage examples of utils.timer.Timer, the class this method belongs to.


The following presents 5 code examples of the Timer.reset method, sorted by popularity by default.
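All of the examples construct a Timer from utils.timer and call some combination of reset(), tic(), toc(), lap(), total(), and the average_time attribute. The actual utils.timer.Timer source is not reproduced on this page, so the snippet below is only a minimal sketch of an interface consistent with how the examples use it, not the real implementation.

import time

class Timer(object):
    """Minimal stand-in for utils.timer.Timer, inferred from the examples below."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated timing state."""
        self.reset_time = time.time()      # reference point for total()
        self.last_time = self.reset_time   # reference point for lap()
        self.start_time = self.reset_time  # reference point for tic()/toc()
        self.total_time = 0.0
        self.calls = 0
        self.average_time = 0.0

    def tic(self):
        """Start timing one interval."""
        self.start_time = time.time()

    def toc(self, average=True):
        """Stop timing; return the running average or the last interval."""
        diff = time.time() - self.start_time
        self.total_time += diff
        self.calls += 1
        self.average_time = self.total_time / self.calls
        return self.average_time if average else diff

    def lap(self):
        """Return seconds since the previous lap() (or since reset())."""
        now = time.time()
        diff = now - self.last_time
        self.last_time = now
        return diff

    def total(self):
        """Return seconds since reset()."""
        return time.time() - self.reset_time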

Example 1: train_whole_model

# Required import: from utils.timer import Timer [as alias]
# Or: from utils.timer.Timer import reset [as alias]
    def train_whole_model(self, tester=None):
        '''
        Test the performance using all of the features.
        This may be memory-consuming.
        '''
        self.comm.barrier()
        mpi.rootprint('*'*46)
        mpi.rootprint('*'*15+'whole featureset'+'*'*15)
        mpi.rootprint('*'*46)

        if tester is not None:
            # normalize the test data with the stats of the training data
            tester.normalize_data(self.mLocal, self.stdLocal)

        timer = Timer()
        timer.reset()
        if self.maxGraftDim != self.nMetabins*self.nCodes:
            mpi.rootprint('Please initialize with maxGraftDim=nMetabins*nCodes')
            return
        self.nSelFeats = 0
        self.isSelected[:] = False
        mpi.rootprint('Generating Features...')
        for code in range(self.nCodes):
            for metabin in range(self.nMetabins):
                self.append_feature(code, metabin)
                if tester is not None:
                    tester.append_feature(code, metabin)
        mpi.rootprint('Feature generation took {} secs'.format(timer.lap()))
        mpi.rootprint('Training...')
        loss = self.retrain_model(None)
        mpi.rootprint('Training took {} secs'.format(timer.lap()))
        mpi.rootprint('Training accuracy: {}'.format(self.compute_current_accuracy()))
        if tester is not None:
            mpi.rootprint('Current Testing accuracy: {}'.format(tester.compute_test_accuracy(self.weights, self.b)))
Developer: cc13ny, Project: galatea, Lines: 36, Source: grafting_mb.py

Example 2: randomselecttest

# Required import: from utils.timer import Timer [as alias]
# Or: from utils.timer.Timer import reset [as alias]
    def randomselecttest(self, tester=None, random_iterations=1, should_normalize=True):
        '''
        Test the performance of random selection.
        Modified by Ian Goodfellow to use seeded random number generation so
        that results are replicable.
        '''
        self.comm.barrier()
        mpi.rootprint('*'*46)
        mpi.rootprint('*'*15+'random selection'+'*'*15)
        mpi.rootprint('*'*46)

        trainaccu = np.zeros(random_iterations)
        testaccu = np.zeros(random_iterations)

        rng = np.random.RandomState([1,2,3])

        if tester is not None:
            # normalize the test data with the stats of the training data
            tester.normalize_data(self.mLocal, self.stdLocal, sabotage = not should_normalize)

        itertimer = Timer()
        for iter in range(random_iterations):
            itertimer.reset()
            mpi.rootprint('*'*15+'Round {}'.format(iter)+'*'*15)
            if self.rank == 0:
                # decide which features we are going to select (rank 0 only)
                allidx = np.arange(self.nCodes*self.nMetabins, dtype=int)
                rng.shuffle(allidx)
                codeidlist = allidx // self.nMetabins   # integer division so indices stay ints
                metabinidlist = allidx % self.nMetabins
            else:
                codeidlist = None
                metabinidlist = None
            codeidlist = self.comm.bcast(codeidlist, root=0)
            metabinidlist = self.comm.bcast(metabinidlist, root=0)

            self.append_multiple_features(codeidlist[:self.maxGraftDim], metabinidlist[:self.maxGraftDim])
            mpi.rootprint('Feature selection took {} secs'.format(itertimer.lap()))
            mpi.rootprint('Training...')
            loss = self.retrain_model(None)
            trainaccu[iter] = self.compute_current_accuracy()
            mpi.rootprint('Training took {} secs'.format(itertimer.lap()))
            mpi.rootprint('Current training accuracy: {}'.format(trainaccu[iter]))
            if tester is not None:
                tester.append_multiple_features(codeidlist[:self.maxGraftDim], metabinidlist[:self.maxGraftDim])
                testaccu[iter] = tester.compute_test_accuracy(self.weights, self.b)
                mpi.rootprint('Current Testing accuracy: {}'.format(testaccu[iter]))
            mpi.rootprint('Testing selection took {} secs'.format(itertimer.lap()))
        self.safebarrier()

        mpi.rootprint('*'*15+'Summary'+'*'*15)
        mpi.rootprint('Training accuracy: {} +- {}'.format(np.mean(trainaccu),np.std(trainaccu)))
        mpi.rootprint('Testing accuracy: {} +- {}'.format(np.mean(testaccu),np.std(testaccu)))
Developer: cc13ny, Project: galatea, Lines: 55, Source: grafting_mb.py
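The seeded-shuffle-then-broadcast pattern in Example 2 (rank 0 draws a reproducible permutation, every rank receives the same one) can be illustrated outside the project roughly as follows. This standalone sketch assumes mpi4py is available and is not part of grafting_mb.py; the sizes are hypothetical.

import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
n_codes, n_metabins = 4, 6                    # hypothetical problem sizes

if comm.Get_rank() == 0:
    rng = np.random.RandomState([1, 2, 3])    # fixed seed => replicable selection
    allidx = np.arange(n_codes * n_metabins)
    rng.shuffle(allidx)
    codeidlist = allidx // n_metabins         # which code each flat index maps to
    metabinidlist = allidx % n_metabins       # which metabin each flat index maps to
else:
    codeidlist = None
    metabinidlist = None

# every rank ends up with the identical permutation chosen on rank 0
codeidlist = comm.bcast(codeidlist, root=0)
metabinidlist = comm.bcast(metabinidlist, root=0)
print(comm.Get_rank(), codeidlist[:5])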

Example 3: TrainingStats

# Required import: from utils.timer import Timer [as alias]
# Or: from utils.timer.Timer import reset [as alias]
class TrainingStats(object):
    """Track vital training statistics."""

    def __init__(self, model):
        # Window size for smoothing tracked values (with median filtering)
        self.WIN_SZ = 20
        # Output logging period in SGD iterations
        self.LOG_PERIOD = 20
        self.smoothed_losses_and_metrics = {
            key: SmoothedValue(self.WIN_SZ)
            for key in model.losses + model.metrics
        }
        self.losses_and_metrics = {
            key: 0
            for key in model.losses + model.metrics
        }
        self.smoothed_total_loss = SmoothedValue(self.WIN_SZ)
        self.smoothed_mb_qsize = SmoothedValue(self.WIN_SZ)
        self.iter_total_loss = np.nan
        self.iter_timer = Timer()
        self.model = model

    def IterTic(self):
        self.iter_timer.tic()

    def IterToc(self):
        return self.iter_timer.toc(average=False)

    def ResetIterTimer(self):
        self.iter_timer.reset()

    def UpdateIterStats(self):
        """Update tracked iteration statistics."""
        for k in self.losses_and_metrics.keys():
            if k in self.model.losses:
                self.losses_and_metrics[k] = nu.sum_multi_gpu_blob(k)
            else:
                self.losses_and_metrics[k] = nu.average_multi_gpu_blob(k)
        for k, v in self.smoothed_losses_and_metrics.items():
            v.AddValue(self.losses_and_metrics[k])
        self.iter_total_loss = np.sum(
            np.array([self.losses_and_metrics[k] for k in self.model.losses])
        )
        self.smoothed_total_loss.AddValue(self.iter_total_loss)
        self.smoothed_mb_qsize.AddValue(
            self.model.roi_data_loader._minibatch_queue.qsize()
        )

    def LogIterStats(self, cur_iter, lr):
        """Log the tracked statistics."""
        if (cur_iter % self.LOG_PERIOD == 0 or
                cur_iter == cfg.SOLVER.MAX_ITER - 1):
            eta_seconds = self.iter_timer.average_time * (
                cfg.SOLVER.MAX_ITER - cur_iter
            )
            eta = str(datetime.timedelta(seconds=int(eta_seconds)))
            mem_stats = c2_py_utils.GetGPUMemoryUsageStats()
            mem_usage = np.max(mem_stats['max_by_gpu'][:cfg.NUM_GPUS])
            stats = dict(
                iter=cur_iter,
                lr=float(lr),
                time=self.iter_timer.average_time,
                loss=self.smoothed_total_loss.GetMedianValue(),
                eta=eta,
                mb_qsize=int(
                    np.round(self.smoothed_mb_qsize.GetMedianValue())
                ),
                mem=int(np.ceil(mem_usage / 1024 / 1024))
            )
            for k, v in self.smoothed_losses_and_metrics.items():
                stats[k] = v.GetMedianValue()
            log_json_stats(stats)
Developer: ArsenLuca, Project: Detectron, Lines: 74, Source: train_net.py

Example 4: TrainingStats

# Required import: from utils.timer import Timer [as alias]
# Or: from utils.timer.Timer import reset [as alias]
class TrainingStats(object):
    """Track vital training statistics."""

    def __init__(self, metrics, losses,
                 solver_max_iters):
        self.solver_max_iters = solver_max_iters
        # Window size for smoothing tracked values (with median filtering)
        self.win_sz = 20
        # Output logging period in SGD iterations
        self.log_period = 20
        self.smoothed_losses_and_metrics = {
            key: SmoothedValue(self.win_sz)
            for key in losses + metrics
        }
        self.losses_and_metrics = {
            key: 0
            for key in losses + metrics
        }
        self.smoothed_total_loss = SmoothedValue(self.win_sz)
        self.smoothed_mb_qsize = SmoothedValue(self.win_sz)
        self.iter_total_loss = np.nan
        self.iter_timer = Timer()
        self.metrics = metrics
        self.losses = losses

    def IterTic(self):
        self.iter_timer.tic()

    def IterToc(self):
        return self.iter_timer.toc(average=False)

    def ResetIterTimer(self):
        self.iter_timer.reset()

    def UpdateIterStats(self, losses_dict, metrics_dict):
        """Update tracked iteration statistics."""
        for k in self.losses_and_metrics.keys():
            if k in self.losses: # if loss
                self.losses_and_metrics[k] = losses_dict[k]
            else: # if metric
                self.losses_and_metrics[k] = metrics_dict[k]

        for k, v in self.smoothed_losses_and_metrics.items():
            v.AddValue(self.losses_and_metrics[k])
        #import pdb; pdb.set_trace()
        self.iter_total_loss = np.sum(
            np.array([self.losses_and_metrics[k] for k in self.losses])
        )
        self.smoothed_total_loss.AddValue(self.iter_total_loss)
        self.smoothed_mb_qsize.AddValue(
            #self.model.roi_data_loader._minibatch_queue.qsize()
            64
        )

    def LogIterStats(self, cur_iter, lr):
        """Log the tracked statistics."""
        if (cur_iter % self.log_period == 0 or
                cur_iter == self.solver_max_iters - 1):
            stats = self.GetStats(cur_iter, lr)
            log_json_stats(stats)

    def GetStats(self, cur_iter, lr):
        eta_seconds = self.iter_timer.average_time * (
            self.solver_max_iters - cur_iter
        )
        eta = str(datetime.timedelta(seconds=int(eta_seconds)))
        #mem_stats = c2_py_utils.GetGPUMemoryUsageStats()
        #mem_usage = np.max(mem_stats['max_by_gpu'][:cfg.NUM_GPUS])
        stats = dict(
            iter=cur_iter,
            lr="{:.6f}".format(float(lr)),
            time="{:.6f}".format(self.iter_timer.average_time),
            loss="{:.6f}".format(self.smoothed_total_loss.GetMedianValue()),
            eta=eta,
            #mb_qsize=int(np.round(self.smoothed_mb_qsize.GetMedianValue())),
            #mem=int(np.ceil(mem_usage / 1024 / 1024))
        )
        for k, v in self.smoothed_losses_and_metrics.items():
            stats[k] = "{:.6f}".format(v.GetMedianValue())
        return stats
Developer: ericeiffel, Project: detectorch, Lines: 82, Source: training_stats.py
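A typical call pattern for the TrainingStats class in Example 4 might look roughly like the loop below. TrainingStats is the class defined above; run_one_sgd_step, the loss/metric names, and the hyperparameter values are assumptions made purely for illustration.

def run_one_sgd_step():
    """Hypothetical stand-in for one SGD iteration; returns per-iteration values."""
    return ({'loss_cls': 0.5, 'loss_bbox': 0.2}, {'accuracy': 0.9})

max_iters = 200   # kept small for illustration
stats = TrainingStats(metrics=['accuracy'],
                      losses=['loss_cls', 'loss_bbox'],
                      solver_max_iters=max_iters)

for cur_iter in range(max_iters):
    stats.IterTic()                                   # start timing this iteration
    losses_dict, metrics_dict = run_one_sgd_step()    # hypothetical training step
    stats.IterToc()                                   # stop timing
    stats.UpdateIterStats(losses_dict, metrics_dict)  # push values into the smoothers
    stats.LogIterStats(cur_iter, lr=0.001)            # logs every log_period iterations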

Example 5: graft

# Required import: from utils.timer import Timer [as alias]
# Or: from utils.timer.Timer import reset [as alias]
    def graft(self, dump_every=0, dump_file=None, nActiveSet=None,
              tester=None, test_every=10, samplePerRun=1,
              fromDumpFile=None):
        '''
        The main grafting algorithm.
        ==Parameters==
        dump_every: how often to dump the current result; pass 0 if you do not want to dump.
        dump_file: dump file name.
        nActiveSet: when retraining, the number of features in the active set.
            Pass None for full retraining (may be slow!).
            Pass a positive number to select the most recently added features.
            Pass a negative number to select features by their gradient values
                (recommended, much better than the other approaches).
            Pass 0 for boosting.
        tester: the grafterMPI instance that hosts the test data.
        test_every: how often to compute test accuracy.
        samplePerRun: in each feature selection run, the proportion of features
            to sample and select from. Pass 1 to enumerate all features.
        fromDumpFile: restore from a dump file (not implemented for the mb version yet).
        '''
        self.comm.barrier()
        mpi.rootprint('*'*38)
        mpi.rootprint('*'*15+'grafting'+'*'*15)
        mpi.rootprint('*'*38)

        if True:
            mpi.rootprint('Number of data: {}'.format(self.nData))
            mpi.rootprint('Number of labels: {}'.format(self.nLabel))
            mpi.rootprint('Number of codes: {}'.format(self.nCodes))
            mpi.rootprint('Bins: {0}x{0}'.format(self.nBinsPerEdge))
            mpi.rootprint('Total pooling areas: {}'.format(self.nMetabins))
            mpi.rootprint('Total features: {}'.format(self.nMetabins*self.nCodes))
            mpi.rootprint('Number of features to select: {}'.format(self.maxGraftDim))
        mpi.rootprint('Graft Settings:')
        mpi.rootprint('dump_every = {}\nnActiveSet={}\ntest_every={}\nsamplePerRun={}'.format(\
                            dump_every, nActiveSet, test_every, samplePerRun))
        self.comm.barrier()

        if tester is not None:
            # normalize the test data with the stats of the training data
            tester.normalize_data(self.mLocal, self.stdLocal)
        if fromDumpFile is not None:
            self.restore_from_dump_file(fromDumpFile, tester)

        old_loss = 1e10
        timer = Timer()
        itertimer = Timer()
        for T in range(self.nSelFeats, self.maxGraftDim):
            itertimer.reset()
            mpi.rootprint('*'*15+'Round {}'.format(T)+'*'*15)
            score, codeid, metabinid = self.select_new_feature_by_grad(samplePerRun)
            mpi.rootprint('Selected Feature [code: {}, metabin: {}], score {}'.format(codeid, metabinid, score))
            # add this feature to the selected features
            self.append_feature(codeid, metabinid)
            mpi.rootprint('Number of Features: {}'.format(self.nSelFeats))
            mpi.rootprint('Feature selection took {} secs'.format(itertimer.lap()))
            mpi.rootprint('Retraining the model...')
            loss = self.retrain_model(nActiveSet, samplePerRun)
            mpi.rootprint('Total loss reduction {}/{}={}'.format(loss, old_loss, loss/old_loss))
            mpi.rootprint('Current training accuracy: {}'.format(self.compute_current_accuracy()))
            mpi.rootprint('Model retraining took {} secs'.format(itertimer.lap()))
            old_loss = loss

            if tester is not None:
                tester.append_feature(codeid, metabinid)
                if (T+1) % test_every == 0:
                    # print test accuracy
                    test_accuracy = tester.compute_test_accuracy(self.weights, self.b)
                    mpi.rootprint('Current Testing accuracy: {}'.format(test_accuracy))

            self.safebarrier()
            mpi.rootprint('This round took {} secs, total {} secs'.format(timer.lap(), timer.total()))
            mpi.rootprint('ETA {} secs.'.format(timer.total() * (self.maxGraftDim-T)/(T+1.0e-5)))

            if dump_every > 0 and (T+1) % dump_every == 0 and dump_file is not None:
                mpi.rootprint('*'*15 + 'Dumping' + '*'*15)
                self.dump_current_state(dump_file + str(T)+'.mat')

        mpi.rootprint('*'*15+'Finalizing'+'*'*15)
        if dump_file is not None:
            self.dump_current_state(dump_file + 'final.mat')
Developer: cc13ny, Project: galatea, Lines: 88, Source: grafting_mb.py
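The per-round ETA printed at the end of each grafting round in Example 5 scales the total elapsed time by the ratio of remaining rounds to completed rounds. A tiny standalone restatement of that formula (an illustration, not project code) is:

def eta_seconds(elapsed_total, rounds_done, rounds_max):
    """Estimate the seconds remaining after rounds_done of rounds_max rounds."""
    return elapsed_total * (rounds_max - rounds_done) / (rounds_done + 1e-5)

# e.g. 120 s spent on the first 10 of 100 rounds => roughly 1080 s remaining
print(eta_seconds(120.0, 10, 100))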


Note: The utils.timer.Timer.reset examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by various developers, and copyright in the source code remains with the original authors. Please refer to the corresponding project's license before distributing or using the code; do not reproduce this article without permission.