

Python utils.Timer Usage Examples

This article collects typical usage examples of utils.Timer in Python, drawn from open-source projects. If you have been wondering what utils.Timer does, how to use it, or simply want to see it in real code, the curated snippets below should help. Note that utils here is each project's own local helper module, not a standard-library package, so the Timer interface differs from project to project. You can also explore the other members of each project's utils module for related utilities.


Below are 11 code examples of utils.Timer, sorted by popularity by default. You can upvote any example you like or find useful; your votes help the system recommend better Python code examples.
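
Before reading the examples, it helps to have a mental model of the interface. Several snippets below use Timer as a context manager that reports elapsed time (Examples 3, 8, 10, 11), others call a .time() method for elapsed seconds (Examples 4-6), and still others use callback, threading, or on()/off() variants (Examples 2, 7, 9). Here is a minimal sketch supporting the first two patterns; it is an illustration under assumed names, not any of these projects' actual implementations:

import time

class Timer(object):
    """A minimal stopwatch: usable as a context manager that reports
    elapsed wall-clock time, or queried directly via time()."""
    def __init__(self, name=None):
        self.name = name
        self.start = time.time()

    def time(self):
        # Seconds elapsed since construction (or since entering the context).
        return time.time() - self.start

    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if self.name:
            print('[{}] took {:.4f} s'.format(self.name, self.time()))
        return False  # never suppress exceptions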

Example 1: __init__

# Required module: import utils [as alias]
# Or: from utils import Timer [as alias]
# (this snippet additionally requires: import importlib)
def __init__(self, log_dir, logger, enable):
        self.writer = None
        if enable:
            log_dir = str(log_dir)
            try:
                self.writer = importlib.import_module('tensorboardX').SummaryWriter(log_dir)
            except ImportError:
                message = "Warning: TensorboardX visualization is configured to use, but currently not installed on " \
                    "this machine. Please install the package by 'pip install tensorboardx' command or turn " \
                    "off the option in the 'config.json' file."
                logger.warning(message)
        self.step = 0
        self.mode = ''

        self.tb_writer_ftns = [
            'add_scalar', 'add_scalars', 'add_image', 'add_images', 'add_audio',
            'add_text', 'add_histogram', 'add_pr_curve', 'add_embedding'
        ]
        self.tag_mode_exceptions = ['add_histogram', 'add_embedding']
        self.timer = Timer() 
Developer: yjlolo | Project: vae-audio | Lines of code: 22 | Source file: visualization.py
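
A hypothetical instantiation of this writer; the class name, log directory, and logger are placeholders, since the snippet above shows only the __init__:

import logging

logger = logging.getLogger(__name__)
writer = TensorboardWriter('saved/log', logger, enable=True)  # class name assumed for illustration
print(writer.timer.time())  # elapsed seconds, if Timer follows the sketch near the top of this article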

Example 2: __init__

# Required module: import utils [as alias]
# Or: from utils import Timer [as alias]
def __init__(self, screen, pos, images, scroll_period, duration=-1):
        """ Create an animation.

            screen: The screen to which the animation will be drawn
            pos: Position on the screen
            images:
                A list of surface objects to cyclically scroll through
            scroll_period:
                Scrolling period (in ms)
            duration:
                Duration of the animation (in ms). If -1, the
                animation will have indefinite duration.
        """
        self.screen = screen
        self.images = images
        self.pos = pos
        self.img_ptr = 0
        self.active = True
        self.duration = duration

        self.scroll_timer = Timer(scroll_period, self._advance_img)
        self.active_timer = Timer(duration, self._inactivate, True) 
Developer: eliben | Project: code-for-blog | Lines of code: 24 | Source file: simpleanimation.py
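
Unlike the stopwatch-style Timer above, this example expects a callback timer: Timer(interval_ms, callback, oneshot=False) driven from a game loop. A minimal sketch of that interface, assuming a per-frame update(time_passed) call as is typical in Pygame loops (names and the handling of -1 are assumptions):

class Timer(object):
    """Calls `callback` every `interval` ms; fires once if `oneshot` is True.
    An interval of -1 means the timer never fires (indefinite duration)."""
    def __init__(self, interval, callback, oneshot=False):
        self.interval = interval
        self.callback = callback
        self.oneshot = oneshot
        self.elapsed = 0
        self.alive = True

    def update(self, time_passed):
        # Drive from the game loop with the ms elapsed since the previous frame.
        if not self.alive or self.interval < 0:
            return
        self.elapsed += time_passed
        if self.elapsed >= self.interval:
            self.elapsed -= self.interval
            self.callback()
            if self.oneshot:
                self.alive = False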

Example 3: evaluate

# Required module: import utils [as alias]
# Or: from utils import Timer [as alias]
def evaluate(gt_labels, pred_labels, metric='pairwise'):
    if isinstance(gt_labels, str) and isinstance(pred_labels, str):
        print('[gt_labels] {}'.format(gt_labels))
        print('[pred_labels] {}'.format(pred_labels))
        gt_labels, gt_lb_set = _read_meta(gt_labels)
        pred_labels, pred_lb_set = _read_meta(pred_labels)

        print('#inst: gt({}) vs pred({})'.format(len(gt_labels),
                                                 len(pred_labels)))
        print('#cls: gt({}) vs pred({})'.format(len(gt_lb_set),
                                                len(pred_lb_set)))

    metric_func = metrics.__dict__[metric]

    with Timer('evaluate with {}{}{}'.format(TextColors.FATAL, metric,
                                             TextColors.ENDC)):
        result = metric_func(gt_labels, pred_labels)
    if isinstance(result, float):  # np.float was removed in NumPy 1.24; np.float64 subclasses float
        print('{}{}: {:.4f}{}'.format(TextColors.OKGREEN, metric, result,
                                      TextColors.ENDC))
    else:
        ave_pre, ave_rec, fscore = result
        print('{}ave_pre: {:.4f}, ave_rec: {:.4f}, fscore: {:.4f}{}'.format(
            TextColors.OKGREEN, ave_pre, ave_rec, fscore, TextColors.ENDC)) 
Developer: yl-1993 | Project: learn-to-cluster | Lines of code: 26 | Source file: evaluate.py
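
A hypothetical invocation; the label-file paths are placeholders, and _read_meta, metrics, and TextColors come from the same project:

# Labels can be passed as file paths (parsed via _read_meta) or as label arrays.
evaluate('data/labels/gt.meta', 'data/labels/pred.meta', metric='pairwise')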

Example 4: train

# Required module: import utils [as alias]
# Or: from utils import Timer [as alias]
def train(args, data_loader, model, global_stats):
    """Run through one epoch of model training with the provided data loader."""
    # Initialize meters + timers
    train_loss = utils.AverageMeter()
    epoch_time = utils.Timer()

    # Run one epoch
    for idx, ex in enumerate(data_loader):
        train_loss.update(*model.update(ex))

        if idx % args.display_iter == 0:
            logger.info('train: Epoch = %d | iter = %d/%d | ' %
                        (global_stats['epoch'], idx, len(data_loader)) +
                        'loss = %.2f | elapsed time = %.2f (s)' %
                        (train_loss.avg, global_stats['timer'].time()))
            train_loss.reset()

    logger.info('train: Epoch %d done. Time for epoch = %.2f (s)' %
                (global_stats['epoch'], epoch_time.time()))

    # Checkpoint
    if args.checkpoint:
        model.checkpoint(args.model_file + '.checkpoint',
                         global_stats['epoch'] + 1)


# ------------------------------------------------------------------------------
# Validation loops. Includes both "unofficial" and "official" functions that
# use different metrics and implementations.
# ------------------------------------------------------------------------------ 
Developer: HKUST-KnowComp | Project: MnemonicReader | Lines of code: 32 | Source file: train.py
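
Examples 4-6 pair utils.Timer with utils.AverageMeter. A minimal sketch consistent with how it is called here (update(val, n=1), .avg, .reset() are inferred from usage, not taken from the project source):

class AverageMeter(object):
    """Keeps a running average of a scalar metric."""
    def __init__(self):
        self.reset()

    def reset(self):
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        # `val` is the batch average; `n` is the batch size it was computed over.
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count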

Example 5: validate_unofficial

# Required module: import utils [as alias]
# Or: from utils import Timer [as alias]
def validate_unofficial(args, data_loader, model, global_stats, mode):
    """Run one full unofficial validation.
    Unofficial = does not use the official SQuAD evaluation script.
    """
    eval_time = utils.Timer()
    start_acc = utils.AverageMeter()
    end_acc = utils.AverageMeter()
    exact_match = utils.AverageMeter()

    # Make predictions
    examples = 0
    for ex in data_loader:
        batch_size = ex[0].size(0)
        pred_s, pred_e, _ = model.predict(ex)
        target_s, target_e = ex[-3:-1]

        # We get metrics for independent start/end and joint start/end
        accuracies = eval_accuracies(pred_s, target_s, pred_e, target_e)
        start_acc.update(accuracies[0], batch_size)
        end_acc.update(accuracies[1], batch_size)
        exact_match.update(accuracies[2], batch_size)

        # If getting train accuracies, sample max 10k
        examples += batch_size
        if mode == 'train' and examples >= 1e4:
            break

    logger.info('%s valid unofficial: Epoch = %d | start = %.2f | ' %
                (mode, global_stats['epoch'], start_acc.avg) +
                'end = %.2f | exact = %.2f | examples = %d | ' %
                (end_acc.avg, exact_match.avg, examples) +
                'valid time = %.2f (s)' % eval_time.time())

    return {'exact_match': exact_match.avg} 
Developer: HKUST-KnowComp | Project: MnemonicReader | Lines of code: 36 | Source file: train.py

Example 6: validate_official

# Required module: import utils [as alias]
# Or: from utils import Timer [as alias]
def validate_official(args, data_loader, model, global_stats,
                      offsets, texts, answers):
    """Run one full official validation. Uses exact spans and same
    exact match/F1 score computation as in the SQuAD script.

    Extra arguments:
        offsets: The character start/end indices for the tokens in each context.
        texts: Map of qid --> raw text of each example's context (matches offsets).
        answers: Map of qid --> list of accepted answers.
    """
    eval_time = utils.Timer()
    f1 = utils.AverageMeter()
    exact_match = utils.AverageMeter()

    # Run through examples
    examples = 0
    for ex in data_loader:
        ex_id, batch_size = ex[-1], ex[0].size(0)
        pred_s, pred_e, _ = model.predict(ex)

        for i in range(batch_size):
            s_offset = offsets[ex_id[i]][pred_s[i][0]][0]
            e_offset = offsets[ex_id[i]][pred_e[i][0]][1]
            prediction = texts[ex_id[i]][s_offset:e_offset]

            # Compute metrics
            ground_truths = answers[ex_id[i]]
            exact_match.update(utils.metric_max_over_ground_truths(
                utils.exact_match_score, prediction, ground_truths))
            f1.update(utils.metric_max_over_ground_truths(
                utils.f1_score, prediction, ground_truths))

        examples += batch_size

    logger.info('dev valid official: Epoch = %d | EM = %.2f | ' %
                (global_stats['epoch'], exact_match.avg * 100) +
                'F1 = %.2f | examples = %d | valid time = %.2f (s)' %
                (f1.avg * 100, examples, eval_time.time()))

    return {'exact_match': exact_match.avg * 100, 'f1': f1.avg * 100} 
Developer: HKUST-KnowComp | Project: MnemonicReader | Lines of code: 42 | Source file: train.py
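
To make the extra arguments concrete, their shapes might look like this (purely illustrative values):

# offsets[qid][token_idx] -> (char_start, char_end) into the raw context text
offsets = {'q1': [(0, 3), (4, 9), (10, 15)]}
texts   = {'q1': 'The quick brown fox'}
answers = {'q1': ['quick brown', 'quick']}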

Example 7: __init__

# Required module: import utils [as alias]
# Or: from utils import Timer [as alias]
def __init__(self, quit_callback=None):
        super(HappyMacStatusBarApp, self).__init__("", quit_button=None)
        self.quit_button = None
        self.quit_callback = quit_callback
        self.menu = [ ]
        self.loading()
        self.menu._menu.setDelegate_(self)
        self.start = time.time()
        self.need_menu = False
        utils.set_menu_open(False)
        utils.Timer(1.0, self.update).start()
        log.log("Started HappyMac %s" % version_manager.last_version()) 
Developer: laffra | Project: happymac | Lines of code: 14 | Source file: main.py
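
Here utils.Timer(1.0, self.update).start() suggests a threading.Timer-style repeating timer: a period in seconds, a callback, and a start() method. A minimal sketch of such a timer (an assumption; the happymac source may differ):

import threading

class Timer(object):
    """Invokes `callback` every `interval` seconds on a background thread."""
    def __init__(self, interval, callback):
        self.interval = interval
        self.callback = callback
        self._timer = None

    def _run(self):
        self.callback()
        self.start()  # reschedule the next tick

    def start(self):
        self._timer = threading.Timer(self.interval, self._run)
        self._timer.daemon = True  # don't block interpreter exit
        self._timer.start()

    def cancel(self):
        if self._timer is not None:
            self._timer.cancel()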

Example 8: aro

# Required module: import utils [as alias]
# Or: from utils import Timer [as alias]
def aro(feats, knn, th_sim, num_process, **kwargs):
    """
    Master function. Takes the descriptor matrix and returns clusters.
    n_neighbors are the number of nearest neighbors considered and thresh
    is the clustering distance threshold
    """
    with Timer('[aro] search knn with pyflann'):
        nbrs, _ = build_index(feats, n_neighbors=knn)
    dists = calculate_symmetric_dist(nbrs, num_process)
    print('symmetric dist:', dists.max(), dists.min(), dists.mean())
    clusters = aro_clustering(nbrs, dists, 1. - th_sim)
    labels_ = clusters2labels(clusters, feats.shape[0])
    return labels_ 
Developer: yl-1993 | Project: learn-to-cluster | Lines of code: 15 | Source file: aro.py
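
A hypothetical call, with placeholder descriptors and hyperparameters (build_index and the other helpers come from the same project):

import numpy as np

feats = np.random.rand(1000, 256).astype(np.float32)  # placeholder feature matrix
labels = aro(feats, knn=80, th_sim=0.7, num_process=4)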

Example 9: train

# Required module: import utils [as alias]
# Or: from utils import Timer [as alias]
def train(self):
        self.train_timer = Timer()
        self.losses_timer = Timer()
        self.tvd_timer = Timer()
        self.scatter_timer = Timer()

        self.log_step = 50
        self.max_step = 50001
        #self.max_step=501
        for step in trange(self.max_step):

            if step % self.log_step == 0:
                for gan in self.gans:
                    self.losses_timer.on()
                    gan.record_losses(self.sess)
                    self.losses_timer.off()

                    self.tvd_timer.on()
                    gan.record_tvd(self.sess)
                    self.tvd_timer.off()

            if step % (10*self.log_step) == 0:
                for gan in self.gans:
                    self.scatter_timer.on()
                    gan.record_scatter(self.sess)

                    #DEBUG: reassure me nothing changes during optimization
                    #self.data_scatterplot()

                    self.scatter_timer.off()

            if step % (5000) == 0:
                self.saver.save(self.sess,self.save_model_name,step)

            self.train_timer.on()
            self.sess.run(self.train_op)
            self.train_timer.off()


        print("Timers:")
        print(self.train_timer)
        print(self.losses_timer)
        print(self.tvd_timer)
        print(self.scatter_timer) 
Developer: mkocaoglu | Project: CausalGAN | Lines of code: 46 | Source file: trainer.py
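
This trainer expects yet another Timer flavor: an accumulating stopwatch with on()/off() toggles and a readable __str__. A minimal sketch matching that usage (an assumption, not the CausalGAN implementation):

import time

class Timer(object):
    """Accumulates wall-clock time over repeated on()/off() intervals."""
    def __init__(self, name=''):
        self.name = name
        self.total = 0.0
        self._start = None

    def on(self):
        self._start = time.time()

    def off(self):
        if self._start is not None:
            self.total += time.time() - self._start
            self._start = None

    def __str__(self):
        return '{} timer: {:.2f} s accumulated'.format(self.name or 'unnamed', self.total)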

Example 10: extract_supervised_data

# Required module: import utils [as alias]
# Or: from utils import Timer [as alias]
def extract_supervised_data(self, demo_file, noisy=False):
        """
            Load the states and actions of the demos into memory.
            Args:
                demo_file: list of demo files, where each file contains the expert's
                    states and actions for one task.
                noisy: if True, store the loaded demos as self.noisy_demos.
        """
        demos = extract_demo_dict(demo_file)
        # We don't need the whole dataset of simulated pushing.
        if FLAGS.experiment == 'sim_push':
            for key in demos.keys():
                demos[key]['demoX'] = demos[key]['demoX'][6:-6, :, :].copy()
                demos[key]['demoU'] = demos[key]['demoU'][6:-6, :, :].copy()
        n_folders = len(demos.keys())
        N_demos = sum(demo['demoX'].shape[0] for demo in demos.values())  # .values(): .iteritems() is Python 2 only
        self.state_idx = range(demos[0]['demoX'].shape[-1])
        self._dU = demos[0]['demoU'].shape[-1]
        print("Number of demos: %d" % N_demos)  # print() call for Python 3
        idx = np.arange(n_folders)
        if FLAGS.train:
            n_val = FLAGS.val_set_size # number of demos for testing
            if not hasattr(self, 'train_idx'):
                if n_val != 0:
                    if not FLAGS.shuffle_val:
                        self.val_idx = idx[-n_val:]
                        self.train_idx = idx[:-n_val]
                    else:
                        self.val_idx = np.sort(np.random.choice(idx, size=n_val, replace=False))
                        mask = np.array([(i in self.val_idx) for i in idx])
                        self.train_idx = np.sort(idx[~mask])
                else:
                    self.train_idx = idx
                    self.val_idx = []
            # Normalize the states if it's training.
            with Timer('Normalizing states'):
                if self.scale is None or self.bias is None:
                    states = np.vstack([demos[i]['demoX'] for i in self.train_idx])  # list, not generator: np.vstack needs a sequence; restricted to train_idx to limit memory
                    states = states.reshape(-1, len(self.state_idx))
                    # 1e-3 to avoid infs if some state dimensions don't change in the
                    # first batch of samples
                    self.scale = np.diag(
                        1.0 / np.maximum(np.std(states, axis=0), 1e-3))
                    self.bias = - np.mean(
                        states.dot(self.scale), axis=0)
                    # Save the scale and bias.
                    with open('data/scale_and_bias_%s.pkl' % FLAGS.experiment, 'wb') as f:
                        pickle.dump({'scale': self.scale, 'bias': self.bias}, f)
                for key in demos.keys():
                    demos[key]['demoX'] = demos[key]['demoX'].reshape(-1, len(self.state_idx))
                    demos[key]['demoX'] = demos[key]['demoX'].dot(self.scale) + self.bias
                    demos[key]['demoX'] = demos[key]['demoX'].reshape(-1, self.T, len(self.state_idx))
        if not noisy:
            self.demos = demos
        else:
            self.noisy_demos = demos 
Developer: tianheyu927 | Project: mil | Lines of code: 56 | Source file: data_generator.py
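
The normalization block inside the Timer context computes a per-dimension whitening transform. In isolation, with a hypothetical state matrix, it amounts to:

import numpy as np

states = np.random.randn(500, 20)  # placeholder (N, state_dim) matrix
# Floor the std at 1e-3 so constant dimensions don't produce infs.
scale = np.diag(1.0 / np.maximum(np.std(states, axis=0), 1e-3))
bias = -np.mean(states.dot(scale), axis=0)
normalized = states.dot(scale) + bias  # ~zero mean, ~unit variance per dimension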

Example 11: init_network

# Required module: import utils [as alias]
# Or: from utils import Timer [as alias]
def init_network(self, graph, input_tensors=None, restore_iter=0, prefix='Training_'):
        """ Helper method to initialize the tf networks used """
        with graph.as_default():
            with Timer('building TF network'):
                result = self.construct_model(input_tensors=input_tensors, prefix=prefix, dim_input=self._dO, dim_output=self._dU,
                                          network_config=self.network_params)
            outputas, outputbs, test_output, lossesa, lossesb, final_eept_lossesb, flat_img_inputb, gradients = result
            if 'Testing' in prefix:
                self.obs_tensor = self.obsa
                self.state_tensor = self.statea
                self.test_act_op = test_output
                self.image_op = flat_img_inputb

            trainable_vars = tf.trainable_variables()
            total_loss1 = tf.reduce_sum(lossesa) / tf.to_float(self.meta_batch_size)
            total_losses2 = [tf.reduce_sum(lossesb[j]) / tf.to_float(self.meta_batch_size) for j in range(self.num_updates)]
            total_final_eept_losses2 = [tf.reduce_sum(final_eept_lossesb[j]) / tf.to_float(self.meta_batch_size) for j in range(self.num_updates)]

            if 'Training' in prefix:
                self.total_loss1 = total_loss1
                self.total_losses2 = total_losses2
                self.total_final_eept_losses2 = total_final_eept_losses2
            elif 'Validation' in prefix:
                self.val_total_loss1 = total_loss1
                self.val_total_losses2 = total_losses2
                self.val_total_final_eept_losses2 = total_final_eept_losses2

            if 'Training' in prefix:
                self.train_op = tf.train.AdamOptimizer(self.meta_lr).minimize(self.total_losses2[self.num_updates - 1])
                # Add summaries
                summ = [tf.summary.scalar(prefix + 'Pre-update_loss', self.total_loss1)]
                for j in range(self.num_updates):  # range: xrange is Python 2 only
                    summ.append(tf.summary.scalar(prefix + 'Post-update_loss_step_%d' % j, self.total_losses2[j]))
                    summ.append(tf.summary.scalar(prefix + 'Post-update_final_eept_loss_step_%d' % j, self.total_final_eept_losses2[j]))
                    for k in range(len(self.sorted_weight_keys)):
                        summ.append(tf.summary.histogram('Gradient_of_%s_step_%d' % (self.sorted_weight_keys[k], j), gradients[j][k]))
                self.train_summ_op = tf.summary.merge(summ)
            elif 'Validation' in prefix:
                # Add summaries
                summ = [tf.summary.scalar(prefix + 'Pre-update_loss', self.val_total_loss1)]
                for j in range(self.num_updates):
                    summ.append(tf.summary.scalar(prefix + 'Post-update_loss_step_%d' % j, self.val_total_losses2[j]))
                    summ.append(tf.summary.scalar(prefix + 'Post-update_final_eept_loss_step_%d' % j, self.val_total_final_eept_losses2[j]))
                self.val_summ_op = tf.summary.merge(summ) 
Developer: tianheyu927 | Project: mil | Lines of code: 46 | Source file: mil.py


Note: the utils.Timer examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and any use or redistribution should follow the corresponding project's license. Please do not reproduce this compilation without permission.