Python eval.evaluate Method Code Examples

This article collects typical usage examples of the eval.evaluate method in Python, drawn from open-source projects. If you are wondering what eval.evaluate does, how to call it, or how it is used in practice, the curated examples below may help. You can also explore other usage examples from the eval module it belongs to.


The following presents five code examples of the eval.evaluate method, ordered by popularity.
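Note that `eval` in every example below is a project-local module (an eval.py shipped with each repository), not Python's builtin eval() function. A minimal sketch of the two import styles the examples rely on, assuming such a module is on the import path:

import eval as eval_script    # alias avoids shadowing Python's builtin eval()
from eval import evaluate     # equivalent: import the function directly

# The call signature is project-specific, for example:
# val_info = eval_script.evaluate(model, dataset)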

Example 1: compute_validation_map

# Required module: import eval [as alias]
# Or: from eval import evaluate [as alias]
def compute_validation_map(epoch, iteration, yolact_net, dataset, log: Log = None):
    with torch.no_grad():
        # Switch to inference mode for evaluation
        yolact_net.eval()

        start = time.time()
        print()
        print("Computing validation mAP (this may take a while)...", flush=True)
        val_info = eval_script.evaluate(yolact_net, dataset, train_mode=True)
        end = time.time()

        if log is not None:
            log.log('val', val_info, elapsed=(end - start), epoch=epoch, iter=iteration)

        # Restore training mode before returning to the training loop
        yolact_net.train()
Author: dbolya | Project: yolact | Lines: 16 | Source file: train.py
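A hedged usage sketch of how this function might be invoked from a training loop; `validation_interval`, `val_dataset`, and `log` are illustrative names, not YOLACT's actual variables:

# Illustrative call site (names are assumptions):
if iteration % validation_interval == 0:
    compute_validation_map(epoch, iteration, yolact_net, val_dataset, log=log)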

Example 2: train_and_eval

# Required module: import eval [as alias]
# Or: from eval import evaluate [as alias]
def train_and_eval(net, train_loader, val_loader, optimizer, loss_fn, metrics, params, model_dir, restore=None):
    """
    Train a model and evaluate it after every epoch.
    net: the model
    train_loader/val_loader: the data loaders
    params: the hyperparameters parsed from a JSON file
    restore: if given, resume training from a checkpoint
    """
    best_val_acc = 0.0
    if restore is not None:
        # `args` is the module-level argparse namespace in the original script
        restore_file = os.path.join(args.param_path, args.resume_path + '_pth.tar')
        logging.info("Loading checkpoint from: {}".format(restore_file))
        utils.load_checkpoint(restore_file, net, optimizer)

    for ep in range(params.num_epochs):
        logging.info("Running epoch: {}/{}".format(ep + 1, params.num_epochs))

        # train for one epoch
        train(net, train_loader, loss_fn, params, metrics, optimizer)

        val_metrics = evaluate(net, val_loader, loss_fn, params, metrics)

        val_acc = val_metrics['accuracy']
        isbest = val_acc >= best_val_acc

        utils.save_checkpoint({"epoch": ep, "state_dict": net.state_dict(), "optimizer": optimizer.state_dict()},
                              isBest=isbest, ckpt_dir=model_dir)

        if isbest:
            # new best accuracy: record the metrics in best_model_params.json
            logging.info("New best accuracy found!")
            best_val_acc = val_acc
            best_json_path = os.path.join(model_dir, "best_model_params.json")
            utils.save_dict_to_json(val_metrics, best_json_path)

        last_acc_path = os.path.join(model_dir, 'last_acc_metrics.json')
        utils.save_dict_to_json(val_metrics, last_acc_path)
Author: aicaffeinelife | Project: Pytorch-STN | Lines: 39 | Source file: train.py
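Example 2's evaluate comes from the project's own eval module, whose body is not shown here. Below is a minimal sketch consistent with the call site above: the signature and the 'accuracy' key are inferred from how it is called, while the body (averaging a dict of metric callables over the validation loader) is an assumption, not the project's actual implementation:

import torch

def evaluate(net, val_loader, loss_fn, params, metrics):
    # Illustrative only: average the loss and each metric in the
    # `metrics` dict of callables over the validation loader.
    net.eval()
    totals = {name: 0.0 for name in metrics}
    totals['loss'] = 0.0
    num_batches = 0
    with torch.no_grad():
        for data, labels in val_loader:
            output = net(data)
            totals['loss'] += loss_fn(output, labels).item()
            for name, metric_fn in metrics.items():
                totals[name] += metric_fn(output, labels)
            num_batches += 1
    return {name: total / max(num_batches, 1) for name, total in totals.items()}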

Example 3: evaluate_batch

# Required module: import eval [as alias]
# Or: from eval import evaluate [as alias]
def evaluate_batch(config, model_para, model_qg, sess_para, sess_qg, num_batches, eval_file, iterator,
                   id2word, map_to_orig, evaluate_func=evaluate):
    answer_dict = {}
    losses = []
    next_element = iterator.get_next()
    for _ in tqdm(range(1, num_batches + 1)):
        para, para_unk, ques, labels, pos_tags, ner_tags, qa_id = sess_para.run(next_element)
        # First session: encode the paragraph with BERT
        para_emb = sess_para.run(model_para.bert_emb, feed_dict={model_para.input_ids: para_unk})
        # Second session: run the question-generation model on the embeddings
        loss, symbols, probs = sess_qg.run([model_qg.loss, model_qg.symbols, model_qg.probs],
                                           feed_dict={
                                               model_qg.para: para, model_qg.bert_para: para_emb,
                                               model_qg.que: ques, model_qg.labels: labels,
                                               model_qg.pos_tags: pos_tags,
                                               model_qg.ner_tags: ner_tags, model_qg.qa_id: qa_id,
                                               model_qg.temperature: config.temperature,
                                               model_qg.diverse_rate: config.diverse_rate
                                           })
        # Map decoded token ids back to text, keeping the first answer per id
        answer_dict_ = convert_tokens_seq(eval_file, qa_id, symbols, probs, id2word, map_to_orig)
        for key in answer_dict_:
            if key not in answer_dict:
                answer_dict[key] = answer_dict_[key]
        losses.append(loss)
    loss = np.mean(losses)
    metrics = evaluate_func(eval_file, answer_dict)
    metrics["loss"] = loss
    return metrics
Author: ZhangShiyue | Project: QGforQA | Lines: 28 | Source file: main.py
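The evaluate_func default above is the module's evaluate, following an (eval_file, answer_dict) -> metrics-dict convention. A minimal sketch of that contract follows; the structure of eval_file (qa_id mapped to a record containing a gold question) and the exact-match metric are assumptions for illustration, not the repository's implementation:

def evaluate(eval_file, answer_dict):
    # Illustrative contract: compare each generated question in
    # answer_dict (qa_id -> text) against a gold reference in eval_file.
    exact = sum(answer_dict[qa_id].strip() == eval_file[qa_id]['question'].strip()
                for qa_id in answer_dict)
    return {'exact_match': 100.0 * exact / max(len(answer_dict), 1)}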

Example 4: main

# Required module: import eval [as alias]
# Or: from eval import evaluate [as alias]
def main(_):
  tf.logging.set_verbosity(tf.logging.INFO)
  assert FLAGS.checkpoint_dir, "Flag 'checkpoint_dir' must be set."
  assert FLAGS.eval_dir, "Flag 'eval_dir' must be set."

  # Apply gin configuration from files and/or inline parameter bindings
  if FLAGS.config_file:
    for config_file in FLAGS.config_file:
      gin.parse_config_file(config_file)
  if FLAGS.params:
    gin.parse_config(FLAGS.params)

  # `eval_` is the project's eval module, aliased to avoid shadowing builtin eval()
  eval_.evaluate(FLAGS.checkpoint_dir, FLAGS.eval_dir)
Author: generalized-iou | Project: g-tensorflow-models | Lines: 14 | Source file: run_eval.py

Example 5: run_evaluate

# Required module: import eval [as alias]
# Or: from eval import evaluate [as alias]
def run_evaluate(args, model, eval_features, device, eval_examples, tokenizer, best_dev_score=None):
    logger.info("***** Running predictions *****")
    logger.info("  Num orig examples = %d", len(eval_examples))
    logger.info("  Num split examples = %d", len(eval_features))
    logger.info("  Batch size = %d", args.predict_batch_size)

    all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
    all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)

    eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index)
    if args.local_rank == -1:
        eval_sampler = SequentialSampler(eval_data)
    else:
        eval_sampler = DistributedSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.predict_batch_size)

    model.eval()
    all_results = []
    logger.info("Start evaluating")
    for input_ids, input_mask, segment_ids, example_indices in tqdm(eval_dataloader, desc="Evaluating"):
        if len(all_results) % 1000 == 0:
            logger.info("Processing example: %d" % (len(all_results)))
        input_ids = input_ids.to(device)
        input_mask = input_mask.to(device)
        segment_ids = segment_ids.to(device)
        with torch.no_grad():
            batch_start_logits, batch_end_logits = model(input_ids, segment_ids, input_mask)
        for i, example_index in enumerate(example_indices):
            start_logits = batch_start_logits[i].detach().cpu().tolist()
            end_logits = batch_end_logits[i].detach().cpu().tolist()
            eval_feature = eval_features[example_index.item()]
            unique_id = int(eval_feature.unique_id)
            all_results.append(RawResult(unique_id=unique_id,
                                         start_logits=start_logits,
                                         end_logits=end_logits))
    output_prediction_file = os.path.join(args.output_dir, "predictions.json")
    output_nbest_file = os.path.join(args.output_dir, "nbest_predictions.json")
    predict, nbest = write_predictions(eval_examples, eval_features, all_results,
                                       args.n_best_size, args.max_answer_length,
                                       args.do_lower_case, output_prediction_file,
                                       output_nbest_file, args.verbose_logging)
    with open(args.predict_file) as dataset_file:
        dataset_json = json.load(dataset_file)
        dataset = dataset_json['data']
    dev_score = evaluate(dataset, predict, tokenizer)['f1']
    logger.info('Dev score : {}'.format(dev_score))

    # Predictions are always written; the checkpoint is saved only when
    # the dev F1 improves on the best score seen so far.
    if best_dev_score is not None and dev_score > best_dev_score:
        logger.info('Saving model with dev score: {}'.format(dev_score))
        best_dev_score = dev_score
        write_to_file(predict, nbest, output_prediction_file, output_nbest_file)
        torch.save(model.state_dict(), args.finetuned_checkpoint)
    else:
        write_to_file(predict, nbest, output_prediction_file, output_nbest_file)
    return best_dev_score 
Author: eva-n27 | Project: BERT-for-Chinese-Question-Answering | Lines: 59 | Source file: run_squad.py
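Example 5 expects a SQuAD-style evaluate that maps the dataset and a prediction dict to a metrics dict containing an 'f1' key. A minimal sketch of that contract is below; the tokenizer-based token-overlap F1 is an assumption suited to the Chinese QA setting, not the repository's actual implementation:

from collections import Counter

def evaluate(dataset, predictions, tokenizer):
    # Illustrative contract only: token-level F1 averaged over questions.
    f1_total, count = 0.0, 0
    for article in dataset:
        for paragraph in article['paragraphs']:
            for qa in paragraph['qas']:
                count += 1
                pred = tokenizer.tokenize(predictions.get(qa['id'], ''))
                gold = tokenizer.tokenize(qa['answers'][0]['text'])
                overlap = sum((Counter(pred) & Counter(gold)).values())
                if overlap == 0:
                    continue
                p, r = overlap / len(pred), overlap / len(gold)
                f1_total += 2 * p * r / (p + r)
    return {'f1': 100.0 * f1_total / max(count, 1)}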


Note: The eval.evaluate examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub/MSDocs. The snippets are drawn from open-source projects and their copyright remains with the original authors; consult each project's license before distributing or reusing the code. Please do not republish without permission.