

Python eval.evaluate Method Code Examples

This article collects typical usage examples of the Python eval.evaluate method, drawn from open-source projects. If you are wondering what eval.evaluate does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore other usage examples from the eval module.


Five code examples of the eval.evaluate method are shown below, ordered roughly by popularity.
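
All five examples follow the same basic pattern: the module is a project-local eval.py, which shadows Python's built-in eval() and is therefore usually imported under an alias. Below is a minimal sketch of that pattern; the call is a placeholder, since each project defines its own evaluate() signature (see the examples that follow).

# Minimal sketch of the shared import pattern (assumes a project-local eval.py on sys.path).
import eval as eval_script  # aliased so it does not shadow the builtin eval()

# Hypothetical call, given some `model` and `dataset` defined by the project;
# the real arguments and return value vary from example to example below.
results = eval_script.evaluate(model, dataset)
print(results)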

Example 1: compute_validation_map

# Required import: import eval [as an alias]
# Or equivalently: from eval import evaluate [as an alias]
import time

import torch

import eval as eval_script    # project-local eval.py, aliased to avoid the builtin eval()
from utils.logger import Log  # yolact's logging helper

def compute_validation_map(epoch, iteration, yolact_net, dataset, log: Log = None):
    with torch.no_grad():
        yolact_net.eval()  # switch to inference mode

        start = time.time()
        print()
        print("Computing validation mAP (this may take a while)...", flush=True)
        val_info = eval_script.evaluate(yolact_net, dataset, train_mode=True)
        end = time.time()

        if log is not None:
            log.log('val', val_info, elapsed=(end - start), epoch=epoch, iter=iteration)

        yolact_net.train()  # restore training mode before resuming training
Author: dbolya, Project: yolact, Lines: 16, Source: train.py
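
For context, a hedged sketch of how this helper is typically invoked from yolact's training loop; the names validation_epoch, net, val_dataset, and log below are placeholders rather than the repo's exact variables.

# Hypothetical call site inside the training loop (placeholder names):
if epoch > 0 and epoch % validation_epoch == 0:
    compute_validation_map(epoch, iteration, net, val_dataset, log=log)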

Example 2: train_and_eval

# Required import: import eval [as an alias]
# Or equivalently: from eval import evaluate [as an alias]
import os
import logging

import utils                 # project-local helpers for checkpoints and JSON metrics
from eval import evaluate    # project-local eval.py

# Note: train(...) and the global `args` are defined elsewhere in the original train.py.

def train_and_eval(net, train_loader, val_loader, optimizer, loss_fn, metrics, params, model_dir, restore=None):
    """
    Train the model and evaluate it after every epoch.

    net: the model
    train_loader/val_loader: the data loaders
    params: the hyperparameters parsed from a JSON file
    restore: if not None, resume from a checkpoint (path built from the global `args`)
    """
    best_val_acc = 0.0
    if restore is not None:
        restore_file = os.path.join(args.param_path, args.resume_path + '_pth.tar')
        logging.info("Loading checkpoint from: {}".format(restore_file))
        utils.load_checkpoint(restore_file, net, optimizer)

    for ep in range(params.num_epochs):
        logging.info("Running epoch: {}/{}".format(ep + 1, params.num_epochs))

        # train for one epoch
        train(net, train_loader, loss_fn, params, metrics, optimizer)

        val_metrics = evaluate(net, val_loader, loss_fn, params, metrics)

        val_acc = val_metrics['accuracy']
        isbest = val_acc >= best_val_acc

        utils.save_checkpoint({"epoch": ep, "state_dict": net.state_dict(), "optimizer": optimizer.state_dict()},
                              isBest=isbest, ckpt_dir=model_dir)

        if isbest:
            # if this is the best accuracy so far, record the metrics in best_model_params.json
            logging.info("New best accuracy found!")
            best_val_acc = val_acc
            best_json_path = os.path.join(model_dir, "best_model_params.json")
            utils.save_dict_to_json(val_metrics, best_json_path)

        last_acc_path = os.path.join(model_dir, 'last_acc_metrics.json')
        utils.save_dict_to_json(val_metrics, last_acc_path)
Author: aicaffeinelife, Project: Pytorch-STN, Lines: 39, Source: train.py
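
As a hedged illustration of the metrics argument: in training templates of this style, metrics is usually a dict mapping a metric name to a function of (outputs, labels). The accuracy helper below is an assumption for illustration, not code taken from Pytorch-STN.

import numpy as np

def accuracy(outputs, labels):
    # outputs: (batch, num_classes) scores as a numpy array; labels: (batch,) integer class ids
    preds = np.argmax(outputs, axis=1)
    return float(np.mean(preds == labels))

# evaluate() can then look metrics up by name, e.g. metrics['accuracy'](outputs, labels).
metrics = {'accuracy': accuracy}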

Example 3: evaluate_batch

# Required import: import eval [as an alias]
# Or equivalently: from eval import evaluate [as an alias]
import numpy as np
from tqdm import tqdm

from eval import evaluate  # project-local eval.py; used as the default scorer

# Note: convert_tokens_seq is defined elsewhere in the original main.py.

def evaluate_batch(config, model_para, model_qg, sess_para, sess_qg, num_batches, eval_file, iterator,
                   id2word, map_to_orig, evaluate_func=evaluate):
    answer_dict = {}
    losses = []
    next_element = iterator.get_next()
    for _ in tqdm(range(1, num_batches + 1)):
        para, para_unk, ques, labels, pos_tags, ner_tags, qa_id = sess_para.run(next_element)
        # First session: compute BERT embeddings for the paragraph tokens.
        para_emb = sess_para.run(model_para.bert_emb, feed_dict={model_para.input_ids: para_unk})
        # Second session: run the question-generation model on the embedded paragraph.
        loss, symbols, probs = sess_qg.run([model_qg.loss, model_qg.symbols, model_qg.probs],
                                           feed_dict={
                                               model_qg.para: para, model_qg.bert_para: para_emb,
                                               model_qg.que: ques, model_qg.labels: labels,
                                               model_qg.pos_tags: pos_tags,
                                               model_qg.ner_tags: ner_tags, model_qg.qa_id: qa_id,
                                               model_qg.temperature: config.temperature,
                                               model_qg.diverse_rate: config.diverse_rate
                                           })
        # Decode generated token ids back to text, keyed by question id.
        answer_dict_ = convert_tokens_seq(eval_file, qa_id, symbols, probs, id2word, map_to_orig)
        for key in answer_dict_:
            if key not in answer_dict:
                answer_dict[key] = answer_dict_[key]
        losses.append(loss)
    loss = np.mean(losses)
    metrics = evaluate_func(eval_file, answer_dict)
    metrics["loss"] = loss
    return metrics
Author: ZhangShiyue, Project: QGforQA, Lines: 28, Source: main.py

Example 4: main

# Required import: import eval [as an alias]
# Or equivalently: from eval import evaluate [as an alias]
import gin
import tensorflow as tf

import eval as eval_  # project-local eval.py, aliased to avoid the builtin eval()

FLAGS = tf.flags.FLAGS  # flag definitions live elsewhere in the original run_eval.py

def main(_):
  tf.logging.set_verbosity(tf.logging.INFO)
  assert FLAGS.checkpoint_dir, "Flag 'checkpoint_dir' must be set."
  assert FLAGS.eval_dir, "Flag 'eval_dir' must be set."

  # Apply gin configuration from files and/or inline parameter bindings.
  if FLAGS.config_file:
    for config_file in FLAGS.config_file:
      gin.parse_config_file(config_file)
  if FLAGS.params:
    gin.parse_config(FLAGS.params)

  eval_.evaluate(FLAGS.checkpoint_dir, FLAGS.eval_dir)
Author: generalized-iou, Project: g-tensorflow-models, Lines: 14, Source: run_eval.py
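
For context, here is a hedged sketch of the flag definitions that main() expects; the originals live elsewhere in run_eval.py and may differ from these assumed definitions.

# Hypothetical flag definitions (TF 1.x style; the originals are in run_eval.py):
import tensorflow as tf

tf.flags.DEFINE_string('checkpoint_dir', None, 'Directory containing model checkpoints.')
tf.flags.DEFINE_string('eval_dir', None, 'Directory to write evaluation summaries to.')
tf.flags.DEFINE_multi_string('config_file', None, 'Path(s) to gin config file(s).')
tf.flags.DEFINE_multi_string('params', None, 'Inline gin parameter bindings.')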

Example 5: run_evaluate

# Required import: import eval [as an alias]
# Or equivalently: from eval import evaluate [as an alias]
import os
import json
import logging

import torch
from torch.utils.data import TensorDataset, DataLoader, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm

from eval import evaluate  # project-local eval.py (SQuAD-style scorer)

# Note: RawResult, write_predictions and write_to_file are defined elsewhere in run_squad.py.
logger = logging.getLogger(__name__)

def run_evaluate(args, model, eval_features, device, eval_examples, tokenizer, best_dev_score=None):
    logger.info("***** Running predictions *****")
    logger.info("  Num orig examples = %d", len(eval_examples))
    logger.info("  Num split examples = %d", len(eval_features))
    logger.info("  Batch size = %d", args.predict_batch_size)

    all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
    all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
    all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
    all_example_index = torch.arange(all_input_ids.size(0), dtype=torch.long)

    eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_example_index)
    if args.local_rank == -1:
        eval_sampler = SequentialSampler(eval_data)
    else:
        eval_sampler = DistributedSampler(eval_data)
    eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.predict_batch_size)

    model.eval()
    all_results = []
    logger.info("Start evaluating")
    for input_ids, input_mask, segment_ids, example_indices in tqdm(eval_dataloader, desc="Evaluating"):
        if len(all_results) % 1000 == 0:
            logger.info("Processing example: %d" % (len(all_results)))
        input_ids = input_ids.to(device)
        input_mask = input_mask.to(device)
        segment_ids = segment_ids.to(device)
        with torch.no_grad():
            batch_start_logits, batch_end_logits = model(input_ids, segment_ids, input_mask)
        for i, example_index in enumerate(example_indices):
            start_logits = batch_start_logits[i].detach().cpu().tolist()
            end_logits = batch_end_logits[i].detach().cpu().tolist()
            eval_feature = eval_features[example_index.item()]
            unique_id = int(eval_feature.unique_id)
            all_results.append(RawResult(unique_id=unique_id,
                                         start_logits=start_logits,
                                         end_logits=end_logits))
    output_prediction_file = os.path.join(args.output_dir, "predictions.json")
    output_nbest_file = os.path.join(args.output_dir, "nbest_predictions.json")
    predict, nbest = write_predictions(eval_examples, eval_features, all_results,
                                       args.n_best_size, args.max_answer_length,
                                       args.do_lower_case, output_prediction_file,
                                       output_nbest_file, args.verbose_logging)
    with open(args.predict_file) as dataset_file:
        dataset_json = json.load(dataset_file)
        dataset = dataset_json['data']
    dev_score = evaluate(dataset, predict, tokenizer)['f1']
    logger.info('Dev score : {}'.format(dev_score))

    # Predictions are written either way; the model checkpoint is saved only on improvement.
    if best_dev_score is not None and dev_score > best_dev_score:
        logger.info('Saving model with dev score: {}'.format(dev_score))
        best_dev_score = dev_score
        write_to_file(predict, nbest, output_prediction_file, output_nbest_file)
        torch.save(model.state_dict(), args.finetuned_checkpoint)
    else:
        write_to_file(predict, nbest, output_prediction_file, output_nbest_file)
    return best_dev_score
Author: eva-n27, Project: BERT-for-Chinese-Question-Answering, Lines: 59, Source: run_squad.py
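
As a hedged aside, RawResult in BERT-style run_squad.py scripts is conventionally a plain namedtuple; the definition below follows that convention and is an assumption about this repo rather than a verbatim excerpt.

import collections

# Conventional definition in BERT-style SQuAD scripts (assumed here):
RawResult = collections.namedtuple("RawResult",
                                   ["unique_id", "start_logits", "end_logits"])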


Note: the eval.evaluate examples in this article were compiled by 纯净天空 from open-source code hosted on platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors, and copyright of the source code remains with those authors; consult each project's License before redistributing or reusing the code. Do not reproduce this article without permission.