

Python evaluation.evaluate Method Code Examples

This article collects typical usage examples of the Python evaluation.evaluate method. If you have been wondering what evaluation.evaluate does or how to use it, the curated examples below should help. You can also explore further usage examples from the evaluation module.


The following are 9 code examples of the evaluation.evaluate method, sorted by popularity by default.
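
Note that `evaluation` in these examples is a project-local module, not a PyPI package, so the signature of `evaluate` varies from project to project. As a purely hypothetical sketch of the shared convention (none of these names are a real, importable API):

# evaluation.py -- hypothetical stub illustrating the convention;
# every project below ships its own, differently-shaped version.
def evaluate(examples, model, **kwargs):
    """Run `model` over `examples` and return aggregate metrics."""
    correct = sum(1 for ex in examples if model.predict(ex) == ex.label)
    return {'accuracy': correct / max(len(examples), 1)}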

Example 1: test

# Required module import: import evaluation [as alias]
# Or: from evaluation import evaluate [as alias]
def test(args):
    test_set = Dataset.from_bin_file(args.test_file)
    assert args.load_model

    print('load model from [%s]' % args.load_model, file=sys.stderr)
    params = torch.load(args.load_model, map_location=lambda storage, loc: storage)
    transition_system = params['transition_system']
    saved_args = params['args']
    saved_args.cuda = args.cuda
    # set the correct domain from saved arg
    args.lang = saved_args.lang

    parser_cls = Registrable.by_name(args.parser)
    parser = parser_cls.load(model_path=args.load_model, cuda=args.cuda)
    parser.eval()
    evaluator = Registrable.by_name(args.evaluator)(transition_system, args=args)
    eval_results, decode_results = evaluation.evaluate(test_set.examples, parser, evaluator, args,
                                                       verbose=args.verbose, return_decode_result=True)
    print(eval_results, file=sys.stderr)
    if args.save_decode_to:
        pickle.dump(decode_results, open(args.save_decode_to, 'wb')) 
Developer: pcyin, Project: tranX, Lines: 23, Source: exp.py
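
One small caveat about the snippet above: the final line relies on the garbage collector to close the file handle. A more robust variant (a sketch, not the original tranX code) would use a context manager:

import pickle

# Equivalent to the last line above, but closes the file deterministically.
if args.save_decode_to:
    with open(args.save_decode_to, 'wb') as f:
        pickle.dump(decode_results, f)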

Example 2: main

# Required module import: import evaluation [as alias]
# Or: from evaluation import evaluate [as alias]
def main():
  flags = parse_flags()
  hparams = parse_hparams(flags.hparams)

  if flags.mode == 'train':
    train.train(model_name=flags.model, hparams=hparams,
                class_map_path=flags.class_map_path,
                train_csv_path=flags.train_csv_path,
                train_clip_dir=flags.train_clip_dir,
                train_dir=flags.train_dir)

  elif flags.mode == 'eval':
    evaluation.evaluate(model_name=flags.model, hparams=hparams,
                        class_map_path=flags.class_map_path,
                        eval_csv_path=flags.eval_csv_path,
                        eval_clip_dir=flags.eval_clip_dir,
                        checkpoint_path=flags.checkpoint_path)

  else:
    assert flags.mode == 'inference'
    inference.predict(model_name=flags.model, hparams=hparams,
                      class_map_path=flags.class_map_path,
                      test_clip_dir=flags.test_clip_dir,
                      checkpoint_path=flags.checkpoint_path,
                      predictions_csv_path=flags.predictions_csv_path) 
Developer: DCASE-REPO, Project: dcase2018_baseline, Lines: 27, Source: main.py
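
Assuming `parse_flags()` exposes each attribute as a same-named command-line flag (the snippet itself does not show the flag definitions), an eval run might be launched like this:

# Hypothetical invocation; flag names are inferred from the attributes above.
# python main.py --mode eval --model cnn \
#     --class_map_path class_map.csv --eval_csv_path eval.csv \
#     --eval_clip_dir eval_clips/ --checkpoint_path train_dir/model.ckpt-100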

Example 3: main

# Required module import: import evaluation [as alias]
# Or: from evaluation import evaluate [as alias]
def main(argv):
  hparams = model.parse_hparams(flags.hparams)

  if flags.mode == 'train':
    def split_csv(scopes):
      return scopes.split(',') if scopes else None
    train.train(model_name=flags.model, hparams=hparams,
                class_map_path=flags.class_map_path,
                train_csv_path=flags.train_csv_path,
                train_clip_dir=flags.train_clip_dir,
                train_dir=flags.train_dir,
                epoch_batches=flags.epoch_num_batches,
                warmstart_checkpoint=flags.warmstart_checkpoint,
                warmstart_include_scopes=split_csv(flags.warmstart_include_scopes),
                warmstart_exclude_scopes=split_csv(flags.warmstart_exclude_scopes))

  elif flags.mode == 'eval':
    evaluation.evaluate(model_name=flags.model, hparams=hparams,
                        class_map_path=flags.class_map_path,
                        eval_csv_path=flags.eval_csv_path,
                        eval_clip_dir=flags.eval_clip_dir,
                        eval_dir=flags.eval_dir,
                        train_dir=flags.train_dir)

  else:
    assert flags.mode == 'inference'
    inference.predict(model_name=flags.model, hparams=hparams,
                      class_map_path=flags.class_map_path,
                      inference_clip_dir=flags.inference_clip_dir,
                      inference_checkpoint=flags.inference_checkpoint,
                      predictions_csv_path=flags.predictions_csv_path) 
Developer: DCASE-REPO, Project: dcase2019_task2_baseline, Lines: 33, Source: runner.py

Example 4: main

# Required module import: import evaluation [as alias]
# Or: from evaluation import evaluate [as alias]
def main():
    global args
    args = parser.parse_args()

    if not os.path.exists(args.output_path):
        comm = 'python extract_feat.py \
                --arch {} \
                --batch-size {} \
                --input-size {} \
                --feature-dim {} \
                --load-path {} \
                --bin-file {} \
                --output-path {}'\
                .format(args.arch, args.batch_size, args.input_size, args.feature_dim,
                        args.load_path, args.bin_file, args.output_path)
        print(' '.join(comm.split()))
        os.system(comm)

    features = np.load(args.output_path).reshape(-1, args.feature_dim)
    _, lbs = bin_loader(args.bin_file)
    print('feature shape: {}'.format(features.shape))
    assert features.shape[0] == 2 * len(lbs), "{} vs {}".format(
        features.shape[0], 2 * len(lbs))

    features = normalize(features)
    _, _, acc, val, val_std, far = evaluate(features,
                                            lbs,
                                            nrof_folds=args.nfolds,
                                            distance_metric=0)
    print("accuracy: {:.4f}({:.4f})".format(acc.mean(), acc.std())) 
Developer: yl-1993, Project: hfsoftmax, Lines: 32, Source: eval.py
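
The `normalize` helper above is not shown in the snippet; for face-verification features it is almost always row-wise L2 normalization, so that cosine similarity reduces to a dot product. A plausible sketch (the actual hfsoftmax helper may differ):

import numpy as np

def normalize(features, eps=1e-10):
    """L2-normalize each row of a (num_samples, feature_dim) array."""
    norms = np.linalg.norm(features, axis=1, keepdims=True)
    return features / np.maximum(norms, eps)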

Example 5: validate

# Required module import: import evaluation [as alias]
# Or: from evaluation import evaluate [as alias]
def validate(val_loader, model, criterion, epoch, loss_weight, train_len, tb_logger, count):
    raise NotImplementedError  # validation is disabled; the code below is unreachable
    num_tasks = len(val_loader)
    losses = [AverageMeter(args.val.average_stats) for k in range(num_tasks)]

    # switch to evaluate mode
    model.eval()

    start = time.time()
    for i, all_in in enumerate(zip(*tuple(val_loader))):
        input, target = zip(*[all_in[k] for k in range(num_tasks)])

        slice_pt = 0
        slice_idx = [0]
        for l in [p.size(0) for p in input]:
            slice_pt += l
            slice_idx.append(slice_pt)

        input = torch.cat(tuple(input), dim=0)

        target = [tg.cuda() for tg in target]
        input_var = torch.autograd.Variable(input.cuda(), volatile=True)
        target_var = [torch.autograd.Variable(tg, volatile=True) for tg in target]

        # measure accuracy and record loss
        loss = model(input_var, target_var, slice_idx)

        for k in range(num_tasks):
            if torch.__version__ >= '1.1.0':
                losses[k].update(loss[k].item())
            else:
                losses[k].update(loss[k].data[0])

    log('Test epoch #{}    Time {}'.format(epoch, time.time() - start))
    for k in range(num_tasks):
        log(' * Task: #{0}    Loss {loss.avg:.4f}'.format(k, loss=losses[k]))

    for k in range(num_tasks):
        tb_logger.add_scalar('val_loss_{}'.format(k), losses[k].val, count[0]) 
Developer: XiaohangZhan, Project: face_recognition_framework, Lines: 41, Source: main.py
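
`AverageMeter` is assumed but not defined in the snippet. A minimal sketch of the common PyTorch-example pattern, with the single constructor argument the call site passes (the project's own version may track a sliding window instead of a running mean):

class AverageMeter(object):
    """Tracks the latest value and a running average of everything seen."""
    def __init__(self, length=0):
        self.length = length  # window size in the real project; unused here
        self.reset()

    def reset(self):
        self.val = 0.0
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count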

Example 6: train

# Required module import: import evaluation [as alias]
# Or: from evaluation import evaluate [as alias]
def train(self,
            train_input_fn,
            run_eval_after_train=False,
            eval_input_fn=None):
    """Run distributed training on Mask RCNN model."""

    self._save_config()
    run_config = self.build_strategy_configuration()
    params = self.build_model_parameters('train', run_config)
    tf.logging.info(params)
    train_estimator = self.build_mask_rcnn_estimator(params, run_config,
                                                     'train')
    if self._model_params.use_tpu:
      train_estimator.train(
          input_fn=train_input_fn, max_steps=self._model_params.total_steps)
    else:
      # As MirroredStrategy only supports `train_and_evaluate`, for training,
      # we pass dummy `eval_spec`.
      train_spec = tf.estimator.TrainSpec(
          input_fn=train_input_fn, max_steps=self._model_params.total_steps)
      eval_spec = tf.estimator.EvalSpec(input_fn=tf.data.Dataset)
      tf.estimator.train_and_evaluate(train_estimator, train_spec, eval_spec)

    eval_results = None
    if not run_eval_after_train:
      return eval_results

    if eval_input_fn is None:
      raise ValueError('Eval input_fn must be passed to conduct '
                       'evaluation after training.')

    eval_params = self.build_model_parameters('eval', run_config)
    eval_estimator = self.build_mask_rcnn_estimator(eval_params, run_config,
                                                    'eval')
    eval_results, predictions = evaluation.evaluate(
        eval_estimator, eval_input_fn, self._model_params.eval_samples,
        self._model_params.eval_batch_size, self._model_params.include_mask,
        self._model_params.val_json_file)

    output_dir = os.path.join(self._flags.model_dir, 'eval')
    tf.gfile.MakeDirs(output_dir)
    # Summary writer writes out eval metrics.
    summary_writer = tf.summary.FileWriter(output_dir)
    self._write_summary(summary_writer, eval_results, predictions,
                        self._model_params.total_steps)
    summary_writer.close()

    return eval_results 
Developer: artyompal, Project: tpu_models, Lines: 50, Source: distributed_executer.py
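
`_write_summary` is referenced here and in the next two examples but never shown. Based on how it is called, a plausible sketch using the TF1 summary API (the real method may also serialize `predictions`):

import tensorflow as tf

def _write_summary(self, summary_writer, eval_results, predictions, current_step):
    """Log each eval metric as a scalar summary at the given global step."""
    del predictions  # not used in this sketch
    summary = tf.Summary(value=[
        tf.Summary.Value(tag=name, simple_value=float(value))
        for name, value in eval_results.items()])
    summary_writer.add_summary(summary, current_step)
    summary_writer.flush()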

Example 7: eval

# Required module import: import evaluation [as alias]
# Or: from evaluation import evaluate [as alias]
def eval(self, eval_input_fn):
    """Run distributed eval on Mask RCNN model."""

    output_dir = os.path.join(self._flags.model_dir, 'eval')
    tf.gfile.MakeDirs(output_dir)

    # Summary writer writes out eval metrics.
    summary_writer = tf.summary.FileWriter(output_dir)
    run_config = self.build_strategy_configuration()
    eval_params = self.build_model_parameters('eval', run_config)
    eval_estimator = self.build_mask_rcnn_estimator(eval_params, run_config,
                                                    'eval')

    def _terminate_eval():
      tf.logging.info('Terminating eval after %d seconds of '
                      'no checkpoints' % self._flags.eval_timeout)
      return True

    eval_results = None
    # Run evaluation when there's a new checkpoint
    for ckpt in tf.contrib.training.checkpoints_iterator(
        self._flags.model_dir,
        min_interval_secs=self._flags.min_eval_interval,
        timeout=self._flags.eval_timeout,
        timeout_fn=_terminate_eval):
      # Terminate eval job when final checkpoint is reached
      current_step = int(os.path.basename(ckpt).split('-')[1])

      tf.logging.info('Starting to evaluate.')
      try:
        eval_results, predictions = evaluation.evaluate(
            eval_estimator, eval_input_fn, self._model_params.eval_samples,
            self._model_params.eval_batch_size, self._model_params.include_mask,
            self._model_params.val_json_file)
        self._write_summary(summary_writer, eval_results, predictions,
                            current_step)

        if current_step >= self._model_params.total_steps:
          tf.logging.info('Evaluation finished after training step %d' %
                          current_step)
          break
      except tf.errors.NotFoundError:
        # Since the coordinator is on a different job than the TPU worker,
        # sometimes the TPU worker does not finish initializing until long after
        # the CPU job tells it to start evaluating. In this case, the checkpoint
        # file could have been deleted already.
        tf.logging.info('Checkpoint %s no longer exists, skipping checkpoint' %
                        ckpt)
    summary_writer.close()
    return eval_results 
Developer: artyompal, Project: tpu_models, Lines: 52, Source: distributed_executer.py
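
The step extraction inside the checkpoint loop assumes TensorFlow's default checkpoint naming scheme:

import os

# 'model.ckpt-1234' -> global step 1234; this parse breaks if the
# checkpoint basename contains extra dashes or a custom prefix.
ckpt = '/tmp/model_dir/model.ckpt-1234'
current_step = int(os.path.basename(ckpt).split('-')[1])  # 1234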

Example 8: train_and_eval

# Required module import: import evaluation [as alias]
# Or: from evaluation import evaluate [as alias]
def train_and_eval(self, train_input_fn, eval_input_fn):
    """Run distributed train and eval on Mask RCNN model."""

    self._save_config()
    output_dir = os.path.join(self._flags.model_dir, 'eval')
    tf.gfile.MakeDirs(output_dir)
    summary_writer = tf.summary.FileWriter(output_dir)

    run_config = self.build_strategy_configuration()
    train_params = self.build_model_parameters('train', run_config)
    eval_params = self.build_model_parameters('eval', run_config)
    train_estimator = self.build_mask_rcnn_estimator(train_params, run_config,
                                                     'train')
    eval_estimator = self.build_mask_rcnn_estimator(eval_params, run_config,
                                                    'eval')

    num_cycles = int(self._model_params.total_steps /
                     self._model_params.num_steps_per_eval)
    for cycle in range(num_cycles):
      tf.logging.info('Start training cycle %d.' % cycle)
      train_estimator.train(
          input_fn=train_input_fn, steps=self._model_params.num_steps_per_eval)

      tf.logging.info('Start evaluation cycle %d.' % cycle)
      eval_results, predictions = evaluation.evaluate(
          eval_estimator, eval_input_fn, self._model_params.eval_samples,
          self._model_params.eval_batch_size, self._model_params.include_mask,
          self._model_params.val_json_file)

      current_step = int(cycle * self._model_params.num_steps_per_eval)
      self._write_summary(summary_writer, eval_results, predictions,
                          current_step)

    tf.logging.info('Starting training cycle %d.' % num_cycles)
    train_estimator.train(
        input_fn=train_input_fn, max_steps=self._model_params.total_steps)
    eval_results, predictions = evaluation.evaluate(
        eval_estimator, eval_input_fn, self._model_params.eval_samples,
        self._model_params.eval_batch_size, self._model_params.include_mask,
        self._model_params.val_json_file)
    self._write_summary(summary_writer, eval_results, predictions,
                        self._model_params.total_steps)
    summary_writer.close()
    return eval_results 
Developer: artyompal, Project: tpu_models, Lines: 46, Source: distributed_executer.py

Example 9: evaluation

# Required module import: import evaluation [as alias]
# Or: from evaluation import evaluate [as alias]
def evaluation(test_loader, model, num, outfeat_fn, benchmark):
    load_feat = True
    if not os.path.isfile(outfeat_fn) or not load_feat:
        features = extract(test_loader, model, num, outfeat_fn, silent=True)
    else:
        print("loading from: {}".format(outfeat_fn))
        features = np.fromfile(outfeat_fn, dtype=np.float32).reshape(-1, args.model.feature_dim)

    if benchmark == "megaface":
        r = test_megaface(features)
        log(' * Megaface: 1e-6 [{}], 1e-5 [{}], 1e-4 [{}]'.format(r[-1], r[-2], r[-3]))
        return r[-1]
    else:
        features = normalize(features)
        _, lbs = bin_loader("{}/{}.bin".format(args.test.test_root, benchmark))
        _, _, acc, val, val_std, far = evaluate(
            features, lbs, nrof_folds=args.test.nfolds, distance_metric=0)
    
        log(" * {}: accuracy: {:.4f}({:.4f})".format(benchmark, acc.mean(), acc.std()))
        return acc.mean()


#def evaluation_old(test_loader, model, num, outfeat_fn, benchmark):
#    load_feat = False
#    if not os.path.isfile(outfeat_fn) or not load_feat:
#        features = extract(test_loader, model, num, outfeat_fn)
#    else:
#        log("Loading features: {}".format(outfeat_fn))
#        features = np.fromfile(outfeat_fn, dtype=np.float32).reshape(-1, args.model.feature_dim)
#
#    if benchmark == "megaface":
#        r = test.test_megaface(features)
#        log(' * Megaface: 1e-6 [{}], 1e-5 [{}], 1e-4 [{}]'.format(r[-1], r[-2], r[-3]))
#        with open(outfeat_fn[:-4] + ".txt", 'w') as f:
#            f.write(' * Megaface: 1e-6 [{}], 1e-5 [{}], 1e-4 [{}]'.format(r[-1], r[-2], r[-3]))
#        return r[-1]
#    elif benchmark == "ijba":
#        r = test.test_ijba(features)
#        log(' * IJB-A: {} [{}], {} [{}], {} [{}]'.format(r[0][0], r[0][1], r[1][0], r[1][1], r[2][0], r[2][1]))
#        with open(outfeat_fn[:-4] + ".txt", 'w') as f:
#            f.write(' * IJB-A: {} [{}], {} [{}], {} [{}]'.format(r[0][0], r[0][1], r[1][0], r[1][1], r[2][0], r[2][1]))
#        return r[2][1]
#    elif benchmark == "lfw":
#        r = test.test_lfw(features)
#        log(' * LFW: mean: {} std: {}'.format(r[0], r[1]))
#        with open(outfeat_fn[:-4] + ".txt", 'w') as f:
#            f.write(' * LFW: mean: {} std: {}'.format(r[0], r[1]))
#        return r[0] 
Developer: XiaohangZhan, Project: face_recognition_framework, Lines: 50, Source: main.py
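
`bin_loader` is used by Examples 4 and 9 but not shown. The `.bin` files here follow the LFW-style verification format, where each label says whether a pair of images matches; this is why Example 4 asserts `features.shape[0] == 2 * len(lbs)`. A plausible loader, assuming the common pickled `(image_bins, issame_labels)` layout (the project's version may differ):

import pickle

def bin_loader(path):
    """Load an LFW-style verification set: image blobs plus per-pair labels."""
    with open(path, 'rb') as f:
        bins, issame_list = pickle.load(f, encoding='bytes')
    return bins, issame_list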


Note: The evaluation.evaluate method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Refer to each project's License before distributing or using the code; do not reproduce without permission.