

Python predict.PredictConfig Method Code Examples

This article compiles typical usage examples of the Python method tensorpack.predict.PredictConfig. If you are wondering what predict.PredictConfig does, how to call it, or what real-world code that uses it looks like, the curated examples below should help. You can also browse further usage examples from the tensorpack.predict module.


The following presents 12 code examples of predict.PredictConfig, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code examples.
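Before the examples, here is a minimal usage sketch (not taken from any of the projects below): build a PredictConfig around a model, point session_init at a checkpoint, and wrap it in an OfflinePredictor. The model class MyModel, the checkpoint path, and the tensor names 'input' and 'prob' are placeholders for illustration; adapt them to your own graph.

# Minimal sketch; `MyModel` is a hypothetical tensorpack ModelDesc subclass whose
# graph defines an input tensor named "input" and an output tensor named "prob".
import numpy as np
from tensorpack import OfflinePredictor, PredictConfig, SmartInit

pred_config = PredictConfig(
    model=MyModel(),                                # placeholder ModelDesc subclass
    session_init=SmartInit('/path/to/checkpoint'),  # restore trained weights
    input_names=['input'],
    output_names=['prob'],
)
predictor = OfflinePredictor(pred_config)
prob = predictor(np.zeros((1, 224, 224, 3), dtype=np.float32))[0]  # probabilities for one batch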

Example 1: eval_classification

# Required module import: from tensorpack import predict [as alias]
# Or: from tensorpack.predict import PredictConfig [as alias]
def eval_classification(model, sessinit, dataflow):
    """
    Eval a classification model on the dataset. It assumes the model inputs are
    named "input" and "label", and contains "wrong-top1" and "wrong-top5" in the graph.
    """
    pred_config = PredictConfig(
        model=model,
        session_init=sessinit,
        input_names=['input', 'label'],
        output_names=['wrong-top1', 'wrong-top5']
    )
    acc1, acc5 = RatioCounter(), RatioCounter()

    # This does not have a visible improvement over naive predictor,
    # but will have an improvement if image_dtype is set to float32.
    pred = FeedfreePredictor(pred_config, StagingInput(QueueInput(dataflow), device='/gpu:0'))
    for _ in tqdm.trange(dataflow.size()):
        top1, top5 = pred()
        batch_size = top1.shape[0]
        acc1.feed(top1.sum(), batch_size)
        acc5.feed(top5.sum(), batch_size)

    print("Top1 Error: {}".format(acc1.ratio))
    print("Top5 Error: {}".format(acc5.ratio)) 
Developer: tensorpack, Project: benchmarks, Lines of code: 26, Source file: imagenet_utils.py
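For context, a hypothetical call to eval_classification might look like the sketch below; MyImageNetModel, get_val_dataflow, and the paths are placeholders standing in for whatever the surrounding project defines, not part of the original example.

# Hypothetical invocation sketch for the example above.
from tensorpack import SmartInit

model = MyImageNetModel()                                       # placeholder model class
val_df = get_val_dataflow('/path/to/ILSVRC12', batch_size=64)   # placeholder dataflow helper
eval_classification(model, SmartInit('/path/to/checkpoint'), val_df)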

Example 2: __init__

# Required module import: from tensorpack import predict [as alias]
# Or: from tensorpack.predict import PredictConfig [as alias]
def __init__(self, config):
        """
        Args:
            config (PredictConfig): the config to use.
        """
        self._input_names = config.input_names
        self.graph = config._maybe_create_graph()
        with self.graph.as_default():
            input = PlaceholderInput()
            input.setup(config.input_signature)
            with TowerContext('', is_training=False):
                config.tower_func(*input.get_input_tensors())

            input_tensors = get_tensors_by_names(config.input_names)
            output_tensors = get_tensors_by_names(config.output_names)

            config.session_init._setup_graph()
            self.saver = tf.train.Saver()
            init_op = [tf.global_variables_initializer(), tf.local_variables_initializer()]
            self.sess = config.session_creator.create_session()
            self.sess.run(init_op)
            config.session_init._run_init(self.sess)
            super(OfflinePredictorWithSaver, self).__init__(
                input_tensors, output_tensors, config.return_input, self.sess) 
Developer: microsoft, Project: petridishnn, Lines of code: 26, Source file: critic.py

Example 3: critic_predictor

# Required module import: from tensorpack import predict [as alias]
# Or: from tensorpack.predict import PredictConfig [as alias]
def critic_predictor(ctrl, model_dir, vs_name):
    """
    Create an OfflinePredictorWithSaver for test-time use.
    """
    model = critic_factory(ctrl, is_train=False, vs_name=vs_name)
    output_names = ['{}/predicted_accuracy:0'.format(vs_name)]
    session_config = None
    if ctrl.critic_type == CriticTypes.LSTM:
        session_config = tf.ConfigProto(device_count={'GPU': 0})
    pred_config = PredictConfig(
        model=model,
        input_names=model.input_names,
        output_names=output_names,
        session_creator=NewSessionCreator(config=session_config)
    )
    if model_dir:
        ckpt = tf.train.latest_checkpoint(model_dir)
        logger.info("Loading {} predictor from {}".format(vs_name, ckpt))
        if ckpt:
            pred_config.session_init = SaverRestore(ckpt)
    predictor = OfflinePredictorWithSaver(pred_config)
    return predictor 
Developer: microsoft, Project: petridishnn, Lines of code: 24, Source file: critic.py

Example 4: eval_on_ILSVRC12

# Required module import: from tensorpack import predict [as alias]
# Or: from tensorpack.predict import PredictConfig [as alias]
def eval_on_ILSVRC12(model, sessinit, dataflow):
    pred_config = PredictConfig(
        model=model,
        session_init=sessinit,
        input_names=['input', 'label'],
        output_names=['wrong-top1', 'wrong-top5']
    )
    acc1, acc5 = RatioCounter(), RatioCounter()

    # This does not have a visible improvement over naive predictor,
    # but will have an improvement if image_dtype is set to float32.
    pred = FeedfreePredictor(pred_config, StagingInput(QueueInput(dataflow), device='/gpu:0'))
    for _ in tqdm.trange(dataflow.size()):
        top1, top5 = pred()
        batch_size = top1.shape[0]
        acc1.feed(top1.sum(), batch_size)
        acc5.feed(top5.sum(), batch_size)

    print("Top1 Error: {}".format(acc1.ratio))
    print("Top5 Error: {}".format(acc5.ratio)) 
Developer: ppwwyyxx, Project: GroupNorm-reproduce, Lines of code: 22, Source file: imagenet_utils.py

Example 5: eval_on_ILSVRC12

# Required module import: from tensorpack import predict [as alias]
# Or: from tensorpack.predict import PredictConfig [as alias]
def eval_on_ILSVRC12(model, sessinit, dataflow):
    pred_config = PredictConfig(
        model=model,
        session_init=sessinit,
        input_names=['input', 'label', 'input2', 'label2'],
        output_names=['wrong-top1', 'wrong-top5']
    )
    acc1, acc5 = RatioCounter(), RatioCounter()

    # This does not have a visible improvement over naive predictor,
    # but will have an improvement if image_dtype is set to float32.
    pred = FeedfreePredictor(pred_config, StagingInput(QueueInput(dataflow), device='/gpu:0'))
    for _ in tqdm.trange(dataflow.size()):
        top1, top5 = pred()
        batch_size = top1.shape[0]
        acc1.feed(top1.sum(), batch_size)
        acc5.feed(top5.sum(), batch_size)

    print("Top1 Error: {}".format(acc1.ratio))
    print("Top5 Error: {}".format(acc5.ratio)) 
Developer: qinenergy, Project: adanet, Lines of code: 22, Source file: imagenet_utils.py

Example 6: eval_on_ILSVRC12

# Required module import: from tensorpack import predict [as alias]
# Or: from tensorpack.predict import PredictConfig [as alias]
def eval_on_ILSVRC12(model, sessinit, dataflow):
    pred_config = PredictConfig(
        model=model,
        session_init=sessinit,
        input_names=['input', 'label'],
        output_names=['wrong-top1', 'wrong-top5']
    )
    pred = SimpleDatasetPredictor(pred_config, dataflow)
    acc1, acc5 = RatioCounter(), RatioCounter()
    for top1, top5 in pred.get_result():
        batch_size = top1.shape[0]
        acc1.feed(top1.sum(), batch_size)
        acc5.feed(top5.sum(), batch_size)
    print("Top1 Error: {}".format(acc1.ratio))
    print("Top5 Error: {}".format(acc5.ratio)) 
Developer: huawei-noah, Project: ghostnet, Lines of code: 17, Source file: imagenet_utils.py

Example 7: eval_on_ILSVRC12

# Required module import: from tensorpack import predict [as alias]
# Or: from tensorpack.predict import PredictConfig [as alias]
def eval_on_ILSVRC12(model, sessinit, dataflow):
    pred_config = PredictConfig(
        model=model,
        session_init=sessinit,
        input_names=['input', 'label'],
        output_names=['wrong-top1', 'wrong-top5', 'res-top5', 'label', 'logits']
    )
    pred = SimpleDatasetPredictor(pred_config, dataflow)
    acc1, acc5 = RatioCounter(), RatioCounter()
    top5s = []
    labels = []
    logits = []
    for top1, top5, top5_pred, label, logit in pred.get_result():  # renamed loop variable to avoid shadowing `pred`
        batch_size = top1.shape[0]
        acc1.feed(top1.sum(), batch_size)
        acc5.feed(top5.sum(), batch_size)
        top5s.extend(top5_pred.tolist())
        labels.extend(label.tolist())
        logits.extend(logit.tolist())
    with open("top5_resnet2x.json", "w") as f:
        json.dump(top5s, f)
    
    with open("labels_resnet2x.json", "w") as f:
        json.dump(labels, f)

    print("Top1 Error: {}".format(acc1.ratio))
    print("Top5 Error: {}".format(acc5.ratio))
    return acc1.ratio, acc5.ratio 
Developer: qinenergy, Project: webvision-2.0-benchmarks, Lines of code: 30, Source file: imagenet_utils.py

Example 8: run

# Required module import: from tensorpack import predict [as alias]
# Or: from tensorpack.predict import PredictConfig [as alias]
def run(self):

        if self.inf_auto_find_chkpt:
            print('-----Auto Selecting Checkpoint Basing On "%s" Through "%s" Comparison' % \
                        (self.inf_auto_metric, self.inf_auto_comparator))
            model_path, stat = get_best_chkpts(self.save_dir, self.inf_auto_metric, self.inf_auto_comparator)
            print('Selecting: %s' % model_path)
            print('Having Following Statistics:')
            for key, value in stat.items():
                print('\t%s: %s' % (key, value))
        else:
            model_path = self.inf_model_path

        model_constructor = self.get_model()
        pred_config = PredictConfig(
            model        = model_constructor(),
            session_init = get_model_loader(model_path),
            input_names  = self.eval_inf_input_tensor_names,
            output_names = self.eval_inf_output_tensor_names)
        predictor = OfflinePredictor(pred_config)

        save_dir = self.inf_output_dir
        file_list = glob.glob('%s/*%s' % (self.inf_data_dir, self.inf_imgs_ext))
        file_list.sort() # ensure same order

        rm_n_mkdir(save_dir)       
        for filename in file_list:
            filename = os.path.basename(filename)
            basename = filename.split('.')[0]
            print(self.inf_data_dir, basename, end=' ', flush=True)

            ##
            img = cv2.imread(self.inf_data_dir + filename)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

            ##
            pred_map = self.__gen_prediction(img, predictor)
            sio.savemat('%s/%s.mat' % (save_dir, basename), {'result':[pred_map]})
            print('FINISH')

#### 
Developer: vqdang, Project: hover_net, Lines of code: 43, Source file: infer.py

Example 9: critic_predict_dataflow

# Required module import: from tensorpack import predict [as alias]
# Or: from tensorpack.predict import PredictConfig [as alias]
def critic_predict_dataflow(ctrl, data, log_dir, model_dir, vs_name):
    """
    Prediction on a dataflow, used for testing a large batch of data
    """
    ckpt = tf.train.latest_checkpoint(model_dir)
    if not ckpt:
        outputs = [0] * len(data[0])
        logger.info("No model exists. Do not sort")
        return outputs
    model = critic_factory(ctrl, is_train=False, vs_name=vs_name)
    ds_val = critic_dataflow_factory(ctrl, data, is_train=False)
    output_names = ['{}/predicted_accuracy:0'.format(vs_name)]

    session_config = None
    if ctrl.critic_type == CriticTypes.LSTM:
        session_config = tf.ConfigProto(device_count={'GPU': 0})
    pred_config = PredictConfig(
        model=model,
        input_names=model.input_names,
        output_names=output_names,
        session_creator=NewSessionCreator(config=session_config),
        session_init=SaverRestore(ckpt)
    )

    #with tf.Graph().as_default():
    predictor = SimpleDatasetPredictor(pred_config, ds_val)
    outputs = []
    for o in predictor.get_result():
        outputs.extend(o[0])
    return outputs 
Developer: microsoft, Project: petridishnn, Lines of code: 32, Source file: critic.py

Example 10: create_predict_config

# Required module import: from tensorpack import predict [as alias]
# Or: from tensorpack.predict import PredictConfig [as alias]
def create_predict_config(self, session_init):
        """
        Returns:
            a :class:`PredictConfig` to be used for inference.
            The predictor will take inputs and return probabilities.

        Examples:

            pred = OfflinePredictor(model.create_predict_config(SmartInit(args.load)))
            prob = pred(NCHW_image)[0]  # Nx1000 probabilities
        """
        return PredictConfig(model=self, input_names=['input'], output_names=['prob'], session_init=session_init) 
Developer: tensorpack, Project: tensorpack, Lines of code: 14, Source file: imagenet_utils.py

Example 11: test

# Required module import: from tensorpack import predict [as alias]
# Or: from tensorpack.predict import PredictConfig [as alias]
def test(net,
         session_init,
         val_dataflow,
         do_calc_flops=False,
         extended_log=False):
    """
    Main test routine.

    Parameters:
    ----------
    net : obj
        Model.
    session_init : SessionInit
        Session initializer.
    val_dataflow : DataFlow
        Validation dataflow.
    do_calc_flops : bool, default False
        Whether to calculate the model FLOPs.
    extended_log : bool, default False
        Whether to log more precise accuracy values.
    """
    pred_config = PredictConfig(
        model=net,
        session_init=session_init,
        input_names=["input", "label"],
        output_names=["wrong-top1", "wrong-top5"]
    )
    err_top1 = RatioCounter()
    err_top5 = RatioCounter()

    tic = time.time()
    pred = FeedfreePredictor(pred_config, StagingInput(QueueInput(val_dataflow), device="/gpu:0"))

    for _ in tqdm.trange(val_dataflow.size()):
        err_top1_val, err_top5_val = pred()
        batch_size = err_top1_val.shape[0]
        err_top1.feed(err_top1_val.sum(), batch_size)
        err_top5.feed(err_top5_val.sum(), batch_size)

    err_top1_val = err_top1.ratio
    err_top5_val = err_top5.ratio

    if extended_log:
        logging.info("Test: err-top1={top1:.4f} ({top1})\terr-top5={top5:.4f} ({top5})".format(
            top1=err_top1_val, top5=err_top5_val))
    else:
        logging.info("Test: err-top1={top1:.4f}\terr-top5={top5:.4f}".format(
            top1=err_top1_val, top5=err_top5_val))
    logging.info("Time cost: {:.4f} sec".format(
        time.time() - tic))

    if do_calc_flops:
        calc_flops(model=net) 
Developer: osmr, Project: imgclsmob, Lines of code: 53, Source file: eval_tf.py

Example 12: evaluate_rcnn

# Required module import: from tensorpack import predict [as alias]
# Or: from tensorpack.predict import PredictConfig [as alias]
def evaluate_rcnn(model_name, paper_arxiv_id, cfg_list, model_file):
    evaluator = COCOEvaluator(
        root=COCO_ROOT, model_name=model_name, paper_arxiv_id=paper_arxiv_id
    )
    category_id_to_coco_id = {
        v: k for k, v in COCODetection.COCO_id_to_category_id.items()
    }

    cfg.update_args(cfg_list)  # TODO backup/restore config
    finalize_configs(False)
    MODEL = ResNetFPNModel() if cfg.MODE_FPN else ResNetC4Model()
    predcfg = PredictConfig(
        model=MODEL,
        session_init=SmartInit(model_file),
        input_names=MODEL.get_inference_tensor_names()[0],
        output_names=MODEL.get_inference_tensor_names()[1],
    )
    predictor = OfflinePredictor(predcfg)

    def xyxy_to_xywh(box):
        box[2] -= box[0]
        box[3] -= box[1]
        return box

    df = get_eval_dataflow("coco_val2017")
    df.reset_state()
    for img, img_id in tqdm.tqdm(df, total=len(df)):
        results = predict_image(img, predictor)
        res = [
            {
                "image_id": img_id,
                "category_id": category_id_to_coco_id.get(
                    int(r.class_id), int(r.class_id)
                ),
                "bbox": xyxy_to_xywh([round(float(x), 4) for x in r.box]),
                "score": round(float(r.score), 3),
            }
            for r in results
        ]
        evaluator.add(res)
        if evaluator.cache_exists:
            break

    evaluator.save() 
Developer: tensorpack, Project: tensorpack, Lines of code: 46, Source file: sotabench.py


Note: The tensorpack.predict.PredictConfig examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors. Please consult each project's license before distributing or reusing the code; do not reproduce this article without permission.