This article collects typical usage examples of the Python class tensorpack.utils.stats.RatioCounter. If you are unsure what stats.RatioCounter does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also browse further usage examples from tensorpack.utils.stats, the module in which it is defined.

The following presents 9 code examples of stats.RatioCounter, sorted by popularity by default.
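Before the examples themselves, here is a minimal, self-contained sketch of the pattern they all share: a RatioCounter is fed a per-batch error count together with the batch size via feed(count, total), and the accumulated error rate is read back from the .ratio property. This is only a toy illustration; the NumPy arrays are made-up stand-ins for the "wrong-top1" indicator vectors that the predictors in the examples return, and only the RatioCounter calls reflect the API used below.

import numpy as np
from tensorpack.utils.stats import RatioCounter

err_top1 = RatioCounter()

# Hypothetical per-batch "wrong-top1" vectors (1 = misclassified sample),
# standing in for the predictor outputs used in the real examples.
fake_batches = [np.array([0, 1, 0, 0]), np.array([1, 1, 0, 1])]

for wrong in fake_batches:
    # feed(number of errors in this batch, batch size)
    err_top1.feed(wrong.sum(), wrong.shape[0])

print("Top1 Error: {}".format(err_top1.ratio))  # 4 errors out of 8 samples -> 0.5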
Example 1: eval_classification
# Required import: from tensorpack.utils import stats [as alias]
# Or: from tensorpack.utils.stats import RatioCounter [as alias]
def eval_classification(model, sessinit, dataflow):
    """
    Eval a classification model on the dataset. It assumes the model inputs are
    named "input" and "label", and contains "wrong-top1" and "wrong-top5" in the graph.
    """
    pred_config = PredictConfig(
        model=model,
        session_init=sessinit,
        input_names=['input', 'label'],
        output_names=['wrong-top1', 'wrong-top5']
    )
    acc1, acc5 = RatioCounter(), RatioCounter()
    # This does not have a visible improvement over naive predictor,
    # but will have an improvement if image_dtype is set to float32.
    pred = FeedfreePredictor(pred_config, StagingInput(QueueInput(dataflow), device='/gpu:0'))
    for _ in tqdm.trange(dataflow.size()):
        top1, top5 = pred()
        batch_size = top1.shape[0]
        acc1.feed(top1.sum(), batch_size)
        acc5.feed(top5.sum(), batch_size)
    print("Top1 Error: {}".format(acc1.ratio))
    print("Top5 Error: {}".format(acc5.ratio))
Example 2: eval_on_ILSVRC12
# Required import: from tensorpack.utils import stats [as alias]
# Or: from tensorpack.utils.stats import RatioCounter [as alias]
def eval_on_ILSVRC12(model_file, data_dir):
    ds = get_data('val')
    pred_config = PredictConfig(
        model=Model(),
        session_init=get_model_loader(model_file),
        input_names=['input', 'label'],
        output_names=['wrong-top1', 'wrong-top5']
    )
    pred = SimpleDatasetPredictor(pred_config, ds)
    acc1, acc5 = RatioCounter(), RatioCounter()
    for o in pred.get_result():
        batch_size = o[0].shape[0]
        acc1.feed(o[0].sum(), batch_size)
        acc5.feed(o[1].sum(), batch_size)
    print("Top1 Error: {}".format(acc1.ratio))
    print("Top5 Error: {}".format(acc5.ratio))
Example 3: eval
# Required import: from tensorpack.utils import stats [as alias]
# Or: from tensorpack.utils.stats import RatioCounter [as alias]
def eval(model_file, path, k, max_eval=None):
    df_val = get_data(os.path.join(path, 'go_val.lmdb'), shuffle=True, isTrain=False)
    if max_eval:
        df_val = FixedSizeData(df_val, max_eval)
    pred_config = PredictConfig(
        model=Model(k, add_wrong=True),
        session_init=get_model_loader(model_file),
        input_names=['feature_planes', 'labels', 'labels_2d'],
        output_names=['wrong-top1', 'wrong-top5']
    )
    pred = SimpleDatasetPredictor(pred_config, df_val)
    acc1, acc5 = RatioCounter(), RatioCounter()
    try:
        for o in pred.get_result():
            batch_size = o[0].shape[0]
            acc1.feed(o[0].sum(), batch_size)
            acc5.feed(o[1].sum(), batch_size)
    except Exception as e:
        print(e)
        from IPython import embed
        embed()
    err1 = acc1.ratio * 100
    err5 = acc5.ratio * 100
    print("Top1 Accuracy: {0:.2f}% Error: {1:.2f}% Random-Guess: ~0.44%".format(100 - err1, err1))
    print("Top5 Accuracy: {0:.2f}% Error: {1:.2f}% Random-Guess: ~2.00%".format(100 - err5, err5))
Example 4: eval_on_ILSVRC12
# Required import: from tensorpack.utils import stats [as alias]
# Or: from tensorpack.utils.stats import RatioCounter [as alias]
def eval_on_ILSVRC12(model, sessinit, dataflow):
    pred_config = PredictConfig(
        model=model,
        session_init=sessinit,
        input_names=['input', 'label'],
        output_names=['wrong-top1', 'wrong-top5']
    )
    acc1, acc5 = RatioCounter(), RatioCounter()
    # This does not have a visible improvement over naive predictor,
    # but will have an improvement if image_dtype is set to float32.
    pred = FeedfreePredictor(pred_config, StagingInput(QueueInput(dataflow), device='/gpu:0'))
    for _ in tqdm.trange(dataflow.size()):
        top1, top5 = pred()
        batch_size = top1.shape[0]
        acc1.feed(top1.sum(), batch_size)
        acc5.feed(top5.sum(), batch_size)
    print("Top1 Error: {}".format(acc1.ratio))
    print("Top5 Error: {}".format(acc5.ratio))
Example 5: eval_on_ILSVRC12
# Required import: from tensorpack.utils import stats [as alias]
# Or: from tensorpack.utils.stats import RatioCounter [as alias]
def eval_on_ILSVRC12(model, sessinit, dataflow):
    pred_config = PredictConfig(
        model=model,
        session_init=sessinit,
        input_names=['input', 'label', 'input2', 'label2'],
        output_names=['wrong-top1', 'wrong-top5']
    )
    acc1, acc5 = RatioCounter(), RatioCounter()
    # This does not have a visible improvement over naive predictor,
    # but will have an improvement if image_dtype is set to float32.
    pred = FeedfreePredictor(pred_config, StagingInput(QueueInput(dataflow), device='/gpu:0'))
    for _ in tqdm.trange(dataflow.size()):
        top1, top5 = pred()
        batch_size = top1.shape[0]
        acc1.feed(top1.sum(), batch_size)
        acc5.feed(top5.sum(), batch_size)
    print("Top1 Error: {}".format(acc1.ratio))
    print("Top5 Error: {}".format(acc5.ratio))
Example 6: eval_on_ILSVRC12
# Required import: from tensorpack.utils import stats [as alias]
# Or: from tensorpack.utils.stats import RatioCounter [as alias]
def eval_on_ILSVRC12(model, sessinit, dataflow):
    pred_config = PredictConfig(
        model=model,
        session_init=sessinit,
        input_names=['input', 'label'],
        output_names=['wrong-top1', 'wrong-top5']
    )
    pred = SimpleDatasetPredictor(pred_config, dataflow)
    acc1, acc5 = RatioCounter(), RatioCounter()
    for top1, top5 in pred.get_result():
        batch_size = top1.shape[0]
        acc1.feed(top1.sum(), batch_size)
        acc5.feed(top5.sum(), batch_size)
    print("Top1 Error: {}".format(acc1.ratio))
    print("Top5 Error: {}".format(acc5.ratio))
Example 7: eval_on_ILSVRC12
# Required import: from tensorpack.utils import stats [as alias]
# Or: from tensorpack.utils.stats import RatioCounter [as alias]
def eval_on_ILSVRC12(model, sessinit, dataflow):
    pred_config = PredictConfig(
        model=model,
        session_init=sessinit,
        input_names=['input', 'label'],
        output_names=['wrong-top1', 'wrong-top5', 'res-top5', 'label', 'logits']
    )
    pred = SimpleDatasetPredictor(pred_config, dataflow)
    acc1, acc5 = RatioCounter(), RatioCounter()
    top5s = []
    labels = []
    logits = []
    for top1, top5, pred, label, logit in pred.get_result():
        batch_size = top1.shape[0]
        acc1.feed(top1.sum(), batch_size)
        acc5.feed(top5.sum(), batch_size)
        top5s.extend(pred.tolist())
        labels.extend(label.tolist())
        logits.extend(logit.tolist())
    with open("top5_resnet2x.json", "w") as f:
        json.dump(top5s, f)
    with open("labels_resnet2x.json", "w") as f:
        json.dump(labels, f)
    print("Top1 Error: {}".format(acc1.ratio))
    print("Top5 Error: {}".format(acc5.ratio))
    return acc1.ratio, acc5.ratio
Example 8: test
# Required import: from tensorpack.utils import stats [as alias]
# Or: from tensorpack.utils.stats import RatioCounter [as alias]
def test(net,
         session_init,
         val_dataflow,
         do_calc_flops=False,
         extended_log=False):
    """
    Main test routine.

    Parameters
    ----------
    net : obj
        Model.
    session_init : SessionInit
        Session initializer.
    val_dataflow : DataFlow
        Validation data source.
    do_calc_flops : bool, default False
        Whether to calculate the model's FLOPs (via calc_flops).
    extended_log : bool, default False
        Whether to log more precise accuracy values.
    """
    pred_config = PredictConfig(
        model=net,
        session_init=session_init,
        input_names=["input", "label"],
        output_names=["wrong-top1", "wrong-top5"]
    )
    err_top1 = RatioCounter()
    err_top5 = RatioCounter()
    tic = time.time()
    pred = FeedfreePredictor(pred_config, StagingInput(QueueInput(val_dataflow), device="/gpu:0"))
    for _ in tqdm.trange(val_dataflow.size()):
        err_top1_val, err_top5_val = pred()
        batch_size = err_top1_val.shape[0]
        err_top1.feed(err_top1_val.sum(), batch_size)
        err_top5.feed(err_top5_val.sum(), batch_size)
    err_top1_val = err_top1.ratio
    err_top5_val = err_top5.ratio
    if extended_log:
        logging.info("Test: err-top1={top1:.4f} ({top1})\terr-top5={top5:.4f} ({top5})".format(
            top1=err_top1_val, top5=err_top5_val))
    else:
        logging.info("Test: err-top1={top1:.4f}\terr-top5={top5:.4f}".format(
            top1=err_top1_val, top5=err_top5_val))
    logging.info("Time cost: {:.4f} sec".format(
        time.time() - tic))
    if do_calc_flops:
        calc_flops(model=net)
Example 9: evaluate_ilsvrc
# Required import: from tensorpack.utils import stats [as alias]
# Or: from tensorpack.utils.stats import RatioCounter [as alias]
def evaluate_ilsvrc(args, subset, model_cls):
    ds = get_augmented_data.get_ilsvrc_augmented_data(subset, args, do_multiprocess=False)
    INPUT_SIZE = ILSVRC_DEFAULT_INPUT_SIZE
    model = model_cls(INPUT_SIZE, args)
    args.store_basename = None  # This is disabled for now; it used to help storing predictions

    output_names = []
    accs = []
    n_preds = 0
    if args.num_anytime_preds == 0:
        output_names.append('dummy_image_mean')
    else:
        for i, w in enumerate(model.weights):
            if w > 0:
                n_preds += 1
                scope_name = model.compute_scope_basename(i)
                scope_name = model.prediction_scope(scope_name)
                output_names.append('{}/wrong-top1'.format(scope_name))
                output_names.append('{}/wrong-top5'.format(scope_name))
                accs.extend([stats.RatioCounter(), stats.RatioCounter()])
                #output_names.append('{}/linear/output:0'.format(scope_name))
                if args.num_anytime_preds > 0 and n_preds >= args.num_anytime_preds:
                    break

    pred_config = PredictConfig(
        model=model,
        input_names=['input', 'label'],
        output_names=output_names
    )
    if args.load:
        pred_config.session_init = get_model_loader(args.load)
    pred = SimpleDatasetPredictor(pred_config, ds)

    if args.store_basename is not None:
        store_fn = args.store_basename + "_{}.bin".format(subset)
        f_store_out = open(store_fn, 'wb')

    n_batches = 0
    import time
    start_time = time.time()
    for o in pred.get_result():
        n_batches += 1
        if args.num_anytime_preds == 0:
            continue
        if args.store_basename is not None:
            preds = o[0]
            f_store_out.write(preds)
        batch_size = o[0].shape[0]
        for i, acc in enumerate(accs):
            acc.feed(o[i].sum(), batch_size)
    logger.info('Inference finished, time: {:.4f}sec'.format(time.time() - start_time))
    if args.num_anytime_preds != 0:
        for i, name in enumerate(output_names):
            logger.info("Name {}, RatioCount {}".format(name, accs[i].ratio))
    if args.store_basename is not None:
        f_store_out.close()