This article collects typical usage examples of the utils.evaluate method in Python. If you are wondering what exactly utils.evaluate does, how to call it, or what real uses of it look like, the curated code examples here may help. You can also explore further usage examples from the utils module it belongs to.
Three code examples of the utils.evaluate method are shown below, sorted by popularity by default.
Example 1: load_apilog
# Required module: import utils [as alias]
# Or: from utils import evaluate [as alias]
def load_apilog(self, log_fname, limit):
    # Read the log as text (entries are parsed as strings below) and drop
    # the empty trailing chunk left after the final newline.
    with open(log_fname, 'r') as f:
        data = f.read().split('\n')[:-1]
    # Entries come in IN/OUT pairs; discard a dangling unpaired line.
    if len(data) % 2 != 0:
        data = data[:-1]
    idx = 0
    apilogs = []
    while idx < len(data) and idx < limit * 2:
        if data[idx][:2] == 'IN':
            il = utils.evaluate(data[idx][2:])
        else:
            utils.error('load_apilog: parse IN error')
        if data[idx + 1][:3] == 'OUT':
            ol = utils.evaluate(data[idx + 1][3:])
        else:
            utils.error('load_apilog: parse OUT error')
        apilog = log.ApiLog(self.apis[il[0]], il, ol)
        apilogs.append(apilog)
        idx += 2
    return apilogs
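
In this example, utils.evaluate is a project-specific parser rather than a standard-library call: each log line carries an IN or OUT prefix followed by a serialized Python value. A minimal sketch of what such a helper could look like, assuming the values are repr'd Python literals (an assumption, not the project's confirmed implementation):

import ast

def evaluate(text):
    # Hypothetical stand-in for this project's utils.evaluate: turn one
    # serialized literal (e.g. "['api_name', 0, 'arg']") back into a value.
    return ast.literal_eval(text)

Under that reading, each IN/OUT pair deserializes into the il and ol lists, with il[0] naming the API that the resulting ApiLog record is attached to.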
Example 2: main
# Required module: import utils [as alias]
# Or: from utils import evaluate [as alias]
def main(args):
    """Run testing."""
    test_data = utils.read_data(args, "test")
    print("total test samples:%s" % test_data.num_examples)
    if args.random_other:
        print("warning, testing mode with 'random_other' will result in "
              "different results every run...")

    model = models.get_model(args, gpuid=args.gpuid)
    tfconfig = tf.ConfigProto(allow_soft_placement=True)
    tfconfig.gpu_options.allow_growth = True
    tfconfig.gpu_options.visible_device_list = "%s" % (
        ",".join(["%s" % i for i in [args.gpuid]]))

    with tf.Session(config=tfconfig) as sess:
        utils.initialize(load=True, load_best=args.load_best,
                         args=args, sess=sess)
        # load the graph and variables
        tester = models.Tester(model, args, sess)
        perf = utils.evaluate(test_data, args, sess, tester)

    print("performance:")
    numbers = []
    for k in sorted(perf.keys()):
        print("%s, %s" % (k, perf[k]))
        numbers.append("%s" % perf[k])
    print(" ".join(sorted(perf.keys())))
    print(" ".join(numbers))
Example 3: main
# Required module: import utils [as alias]
# Or: from utils import evaluate [as alias]
def main():
    args = parse_args()
    C = importlib.import_module(args.config).TrainConfig
    print("MODEL ID: {}".format(C.model_id))

    summary_writer = SummaryWriter(C.log_dpath)

    train_iter, val_iter, test_iter, vocab = build_loaders(C)

    model = build_model(C, vocab)

    optimizer = torch.optim.Adam(model.parameters(), lr=C.lr,
                                 weight_decay=C.weight_decay, amsgrad=True)
    lr_scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=C.lr_decay_gamma,
                                     patience=C.lr_decay_patience, verbose=True)

    best_val_scores = {'CIDEr': 0.}
    best_epoch = 0
    best_ckpt_fpath = None
    for e in range(1, C.epochs + 1):
        ckpt_fpath = C.ckpt_fpath_tpl.format(e)

        """ Train """
        print("\n")
        train_loss = train(e, model, optimizer, train_iter, vocab,
                           C.decoder.rnn_teacher_forcing_ratio, C.reg_lambda,
                           C.recon_lambda, C.gradient_clip)
        log_train(C, summary_writer, e, train_loss, get_lr(optimizer))

        """ Validation """
        val_loss = test(model, val_iter, vocab, C.reg_lambda, C.recon_lambda)
        val_scores = evaluate(val_iter, model, model.vocab)
        log_val(C, summary_writer, e, val_loss, val_scores)

        if e >= C.save_from and e % C.save_every == 0:
            print("Saving checkpoint at epoch={} to {}".format(e, ckpt_fpath))
            save_checkpoint(e, model, ckpt_fpath, C)

        if e >= C.lr_decay_start_from:
            lr_scheduler.step(val_loss['total'])

        if e == 1 or val_scores['CIDEr'] > best_val_scores['CIDEr']:
            best_epoch = e
            best_val_scores = val_scores
            best_ckpt_fpath = ckpt_fpath

    """ Test with Best Model """
    print("\n\n\n[BEST]")
    best_model = load_checkpoint(model, best_ckpt_fpath)
    test_scores = evaluate(test_iter, best_model, best_model.vocab)
    log_test(C, summary_writer, best_epoch, test_scores)
    save_checkpoint(best_epoch, best_model, C.ckpt_fpath_tpl.format("best"), C)
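
In this third example, evaluate scores generated captions on a data split and returns a metric dict keyed by names such as 'CIDEr', which both the best-model selection and the logging rely on. A minimal outline of its likely shape, where describe, to_sentence, and compute_caption_scores are hypothetical names introduced only for illustration:

import torch

def evaluate(data_iter, model, vocab):
    # Hypothetical outline: decode a caption per video, gather references,
    # and score them, returning e.g. {'CIDEr': ..., 'Bleu_4': ...}.
    model.eval()
    hypotheses, references = {}, {}
    with torch.no_grad():
        for vids, feats, caps in data_iter:              # assumed batch layout
            preds = model.describe(feats)                # assumed decode helper
            for vid, pred, cap in zip(vids, preds, caps):
                hypotheses[vid] = [to_sentence(pred, vocab)]
                references.setdefault(vid, []).append(cap)
    return compute_caption_scores(references, hypotheses)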