This article collects typical usage examples of the Python method mxnet.metric.Accuracy. If you have been wondering what metric.Accuracy does and how to call it, the curated examples below should help. You can also explore the containing module, mxnet.metric, for related functionality.
The following 14 code examples of metric.Accuracy are shown, ordered by popularity.
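Before the examples, here is a minimal sketch of the Accuracy API itself (this sketch is ours, not taken from the examples below): update() accumulates correct and total counts over batches, and get() returns a (name, value) pair. Labels are class indices; predictions may be indices or per-class scores, and 2-D scores are reduced with argmax.

import mxnet as mx
from mxnet import metric

acc = metric.Accuracy()
# Three samples with true classes 0, 1, 1; the score rows argmax to 0, 1, 1.
acc.update(labels=[mx.nd.array([0, 1, 1])],
           preds=[mx.nd.array([[0.9, 0.1], [0.2, 0.8], [0.4, 0.6]])])
name, value = acc.get()  # ('accuracy', 1.0)
acc.reset()              # clear accumulated state, e.g. between epochs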
Example 1: create_metrics
# Required import: from mxnet import metric [as alias]
# Or: from mxnet.metric import Accuracy [as alias]
def create_metrics():
    """
    Create the training metrics.
    :return: dict mapping metric names to mxnet.metric objects
    """
    # Loss here is assumed to be mxnet.metric.Loss
    metrics = {'Train-Xent-Src': Loss(),
               'Train-Xent-Tgt-l': Loss(),
               'Train-Xent-Tgt-Ul': Loss(),
               'Train-Aux-Src': Loss(),
               'Train-Aux-Tgt-l': Loss(),
               'Train-Aux-Tgt-Ul': Loss(),
               'Train-Cons-Src': Loss(),
               'Train-Cons-Tgt-l': Loss(),
               'Train-Cons-Tgt-Ul': Loss(),
               'Train-Total-Src': Loss(),
               'Train-Total-Tgt-l': Loss(),
               'Train-Total-Tgt-Ul': Loss(),
               'Train-Acc-Src': Accuracy(),
               'Train-Acc-Tgt-l': Accuracy(),
               'Train-Acc-Tgt-Ul': Accuracy()}
    return metrics
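A hedged sketch of how such a metrics dict is typically driven inside a training loop (the batch variables labels_src, logits_src, and loss_src are illustrative, not from the source). Accuracy.update takes (labels, preds); mxnet.metric.Loss.update ignores its first argument and averages the second:

# illustrative per-batch NDArrays: labels_src, logits_src, loss_src
metrics['Train-Acc-Src'].update([labels_src], [logits_src])
metrics['Train-Xent-Src'].update(0, [loss_src])  # Loss ignores the label argument
for name, m in metrics.items():
    print('%s: %.4f' % (name, m.get()[1]))
    m.reset()  # start fresh for the next epoch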
Example 2: gluon_random_data_run
# Required import: from mxnet import metric [as alias]
# Or: from mxnet.metric import Accuracy [as alias]
def gluon_random_data_run():
    mlflow.gluon.autolog()
    with mlflow.start_run() as run:
        data = DataLoader(LogsDataset(), batch_size=128, last_batch="discard")
        validation = DataLoader(LogsDataset(), batch_size=128, last_batch="discard")
        model = HybridSequential()
        model.add(Dense(64, activation="relu"))
        model.add(Dense(64, activation="relu"))
        model.add(Dense(10))
        model.initialize()
        model.hybridize()
        trainer = Trainer(model.collect_params(), "adam",
                          optimizer_params={"learning_rate": .001, "epsilon": 1e-07})
        est = estimator.Estimator(net=model, loss=SoftmaxCrossEntropyLoss(),
                                  metrics=Accuracy(), trainer=trainer)
        # the Gluon estimator emits noisy warnings; silence them for the run
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            est.fit(data, epochs=3, val_data=validation)
    client = mlflow.tracking.MlflowClient()
    return client.get_run(run.info.run_id)
Example 3: test_autolog_ends_auto_created_run
# Required import: from mxnet import metric [as alias]
# Or: from mxnet.metric import Accuracy [as alias]
def test_autolog_ends_auto_created_run():
    mlflow.gluon.autolog()
    data = DataLoader(LogsDataset(), batch_size=128, last_batch="discard")
    model = HybridSequential()
    model.add(Dense(64, activation="relu"))
    model.add(Dense(64, activation="relu"))
    model.add(Dense(10))
    model.initialize()
    model.hybridize()
    trainer = Trainer(model.collect_params(), "adam",
                      optimizer_params={"learning_rate": .001, "epsilon": 1e-07})
    est = estimator.Estimator(net=model, loss=SoftmaxCrossEntropyLoss(),
                              metrics=Accuracy(), trainer=trainer)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        est.fit(data, epochs=3)
    # autolog should have closed the run it created itself
    assert mlflow.active_run() is None
Example 4: test_autolog_persists_manually_created_run
# Required import: from mxnet import metric [as alias]
# Or: from mxnet.metric import Accuracy [as alias]
def test_autolog_persists_manually_created_run():
    mlflow.gluon.autolog()
    data = DataLoader(LogsDataset(), batch_size=128, last_batch="discard")
    with mlflow.start_run() as run:
        model = HybridSequential()
        model.add(Dense(64, activation="relu"))
        model.add(Dense(64, activation="relu"))
        model.add(Dense(10))
        model.initialize()
        model.hybridize()
        trainer = Trainer(model.collect_params(), "adam",
                          optimizer_params={"learning_rate": .001, "epsilon": 1e-07})
        est = estimator.Estimator(net=model, loss=SoftmaxCrossEntropyLoss(),
                                  metrics=Accuracy(), trainer=trainer)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            est.fit(data, epochs=3)
        # autolog must keep using the manually created run, not open a new one
        assert mlflow.active_run().info.run_id == run.info.run_id
Example 5: gluon_model
# Required import: from mxnet import metric [as alias]
# Or: from mxnet.metric import Accuracy [as alias]
def gluon_model(model_data):
    train_data, train_label, _ = model_data
    train_data_loader = DataLoader(list(zip(train_data, train_label)),
                                   batch_size=128, last_batch="discard")
    model = HybridSequential()
    model.add(Dense(128, activation="relu"))
    model.add(Dense(64, activation="relu"))
    model.add(Dense(10))
    model.initialize()
    model.hybridize()
    trainer = Trainer(model.collect_params(), "adam",
                      optimizer_params={"learning_rate": .001, "epsilon": 1e-07})
    est = estimator.Estimator(net=model, loss=SoftmaxCrossEntropyLoss(),
                              metrics=Accuracy(), trainer=trainer)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        est.fit(train_data_loader, epochs=3)
    return model
Example 6: save_checkpoint
# Required import: from mxnet import metric [as alias]
# Or: from mxnet.metric import Accuracy [as alias]
def save_checkpoint(epoch, top1, best_acc):
    # opt, net, and logger are module-level objects in the original training script
    if opt.save_frequency and (epoch + 1) % opt.save_frequency == 0:
        fname = os.path.join(opt.prefix, '%s_%d_acc_%.4f.params' % (opt.model, epoch, top1))
        net.save_parameters(fname)
        logger.info('[Epoch %d] Saving checkpoint to %s with Accuracy: %.4f', epoch, fname, top1)
    if top1 > best_acc[0]:
        best_acc[0] = top1
        fname = os.path.join(opt.prefix, '%s_best.params' % (opt.model))
        net.save_parameters(fname)
        logger.info('[Epoch %d] Saving checkpoint to %s with Accuracy: %.4f', epoch, fname, top1)
Example 7: eval_acc
# Required import: from mxnet import metric [as alias]
# Or: from mxnet.metric import Accuracy [as alias]
def eval_acc(inference, val_loader, ctx, return_meta=False):
    mtc_acc = Accuracy()
    mtc_acc.reset()
    feature_nest, y_nest, y_hat_nest = [], [], []
    for X, y in val_loader:
        X = X.as_in_context(ctx[0])
        y = y.as_in_context(ctx[0])
        with autograd.record(train_mode=False):
            y_hat, features = inference(X)
        # update metric
        mtc_acc.update([y], [y_hat])
        if return_meta:
            y_nest.extend(y.asnumpy())
            feature_nest.extend(features.asnumpy())
            y_hat_nest.extend(y_hat.asnumpy())
    feature_nest = np.array(feature_nest)
    y_nest = np.array(y_nest)
    y_hat_nest = np.array(y_hat_nest)
    if return_meta:
        return mtc_acc.get()[1], y_nest, y_hat_nest, feature_nest
    return mtc_acc.get()[1]
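A minimal usage sketch for eval_acc (the trained net and the Gluon DataLoader val_loader are assumptions; the network must return a (logits, features) pair):

import mxnet as mx
val_acc = eval_acc(net, val_loader, ctx=[mx.cpu()])
val_acc, labels, preds, feats = eval_acc(net, val_loader, ctx=[mx.cpu()], return_meta=True)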
Example 8: create_metrics
# Required import: from mxnet import metric [as alias]
# Or: from mxnet.metric import Accuracy [as alias]
def create_metrics():
    """
    Create the training metrics.
    :return: dict mapping metric names to mxnet.metric objects
    """
    metrics = {'Train-Xent-Src': Loss(),
               'Train-Xent-Tgt': Loss(),
               'Train-Acc-Src': Accuracy(),
               'Train-Acc-Tgt': Accuracy(),
               'Train-Aux-Src': Loss(),
               'Train-Aux-Tgt': Loss(),
               'Train-Total-Src': Loss(),
               'Train-Total-Tgt': Loss()}
    return metrics
Example 9: validate
# Required import: from mxnet import metric [as alias]
# Or: from mxnet.metric import Accuracy [as alias]
def validate(net, val_data, ctx, loss, plot=False):
    metric = mtc.Accuracy()
    val_loss = 0
    ebs = []
    lbs = []
    for i, batch in enumerate(val_data):
        data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0, even_split=False)
        labels = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0, even_split=False)
        # the network returns (embedding, logits) pairs
        ots = [net(X) for X in data]
        embedds = [ot[0] for ot in ots]
        outputs = [ot[1] for ot in ots]
        losses = [loss(yhat, y) for yhat, y in zip(outputs, labels)]
        metric.update(labels, outputs)
        val_loss += sum([l.mean().asscalar() for l in losses]) / len(losses)
        if plot:
            for es, ls in zip(embedds, labels):
                assert len(es) == len(ls)
                for idx in range(len(es)):
                    ebs.append(es[idx].asnumpy())
                    lbs.append(ls[idx].asscalar())
    if plot:
        ebs = np.vstack(ebs)
        lbs = np.hstack(lbs)
    _, val_acc = metric.get()
    return val_acc, val_loss / len(val_data), ebs, lbs
Example 10: validate
# Required import: from mxnet import metric [as alias]
# Or: from mxnet.metric import Accuracy [as alias]
def validate(net, val_data, ctx, loss, plot=False):
    metric = mtc.Accuracy()
    val_loss = 0
    ebs = []
    lbs = []
    for i, batch in enumerate(val_data):
        data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0, even_split=False)
        labels = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0, even_split=False)
        ots = [net(X) for X in data]
        embedds = [ot[0] for ot in ots]
        outputs = [ot[1] for ot in ots]
        # this variant's loss also takes the embedding as a third argument
        losses = [loss(yhat, y, emb) for yhat, y, emb in zip(outputs, labels, embedds)]
        metric.update(labels, outputs)
        val_loss += sum([l.mean().asscalar() for l in losses]) / len(losses)
        if plot:
            for es, ls in zip(embedds, labels):
                assert len(es) == len(ls)
                for idx in range(len(es)):
                    ebs.append(es[idx].asnumpy())
                    lbs.append(ls[idx].asscalar())
    if plot:
        ebs = np.vstack(ebs)
        lbs = np.hstack(lbs)
    _, val_acc = metric.get()
    return val_acc, val_loss / len(val_data), ebs, lbs
Example 11: validate
# Required import: from mxnet import metric [as alias]
# Or: from mxnet.metric import Accuracy [as alias]
def validate(net, val_data, ctx, loss, plot=False):
    metric = mtc.Accuracy()
    val_loss = 0
    ebs = []
    lbs = []
    for i, batch in enumerate(val_data):
        data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0, even_split=False)
        labels = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0, even_split=False)
        # here the network returns embeddings and the loss returns (loss, logits) pairs
        embedds = [net(X) for X in data]
        ots = [loss(emb, y) for emb, y in zip(embedds, labels)]
        losses = [ot[0] for ot in ots]
        outputs = [ot[1] for ot in ots]
        metric.update(labels, outputs)
        val_loss += sum([l.mean().asscalar() for l in losses]) / len(losses)
        if plot:
            for es, ls in zip(embedds, labels):
                assert len(es) == len(ls)
                for idx in range(len(es)):
                    ebs.append(es[idx].asnumpy())
                    lbs.append(ls[idx].asscalar())
    if plot:
        ebs = np.vstack(ebs)
        lbs = np.hstack(lbs)
    _, val_acc = metric.get()
    return val_acc, val_loss / len(val_data), ebs, lbs
Example 12: __init__
# Required import: from mxnet import metric [as alias]
# Or: from mxnet.metric import Accuracy [as alias]
def __init__(self):
    is_pair = False
    class_labels = ['0', '1']
    self.metric = Accuracy()
    super(ToySSTTask, self).__init__(class_labels, self.metric, is_pair)
Example 13: save_checkpoint
# Required import: from mxnet import metric [as alias]
# Or: from mxnet.metric import Accuracy [as alias]
def save_checkpoint(epoch, top1, best_acc):
    # save_params is the legacy Gluon API (pre-1.3); newer code calls
    # save_parameters instead (compare Example 6)
    if opt.save_frequency and (epoch + 1) % opt.save_frequency == 0:
        fname = os.path.join(opt.prefix, '%s_%d_acc_%.4f.params' % (opt.model, epoch, top1))
        net.save_params(fname)
        logger.info('[Epoch %d] Saving checkpoint to %s with Accuracy: %.4f', epoch, fname, top1)
    if top1 > best_acc[0]:
        best_acc[0] = top1
        fname = os.path.join(opt.prefix, '%s_best.params' % (opt.model))
        net.save_params(fname)
        logger.info('[Epoch %d] Saving checkpoint to %s with Accuracy: %.4f', epoch, fname, top1)
Example 14: eval
# Required import: from mxnet import metric [as alias]
# Or: from mxnet.metric import Accuracy [as alias]
def eval(self, inference, val_loader, log=True, target=True, epoch=True):
    """
    Evaluate the model.
    :param inference: network
    :param val_loader: data loader
    :param log: log flag
    :param target: target flag for updating the record and log
    :param epoch: epoch flag for updating the record and log
    :return: accuracy, labels, predictions, features
    """
    mtc_acc = Accuracy()
    mtc_acc.reset()
    # val_loader.reset()
    feature_nest, y_nest, y_hat_nest = [], [], []
    for X, Y in val_loader:
        X_lst = split_and_load(X, self.args.ctx, even_split=False)
        Y_lst = split_and_load(Y, self.args.ctx, even_split=False)
        for x, y in zip(X_lst, Y_lst):
            y_hat, features = inference(x)
            # update metric
            mtc_acc.update([y], [y_hat])
            y_nest.extend(y.asnumpy())
            feature_nest.extend(features.asnumpy())
            y_hat_nest.extend(y_hat.asnumpy())
    feature_nest = np.array(feature_nest)
    y_nest = np.array(y_nest).astype(int)
    y_hat_nest = np.array(y_hat_nest)
    if log:
        target_key = 'Tgt' if target else 'Src'
        epoch_key = 'Epoch' if epoch else 'Iter'
        record = self.cur_epoch if epoch else self.cur_iter
        # keep the best accuracy (and its predictions/features) seen so far
        if mtc_acc.get()[1] > self.records[epoch_key]['%s-Acc' % target_key]:
            if target:
                self.records[epoch_key][epoch_key] = record
                self.records[epoch_key]['%s-Acc' % target_key] = mtc_acc.get()[1]
                self.records[epoch_key]['%s-label' % target_key] = y_nest
                self.records[epoch_key]['%s-preds' % target_key] = y_hat_nest
                self.records[epoch_key]['%s-features' % target_key] = feature_nest
                self.save_params(inference, 0, epoch_key)
        self.logger.update_scalar('%s [%d]: Eval-Acc-%s' % (epoch_key, record, target_key), mtc_acc.get()[1])
        if self.sw:
            # use epoch_key here; the original formatted the boolean epoch flag
            self.sw.add_scalar('Acc/Eval-%s-Acc-%s' % (epoch_key, target_key), mtc_acc.get()[1], global_step=record)
    return mtc_acc.get()[1], y_nest, y_hat_nest, feature_nest