This article collects typical usage examples of the Python method mxnet.gluon.loss.SoftmaxCrossEntropyLoss. If you are unsure what SoftmaxCrossEntropyLoss does, how to call it, or what working code looks like, the curated examples below should help. You can also explore the containing module, mxnet.gluon.loss, for related functionality.
The sections below present 14 code examples of loss.SoftmaxCrossEntropyLoss, sorted by popularity by default.
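Before diving into the examples, here is a minimal standalone sketch of the loss itself. SoftmaxCrossEntropyLoss expects raw (unnormalized) logits and, by default, sparse integer class labels; it applies log-softmax internally and returns one loss value per sample:

import mxnet as mx
from mxnet.gluon.loss import SoftmaxCrossEntropyLoss

loss_fn = SoftmaxCrossEntropyLoss()            # sparse_label=True by default
pred = mx.nd.array([[2.0, 0.5, 0.3],           # raw logits, shape (batch, classes)
                    [0.1, 3.0, 0.2]])
label = mx.nd.array([0, 1])                    # integer class indices
print(loss_fn(pred, label))                    # per-sample loss, shape (batch,)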
Example 1: get_loss
# Required import: from mxnet.gluon import loss [as alias]
# Or: from mxnet.gluon.loss import SoftmaxCrossEntropyLoss [as alias]
def get_loss(loss_name, loss_extra_kwargs):
"""
Get loss by name.
    Parameters
    ----------
loss_name : str
Loss name.
loss_extra_kwargs : dict
Loss extra parameters.
Returns
-------
Loss
Loss object instance.
"""
    if loss_name == "SoftmaxCrossEntropy":
        return SoftmaxCrossEntropyLoss(**loss_extra_kwargs)
    elif loss_name == "SegSoftmaxCrossEntropy":
        return SegSoftmaxCrossEntropyLoss(**loss_extra_kwargs)
    elif loss_name == "MixSoftmaxCrossEntropy":
        return MixSoftmaxCrossEntropyLoss(**loss_extra_kwargs)
    else:
        raise ValueError("Unknown loss name: {}".format(loss_name))
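A hypothetical call site for get_loss (the name strings mirror the branches above; the kwargs dict is an assumption for illustration):

# forwards sparse_label through to SoftmaxCrossEntropyLoss
loss_fn = get_loss("SoftmaxCrossEntropy", {"sparse_label": True})
# an unrecognized name raises ValueError, e.g. get_loss("FocalLoss", {})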
Example 2: test
# Required import: from mxnet.gluon import loss [as alias]
# Or: from mxnet.gluon.loss import SoftmaxCrossEntropyLoss [as alias]
def test(test_net, ctx, test_loader, iteration, logger):
# print("Start testing iter %d." % iteration)
Loss = gloss.SoftmaxCrossEntropyLoss()
metric = mx.metric.Accuracy()
metric_top5 = mx.metric.TopKAccuracy(5)
test_loss = mx.metric.Loss()
for batch in test_loader:
trans = gutils.split_and_load(batch[0], ctx)
labels = gutils.split_and_load(batch[1], ctx)
outputs = [test_net(tran) for tran in trans]
losses = [Loss(output, label) for output, label in zip(outputs, labels)]
test_loss.update(0, losses)
metric.update(labels, outputs)
metric_top5.update(labels, outputs)
_, test_top1_acc = metric.get()
_, test_top5_acc = metric_top5.get()
_, test_loss = test_loss.get()
if test_top1_acc >= 0.7:
test_net.save_parameters('imagenet_param/test_iter%d_%.5f.param' % (iteration, test_top1_acc))
test_str = ("test_Loss: %f, test top1-acc %f, test top5-acc %f." % (test_loss, test_top1_acc, test_top5_acc))
logger.info(test_str)
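Example 2 leans on gluon.utils.split_and_load to shard each batch across the devices in ctx. A minimal standalone illustration (two logical CPU contexts are used purely for demonstration):

import mxnet as mx
from mxnet.gluon import utils as gutils

data = mx.nd.arange(16).reshape((8, 2))
ctx = [mx.cpu(0), mx.cpu(1)]              # two CPU contexts for illustration
shards = gutils.split_and_load(data, ctx)
print([s.shape for s in shards])          # [(4, 2), (4, 2)] -- one shard per device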
Example 3: test
# Required import: from mxnet.gluon import loss [as alias]
# Or: from mxnet.gluon.loss import SoftmaxCrossEntropyLoss [as alias]
def test(test_net, ctx, test_loader, iteration, logger):
Loss = gloss.SoftmaxCrossEntropyLoss()
metric = mx.metric.Accuracy()
test_loss = mx.metric.Loss()
for batch in test_loader:
data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0)
label = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0)
outputs = [test_net(X.astype(dtype, copy=False)) for X in data]
losses = [Loss(yhat, y.astype(dtype, copy=False)) for yhat, y in zip(outputs, label)]
test_loss.update(0, losses)
metric.update(label, outputs)
_, test_acc = metric.get()
_, test_loss = test_loss.get()
test_net.save_parameters('cifar_param/test_epoch%d_%.5f.param' % (iteration, test_acc))
test_str = ("Test Loss: %f, Test acc %f." % (test_loss, test_acc))
logger.info(test_str)
Example 4: gluon_random_data_run
# Required import: from mxnet.gluon import loss [as alias]
# Or: from mxnet.gluon.loss import SoftmaxCrossEntropyLoss [as alias]
def gluon_random_data_run():
mlflow.gluon.autolog()
with mlflow.start_run() as run:
data = DataLoader(LogsDataset(), batch_size=128, last_batch="discard")
validation = DataLoader(LogsDataset(), batch_size=128, last_batch="discard")
model = HybridSequential()
model.add(Dense(64, activation="relu"))
model.add(Dense(64, activation="relu"))
model.add(Dense(10))
model.initialize()
model.hybridize()
trainer = Trainer(model.collect_params(), "adam",
optimizer_params={"learning_rate": .001, "epsilon": 1e-07})
est = estimator.Estimator(net=model, loss=SoftmaxCrossEntropyLoss(),
metrics=Accuracy(), trainer=trainer)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
est.fit(data, epochs=3, val_data=validation)
client = mlflow.tracking.MlflowClient()
return client.get_run(run.info.run_id)
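Examples 4, 5, and 6 rely on a LogsDataset fixture whose definition is not shown. A synthetic stand-in (the sample count, feature width, and 10-class label range are assumptions chosen to match the 10-unit output layer) could look like this:

import numpy as np
from mxnet.gluon.data import Dataset

class LogsDataset(Dataset):
    """Synthetic stand-in: random feature vectors with integer labels 0-9."""
    def __init__(self, num_samples=1000, num_features=64):
        self.num_samples = num_samples
        self.num_features = num_features

    def __len__(self):
        return self.num_samples

    def __getitem__(self, idx):
        x = np.random.rand(self.num_features).astype('float32')
        y = np.float32(np.random.randint(0, 10))
        return x, y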
Example 5: test_autolog_ends_auto_created_run
# Required import: from mxnet.gluon import loss [as alias]
# Or: from mxnet.gluon.loss import SoftmaxCrossEntropyLoss [as alias]
def test_autolog_ends_auto_created_run():
mlflow.gluon.autolog()
data = DataLoader(LogsDataset(), batch_size=128, last_batch="discard")
model = HybridSequential()
model.add(Dense(64, activation="relu"))
model.add(Dense(64, activation="relu"))
model.add(Dense(10))
model.initialize()
model.hybridize()
trainer = Trainer(model.collect_params(), "adam",
optimizer_params={"learning_rate": .001, "epsilon": 1e-07})
est = estimator.Estimator(net=model, loss=SoftmaxCrossEntropyLoss(),
metrics=Accuracy(), trainer=trainer)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
est.fit(data, epochs=3)
assert mlflow.active_run() is None
Example 6: test_autolog_persists_manually_created_run
# Required import: from mxnet.gluon import loss [as alias]
# Or: from mxnet.gluon.loss import SoftmaxCrossEntropyLoss [as alias]
def test_autolog_persists_manually_created_run():
mlflow.gluon.autolog()
data = DataLoader(LogsDataset(), batch_size=128, last_batch="discard")
with mlflow.start_run() as run:
model = HybridSequential()
model.add(Dense(64, activation="relu"))
model.add(Dense(64, activation="relu"))
model.add(Dense(10))
model.initialize()
model.hybridize()
trainer = Trainer(model.collect_params(), "adam",
optimizer_params={"learning_rate": .001, "epsilon": 1e-07})
est = estimator.Estimator(net=model, loss=SoftmaxCrossEntropyLoss(),
metrics=Accuracy(), trainer=trainer)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
est.fit(data, epochs=3)
assert mlflow.active_run().info.run_id == run.info.run_id
Example 7: gluon_model
# Required import: from mxnet.gluon import loss [as alias]
# Or: from mxnet.gluon.loss import SoftmaxCrossEntropyLoss [as alias]
def gluon_model(model_data):
train_data, train_label, _ = model_data
train_data_loader = DataLoader(list(zip(train_data, train_label)),
batch_size=128, last_batch="discard")
model = HybridSequential()
model.add(Dense(128, activation="relu"))
model.add(Dense(64, activation="relu"))
model.add(Dense(10))
model.initialize()
model.hybridize()
trainer = Trainer(model.collect_params(), "adam",
optimizer_params={"learning_rate": .001, "epsilon": 1e-07})
est = estimator.Estimator(net=model, loss=SoftmaxCrossEntropyLoss(),
metrics=Accuracy(), trainer=trainer)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
est.fit(train_data_loader, epochs=3)
return model
Example 8: train_ch5
# Required import: from mxnet.gluon import loss [as alias]
# Or: from mxnet.gluon.loss import SoftmaxCrossEntropyLoss [as alias]
def train_ch5(net, train_iter, test_iter, batch_size, trainer, ctx,
num_epochs):
"""Train and evaluate a model with CPU or GPU."""
print('training on', ctx)
loss = gloss.SoftmaxCrossEntropyLoss()
for epoch in range(num_epochs):
train_l_sum, train_acc_sum, n, start = 0.0, 0.0, 0, time.time()
for X, y in train_iter:
X, y = X.as_in_context(ctx), y.as_in_context(ctx)
with autograd.record():
y_hat = net(X)
l = loss(y_hat, y).sum()
l.backward()
trainer.step(batch_size)
y = y.astype('float32')
train_l_sum += l.asscalar()
train_acc_sum += (y_hat.argmax(axis=1) == y).sum().asscalar()
n += y.size
test_acc = evaluate_accuracy(test_iter, net, ctx)
print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f, '
'time %.1f sec'
% (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc,
time.time() - start))
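train_ch5 calls an evaluate_accuracy helper that is not defined in this listing; a sketch consistent with the d2l-style helpers these snippets appear to come from (the signature is an assumption):

from mxnet import nd

def evaluate_accuracy(data_iter, net, ctx):
    """Average classification accuracy of net over data_iter on one device."""
    acc_sum, n = nd.array([0], ctx=ctx), 0
    for X, y in data_iter:
        X = X.as_in_context(ctx)
        y = y.as_in_context(ctx).astype('float32')
        acc_sum += (net(X).argmax(axis=1) == y).sum()
        n += y.size
    return acc_sum.asscalar() / n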
Example 9: train_batch
# Required import: from mxnet.gluon import loss [as alias]
# Or: from mxnet.gluon.loss import SoftmaxCrossEntropyLoss [as alias]
def train_batch(self, Xs_lst, Ys_lst, Xt_lst, Yt_lst, inference, target=True):
criterion_xent = SoftmaxCrossEntropyLoss()
postfix = 'Tgt' if target else 'Src'
with autograd.record():
loss = []
for xs, ys, xt, yt in zip(Xs_lst, Ys_lst, Xt_lst, Yt_lst):
criterion_aux = dSNELoss(xs.shape[0], xt.shape[0], self.args.embed_size, self.args.margin,
self.args.fn)
ys_hat, fts = inference(xs)
yt_hat, ftt = inference(xt)
loss_xent_src = criterion_xent(ys_hat, ys)
loss_aux = criterion_aux(fts, ys, ftt, yt)
loss_total = (1 - self.args.alpha) * loss_xent_src + self.args.alpha * loss_aux
loss.append(loss_total)
self.metrics['Train-Xent-%s' % postfix].update(None, [loss_xent_src])
self.metrics['Train-Acc-Src'].update([ys], [ys_hat])
self.metrics['Train-Acc-Tgt'].update([yt], [yt_hat])
self.metrics['Train-Aux-%s' % postfix].update(None, [loss_aux])
self.metrics['Train-Total-%s' % postfix].update(None, [loss_total])
self.cur_iter += 1
for l in loss:
l.backward()
Example 10: train_batch
# Required import: from mxnet.gluon import loss [as alias]
# Or: from mxnet.gluon.loss import SoftmaxCrossEntropyLoss [as alias]
def train_batch(self, x0, y0, x1, y1, xu0, xu1, student, teacher=None, target=False):
criterion_xent = SoftmaxCrossEntropyLoss()
criterion_consistency = SoftmaxL2Loss()
postfix = 'Tgt-l' if target else 'Src'
with autograd.record():
y0_hat, fss = student(x0)
# cross entropy
loss_xent = criterion_xent(y0_hat, y0)
self.metrics['Train-Xent-%s' % postfix].update(None, [loss_xent])
self.metrics['Train-Acc-%s' % postfix].update([y0], [y0_hat])
if teacher is not None:
ysu_hat, fsu = student(xu0)
ytu_hat, ftu = teacher(xu1)
loss_consistency = criterion_consistency(ytu_hat, ysu_hat)
self.metrics['Train-Cons-%s' % postfix].update(None, [loss_consistency])
else:
loss_consistency = 0
# weighted loss
consistency_weight = self.update_beta()
loss = loss_xent + consistency_weight * loss_consistency
self.metrics['Train-Total-%s' % postfix].update(None, [loss])
self.cur_iter += 1
loss.backward()
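SoftmaxL2Loss in Example 10 is a custom consistency loss, not part of mxnet.gluon.loss. One plausible reading, offered here as an assumption rather than the original implementation, is an L2 penalty between the teacher's and student's softmax distributions:

from mxnet import gluon

class SoftmaxL2Loss(gluon.loss.Loss):
    """Sketch: mean squared error between two softmax distributions."""
    def __init__(self, weight=1., batch_axis=0, **kwargs):
        super(SoftmaxL2Loss, self).__init__(weight, batch_axis, **kwargs)

    def hybrid_forward(self, F, teacher_pred, student_pred):
        t = F.softmax(teacher_pred)      # teacher probabilities
        s = F.softmax(student_pred)      # student probabilities
        return F.mean(F.square(t - s), axis=self._batch_axis, exclude=True)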
Example 11: train_and_predict_rnn_gluon
# Required import: from mxnet.gluon import loss [as alias]
# Or: from mxnet.gluon.loss import SoftmaxCrossEntropyLoss [as alias]
def train_and_predict_rnn_gluon(model, num_hiddens, vocab_size, ctx,
corpus_indices, idx_to_char, char_to_idx,
num_epochs, num_steps, lr, clipping_theta,
batch_size, pred_period, pred_len, prefixes):
"""Train an Gluon RNN model and predict the next item in the sequence."""
loss = gloss.SoftmaxCrossEntropyLoss()
model.initialize(ctx=ctx, force_reinit=True, init=init.Normal(0.01))
trainer = gluon.Trainer(model.collect_params(), 'sgd',
{'learning_rate': lr, 'momentum': 0, 'wd': 0})
for epoch in range(num_epochs):
l_sum, n, start = 0.0, 0, time.time()
data_iter = data_iter_consecutive(
corpus_indices, batch_size, num_steps, ctx)
state = model.begin_state(batch_size=batch_size, ctx=ctx)
for X, Y in data_iter:
for s in state:
s.detach()
with autograd.record():
(output, state) = model(X, state)
y = Y.T.reshape((-1,))
l = loss(output, y).mean()
l.backward()
params = [p.data() for p in model.collect_params().values()]
grad_clipping(params, clipping_theta, ctx)
trainer.step(1)
l_sum += l.asscalar() * y.size
n += y.size
if (epoch + 1) % pred_period == 0:
print('epoch %d, perplexity %f, time %.2f sec' % (
epoch + 1, math.exp(l_sum / n), time.time() - start))
for prefix in prefixes:
print(' -', predict_rnn_gluon(
prefix, pred_len, model, vocab_size, ctx, idx_to_char,
char_to_idx))
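Example 11 (and Example 14 below) call a grad_clipping helper. A sketch matching the d2l-style global-norm clipping these training loops follow (offered as an assumption):

from mxnet import nd

def grad_clipping(params, theta, ctx):
    """Scale all gradients so their global L2 norm is at most theta."""
    norm = nd.array([0], ctx)
    for param in params:
        norm += (param.grad ** 2).sum()
    norm = norm.sqrt().asscalar()
    if norm > theta:
        for param in params:
            param.grad[:] *= theta / norm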
Example 12: train
# Required import: from mxnet.gluon import loss [as alias]
# Or: from mxnet.gluon.loss import SoftmaxCrossEntropyLoss [as alias]
def train(train_net, iterations, trainer, ctx, lr_period: tuple, lr_decay, train_loader, test_loader, cat_interval):
# set up logger
logging.basicConfig()
logger = logging.getLogger()
logger.setLevel(logging.INFO)
log_file_path = 'Attention56_train.log'
fh = logging.FileHandler(log_file_path)
logger.addHandler(fh)
    train_net.collect_params().reset_ctx(ctx)
train_gen = inf_train_gen(train_loader)
Loss = gloss.SoftmaxCrossEntropyLoss()
metric = mx.metric.Accuracy()
metric_top5 = mx.metric.TopKAccuracy(5)
train_loss = mx.metric.Loss()
prev_time = datetime.datetime.now()
metric.reset()
train_loss.reset()
for iteration in range(int(iterations)):
batch = next(train_gen)
trans = gutils.split_and_load(batch.data[0], ctx)
labels = gutils.split_and_load(batch.label[0], ctx)
with autograd.record():
outputs = [train_net(tran) for tran in trans]
losses = [Loss(output, label) for output, label in zip(outputs, labels)]
for loss in losses:
loss.backward()
trainer.step(batch_size)
train_loss.update(0, losses)
metric.update(labels, outputs)
metric_top5.update(labels, outputs)
if iteration % cat_interval == cat_interval - 1:
cur_time = datetime.datetime.now()
time_str = format_time(prev_time, cur_time)
_, top1_acc = metric.get()
_, top5_acc = metric_top5.get()
_, epoch_loss = train_loss.get()
metric.reset()
metric_top5.reset()
train_loss.reset()
epoch_str = ("Iter %d. Loss: %.5f, Train top1-acc %f, Train top5-acc %f."
% (iteration, epoch_loss, top1_acc, top5_acc))
prev_time = cur_time
logger.info(epoch_str + time_str + 'lr ' + str(trainer.learning_rate))
test(train_net, ctx, test_loader, iteration, logger)
if iteration in lr_period:
trainer.set_learning_rate(trainer.learning_rate * lr_decay)
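Example 12 draws batches from inf_train_gen(train_loader) and reads batch.data[0] / batch.label[0], which suggests an mx.io-style iterator yielding DataBatch objects. A plausible generator that cycles such an iterator indefinitely (the loader type and reset() call are assumptions):

def inf_train_gen(train_loader):
    """Cycle an mx.io data iterator forever for iteration-driven training."""
    while True:
        train_loader.reset()             # rewind for a fresh pass
        for batch in train_loader:
            yield batch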
Example 13: train
# Required import: from mxnet.gluon import loss [as alias]
# Or: from mxnet.gluon.loss import SoftmaxCrossEntropyLoss [as alias]
def train(train_net, epochs, lr, wd, ctx, warmup_epochs, train_loader, test_loader, use_mixup, logger):
num_batches = train_samples // batch_size
lr_scheduler = LRSequential([
LRScheduler('linear', base_lr=0, target_lr=lr,
nepochs=warmup_epochs, iters_per_epoch=num_batches),
LRScheduler('cosine', base_lr=lr, target_lr=0,
nepochs=epochs - warmup_epochs,
iters_per_epoch=num_batches)
])
opt_params = {'learning_rate': lr, 'momentum': 0.9, 'wd': wd, 'lr_scheduler': lr_scheduler}
if dtype != 'float32':
opt_params['multi_precision'] = True
trainer = gluon.Trainer(train_net.collect_params(), 'nag', opt_params)
Loss = gluon.loss.SoftmaxCrossEntropyLoss(sparse_label=False)
metric = mx.metric.RMSE()
train_loss = mx.metric.Loss()
alpha = 1
classes = 10
print("Start training with mixup.")
for epoch in range(epochs):
metric.reset()
train_loss.reset()
st_time = time.time()
for i, batch in enumerate(train_loader):
lam = np.random.beta(alpha, alpha)
if epoch >= (epochs - 20) or not use_mixup:
lam = 1
data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0)
label = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0)
trans = [lam * X + (1 - lam) * X[::-1] for X in data]
labels = []
for Y in label:
y1 = label_transform(Y, classes)
y2 = label_transform(Y[::-1], classes)
labels.append(lam * y1 + (1 - lam) * y2)
with autograd.record():
outputs = [train_net(X.astype(dtype, copy=False)) for X in trans]
losses = [Loss(yhat, y.astype(dtype, copy=False)) for yhat, y in zip(outputs, labels)]
for l in losses:
l.backward()
trainer.step(batch_size)
train_loss.update(0, losses)
metric.update(labels, outputs)
cur_time = time.time() - st_time
eps_samples = int(train_samples // cur_time)
_, train_acc = metric.get()
_, epoch_loss = train_loss.get()
epoch_str = ("Epoch %d. Loss: %.5f, Train RMSE %.5f. %d samples/s. lr %.5f"
% (epoch, epoch_loss, train_acc, eps_samples, trainer.learning_rate))
logger.info(epoch_str)
test(train_net, ctx, test_loader, epoch, logger)
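The mixup loop above turns integer labels into soft targets via label_transform, which is not shown. A one-hot version consistent with sparse_label=False (a sketch, assuming label is a 1-D NDArray of class indices):

from mxnet import nd

def label_transform(label, classes):
    """One-hot encode integer labels so they can be mixed as soft targets."""
    return nd.one_hot(label.astype('int32'), classes)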
Example 14: train_and_predict_rnn
# Required import: from mxnet.gluon import loss [as alias]
# Or: from mxnet.gluon.loss import SoftmaxCrossEntropyLoss [as alias]
def train_and_predict_rnn(rnn, get_params, init_rnn_state, num_hiddens,
vocab_size, ctx, corpus_indices, idx_to_char,
char_to_idx, is_random_iter, num_epochs, num_steps,
lr, clipping_theta, batch_size, pred_period,
pred_len, prefixes):
"""Train an RNN model and predict the next item in the sequence."""
if is_random_iter:
data_iter_fn = data_iter_random
else:
data_iter_fn = data_iter_consecutive
params = get_params()
loss = gloss.SoftmaxCrossEntropyLoss()
for epoch in range(num_epochs):
if not is_random_iter:
state = init_rnn_state(batch_size, num_hiddens, ctx)
l_sum, n, start = 0.0, 0, time.time()
data_iter = data_iter_fn(corpus_indices, batch_size, num_steps, ctx)
for X, Y in data_iter:
if is_random_iter:
state = init_rnn_state(batch_size, num_hiddens, ctx)
else:
for s in state:
s.detach()
with autograd.record():
inputs = to_onehot(X, vocab_size)
(outputs, state) = rnn(inputs, state, params)
outputs = nd.concat(*outputs, dim=0)
y = Y.T.reshape((-1,))
l = loss(outputs, y).mean()
l.backward()
grad_clipping(params, clipping_theta, ctx)
sgd(params, lr, 1)
l_sum += l.asscalar() * y.size
n += y.size
if (epoch + 1) % pred_period == 0:
print('epoch %d, perplexity %f, time %.2f sec' % (
epoch + 1, math.exp(l_sum / n), time.time() - start))
for prefix in prefixes:
print(' -', predict_rnn(
prefix, pred_len, rnn, params, init_rnn_state,
num_hiddens, vocab_size, ctx, idx_to_char, char_to_idx))
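Finally, Example 14 feeds the scratch RNN through a to_onehot helper that is not defined here. The d2l convention, which this code appears to follow, splits a (batch, num_steps) integer matrix into num_steps one-hot matrices (offered as an assumption):

from mxnet import nd

def to_onehot(X, size):
    """Split X of shape (batch, num_steps) into num_steps (batch, size) one-hot arrays."""
    return [nd.one_hot(x, size) for x in X.T]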