This article collects typical usage examples of the Python method mxnet.nd.SoftmaxActivation. If you are unsure what nd.SoftmaxActivation does or how to call it, the selected code example below may help. You can also explore the containing module, mxnet.nd, for related functionality.
One code example of nd.SoftmaxActivation is shown below.
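Note that recent MXNet releases document SoftmaxActivation as deprecated and recommend nd.softmax instead; the example below still uses SoftmaxActivation, which continues to work. A minimal, standalone sketch comparing the two calls (the array values are purely illustrative):

import mxnet as mx
from mxnet import nd

logits = nd.array([[1.0, 2.0, 0.5],
                   [0.1, 0.1, 0.1]])

# Legacy operator: softmax over the last axis of each instance (default mode).
probs_legacy = nd.SoftmaxActivation(logits)

# Recommended replacement; produces the same probabilities for 2-D input.
probs = nd.softmax(logits, axis=-1)

print(probs_legacy)
print(probs)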
Example 1: default_train_fn
# Required imports:
import numpy as np
import mxnet as mx
from mxnet import nd
# Alternatively: from mxnet.nd import SoftmaxActivation
# mixup_transform and smooth are label-transform helpers defined in the same
# training-utilities module as this function (see the sketch after the example).
def default_train_fn(epoch, num_epochs, net, batch, batch_size, criterion, trainer, batch_fn, ctx,
                     mixup=False, label_smoothing=False, distillation=False,
                     mixup_alpha=0.2, mixup_off_epoch=0, classes=1000,
                     dtype='float32', metric=None, teacher_prob=None):
    # Split the batch into per-context data and label lists.
    data, label = batch_fn(batch, ctx)
    if mixup:
        # Sample the mixing coefficient from a Beta distribution and blend each
        # data slice with its reversed copy; mixup is switched off for the last
        # `mixup_off_epoch` epochs.
        lam = np.random.beta(mixup_alpha, mixup_alpha)
        if epoch >= num_epochs - mixup_off_epoch:
            lam = 1
        data = [lam * X + (1 - lam) * X[::-1] for X in data]
        if label_smoothing:
            eta = 0.1
        else:
            eta = 0.0
        # Convert hard labels into mixed (and optionally smoothed) soft labels.
        label = mixup_transform(label, classes, lam, eta)
    elif label_smoothing:
        # Keep the original hard labels for the metric update and smooth the
        # training targets.
        hard_label = label
        label = smooth(label, classes)
    with mx.autograd.record():
        # Forward pass on every context slice.
        outputs = [net(X.astype(dtype, copy=False)) for X in data]
        if distillation:
            # The distillation criterion takes the student output, the target
            # label and the teacher's soft predictions.
            loss = [
                criterion(
                    yhat.astype('float', copy=False),
                    y.astype('float', copy=False),
                    p.astype('float', copy=False)
                )
                for yhat, y, p in zip(outputs, label, teacher_prob(data))
            ]
        else:
            loss = [criterion(yhat, y.astype(dtype, copy=False)) for yhat, y in zip(outputs, label)]
    # Backward pass and parameter update.
    for l in loss:
        l.backward()
    trainer.step(batch_size, ignore_stale_grad=True)
    if metric:
        if mixup:
            # With mixup the targets are soft, so compare them against softmax
            # probabilities rather than raw logits.
            output_softmax = [
                nd.SoftmaxActivation(out.astype('float32', copy=False))
                for out in outputs
            ]
            metric.update(label, output_softmax)
        else:
            if label_smoothing:
                metric.update(hard_label, outputs)
            else:
                metric.update(label, outputs)
        return metric
    else:
        return
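The example calls two helpers, mixup_transform and smooth, that are not shown above. The sketch below is one plausible implementation, modeled on the label-transform utilities commonly found in GluonCV-style ImageNet training scripts; the exact definitions in the source module may differ.

from mxnet import nd

def smooth(label, classes, eta=0.1):
    # One-hot encode each per-device label slice with label smoothing:
    # the true class gets 1 - eta + eta/classes, every other class eta/classes.
    if isinstance(label, nd.NDArray):
        label = [label]
    return [
        l.one_hot(classes, on_value=1 - eta + eta / classes, off_value=eta / classes)
        for l in label
    ]

def mixup_transform(label, classes, lam=1.0, eta=0.0):
    # Blend the (optionally smoothed) one-hot labels of each sample with those
    # of the reversed batch, mirroring the data blending done in the caller.
    if isinstance(label, nd.NDArray):
        label = [label]
    res = []
    for l in label:
        y1 = l.one_hot(classes, on_value=1 - eta + eta / classes, off_value=eta / classes)
        y2 = l[::-1].one_hot(classes, on_value=1 - eta + eta / classes, off_value=eta / classes)
        res.append(lam * y1 + (1 - lam) * y2)
    return res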
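Finally, a hedged sketch of how default_train_fn might be driven from a training loop. The toy network, random dataset and batch_fn below are illustrative stand-ins, not part of the original example.

import mxnet as mx
from mxnet import gluon, nd
from mxnet.gluon import nn

ctx = [mx.cpu()]

# Toy network and data; any Gluon model and DataLoader work the same way.
net = nn.Sequential()
net.add(nn.Dense(128, activation='relu'), nn.Dense(10))
net.initialize(ctx=ctx)

train_data = gluon.data.DataLoader(
    gluon.data.ArrayDataset(nd.random.normal(shape=(256, 32)),
                            nd.random.randint(0, 10, shape=(256,)).astype('float32')),
    batch_size=32, shuffle=True)

def batch_fn(batch, ctx):
    # Split data and label across the available contexts.
    data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0)
    label = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0)
    return data, label

criterion = gluon.loss.SoftmaxCrossEntropyLoss()
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1})
metric = mx.metric.Accuracy()

num_epochs = 2
for epoch in range(num_epochs):
    metric.reset()
    for batch in train_data:
        metric = default_train_fn(epoch, num_epochs, net, batch, len(batch[0]),
                                  criterion, trainer, batch_fn, ctx,
                                  classes=10, metric=metric)
    print('epoch %d, %s=%f' % (epoch, *metric.get()))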