This article collects typical usage examples of the Python method mxnet.nd.SoftmaxActivation. If you are wondering what nd.SoftmaxActivation does, how to call it, or what working code looks like, the curated samples below may help. You can also read more about the mxnet.nd module that the method belongs to.
The following shows 1 code example of nd.SoftmaxActivation, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
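Before the full example, here is a minimal sketch of what nd.SoftmaxActivation does on its own; the input values below are made up purely for illustration:

from mxnet import nd

# A toy batch of two samples with three class scores each (illustrative values).
logits = nd.array([[1.0, 2.0, 0.5],
                   [0.1, 0.1, 3.0]])

# SoftmaxActivation normalizes each row of scores into a probability distribution.
probs = nd.SoftmaxActivation(logits)
print(probs)               # each row sums to 1
print(probs.sum(axis=1))   # [1. 1.]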
Example 1: default_train_fn
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import SoftmaxActivation [as alias]
import mxnet as mx
import numpy as np
from mxnet import nd

# Note: mixup_transform() and smooth() are label-transform helpers defined
# elsewhere in the source project; they are not shown in this example.

def default_train_fn(epoch, num_epochs, net, batch, batch_size, criterion, trainer, batch_fn, ctx,
                     mixup=False, label_smoothing=False, distillation=False,
                     mixup_alpha=0.2, mixup_off_epoch=0, classes=1000,
                     dtype='float32', metric=None, teacher_prob=None):
    data, label = batch_fn(batch, ctx)
    if mixup:
        # Draw a mixing coefficient and blend each batch with its reversed copy.
        lam = np.random.beta(mixup_alpha, mixup_alpha)
        if epoch >= num_epochs - mixup_off_epoch:
            lam = 1
        data = [lam * X + (1 - lam) * X[::-1] for X in data]
        eta = 0.1 if label_smoothing else 0.0
        label = mixup_transform(label, classes, lam, eta)
    elif label_smoothing:
        # Keep the hard labels for metric updates; train on the smoothed ones.
        hard_label = label
        label = smooth(label, classes)
    with mx.autograd.record():
        outputs = [net(X.astype(dtype, copy=False)) for X in data]
        if distillation:
            # Distillation loss takes student outputs, labels, and teacher probabilities.
            loss = [criterion(yhat.astype('float', copy=False),
                              y.astype('float', copy=False),
                              p.astype('float', copy=False))
                    for yhat, y, p in zip(outputs, label, teacher_prob(data))]
        else:
            loss = [criterion(yhat, y.astype(dtype, copy=False)) for yhat, y in zip(outputs, label)]
    for l in loss:
        l.backward()
    trainer.step(batch_size, ignore_stale_grad=True)
    if metric:
        if mixup:
            # Mixup labels are soft distributions, so compare them against softmax outputs.
            output_softmax = [nd.SoftmaxActivation(out.astype('float32', copy=False))
                              for out in outputs]
            metric.update(label, output_softmax)
        else:
            if label_smoothing:
                metric.update(hard_label, outputs)
            else:
                metric.update(label, outputs)
        return metric
    else:
        return
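For completeness, the following is a hedged sketch of how default_train_fn might be driven from a training step. The tiny Dense network, the synthetic batch, and the batch_fn helper are assumptions made for illustration and are not part of the original example:

import mxnet as mx
from mxnet import nd, gluon
from mxnet.gluon import nn

# Hypothetical setup: a tiny model and a synthetic batch, just to exercise the call.
ctx = [mx.cpu()]
net = nn.Dense(10)
net.initialize(ctx=ctx)

def batch_fn(batch, ctx):
    # Split data and labels across the given contexts (a single CPU here).
    data = gluon.utils.split_and_load(batch[0], ctx_list=ctx)
    label = gluon.utils.split_and_load(batch[1], ctx_list=ctx)
    return data, label

criterion = gluon.loss.SoftmaxCrossEntropyLoss()
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.1})
metric = mx.metric.Accuracy()

# One synthetic batch: 8 samples, 20 features, 10 classes.
batch = (nd.random.normal(shape=(8, 20)), nd.random.randint(0, 10, shape=(8,)))
metric = default_train_fn(epoch=0, num_epochs=1, net=net, batch=batch, batch_size=8,
                          criterion=criterion, trainer=trainer, batch_fn=batch_fn,
                          ctx=ctx, classes=10, metric=metric)
print(metric.get())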