本文整理汇总了Python中mxnet.nd.one_hot方法的典型用法代码示例。如果您正苦于以下问题:Python nd.one_hot方法的具体用法?Python nd.one_hot怎么用?Python nd.one_hot使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类mxnet.nd
的用法示例。
在下文中一共展示了nd.one_hot方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: hybrid_forward
# 需要导入模块: from mxnet import nd [as 别名]
# 或者: from mxnet.nd import one_hot [as 别名]
def hybrid_forward(self, F, images, num_classes, labels, X_l2norm,
                   lambda_value=0.5, sample_weight=None):
    """Capsule-network margin loss.

    Parameters
    ----------
    F : module
        `mxnet.nd` in imperative mode, `mxnet.sym` after ``hybridize()``.
    images : NDArray/Symbol
        Unused here; kept for interface compatibility with callers.
    num_classes : int
        Number of target classes for the one-hot encoding.
    labels : NDArray/Symbol
        Integer class indices, shape (batch,).
    X_l2norm : NDArray/Symbol
        Per-class capsule lengths, shape (batch, num_classes) —
        presumably in [0, 1]; TODO confirm against the caller.
    lambda_value : float
        Down-weighting of the absent-class term (0.5 in the CapsNet paper).
    sample_weight : NDArray/Symbol or None
        Optional per-sample weighting applied via ``_apply_weighting``.

    Returns
    -------
    NDArray/Symbol
        Scalar mean margin loss.
    """
    self.num_classes = num_classes
    # Use F consistently: the original called nd.one_hot / nd.maximum here,
    # which fails under hybridization because F is then mxnet.sym and the
    # inputs are Symbols, not NDArrays.
    labels_onehot = F.one_hot(labels, num_classes)
    # Present-class term: penalize capsule lengths below the 0.9 margin.
    present_term = F.square(F.maximum(0.9 - X_l2norm, 0))
    # Absent-class term: penalize capsule lengths above the 0.1 margin.
    absent_term = F.square(F.maximum(X_l2norm - 0.1, 0))
    margin_loss = (labels_onehot * present_term
                   + lambda_value * (1 - labels_onehot) * absent_term)
    margin_loss = margin_loss.sum(axis=1)
    loss = F.mean(margin_loss, axis=self._batch_axis, exclude=True)
    loss = _apply_weighting(F, loss, self._weight / 2, sample_weight)
    # NOTE(review): this second mean over the (already reduced) loss is kept
    # from the original; it is a no-op on a scalar but preserved for safety.
    return F.mean(loss, axis=self._batch_axis, exclude=True)
示例2: forward
# 需要导入模块: from mxnet import nd [as 别名]
# 或者: from mxnet.nd import one_hot [as 别名]
def forward(self, inputs, state):
    """One forward pass of the RNN language model.

    Parameters
    ----------
    inputs : NDArray
        Integer token ids; transposed so time becomes the leading axis.
    state : list of NDArray
        Recurrent state carried over from the previous call.

    Returns
    -------
    (NDArray, list of NDArray)
        Per-step vocabulary scores (flattened over time and batch) and
        the updated recurrent state.
    """
    # One-hot encode each time step: shape (num_steps, batch, vocab_size).
    one_hot_steps = nd.one_hot(inputs.T, self.vocab_size)
    hidden_out, new_state = self.rnn(one_hot_steps, state)
    # Merge the time and batch axes before the dense output projection.
    flat_hidden = hidden_out.reshape((-1, hidden_out.shape[-1]))
    return self.dense(flat_hidden), new_state
示例3: to_onehot
# 需要导入模块: from mxnet import nd [as 别名]
# 或者: from mxnet.nd import one_hot [as 别名]
def to_onehot(X, size):
    """Represent inputs with one-hot encoding.

    Parameters
    ----------
    X : NDArray
        Batch of integer token ids, shape (batch, num_steps).
    size : int
        Vocabulary size (depth of the one-hot encoding).

    Returns
    -------
    list of NDArray
        One (batch, size) one-hot array per time step.
    """
    encoded_steps = []
    # Transposing puts time first, so each iterated row is one time step.
    for step_ids in X.T:
        encoded_steps.append(nd.one_hot(step_ids, size))
    return encoded_steps
示例4: train
# 需要导入模块: from mxnet import nd [as 别名]
# 或者: from mxnet.nd import one_hot [as 别名]
def train(train_data, test_data, net, loss, trainer, ctx, num_epochs,
          print_batches=100, num_classes=10):
    """Train a network with one-hot targets and report per-epoch metrics.

    Parameters
    ----------
    train_data, test_data : iterable of (data, label) batches
        Training and evaluation data loaders.
    net : Block
        The network to train.
    loss : callable
        Loss taking ``(output, one_hot_label)``.
    trainer : gluon.Trainer
        Optimizer wrapper; stepped with the batch size each iteration.
    ctx : Context
        Device to place data/labels on.
    num_epochs : int
        Number of passes over ``train_data``.
    print_batches : int or falsy
        Print running metrics every this many batches; falsy disables it.
    num_classes : int, optional
        Depth of the one-hot label encoding. Defaults to 10 (the original
        hard-coded value), so existing callers are unaffected.
    """
    for epoch in range(num_epochs):
        train_loss = 0.
        train_acc = 0.
        n = 0
        for i, (data, label) in tqdm(enumerate(train_data), total=len(train_data),
                                     ncols=70, leave=False, unit='b'):
            # One-hot targets for losses that expect per-class target vectors.
            one_hot_label = nd.one_hot(label, num_classes).as_in_context(ctx)
            label = label.as_in_context(ctx)
            data = data.as_in_context(ctx)
            with autograd.record():
                output = net(data)
                L = loss(output, one_hot_label)
            L.backward()
            # Normalize the gradient by the batch size.
            trainer.step(data.shape[0])
            train_loss += nd.mean(L).asscalar()
            # Accuracy is computed against the integer labels, not one-hot.
            train_acc += accuracy(output, label)
            n = i + 1
            if print_batches and n % print_batches == 0:
                print("Batch %d. Loss: %f, Train acc %f" % (
                    n, train_loss / n, train_acc / n
                ))
        test_acc = evaluate_accuracy(test_data, net, ctx)
        print("Epoch %d. Loss: %f, Train acc %f, Test acc %f" % (
            epoch, train_loss / n, train_acc / n, test_acc
        ))
示例5: forward
# 需要导入模块: from mxnet import nd [as 别名]
# 或者: from mxnet.nd import one_hot [as 别名]
def forward(self, labels, y_pred):
    """Capsule margin loss for already one-hot-encoded labels.

    Parameters
    ----------
    labels : NDArray
        One-hot target matrix, shape (batch, num_classes). (An earlier
        version converted indices with ``nd.one_hot``; callers now pass
        one-hot directly.)
    y_pred : NDArray
        Per-class predictions, shape (batch, num_classes).

    Returns
    -------
    NDArray
        Scalar mean margin loss.
    """
    targets = labels
    # Present-class term: penalize predictions below the 0.9 margin.
    present = nd.square(nd.maximum(0.9 - y_pred, 0))
    # Absent-class term: penalize predictions above the 0.1 margin.
    absent = nd.square(nd.maximum(y_pred - 0.1, 0))
    per_class = targets * present + self.lambda_value * (1 - targets) * absent
    per_sample = per_class.sum(axis=1)
    loss = nd.mean(per_sample, axis=self._batch_axis, exclude=True)
    loss = _apply_weighting(nd, loss, self._weight / 2, self.sample_weight)
    return nd.mean(loss, axis=self._batch_axis, exclude=True)