本文整理匯總了Python中mxnet.nd.mean方法的典型用法代碼示例。如果您正苦於以下問題:Python nd.mean方法的具體用法?Python nd.mean怎麽用?Python nd.mean使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類mxnet.nd
的用法示例。
在下文中一共展示了nd.mean方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: evaluate_accuracy_multi
# 需要導入模塊: from mxnet import nd [as 別名]
# 或者: from mxnet.nd import mean [as 別名]
def evaluate_accuracy_multi(data_iterator, net, ctx):
    """Evaluate a multi-label net over a whole data iterator.

    Runs `net` on every batch, stacks all predictions/labels (6 columns),
    prints a per-column confusion matrix at threshold 0.5, and returns
    (roc_auc, log_loss_via_accuracy_helper, elapsed_seconds).
    """
    data_iterator.reset()
    t1 = time.time()
    # Collect per-batch arrays and stack once at the end: np.vstack on a
    # generator is a TypeError on NumPy >= 1.24, and re-vstacking an
    # ever-growing array per batch was O(n^2) in copies.
    pred_chunks = [np.zeros((0, 6))]
    label_chunks = [np.zeros((0, 6))]
    for batch in data_iterator:
        data, label = _get_batch_multi(batch, ctx, False)
        pred_chunks.append(np.vstack([net(X).asnumpy() for X in data]))
        label_chunks.append(np.vstack([Y.asnumpy() for Y in label]))
    dummy_pred = np.vstack(pred_chunks)
    dummy_label = np.vstack(label_chunks)
    dummy_pred_label = dummy_pred > 0.5
    # `col` (not `i`) so the batch index above is not shadowed.
    for col in range(dummy_label.shape[1]):
        print(col, confusion_matrix(dummy_label[:, col], dummy_pred_label[:, col]))
    # NOTE(review): `accuracy` here is the sibling log-loss helper and is
    # called as (pred, label) — argument order intentionally preserved.
    return roc_auc_score(dummy_label, dummy_pred), accuracy(dummy_pred, dummy_label), time.time() - t1
示例2: train
# 需要導入模塊: from mxnet import nd [as 別名]
# 或者: from mxnet.nd import mean [as 別名]
def train(net,epochs, ctx, train_data,test_data,
          margin_loss, reconstructions_loss,
          batch_size,scale_factor):
    """Train a capsule network with SGD (lr 0.05, wd 5e-4).

    Per batch: total loss = margin_loss + scale_factor * reconstruction loss.
    After each epoch, prints the mean train loss and the test accuracy.
    """
    n_classes = 10
    trainer = gluon.Trainer(
        net.collect_params(), 'sgd', {'learning_rate': 0.05, 'wd': 5e-4})
    for epoch in range(epochs):
        epoch_loss = 0.0
        progress = tqdm(enumerate(train_data), total=len(train_data),
                        ncols=70, leave=False, unit='b')
        for _, (data, label) in progress:
            data = data.as_in_context(ctx)
            label = label.as_in_context(ctx)
            with autograd.record():
                prob, X_l2norm, reconstructions = net(data, label)
                total = (margin_loss(data, n_classes, label, X_l2norm)
                         + scale_factor * reconstructions_loss(reconstructions, data))
            total.backward()
            trainer.step(batch_size)
            epoch_loss += nd.mean(total).asscalar()
        test_acc = test(test_data, net, ctx)
        print('Epoch:{}, TrainLoss:{:.5f}, TestAcc:{}'.format(
            epoch, epoch_loss / len(train_data), test_acc))
示例3: test_compute_quantile_loss
# 需要導入模塊: from mxnet import nd [as 別名]
# 或者: from mxnet.nd import mean [as 別名]
def test_compute_quantile_loss() -> None:
    """Check QuantileLoss.compute_quantile_loss against hand-computed values.

    With y_true == 1 and y_pred == 0 the mean pinball loss at quantile q is
    2*q, hence 1.0 at q=0.5 and 1.8 at q=0.9.
    """
    y_true = nd.ones(shape=(10, 10, 10))
    y_pred = nd.zeros(shape=(10, 10, 10, 2))
    quantiles = [0.5, 0.9]
    loss = QuantileLoss(quantiles)
    correct_qt_loss = [1.0, 1.8]
    for idx, q in enumerate(quantiles):
        computed = nd.mean(
            loss.compute_quantile_loss(
                nd.ndarray, y_true, y_pred[:, :, :, idx], q
            )
        ).asscalar()
        # abs(): the original one-sided check (computed - expected < 1e-5)
        # could never fail when the computed loss undershot the target.
        assert (
            abs(computed - correct_qt_loss[idx]) < 1e-5
        ), f"computing quantile loss at quantile {q} fails!"
示例4: _evaluate_accuracy
# 需要導入模塊: from mxnet import nd [as 別名]
# 或者: from mxnet.nd import mean [as 別名]
def _evaluate_accuracy(self, X, Y, batch_size=64):
    """Return (accuracy, mean softmax-CE loss) of self.model over X, Y.

    Iterates at most len(X) // batch_size batches from generate_batch
    (unshuffled) and maintains an incremental running mean of the per-batch
    mean loss.
    """
    loader = self.generate_batch(X, Y, batch_size, shuffled=False)
    ce_loss = gluon.loss.SoftmaxCrossEntropyLoss()
    n_batches = len(X) // batch_size
    acc_metric = mx.metric.Accuracy()
    running_loss = 0.
    for idx, (batch_x, batch_y) in enumerate(loader):
        batch_x = batch_x.as_in_context(self.model_ctx)
        batch_y = batch_y.as_in_context(self.model_ctx)
        scores = self.model(batch_x)
        acc_metric.update(preds=nd.argmax(scores, axis=1), labels=batch_y)
        # running mean: old * idx/(idx+1) + new/(idx+1)
        batch_loss = nd.mean(ce_loss(scores, batch_y)).asscalar()
        running_loss = running_loss * idx / (idx + 1) + batch_loss / (idx + 1)
        if idx + 1 == n_batches:
            break
    return acc_metric.get()[1], running_loss
示例5: facc
# 需要導入模塊: from mxnet import nd [as 別名]
# 或者: from mxnet.nd import mean [as 別名]
def facc(label, pred):
    """Binary accuracy: fraction of thresholded predictions (> 0.5) that match labels."""
    hard_pred = pred.ravel() > 0.5
    flat_label = label.ravel()
    return (hard_pred == flat_label).mean()
# setting
示例6: evaluate
# 需要導入模塊: from mxnet import nd [as 別名]
# 或者: from mxnet.nd import mean [as 別名]
def evaluate(data):
    """Return (accuracy, mean batch loss) of the module-level `net` on `data`.

    Relies on module globals: net, SCE, batch_iter, batch_size, ctx.
    """
    metric = mx.metric.Accuracy()
    total_loss = 0.0
    n_batches = 0
    for _, slots in batch_iter(data, batch_size, shuffle=False):
        sequences, labels = zip(*slots)
        sequences = nd.array(sequences, ctx)
        labels = nd.array(labels, ctx)
        logits = net(sequences)
        metric.update(preds=[logits], labels=[labels])
        total_loss += nd.mean(SCE(logits, labels)).asscalar()
        n_batches += 1
    return metric.get()[1], total_loss / n_batches
示例7: accuracy
# 需要導入模塊: from mxnet import nd [as 別名]
# 或者: from mxnet.nd import mean [as 別名]
def accuracy(output, label):
    """Fraction of rows whose argmax class equals the label."""
    matches = output.argmax(axis=1) == label
    return nd.mean(matches).asscalar()
示例8: accuracy
# 需要導入模塊: from mxnet import nd [as 別名]
# 或者: from mxnet.nd import mean [as 別名]
def accuracy(output, label):
    """Mean binary cross-entropy (base 2) of probabilities `output` vs `label`.

    NOTE(review): despite the name, this is a log loss, not an accuracy —
    kept as-is because callers use this name.
    """
    ce = -(label * np.log2(output) + (1 - label) * np.log2(1 - output))
    return np.mean(ce)
示例9: train
# 需要導入模塊: from mxnet import nd [as 別名]
# 或者: from mxnet.nd import mean [as 別名]
def train(train_data, test_data, net, loss, trainer,
          ctx, num_epochs, print_batches=None):
    """Train `net` for `num_epochs`, checkpointing to 'net.params'.

    Every `print_batches` batches (if set) and at the end of every epoch,
    evaluates on `test_data` and saves parameters when the evaluation result
    is below the running minimum.

    NOTE(review): `test_acc` is an accuracy from evaluate_accuracy, yet it is
    compared against `min_loss` (init 100000) and the checkpoint is taken when
    accuracy DECREASES; the print label also says "Test Loss" while printing
    the accuracy. This looks inverted (compare train_multi below, which keeps
    the max) — confirm intent before relying on the saved checkpoint.
    """
    min_loss = 100000  # running minimum of `test_acc` (see NOTE above)
    for epoch in range(num_epochs):
        train_loss = 0.
        train_acc = 0.
        n = 0
        for i, batch in enumerate(train_data):
            data, label = _get_batch(batch, ctx)
            with autograd.record():
                output = net(data)
                L = loss(output, label)
            L.backward()
            # ignore_stale_grad: some params may not receive gradients each step
            trainer.step(data.shape[0], ignore_stale_grad=True)
            train_loss += nd.mean(L).asscalar()
            train_acc += accuracy(output, label)
            n = i + 1
            # optional mid-epoch evaluation / checkpoint
            if print_batches and n % print_batches == 0:
                test_acc = evaluate_accuracy(test_data, net, ctx)
                test_data.reset()
                print("Batch %d. Loss: %f, Train acc %f, Test Loss %f" % (
                    n, train_loss/n, train_acc/n, test_acc))
                if test_acc < min_loss:
                    min_loss = test_acc
                    net.save_params('net.params')
        # end-of-epoch evaluation / checkpoint
        test_acc = evaluate_accuracy(test_data, net, ctx)
        train_data.reset()
        test_data.reset()
        print("Epoch %d. Loss: %f, Train acc %f, Test Loss %f" % (
            epoch, train_loss/n, train_acc/n, test_acc))
        if test_acc < min_loss:
            min_loss = test_acc
            net.save_params('net.params')
示例10: train_multi
# 需要導入模塊: from mxnet import nd [as 別名]
# 或者: from mxnet.nd import mean [as 別名]
def train_multi(train_data, test_data, iteration, net, loss, trainer,
                ctx, num_epochs, print_batches=None, pos_tr_ratio=None):
    """Train a multi-device / multi-label network, checkpointing the best model.

    Splits each batch across devices via _get_batch_multi, backprops one loss
    per device slice, and saves to 'net<iteration>.params' whenever the test
    roc_auc improves.

    NOTE(review): `min_loss` actually tracks the MAXIMUM test roc_auc
    (init 0, updated when test_acc > min_loss) — the name is misleading.
    """
    min_loss = 0  # best (max) test roc_auc seen so far, despite the name
    for epoch in range(num_epochs):
        train_loss = 0.
        train_acc = 0.
        n = 0
        for i, batch in enumerate(train_data):
            data, label = _get_batch_multi(batch, ctx)
            with autograd.record():
                # one loss per device slice
                losses = [loss(net(X), Y, pos_tr_ratio) for X, Y in zip(data, label)]
            for l in losses:
                l.backward()
            trainer.step(batch.data[0].shape[0], ignore_stale_grad=True)
            # mean over device slices of each slice's mean loss
            train_loss += np.mean([nd.mean(l).asscalar() for l in losses])
            # train_acc += accuracy(output, label)
            n = i + 1
            # optional mid-epoch evaluation / checkpoint
            if print_batches and n % print_batches == 0:
                test_acc, test_loss, eval_time = evaluate_accuracy_multi(test_data, net, ctx)
                print("Batch %d. Loss: %f, Test roc_auc: %f, test_loss: %f , eval time: %f" % (
                    n, train_loss/n, test_acc, test_loss, eval_time))
                if test_acc > min_loss:
                    min_loss = test_acc
                    net.save_params('net'+str(iteration)+'.params')
        # end-of-epoch evaluation / checkpoint
        train_data.reset()
        test_acc, test_loss, eval_time = evaluate_accuracy_multi(test_data, net, ctx)
        print("Epoch %d. Loss: %f, roc_auc: %f, test_loss: %f , eval time: %f" % (
            epoch, train_loss/n, test_acc, test_loss, eval_time))
        if test_acc > min_loss:
            min_loss = test_acc
            net.save_params('net'+str(iteration)+'.params')
示例11: CapLoss
# 需要導入模塊: from mxnet import nd [as 別名]
# 或者: from mxnet.nd import mean [as 別名]
def CapLoss(y_pred, y_true):
    """Capsule-network margin loss (m+ = 0.9, m- = 0.1, down-weight 0.5).

    Sums the per-class margin terms over axis 1, then averages over the batch.
    """
    present = y_true * nd.square(nd.maximum(0., 0.9 - y_pred))
    absent = 0.5 * (1 - y_true) * nd.square(nd.maximum(0., y_pred - 0.1))
    return nd.mean(nd.sum(present + absent, 1))
示例12: EntropyLoss1
# 需要導入模塊: from mxnet import nd [as 別名]
# 或者: from mxnet.nd import mean [as 別名]
def EntropyLoss1(y_pred, y_true, train_pos_ratio):
    """Class-frequency-weighted binary cross-entropy (base 2).

    Positives are weighted by (scale - pos_ratio*scale), negatives by
    pos_ratio*scale, so the rarer class contributes more to the loss.
    """
    scale = 10
    pos_weight = array(train_pos_ratio, ctx=y_pred.context, dtype=np.float32) * scale
    neg_weight = scale - pos_weight
    ce = - y_true * nd.log2(y_pred) * neg_weight \
         - (1 - y_true) * nd.log2(1 - y_pred) * pos_weight
    return nd.mean(ce)
示例13: EntropyLoss
# 需要導入模塊: from mxnet import nd [as 別名]
# 或者: from mxnet.nd import mean [as 別名]
def EntropyLoss(y_pred, y_true):
    """Mean binary cross-entropy (base 2) of probabilities vs. 0/1 targets."""
    pos_term = y_true * nd.log2(y_pred)
    neg_term = (1 - y_true) * nd.log2(1 - y_pred)
    return nd.mean(-pos_term - neg_term)
示例14: EntropyLoss1
# 需要導入模塊: from mxnet import nd [as 別名]
# 或者: from mxnet.nd import mean [as 別名]
def EntropyLoss1(y_pred, y_true):
    """Class-frequency-weighted binary cross-entropy (base 2) with hard-coded
    per-column positive ratios for a 6-label problem.

    NOTE(review): train_pos_ratio is already multiplied by 10, yet
    train_neg_ratio = (1.0 - train_pos_ratio) * 10 subtracts the SCALED value
    from 1.0 — the 3-arg EntropyLoss1 variant uses (scale - pos*scale)
    instead. With these small ratios (< 0.1) the weights stay positive, but
    the formulas disagree; presumably (1.0 - ratio) * 10 was intended —
    confirm before reuse.
    """
    train_pos_ratio = array([ 0.09584448, 0.00999555, 0.05294822, 0.00299553, 0.04936361, 0.00880486], ctx=y_pred.context, dtype=np.float32)*10
    train_neg_ratio = (1.0-train_pos_ratio)*10
    # positives weighted by neg_ratio, negatives by pos_ratio (rare-class boost)
    L = - y_true*nd.log2(y_pred) * train_neg_ratio - (1-y_true) * nd.log2(1-y_pred) * train_pos_ratio
    return nd.mean(L)
示例15: meta_knowledge
# 需要導入模塊: from mxnet import nd [as 別名]
# 或者: from mxnet.nd import mean [as 別名]
def meta_knowledge(self, feature):
    """Average `feature` over axis 0 and pass the pooled tensor to the geo encoder."""
    pooled = nd.mean(feature, axis=0)
    return self.geo_encoder(pooled)