This article collects typical usage examples of the Python method mxnet.nd.argmax. If you are unsure what nd.argmax does, how to call it, or how it is used in practice, the curated examples below may help. You can also explore the other methods of the mxnet.nd module.
The following 15 code examples of nd.argmax are listed, ordered by popularity.
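Before the collected examples, here is a minimal, self-contained sketch of what nd.argmax itself does (the array values are arbitrary and chosen only to illustrate the axis argument):

from mxnet import nd

# A 2x3 matrix of scores; argmax over axis=1 returns, for each row, the index
# of the largest entry, which is the usual "predicted class" pattern.
scores = nd.array([[0.1, 0.7, 0.2],
                   [0.5, 0.3, 0.4]])
print(nd.argmax(scores, axis=1))  # [1. 0.]
print(nd.argmax(scores, axis=0))  # column-wise maxima: [1. 0. 1.]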
Example 1: evaluate_accuracy
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import argmax [as alias]
def evaluate_accuracy(data_iterator, net):
    acc = mx.metric.Accuracy()
    # Iterate through the data and labels
    for i, (data, label) in enumerate(data_iterator):
        # Move the data and label to the GPU
        data = data.as_in_context(ctx[0])
        label = label.as_in_context(ctx[0])
        # Get the network's output and apply argmax to it
        # to obtain the predicted class.
        output = net(data)
        predictions = nd.argmax(output, axis=1)
        # Feed the prediction and the correct label to the metric
        acc.update(preds=predictions, labels=label)
    # Return the accuracy
    return acc.get()[1]

# We'll use cross-entropy loss since we are doing multiclass classification
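The loss mentioned in the trailing comment is not part of the excerpt; a minimal sketch of how it would typically be defined with Gluon (the names output and label below are placeholders, not from the original code) is:

from mxnet import gluon

# Standard multiclass loss: softmax followed by cross-entropy, applied to raw logits.
softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()
# Inside a training loop one would then compute, for example:
#     loss = softmax_cross_entropy(output, label)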
Example 2: eval_net
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import argmax [as alias]
def eval_net(args, net, dataloader, criterion):
    total = 0
    total_loss = 0
    total_correct = 0
    for data in dataloader:
        graphs, labels = data
        labels = labels.as_in_context(args.device)
        feat = graphs.ndata['attr'].astype('float32').as_in_context(args.device)
        total += len(labels)
        outputs = net(graphs, feat)
        predicted = nd.argmax(outputs, axis=1)
        total_correct += (predicted == labels).sum().asscalar()
        loss = criterion(outputs, labels)
        # criterion is cross-entropy (reduce=True by default)
        total_loss += loss.sum().asscalar()
    loss, acc = 1.0 * total_loss / total, 1.0 * total_correct / total
    return loss, acc
Example 3: get_max_pred
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import argmax [as alias]
def get_max_pred(batch_heatmaps):
    batch_size = batch_heatmaps.shape[0]
    num_joints = batch_heatmaps.shape[1]
    width = batch_heatmaps.shape[3]
    heatmaps_reshaped = batch_heatmaps.reshape((batch_size, num_joints, -1))
    idx = nd.argmax(heatmaps_reshaped, 2)
    maxvals = nd.max(heatmaps_reshaped, 2)

    maxvals = maxvals.reshape((batch_size, num_joints, 1))
    idx = idx.reshape((batch_size, num_joints, 1))

    preds = nd.tile(idx, (1, 1, 2)).astype(np.float32)

    preds[:, :, 0] = (preds[:, :, 0]) % width
    preds[:, :, 1] = nd.floor((preds[:, :, 1]) / width)

    pred_mask = nd.tile(nd.greater(maxvals, 0.0), (1, 1, 2))
    pred_mask = pred_mask.astype(np.float32)

    preds *= pred_mask
    return preds, maxvals
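A quick, hypothetical smoke test of get_max_pred on random heatmaps (the batch size, joint count and heatmap resolution below are illustrative) shows how the flattened argmax index is split back into (x, y) = (idx % width, idx // width):

import numpy as np
from mxnet import nd

batch_heatmaps = nd.random.uniform(shape=(2, 17, 64, 48))  # (batch, joints, height, width)
preds, maxvals = get_max_pred(batch_heatmaps)
print(preds.shape)    # (2, 17, 2): per-joint (x, y) coordinates in heatmap pixels
print(maxvals.shape)  # (2, 17, 1): peak score of each heatmap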
Example 4: hybrid_forward
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import argmax [as alias]
def hybrid_forward(self, F, X, y=None):
    X = self.net[0](X)  # Conv1
    X = self.net[1](X)  # Primary Capsule
    X = self.net[2](X)  # Digit Capsule
    X = X.reshape((X.shape[0], X.shape[2], X.shape[4]))
    # Get the length of each capsule vector for the margin loss calculation
    X_l2norm = nd.sqrt((X ** 2).sum(axis=-1))
    prob = nd.softmax(X_l2norm, axis=-1)
    if y is not None:
        max_len_indices = y
    else:
        max_len_indices = nd.argmax(prob, axis=-1)
    # Use max_len_indices (the labels, or the predicted classes when no labels
    # are given) to pick the activated capsules for reconstruction.
    y_tile = nd.tile(max_len_indices.expand_dims(axis=1), reps=(1, X.shape[-1]))
    batch_activated_capsules = nd.pick(X, y_tile, axis=1, keepdims=True)
    reconstructions = self.net[3](batch_activated_capsules)
    return prob, X_l2norm, reconstructions
Example 5: train
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import argmax [as alias]
def train(train_iter, test_iter, net, loss, trainer, ctx, num_epochs):
    """Train and evaluate a model."""
    print('training on', ctx)
    if isinstance(ctx, mx.Context):
        ctx = [ctx]
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n, m, start = 0.0, 0.0, 0, 0, time.time()
        for i, batch in enumerate(train_iter):
            Xs, ys, batch_size = _get_batch(batch, ctx)
            with autograd.record():
                y_hats = [net(X) for X in Xs]
                ls = [loss(y_hat, y) for y_hat, y in zip(y_hats, ys)]
            for l in ls:
                l.backward()
            trainer.step(batch_size)
            train_l_sum += sum([l.sum().asscalar() for l in ls])
            n += sum([l.size for l in ls])
            train_acc_sum += sum([(y_hat.argmax(axis=1) == y).sum().asscalar()
                                  for y_hat, y in zip(y_hats, ys)])
            m += sum([y.size for y in ys])
        test_acc = evaluate_accuracy(test_iter, net, ctx)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f, '
              'time %.1f sec'
              % (epoch + 1, train_l_sum / n, train_acc_sum / m, test_acc,
                 time.time() - start))
Example 6: train_ch5
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import argmax [as alias]
def train_ch5(net, train_iter, test_iter, batch_size, trainer, ctx,
              num_epochs):
    """Train and evaluate a model on CPU or GPU."""
    print('training on', ctx)
    loss = gloss.SoftmaxCrossEntropyLoss()
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n, start = 0.0, 0.0, 0, time.time()
        for X, y in train_iter:
            X, y = X.as_in_context(ctx), y.as_in_context(ctx)
            with autograd.record():
                y_hat = net(X)
                l = loss(y_hat, y).sum()
            l.backward()
            trainer.step(batch_size)
            y = y.astype('float32')
            train_l_sum += l.asscalar()
            train_acc_sum += (y_hat.argmax(axis=1) == y).sum().asscalar()
            n += y.size
        test_acc = evaluate_accuracy(test_iter, net, ctx)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f, '
              'time %.1f sec'
              % (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc,
                 time.time() - start))
Example 7: train_ch3
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import argmax [as alias]
def train_ch3(net, train_iter, test_iter, loss, num_epochs, batch_size,
              params=None, lr=None, trainer=None):
    """Train and evaluate a model on CPU."""
    for epoch in range(num_epochs):
        train_l_sum, train_acc_sum, n = 0.0, 0.0, 0
        for X, y in train_iter:
            with autograd.record():
                y_hat = net(X)
                l = loss(y_hat, y).sum()
            l.backward()
            if trainer is None:
                sgd(params, lr, batch_size)
            else:
                trainer.step(batch_size)
            y = y.astype('float32')
            train_l_sum += l.asscalar()
            train_acc_sum += (y_hat.argmax(axis=1) == y).sum().asscalar()
            n += y.size
        test_acc = evaluate_accuracy(test_iter, net)
        print('epoch %d, loss %.4f, train acc %.3f, test acc %.3f'
              % (epoch + 1, train_l_sum / n, train_acc_sum / n, test_acc))
Example 8: _evaluate_accuracy
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import argmax [as alias]
def _evaluate_accuracy(self, X, Y, batch_size=64):
    data_loader = self.generate_batch(X, Y, batch_size, shuffled=False)
    softmax_loss = gluon.loss.SoftmaxCrossEntropyLoss()
    num_batches = len(X) // batch_size
    metric = mx.metric.Accuracy()
    loss_avg = 0.
    for i, (data, label) in enumerate(data_loader):
        data = data.as_in_context(self.model_ctx)
        label = label.as_in_context(self.model_ctx)
        output = self.model(data)
        predictions = nd.argmax(output, axis=1)
        loss = softmax_loss(output, label)
        metric.update(preds=predictions, labels=label)
        loss_avg = loss_avg * i / (i + 1) + nd.mean(loss).asscalar() / (i + 1)
        if i + 1 == num_batches:
            break
    return metric.get()[1], loss_avg
Example 9: predict
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import argmax [as alias]
def predict(yolo: Yolo, x, threshold=0.5):
    """
    Return the predicted class names, confidences and bounding boxes.
    :param yolo: a trained Yolo network
    :param x: an input batch containing exactly one image
    :param threshold: confidence threshold for keeping predictions
    """
    assert len(x) == 1, "Only one image is supported for now"
    ypre = yolo(x)
    label, preds, location = deal_output(ypre, yolo.s, b=yolo.b, c=yolo.class_num)
    indexs = []
    for i, c in enumerate(preds[0]):
        if c > threshold:
            indexs.append(i)
    class_names = []
    C_list = []
    bos_list = []
    for index in indexs:
        label_index = int(index / 2)
        location_offect = int(index % 2)
        class_index = nd.argmax(label[0][label_index], axis=0)
        C = preds[0][index]
        locat = location[0][label_index][location_offect]
        C_list.append(C.asscalar())
        # translate the class index into a class name
        label_name = yolo.class_names
        text = label_name[int(class_index.asscalar())]
        class_names.append(text)
        # translate the cell-relative prediction into a bounding box
        x, y, w, h = locat
        w, h = nd.power(w, 2), nd.power(h, 2)
        ceil = 1 / 4
        row = int(label_index / 4)
        columns = label_index % 4
        x_center = columns * ceil + x
        y_center = row * ceil + y
        x_min, y_min, x_max, y_max = (x_center - 0.5 * w, y_center - 0.5 * h,
                                      x_center + 0.5 * w, y_center + 0.5 * h)
        box = nd.concatenate([x_min, y_min, x_max, y_max], axis=0) * 256
        bos_list.append(box.asnumpy())
    return class_names, C_list, bos_list
Example 10: pseudo_labeling
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import argmax [as alias]
def pseudo_labeling(self, logits, confidence=0.):
    softmax = nd.softmax(logits, axis=1)
    prob = nd.max(softmax, axis=1)
    p_label = nd.argmax(softmax, axis=1)
    mask = prob > confidence
    return p_label, mask

# def update_beta(self):
#     return self.args.beta
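A minimal sketch of how the returned pseudo-labels and mask might feed a self-training step; net, unlabeled_batch, loss_fn, trainer_obj and the 0.9 threshold are hypothetical placeholders, not part of the original source:

logits = net(unlabeled_batch)                      # raw class scores for unlabeled data
p_label, mask = trainer_obj.pseudo_labeling(logits, confidence=0.9)
num_confident = mask.sum().asscalar()              # samples passing the confidence threshold
# A masked pseudo-label loss could then be written as:
#     (loss_fn(logits, p_label) * mask).sum() / max(num_confident, 1.0)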
Example 11: evaluate_accuracy
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import argmax [as alias]
def evaluate_accuracy(data_iterator, net):
    acc = mx.metric.Accuracy()
    for i, (data, label) in enumerate(data_iterator):
        data = data.as_in_context(model_ctx).reshape((-1, 784))
        label = label.as_in_context(model_ctx)
        output = net(data)
        predictions = nd.argmax(output, axis=1)
        acc.update(preds=predictions, labels=label)
    return acc.get()[1]
Example 12: test
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import argmax [as alias]
def test(data_iterator, net, ctx):
    acc = mx.metric.Accuracy()
    for i, (data, label) in tqdm(enumerate(data_iterator), total=len(data_iterator),
                                 ncols=70, leave=False, unit='b'):
        data = data.as_in_context(ctx)
        label = label.as_in_context(ctx)
        prob, _, _ = net(data, label)
        predictions = nd.argmax(prob, axis=1)
        acc.update(preds=predictions, labels=label)
    return acc.get()[1]
Example 13: evaluate_accuracy
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import argmax [as alias]
def evaluate_accuracy(data_iter, net, ctx=[mx.cpu()]):
    """Evaluate the accuracy of a model on the given data set."""
    if isinstance(ctx, mx.Context):
        ctx = [ctx]
    acc_sum, n = nd.array([0]), 0
    for batch in data_iter:
        features, labels, _ = _get_batch(batch, ctx)
        for X, y in zip(features, labels):
            y = y.astype('float32')
            acc_sum += (net(X).argmax(axis=1) == y).sum().copyto(mx.cpu())
            n += y.size
        acc_sum.wait_to_read()
    return acc_sum.asscalar() / n
Example 14: predict_rnn
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import argmax [as alias]
def predict_rnn(prefix, num_chars, rnn, params, init_rnn_state,
                num_hiddens, vocab_size, ctx, idx_to_char, char_to_idx):
    """Predict the next chars with an RNN model."""
    state = init_rnn_state(1, num_hiddens, ctx)
    output = [char_to_idx[prefix[0]]]
    for t in range(num_chars + len(prefix) - 1):
        X = to_onehot(nd.array([output[-1]], ctx=ctx), vocab_size)
        (Y, state) = rnn(X, state, params)
        if t < len(prefix) - 1:
            output.append(char_to_idx[prefix[t + 1]])
        else:
            output.append(int(Y[0].argmax(axis=1).asscalar()))
    return ''.join([idx_to_char[i] for i in output])
Example 15: predict_rnn_gluon
# Required import: from mxnet import nd [as alias]
# Or: from mxnet.nd import argmax [as alias]
def predict_rnn_gluon(prefix, num_chars, model, vocab_size, ctx, idx_to_char,
                      char_to_idx):
    """Predict the next chars with a Gluon RNN model."""
    state = model.begin_state(batch_size=1, ctx=ctx)
    output = [char_to_idx[prefix[0]]]
    for t in range(num_chars + len(prefix) - 1):
        X = nd.array([output[-1]], ctx=ctx).reshape((1, 1))
        (Y, state) = model(X, state)
        if t < len(prefix) - 1:
            output.append(char_to_idx[prefix[t + 1]])
        else:
            output.append(int(Y.argmax(axis=1).asscalar()))
    return ''.join([idx_to_char[i] for i in output])