This article collects typical usage examples of the Python method sklearn.metrics.balanced_accuracy_score. If you are asking yourself what metrics.balanced_accuracy_score does, how to call it, or what it looks like in real code, the curated examples here should help. You can also explore further usage examples from its parent module, sklearn.metrics.
The following shows 13 code examples of metrics.balanced_accuracy_score, sorted by popularity by default.
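Before diving into the excerpts, here is a minimal, self-contained sketch of calling the metric directly; the labels are invented purely for illustration. Balanced accuracy is the macro-average of per-class recall:

from sklearn.metrics import balanced_accuracy_score

y_true = [0, 0, 0, 0, 1, 1]
y_pred = [0, 0, 0, 0, 1, 0]

# recall(class 0) = 4/4 = 1.0, recall(class 1) = 1/2 = 0.5
print(balanced_accuracy_score(y_true, y_pred))  # (1.0 + 0.5) / 2 = 0.75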
Example 1: infer
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import balanced_accuracy_score [as alias]
def infer(model, test_loader, criterion, opt):
model.eval()
test_true = []
test_pred = []
with torch.no_grad():
for i, (data, label) in enumerate(test_loader):
data, label = data.to(opt.device), label.to(opt.device).squeeze()
data = data.permute(0, 2, 1).unsqueeze(-1)
logits = model(data)
            loss = criterion(logits, label)  # label was already squeezed above
pred = logits.max(dim=1)[1]
test_true.append(label.cpu().numpy())
test_pred.append(pred.detach().cpu().numpy())
opt.test_losses.update(loss.item())
test_true = np.concatenate(test_true)
test_pred = np.concatenate(test_pred)
overall_acc = metrics.accuracy_score(test_true, test_pred)
class_acc = metrics.balanced_accuracy_score(test_true, test_pred)
return overall_acc, class_acc, opt
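infer returns both plain accuracy (overall_acc) and balanced accuracy (class_acc) because the two can diverge sharply on imbalanced test sets. A minimal sketch with invented labels:

import numpy as np
from sklearn import metrics

# A degenerate predictor that always outputs the majority class:
y_true = np.array([0] * 9 + [1])
y_pred = np.zeros(10, dtype=int)

print(metrics.accuracy_score(y_true, y_pred))           # 0.9
print(metrics.balanced_accuracy_score(y_true, y_pred))  # (1.0 + 0.0) / 2 = 0.5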
Example 2: test_sklearn_custom_scoring_and_cv
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import balanced_accuracy_score [as alias]
def test_sklearn_custom_scoring_and_cv(tmp_dir):
tuner = sklearn_tuner.Sklearn(
oracle=kt.oracles.BayesianOptimization(
objective=kt.Objective('score', 'max'),
max_trials=10),
hypermodel=build_model,
scoring=metrics.make_scorer(metrics.balanced_accuracy_score),
cv=model_selection.StratifiedKFold(5),
directory=tmp_dir)
x = np.random.uniform(size=(50, 10))
y = np.random.randint(0, 2, size=(50,))
tuner.search(x, y)
assert len(tuner.oracle.trials) == 10
best_trial = tuner.oracle.get_best_trials()[0]
assert best_trial.status == 'COMPLETED'
assert best_trial.score is not None
assert best_trial.best_step == 0
assert best_trial.metrics.exists('score')
# Make sure best model can be reloaded.
best_model = tuner.get_best_models()[0]
best_model.score(x, y)
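Example 2 wraps the metric in metrics.make_scorer so the tuner can use it like a built-in scorer. The same wrapping works anywhere scikit-learn accepts a scoring argument; a minimal sketch with a toy estimator (LogisticRegression chosen only for illustration):

import numpy as np
from sklearn import metrics, model_selection
from sklearn.linear_model import LogisticRegression

x = np.random.uniform(size=(50, 10))
y = np.random.randint(0, 2, size=(50,))

# make_scorer turns a (y_true, y_pred) metric into a scorer(estimator, X, y) callable.
scorer = metrics.make_scorer(metrics.balanced_accuracy_score)
scores = model_selection.cross_val_score(
    LogisticRegression(), x, y,
    scoring=scorer, cv=model_selection.StratifiedKFold(5))
print(scores.mean())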
Example 3: fit
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import balanced_accuracy_score [as alias]
def fit(self, X_train, y_train, X_val, y_val):
results = dict()
self.model = SVC(**self.config)
self.all_nan = np.all(np.isnan(X_train), axis=0)
X_train = X_train[:, ~self.all_nan]
X_val = X_val[:, ~self.all_nan]
self.model.fit(X_train, y_train)
pred_val_probas = self.model.predict_proba(X_val)
pred_train = self.model.predict(X_train)
pred_val = self.model.predict(X_val)
results["train_acc"] = metrics.accuracy_score(y_train, pred_train)
results["train_balanced_acc"] = metrics.balanced_accuracy_score(y_train, pred_train)
results["val_acc"] = metrics.accuracy_score(y_val, pred_val)
results["val_balanced_acc"] = metrics.balanced_accuracy_score(y_val, pred_val)
results["val_preds"] = pred_val_probas.tolist()
results["labels"] = y_val.tolist()
return results
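A caveat for Example 3: SVC only exposes predict_proba when it is constructed with probability=True, so self.config presumably carries that flag; without it, the predict_proba call raises AttributeError. A minimal sketch:

import numpy as np
from sklearn.svm import SVC

x = np.random.uniform(size=(40, 5))
y = np.random.randint(0, 2, size=(40,))

# probability=True fits an internal Platt-scaling calibration (slower to train).
model = SVC(probability=True).fit(x, y)
print(model.predict_proba(x[:3]))  # class probabilities with shape (3, 2)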
Example 4: infer
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import balanced_accuracy_score [as alias]
def infer(test_queue, model, criterion):
model.eval()
objs = utils.AverageMeter()
test_true = []
test_pred = []
with torch.no_grad():
for i, (data, label) in enumerate(test_queue):
data, label = data.to(DEVICE), label.to(DEVICE).squeeze()
data = data.permute(0, 2, 1).unsqueeze(3)
out, out_aux = model(data)
pred = out.max(dim=1)[1]
test_true.append(label.cpu().numpy())
test_pred.append(pred.detach().cpu().numpy())
            loss = criterion(out, label)  # label was already squeezed above
n = label.size(0)
objs.update(loss.item(), n)
test_true = np.concatenate(test_true)
test_pred = np.concatenate(test_pred)
overall_acc = metrics.accuracy_score(test_true, test_pred)
class_acc = metrics.balanced_accuracy_score(test_true, test_pred)
return overall_acc, class_acc, objs.avg
Example 5: test_balanced_accuracy_score_unseen
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import balanced_accuracy_score [as alias]
def test_balanced_accuracy_score_unseen():
assert_warns_message(UserWarning, 'y_pred contains classes not in y_true',
balanced_accuracy_score, [0, 0, 0], [0, 0, 1])
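The warning under test is easy to reproduce directly; a minimal sketch:

import warnings
from sklearn.metrics import balanced_accuracy_score

# Class 1 appears in y_pred but never in y_true, so its recall is undefined.
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always')
    balanced_accuracy_score([0, 0, 0], [0, 0, 1])
print(caught[0].message)  # y_pred contains classes not in y_true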
Example 6: test_balanced_accuracy_score
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import balanced_accuracy_score [as alias]
def test_balanced_accuracy_score(y_true, y_pred):
macro_recall = recall_score(y_true, y_pred, average='macro',
labels=np.unique(y_true))
with ignore_warnings():
# Warnings are tested in test_balanced_accuracy_score_unseen
balanced = balanced_accuracy_score(y_true, y_pred)
assert balanced == pytest.approx(macro_recall)
adjusted = balanced_accuracy_score(y_true, y_pred, adjusted=True)
chance = balanced_accuracy_score(y_true, np.full_like(y_true, y_true[0]))
assert adjusted == (balanced - chance) / (1 - chance)
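Example 6 encodes what adjusted=True means: chance-level performance (1/n_classes for balanced accuracy) is rescaled to 0 while a perfect score remains 1. A minimal numeric check with invented labels:

from sklearn.metrics import balanced_accuracy_score

y_true = [0, 0, 0, 0, 1, 1]
y_pred = [0, 0, 0, 0, 1, 0]

balanced = balanced_accuracy_score(y_true, y_pred)                 # 0.75
adjusted = balanced_accuracy_score(y_true, y_pred, adjusted=True)  # binary chance level is 0.5
print(adjusted, (balanced - 0.5) / (1 - 0.5))                      # 0.5 0.5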
Example 7: test_balanced_accuracy
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import balanced_accuracy_score [as alias]
def test_balanced_accuracy():
output = torch.rand((16, 4))
output_np = output.numpy()
target = torch.randint(0, 4, (16,))
target_np = target.numpy()
expected = 100 * balanced_accuracy_score(target_np, np.argmax(output_np, 1))
result = BalancedAccuracy()(output, target).flatten().numpy()
assert np.allclose(expected, result)
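The BalancedAccuracy class in Example 7 comes from the repository under test; purely as an assumption about its behavior, here is a hypothetical minimal implementation that would satisfy this test (argmax over class logits, sklearn under the hood, result scaled to percent):

import torch
from sklearn.metrics import balanced_accuracy_score

class BalancedAccuracy:
    # Hypothetical sketch; the real class in the source repository may differ.
    def __call__(self, output, target):
        preds = output.argmax(dim=1).cpu().numpy()
        score = balanced_accuracy_score(target.cpu().numpy(), preds)
        return torch.tensor([100.0 * score])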
Example 8: train_step
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import balanced_accuracy_score [as alias]
def train_step(model, train_loader, optimizer, criterion, opt):
model.train()
train_pred = []
train_true = []
for data, label in train_loader:
data, label = data.to(opt.device), label.to(opt.device).squeeze()
data = data.permute(0, 2, 1).unsqueeze(-1)
optimizer.zero_grad()
logits = model(data)
loss = criterion(logits, label)
loss.backward()
optimizer.step()
opt.train_losses.update(loss.item())
preds = logits.max(dim=1)[1]
train_true.append(label.cpu().numpy())
train_pred.append(preds.detach().cpu().numpy())
train_true = np.concatenate(train_true)
train_pred = np.concatenate(train_pred)
overall_acc = metrics.accuracy_score(train_true, train_pred)
class_acc = metrics.balanced_accuracy_score(train_true, train_pred)
return overall_acc, class_acc, opt
Example 9: test_sklearn_additional_metrics
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import balanced_accuracy_score [as alias]
def test_sklearn_additional_metrics(tmp_dir):
tuner = sklearn_tuner.Sklearn(
oracle=kt.oracles.BayesianOptimization(
objective=kt.Objective('score', 'max'),
max_trials=10),
hypermodel=build_model,
metrics=[metrics.balanced_accuracy_score,
metrics.recall_score],
directory=tmp_dir)
x = np.random.uniform(size=(50, 10))
y = np.random.randint(0, 2, size=(50,))
tuner.search(x, y)
assert len(tuner.oracle.trials) == 10
best_trial = tuner.oracle.get_best_trials()[0]
assert best_trial.status == 'COMPLETED'
assert best_trial.score is not None
assert best_trial.best_step == 0
assert best_trial.metrics.exists('score')
assert best_trial.metrics.exists('balanced_accuracy_score')
assert best_trial.metrics.exists('recall_score')
# Make sure best model can be reloaded.
best_model = tuner.get_best_models()[0]
best_model.score(x, y)
Example 10: score
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import balanced_accuracy_score [as alias]
def score(self, X_test, y_test):
results = dict()
y_pred = self.predict(X_test)
results["test_acc"] = metrics.accuracy_score(y_test, y_pred)
results["test_balanced_acc"] = metrics.balanced_accuracy_score(y_test, y_pred)
return results
Example 11: infer
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import balanced_accuracy_score [as alias]
def infer(valid_queue, model, criterion, valid_losses):
model.eval()
test_true = []
test_pred = []
valid_losses.reset()
with torch.no_grad():
for i, data in enumerate(valid_queue):
data = data.to(DEVICE)
inputs = data.pos.transpose(2, 1).unsqueeze(3)
gt = data.y
out = model(inputs)
pred = out.max(dim=1)[1]
test_true.append(gt.cpu().numpy())
test_pred.append(pred.detach().cpu().numpy())
loss = criterion(out, gt.squeeze())
n = gt.size(0)
valid_losses.update(loss.item(), n)
test_true = np.concatenate(test_true)
test_pred = np.concatenate(test_pred)
overall_acc = metrics.accuracy_score(test_true, test_pred)
class_acc = metrics.balanced_accuracy_score(test_true, test_pred)
return overall_acc, class_acc, valid_losses
Example 12: train_step
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import balanced_accuracy_score [as alias]
def train_step(train_queue, model, criterion, optimizer, args):
objs = utils.AverageMeter()
train_true = []
train_pred = []
for step, (data, label) in enumerate(tqdm(train_queue)):
model.train()
data, label = data.to(DEVICE), label.to(DEVICE).squeeze()
data = data.permute(0, 2, 1).unsqueeze(3)
n = data.size(0)
optimizer.zero_grad()
out, out_aux = model(data)
loss = criterion(out, label)
if args.auxiliary:
loss_aux = criterion(out_aux, label)
loss += args.auxiliary_weight * loss_aux
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
optimizer.step()
pred = out.max(dim=1)[1]
train_true.append(label.cpu().numpy())
train_pred.append(pred.detach().cpu().numpy())
objs.update(loss.item(), n)
train_true = np.concatenate(train_true)
train_pred = np.concatenate(train_pred)
overall_acc = metrics.accuracy_score(train_true, train_pred)
class_acc = metrics.balanced_accuracy_score(train_true, train_pred)
return overall_acc, class_acc, objs.avg
Example 13: return_scoreval
# Required import: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import balanced_accuracy_score [as alias]
# Note: this excerpt additionally assumes numpy as np, plus f1_score, roc_auc_score,
# precision_score, recall_score, accuracy_score, mean_squared_error and
# mean_absolute_error imported from sklearn.metrics.
def return_scoreval(scoretype, y_true, y_preds, y_proba, modeltype):
if modeltype.endswith('Classification'):
if scoretype == 'f1':
try:
scoreval = f1_score(y_true, y_preds)
except:
scoreval = f1_score(y_true, y_preds, average = 'micro')
elif scoretype == 'roc_auc':
#### ROC AUC can be computed only for Binary classifications ###
try:
scoreval = roc_auc_score(y_true, y_proba)
except:
scoreval = balanced_accuracy_score(y_true, y_preds)
                print('Multi-class problem: computed Balanced Accuracy instead of ROC-AUC.')
elif scoretype == 'precision':
try:
scoreval = precision_score(y_true, y_preds)
except:
scoreval = precision_score(y_true, y_preds, average='micro')
elif scoretype == 'recall':
try:
scoreval = recall_score(y_true, y_preds)
except:
scoreval = recall_score(y_true, y_preds, average='micro')
        elif scoretype in ['balanced_accuracy', 'accuracy', 'balanced-accuracy']:
            try:
                scoreval = balanced_accuracy_score(y_true, y_preds)
            except:
                scoreval = accuracy_score(y_true, y_preds)
        else:
            print('Scoring type not recognized - defaulting to F1.')
            scoretype = 'f1'
            try:
                scoreval = f1_score(y_true, y_preds)
            except:
                scoreval = f1_score(y_true, y_preds, average='micro')
else:
if scoretype == 'rmse':
try:
scoreval = np.sqrt(mean_squared_error(y_true, y_preds))
except:
scoreval = 0
        elif scoretype == 'mae':
            try:
                scoreval = mean_absolute_error(y_true, y_preds)
            except:
                scoreval = 0
        else:
            print('Scoring type not recognized - defaulting to MAE.')
            scoretype = 'mae'
            scoreval = mean_absolute_error(y_true, y_preds)
return scoreval
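To round out Example 13, a hedged usage sketch; the classifier, the data, and the modeltype string 'Binary_Classification' are all invented for illustration (modeltype only needs to end with 'Classification'):

import numpy as np
from sklearn.linear_model import LogisticRegression

x = np.random.uniform(size=(60, 4))
y = np.random.randint(0, 2, size=(60,))
clf = LogisticRegression().fit(x, y)

y_preds = clf.predict(x)
y_proba = clf.predict_proba(x)[:, 1]  # positive-class probabilities for roc_auc
print(return_scoreval('roc_auc', y, y_preds, y_proba, 'Binary_Classification'))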