This article collects typical usage examples of the Python method sklearn.metrics.adjusted_rand_score. If you are wondering what metrics.adjusted_rand_score does, how to use it, or want to see it in real code, the curated examples below should help. You can also explore the containing module, sklearn.metrics, for related functionality.
The following shows 15 code examples of metrics.adjusted_rand_score, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python examples.
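Before the project examples, here is a minimal, self-contained sketch of how adjusted_rand_score is called; the label lists are invented for illustration. The score is invariant to how cluster IDs are numbered and only measures whether two groupings agree: 1.0 for identical partitions, around 0 (possibly negative) for chance-level agreement.

from sklearn.metrics import adjusted_rand_score

# Two labelings of the same six points; cluster IDs are arbitrary.
labels_true = [0, 0, 1, 1, 2, 2]
labels_pred = [1, 1, 0, 0, 2, 2]

print(adjusted_rand_score(labels_true, labels_pred))  # 1.0: same partition, renamed IDs
print(adjusted_rand_score(labels_true, [0, 1, 2, 0, 1, 2]))  # -0.25: worse than chance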
Example 1: clustering_scores
# Module needed: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import adjusted_rand_score [as alias]
def clustering_scores(self, prediction_algorithm: str = "knn") -> Tuple:
if self.gene_dataset.n_labels > 1:
latent, _, labels = self.get_latent()
if prediction_algorithm == "knn":
            labels_pred = KMeans(
                self.gene_dataset.n_labels, n_init=200
            ).fit_predict(latent)  # n_jobs>1 ?
elif prediction_algorithm == "gmm":
gmm = GMM(self.gene_dataset.n_labels)
gmm.fit(latent)
labels_pred = gmm.predict(latent)
asw_score = silhouette_score(latent, labels)
nmi_score = NMI(labels, labels_pred)
ari_score = ARI(labels, labels_pred)
uca_score = unsupervised_clustering_accuracy(labels, labels_pred)[0]
logger.debug(
"Clustering Scores:\nSilhouette: %.4f\nNMI: %.4f\nARI: %.4f\nUCA: %.4f"
% (asw_score, nmi_score, ari_score, uca_score)
)
return asw_score, nmi_score, ari_score, uca_score
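The snippet above comes from an scVI-style trainer and relies on aliases that are not shown on this page. A plausible import block that would make it self-contained; the GMM alias for GaussianMixture is an assumption, not confirmed by the source:

from sklearn.cluster import KMeans
from sklearn.metrics import adjusted_rand_score as ARI
from sklearn.metrics import normalized_mutual_info_score as NMI
from sklearn.metrics import silhouette_score
from sklearn.mixture import GaussianMixture as GMM  # assumed alias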
Example 2: test_spectral_clustering
# Module needed: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import adjusted_rand_score [as alias]
def test_spectral_clustering(eigen_solver, assign_labels):
S = np.array([[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 0.2, 0.0, 0.0, 0.0],
[0.2, 0.2, 0.2, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0],
[0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 1.0]])
for mat in (S, sparse.csr_matrix(S)):
model = SpectralClustering(random_state=0, n_clusters=2,
affinity='precomputed',
eigen_solver=eigen_solver,
assign_labels=assign_labels
).fit(mat)
labels = model.labels_
if labels[0] == 0:
labels = 1 - labels
assert adjusted_rand_score(labels, [1, 1, 1, 0, 0, 0, 0]) == 1
model_copy = pickle.loads(pickle.dumps(model))
assert model_copy.n_clusters == model.n_clusters
assert model_copy.eigen_solver == model.eigen_solver
assert_array_equal(model_copy.labels_, model.labels_)
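One detail worth noting: adjusted_rand_score is permutation-invariant, so the label flip above (labels = 1 - labels) is not needed for the ARI assertion itself; it only makes the later array comparisons deterministic. A quick check of the invariance:

from sklearn.metrics import adjusted_rand_score

ref = [1, 1, 1, 0, 0, 0, 0]
assert adjusted_rand_score(ref, ref) == 1
assert adjusted_rand_score([0, 0, 0, 1, 1, 1, 1], ref) == 1  # swapped IDs, same score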
Example 3: test_discretize
# Module needed: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import adjusted_rand_score [as alias]
def test_discretize(n_samples):
    # Test discretize with a noisy assignment matrix
    random_state = np.random.RandomState(seed=8)
    for n_class in range(2, 10):
        # random class labels
        y_true = random_state.randint(0, n_class + 1, n_samples)
        y_true = np.array(y_true, float)  # np.float was removed in NumPy 1.24
        # noisy class assignment matrix
        y_indicator = sparse.coo_matrix(
            (np.ones(n_samples), (np.arange(n_samples), y_true)),
            shape=(n_samples, n_class + 1))
        y_true_noisy = (y_indicator.toarray()
                        + 0.1 * random_state.randn(n_samples, n_class + 1))
y_pred = discretize(y_true_noisy, random_state)
assert adjusted_rand_score(y_true, y_pred) > 0.8
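discretize is a private helper of sklearn's spectral clustering module, so here is a rough, runnable approximation of what the test exercises: recovering labels from a noisy one-hot matrix. Using argmax in place of discretize is a simplification for illustration, not the actual algorithm.

import numpy as np
from scipy import sparse
from sklearn.metrics import adjusted_rand_score

rng = np.random.RandomState(8)
n_samples, n_class = 100, 4
y_true = rng.randint(0, n_class, n_samples)
# one-hot indicator matrix plus Gaussian noise, as in the test above
y_ind = sparse.coo_matrix((np.ones(n_samples),
                           (np.arange(n_samples), y_true)),
                          shape=(n_samples, n_class)).toarray()
y_noisy = y_ind + 0.1 * rng.randn(n_samples, n_class)
y_pred = y_noisy.argmax(axis=1)  # crude stand-in for discretize
print(adjusted_rand_score(y_true, y_pred))  # close to 1 at this noise level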
Example 4: calculate_metrics
# Module needed: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import adjusted_rand_score [as alias]
def calculate_metrics(y_true, y_pred, duration, clustering=False):
    """
    Return a data frame that contains the precision, accuracy, recall and the duration.
    For clustering, it applies the adjusted Rand index instead.
    """
    if not clustering:
        # np.float was removed in NumPy 1.24; builtin float is equivalent here
        res = pd.DataFrame(data=np.zeros((1, 5), dtype=float), index=[0],
                           columns=['precision', 'accuracy', 'error', 'recall', 'duration'])
        res['precision'] = precision_score(y_true, y_pred, average='macro')
        res['accuracy'] = accuracy_score(y_true, y_pred)
        res['recall'] = recall_score(y_true, y_pred, average='macro')
        res['duration'] = duration
        res['error'] = 1 - res['accuracy']
        return res
    else:
        res = pd.DataFrame(data=np.zeros((1, 2), dtype=float), index=[0],
                           columns=['ari', 'duration'])
        res['duration'] = duration
        # argument order is harmless here: adjusted_rand_score is symmetric
        res['ari'] = adjusted_rand_score(y_pred, y_true)
        return res
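A hypothetical call, assuming pandas, numpy, and the sklearn scorers used above are imported:

# hypothetical inputs, for illustration only
y_true = [0, 0, 1, 1, 2, 2]
y_pred = [0, 0, 1, 2, 2, 2]
print(calculate_metrics(y_true, y_pred, duration=0.42, clustering=True))
# yields a one-row frame with columns ['ari', 'duration']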
Example 5: compare_segms_metric_ars
# Module needed: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import adjusted_rand_score [as alias]
def compare_segms_metric_ars(dict_segm_a, dict_segm_b, suffix=''):
""" compute ARS for each pair of segmentation
:param {str: ndarray} dict_segm_a:
:param {str: ndarray} dict_segm_b:
:param str suffix:
:return DF:
"""
df_ars = pd.DataFrame()
for n in dict_segm_a:
if n not in dict_segm_b:
logging.warning('particular key "%s" is missing in dictionary', n)
continue
y_a = dict_segm_a[n].ravel()
y_b = dict_segm_b[n].ravel()
dict_ars = {'image': n,
'ARS' + suffix: metrics.adjusted_rand_score(y_a, y_b)}
        # DataFrame.append was removed in pandas 2.0; concatenate a one-row frame instead
        df_ars = pd.concat([df_ars, pd.DataFrame([dict_ars])], ignore_index=True)
df_ars.set_index(['image'], inplace=True)
return df_ars
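A toy invocation with two 2x2 "segmentations" (arrays invented for illustration); the keys must match across the two dicts, and swapped region IDs still give a perfect score:

import numpy as np

# invented toy segmentations
segm_a = {'img1': np.array([[0, 0], [1, 1]])}
segm_b = {'img1': np.array([[1, 1], [0, 0]])}
print(compare_segms_metric_ars(segm_a, segm_b, suffix='_toy'))  # ARS_toy == 1.0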
Example 6: init_prob_kmeans
# Module needed: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import adjusted_rand_score [as alias]
def init_prob_kmeans(model, eval_loader, args):
torch.manual_seed(1)
model = model.to(device)
    # initialize cluster parameters
model.eval()
targets = np.zeros(len(eval_loader.dataset))
feats = np.zeros((len(eval_loader.dataset), 512))
for _, (x, label, idx) in enumerate(eval_loader):
x = x.to(device)
feat = model(x)
idx = idx.data.cpu().numpy()
feats[idx, :] = feat.data.cpu().numpy()
targets[idx] = label.data.cpu().numpy()
pca = PCA(n_components=args.n_clusters)
feats = pca.fit_transform(feats)
kmeans = KMeans(n_clusters=args.n_clusters, n_init=20)
y_pred = kmeans.fit_predict(feats)
acc, nmi, ari = cluster_acc(targets, y_pred), nmi_score(targets, y_pred), ari_score(targets, y_pred)
print('Init acc {:.4f}, nmi {:.4f}, ari {:.4f}'.format(acc, nmi, ari))
probs = feat2prob(torch.from_numpy(feats), torch.from_numpy(kmeans.cluster_centers_))
return acc, nmi, ari, kmeans.cluster_centers_, probs
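feat2prob is a project-specific helper that is not shown on this page. A common definition in deep-clustering code, and a reasonable guess at what it does here, is the Student's t soft assignment popularized by DEC; treat this as an assumption rather than this repository's actual implementation:

import torch

def feat2prob(feat, centers, alpha=1.0):
    # assumed implementation: Student's t-kernel similarity to each center
    dist = torch.sum((feat.unsqueeze(1) - centers) ** 2, dim=2)
    q = 1.0 / (1.0 + dist / alpha)
    q = q ** ((alpha + 1.0) / 2.0)
    return q / q.sum(dim=1, keepdim=True)  # normalize rows to probabilities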
Example 7: test
# Module needed: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import adjusted_rand_score [as alias]
def test(model, test_loader, args):
model.eval()
acc_record = AverageMeter()
    preds = np.array([])
    targets = np.array([])
    feats = np.zeros((len(test_loader.dataset), args.n_clusters))
    probs = np.zeros((len(test_loader.dataset), args.n_clusters))
for batch_idx, (x, label, idx) in enumerate(tqdm(test_loader)):
x, label = x.to(device), label.to(device)
feat = model(x)
prob = feat2prob(feat, model.center)
_, pred = prob.max(1)
        targets = np.append(targets, label.cpu().numpy())
        preds = np.append(preds, pred.cpu().numpy())
idx = idx.data.cpu().numpy()
feats[idx, :] = feat.cpu().detach().numpy()
probs[idx, :] = prob.cpu().detach().numpy()
acc, nmi, ari = cluster_acc(targets.astype(int), preds.astype(int)), nmi_score(targets, preds), ari_score(targets, preds)
print('Test acc {:.4f}, nmi {:.4f}, ari {:.4f}'.format(acc, nmi, ari))
probs = torch.from_numpy(probs)
return acc, nmi, ari, probs
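cluster_acc is likewise a project helper not shown here. The standard definition in this line of work matches predicted to true labels with the Hungarian algorithm before computing accuracy; the sketch below follows that convention and is an assumption about this repo:

import numpy as np
from scipy.optimize import linear_sum_assignment

def cluster_acc(y_true, y_pred):
    # assumed implementation: optimal one-to-one label matching, then accuracy
    y_true = y_true.astype(np.int64)
    y_pred = y_pred.astype(np.int64)
    n = max(y_pred.max(), y_true.max()) + 1
    w = np.zeros((n, n), dtype=np.int64)
    for i in range(y_pred.size):
        w[y_pred[i], y_true[i]] += 1
    row, col = linear_sum_assignment(w.max() - w)  # maximize matched counts
    return w[row, col].sum() / y_pred.size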
Example 8: init_prob_kmeans
# Module needed: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import adjusted_rand_score [as alias]
def init_prob_kmeans(model, eval_loader, args):
torch.manual_seed(1)
model = model.to(device)
    # initialize cluster parameters
model.eval()
targets = np.zeros(len(eval_loader.dataset))
feats = np.zeros((len(eval_loader.dataset), 512))
for _, (x, label, idx) in enumerate(eval_loader):
x = x.to(device)
feat = model(x)
feat = feat.view(x.size(0), -1)
idx = idx.data.cpu().numpy()
feats[idx, :] = feat.data.cpu().numpy()
targets[idx] = label.data.cpu().numpy()
# evaluate clustering performance
pca = PCA(n_components=args.n_clusters)
feats = pca.fit_transform(feats)
kmeans = KMeans(n_clusters=args.n_clusters, n_init=20)
y_pred = kmeans.fit_predict(feats)
acc, nmi, ari = cluster_acc(targets, y_pred), nmi_score(targets, y_pred), ari_score(targets, y_pred)
print('Init acc {:.4f}, nmi {:.4f}, ari {:.4f}'.format(acc, nmi, ari))
probs = feat2prob(torch.from_numpy(feats), torch.from_numpy(kmeans.cluster_centers_))
return acc, nmi, ari, kmeans.cluster_centers_, probs
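Every init_prob_kmeans variant on this page follows the same recipe: collect features from the frozen model, reduce them to n_clusters dimensions with PCA, run KMeans, then score the result against the ground truth. Stripped of the network, the pipeline is just the following; the random arrays stand in for real features, so the ARI will be near zero:

import numpy as np
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.metrics import adjusted_rand_score as ari_score

rng = np.random.RandomState(0)
feats = rng.randn(300, 512)        # stand-in for network features
targets = rng.randint(0, 5, 300)   # stand-in ground-truth labels

feats = PCA(n_components=5).fit_transform(feats)
y_pred = KMeans(n_clusters=5, n_init=20).fit_predict(feats)
print(ari_score(targets, y_pred))  # near 0 on random features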
Example 9: test
# Module needed: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import adjusted_rand_score [as alias]
def test(model, test_loader, args, epoch=0):
model.eval()
acc_record = AverageMeter()
    preds = np.array([])
    targets = np.array([])
    feats = np.zeros((len(test_loader.dataset), args.n_clusters))
    probs = np.zeros((len(test_loader.dataset), args.n_clusters))
for batch_idx, (x, label, idx) in enumerate(tqdm(test_loader)):
x, label = x.to(device), label.to(device)
output = model(x)
prob = feat2prob(output, model.center)
_, pred = prob.max(1)
        targets = np.append(targets, label.cpu().numpy())
        preds = np.append(preds, pred.cpu().numpy())
        idx = idx.data.cpu().numpy()
        feats[idx, :] = output.cpu().detach().numpy()
        probs[idx, :] = prob.cpu().detach().numpy()
acc, nmi, ari = cluster_acc(targets.astype(int), preds.astype(int)), nmi_score(targets, preds), ari_score(targets, preds)
print('Test acc {:.4f}, nmi {:.4f}, ari {:.4f}'.format(acc, nmi, ari))
return acc, nmi, ari, torch.from_numpy(probs)
Example 10: init_prob_kmeans
# Module needed: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import adjusted_rand_score [as alias]
def init_prob_kmeans(model, eval_loader, args):
torch.manual_seed(1)
model = model.to(device)
    # initialize cluster parameters
model.eval()
targets = np.zeros(len(eval_loader.dataset))
feats = np.zeros((len(eval_loader.dataset), 512))
for _, (x, label, idx) in enumerate(eval_loader):
x = x.to(device)
feat = model(x)
idx = idx.data.cpu().numpy()
feats[idx, :] = feat.data.cpu().numpy()
targets[idx] = label.data.cpu().numpy()
# evaluate clustering performance
pca = PCA(n_components=args.n_clusters)
feats = pca.fit_transform(feats)
kmeans = KMeans(n_clusters=args.n_clusters, n_init=20)
y_pred = kmeans.fit_predict(feats)
acc, nmi, ari = cluster_acc(targets, y_pred), nmi_score(targets, y_pred), ari_score(targets, y_pred)
print('Init acc {:.4f}, nmi {:.4f}, ari {:.4f}'.format(acc, nmi, ari))
probs = feat2prob(torch.from_numpy(feats), torch.from_numpy(kmeans.cluster_centers_))
return acc, nmi, ari, kmeans.cluster_centers_, probs
Example 11: test
# Module needed: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import adjusted_rand_score [as alias]
def test(model, test_loader, args):
model.eval()
    preds = np.array([])
    targets = np.array([])
    feats = np.zeros((len(test_loader.dataset), args.n_clusters))
    probs = np.zeros((len(test_loader.dataset), args.n_clusters))
for batch_idx, (x, label, idx) in enumerate(tqdm(test_loader)):
x, label = x.to(device), label.to(device)
feat = model(x)
prob = feat2prob(feat, model.center)
_, pred = prob.max(1)
        targets = np.append(targets, label.cpu().numpy())
        preds = np.append(preds, pred.cpu().numpy())
idx = idx.data.cpu().numpy()
feats[idx, :] = feat.cpu().detach().numpy()
probs[idx, :] = prob.cpu().detach().numpy()
acc, nmi, ari = cluster_acc(targets.astype(int), preds.astype(int)), nmi_score(targets, preds), ari_score(targets, preds)
print('Test acc {:.4f}, nmi {:.4f}, ari {:.4f}'.format(acc, nmi, ari))
probs = torch.from_numpy(probs)
return acc, nmi, ari, probs
Example 12: init_prob_kmeans
# Module needed: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import adjusted_rand_score [as alias]
def init_prob_kmeans(model, eval_loader, args):
torch.manual_seed(1)
model = model.to(device)
    # initialize cluster parameters
model.eval()
targets = np.zeros(len(eval_loader.dataset))
feats = np.zeros((len(eval_loader.dataset), 512))
for _, (x, label, idx) in enumerate(eval_loader):
x = x.to(device)
_, feat = model(x)
idx = idx.data.cpu().numpy()
feats[idx, :] = feat.data.cpu().numpy()
targets[idx] = label.data.cpu().numpy()
# evaluate clustering performance
pca = PCA(n_components=args.n_clusters)
feats = pca.fit_transform(feats)
kmeans = KMeans(n_clusters=args.n_clusters, n_init=20)
y_pred = kmeans.fit_predict(feats)
acc, nmi, ari = cluster_acc(targets, y_pred), nmi_score(targets, y_pred), ari_score(targets, y_pred)
print('Init acc {:.4f}, nmi {:.4f}, ari {:.4f}'.format(acc, nmi, ari))
probs = feat2prob(torch.from_numpy(feats), torch.from_numpy(kmeans.cluster_centers_))
return acc, nmi, ari, kmeans.cluster_centers_, probs
Example 13: init_prob_kmeans
# Module needed: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import adjusted_rand_score [as alias]
def init_prob_kmeans(model, eval_loader, args):
torch.manual_seed(1)
model = model.to(device)
    # initialize cluster parameters
model.eval()
targets = np.zeros(len(eval_loader.dataset))
feats = np.zeros((len(eval_loader.dataset), 1024))
for _, (x, _, label, idx) in enumerate(eval_loader):
x = x.to(device)
_, feat = model(x)
feat = feat.view(x.size(0), -1)
idx = idx.data.cpu().numpy()
feats[idx, :] = feat.data.cpu().numpy()
targets[idx] = label.data.cpu().numpy()
# evaluate clustering performance
pca = PCA(n_components=args.n_clusters)
feats = pca.fit_transform(feats)
kmeans = KMeans(n_clusters=args.n_clusters, n_init=20)
y_pred = kmeans.fit_predict(feats)
acc, nmi, ari = cluster_acc(targets, y_pred), nmi_score(targets, y_pred), ari_score(targets, y_pred)
print('Init acc {:.4f}, nmi {:.4f}, ari {:.4f}'.format(acc, nmi, ari))
probs = feat2prob(torch.from_numpy(feats), torch.from_numpy(kmeans.cluster_centers_))
return kmeans.cluster_centers_, probs
Example 14: test
# Module needed: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import adjusted_rand_score [as alias]
def test(model, eval_loader, args):
model.eval()
targets = np.zeros(len(eval_loader.dataset))
y_pred = np.zeros(len(eval_loader.dataset))
    probs = np.zeros((len(eval_loader.dataset), args.n_clusters))
for _, (x, _, label, idx) in enumerate(eval_loader):
x = x.to(device)
_, feat = model(x)
prob = feat2prob(feat, model.center)
# prob = F.softmax(logit, dim=1)
idx = idx.data.cpu().numpy()
y_pred[idx] = prob.data.cpu().detach().numpy().argmax(1)
targets[idx] = label.data.cpu().numpy()
probs[idx, :] = prob.cpu().detach().numpy()
# evaluate clustering performance
y_pred = y_pred.astype(np.int64)
acc, nmi, ari = cluster_acc(targets, y_pred), nmi_score(targets, y_pred), ari_score(targets, y_pred)
print('Test acc {:.4f}, nmi {:.4f}, ari {:.4f}'.format(acc, nmi, ari))
probs = torch.from_numpy(probs)
return acc, nmi, ari, probs
Example 15: test
# Module needed: from sklearn import metrics [as alias]
# Or: from sklearn.metrics import adjusted_rand_score [as alias]
def test(model, eval_loader, args):
model.eval()
targets = np.zeros(len(eval_loader.dataset))
y_pred = np.zeros(len(eval_loader.dataset))
    probs = np.zeros((len(eval_loader.dataset), args.n_clusters))
for _, (x, _, label, idx) in enumerate(eval_loader):
x = x.to(device)
_, feat = model(x)
prob = feat2prob(feat, model.center)
idx = idx.data.cpu().numpy()
y_pred[idx] = prob.data.cpu().detach().numpy().argmax(1)
targets[idx] = label.data.cpu().numpy()
probs[idx, :] = prob.cpu().detach().numpy()
# evaluate clustering performance
y_pred = y_pred.astype(np.int64)
acc, nmi, ari = cluster_acc(targets, y_pred), nmi_score(targets, y_pred), ari_score(targets, y_pred)
print('Test acc {:.4f}, nmi {:.4f}, ari {:.4f}'.format(acc, nmi, ari))
probs = torch.from_numpy(probs)
return acc, nmi, ari, probs