This article collects typical usage examples of the Python method sklearn.mixture.GaussianMixture.aic. If you have been wondering how GaussianMixture.aic is used in Python, what it is for, or where to find examples, the hand-picked code samples below may help. You can also read more about the class the method belongs to, sklearn.mixture.GaussianMixture.
Two code examples of GaussianMixture.aic are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
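Before the examples, here is a minimal sketch of the call itself: GaussianMixture.aic(X) returns the Akaike information criterion of the fitted model on the data X, where lower values indicate a better fit. The toy data below is hypothetical and used only to illustrate the call.

import numpy as np
from sklearn.mixture import GaussianMixture

# Hypothetical toy data: two Gaussian blobs in 2-D, for illustration only.
rng = np.random.RandomState(0)
X = np.vstack([rng.normal(0, 1, size=(100, 2)),
               rng.normal(5, 1, size=(100, 2))])

gmm = GaussianMixture(n_components=2, covariance_type='full', random_state=0).fit(X)
print(gmm.aic(X))  # AIC of the fitted model on X; lower is better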
Example 1: gmm_analysis
# Required module import: from sklearn.mixture import GaussianMixture [as alias]
# Or: from sklearn.mixture.GaussianMixture import aic [as alias]
# Additional imports used by this example (plot_helper is a project-local
# plotting utility; the function below is a method of a class with an out_dir attribute):
import numpy as np
from matplotlib import cm
from sklearn.preprocessing import RobustScaler
from sklearn.mixture import GaussianMixture
from sklearn.metrics import (homogeneity_score, completeness_score, v_measure_score,
                             adjusted_rand_score, adjusted_mutual_info_score)

def gmm_analysis(self, X_train, X_test, y_train, y_test, data_set_name, max_clusters, analysis_name='GMM'):
    scl = RobustScaler()
    X_train_scl = scl.fit_transform(X_train)
    X_test_scl = scl.transform(X_test)

    em_bic = []
    em_aic = []
    em_completeness_score = []
    em_homogeneity_score = []
    em_measure_score = []
    em_adjusted_rand_score = []
    em_adjusted_mutual_info_score = []

    cluster_range = np.arange(2, max_clusters + 1, 1)
    for k in cluster_range:
        print('K Clusters: ', k)

        ##
        ## Expectation Maximization
        ##
        em = GaussianMixture(n_components=k, covariance_type='full')
        em.fit(X_train_scl)
        em_pred = em.predict(X_train_scl)

        em_bic.append(em.bic(X_train_scl))
        em_aic.append(em.aic(X_train_scl))

        # metrics
        y_train_score = y_train.reshape(y_train.shape[0],)

        em_homogeneity_score.append(homogeneity_score(y_train_score, em_pred))
        em_completeness_score.append(completeness_score(y_train_score, em_pred))
        em_measure_score.append(v_measure_score(y_train_score, em_pred))
        em_adjusted_rand_score.append(adjusted_rand_score(y_train_score, em_pred))
        em_adjusted_mutual_info_score.append(adjusted_mutual_info_score(y_train_score, em_pred))

    ##
    ## Plots
    ##
    ph = plot_helper()

    ##
    ## BIC/AIC Plot
    ##
    title = 'Information Criterion Plot (' + analysis_name + ') for ' + data_set_name
    name = data_set_name.lower() + '_' + analysis_name.lower() + '_ic'
    filename = './' + self.out_dir + '/' + name + '.png'

    ph.plot_series(cluster_range,
                   [em_bic, em_aic],
                   [None, None],
                   ['bic', 'aic'],
                   cm.viridis(np.linspace(0, 1, 2)),
                   ['o', '*'],
                   title,
                   'Number of Clusters',
                   'Information Criterion',
                   filename)

    ##
    ## Score Plot
    ##
    title = 'Score Summary Plot (' + analysis_name + ') for ' + data_set_name
    name = data_set_name.lower() + '_' + analysis_name.lower() + '_score'
    filename = './' + self.out_dir + '/' + name + '.png'

    ph.plot_series(cluster_range,
                   [em_homogeneity_score, em_completeness_score, em_measure_score,
                    em_adjusted_rand_score, em_adjusted_mutual_info_score],
                   [None, None, None, None, None],
                   ['homogeneity', 'completeness', 'measure', 'adjusted_rand', 'adjusted_mutual_info'],
                   cm.viridis(np.linspace(0, 1, 5)),
                   ['o', '^', 'v', '>', '<'],
                   title,
                   'Number of Clusters',
                   'Score',
                   filename)
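Note that plot_helper is a helper class from the example's own project, not part of scikit-learn. If you only want to reproduce the BIC/AIC plot, a rough matplotlib equivalent could look like the sketch below, which assumes the cluster_range, em_bic and em_aic values built in the loop above.

import matplotlib.pyplot as plt

# Sketch only: cluster_range, em_bic and em_aic come from the loop above.
plt.figure()
plt.plot(cluster_range, em_bic, marker='o', label='bic')
plt.plot(cluster_range, em_aic, marker='*', label='aic')
plt.xlabel('Number of Clusters')
plt.ylabel('Information Criterion')
plt.title('Information Criterion Plot')
plt.legend()
plt.savefig('gmm_ic.png')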
Example 2: range
# Required module import: from sklearn.mixture import GaussianMixture [as alias]
# Or: from sklearn.mixture.GaussianMixture import aic [as alias]
# Additional imports assumed by this fragment (w, X and data_thr are defined
# earlier in the original script and are not shown here):
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.mixture import GaussianMixture

w = np.exp(-np.exp(3 * w.mean(axis=1)))

# GMM model selection with AIC:
lowest_aic = np.inf
aic = []
n_components_range = range(1, 7)
cv_types = ['spherical', 'tied', 'diag', 'full']
for cv_type in cv_types:
    for n_components in n_components_range:
        # Fit a mixture of Gaussians with EM
        gmm = GaussianMixture(n_components=n_components,
                              covariance_type=cv_type, n_init=5)
        gmm.fit(X)
        aic.append(gmm.aic(X))
        if aic[-1] < lowest_aic:
            lowest_aic = aic[-1]
            best_gmm = gmm

preds = best_gmm.predict(X)
probs = best_gmm.predict_proba(X)

# One AIC curve per covariance type: the aic list is filled covariance-type by
# covariance-type, so each row of the reshaped array holds one type's scores.
for name, col in zip(cv_types, np.array(aic).reshape(len(cv_types), -1)):
    plt.plot(n_components_range, col, label=name)
plt.legend()
plt.savefig('gmm_sklearn_aic/aic.pdf')

data_thr['preds'] = pd.Series(preds).astype("category")
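As a follow-up, the selected configuration can be inspected directly and its AIC compared with BIC. This is only a sketch; it assumes the best_gmm and X variables from the fragment above.

# best_gmm holds the fitted model with the lowest AIC found in the grid search above.
print('covariance_type:', best_gmm.covariance_type)
print('n_components:   ', best_gmm.n_components)
print('AIC: %.1f  BIC: %.1f' % (best_gmm.aic(X), best_gmm.bic(X)))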