This article collects typical usage examples of sklearn.decomposition.PCA.explained_variance_ratio_ in Python. If you are wondering what PCA.explained_variance_ratio_ is, how to use it, or are simply looking for concrete examples, the code samples selected here should help. Note that explained_variance_ratio_ is an attribute of a fitted PCA estimator rather than a method you call. You can also browse further usage examples of the containing class, sklearn.decomposition.PCA.
The following presents 2 code examples that use PCA.explained_variance_ratio_, ordered by popularity by default.
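Before the project examples, a minimal standalone sketch may help clarify what the attribute holds: explained_variance_ratio_ exists only after fit (or fit_transform) has been called, and it contains one value per retained component, the fraction of the total variance that component explains. The data below are synthetic and the variable names are purely illustrative.

import numpy as np
from sklearn.decomposition import PCA

X = np.random.RandomState(0).normal(size=(100, 6))  # synthetic data
pca = PCA(n_components=3)
pca.fit(X)

# One entry per retained component: the fraction of total variance it explains.
print(pca.explained_variance_ratio_)
# The sum is the total fraction of variance kept by the 3 components.
print(pca.explained_variance_ratio_.sum())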
Example 1: test_init
# Required import: from sklearn.decomposition import PCA [as alias]
# Or: from sklearn.decomposition.PCA import explained_variance_ratio_ [as alias]
# The surrounding test module is assumed to also provide:
#   import pandas as pd; import numpy.testing as npt; import pandas.util.testing as pdt
def test_init(self, df_norm, n_components):
    from flotilla.compute.decomposition import DataFramePCA
    test_pca = DataFramePCA(df_norm, n_components=n_components)

    # Build the reference: a plain sklearn PCA whose fitted attributes are
    # relabelled as pandas objects with component, sample and feature names.
    true_pca = PCA(n_components=n_components)
    true_pca.fit(df_norm.values)
    pc_names = ['pc_{}'.format(i + 1) for i in
                range(true_pca.components_.shape[0])]
    true_pca.components_ = pd.DataFrame(true_pca.components_,
                                        index=pc_names,
                                        columns=df_norm.columns)
    true_pca.explained_variance_ = pd.Series(
        true_pca.explained_variance_, index=pc_names)
    true_pca.explained_variance_ratio_ = pd.Series(
        true_pca.explained_variance_ratio_, index=pc_names)
    true_pca.reduced_space = true_pca.transform(df_norm.values)
    true_pca.reduced_space = pd.DataFrame(true_pca.reduced_space,
                                          index=df_norm.index,
                                          columns=pc_names)

    # DataFramePCA should match the reference attribute for attribute.
    npt.assert_array_equal(test_pca.X, df_norm.values)
    pdt.assert_frame_equal(test_pca.components_,
                           true_pca.components_)
    pdt.assert_series_equal(test_pca.explained_variance_,
                            true_pca.explained_variance_)
    pdt.assert_series_equal(test_pca.explained_variance_ratio_,
                            true_pca.explained_variance_ratio_)
    pdt.assert_frame_equal(test_pca.reduced_space,
                           true_pca.reduced_space)
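What the test verifies is that flotilla's DataFramePCA attaches row and column labels to the plain NumPy attributes of a fitted sklearn PCA. The same relabelling can be reproduced without flotilla; the sketch below uses synthetic data, and the names df, pc_names, explained and components are only illustrative.

import numpy as np
import pandas as pd
from sklearn.decomposition import PCA

df = pd.DataFrame(np.random.RandomState(0).normal(size=(20, 5)),
                  columns=list('abcde'))
pca = PCA(n_components=3).fit(df.values)

pc_names = ['pc_{}'.format(i + 1) for i in range(pca.components_.shape[0])]
# Label the variance ratios so each value can be looked up by component name.
explained = pd.Series(pca.explained_variance_ratio_, index=pc_names)
# Label the loadings with component names (rows) and feature names (columns).
components = pd.DataFrame(pca.components_, index=pc_names, columns=df.columns)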
Example 2: load
# Required import: from sklearn.decomposition import PCA [as alias]
# Or: from sklearn.decomposition.PCA import explained_variance_ratio_ [as alias]
# The surrounding module is assumed to also provide: import json; import netCDF4
def load(self, filename='pca.nc'):
    """
    Read sklearn PCA parameters from a netCDF file.
    """
    infile = netCDF4.Dataset(filename, 'r')
    self.locations = [json.loads(string)
                      for string in list(infile.variables['location'])]
    self.pcas = []
    for i, location in enumerate(self.locations):
        n_components = infile.variables['n_components'][i]
        components = infile.variables['components'][i]
        mean = infile.variables['means'][i]
        explained_variance_ratio = infile.variables['explained_variance_ratio'][i]
        noise_variance = infile.variables['noise_variance'][i]
        # Rebuild each PCA by assigning the previously fitted attributes
        # directly instead of refitting.
        pca = PCA(n_components=n_components)
        pca.components_ = components
        pca.mean_ = mean
        pca.explained_variance_ratio_ = explained_variance_ratio
        pca.noise_variance_ = noise_variance
        self.pcas.append(pca)
    infile.close()
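load() restores each PCA by writing the saved attributes onto a fresh estimator rather than refitting it. In the scikit-learn versions this pattern targets, transform() only needs mean_ and components_ (plus explained_variance_ when whitening), so a PCA reassembled this way can typically project new data. A minimal round-trip sketch, with an in-memory dict standing in for the netCDF variables:

import numpy as np
from sklearn.decomposition import PCA

X = np.random.RandomState(0).normal(size=(50, 4))
fitted = PCA(n_components=2).fit(X)

# Stand-in for the arrays that load() reads back from 'pca.nc'.
saved = {'components': fitted.components_,
         'mean': fitted.mean_,
         'explained_variance_ratio': fitted.explained_variance_ratio_,
         'noise_variance': fitted.noise_variance_}

restored = PCA(n_components=2)
restored.components_ = saved['components']
restored.mean_ = saved['mean']
restored.explained_variance_ratio_ = saved['explained_variance_ratio']
restored.noise_variance_ = saved['noise_variance']

# The restored estimator projects new samples without being refit.
projected = restored.transform(X[:5])
print(restored.explained_variance_ratio_)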