This article collects typical usage examples of the Python method sklearn.decomposition.PCA.get_precision. If you have been wondering what PCA.get_precision does and how to use it, the curated examples below may help. You can also read more about the containing class, sklearn.decomposition.PCA.
Six code examples of PCA.get_precision are shown below, sorted by popularity by default.
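Before the examples, a quick orientation: get_precision() returns the estimated precision matrix, i.e. the inverse of the model covariance of the probabilistic PCA model learned by fit. A minimal sketch of the call, assuming only scikit-learn and its bundled iris dataset:

import numpy as np
from sklearn import datasets
from sklearn.decomposition import PCA

X = datasets.load_iris().data          # (150, 4) feature matrix
pca = PCA(n_components=2).fit(X)       # fit the probabilistic PCA model
precision = pca.get_precision()        # inverse of the model covariance
print(precision.shape)                 # -> (4, 4)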
Example 1: test_pca
# Required import: from sklearn.decomposition import PCA [as alias]
# Or: from sklearn.decomposition.PCA import get_precision [as alias]
# Excerpt from scikit-learn's test suite: `iris`, `np`, and the
# assert_* helpers come from the test module's setup.
def test_pca():
    """PCA on dense arrays"""
    pca = PCA(n_components=2)
    X = iris.data
    X_r = pca.fit(X).transform(X)
    np.testing.assert_equal(X_r.shape[1], 2)

    X_r2 = pca.fit_transform(X)
    assert_array_almost_equal(X_r, X_r2)

    pca = PCA()
    pca.fit(X)
    assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3)

    X_r = pca.transform(X)
    X_r2 = pca.fit_transform(X)
    assert_array_almost_equal(X_r, X_r2)

    # Test get_covariance and get_precision with n_components == n_features,
    # with n_components < n_features, and with n_components == 0
    for n_components in [0, 2, X.shape[1]]:
        pca.n_components = n_components
        pca.fit(X)
        cov = pca.get_covariance()
        precision = pca.get_precision()
        assert_array_almost_equal(np.dot(cov, precision),
                                  np.eye(X.shape[1]), 12)
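Why the final assertion holds: get_precision() is the inverse of get_covariance() (scikit-learn computes it efficiently via the matrix inversion lemma), so their product is the identity. A quick sanity check of that relationship, again assuming only the iris data:

import numpy as np
from sklearn import datasets
from sklearn.decomposition import PCA

X = datasets.load_iris().data
pca = PCA(n_components=2).fit(X)
cov = pca.get_covariance()
precision = pca.get_precision()
# get_precision() should agree with a direct matrix inverse:
print(np.allclose(precision, np.linalg.inv(cov)))  # True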
Example 2: test_pca
# Required import: from sklearn.decomposition import PCA [as alias]
# Or: from sklearn.decomposition.PCA import get_precision [as alias]
def test_pca():
    # PCA on dense arrays
    X = iris.data

    for n_comp in np.arange(X.shape[1]):
        pca = PCA(n_components=n_comp, svd_solver='full')

        X_r = pca.fit(X).transform(X)
        np.testing.assert_equal(X_r.shape[1], n_comp)

        X_r2 = pca.fit_transform(X)
        assert_array_almost_equal(X_r, X_r2)

        X_r = pca.transform(X)
        X_r2 = pca.fit_transform(X)
        assert_array_almost_equal(X_r, X_r2)

        # Test get_covariance and get_precision
        cov = pca.get_covariance()
        precision = pca.get_precision()
        assert_array_almost_equal(np.dot(cov, precision),
                                  np.eye(X.shape[1]), 12)

    # test explained_variance_ratio_ == 1 with all components
    pca = PCA(svd_solver='full')
    pca.fit(X)
    assert_almost_equal(pca.explained_variance_ratio_.sum(), 1.0, 3)
Example 3: test_pca_randomized_solver
# Required import: from sklearn.decomposition import PCA [as alias]
# Or: from sklearn.decomposition.PCA import get_precision [as alias]
def test_pca_randomized_solver():
    # PCA on dense arrays
    X = iris.data

    # Loop excluding 0, which is invalid for the randomized solver
    for n_comp in np.arange(1, X.shape[1]):
        pca = PCA(n_components=n_comp, svd_solver='randomized', random_state=0)

        X_r = pca.fit(X).transform(X)
        np.testing.assert_equal(X_r.shape[1], n_comp)

        X_r2 = pca.fit_transform(X)
        assert_array_almost_equal(X_r, X_r2)

        X_r = pca.transform(X)
        assert_array_almost_equal(X_r, X_r2)

        # Test get_covariance and get_precision
        cov = pca.get_covariance()
        precision = pca.get_precision()
        assert_array_almost_equal(np.dot(cov, precision),
                                  np.eye(X.shape[1]), 12)

    # n_components == 0 must be rejected by the randomized solver
    pca = PCA(n_components=0, svd_solver='randomized', random_state=0)
    assert_raises(ValueError, pca.fit, X)

    # Check internal state
    assert_equal(pca.n_components,
                 PCA(n_components=0,
                     svd_solver='randomized', random_state=0).n_components)
    assert_equal(pca.svd_solver,
                 PCA(n_components=0,
                     svd_solver='randomized', random_state=0).svd_solver)
Example 4: make_tuned_inference
# Required import: from sklearn.decomposition import PCA [as alias]
# Or: from sklearn.decomposition.PCA import get_precision [as alias]
# `tuned_filter` and `scale` are helpers defined elsewhere in this project.
def make_tuned_inference(X):
    print('Making tuned inference...')
    # Thresholds to evaluate. As in Example 6 below, some values are
    # duplicated or missing (0.153 is absent; 0.200-0.209 appear twice).
    t = [0.100, 0.101, 0.102, 0.103, 0.104, 0.105, 0.106, 0.107, 0.108, 0.109,
         0.110, 0.111, 0.112, 0.113, 0.114, 0.115, 0.116, 0.117, 0.118, 0.119,
         0.120, 0.121, 0.122, 0.123, 0.124, 0.125, 0.126, 0.127, 0.128, 0.129,
         0.130, 0.131, 0.132, 0.133, 0.134, 0.135, 0.136, 0.137, 0.138, 0.139,
         0.140, 0.141, 0.142, 0.143, 0.144, 0.145, 0.146, 0.147, 0.148, 0.149,
         0.150, 0.151, 0.152, 0.154, 0.155, 0.156, 0.157, 0.158, 0.159, 0.160,
         0.161, 0.162, 0.163, 0.164, 0.165, 0.166, 0.167, 0.168, 0.169, 0.170,
         0.171, 0.172, 0.173, 0.174, 0.175, 0.176, 0.177, 0.178, 0.179, 0.180,
         0.181, 0.182, 0.183, 0.184, 0.185, 0.186, 0.187, 0.188, 0.189, 0.190,
         0.191, 0.192, 0.193, 0.194, 0.195, 0.196, 0.197, 0.198, 0.199, 0.200,
         0.201, 0.202, 0.203, 0.204, 0.205, 0.206, 0.207, 0.208, 0.209, 0.200,
         0.201, 0.202, 0.203, 0.204, 0.205, 0.206, 0.207, 0.208, 0.209, 0.210]
    weight = 0
    n_samples, n_nodes = X.shape
    y_pred_agg = np.zeros((n_nodes, n_nodes))
    for threshold in t:
        for filtering in ["f1", "f2", "f3", "f4"]:
            print('Current: %0.3f, %s' % (threshold, filtering))
            # Filter with the current threshold from the loop
            X_new = tuned_filter(X, LP=filtering, threshold=threshold,
                                 weights=True)
            pca = PCA(whiten=True, n_components=int(0.8 * n_nodes)).fit(X_new)
            y_pred = -pca.get_precision()
            # Aggregate the per-filter predictions with fixed weights
            if filtering == 'f1':
                y_pred_agg += y_pred
                weight += 1
            elif filtering == 'f2':
                y_pred_agg += y_pred * 0.9
                weight += 0.9
            elif filtering == 'f3':
                y_pred_agg += y_pred * 0.01
                weight += 0.01
            elif filtering == 'f4':
                y_pred_agg += y_pred * 0.7
                weight += 0.7
    return scale(y_pred_agg / weight)
Example 5: PCA
# Required import: from sklearn.decomposition import PCA [as alias]
# Or: from sklearn.decomposition.PCA import get_precision [as alias]
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis

iris = datasets.load_iris()
X = iris.data
Y = iris.target
names = iris.target_names

pca = PCA(n_components=2)
pca.fit(X)
tran_x = pca.transform(X)
print("PCA Precision:", pca.get_precision())
print("PCA Explained Variance Ratio:", pca.explained_variance_ratio_)
print("PCA Score:", pca.score(X))

lda = LinearDiscriminantAnalysis(n_components=2)
lda.fit(X, Y)
tran_x = lda.transform(X)
print("LDA Slope:", lda.coef_)
print("LDA Intercept:", lda.intercept_)
Example 6: make_prediction_PCA
# Required import: from sklearn.decomposition import PCA [as alias]
# Or: from sklearn.decomposition.PCA import get_precision [as alias]
# `_preprocess` and `scale` are helpers defined elsewhere in this project.
def make_prediction_PCA(X):
    """Score neuron connectivity using a partial correlation approach.

    Parameters
    ----------
    X : numpy array of shape (n_samples, n_nodes)
        Fluorescence signals.

    Returns
    -------
    score : numpy array of shape (n_nodes, n_nodes)
        Pairwise neuron connectivity score.
    """
    n_samples, n_nodes = X.shape
    # Init for a given data set
    y_pred_agg = np.zeros((n_nodes, n_nodes))
    # Thresholds to evaluate.
    # Some thresholds are duplicated or missing.
    t = [0.100, 0.101, 0.102, 0.103, 0.104, 0.105, 0.106, 0.107, 0.108, 0.109,
         0.110, 0.111, 0.112, 0.113, 0.114, 0.115, 0.116, 0.117, 0.118, 0.119,
         0.120, 0.121, 0.122, 0.123, 0.124, 0.125, 0.126, 0.127, 0.128, 0.129,
         0.130, 0.131, 0.132, 0.133, 0.134, 0.135, 0.136, 0.137, 0.138, 0.139,
         0.140, 0.141, 0.142, 0.143, 0.144, 0.145, 0.146, 0.147, 0.148, 0.149,
         0.150, 0.151, 0.152, 0.154, 0.155, 0.156, 0.157, 0.158, 0.159, 0.160,
         0.161, 0.162, 0.163, 0.164, 0.165, 0.166, 0.167, 0.168, 0.169, 0.170,
         0.171, 0.172, 0.173, 0.174, 0.175, 0.176, 0.177, 0.178, 0.179, 0.180,
         0.181, 0.182, 0.183, 0.184, 0.185, 0.186, 0.187, 0.188, 0.189, 0.190,
         0.191, 0.192, 0.193, 0.194, 0.195, 0.196, 0.197, 0.198, 0.199, 0.200,
         0.201, 0.202, 0.203, 0.204, 0.205, 0.206, 0.207, 0.208, 0.209, 0.200,
         0.201, 0.202, 0.203, 0.204, 0.205, 0.206, 0.207, 0.208, 0.209, 0.210]
    weight = 0
    # Loop over all the thresholds and methods
    for threshold in t:
        for filtering in ['sym', 'future', 'past', 'alt']:
            print(threshold, filtering)
            # Preprocess data
            X_new = _preprocess(X, filtering=filtering, threshold=threshold)
            # Make the prediction
            pca = PCA(whiten=True, n_components=int(0.8 * n_nodes)).fit(X_new)
            y_pred = -pca.get_precision()
            # Add the (weighted) prediction to the global prediction
            if filtering == 'sym':
                y_pred_agg += y_pred
                weight += 1
            elif filtering == 'alt':
                y_pred_agg += y_pred * 0.9
                weight += 0.9
            elif filtering == 'future':
                y_pred_agg += y_pred * 0.01
                weight += 0.01
            elif filtering == 'past':
                y_pred_agg += y_pred * 0.7
                weight += 0.7
    # Normalize the global prediction
    return scale(y_pred_agg / weight)
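Examples 4 and 6 use the negated precision matrix directly as a connectivity score. A closely related, scale-free quantity is the partial correlation matrix, obtained by normalizing the precision matrix. A minimal sketch (the normalization formula is standard; the function name is ours, not part of either example's project):

import numpy as np

def partial_correlations(precision):
    # Partial correlations from a precision matrix P:
    # rho_ij = -P_ij / sqrt(P_ii * P_jj), with unit diagonal.
    d = np.sqrt(np.diag(precision))
    rho = -precision / np.outer(d, d)
    np.fill_diagonal(rho, 1.0)
    return rho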