This article collects typical usage examples of the PCA.score method from Python's sklearn.decomposition module. If you are unsure what PCA.score does, how to call it, or where it is useful in practice, the curated code examples below may help. You can also explore further usage examples of the containing class, sklearn.decomposition.PCA.
The following sections present 13 code examples of PCA.score, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
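Before the collected examples, here is a minimal self-contained sketch of what PCA.score computes: the average log-likelihood of the samples under the fitted probabilistic PCA model (higher is better). The data shape and n_components below are illustrative assumptions, not taken from any example on this page.

import numpy as np
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
X = rng.randn(100, 5)  # toy data: 100 samples, 5 features

pca = PCA(n_components=2).fit(X)
# score(X) returns the average log-likelihood of X under the fitted
# probabilistic PCA model; score_samples(X) gives per-sample values.
print(pca.score(X))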
Example 1: pca
# Required import: from sklearn.decomposition import PCA [as alias]
# Or: from sklearn.decomposition.PCA import score [as alias]
def pca(data, components):
    from sklearn.decomposition import PCA
    global pca_transf
    pca = PCA(n_components=components)
    # fit_transform both fits the model and projects the data,
    # so a separate fit(data) call is not needed.
    pca_transf = pca.fit_transform(data)
    print(pca.explained_variance_ratio_)
    return pca.score(data)
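A hypothetical call to the helper above might look as follows; the input array and component count are made up for illustration:

import numpy as np
X = np.random.RandomState(0).randn(50, 10)  # hypothetical input data
avg_ll = pca(X, components=3)  # prints variance ratios, returns the average log-likelihood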
Example 2: test_pca_score2
# Required import: from sklearn.decomposition import PCA [as alias]
# Or: from sklearn.decomposition.PCA import score [as alias]
def test_pca_score2():
    """Test that probabilistic PCA correctly separates different datasets"""
    n, p = 100, 3
    rng = np.random.RandomState(0)
    X = rng.randn(n, p) * 0.1 + np.array([3, 4, 5])
    pca = PCA(n_components=2)
    pca.fit(X)
    ll1 = pca.score(X)
    ll2 = pca.score(rng.randn(n, p) * 0.2 + np.array([3, 4, 5]))
    assert_greater(ll1, ll2)

    # Test that it gives the same scores if whiten=True
    pca = PCA(n_components=2, whiten=True)
    pca.fit(X)
    ll2 = pca.score(X)
    assert_almost_equal(ll1, ll2)
Example 3: test_pca_score2
# Required import: from sklearn.decomposition import PCA [as alias]
# Or: from sklearn.decomposition.PCA import score [as alias]
def test_pca_score2():
    # Test that probabilistic PCA correctly separates different datasets
    n, p = 100, 3
    rng = np.random.RandomState(0)
    X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
    for solver in solver_list:
        pca = PCA(n_components=2, svd_solver=solver)
        pca.fit(X)
        ll1 = pca.score(X)
        ll2 = pca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
        assert_greater(ll1, ll2)

        # Test that it gives different scores if whiten=True
        pca = PCA(n_components=2, whiten=True, svd_solver=solver)
        pca.fit(X)
        ll2 = pca.score(X)
        assert ll1 > ll2
Example 4: test_pca_score
# Required import: from sklearn.decomposition import PCA [as alias]
# Or: from sklearn.decomposition.PCA import score [as alias]
def test_pca_score():
    """Test that probabilistic PCA scoring yields a reasonable score"""
    n, p = 1000, 3
    rng = np.random.RandomState(0)
    X = rng.randn(n, p) * 0.1 + np.array([3, 4, 5])
    pca = PCA(n_components=2)
    pca.fit(X)
    ll1 = pca.score(X)
    # expected average log-likelihood of an isotropic Gaussian with sigma = 0.1
    h = -0.5 * np.log(2 * np.pi * np.exp(1) * 0.1 ** 2) * p
    np.testing.assert_almost_equal(ll1 / h, 1, 0)
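For context on the constant h in Example 4: for an isotropic Gaussian in p dimensions with standard deviation \sigma, the expected per-sample log-likelihood is the negative differential entropy, \mathbb{E}[\log p(x)] = -\tfrac{p}{2}\log(2\pi e \sigma^2). With \sigma = 0.1 and p = 3 this is the value the test compares ll1 against, checking that their ratio is approximately 1.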
Example 5: test_pca_score3
# Required import: from sklearn.decomposition import PCA [as alias]
# Or: from sklearn.decomposition.PCA import score [as alias]
def test_pca_score3():
    """Check that probabilistic PCA selects the right model"""
    n, p = 200, 3
    rng = np.random.RandomState(0)
    Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
          + np.array([1, 0, 7]))
    Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
          + np.array([1, 0, 7]))
    ll = np.zeros(p)
    for k in range(p):
        pca = PCA(n_components=k)
        pca.fit(Xl)
        ll[k] = pca.score(Xt)
    assert_true(ll.argmax() == 1)
Example 6: test_pca_score3
# Required import: from sklearn.decomposition import PCA [as alias]
# Or: from sklearn.decomposition.PCA import score [as alias]
def test_pca_score3():
    # Check that probabilistic PCA selects the right model: the data is a
    # rank-one signal plus isotropic noise, so the held-out likelihood
    # should peak at exactly one component.
    n, p = 200, 3
    rng = np.random.RandomState(0)
    Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5]) +
          np.array([1, 0, 7]))
    Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5]) +
          np.array([1, 0, 7]))
    ll = np.zeros(p)
    for k in range(p):
        pca = PCA(n_components=k, svd_solver='full')
        pca.fit(Xl)
        ll[k] = pca.score(Xt)
    assert ll.argmax() == 1
Example 7: save_resampled_transformation_single
# Required import: from sklearn.decomposition import PCA [as alias]
# Or: from sklearn.decomposition.PCA import score [as alias]
def save_resampled_transformation_single(file, resample_dim=[4, 4, 4]):
    nii_obj = nib.load(file)  # standard_mask=True is default
    resamp_nii = make_resampled_transformation(nii_obj, resample_dim)
    image_data = resamp_nii.get_data()
    image_vector = image_data.flatten()
    image_vector = np.nan_to_num(image_vector)
    image_vector = image_vector.reshape(image_vector.shape[0], 1)
    print(image_data.shape)
    # Build a 116380 x 4 array for the PCA, where
    # 116380 = image_data.shape[0] * image_data.shape[1] * image_data.shape[2]
    # and each row is (x, y, z, v), v being the voxel value.
    size = image_data.shape
    mesh = np.array(np.meshgrid(np.arange(size[0]), np.arange(size[1]),
                                np.arange(size[2]))).T.reshape(-1, 3)
    image_spatial_data = np.concatenate((mesh, image_vector), axis=1)
    pca = PCA(n_components=3)
    pca.fit_transform(image_spatial_data)
    # score() must be called on data with the same number of features the
    # model was fit on; scoring the one-column image_vector would raise a
    # ValueError, so score the (x, y, z, v) matrix instead.
    a = pca.score(image_spatial_data)
    # np.save(f, image_vector)
    return
Example 8: SelectivePCA
# Required import: from sklearn.decomposition import PCA [as alias]
# Or: from sklearn.decomposition.PCA import score [as alias]
# ......... part of the code is omitted here .........
        # fails through if names don't exist:
        self.pca_ = PCA(
            n_components=self.n_components,
            whiten=self.whiten).fit(X[cols].as_matrix())
        return self

    def transform(self, X):
        """Transform a test matrix given the already-fit transformer.

        Parameters
        ----------
        X : Pandas ``DataFrame``, shape=(n_samples, n_features)
            The Pandas frame to transform. The operation will
            be applied to a copy of the input data, and the result
            will be returned.

        Returns
        -------
        X : Pandas ``DataFrame``
            The operation is applied to a copy of ``X``,
            and the result set is returned.
        """
        check_is_fitted(self, 'pca_')
        # check on state of X and cols
        X, _ = validate_is_pd(X, self.cols)
        cols = _cols_if_none(X, self.cols)
        other_nms = [nm for nm in X.columns if nm not in cols]
        transform = self.pca_.transform(X[cols].as_matrix())

        # do weighting if necessary
        if self.weight:
            # get the weight vals
            weights = self.pca_.explained_variance_ratio_
            weights -= np.median(weights)
            weights += 1

            # now add to the transformed features
            transform *= weights

        left = pd.DataFrame.from_records(
            data=transform,
            columns=[('PC%i' % (i + 1)) for i in range(transform.shape[1])])

        # concat if needed
        x = pd.concat([left, X[other_nms]], axis=1) if other_nms else left
        return x if self.as_df else x.as_matrix()

    @overrides(_BaseSelectiveDecomposer)
    def get_decomposition(self):
        """Overridden from the :class:``skutil.decomposition.decompose._BaseSelectiveDecomposer``
        class, this method returns the internal decomposition class:
        ``sklearn.decomposition.PCA``

        Returns
        -------
        self.pca_ : ``sklearn.decomposition.PCA``
            The fit internal decomposition class
        """
        return self.pca_ if hasattr(self, 'pca_') else None

    def score(self, X, y=None):
        """Return the average log-likelihood of all samples.
        This calls sklearn.decomposition.PCA's score method
        on the specified columns [1].

        Parameters
        ----------
        X : Pandas ``DataFrame``, shape=(n_samples, n_features)
            The data to score.

        y : None
            Passthrough for pipeline/gridsearch

        Returns
        -------
        ll : float
            Average log-likelihood of the samples under the fit
            PCA model (`self.pca_`)

        References
        ----------
        .. [1] Bishop, C. "Pattern Recognition and Machine Learning"
               12.2.1 p. 574
               http://www.miketipping.com/papers/met-mppca.pdf
        """
        check_is_fitted(self, 'pca_')
        X, _ = validate_is_pd(X, self.cols)
        cols = X.columns if not self.cols else self.cols
        ll = self.pca_.score(X[cols].as_matrix(), _as_numpy(y))
        return ll
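For readers without skutil installed, the same column-selective scoring pattern can be sketched with plain pandas and scikit-learn; the column names and data here are made-up assumptions:

import numpy as np
import pandas as pd
from sklearn.decomposition import PCA

rng = np.random.RandomState(0)
df = pd.DataFrame(rng.randn(100, 3), columns=['a', 'b', 'c'])  # hypothetical frame

cols = ['a', 'b']  # fit and score only these columns
pca = PCA(n_components=1).fit(df[cols].values)
print(pca.score(df[cols].values))  # average log-likelihood of the selected columns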
Example 9: range
# Required import: from sklearn.decomposition import PCA [as alias]
# Or: from sklearn.decomposition.PCA import score [as alias]
for x in range(2048, 3072):
    bluecount += imgdt[x]
average_rgb.append((redcount / 3072, greencount / 3072, bluecount / 3072))
# print(redcount / 3072, greencount / 3072, bluecount / 3072, label_names[lind])
X_test.append([redcount / 3072, greencount / 3072, bluecount / 3072,
               max(imgdt[:1024]), min(imgdt[:1024]),
               max(imgdt[1024:2048]), min(imgdt[1024:2048]),
               max(imgdt[2048:3072]), min(imgdt[2048:3072])])
# y_test.append(label_names[lind])
# X_test, y_test

# In[10]:
# computing the principal components
X = np.array(X_train)
pca = PCA(n_components=9)
abc = pca.fit_transform(X)
pca1 = pca.score(X)
print("PCA of Feature Vectors")
print(abc, pca1)

# In[11]:
# plotting the principal components
x1 = []
y1 = []
z1 = []
for item in abc:
    x1.append(item[0])
    y1.append(item[1])
    z1.append(item[2])
fig1 = plt.figure()
Example 10: PCA
# Required import: from sklearn.decomposition import PCA [as alias]
# Or: from sklearn.decomposition.PCA import score [as alias]
X = iris.data
Y = iris.target
names = iris.target_names

from sklearn.decomposition import PCA
pca = PCA(n_components=2)
pca.fit(X)
tran_x = pca.transform(X)
print("PCA Precision:", pca.get_precision())
print("PCA Explained Variance Ratio:", pca.explained_variance_ratio_)
print("PCA Score:", pca.score(X))

from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
lda = LinearDiscriminantAnalysis(n_components=2)
lda.fit(X, Y)
tran_x = lda.transform(X)
print("LDA Slope:", lda.coef_)
print("LDA Intercept:", lda.intercept_)
Example 11: range
# Required import: from sklearn.decomposition import PCA [as alias]
# Or: from sklearn.decomposition.PCA import score [as alias]
    rng = np.random.RandomState(0)
    Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
          + np.array([1, 0, 7]))
    Xt = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
          + np.array([1, 0, 7]))
    ll = np.zeros(p)
    for k in range(p):
        pca = PCA(n_components=k)
        pca.fit(Xl)
        ll[k] = pca.score(Xt)
    assert_true(ll.argmax() == 1)

    X = rng.randn(n, p) * .1 + np.array([3, 4, 5])
    pca = PCA(n_components=2)
    pca.fit(X)
    ll1 = pca.score(X)
    ll2 = pca.score(rng.randn(n, p) * .2 + np.array([3, 4, 5]))
    assert_greater(ll1, ll2)

    # Test that it gives the same scores if whiten=True
    pca = PCA(n_components=2, whiten=True)
    pca.fit(X)
    ll2 = pca.score(X)
    assert_almost_equal(ll1, ll2)

def test_pca_score3():
    # Check that probabilistic PCA selects the right model
    n, p = 200, 3
    rng = np.random.RandomState(0)
    Xl = (rng.randn(n, p) + rng.randn(n, 1) * np.array([3, 4, 5])
          + np.array([1, 0, 7]))
Example 12: TfidfVectorizer
# Required import: from sklearn.decomposition import PCA [as alias]
# Or: from sklearn.decomposition.PCA import score [as alias]
    card = card.split(' ')
    cardT = []
    for w in card:
        if w != '' and w != '\r\n':
            cardT.append(w)
    cards.append(' '.join(cardT))

vectorizer = TfidfVectorizer(min_df=5, max_df=0.5, ngram_range=(1, 2))
X = vectorizer.fit_transform(cards).toarray()
n_components = np.arange(50, 80, 5)  # options for n_components
print(X.shape)

fa = PCA()
fa_scores = []
for n in n_components:
    print(n)
    sys.stdout.flush()
    fa.n_components = n
    fa.fit(X)
    fa_scores.append(fa.score(X))
    print('\t', fa_scores[-1])

fa.n_components = n_components[np.argmax(fa_scores)]
Y = fa.fit_transform(X)
for name, ii in zip(cardnames, range(len(Y))):
    print(name + '@' + '@'.join(str(v) for v in list(Y[ii, :])))
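Because PCA defines a score method, the n_components search above can also be written with scikit-learn's cross-validation tools instead of a manual loop. A minimal sketch, assuming a dense feature matrix X standing in for the TF-IDF array (the shapes are made up):

import numpy as np
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV

rng = np.random.RandomState(0)
X = rng.randn(200, 80)  # stand-in for the TF-IDF matrix above

# With no scoring argument, GridSearchCV uses PCA.score, the average
# held-out log-likelihood, as its selection criterion.
search = GridSearchCV(PCA(svd_solver='full'),
                      {'n_components': np.arange(50, 80, 5)})
search.fit(X)
print(search.best_params_)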
Example 13: KNeighborsClassifier
# Required import: from sklearn.decomposition import PCA [as alias]
# Or: from sklearn.decomposition.PCA import score [as alias]
model = KNeighborsClassifier(n_neighbors=12, weights='distance')
model.fit(data_train, label_train)
#
# INFO: Be sure to always keep the domain of the problem in mind! It is
# far better to errantly classify a benign tumor as malignant and have
# it removed than to incorrectly label a malignant tumor as benign and
# have the patient's cancer progress untreated. Since the UDF weights
# don't carry any class information, the only way to introduce this
# preference into sklearn's KNN classifier is by "baking" it into your
# data, for example by randomly reducing the ratio of benign samples
# to malignant samples in the training set.
#
# TODO: Calculate + Print the accuracy of the testing set
#
# .. your code here ..
print('==========\nScore\n')
print(model.score(data_test, label_test))
print('==========')
plotDecisionBoundary(model, data_test, label_test)