This article collects typical usage examples of the Python method sklearn.decomposition.PCA.get_params. If you have been wondering what PCA.get_params does, how to call it, and what it looks like in practice, the curated examples below may help. You can also explore further usage examples of the class the method belongs to, sklearn.decomposition.PCA.
The following presents 4 code examples of PCA.get_params, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
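Before diving into the examples, here is a minimal sketch of what get_params does (a hedged illustration, not taken from the examples below): it returns a dict mapping the estimator's constructor arguments to their current values, and that dict can be fed back into set_params. The exact set of keys depends on your scikit-learn version.

from sklearn.decomposition import PCA

pca = PCA(n_components=2, whiten=True)

# get_params returns the constructor parameters as a dict, e.g.
# {'n_components': 2, 'whiten': True, 'copy': True, ...}
params = pca.get_params()
print(params['n_components'])  # 2

# The dict round-trips through set_params, which takes keyword arguments.
other = PCA()
other.set_params(**params)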
Example 1: test_establish_reducer_use_existing
# Required import: from sklearn.decomposition import PCA [as alias]
# Or: from sklearn.decomposition.PCA import get_params [as alias]
def test_establish_reducer_use_existing(self):
    from cupcake.smush.base import SmushPlotterBase

    pca_kws = {}
    n_components = 2
    reducer = PCA(n_components=n_components, **pca_kws)

    p = SmushPlotterBase()
    p.establish_reducer(reducer)
    assert isinstance(p.reducer, type(reducer))
    pdt.assert_dict_equal(p.reducer.get_params(), reducer.get_params())
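The assertion works because two estimators constructed with the same arguments report identical get_params dictionaries. The same mechanism underlies sklearn.base.clone, as this minimal standalone sketch shows (the imports here are assumptions, not part of the original test):

from sklearn.base import clone
from sklearn.decomposition import PCA

reducer = PCA(n_components=2)
copy = clone(reducer)  # clone() rebuilds the estimator from get_params()
assert copy.get_params() == reducer.get_params()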
Example 2: dict
# Required import: from sklearn.decomposition import PCA [as alias]
# Or: from sklearn.decomposition.PCA import get_params [as alias]
# reverse_dims = dict()
# for k in dimensions:
#     for e in k['bins']:
#         reverse_dims[e['name']] = (k['key'], e['key'])

## scale data if asked
if scale_data:
    data = scale(data)

## compute PCA or manual projection if necessary and project data
if projection_mode == 1:
    print("### using PCA with %s components ###" % n_components)
    # pca = PCA(n_components=n_digits).fit(data)
    pca = PCA(n_components=n_components)
    reduced_data = pca.fit_transform(data)
    print(pca.get_params())
    # persist the fitted PCA so it can be reused for later projections
    with open(get_filename("pca.pk"), "wb") as filehandler:
        pickle.dump(pca, filehandler)
    print("ok !")
elif projection_mode >= 2:
    print("### using manual projection ###")
    n_components = 3  # for display
    # get axis
    # assign axis to mask
    nb_rows = len(mask)
    axis = [0] * nb_rows
    weight = [1] * nb_rows
    for i in range(nb_rows):
        var = dims[mask[i]]
        if var in X:
            axis[i] = 0
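Since the example pickles the fitted PCA, it can later be restored and applied to new data. A minimal sketch of the loading side, reusing get_filename from the example above (new_data is a hypothetical array with the same feature layout as data):

import pickle

# Restore the PCA fitted and saved above, then project new samples with it.
with open(get_filename("pca.pk"), "rb") as filehandler:
    pca = pickle.load(filehandler)

reduced_new = pca.transform(new_data)
print(pca.get_params())  # the restored object reports the same parameters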
Example 3: PcaWhitening
# Required import: from sklearn.decomposition import PCA [as alias]
# Or: from sklearn.decomposition.PCA import get_params [as alias]
class PcaWhitening(object):
    """
    Whitens the data using principal component analysis.

    To speed up training the transformation, you can specify how many data
    points from the training data source to use.

    Parameters
    ----------
    n_train_vectors : int or None
        Number of data points to use when training the PCA. If `None`, use
        all values.
    n_components : int or None
        Number of components to use in the PCA. If `None`, use all
        components, resulting in no dimensionality reduction.
    """
    def __init__(self, n_train_vectors=None, n_components=None):
        from sklearn.decomposition import PCA
        # assign the attributes before constructing the PCA, so that
        # self.n_components is defined when it is used below
        self.n_train_vectors = n_train_vectors
        self.n_components = n_components
        self.pca = PCA(whiten=True, n_components=self.n_components)
        self.fit = False
    def __call__(self, data):
        """Project the given data using the PCA projection."""
        if self.fit:
            # flatten features; the PCA only works on 1d feature vectors
            data_shape = data.shape
            data_flat = data.reshape((data_shape[0], -1))
            whitened_data = self.pca.transform(data_flat)
            # restore the original shape
            return whitened_data.reshape(data_shape).astype(np.float32)
        else:
            return data
    def train(self, data_source, batch_size=4096):
        """
        Fit the PCA projection to data.

        Parameters
        ----------
        data_source : :class:`DataSource`
            Data to use for fitting the projection.
        batch_size : int
            Not used here.
        """
        # select a random subset of the data if self.n_train_vectors is
        # not None
        if self.n_train_vectors is not None:
            sel_data = list(np.random.choice(data_source.n_data,
                                             size=self.n_train_vectors,
                                             replace=False))
        else:
            sel_data = slice(None)

        data = data_source[sel_data][0]  # ignore the labels
        data_flat = data.reshape((data.shape[0], -1))  # flatten features
        self.pca.fit(data_flat)
        self.fit = True
    def load(self, filename):
        """
        Load the PCA projection parameters from a pickle file.

        Parameters
        ----------
        filename : str
            Pickle file containing the projection parameters.
        """
        # pickle files must be opened in binary mode, and set_params
        # expects the parameter dict unpacked as keyword arguments
        with open(filename, 'rb') as f:
            self.pca.set_params(**pickle.load(f))
        self.fit = True
    def save(self, filename):
        """
        Save the PCA projection parameters to a pickle file.

        Parameters
        ----------
        filename : str
            Pickle file to store the parameters to.
        """
        # note: get_params(deep=True) captures the constructor
        # configuration, not the fitted components
        with open(filename, 'wb') as f:
            pickle.dump(self.pca.get_params(deep=True), f)
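A hypothetical usage sketch for the class above (data_source and batch are stand-ins for any objects matching what train and __call__ expect). Note that get_params/set_params only round-trip the constructor configuration, not the fitted components, so save/load restores the setup rather than the learned projection:

whitener = PcaWhitening(n_train_vectors=10000, n_components=None)
whitener.train(data_source)        # fit the whitening PCA on a random subset
whitened_batch = whitener(batch)   # __call__ projects the data, keeping its shape
whitener.save('pca_whitening.pk')  # persists pca.get_params(deep=True)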
Example 4: printHeader
# Required import: from sklearn.decomposition import PCA [as alias]
# Or: from sklearn.decomposition.PCA import get_params [as alias]
#######
# PCA Non-Normalized Data
#######
printHeader("Run PCA On Non-Normalized Data 15 Components")
from sklearn.decomposition import PCA

print(np.array([trainDataLabels]))
# trainDataNew = np.concatenate((trainData, np.array([trainDataLabels]).T), axis=1)
pca = PCA(n_components=15)
pca.fit(trainData)
transformedTrainData = pca.transform(trainData)
transformedTestData = pca.transform(testData)
print(pca.get_params())

printHeader("KNN-C PCA Data with Non-Normalized Train Data Hyper-Parameter Search")
# Let's do some hyper-parameter searching: split the training data in half
# and evaluate odd values of k on the held-out second half.
trainDataSplit1 = np.array(transformedTrainData[0:int(len(transformedTrainData) / 2.0)])
trainDataSplit2 = np.array(transformedTrainData[int(len(transformedTrainData) / 2.0):])
trainDataSplit1Labels = np.array(trainDataLabels[0:int(len(transformedTrainData) / 2.0)])
trainDataSplit2Labels = np.array(trainDataLabels[int(len(transformedTrainData) / 2.0):])

maxK = 1
maxAccuracy = 0
for k in range(1, 39, 2):
    knn = KNeighborsClassifier(n_neighbors=k, metric="euclidean")
    knn.fit(trainDataSplit1, trainDataSplit1Labels)
    predictions = knn.predict(trainDataSplit2)
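    # Hypothetical continuation (the original excerpt stops here): score the
    # predictions on the held-out split and keep the best-performing k.
    accuracy = np.mean(predictions == trainDataSplit2Labels)
    if accuracy > maxAccuracy:
        maxAccuracy = accuracy
        maxK = k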