This page collects typical usage examples of the Python method sklearn.decomposition.pca.PCA.transform. If you are wondering how PCA.transform is used in practice, or what it does, the curated code examples below should help. You can also explore further usage of the class it belongs to, sklearn.decomposition.pca.PCA.
Below are 15 code examples of PCA.transform, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
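Before the collected examples, here is a minimal, self-contained sketch of the usual fit/transform pattern (the random data and n_components=2 are illustrative assumptions; sklearn.decomposition.pca is the legacy module path, and importing PCA from sklearn.decomposition gives the same class):

import numpy as np
from sklearn.decomposition import PCA  # same class as sklearn.decomposition.pca.PCA

X = np.random.RandomState(0).rand(100, 10)  # illustrative data: 100 samples, 10 features
pca = PCA(n_components=2)
pca.fit(X)                     # learn the principal components of X
X_reduced = pca.transform(X)   # project X onto those components
print(X_reduced.shape)         # (100, 2)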
Example 1: PCA
# Required import: from sklearn.decomposition.pca import PCA [as alias]
# Or: from sklearn.decomposition.pca.PCA import transform [as alias]
def PCA佮SVM模型(self, 問題, 答案):
    sample_weight_constant = np.ones(len(問題))
    clf = svm.SVC(C=1)
    pca = PCA(n_components=100)
    # clf = svm.NuSVC()
    print('Training PCA')
    pca.fit(問題)
    print('Training SVM')
    clf.fit(pca.transform(問題), 答案, sample_weight=sample_weight_constant)
    print('Training done')
    return lambda 問: clf.predict(pca.transform(問))
Example 2: dimensional
# Required import: from sklearn.decomposition.pca import PCA [as alias]
# Or: from sklearn.decomposition.pca.PCA import transform [as alias]
def dimensional(tx, ty, rx, ry, add=None):
    print("pca")
    for j in range(tx[1].size):
        i = j + 1
        print("===" + str(i))
        compressor = PCA(n_components=i)
        t0 = time()
        compressor.fit(tx, y=ty)
        newtx = compressor.transform(tx)
        runtime = time() - t0
        V = compressor.components_
        print(runtime, V.shape, compressor.score(tx))
        # reconstruction error after projecting back to the original space
        distances = np.linalg.norm(tx - compressor.inverse_transform(newtx))
        print(distances)
    print("pca done")
    print("ica")
    for j in range(tx[1].size):
        i = j + 1
        print("===" + str(i))
        compressor = ICA(whiten=True)  # n_components is left at its default; i only labels the run
        t0 = time()
        compressor.fit(tx, y=ty)
        newtx = compressor.transform(tx)
        runtime = time() - t0
        print(newtx.shape, runtime)
        distances = np.linalg.norm(tx - compressor.inverse_transform(newtx))
        print(distances)
    print("ica done")
    print("RP")
    for j in range(tx[1].size):
        i = j + 1
        print("===" + str(i))
        compressor = RandomProjection(n_components=i)
        t0 = time()
        compressor.fit(tx, y=ty)
        newtx = compressor.transform(tx)
        runtime = time() - t0
        shape = newtx.shape
        print(runtime, shape)
    print("RP done")
    print("K-best")
    for j in range(tx[1].size):
        i = j + 1
        print("===" + str(i))
        compressor = best(add, k=i)
        t0 = time()
        compressor.fit(tx, y=ty.ravel())
        newtx = compressor.transform(tx)
        runtime = time() - t0
        shape = newtx.shape
        print(runtime, shape)
    print("K-best done")
Example 3: pca
# Required import: from sklearn.decomposition.pca import PCA [as alias]
# Or: from sklearn.decomposition.pca.PCA import transform [as alias]
def pca(target, control, title, name_one, name_two):
    np_fps = []
    for fp in target + control:
        arr = numpy.zeros((1,))
        DataStructs.ConvertToNumpyArray(fp, arr)
        np_fps.append(arr)
    ys_fit = [1] * len(target) + [0] * len(control)
    names = ["PAINS", "Control"]
    pca = PCA(n_components=3)
    pca.fit(np_fps)
    np_fps_r = pca.transform(np_fps)
    p1 = figure(x_axis_label="PC1",
                y_axis_label="PC2",
                title=title)
    p1.scatter(np_fps_r[:len(target), 0], np_fps_r[:len(target), 1],
               color="blue", legend=name_one)
    p1.scatter(np_fps_r[len(target):, 0], np_fps_r[len(target):, 1],
               color="red", legend=name_two)
    p2 = figure(x_axis_label="PC2",
                y_axis_label="PC3",
                title=title)
    p2.scatter(np_fps_r[:len(target), 1], np_fps_r[:len(target), 2],
               color="blue", legend=name_one)
    p2.scatter(np_fps_r[len(target):, 1], np_fps_r[len(target):, 2],
               color="red", legend=name_two)
    return HBox(p1, p2)
Example 4: pca_plot
# Required import: from sklearn.decomposition.pca import PCA [as alias]
# Or: from sklearn.decomposition.pca.PCA import transform [as alias]
def pca_plot(fp_list, clusters):
    np_fps = []
    for fp in fp_list:
        arr = numpy.zeros((1,))
        DataStructs.ConvertToNumpyArray(fp, arr)
        np_fps.append(arr)
    pca = PCA(n_components=3)
    pca.fit(np_fps)
    np_fps_r = pca.transform(np_fps)
    p1 = figure(x_axis_label="PC1",
                y_axis_label="PC2",
                title="PCA clustering of PAINS")
    p2 = figure(x_axis_label="PC2",
                y_axis_label="PC3",
                title="PCA clustering of PAINS")
    color_vector = ["blue", "red", "green", "orange", "pink", "cyan", "magenta",
                    "brown", "purple"]
    print(len(set(clusters)))
    for clust_num in set(clusters):
        print(clust_num)
        # collect the reduced fingerprints belonging to this cluster
        local_cluster = []
        for i in range(len(clusters)):
            if clusters[i] == clust_num:
                local_cluster.append(np_fps_r[i])
        print(len(local_cluster))
        local_cluster = numpy.array(local_cluster)
        # plot only this cluster's points in its own colour
        p1.scatter(local_cluster[:, 0], local_cluster[:, 1],
                   color=color_vector[clust_num])
        p2.scatter(local_cluster[:, 1], local_cluster[:, 2],
                   color=color_vector[clust_num])
    return HBox(p1, p2)
Example 5: LogisticClassifier
# Required import: from sklearn.decomposition.pca import PCA [as alias]
# Or: from sklearn.decomposition.pca.PCA import transform [as alias]
class LogisticClassifier(object):
    def __init__(self, learning_rate=0.01, reg=0., momentum=0.5):
        self.classifier = LogisticRegression(learning_rate, reg, momentum)
        self.pca = None
        self.scaler = None

    def sgd_optimize(self, data, n_epochs, mini_batch_size):
        data = self._preprocess_data(data)
        sgd_optimization(data, self.classifier, n_epochs, mini_batch_size)

    def _preprocess_data(self, data):
        # center data and scale to unit std
        if self.scaler is None:
            self.scaler = StandardScaler()
            data = self.scaler.fit_transform(data)
        else:
            data = self.scaler.transform(data)
        if self.pca is None:
            # use Minka's MLE to guess an appropriate dimension
            self.pca = PCA(n_components='mle')
            data = self.pca.fit_transform(data)
        else:
            data = self.pca.transform(data)
        return data
Example 6: pca
# Required import: from sklearn.decomposition.pca import PCA [as alias]
# Or: from sklearn.decomposition.pca.PCA import transform [as alias]
def pca(tx, ty, rx, ry):
    # keep half as many components as there are features
    compressor = PCA(n_components=tx[1].size // 2)
    compressor.fit(tx, y=ty)
    newtx = compressor.transform(tx)
    newrx = compressor.transform(rx)
    em(newtx, ty, newrx, ry, add="wPCAtr", times=10)
    km(newtx, ty, newrx, ry, add="wPCAtr", times=10)
    nn(newtx, ty, newrx, ry, add="wPCAr")
Example 7: pca
# Required import: from sklearn.decomposition.pca import PCA [as alias]
# Or: from sklearn.decomposition.pca.PCA import transform [as alias]
def pca(tx, ty, rx, ry):
    print("pca")
    compressor = PCA(n_components=tx[1].size // 2)
    compressor.fit(tx, y=ty)
    newtx = compressor.transform(tx)
    newrx = compressor.transform(rx)
    em(newtx, ty, newrx, ry, add="wPCAtr")
    km(newtx, ty, newrx, ry, add="wPCAtr")
    nn(newtx, ty, newrx, ry, add="wPCAtr")
    print("pca done")
Example 8: do_train_with_freq
# Required import: from sklearn.decomposition.pca import PCA [as alias]
# Or: from sklearn.decomposition.pca.PCA import transform [as alias]
def do_train_with_freq():
    tf_mix = TrainFiles(train_path=train_path_mix, labels_file=labels_file, test_size=0.)
    tf_freq = TrainFiles(train_path=train_path_freq, labels_file=labels_file, test_size=0.)
    X_m, Y_m, _, _ = tf_mix.prepare_inputs()
    X_f, Y_f, _, _ = tf_freq.prepare_inputs()
    X = np.c_[X_m, X_f]
    Y = Y_f
    X, Xt, Y, Yt = train_test_split(X, Y, test_size=0.1)
    sl = SKSupervisedLearning(SVC, X, Y, Xt, Yt)
    sl.fit_standard_scaler()
    pca = PCA(250)
    pca.fit(np.r_[sl.X_train_scaled, sl.X_test_scaled])
    X_pca = pca.transform(sl.X_train_scaled)
    X_pca_test = pca.transform(sl.X_test_scaled)
    #sl.train_params = {'C': 100, 'gamma': 0.0001, 'probability' : True}
    #print "Start SVM: ", time_now_str()
    #sl_ll_trn, sl_ll_tst = sl.fit_and_validate()
    #print "Finish Svm: ", time_now_str()
    ##construct a dataset for RBM
    #X_rbm = X[:, 257:]
    #Xt_rbm = X[:, 257:]
    #rng = np.random.RandomState(123)
    #rbm = RBM(X_rbm, n_visible=X_rbm.shape[1], n_hidden=X_rbm.shape[1]/4, numpy_rng=rng)
    #pretrain_lr = 0.1
    #k = 2
    #pretraining_epochs = 200
    #for epoch in xrange(pretraining_epochs):
    #    rbm.contrastive_divergence(lr=pretrain_lr, k=k)
    #    cost = rbm.get_reconstruction_cross_entropy()
    #    print >> sys.stderr, 'Training epoch %d, cost is ' % epoch, cost
    trndata, tstdata = createDataSets(X_pca, Y, X_pca_test, Yt)
    fnn = train(trndata, tstdata, epochs=1000, test_error=0.025, momentum=0.2, weight_decay=0.0001)
Example 9: train_pca
# Required import: from sklearn.decomposition.pca import PCA [as alias]
# Or: from sklearn.decomposition.pca.PCA import transform [as alias]
def train_pca(pains_fps, num_components=3):
    '''
    Dimensionality reduction of fingerprint bit vectors to principal components.
    :param pains_fps: list of RDKit fingerprint bit vectors
    :param num_components: number of principal components to keep
    :return: PCA-reduced fingerprint vectors
    '''
    np_fps = []
    for fp in pains_fps:
        arr = numpy.zeros((1,))
        DataStructs.ConvertToNumpyArray(fp, arr)
        np_fps.append(arr)
    pca = PCA(n_components=num_components)
    pca.fit(np_fps)
    fps_reduced = pca.transform(np_fps)
    return fps_reduced
Example 10: reduction
# Required import: from sklearn.decomposition.pca import PCA [as alias]
# Or: from sklearn.decomposition.pca.PCA import transform [as alias]
def reduction(data, params):
    # parse parameters: read the expected entries directly from the dict
    # (binding them via exec() does not create usable locals in Python 3)
    n_components = params['n_components']
    # apply PCA
    pca = PCA(n_components=n_components)
    pca.fit(data)
    X = pca.transform(data)
    return X
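A short usage sketch of this helper (the toy data and the contents of the parameter dict are illustrative assumptions):

import numpy as np

data = np.random.RandomState(0).rand(50, 8)  # illustrative: 50 samples, 8 features
X = reduction(data, {'n_components': 2})     # reduce to 2 principal components
print(X.shape)                               # (50, 2)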
Example 11: pca_no_labels
# Required import: from sklearn.decomposition.pca import PCA [as alias]
# Or: from sklearn.decomposition.pca.PCA import transform [as alias]
def pca_no_labels(target, title="PCA clustering of PAINS", color="blue"):
    np_fps = []
    for fp in target:
        arr = numpy.zeros((1,))
        DataStructs.ConvertToNumpyArray(fp, arr)
        np_fps.append(arr)
    pca = PCA(n_components=3)
    pca.fit(np_fps)
    np_fps_r = pca.transform(np_fps)
    p3 = figure(x_axis_label="PC1",
                y_axis_label="PC2",
                title=title)
    p3.scatter(np_fps_r[:, 0], np_fps_r[:, 1], color=color)
    p4 = figure(x_axis_label="PC2",
                y_axis_label="PC3",
                title=title)
    p4.scatter(np_fps_r[:, 1], np_fps_r[:, 2], color=color)
    return HBox(p3, p4)
Example 12: airline_pca
# Required import: from sklearn.decomposition.pca import PCA [as alias]
# Or: from sklearn.decomposition.pca.PCA import transform [as alias]
def airline_pca():
    X = np.array(pca_data)
    X_norm = normalize(X)
    pca = PCA(n_components=3)
    # fit and transform on the same (normalized) data
    pca.fit(X_norm)
    Y = pca.transform(X_norm)
    fig = plt.figure(1, figsize=(8, 6))
    ax = Axes3D(fig, elev=-150, azim=110)
    colordict = {carrier: i for i, carrier in enumerate(major_carriers)}
    pointcolors = [colordict[carrier] for carrier in target_carrier]
    ax.scatter(Y[:, 0], Y[:, 1], Y[:, 2], c=pointcolors)
    ax.set_title("First three PCA directions")
    ax.set_xlabel("1st eigenvector")
    ax.w_xaxis.set_ticklabels([])
    ax.set_ylabel("2nd eigenvector")
    ax.w_yaxis.set_ticklabels([])
    ax.set_zlabel("3rd eigenvector")
    ax.w_zaxis.set_ticklabels([])
Example 13: r
# Required import: from sklearn.decomposition.pca import PCA [as alias]
# Or: from sklearn.decomposition.pca.PCA import transform [as alias]
    ## Generate predictions
    r('predictions_dl <- h2o.predict(dlmodel, test3.hex)')
    r('head(predictions_dl)')
    ## new predictions
    pred = r('as.matrix(predictions_dl)')
    return var(pred - test)
################################################################
figure()
variances_table = []
for i in range(2, 11, 1):
    pca = PCA(n_components=i)
    der = derivatives[train_mask_TL]
    pca.fit(der)
    X = pca.transform(derivatives[test_mask])
    pred_pca_temp = pca.inverse_transform(X)
    # fraction of variance left unexplained by the PCA reconstruction
    var_fraction_pca_TL = var(pred_pca_temp - derivatives[test_mask]) / var(derivatives[test_mask])
    #plot([i], [var(pred_pca_temp-derivatives[test_mask])],'D')
    var_fraction_DL_TL = DL(derivatives[train_mask_TL], derivatives[test_mask], i) / var(derivatives[test_mask])
    #plot([i], [var_DL_TL ],'Dk')
    pca = PCA(n_components=i)
    der = derivatives[train_mask_no_TL]
    pca.fit(der)
    X = pca.transform(derivatives[test_mask])
    pred_pca_temp = pca.inverse_transform(X)
Example 14: open
# Required import: from sklearn.decomposition.pca import PCA [as alias]
# Or: from sklearn.decomposition.pca.PCA import transform [as alias]
from sklearn.decomposition.pca import PCA  # package for principal component analysis
from sklearn import svm
import csv

X_train = pd.read_csv('train.csv', header=None).values
X_test = pd.read_csv('test.csv', header=None).values
trainLabels = np.loadtxt(open('trainLabels.csv', 'rb'), delimiter=',', skiprows=0)

pca = PCA(n_components=12, whiten=True)
#pca.fit(np.r_[X_train, X_test], trainLabels)
pca.fit(X_train)
X_train_pca = pca.transform(X_train)
X_test_pca = pca.transform(X_test)

clf = svm.SVC(C=3, gamma=0.6)
clf.fit(X_train_pca, trainLabels)
predictions = clf.predict(X_test_pca)

with open('svm_model_submission.csv', 'w', newline='') as prediction_file:
    writer = csv.writer(prediction_file, delimiter=',')
    writer.writerow(['Id', 'Solution'])
    for i in range(len(predictions)):
        writer.writerow([i + 1, int(predictions[i])])
# scores around 92%, could maybe get a bit better tweaking parameters for SVC
# -- use GridSearch to do this? Need a way of testing "goodness" of model
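As the closing comment suggests, the SVC parameters could be tuned with a grid search; here is a minimal sketch using scikit-learn's GridSearchCV, where the grid values are illustrative assumptions rather than tuned settings:

from sklearn.model_selection import GridSearchCV
from sklearn import svm

param_grid = {'C': [1, 3, 10], 'gamma': [0.1, 0.6, 1.0]}  # illustrative search grid
search = GridSearchCV(svm.SVC(), param_grid, cv=5)        # 5-fold CV as the "goodness" measure
search.fit(X_train_pca, trainLabels)                      # reuses the PCA-reduced data from above
print(search.best_params_, search.best_score_)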
Example 15: PCA
# Required import: from sklearn.decomposition.pca import PCA [as alias]
# Or: from sklearn.decomposition.pca.PCA import transform [as alias]
    ax.set_title('%s (%s)' % (name, 'correlation'))
    pos += 1
plt.savefig(wd + '/reports/Figure5_dendrograms_signif_protein.pdf', bbox_inches='tight')
plt.close('all')

# ---- Figure 6
(f, m_plot), pos = plt.subplots(3, 2, sharex=False, sharey=False, figsize=(12, 22)), 0
for name, dataset in datasets_quant.items():
    plot_df = dataset.loc[:, ['FED' in i.upper() for i in dataset.columns]].T

    n_components = 3
    pca_o = PCA(n_components=n_components).fit(plot_df)
    pcs = pca_o.transform(plot_df)
    explained_var = ['%.2f' % (pca_o.explained_variance_ratio_[i] * 100) for i in range(n_components)]

    # Plot 1
    ax = m_plot[pos][0]
    x_pc, y_pc = 0, 1
    ax.scatter(pcs[:, x_pc], pcs[:, y_pc], s=90, c=datasets_colour[name], linewidths=0)
    ax.set_xlabel('PC 1 (%s%%)' % explained_var[x_pc])
    ax.set_ylabel('PC 2 (%s%%)' % explained_var[y_pc])
    ax.set_title(name)
    sns.despine(ax=ax)
    for i, txt in enumerate(plot_df.index):
        ax.annotate(txt, (pcs[:, x_pc][i], pcs[:, y_pc][i]), size='x-small')

    # Plot 2