This article collects typical usage examples of the Python method sklearn.cross_decomposition.PLSRegression.transform. If you are wondering how exactly PLSRegression.transform is used, or what it is for, the hand-picked code samples below may help. You can also read more about the containing class, sklearn.cross_decomposition.PLSRegression.
The following section shows 9 code examples of PLSRegression.transform, ordered roughly by popularity.
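Before the collected examples, here is a minimal self-contained sketch of what PLSRegression.transform does: after fitting, it projects the input matrix onto the learned latent components and returns the X scores. The data below is randomly generated purely for illustration.

import numpy as np
from sklearn.cross_decomposition import PLSRegression

rng = np.random.RandomState(0)
X = rng.normal(size=(100, 10))                               # 100 samples, 10 features
y = X[:, 0] - 2 * X[:, 1] + rng.normal(scale=0.1, size=100)  # toy continuous target

pls = PLSRegression(n_components=2)
pls.fit(X, y)

X_scores = pls.transform(X)  # shape (100, 2): one column per latent component
print(X_scores.shape)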
Example 1: fit_base_model
# Required import: from sklearn.cross_decomposition import PLSRegression [as alias]
# Or: from sklearn.cross_decomposition.PLSRegression import transform [as alias]
def fit_base_model(classifiers, fully, dummyY, trainx, testx):
    """ Takes a list of classifiers and/or PLS regression and
    does dimension reduction by returning the predictions of the classifiers
    or first two scores of the PLS regression on bootstrapped subsamples of
    the data."""
    trainProbs = []
    testProbs = []
    iterations = 0
    for clf in classifiers:
        for i in range(clf[1]):
            iterations += 1
            print(iterations)
            print(clf[0])
            train_rows = np.random.choice(trainx.shape[0],
                                          round(trainx.shape[0] * base_prop),
                                          True)
            oob_rows = list(set(range(trainx.shape[0])) - set(train_rows))
            print(len(train_rows))
            print(len(oob_rows))
            x = trainx[train_rows, :]
            if clf[0] == 'PLS':
                y = dummyY[train_rows, :]
                mod = PLSRegression().fit(x, y)
                trainscores = mod.transform(trainx)
                testscores = mod.transform(testx)
                trainProbs.append(trainscores[:, 0])
                trainProbs.append(trainscores[:, 1])
                testProbs.append(testscores[:, 0])
                testProbs.append(testscores[:, 1])
            else:
                y = fully[train_rows]
                print('\t Fitting model...')
                mod = clf[0].fit(x, y)
                print('\t Predicting training results...')
                tpreds = mod.predict_proba(trainx)
                trainProbs.append(list(tpreds[:, 1]))
                print('\t Predicting test results...')
                testProbs.append(list(mod.predict_proba(testx)[:, 1]))
                print('\t OOB score: ' + str(log_loss(fully[oob_rows],
                                                      tpreds[oob_rows, :])))
    return trainProbs, testProbs
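A hedged usage sketch for fit_base_model: the function relies on numpy as np, sklearn.metrics.log_loss, and a module-level base_prop constant that are not shown in the snippet, and it must be defined in the same script as the code below. The data, the base_prop value, and the classifier list here are invented purely for illustration.

import numpy as np
from sklearn.cross_decomposition import PLSRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import log_loss

base_prop = 0.8  # assumed bootstrap subsample proportion used inside fit_base_model

# Synthetic binary problem (illustration only)
rng = np.random.RandomState(0)
trainx = rng.normal(size=(200, 6))
testx = rng.normal(size=(50, 6))
fully = (trainx[:, 0] > 0).astype(int)   # integer class labels
dummyY = np.eye(2)[fully]                # one-hot labels for the PLS branch

# Each entry is (classifier or the string 'PLS', number of bootstrap fits)
classifiers = [('PLS', 2),
               (RandomForestClassifier(n_estimators=50), 3)]

trainProbs, testProbs = fit_base_model(classifiers, fully, dummyY, trainx, testx)
print(len(trainProbs), len(testProbs))  # two columns per PLS fit plus one per classifier fit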
Example 2: do_pls
# Required import: from sklearn.cross_decomposition import PLSRegression [as alias]
# Or: from sklearn.cross_decomposition.PLSRegression import transform [as alias]
def do_pls(X, Y):
    pls2 = PLSRegression(n_components=2)
    pls2.fit(X, Y)
    out = pls2.transform(X)
    print(out)
    print(out.shape)
    plt.title("PLS2")
    plt.xlabel("PL1")
    plt.ylabel("PL2")
    plt.grid()
    plt.scatter(out[:, 0], out[:, 1], c=Y, cmap='viridis')
    plt.savefig('pls.png', dpi=125)
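A possible way to call do_pls, assuming the function and these imports live in the same script (the snippet itself refers to plt and PLSRegression as module-level names). The Iris data is only a stand-in to make the call concrete.

import matplotlib
matplotlib.use('Agg')  # render to pls.png without needing a display
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSRegression
from sklearn.datasets import load_iris

X, y = load_iris(return_X_y=True)
do_pls(X, y)  # prints the two-column score matrix and writes pls.png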
Example 3: pls_approach
# Required import: from sklearn.cross_decomposition import PLSRegression [as alias]
# Or: from sklearn.cross_decomposition.PLSRegression import transform [as alias]
def pls_approach():
    from sklearn.cross_decomposition import PLSRegression
    (X, Y), cities = pull_xy_data()
    pls = PLSRegression()
    pls.fit(X, Y)
    plsX, plsY = pls.transform(X, Y)
    plot(plsX, cities, ["Lat01", "Lat02", "Lat03"], ellipse_sigma=1)
    return "OK What Now?"
Example 4: hacerPLS
# Required import: from sklearn.cross_decomposition import PLSRegression [as alias]
# Or: from sklearn.cross_decomposition.PLSRegression import transform [as alias]
def hacerPLS(X, Y):
    pls_wild_b = PLSRegression(n_components=9)
    pls_wild_b.fit(X, Y)
    Z = pls_wild_b.transform(X)
    scores = list()
    scores_std = list()
    n_features = np.shape(X)[1]
    X, X_test_tot, Y, Y_test_tot = cross_validation.train_test_split(X, Y, test_size=0.5, random_state=0)
    N = np.shape(X)[0]
    for num_comp in range(n_features):
        kf = KFold(N, n_folds=10)
        aux_scores = list()
        for train, test in kf:
            X_train, X_test, y_train, y_test = X[train], X[test], Y[train], Y[test]
            if num_comp == 0:
                y_pred = np.mean(y_test)
                y_pred = y_pred * np.ones(np.shape(y_test))
                aux_scores.append(metrics.mean_squared_error(y_test, y_pred))
            else:
                pls_foo = PLSRegression(n_components=num_comp)
                pls_foo.fit(X_train, y_train)
                y_pred = pls_foo.predict(X_test)
                # obtain the score
                this_score = metrics.mean_squared_error(y_test, y_pred)
                aux_scores.append(this_score)
        scores.append(np.mean(aux_scores))
        scores_std.append(np.std(aux_scores))
    plt.plot(scores)
    plt.xlabel('Componentes')
    plt.ylabel("$MSE$")
    plt.title("Animales PLS")
    plt.show()
    num_comp = np.argmin(scores)
    pls_pred = PLSRegression(n_components=2)
    pls_pred.fit(X, Y)
    y_pred_test = pls_pred.predict(X_test_tot)
    print("MSE test = " + str(metrics.mean_squared_error(Y_test_tot, y_pred_test)))
Example 5: reduce_PLS
# Required import: from sklearn.cross_decomposition import PLSRegression [as alias]
# Or: from sklearn.cross_decomposition.PLSRegression import transform [as alias]
def reduce_PLS(dataframe):
    PLS_file = "data/pls_structure.pickle"
    selectedcolumn = [x for x in dataframe.columns if x not in ["id", "click", "device_id", "device_ip"]]
    X = np.array(dataframe[selectedcolumn])
    y = np.array(dataframe["click"])
    if os.path.exists(PLS_file):
        stand_PLS = pickle.load(open(PLS_file, 'rb'))
        print("PLS structure is loaded.")
    else:
        stand_PLS = PLSRegression(n_components=10, scale=True)
        stand_PLS.fit(X, y[:, np.newaxis])
        stand_PLS.y_scores_ = None
        stand_PLS.x_scores_ = None
        pickle.dump(stand_PLS, open(PLS_file, "wb"))
        print("PLS transform structure is stored.")
    T = stand_PLS.transform(X)
    print("PLS transformation is performed.")
    return T
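A hedged sketch of how reduce_PLS might be called on a small click-log style DataFrame. The column names mirror the ones filtered out inside the function, the fabricated frame needs at least 10 numeric feature columns (the function fits 10 components), and the data/ directory must exist because the fitted model is cached in data/pls_structure.pickle. Everything below is illustrative only.

import os
import pickle
import numpy as np
import pandas as pd
from sklearn.cross_decomposition import PLSRegression

os.makedirs("data", exist_ok=True)

rng = np.random.RandomState(0)
df = pd.DataFrame({
    "id": np.arange(100),
    "click": rng.randint(0, 2, size=100),
    "device_id": ["d%d" % i for i in range(100)],
    "device_ip": ["ip%d" % i for i in range(100)],
})
for j in range(12):                # numeric feature columns used as X
    df["f%d" % j] = rng.normal(size=100)

T = reduce_PLS(df)
print(T.shape)  # (100, 10)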
Example 6: print
# Required import: from sklearn.cross_decomposition import PLSRegression [as alias]
# Or: from sklearn.cross_decomposition.PLSRegression import transform [as alias]
y = dataset["target"]
# Center each feature and scale the variance to be unitary
X = preprocessing.scale(X)
# Compute the variance for each column
print(numpy.var(X, 0).sum())
# Now use PCA using 3 components
pca = PCA(3)
X2 = pca.fit_transform(X)
print(numpy.var(X2, 0).sum())
pls = PLSRegression(3)
pls.fit(X, y)
X2 = pls.transform(X)
print(numpy.var(X2, 0).sum())
# Make predictions using an SVM with PCA and PLS
pca_error = 0
pls_error = 0
n_folds = 10
svc = LinearSVC()
for train_inds, test_inds in KFold(X.shape[0], n_folds=n_folds):
    X_train, X_test = X[train_inds], X[test_inds]
    y_train, y_test = y[train_inds], y[test_inds]
    # Use PCA and then classify using an SVM
    X_train2 = pca.fit_transform(X_train)
Example 7: PLSRegression
# Required import: from sklearn.cross_decomposition import PLSRegression [as alias]
# Or: from sklearn.cross_decomposition.PLSRegression import transform [as alias]
    plt.ylabel('1st component')
elif i == 1:
    plt.ylabel('2nd component')
else:
    plt.ylabel('3rd component')
axis_c = plt.gca()
axis_c.set_xticklabels(wild_boar_ddbb['header'][3:], fontsize=7)
axis_c.set_xticks(axis_c.get_xticks() + 0.5)
print("inside the loop")
#Select the number of components using CV
#%%
##PLSR
pls_wild_b = PLSRegression(n_components=3)
pls_wild_b.fit(X_train_prepro, Y_train)
X_train_pls_proj = pls_wild_b.transform(X_train_prepro)
print("loadings")
for i in range(pls_wild_b.n_components):
    plt.figure()
    plt.bar(np.arange(np.shape(X_train_prepro)[1]), pls_wild_b.x_loadings_[:, i])
    if i == 0:
        plt.ylabel('PLS 1st component')
    elif i == 1:
        plt.ylabel('PLS 2nd component')
    else:
        plt.ylabel('PLS 3rd component')
    axis_c = plt.gca()
    axis_c.set_xticklabels(wild_boar_ddbb['header'][3:], fontsize=7)
    axis_c.set_xticks(axis_c.get_xticks() + 0.5)
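For reference when reading the loop above: in scikit-learn, x_loadings_ has shape (n_features, n_components), so x_loadings_[:, i] is the per-feature loading vector plotted for component i. A quick check on random data (illustrative only):

import numpy as np
from sklearn.cross_decomposition import PLSRegression

rng = np.random.RandomState(0)
X = rng.normal(size=(40, 8))
y = rng.normal(size=40)

pls = PLSRegression(n_components=3).fit(X, y)
print(pls.x_loadings_.shape)  # (8, 3): one loading column per component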
Example 8: float
# Required import: from sklearn.cross_decomposition import PLSRegression [as alias]
# Or: from sklearn.cross_decomposition.PLSRegression import transform [as alias]
#print "yp_t_not ", yp_t_not.shape
pls.fit(Xp_t,yp_t_not.astype(int))
yp_new = pls.predict(Xp_t, copy=True)
yp_pred = (yp_new[:,0] > yp_new[:,1]).astype(int)
yp_t = yp_t.astype(int)
#print y_new,y_pred, y_t
error = ((yp_t - yp_pred) ** 2).sum()
print "PLS Training error " , float(error)/yp_t.shape[0]
yp_new = pls.predict(Xp_v, copy=True)
yp_pred = (yp_new[:,0] > yp_new[:,1]).astype(int)
#print y_new, y_pred, y_v
#print ((y_v - y_pred) ** 2).sum(), y_v.shape[0]
error = ((yp_v - yp_pred) ** 2).sum()
print "PLS Validation error " , float(error)/yp_v.shape[0]
X_new = pls.transform(X)
rf = RandomForestClassifier(n_estimators=500, max_depth=None, max_features=int(math.sqrt(n_components)), min_samples_split=100, random_state=144, n_jobs=4)
#print "shapes ", X_new.shape, y.shape
#print X_new,y
X_t, X_v, y_t, y_v = tts(X_new,yd,train_size=0.85)
rf.fit(X_t, y_t)
print "Random Forest Classifier: ", rf.get_params()
print "Covariance Classifier Training score: ", rf.score(X_t, y_t)
print "Covariance Classifier Validation score: ", rf.score(X_v, y_v)
#print "Class prob: ", zip(rf.predict_proba(X_v), y_v)
sample_weights = rf.predict_proba(pls.transform(Xp_t))[:,1]
print sample_weights.shape
sample_weights = abs(sample_weights-0.5)
Example 9: enumerate
# Required import: from sklearn.cross_decomposition import PLSRegression [as alias]
# Or: from sklearn.cross_decomposition.PLSRegression import transform [as alias]
plt.xlim(1, np.amax(nComponents))
plt.title('PLS Canonical accuracy')
plt.xlabel('Number of components')
plt.ylabel('accuracy')
plt.legend(['LR', 'LDA', 'GNB', 'Linear SVM', 'rbf SVM'], loc='lower right')
plt.grid(True)
if (0):
    #%% PLS Regression
    nComponents = np.arange(1, nClasses + 1)
    plsRegScores = np.zeros((5, len(nComponents)))
    for i, n in enumerate(nComponents):
        plsReg = PLSRegression(n_components=n)
        plsReg.fit(Xtrain, Ytrain)
        XtrainT = plsReg.transform(Xtrain)
        XtestT = plsReg.transform(Xtest)
        plsRegScores[:, i] = util.classify(XtrainT, XtestT, labelsTrain, labelsTest)
    plsReg = PLSRegression(n_components=2)
    plsReg.fit(Xtrain, Ytrain)
    xt = plsReg.transform(Xtrain)
    fig = plt.figure()
    util.plotData(fig, xt, labelsTrain, classColors)
    plt.title('First 2 components of projected data')
#%% Plot accuracies for PLSSVD
plt.figure()
for i in range(5):