本文整理汇总了Python中sklearn.cross_decomposition.PLSRegression.score方法的典型用法代码示例。如果您正苦于以下问题:Python PLSRegression.score方法的具体用法?Python PLSRegression.score怎么用?Python PLSRegression.score使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类sklearn.cross_decomposition.PLSRegression的用法示例。
在下文中一共展示了PLSRegression.score方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: fit
# 需要导入模块: from sklearn.cross_decomposition import PLSRegression [as 别名]
# 或者: from sklearn.cross_decomposition.PLSRegression import score [as 别名]
def fit(self, predictors, predictands, locations, log=False, **kwargs):
    """Fit one 2-component PLS regression model per location.

    Parameters
    ----------
    predictors : mapping
        Predictor data; must contain key ``'n'`` and whatever
        ``extract_n_by_n`` requires.  # assumes dict-like — TODO confirm
    predictands : ndarray
        2-D array with one column per location, in the same order
        as ``locations``.
    locations : sequence
        Locations to fit models for.
    log : bool, optional
        If True, fit against log-transformed predictands.
    **kwargs
        Forwarded to ``extract_n_by_n``.

    Side effects: stores ``self.locations``, ``self.n`` and appends one
    fitted model per location to ``self.models``.
    """
    self.locations = locations
    self.models = []
    self.n = predictors['n']
    # enumerate replaces the hand-rolled counter, which shadowed builtin id()
    for col, location in enumerate(locations):
        X = extract_n_by_n(predictors, location, **kwargs)
        Y = predictands[:, col]
        if log:
            Y = np.log(Y)
        model = PLSRegression(n_components=2)
        model = model.fit(X, Y)
        self.models.append(model)
        # Diagnostic trace: fit quality and which predictor dominates
        # each loading vector.
        print("pls:", location, model.score(X, Y), model.x_loadings_.shape,
              np.argmax(model.x_loadings_, axis=0))
示例2: lex_function_learning
# 需要导入模块: from sklearn.cross_decomposition import PLSRegression [as 别名]
# 或者: from sklearn.cross_decomposition.PLSRegression import score [as 别名]
def lex_function_learning(class_name, hyper_vec):
    """Learn a lexical entailment mapping for one class with PLS regression.

    Builds training pairs from the module-level ``train_dataset`` and fits
    a 50-component PLS model mapping hyponym vectors to difference vectors.

    Parameters
    ----------
    class_name : hashable
        Key into ``train_dataset`` selecting the training pairs.
    hyper_vec : ndarray
        Hypernym embedding vector.

    Returns
    -------
    tuple
        ``(fitted PLSRegression model, training R^2, number of examples)``.
    """
    pls2 = PLSRegression(n_components=50, max_iter=5000)
    X = extract_postive_features(train_dataset[class_name][0],
                                 train_dataset[class_name][1])
    # Target is the difference vector: hypernym_vector - hyponym_vector.
    Y = [hyper_vec - hypo_vec for hypo_vec in X]
    pls2.fit(X, Y)
    train_acc = pls2.score(X, Y)
    print("class =", class_name, "train len =", len(X))
    return pls2, train_acc, len(X)
示例3: plsvip
# 需要导入模块: from sklearn.cross_decomposition import PLSRegression [as 别名]
# 或者: from sklearn.cross_decomposition.PLSRegression import score [as 别名]
def plsvip(X, Y, V, lat_var):
    """Backward feature elimination for PLS regression driven by VIP scores.

    Repeatedly fits a PLSRegression model with the best number of latent
    variables, scores every remaining attribute with VIP (variable
    importance in projection, via ``get_vip``), and removes the attribute
    with the lowest VIP until none remain.  ``X`` and ``V`` are modified
    in place.

    Parameters
    ----------
    X : list of lists
        Training instances; the eliminated attribute's column is popped
        from every row on each step.
    Y : sequence
        Target values.
    V : list
        Attribute names parallel to the columns of ``X``; popped in place.
    lat_var : int
        Upper bound on latent variables; falsy means "use all remaining
        attributes".

    Returns
    -------
    list of [name, R^2, n_attributes, n_latent_variables]
        One entry per elimination step, recording the attribute ABOUT to
        be removed and the model quality before its removal.  The very
        first entry uses the placeholder name "None".
    """
    attributes = len(X[0])
    latent_variables = lat_var if lat_var else attributes
    num_instances = len(X)
    attributes_gone = []
    min_att = -1
    while attributes > 0:
        # Never ask for more latent variables than attributes remain.
        if latent_variables == 0 or latent_variables > attributes:
            latent_variables = attributes
        lv_best = best_latent_variable(X, Y, latent_variables, num_instances)
        fin_pls = PLSRegression(n_components=lv_best)
        fin_pls.fit(X, Y)
        currentR2 = fin_pls.score(X, Y)
        # Bug fix: the original sentinel was 1000, which would skip every
        # attribute whose VIP exceeded it and leave min_att stale.
        min_vip = float('inf')
        if min_att == -1:
            # First pass: record the baseline model before any removal.
            attributes_gone.append(["None", currentR2, attributes, lv_best])
        # Find the attribute with the smallest VIP score.
        for i in range(attributes):
            vip_i = get_vip(fin_pls, lv_best, i, attributes_gone, attributes)
            if vip_i < min_vip:
                min_vip = vip_i
                min_att = i
        if min_att > -1:
            # Record the attribute that is about to be removed (NOT already
            # removed), then drop its name and its column from every row.
            attributes_gone.append([V[min_att], currentR2, attributes, lv_best])
            V.pop(min_att)
            for row in X:
                row.pop(min_att)
        attributes -= 1
    return attributes_gone
示例4: train_test_split
# 需要导入模块: from sklearn.cross_decomposition import PLSRegression [as 别名]
# 或者: from sklearn.cross_decomposition.PLSRegression import score [as 别名]
# Compare PLSRegression and PLSCanonical test scores on newdata.csv.
# (Original note: "correct not accurate".)
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in scikit-learn 0.20
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics
from sklearn.svm import SVC
import numpy as np
import pandas as pd
from sklearn.cross_decomposition import PLSRegression
from sklearn.cross_decomposition import PLSCanonical

df = pd.read_csv('newdata.csv')
# Predictors: everything but the tag column; targets: everything but the
# k*/w* feature columns.
x = df.drop(['tag'], axis=1)
y = df.drop(['kx', 'ky', 'kz', 'wa', 'wb', 'wc', 'wd', 'we', 'wf'], axis=1)
X_train, X_test, Y_train, Y_test = train_test_split(x, y, random_state=5)

plsr = PLSRegression()
plsr.fit(X_train, Y_train)
plsc = PLSCanonical()
plsc.fit(X_train, Y_train)
print(plsr.score(X_test, Y_test))
print(plsc.score(X_test, Y_test))
示例5: loadData
# 需要导入模块: from sklearn.cross_decomposition import PLSRegression [as 别名]
# 或者: from sklearn.cross_decomposition.PLSRegression import score [as 别名]
# Evaluate a 10-component PLS model on held-out data: standardize inputs,
# predict, report score and per-target RMSE, then plot the first target.
(Xtest, ytest) = loadData(xtestpath, ytestpath)

# Trim off the background (first) column and standardize the predictors.
ytrain = ytrain[:, 1:]
Xtrain = standardize(Xtrain)
ytest = ytest[:, 1:]
Xtest = standardize(Xtest)

pls = PLSRegression(n_components=10)
pls.fit(Xtrain, ytrain)
y_pls = pls.predict(Xtest)
# NOTE(review): the original printed 1 + R^2 — presumably a display
# offset; kept as-is to preserve output.
print(1 + pls.score(Xtest, ytest))

# RMSE for each of the first four response columns.
pls_rmse = [sqrt(mean_squared_error(ytest[:, k], y_pls[:, k]))
            for k in range(4)]

fig = plt.figure(figsize=(20, 10))
ax1 = fig.add_subplot(241)
ax1.plot(y_pls[:, 0], c='r', label='PLS Fit')
ax1.plot(ytest[:, 0], c='grey', label='Target')
ax1.set_xlabel('Time')
ax1.set_ylabel('[c]')
示例6: round
# 需要导入模块: from sklearn.cross_decomposition import PLSRegression [as 别名]
# 或者: from sklearn.cross_decomposition.PLSRegression import score [as 别名]
print round(SVRpreds[i],2)
i += 1
print "\n"
SVRr2.append(optSVR.score(XTest, yTest))
SVRmse.append( metrics.mean_squared_error(yTest,SVRpreds))
SVRrmse.append(math.sqrt(SVRmse[metcount]))
print ("Support Vector Regression prediction statistics for fold %d are; MSE = %5.2f RMSE = %5.2f R2 = %5.2f\n\n" % (metcount+1, SVRmse[metcount], SVRrmse[metcount],SVRr2[metcount]))
with open(train_name,'a') as ftrain :
ftrain.write("Support Vector Regression prediction statistics for fold %d are, MSE =, %5.2f, RMSE =, %5.2f, R2 =, %5.2f,\n\n" % (metcount+1, SVRmse[metcount], SVRrmse[metcount],SVRr2[metcount]))
ftrain.close()
# Train partial least squares and predict with optimised parameters
print("\n\n------------------- Starting opitimised PLS training -------------------")
optPLS = PLSRegression(n_components = nc)
optPLS.fit(XTrain, yTrain) # Train the model
print("Training R2 = %5.2f" % optPLS.score(XTrain,yTrain))
print("Starting optimised PLS prediction")
PLSpreds = optPLS.predict(XTest)
print("The predicted values now follow :")
PLSpredsdim = PLSpreds.shape[0]
i = 0
if PLSpredsdim%5 == 0:
while i < PLSpredsdim:
print round(PLSpreds[i],2),'\t', round(PLSpreds[i+1],2),'\t', round(PLSpreds[i+2],2),'\t', round(PLSpreds[i+3],2),'\t', round(PLSpreds[i+4],2)
i += 5
elif PLSpredsdim%4 == 0:
while i < PLSpredsdim:
print round(PLSpreds[i],2),'\t', round(PLSpreds[i+1],2),'\t', round(PLSpreds[i+2],2),'\t', round(PLSpreds[i+3],2)
i += 4
elif PLSpredsdim%3 == 0 :
while i < PLSpredsdim :