本文整理汇总了Python中sklearn.cross_decomposition.PLSRegression类的典型用法代码示例。如果您正苦于以下问题:Python PLSRegression类的具体用法?Python PLSRegression怎么用?Python PLSRegression使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了PLSRegression类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: fit
def fit(self, predictors, predictands, locations, log=False, **kwargs):
    """Fit one 2-component PLS regression model per location.

    Parameters
    ----------
    predictors : mapping with an 'n' entry plus whatever extract_n_by_n reads
    predictands : 2-D array, one column per location
    locations : iterable of location identifiers
    log : bool, fit against log-transformed predictands when True
    **kwargs : forwarded to extract_n_by_n

    Side effects: stores ``self.locations``, ``self.models`` and ``self.n``,
    and prints a per-location fit summary.
    """
    self.locations = locations
    self.models = []
    self.n = predictors['n']
    # enumerate replaces the original manual counter, which shadowed the
    # builtin `id`; py3 print() replaces the py2 print statement.
    for col, location in enumerate(locations):
        X = extract_n_by_n(predictors, location, **kwargs)
        Y = predictands[:, col]
        if log:
            Y = np.log(Y)
        model = PLSRegression(n_components=2)
        model = model.fit(X, Y)
        self.models.append(model)
        print("pls: ", location, model.score(X, Y), model.x_loadings_.shape,
              np.argmax(model.x_loadings_, axis=0))
示例2: PLSCrossValidation
def PLSCrossValidation(n_components, trainSet, validationSet):
    """Fit a PLS regression with `n_components` on the training set and
    return the mean squared prediction error on the validation set.

    Relies on the module-level `predictorList` for the feature columns and
    predicts the 'Apps' column.
    """
    model = PLSRegression(n_components=n_components)
    model.fit(trainSet[predictorList], trainSet['Apps'])
    predictions = model.predict(validationSet[predictorList])
    residuals = predictions.flat - validationSet['Apps']
    return np.mean(residuals ** 2)
示例3: build_model
def build_model(X, y):
    """Fit and return a 35-component PLS regression model on (X, y)."""
    model = PLSRegression(n_components=35)
    model.fit(X, y)
    return model
示例4: Training
def Training(df, seed, yratio, xratio, index=1):
    """Shuffle a SNP matrix, split it into train/test blocks and evaluate a
    classifier (index 1-6) or PLS regression (index 7) at predicting the
    held-out SNP rows from the kept ones.

    Parameters
    ----------
    df : DataFrame whose values form the (SNPs x individuals) matrix
    seed : RNG seed for reproducible shuffles
    yratio : fraction of individuals (columns) used for training
    xratio : fraction of SNPs (rows) used as predictors
    index : 1=AdaBoost, 2=RandomForest, 3=LogisticRegression,
            4/5/6=SVC (rbf/poly/linear), 7=PLS regression

    Returns the mean prediction accuracy. Prints per-row accuracy for the
    classifier branch.
    """
    snp_matrix = np.array(df.values)
    xdim, ydim = snp_matrix.shape
    # materialize as lists: random.shuffle cannot shuffle a py3 range
    ydimlist = list(range(ydim))
    xdimlist = list(range(xdim))
    random.seed(seed)
    random.shuffle(ydimlist)  # shuffle the individuals
    random.shuffle(xdimlist)  # shuffle the SNPs
    accuracy = 0
    # BUG FIX: the original re-indexed the *unshuffled* matrix on the second
    # line, silently discarding the column shuffle; apply both permutations.
    snp_matrix_shuffle = np.copy(snp_matrix[:, ydimlist])
    snp_matrix_shuffle = snp_matrix_shuffle[xdimlist, :]
    snp_matrix_train = snp_matrix_shuffle[:, 0:int(ydim * yratio)]
    snp_matrix_test = snp_matrix_shuffle[:, int(ydim * yratio):]
    snp_matrix_train_x = snp_matrix_train[0:int(xdim * xratio), :]
    snp_matrix_test_x = snp_matrix_test[0:int(xdim * xratio), :]
    for i in range(int(xdim * xratio), xdim):
        snp_matrix_train_y = snp_matrix_train[i, :]
        snp_matrix_test_y = snp_matrix_test[i, :]
        if index != 7:
            if index == 1:
                clf = AdaBoostClassifier(n_estimators=100)
            elif index == 2:
                clf = RandomForestClassifier(n_estimators=100)
            elif index == 3:
                clf = linear_model.LogisticRegression(C=1e5)
            elif index == 4:
                clf = svm.SVC(kernel='rbf')
            elif index == 5:
                clf = svm.SVC(kernel='poly')
            else:
                clf = svm.SVC(kernel='linear')
            clf = clf.fit(snp_matrix_train_x.T, snp_matrix_train_y)
            Y_pred = clf.predict(snp_matrix_test_x.T)
            prediction = snp_matrix_test_y - Y_pred
            wrong = np.count_nonzero(prediction)
            tmp = 1 - (wrong + 0.0) / len(prediction)
            print(tmp)
            accuracy += tmp
    accuracy = accuracy / (xdim - int(xdim * xratio))
    if index == 7:
        # PLS predicts all held-out SNP rows jointly; `transform` is a
        # project helper that rounds predictions back to genotype calls.
        pls2 = PLSRegression(n_components=50, scale=False, max_iter=1000)
        snp_matrix_train_y = snp_matrix_train[int(xdim * xratio):, :]
        pls2.fit(snp_matrix_train_x.T, snp_matrix_train_y.T)
        snp_matrix_test_x = snp_matrix_test[0:int(xdim * xratio), :]
        snp_matrix_test_y = snp_matrix_test[int(xdim * xratio):, :]
        Y_pred = transform(pls2.predict(snp_matrix_test_x.T))
        prediction = snp_matrix_test_y - Y_pred.T
        xdim, ydim = prediction.shape
        wrong = np.count_nonzero(prediction)
        accuracy = 1 - wrong / (xdim * ydim + 0.0)
    return accuracy
示例5: get_correlations
def get_correlations(param, spec, wave):
    '''Returns correlations between spec and params by wavelengths'''
    # `wave` is accepted for interface compatibility but not used here.
    model = PLSRegression(10)
    model.fit(spec, param)
    n_params = param.shape[1]
    # Scale the regression coefficients by predictor std devs and divide by
    # response std devs to turn them into correlation-like quantities.
    # NOTE(review): `.coefs`, `.x_std_` and `.y_std_` only exist on old
    # scikit-learn releases (modern ones expose `coef_`) — confirm the
    # pinned sklearn version.
    scaled = model.coefs * np.asarray([model.x_std_] * n_params).T
    scaled /= np.tile(model.y_std_, (scaled.shape[0], 1))
    return scaled
示例6: do_pls
def do_pls(X, Y):
    """Fit a 2-component PLS model on (X, Y), print the X scores and save a
    scatter plot of the first two components to 'pls.png'."""
    model = PLSRegression(n_components=2)
    model.fit(X, Y)
    scores = model.transform(X)
    print(scores)
    print(scores.shape)
    plt.title("PLS2")
    plt.xlabel("PL1")
    plt.ylabel("PL2")
    plt.grid()
    plt.scatter(scores[:, 0], scores[:, 1], c=Y, cmap='viridis')
    plt.savefig('pls.png', dpi=125)
示例7: pls_approach
def pls_approach():
    """Project the pulled (X, Y) data with a default PLS regression and plot
    the X scores per city."""
    from sklearn.cross_decomposition import PLSRegression

    (X, Y), cities = pull_xy_data()
    model = PLSRegression()
    model.fit(X, Y)
    x_scores, y_scores = model.transform(X, Y)
    plot(x_scores, cities, ["Lat01", "Lat02", "Lat03"], ellipse_sigma=1)
    return "OK What Now?"
示例8: __init__
class PLSPredictor:
    """Thin wrapper around a 2-component PLS regression."""

    def __init__(self):
        self.pls2 = PLSRegression(n_components=2,
                                  scale=True,
                                  max_iter=500,
                                  tol=1e-06,
                                  copy=True)

    def predict(self, values):
        """Return model predictions for `values`.

        Bug fix: the original computed the prediction but discarded it,
        always returning None.
        """
        return self.pls2.predict(values)

    def train(self, measured_values, screen_points):
        """Fit the underlying PLS model."""
        self.pls2.fit(measured_values, screen_points)
示例9: __one_pls
def __one_pls(self, cat):
    """Fit a one-vs-rest PLS model for class `cat`.

    Training labels are recoded to +1 for `cat` and -1 for everything else.
    """
    np.seterr(all='raise')
    labels = self.train_set['labels']
    targets = np.where(labels == cat, 1.0, -1.0)
    model = PLSRegression(n_components=2, scale=False)
    model.fit(self.train_set['data'], targets)
    return model
示例10: fit_base_model
def fit_base_model(classifiers, fully, dummyY, trainx, testx):
    """ Takes a list of classifiers and/or PLS regression and
    does dimension reduction by returning the predictions of the classifiers
    or first two scores of the PLS regression on bootstrapped subsamples of
    the data."""
    # Each entry of `classifiers` is a (model_or_'PLS', repeat_count) pair.
    # `fully` holds the true labels, `dummyY` a one-hot/dummy encoding used
    # only by the PLS branch. `base_prop` is a module-level constant giving
    # the bootstrap sample fraction — TODO confirm it is defined elsewhere.
    trainProbs = []
    testProbs = []
    iterations = 0
    for clf in classifiers:
        for i in range(clf[1]):
            iterations += 1
            print(iterations)
            print(clf[0])
            # bootstrap rows with replacement; out-of-bag rows are the
            # training rows never drawn, used below for an OOB score
            train_rows = np.random.choice(trainx.shape[0],
                                          round(trainx.shape[0] * base_prop),
                                          True)
            oob_rows = list(set(range(trainx.shape[0])) - set(train_rows))
            print(len(train_rows))
            print(len(oob_rows))
            x = trainx[train_rows, :]
            if clf[0] == 'PLS':
                # PLS branch: keep the first two score columns as features
                y = dummyY[train_rows, :]
                mod = PLSRegression().fit(x, y)
                trainscores = mod.transform(trainx)
                testscores = mod.transform(testx)
                trainProbs.append(trainscores[:, 0])
                trainProbs.append(trainscores[:, 1])
                testProbs.append(testscores[:, 0])
                testProbs.append(testscores[:, 1])
            else:
                # classifier branch: keep the positive-class probability
                y = fully[train_rows]
                print('\t Fitting model...')
                mod = clf[0].fit(x, y)
                print('\t Predicting training results...')
                tpreds = mod.predict_proba(trainx)
                trainProbs.append(list(tpreds[:, 1]))
                print('\t Predicting test results...')
                testProbs.append(list(mod.predict_proba(testx)[:, 1]))
                print('\t OOB score: ' + str(log_loss(fully[oob_rows],
                                                      tpreds[oob_rows, :])))
    return trainProbs, testProbs
示例11: pls_regr
def pls_regr(x, y):
    """Fit and return a PLS regression model on (x, y).

    Raises
    ------
    TypeError
        If x has fewer than 2 features.
    """
    from sklearn.cross_decomposition import PLSRegression

    n_features = len(x[0])
    if n_features < 2:
        raise TypeError
    # Cap n_components at the feature count: the original hard-coded 5,
    # which made sklearn raise for 2 <= n_features < 5. (A commented-out
    # search over component counts was dead code and has been removed.)
    model = PLSRegression(n_components=min(5, n_features))
    model.fit(x, y)
    return model
示例12: train_PLSR
def train_PLSR(x_filename, y_filename, model_filename, n):
    """
    Train a PLSR model and save it to the model_filename.
    X and Y matrices are read from x_filename and y_filename.
    The no. of PLSR components is given by n.

    Raises ValueError when X and Y have a different number of rows.
    """
    X = loadMatrix(x_filename)[0].todense()
    Y = loadMatrix(y_filename)[0].todense()
    if X.shape[0] != Y.shape[0]:
        sys.stderr.write("X and Y must have equal number of rows!\n")
        raise ValueError
    sys.stderr.write("Learning PLSR...")
    startTime = time.time()
    pls2 = PLSRegression(copy=True, max_iter=10000, n_components=n, scale=True, tol=1e-06)
    pls2.fit(X, Y)
    # Bug fix: pickle needs a binary handle — the original opened the file
    # in text mode ('w'), which fails under Python 3 and could corrupt the
    # dump; a context manager also guarantees the handle is closed.
    with open(model_filename, 'wb') as model:
        pickle.dump(pls2, model, 1)
    endTime = time.time()
    sys.stderr.write(" took %ss\n" % str(round(endTime - startTime, 2)))
示例13: lex_function_learning
def lex_function_learning(class_name, hyper_vec):
    """Learn a lexical function mapping hyponym vectors toward `hyper_vec`.

    Fits a PLS regression whose targets are difference vectors
    (hypernym_vector - hyponym_vector) for the positive training pairs of
    `class_name` (read from the module-level `train_dataset`).

    Returns (fitted_model, training_R2_score, number_of_training_examples).
    """
    pls2 = PLSRegression(n_components=50, max_iter=5000)
    X = extract_postive_features(train_dataset[class_name][0],
                                 train_dataset[class_name][1])
    # Target = difference vector (Hypernym_vector - Hyponym_vector)
    Y = [hyper_vec - hypo_vec for hypo_vec in X]
    pls2.fit(X, Y)
    train_acc = pls2.score(X, Y)
    # py3 print() replaces the original py2 print statement
    print("class = ", class_name, "train len = ", len(X))
    return pls2, train_acc, len(X)
示例14: reduce_PLS
def reduce_PLS(dataframe):
    """Transform the feature columns of `dataframe` with a 10-component PLS
    regression fitted against the 'click' column.

    The fitted model is cached on disk at data/pls_structure.pickle and
    reused on subsequent calls. Returns the transformed feature matrix.
    """
    PLS_file = "data/pls_structure.pickle"
    selectedcolumn = [x for x in dataframe.columns
                      if x not in ["id", "click", "device_id", "device_ip"]]
    X = np.array(dataframe[selectedcolumn])
    y = np.array(dataframe["click"])
    if os.path.exists(PLS_file):
        # context managers fix the original's leaked file handles
        with open(PLS_file, 'rb') as fh:
            stand_PLS = pickle.load(fh)
        print("PLS structure is loaded.")
    else:
        stand_PLS = PLSRegression(n_components=10, scale=True)
        stand_PLS.fit(X, y[:, np.newaxis])
        # drop the per-sample score matrices so the cached pickle stays small
        stand_PLS.y_scores_ = None
        stand_PLS.x_scores_ = None
        with open(PLS_file, "wb") as fh:
            pickle.dump(stand_PLS, fh)
        print("PLS transform structure is stored.")
    T = stand_PLS.transform(X)
    print("PLS transformation is performed.")
    return T
示例15: PLSRegression
plt.xticks(())
plt.yticks(())
plt.show()
# #############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
# true coefficient matrix: only the first two predictors carry signal
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noize
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coef_ with B
print("Estimated B")
print(np.round(pls2.coef_, 1))
pls2.predict(X)
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
# univariate response with signal in the first two predictors plus noise
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
# NOTE(review): the fit/inspection of pls1 presumably continues past the
# end of this chunk
pls1 = PLSRegression(n_components=3)