This article collects typical usage examples of sklearn.cross_decomposition.PLSRegression in Python. If you are wondering what cross_decomposition.PLSRegression does and how to use it, the curated code samples below may help; you can also read further about the module it belongs to, sklearn.cross_decomposition.
Seven code examples of cross_decomposition.PLSRegression are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
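Before the numbered examples, here is a minimal, self-contained sketch of the basic PLSRegression workflow (fit, transform, predict); the toy data is invented purely for illustration and is not taken from any of the examples below.

# Minimal illustrative sketch of the PLSRegression workflow (toy data).
import numpy as np
from sklearn.cross_decomposition import PLSRegression

# 4 samples, 3 features, 2 target columns.
X = np.array([[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]])
Y = np.array([[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]])

pls = PLSRegression(n_components=2)
pls.fit(X, Y)                # learn the latent components relating X to Y
X_scores = pls.transform(X)  # project X onto the latent components
Y_pred = pls.predict(X)      # predict both target columns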
Example 1: test_model_pls_regression
# Required import: from sklearn import cross_decomposition [as alias]
# Or: from sklearn.cross_decomposition import PLSRegression [as alias]
def test_model_pls_regression(self):
    X = numpy.array([[0., 0., 1.], [1., 0., 0.],
                     [2., 2., 2.], [2., 5., 4.]],
                    numpy.float32)
    Y = numpy.array([[0.1, -0.2], [0.9, 1.1], [6.2, 5.9],
                     [11.9, 12.3]],
                    numpy.float32)
    pls2 = PLSRegression(n_components=2)
    pls2.fit(X, Y)
    model_onnx = convert_sklearn(
        pls2, "scikit-learn pls",
        [("input", FloatTensorType([None, X.shape[1]]))])
    self.assertTrue(model_onnx is not None)
    dump_data_and_model(
        X, pls2, model_onnx, methods=['predict'],
        basename="SklearnPLSRegression",
        allow_failure="StrictVersion("
                      "onnxruntime.__version__)<= StrictVersion('0.2.1')")
Example 2: test_model_pls_regression64
# Required import: from sklearn import cross_decomposition [as alias]
# Or: from sklearn.cross_decomposition import PLSRegression [as alias]
def test_model_pls_regression64(self):
    X = numpy.array([[0., 0., 1.], [1., 0., 0.],
                     [2., 2., 2.], [2., 5., 4.]],
                    numpy.float64)
    Y = numpy.array([[0.1, -0.2], [0.9, 1.1], [6.2, 5.9],
                     [11.9, 12.3]],
                    numpy.float64)
    pls2 = PLSRegression(n_components=2)
    pls2.fit(X, Y)
    model_onnx = convert_sklearn(
        pls2, "scikit-learn pls64",
        [("input", DoubleTensorType([None, X.shape[1]]))],
        dtype=numpy.float64)
    self.assertTrue(model_onnx is not None)
    dump_data_and_model(
        X, pls2, model_onnx, methods=['predict'],
        basename="SklearnPLSRegression64",
        allow_failure="StrictVersion("
                      "onnxruntime.__version__)<= StrictVersion('0.2.1')")
Example 3: test_model_pls_regressionInt64
# Required import: from sklearn import cross_decomposition [as alias]
# Or: from sklearn.cross_decomposition import PLSRegression [as alias]
def test_model_pls_regressionInt64(self):
    X = numpy.array([[0., 0., 1.], [1., 0., 0.],
                     [2., 2., 2.], [2., 5., 4.]],
                    numpy.int64)
    Y = numpy.array([[0.1, -0.2], [0.9, 1.1], [6.2, 5.9],
                     [11.9, 12.3]],
                    numpy.int64)
    pls2 = PLSRegression(n_components=2)
    pls2.fit(X, Y)
    model_onnx = convert_sklearn(
        pls2, "scikit-learn plsint64",
        [("input", Int64TensorType([None, X.shape[1]]))])
    self.assertTrue(model_onnx is not None)
    dump_data_and_model(
        X, pls2, model_onnx, methods=['predict'],
        basename="SklearnPLSRegressionInt64",
        allow_failure="StrictVersion("
                      "onnxruntime.__version__)<= StrictVersion('0.2.1')")
Example 4: test_PLSRegression
# Required import: from sklearn import cross_decomposition [as alias]
# Or: from sklearn.cross_decomposition import PLSRegression [as alias]
def test_PLSRegression(self):
    n = 1000
    q = 3
    p = 10
    X = np.random.normal(size=n * p).reshape((n, p))
    B = np.array([[1, 2] + [0] * (p - 2)] * q).T
    # each Yj = 1*X1 + 2*X2 + noise
    Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5

    df = pdml.ModelFrame(X, target=Y)
    pls1 = df.cross_decomposition.PLSRegression(n_components=3)
    df.fit(pls1)
    result = df.predict(pls1)

    pls2 = cd.PLSRegression(n_components=3)
    pls2.fit(X, Y)
    expected = pls2.predict(X)

    self.assertIsInstance(result, pdml.ModelFrame)
    self.assert_numpy_array_almost_equal(result.values, expected)
Example 5: pls
# Required import: from sklearn import cross_decomposition [as alias]
# Or: from sklearn.cross_decomposition import PLSRegression [as alias]
def pls(components, train_matrix, target, test_matrix):
"""Projection of latent structure routine.
Parameters
----------
components : int
The number of components to be returned.
train_matrix : array
The training features.
test_matrix : array
The test features.
Returns
-------
new_train : array
Extracted training features.
new_test : array
Extracted test features.
"""
msg = 'The number of components must be a positive int greater than 0.'
assert components > 0, msg
pls = PLSRegression(n_components=components)
model = pls.fit(X=train_matrix, Y=target)
new_train = model.transform(train_matrix)
new_test = model.transform(test_matrix)
return new_train, new_test
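As a usage illustration (not part of the original source), the helper above could be called on synthetic data like this; the shapes and values are invented for the sketch:

# Hypothetical call to the pls() helper above, on random data.
import numpy as np

train_matrix = np.random.rand(50, 8)
target = np.random.rand(50)
test_matrix = np.random.rand(10, 8)

new_train, new_test = pls(3, train_matrix, target, test_matrix)
print(new_train.shape, new_test.shape)  # (50, 3) (10, 3)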
Example 6: test_objectmapper
# Required import: from sklearn import cross_decomposition [as alias]
# Or: from sklearn.cross_decomposition import PLSRegression [as alias]
def test_objectmapper(self):
    df = pdml.ModelFrame([])
    self.assertIs(df.cross_decomposition.PLSRegression, cd.PLSRegression)
    self.assertIs(df.cross_decomposition.PLSCanonical, cd.PLSCanonical)
    self.assertIs(df.cross_decomposition.CCA, cd.CCA)
    self.assertIs(df.cross_decomposition.PLSSVD, cd.PLSSVD)
Example 7: trainmodels
# Required import: from sklearn import cross_decomposition [as alias]
# Or: from sklearn.cross_decomposition import PLSRegression [as alias]
def trainmodels(m, x, y, iter=1000):
    '''For the model type m, train a model on x->y using built-in CV to
    parameterize. Return both this model and an unfit model that can be used for CV.
    Note for PLS we cheat a little bit since there isn't a built-in CV trainer.
    '''
    if m == 'pls':
        # have to manually cross-validate to choose number of components
        kf = KFold(n_splits=3)
        bestscore = -10000
        besti = 0
        for i in range(1, min(100, len(x[0]))):
            # try larger number of components until average CV perf decreases
            pls = PLSRegression(i)
            scores = []
            # TODO: parallelize below
            for train, test in kf.split(x):
                xtrain = x[train]
                ytrain = y[train]
                xtest = x[test]
                ytest = y[test]
                pls.fit(xtrain, ytrain)
                score = scoremodel(pls, xtest, ytest)
                scores.append(score)
            ave = np.mean(scores)
            if ave < bestscore * 0.95:  # getting significantly worse
                break
            elif ave > bestscore:
                bestscore = ave
                besti = i
        model = PLSRegression(besti)
        model.fit(x, y)
        unfit = PLSRegression(besti)  # choose number of components using full data - iffy
        print("PLS components =", besti)
    elif m == 'lasso':
        model = LassoCV(n_jobs=-1, max_iter=iter)
        model.fit(x, y)
        unfit = LassoCV(n_jobs=-1, max_iter=iter)  # (alpha=model.alpha_)
        print("LASSO alpha =", model.alpha_)
        return (model, unfit)
    elif m == 'ridge':
        model = RidgeCV()
        model.fit(x, y)
        print("Ridge alpha =", model.alpha_)
        unfit = RidgeCV()
    else:
        model = ElasticNetCV(n_jobs=-1, l1_ratio=[.1, .5, .7, .9, .95, .99, 1], max_iter=iter)
        model.fit(x, y)
        print("Elastic alpha =", model.alpha_, " l1_ratio =", model.l1_ratio_)
        unfit = ElasticNetCV(n_jobs=-1, max_iter=iter)
    return (model, unfit)
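The manual KFold loop in the 'pls' branch exists because scikit-learn offers no CV variant of PLSRegression. An alternative, shown only as a sketch and not part of the original code, is to let GridSearchCV choose n_components:

# Sketch (not from the original code): pick the number of PLS components with GridSearchCV.
from sklearn.cross_decomposition import PLSRegression
from sklearn.model_selection import GridSearchCV

def train_pls_cv(x, y, max_components=30, folds=3):
    n_components = range(1, min(max_components, x.shape[1]) + 1)
    grid = GridSearchCV(PLSRegression(),
                        param_grid={'n_components': list(n_components)},
                        cv=folds)
    grid.fit(x, y)  # refit=True, so the best model is retrained on all of x, y
    unfit = PLSRegression(**grid.best_params_)
    return grid.best_estimator_, unfit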