This article collects and summarizes typical usage examples of the Python class sklearn.linear_model.RidgeCV. If you have been wondering what exactly RidgeCV does and how to use it, the curated class examples below should help. Fifteen RidgeCV code examples are presented, ordered by popularity by default.
Example 1: RR_cv_estimate_alpha
def RR_cv_estimate_alpha(sspacing, tspacing, alphas):
    """
    Estimate the optimal regularization parameter by grid search over a list
    of candidates, using k-fold cross-validation.

    Parameters
    ----------
    sspacing : 2D subsampling ratio in space (in one direction)
    tspacing : 1D subsampling ratio in time
    alphas : list of regularization parameters for the grid search
    """
    # Load all training data (data_preprocess is a project-specific helper)
    (Xl_tr, mea_l, sig_l, Xh_tr, mea_h, sig_h) = data_preprocess(sspacing, tspacing)

    # RidgeCV with 10-fold CV. Note: the `normalize` parameter was removed in
    # scikit-learn 1.2; drop it (or standardize beforehand) on recent versions.
    from sklearn.linear_model import RidgeCV
    ridge = RidgeCV(alphas=alphas, cv=10, fit_intercept=False, normalize=False)
    ridge.fit(Xl_tr, Xh_tr)

    RR_alpha_opt = ridge.alpha_
    print('\n Optimal lambda:', RR_alpha_opt)

    # Save to a .mat file
    import scipy.io as io
    filename = "".join(['/data/PhDworks/isotropic/regerssion/RR_cv_alpha_sspacing',
                        str(sspacing), '_tspacing', str(tspacing), '.mat'])
    io.savemat(filename, dict(alphas=alphas, RR_alpha_opt=RR_alpha_opt))

    return RR_alpha_opt
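To see the same grid search in isolation, here is a minimal, self-contained sketch on synthetic data; the array shapes and the alpha grid are illustrative, not taken from the original project:

import numpy as np
from sklearn.linear_model import RidgeCV

rng = np.random.RandomState(0)
X = rng.normal(size=(200, 10))
y = X @ rng.normal(size=10) + 0.1 * rng.normal(size=200)

# Grid search over 25 log-spaced alphas with 10-fold cross-validation
ridge = RidgeCV(alphas=np.logspace(-4, 2, 25), cv=10, fit_intercept=False)
ridge.fit(X, y)
print('Optimal lambda:', ridge.alpha_)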
Example 2: ridge_predict
def ridge_predict(train_data, train_target, test_data):
    # Build the model: a dense, hand-tuned alpha grid spanning 1e-3 to 1e13
    alpha_ranges = [1e-3, 1e-2, 1e-1, 1, 1e2, 1e3,
                    2e3, 2.5e3, 3e3, 3.5e3, 4e3,
                    5e3, 6e3, 6.1e3, 6.15e3, 6.25e3, 6.3e3, 6.4e3, 7e3,
                    7.75e3, 7.9e3, 8e3, 8.1e3, 8.2e3, 8.25e3, 8.3e3, 8.4e3, 8.5e3, 8.75e3, 9e3, 9.25e3, 9.4e3, 9.5e3, 9.6e3, 9.75e3,
                    1e4, 1.25e4, 1.4e4, 1.5e4, 1.55e4, 1.58e4, 1.6e4, 1.625e4, 1.65e4, 1.7e4, 1.725e4, 1.74e4, 1.75e4, 1.76e4, 1.78e4, 1.85e4,
                    2e4, 2.25e4, 2.5e4, 3e4, 4e4,
                    0.5e5, 0.75e5, 1e5, 1.25e5, 1.5e5,
                    0.8e6, 0.9e6, 1e6, 1.1e6, 1.2e6, 1.25e6, 1.28e6, 1.3e6, 1.32e6, 1.33e6, 1.34e6, 1.4e6, 1.5e6, 2e6,
                    1e7, 1e8, 1e9, 5e9, 1e10, 5e10, 1e11, 1e12, 1e13]
    # cv=None selects efficient leave-one-out cross-validation, which is what
    # permits store_cv_values=True. Note: `normalize` was removed in
    # scikit-learn 1.2 and `store_cv_values` was renamed in later releases.
    clf = RidgeCV(alphas=alpha_ranges,
                  normalize=True, cv=None, fit_intercept=False, store_cv_values=True)

    # Fit
    clf.fit(train_data, train_target)
    # print("alpha range:", alpha_ranges)
    # print("CV per alpha:", np.mean(clf.cv_values_, axis=0))
    # print("alpha used:", clf.alpha_)
    # print("fit score:", clf.score(train_data, train_target))

    # Predict
    predictions = clf.predict(test_data)
    return predictions
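On scikit-learn >= 1.2 the normalize=True shortcut no longer exists. A common replacement is standardizing inside a Pipeline; note this is only an approximation, since the old normalize divided each centered feature by its L2 norm rather than its standard deviation. A sketch reusing the names from ridge_predict:

from sklearn.linear_model import RidgeCV
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

# Standardize features, then run the same alpha search
model = make_pipeline(StandardScaler(), RidgeCV(alphas=alpha_ranges))
model.fit(train_data, train_target)
predictions = model.predict(test_data)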
Example 3: fit
def fit(self, X, y):
    """Fit the shape function of each feature with the backfitting algorithm.

    Note that the shape functions are centered (not reduced).

    Parameters
    ----------
    X : array-like, shape=(n_samples, n_features)
        The input samples.
    y : array-like, shape=(n_samples,)
        The target values.

    Returns
    -------
    self : object
        The Generalized Additive Model with the fitted shape functions.
    """
    n_samples, n_features = X.shape

    if not isinstance(self.smoothers, list):
        self.smoothers_ = [clone(self.smoothers) for i in range(n_features)]
    else:
        self.smoothers_ = [clone(self.smoothers[j]) for j in range(n_features)]
    self.ridge = RidgeCV(alphas=[self.ridge_alphas] * len(self.smoothers_), fit_intercept=False)

    self.y_mean_ = np.mean(y)
    self.rmse_ = []  # stores the (mean squared) train error at each iteration
    y = y - y.mean()  # center the target without mutating the caller's array

    # `temp` keeps the shape functions of the previous iteration for the residual computation
    temp = np.zeros(shape=(n_samples, n_features))
    shape_functions = np.zeros(shape=(n_samples, n_features))
    for i in range(self.max_iter):
        for j in range(n_features):
            # Select every column except the j-th one
            idx = list(set(np.arange(0, n_features, 1)) - set([j]))
            # Partial residuals with respect to the previous iteration
            residuals = y.reshape((n_samples, 1)) - temp[:, idx].sum(axis=1, keepdims=True).reshape((n_samples, 1))
            residuals -= residuals.mean()
            # reshape to 1D to avoid a deprecation warning
            self.smoothers_[j].fit(X[:, j:j+1], residuals.reshape((n_samples,)))
            shape_functions[:, j] = self.smoothers_[j].predict(X[:, j:j+1])
            shape_functions[:, j] -= shape_functions[:, j].mean()

        # Ridge regression on top of the shape functions to re-scale each of them
        self.ridge.fit(shape_functions, y)
        coef = self.ridge.coef_
        shape_functions *= coef

        y_pred = shape_functions.sum(axis=1)
        y_pred -= y_pred.mean()
        self.rmse_.append(met.mean_squared_error(y, y_pred))  # met = sklearn.metrics
        temp = shape_functions.copy()
    return self
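The inner loop above is easier to follow in isolation. Below is a minimal, self-contained sketch of one backfitting pass followed by the RidgeCV re-scaling step; the DecisionTreeRegressor smoother and the synthetic data are illustrative assumptions, not part of the original class:

import numpy as np
from sklearn.tree import DecisionTreeRegressor
from sklearn.linear_model import RidgeCV

rng = np.random.RandomState(0)
X = rng.uniform(-3, 3, size=(500, 2))
y = np.sin(X[:, 0]) + 0.5 * X[:, 1] ** 2 + rng.normal(0, 0.1, 500)
y = y - y.mean()

shape = np.zeros_like(X)
for j in range(X.shape[1]):
    # Partial residual: target minus the other features' current shape functions
    residual = y - shape[:, [k for k in range(X.shape[1]) if k != j]].sum(axis=1)
    smoother = DecisionTreeRegressor(max_depth=4).fit(X[:, j:j+1], residual)
    shape[:, j] = smoother.predict(X[:, j:j+1])
    shape[:, j] -= shape[:, j].mean()  # center each shape function

# RidgeCV re-scales the centered shape functions, as in fit() above
ridge = RidgeCV(alphas=[0.1, 1.0, 10.0], fit_intercept=False).fit(shape, y)
print(ridge.coef_)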
Example 4: regularizedreg
def regularizedreg(Xtrain, Xtest, ytrain, ytest):
    # Equivalent to RidgeCV(alphas=[1, 2, 20, 40, 50], cv=None, fit_intercept=True, scoring=None)
    Rclf = RidgeCV(alphas=[1, 2, 20, 40, 50])
    Rclf.fit(Xtrain, ytrain)
    print("Residual sum of squares: %.2f"
          % np.mean((Rclf.predict(Xtest) - ytest) ** 2))
    print('Regularization chosen, alpha = %.2f' % Rclf.alpha_)
    print(' Coef values = ', Rclf.coef_)
    print('Variance score: %.2f' % Rclf.score(Xtest, ytest))
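A hedged usage sketch on synthetic data (it assumes numpy is imported as np at module level, which regularizedreg requires):

from sklearn.datasets import make_regression
from sklearn.model_selection import train_test_split

X, y = make_regression(n_samples=200, n_features=10, noise=5.0, random_state=0)
Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, random_state=0)
regularizedreg(Xtrain, Xtest, ytrain, ytest)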
Example 5: ridgeCV
def ridgeCV(data, targets):
    """
    Returns a fitted RidgeCV linear model with alphas [1, 10, 50, 100, 1000].
    Takes the data and the associated targets as arguments.
    """
    model = RidgeCV(alphas=[1, 10, 50, 100, 1000])
    model.fit(data, targets)
    return model
Example 6: fit_Ridge
def fit_Ridge(features_train, labels_train, features_pred, alphas=(0.1, 1.0, 10.0)):
    # `normalize` and `store_cv_values` only exist on older scikit-learn
    # releases (removed/renamed since); see the note below this example.
    model = RidgeCV(normalize=True, store_cv_values=True, alphas=alphas)
    model.fit(features_train, labels_train)
    cv_errors = np.mean(model.cv_values_, axis=0)
    print("RIDGE - CV error min: ", np.min(cv_errors))
    # Predict on the held-out features
    labels_pred = model.predict(features_pred)
    return labels_pred
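Recent scikit-learn releases renamed this machinery: if I read the changelog correctly, store_cv_values/cv_values_ became store_cv_results/cv_results_ around version 1.5, and normalize was removed in 1.2. A hedged modern equivalent of the CV-error inspection above, reusing fit_Ridge's argument names:

import numpy as np
from sklearn.linear_model import RidgeCV
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

ridge = RidgeCV(alphas=(0.1, 1.0, 10.0), store_cv_results=True)
model = make_pipeline(StandardScaler(), ridge)
model.fit(features_train, labels_train)
cv_errors = np.mean(ridge.cv_results_, axis=0)  # one mean LOO error per alpha
print("RIDGE - CV error min:", np.min(cv_errors))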
Example 7: orth_signal
def orth_signal(x, atol=1e-13, rtol=0):
    """
    Returns a signal orthogonal to the input ensemble.

    x -> input signal [n_samples, n_neurons]
    """
    t = np.linspace(0, 1, x.shape[0])[:, None]
    f = np.arange(x.shape[1]) / x.shape[1]
    xt = np.sum(np.sin(2 * np.pi * f * 3 * t) / (f + 1), axis=1)
    # Regress the candidate signal on the ensemble and keep the residual,
    # which is orthogonal to the span of x
    w = RidgeCV(np.logspace(-6, 3, 50))
    w.fit(x, xt)
    xt = xt - w.predict(x)
    return xt
Example 8: RidgeCVLinear
def RidgeCVLinear(train, test):
    print('starting RidgeCVLinear ...')
    # `normalize` was removed in scikit-learn 1.2; standardize beforehand there
    ridge = RidgeCV(normalize=True, cv=5)
    # reindex returns a new frame, so the shuffled result must be reassigned
    train = train.reindex(np.random.permutation(train.index))
    tr_X = train.drop('LogSales', axis=1)
    tr_Y = train['LogSales']
    cutoff = math.floor(0.7 * tr_Y.size)
    ridge.fit(tr_X[:cutoff], tr_Y[:cutoff])
    predY = ridge.predict(tr_X[cutoff:])
    mspe = rmspe(predY, tr_Y[cutoff:])  # rmspe is a project-specific helper
    print('rmspe is %9f' % mspe)
    print(train.columns)
    print(ridge.coef_)
    print('starting RidgeCVLinear ... completed')
    return ridge
Example 9: __init__
def __init__(self, num_dists=2, sigma=0.1, base_learner=None, **kwargs):
    self.num_dists = num_dists
    self.sigma = sigma
    if base_learner is None:
        base_learner = RidgeCV(fit_intercept=False,
                               alphas=[0.001, 0.01, 0.1, 100, 1000], cv=None,
                               store_cv_values=True)
    if 'fit_intercept' not in kwargs:
        kwargs['fit_intercept'] = False
    self.base_learner = base_learner.set_params(**kwargs)
    self.R = None
    self.model = None
Example 10: stacking
def stacking(estimators):
    # Training: X, y, alphas, test_data and write_results are module-level
    # globals in the original script
    predictions = []
    for estim in estimators:
        estim.fit(X, y)
        predictions.append(estim.predict(X))
    # RidgeCV as the aggregator (level-1 model) over the stacked predictions
    agg = RidgeCV(alphas=alphas, cv=5, normalize=True, fit_intercept=True)
    agg.fit(np.array(predictions).T, y)

    # Test
    predictions = []
    for estim in estimators:
        predictions.append(estim.predict(test_data))
    predictions = agg.predict(np.array(predictions).T)
    write_results(predictions)
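A self-contained sketch of the same idea with concrete base models (the estimator choices and data are illustrative, not from the original script):

import numpy as np
from sklearn.datasets import make_regression
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import Lasso, RidgeCV

X, y = make_regression(n_samples=300, n_features=8, noise=10.0, random_state=0)
base = [RandomForestRegressor(n_estimators=50, random_state=0), Lasso(alpha=0.1)]
# Level-0 predictions stacked column-wise, then weighted by the RidgeCV aggregator
level0 = np.column_stack([est.fit(X, y).predict(X) for est in base])
agg = RidgeCV(alphas=np.logspace(-3, 3, 13)).fit(level0, y)
print(agg.coef_)  # weight assigned to each base model

Note that fitting the aggregator on in-sample predictions, as both the original and this sketch do, biases the weights toward overfit base models; out-of-fold level-0 predictions (what sklearn.ensemble.StackingRegressor does) are the usual remedy.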
Example 11: validate
def validate(nPrev, nAfter, aux_temp, aux_sun, aux_prec, get_model=False):
    X_Final = getFeature(nPrev, nAfter, aux_temp, aux_sun, aux_prec, TrainFiles)
    data_train_target = pd.read_csv(TrainTarget, sep='\t', header=None)
    y = data_train_target.loc[:, 0].values

    TEST_SIZE = 0.2
    RANDOM_STATE = 0
    X_train, X_val, y_train, y_val = train_test_split(X_Final, y, test_size=TEST_SIZE, random_state=RANDOM_STATE)

    # `imp` is a module-level imputer. Note the original refits it on the
    # validation split; the usual practice is to fit once on the training
    # split and only transform the validation data.
    imp.fit(X_train)
    X_train = imp.transform(X_train)
    imp.fit(X_val)
    X_val = imp.transform(X_val)

    reg = RidgeCV()
    reg.fit(X_train, y_train)
    y_val_pred = reg.predict(X_val)
    print(mean_squared_error(y_val, y_val_pred))

    if get_model:
        imp.fit(X_Final)
        X_Final = imp.transform(X_Final)
        reg_submit = RidgeCV()
        reg_submit.fit(X_Final, y)
        return reg_submit
    return mean_squared_error(y_val, y_val_pred)
Example 12: build
def build(path):
    """
    Computes a linear regression using Ridge regularization.
    """
    print("Building the linear model using Ridge regression")
    start = time.time()

    # Load the data; the target is the last column.
    data = np.loadtxt(path, delimiter=',')
    y = data[:, -1]
    X = data[:, 0:-1]

    # Instantiate and fit the model.
    model = RidgeCV()
    model.fit(X, y)

    print("Finished training the linear model in {:0.3f} seconds".format(time.time() - start))
    return model
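Called with no arguments, as above, RidgeCV only searches its default grid alphas=(0.1, 1.0, 10.0) using efficient leave-one-out cross-validation; when that grid is too coarse for the data, pass an explicit log-spaced one:

import numpy as np
from sklearn.linear_model import RidgeCV

model = RidgeCV(alphas=np.logspace(-3, 3, 13))  # much wider than the default (0.1, 1.0, 10.0)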
Example 13: ridgeRegression
def ridgeRegression(X, Y):
    """
    :param X: data consisting of features (excluding the class variable)
    :param Y: column vector containing the class variable
    :return: reports the best RMSE value for the tuned alpha in ridge regression
    """
    tuningAlpha = [0.1, 0.01, 0.001]

    # One could model on the entire dataset, but by convention a train/test
    # split is the better option:
    # X_train, X_test, Y_train, Y_test = cross_validation.train_test_split(X, Y, test_size=0.10, random_state=5)

    # Note: the 'mean_squared_error' scorer was renamed 'neg_mean_squared_error'
    # in scikit-learn 0.18, and `normalize` was removed in 1.2.
    ridge = RidgeCV(normalize=True, scoring='mean_squared_error', alphas=tuningAlpha, cv=10)
    ridge.fit(X, Y)
    prediction = ridge.predict(X)

    print("RIDGE REGRESSION")
    print("Best Alpha value for Ridge Regression : " + str(ridge.alpha_))
    print('Best RMSE for corresponding Alpha =', np.sqrt(mean_squared_error(Y, prediction)))
Example 14: fit_mapping
def fit_mapping(self):
    """
    Fits the mappings from one distribution to the other.
    """
    X1 = self.X1
    n1, p1 = X1.shape
    X2 = self.X2
    n2, p2 = X2.shape
    P = self.P  # transport plan, shape (n1, n2)
    c = self.c
    r = self.r
    reg_mapping = self.reg_mapping

    # Mapping from X1 to X2 (barycentric projection weighted by the plan)
    self.model1to2 = RidgeCV(alphas=np.logspace(-3, 3, 7))
    self.model1to2.fit(X1, (P * c.reshape((-1, 1))) @ X2)

    # Mapping from X2 to X1: the regression target must live in X1's space
    # for the matrix product to be well-defined (P.T is (n2, n1), so only
    # X1 conforms here)
    self.model2to1 = RidgeCV(alphas=np.logspace(-3, 3, 7))
    self.model2to1.fit(X2, (P.T * r.reshape((-1, 1))) @ X1)
Example 15: map_vector_spaces
def map_vector_spaces(self):
    """
    Perform linear regression on the semantic embeddings.

    - The semantic embeddings are obtained from the vector spaces of the
      corresponding bilingual words in each language.
    """
    self.logger.info('Learning transformation between Vector Spaces.')
    self.lt = RidgeCV()
    self.lt.fit(self.vector_1_list, self.vector_2_list)