This article collects typical usage examples of the Python method sklearn.linear_model.RidgeCV.predict. If you have been wondering what exactly RidgeCV.predict does, how to call it, and what it looks like in real code, the curated examples below should help. You can also read further about the class it belongs to, sklearn.linear_model.RidgeCV.
The following shows 15 code examples of RidgeCV.predict, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
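Before the examples, here is a minimal, self-contained sketch of the typical RidgeCV fit/predict workflow. The synthetic data and the alpha grid are illustrative assumptions only and are not taken from any of the examples below.

import numpy as np
from sklearn.linear_model import RidgeCV
from sklearn.model_selection import train_test_split

# Synthetic regression data (illustrative only)
rng = np.random.RandomState(0)
X = rng.randn(200, 5)
y = X @ np.array([1.5, -2.0, 0.0, 3.0, 0.5]) + 0.1 * rng.randn(200)

X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# RidgeCV selects the best alpha from the supplied grid by cross-validation;
# predict() then uses the model refit with that alpha.
reg = RidgeCV(alphas=np.logspace(-3, 3, 13))
reg.fit(X_train, y_train)
y_pred = reg.predict(X_test)
print("chosen alpha:", reg.alpha_)
print("first predictions:", y_pred[:3])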
Example 1: validate
# Required import: from sklearn.linear_model import RidgeCV [as alias]
# Or: from sklearn.linear_model.RidgeCV import predict [as alias]
def validate(nPrev, nAfter, aux_temp, aux_sun, aux_prec, get_model=False):
    X_Final = getFeature(nPrev, nAfter, aux_temp, aux_sun, aux_prec, TrainFiles)
    data_train_target = pd.read_csv(TrainTarget, sep='\t', header=None)
    y = data_train_target.loc[:, 0].values
    TEST_SIZE = 0.2
    RANDOM_STATE = 0
    X_train, X_val, y_train, y_val = train_test_split(X_Final, y, test_size=TEST_SIZE, random_state=RANDOM_STATE)
    imp.fit(X_train)
    X_train = imp.transform(X_train)
    imp.fit(X_val)
    X_val = imp.transform(X_val)
    reg = RidgeCV()
    reg.fit(X_train, y_train)
    y_val_pred = reg.predict(X_val)
    print(mean_squared_error(y_val, y_val_pred))
    if get_model:
        imp.fit(X_Final)
        X_Final = imp.transform(X_Final)
        reg_submit = RidgeCV()
        reg_submit.fit(X_Final, y)
        return reg_submit
    return mean_squared_error(y_val, y_val_pred)
Example 2: ridge_predict
# Required import: from sklearn.linear_model import RidgeCV [as alias]
# Or: from sklearn.linear_model.RidgeCV import predict [as alias]
def ridge_predict(train_data, train_target, test_data):
    # Prep modeller
    alpha_ranges = [1e-3, 1e-2, 1e-1, 1, 1e2, 1e3,
                    2e3, 2.5e3, 3e3, 3.5e3, 4e3,
                    5e3, 6e3, 6.1e3, 6.15e3, 6.25e3, 6.3e3, 6.4e3, 7e3,
                    7.75e3, 7.9e3, 8e3, 8.1e3, 8.2e3, 8.25e3, 8.3e3, 8.4e3, 8.5e3, 8.75e3, 9e3, 9.25e3, 9.4e3, 9.5e3, 9.6e3, 9.75e3,
                    1e4, 1.25e4, 1.4e4, 1.5e4, 1.55e4, 1.58e4, 1.6e4, 1.625e4, 1.65e4, 1.7e4, 1.725e4, 1.74e4, 1.75e4, 1.76e4, 1.78e4, 1.85e4,
                    2e4, 2.25e4, 2.5e4, 3e4, 4e4,
                    0.5e5, 0.75e5, 1e5, 1.25e5, 1.5e5,
                    0.8e6, 0.9e6, 1e6, 1.1e6, 1.2e6, 1.25e6, 1.28e6, 1.3e6, 1.32e6, 1.33e6, 1.34e6, 1.4e6, 1.5e6, 2e6,
                    1e7, 1e8, 1e9, 5e9, 1e10, 5e10, 1e11, 1e12, 1e13]
    clf = RidgeCV(alphas=alpha_ranges,
                  normalize=True, cv=None, fit_intercept=False, store_cv_values=True)
    # Fit
    clf.fit(train_data, train_target)
    # print("alpha range:", alpha_ranges)
    # print("CV per alpha:", np.mean(clf.cv_values_, axis=0))
    # print("alpha used:", clf.alpha_)
    # print("fit score:", clf.score(train_data, train_target))
    # Prediction
    predictions = clf.predict(test_data)
    return predictions
Example 3: regularizedreg
# Required import: from sklearn.linear_model import RidgeCV [as alias]
# Or: from sklearn.linear_model.RidgeCV import predict [as alias]
def regularizedreg(Xtrain, Xtest, ytrain, ytest):
    Rclf = RidgeCV(alphas=[1, 2, 20, 40, 50])  # RidgeCV(alphas=[0.1, 1.0, 2.0, 4.0, 20.0], cv=None, fit_intercept=True, scoring=None, normalize=False)
    Rclf.fit(Xtrain, ytrain)
    print("Residual sum of squares: %.2f"
          % np.mean((Rclf.predict(Xtest) - ytest) ** 2))
    print('Regularization chosen, alpha = %.2f' % Rclf.alpha_)
    print(' Coef values = ', Rclf.coef_)
    print('Variance score: %.2f' % Rclf.score(Xtest, ytest))
Example 4: create_firststage_preds
# Required import: from sklearn.linear_model import RidgeCV [as alias]
# Or: from sklearn.linear_model.RidgeCV import predict [as alias]
def create_firststage_preds(train, valid, testing):
    """
    This handles the first stage of a true stacking procedure, using
    random forests to create first-stage predictions in the train, test,
    and validation sets. Splits train into two sections, runs a random
    forest on each half, and predicts from one half into the other (and
    vice versa). Then a random forest is run on the whole training set
    and predicted into both validation and test.
    """
    np.random.seed(42)
    # Get vector of de-dupped values of ids
    id_dat = pd.DataFrame(train["tube_assembly_id"].drop_duplicates())
    # Create random vector to split train val on
    vect_len = len(id_dat.iloc[:, 0])
    id_dat["rand_vals"] = np.array(np.random.rand(vect_len, 1))
    df = pd.merge(train, id_dat, on="tube_assembly_id")
    # Create model for both halves of df
    frst1 = RandomForestRegressor(n_estimators=300, n_jobs=7)
    is_first_half = df.rand_vals > 0.5
    is_scnd_half = df.rand_vals < 0.5
    frst1.fit(df.loc[is_first_half, feats], df.loc[is_first_half, "target"])
    frst2 = RandomForestRegressor(n_estimators=300, n_jobs=7)
    frst2.fit(df.loc[is_scnd_half, feats], df.loc[is_scnd_half, "target"])
    # Predict frst1 onto frst2's half of the data and vice versa
    train["forest"] = 0
    train["forest"][is_scnd_half] = frst1.predict(df.loc[is_scnd_half, feats])
    train["forest"][is_first_half] = frst2.predict(df.loc[is_first_half, feats])
    # Create forest on full data for validation and test
    frst = RandomForestRegressor(n_estimators=300, n_jobs=7)
    frst.fit(df[feats], df.target)
    valid["forest"] = frst.predict(valid[feats])
    testing["forest"] = frst.predict(testing[feats])
    # Create ridge models for both halves of df
    rdg1 = RidgeCV(alphas=[0.5, 0.75, 1, 1.25])
    rdg2 = RidgeCV(alphas=[0.5, 0.75, 1, 1.25])
    rdg1.fit(df.loc[is_first_half, feats], df.loc[is_first_half, "target"])
    rdg2.fit(df.loc[is_scnd_half, feats], df.loc[is_scnd_half, "target"])
    # Predict rdg1 onto rdg2's half of the data and vice versa
    train["ridge"] = 0
    train["ridge"][is_scnd_half] = rdg1.predict(df.loc[is_scnd_half, feats])
    train["ridge"][is_first_half] = rdg2.predict(df.loc[is_first_half, feats])
    # Create ridge on full data for validation and test
    rdg = RidgeCV(alphas=[0.5, 0.75, 1, 1.25])
    rdg.fit(df[feats], df.target)
    valid["ridge"] = rdg.predict(valid[feats])
    testing["ridge"] = rdg.predict(testing[feats])
Example 5: fit_Ridge
# Required import: from sklearn.linear_model import RidgeCV [as alias]
# Or: from sklearn.linear_model.RidgeCV import predict [as alias]
def fit_Ridge(features_train, labels_train, features_pred, alphas=(0.1, 1.0, 10.0)):
    model = RidgeCV(normalize=True, store_cv_values=True, alphas=alphas)
    model.fit(features_train, labels_train)
    cv_errors = np.mean(model.cv_values_, axis=0)
    print("RIDGE - CV error min: ", np.min(cv_errors))
    # Test the model
    labels_pred = model.predict(features_pred)
    return labels_pred
Example 6: ensemble
# Required import: from sklearn.linear_model import RidgeCV [as alias]
# Or: from sklearn.linear_model.RidgeCV import predict [as alias]
def ensemble(Method, alphas, blend_train, blend_test, Y_dev, Y_test, n_folds):
    if Method == 1:
        bclf = RidgeCV(alphas=alphas, normalize=True, cv=n_folds)
        bclf.fit(blend_train, Y_dev)
        print("Best alpha = ", bclf.alpha_)
        Y_test_predict = bclf.predict(blend_test)
    elif Method == 2:
        bclf = ElasticNetCV(alphas=alphas, normalize=True, cv=n_folds)
        bclf.fit(blend_train, Y_dev)
        print("Best alpha = ", bclf.alpha_)
        Y_test_predict = bclf.predict(blend_test)
    else:
        bclf = LassoCV(alphas=alphas, normalize=True, cv=n_folds)
        bclf.fit(blend_train, Y_dev)
        print("Best alpha = ", bclf.alpha_)
        Y_test_predict = bclf.predict(blend_test)
    score1 = metrics.mean_absolute_error(Y_test, Y_test_predict)
    score = normalized_gini(Y_test, Y_test_predict)
    return score1, score
Example 7: orth_signal
# Required import: from sklearn.linear_model import RidgeCV [as alias]
# Or: from sklearn.linear_model.RidgeCV import predict [as alias]
def orth_signal(x, atol=1e-13, rtol=0):
    """
    Returns a signal orthogonal to the input ensemble.
    x -> input signal [n_samples, n_neurons]
    """
    t = np.linspace(0, 1, x.shape[0])[:, None]
    f = np.arange(x.shape[1]) / x.shape[1]
    xt = np.sum(np.sin(2 * np.pi * f * 3 * t) / (f + 1), axis=1)
    w = RidgeCV(np.logspace(-6, 3, 50))
    w.fit(x, xt)
    xt = xt - w.predict(x)
    # pdb.set_trace()
    return xt
Example 8: RidgeCVLinear
# Required import: from sklearn.linear_model import RidgeCV [as alias]
# Or: from sklearn.linear_model.RidgeCV import predict [as alias]
def RidgeCVLinear(train, test):
    print('starting RidgeCVLinear ...')
    ridge = RidgeCV(normalize=True, cv=5)
    train.reindex(np.random.permutation(train.index))
    tr_X = train.drop('LogSales', axis=1)
    tr_Y = train['LogSales']
    cutoff = math.floor(0.7 * tr_Y.size)
    ridge.fit(tr_X[:cutoff], tr_Y[:cutoff])
    predY = ridge.predict(tr_X[cutoff:])
    mspe = rmspe(predY, tr_Y[cutoff:])
    print('rmspe is %9f' % mspe)
    print(train.columns)
    print(ridge.coef_)
    print('starting RidgeCVLinear ... completed')
    return ridge
Example 9: stacking
# Required import: from sklearn.linear_model import RidgeCV [as alias]
# Or: from sklearn.linear_model.RidgeCV import predict [as alias]
def stacking(estimators):
    # training
    predictions = []
    for estim in estimators:
        estim.fit(X, y)
        predictions.append(estim.predict(X))
    agg = RidgeCV(alphas=alphas, cv=5, normalize=True, fit_intercept=True)  # aggregator
    agg.fit(np.array(predictions).T, y)
    # test
    predictions = []
    for estim in estimators:
        predictions.append(estim.predict(test_data))
    predictions = agg.predict(np.array(predictions).T)
    write_results(predictions)
Example 10: ridgeRegression
# Required import: from sklearn.linear_model import RidgeCV [as alias]
# Or: from sklearn.linear_model.RidgeCV import predict [as alias]
def ridgeRegression(X, Y):
    """
    :param X: data consisting of features (excluding the class variable)
    :param Y: column vector consisting of the class variable
    :return: reports the best RMSE value for the tuned alpha in ridge regression
    """
    tuningAlpha = [0.1, 0.01, 0.001]
    # could model on the entire dataset, but by convention splitting the dataset is the better option
    # X_train, X_test, Y_train, Y_test = cross_validation.train_test_split(X, Y, test_size=0.10, random_state=5)
    ridge = RidgeCV(normalize=True, scoring='mean_squared_error', alphas=tuningAlpha, cv=10)
    ridge.fit(X, Y)
    prediction = ridge.predict(X)
    print("RIDGE REGRESSION")
    print("Best Alpha value for Ridge Regression : " + str(ridge.alpha_))
    print('Best RMSE for corresponding Alpha =', np.sqrt(mean_squared_error(Y, prediction)))
Example 11: fitLakeLevels
# Required import: from sklearn.linear_model import RidgeCV [as alias]
# Or: from sklearn.linear_model.RidgeCV import predict [as alias]
def fitLakeLevels(self, flowData, lakeData, **kwargs):
    # model lake levels from stream flows
    xTrain = self.setDelay(flowData, kwargs['nDays'])
    flowScaler = preprocessing.StandardScaler().fit(xTrain)
    xTrain = flowScaler.transform(xTrain)
    self.flowScaler = flowScaler
    # fit to daily changes in elevation
    yTrain = lakeData - np.roll(lakeData, 1)
    yTrain[0] = 0.
    if kwargs['simpleModel']:
        model = RidgeCV(alphas=np.logspace(-2., 2.))
    else:
        model = ExtraTreesRegressor(n_estimators=50, n_jobs=4,
                                    random_state=42)
    model.fit(xTrain, yTrain)
    self.lakeModel = model
    ypreds = model.predict(xTrain)
    lakePreds = lakeData[0] + np.cumsum(ypreds)
    plt.clf()
    plt.plot(self.dates, yTrain + lakeData, label='Actual')
    plt.plot(self.dates, lakePreds, label='Predicted')
    plt.xlabel('Date')
    plt.ylabel('Lake Travis Elevation (ft)')
    plt.legend()
    plt.savefig('lakelevels.png')
Example 12: MAE
# Required import: from sklearn.linear_model import RidgeCV [as alias]
# Or: from sklearn.linear_model.RidgeCV import predict [as alias]
#else:
#model = GPy.models.GPRegression(X_train, Y_train, kernel=k)
icmk = GPy.util.multioutput.ICM(input_dim=X.shape[1], num_outputs=6,
                                kernel=k, W_rank=args.rank)
model = GPy.models.GPCoregionalizedRegression(X_train_list,
                                              Y_train_list,
                                              kernel=icmk)
model.optimize(messages=True, max_iters=100)
print(model)
# Get predictions
info_dict = {}
preds_list = []
vars_list = []
if args.model == 'ridge' or args.model == 'svr':
    preds = model.predict(X_test)
    if args.label_preproc == 'scale':
        preds = Y_scaler.inverse_transform(preds)
    elif args.label_preproc == 'warp':
        preds += 50
    info_dict['mae'] = MAE(preds, Y_test.flatten())
    info_dict['rmse'] = np.sqrt(MSE(preds, Y_test.flatten()))
    info_dict['pearsonr'] = pearsonr(preds, Y_test.flatten())
else:
    # TODO: check if this makes sense
    #preds, vars = model.predict(X_test)
    #X_test_pred, Y_test_pred, index = GPy.util.multioutput.build_XY(X_test_list, Y_test_list)
    #noise_dict = {'output_index': X_test_pred[:,-1:].astype(int)}
    #preds, vars = model.predict_noiseless(X_test, Y_metadata=noise_dict)
    for emo_id, emo in enumerate(EMOS):
        # TODO: preprocessing
Example 13: getPca
# Required import: from sklearn.linear_model import RidgeCV [as alias]
# Or: from sklearn.linear_model.RidgeCV import predict [as alias]
trainingFeatures['network'] = predictions
predictions = net.activateOnDataset(testDs)
testingFeatures['network'] = predictions
#%%
trainingFeaturesPca, testingFeaturesPca = getPca(trainingFeatures, trainingTarget, testingFeatures, 3)
for col in trainingFeaturesPca.columns:
    trainingFeatures[col] = trainingFeaturesPca[col]
    testingFeatures[col] = testingFeaturesPca[col]
#%%
model = RidgeCV(alphas=[0.01, 1.0, 10.0])
model.fit(trainingFeatures, trainingTarget)
predictions = model.predict(trainingFeatures)
trainingFeatures['RidgeCV'] = predictions
predictions = model.predict(testingFeatures)
testingFeatures['RidgeCV'] = predictions
#%%
model = SGDRegressor()
model.fit(trainingFeatures, trainingTarget)
predictions = model.predict(trainingFeatures)
trainingFeatures['SGDRegressor'] = predictions
predictions = model.predict(testingFeatures)
testingFeatures['SGDRegressor'] = predictions
#%%
Example 14: print
# Required import: from sklearn.linear_model import RidgeCV [as alias]
# Or: from sklearn.linear_model.RidgeCV import predict [as alias]
scoring='mean_absolute_error', cv=10)
ozone_ridgecv_reg = ozone_ridgecv_reg.fit(ozone_train.drop('ozone', axis=1),
                                          ozone_train['ozone'])
## Compare regularization models
print("Linear Coef: " + str(ozone_ln_reg.coef_)
      + "\nRidge Coef: " + str(ozone_ridge_reg.coef_)
      + "\nLasso Coef: " + str(ozone_lasso_reg.coef_)
      + "\nCV Coef: " + str(ozone_ridgecv_reg.coef_)
      + "\nCV alpha: " + str(ozone_ridgecv_reg.alpha_))
# Predict using models and evaluate
ozone_ln_pred = ozone_ln_reg.predict(ozone_test.drop('ozone', axis=1))
ozone_ridge_pred = ozone_ridge_reg.predict(ozone_test.drop('ozone', axis=1))
ozone_lasso_pred = ozone_lasso_reg.predict(ozone_test.drop('ozone', axis=1))
ozone_ridgecv_pred = ozone_ridgecv_reg.predict(ozone_test.drop('ozone', axis=1))
## Calculate MAE, RMSE, and R-squared for all models
ozone_ln_mae = metrics.mean_absolute_error(ozone_test['ozone'], ozone_ln_pred)
ozone_ln_rmse = sqrt(metrics.mean_squared_error(ozone_test['ozone'], ozone_ln_pred))
ozone_ln_r2 = metrics.r2_score(ozone_test['ozone'], ozone_ln_pred)
ozone_ridge_mae = metrics.mean_absolute_error(ozone_test['ozone'], ozone_ridge_pred)
ozone_ridge_rmse = sqrt(metrics.mean_squared_error(ozone_test['ozone'], ozone_ridge_pred))
ozone_ridge_r2 = metrics.r2_score(ozone_test['ozone'], ozone_ridge_pred)
ozone_lasso_mae = metrics.mean_absolute_error(ozone_test['ozone'], ozone_lasso_pred)
ozone_lasso_rmse = sqrt(metrics.mean_squared_error(ozone_test['ozone'], ozone_lasso_pred))
ozone_lasso_r2 = metrics.r2_score(ozone_test['ozone'], ozone_lasso_pred)
ozone_ridgecv_mae = metrics.mean_absolute_error(ozone_test['ozone'], ozone_ridgecv_pred)
Example 15: range
# Required import: from sklearn.linear_model import RidgeCV [as alias]
# Or: from sklearn.linear_model.RidgeCV import predict [as alias]
ds = list(range(20150701, 20150732)) + list(range(20150801, 20150831))
X3 = np.reshape(np.array(range(122, 184)), (-1, 1)).astype(int)
for song_id in song_ids:
    # Model training
    sql = "select ds, play_times from music_datas " + \
          "where song_id=='%s' and ds<'20150701'" % song_id
    cu2 = conn.cursor()
    cu2.execute(sql)
    ret = cu2.fetchall()
    X1, Y1 = generate_np_data(ret)
    # print(X1, Y1)
    clf.fit(X1, Y1)
    # Predict
    Y3 = clf.predict(X3).tolist()
    # break
    predicts = []
    for (x, y) in zip(ds, Y3):
        if y < 0:
            y = 0
        predicts.append((song_id[0], x, round(y)))
    cu2.executemany('insert into music_prediction values (?, ?, ?)', predicts)
    # process
    pro.ins().show()
    print("alpha: %f" % clf.alpha_)
conn.commit()
conn.close()