本文整理匯總了Python中sklearn.linear_model.RidgeCV方法的典型用法代碼示例。如果您正苦於以下問題:Python linear_model.RidgeCV方法的具體用法?Python linear_model.RidgeCV怎麽用?Python linear_model.RidgeCV使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類sklearn.linear_model
的用法示例。
在下文中一共展示了linear_model.RidgeCV方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: build
# 需要導入模塊: from sklearn import linear_model [as 別名]
# 或者: from sklearn.linear_model import RidgeCV [as 別名]
def build(path):
    """
    Compute a linear regression using Ridge regularization.

    Parameters
    ----------
    path : str
        Path to a comma-delimited text file whose last column is the
        regression target and whose remaining columns are the features.

    Returns
    -------
    model : RidgeCV
        The fitted scikit-learn RidgeCV estimator.
    """
    # BUGFIX: use print() calls instead of Python 2 print statements so the
    # example runs under Python 3 (other examples in this file already do).
    print("Building the linear model using Ridge regression")
    start = time.time()
    # Load the data; the target is the last column.
    data = np.loadtxt(path, delimiter=',')
    y = data[:, -1]
    X = data[:, 0:-1]
    # Instantiate and fit the model.
    model = RidgeCV()
    model.fit(X, y)
    print("Finished training the linear model in {:0.3f} seconds".format(time.time() - start))
    return model
示例2: make_pipeline
# 需要導入模塊: from sklearn import linear_model [as 別名]
# 或者: from sklearn.linear_model import RidgeCV [as 別名]
def make_pipeline(encoding_method):
    """Build a Pipeline that encodes all columns, scales, and fits a RidgeCV."""
    # Static transformers for the clean columns.
    transformers = []
    for col, enc in clean_columns.items():
        transformers.append((enc + '_' + col, encoders_dict[enc], [col]))
    # Append the encoder being benchmarked for the dirty column.
    transformers.append(
        (encoding_method, encoders_dict[encoding_method], [dirty_column]))
    steps = [
        # Use ColumnTransformer to combine the features.
        ('union', ColumnTransformer(transformers=transformers,
                                    remainder='drop')),
        ('scaler', StandardScaler(with_mean=False)),
        ('clf', RidgeCV()),
    ]
    return Pipeline(steps)
#########################################################################
# Fitting each encoding method with a RidgeCV
# -------------------------------------------
# Finally, we loop over the different encoding methods,
# instantiate a new pipeline each time, fit it,
# and store the returned cross-validation score:
示例3: load_default
# 需要導入模塊: from sklearn import linear_model [as 別名]
# 或者: from sklearn.linear_model import RidgeCV [as 別名]
def load_default(self, machine_list=['lasso', 'tree', 'ridge', 'random_forest', 'svm']):
    """
    Fit the default scikit-learn regressors on the held-out split.

    Each named machine is fitted on ``self.X_k_`` / ``self.y_k_`` and
    stored in ``self.estimators_`` under its name.

    Parameters
    ----------
    machine_list: optional, list of strings
        List of default machine names to be loaded; any subset of
        'lasso', 'tree', 'ridge', 'random_forest' and 'svm'.

    Notes
    -----
    NOTE(review): the mutable default list is shared across calls; it is
    never mutated here, so this is only a latent hazard.  Machines whose
    ``fit`` raises ValueError are skipped silently.  ``self.estimators_``
    is assumed to be initialized elsewhere — TODO confirm.
    """
    for machine in machine_list:
        try:
            if machine == 'lasso':
                # LassoCV chooses its own regularization strength via CV.
                self.estimators_['lasso'] = linear_model.LassoCV(random_state=self.random_state).fit(self.X_k_, self.y_k_)
            if machine == 'tree':
                self.estimators_['tree'] = DecisionTreeRegressor(random_state=self.random_state).fit(self.X_k_, self.y_k_)
            if machine == 'ridge':
                # RidgeCV with its default alpha grid.
                self.estimators_['ridge'] = linear_model.RidgeCV().fit(self.X_k_, self.y_k_)
            if machine == 'random_forest':
                self.estimators_['random_forest'] = RandomForestRegressor(random_state=self.random_state).fit(self.X_k_, self.y_k_)
            if machine == 'svm':
                self.estimators_['svm'] = SVR().fit(self.X_k_, self.y_k_)
        except ValueError:
            # Best-effort loading: skip machines that fail to fit.
            continue
示例4: plot_reconstruction_for_different_freqs
# 需要導入模塊: from sklearn import linear_model [as 別名]
# 或者: from sklearn.linear_model import RidgeCV [as 別名]
def plot_reconstruction_for_different_freqs(event_id, electrode, two_electrodes, from_t, to_t, time_split,
                                            gk_sigma=3, bipolar=True, electrodes_positive=False, electrodes_normalize=False, njobs=4):
    """
    Plot MEG-based reconstructions of an electrode's signal — once for a
    single electrode and once for a pair — then overlay the raw MEG data
    for a hand-picked set of frequency-bin indices.

    Parameters are forwarded to the project's loading/reconstruction
    helpers; ``njobs`` controls parallelism.
    """
    cond = utils.first_key(event_id)
    electrodes = get_all_electrodes_names(bipolar)
    elec_data = load_electrodes_data(event_id, bipolar, electrodes, from_t, to_t,
                                     subtract_min=electrodes_positive, normalize_data=electrodes_normalize)
    # NOTE(review): `freqs_bin` and `CSD_FREQS` are module-level globals not
    # visible here — confirm they are defined and mutually consistent.
    meg_data_dic = load_all_dics(freqs_bin, event_id, bipolar, electrodes, from_t, to_t, gk_sigma, njobs=njobs)
    # Reconstruction for the single electrode (default optimization method).
    reconstruct_meg(event_id, [electrode], from_t, to_t, time_split, plot_results=True, all_meg_data=meg_data_dic,
                    elec_data=elec_data, title='{}: {}'.format(cond, electrode))
    # Reconstruction for the electrode pair, using RidgeCV.
    reconstruct_meg(event_id, two_electrodes, from_t, to_t, time_split, optimization_method='RidgeCV',
                    plot_results=True, all_meg_data=meg_data_dic,elec_data=elec_data,
                    title='{}: {} and {}'.format(cond, two_electrodes[0], two_electrodes[1]))
    # Hand-picked frequency-bin indices to overlay on the electrode trace.
    freqs_inds = np.array([2, 6, 9, 10, 11, 15, 16])
    plt.plot(elec_data[electrode][cond])
    plt.plot(meg_data_dic[electrode][freqs_inds, :].T, '--')
    plt.legend([electrode] + np.array(CSD_FREQS)[freqs_inds].tolist())
    # plt.title('{}: {}'.format(cond, electrode))
    plt.show()
示例5: get_new_clf
# 需要導入模塊: from sklearn import linear_model [as 別名]
# 或者: from sklearn.linear_model import RidgeCV [as 別名]
def get_new_clf(solver, folds=3, alphas=100):
    """
    Create a fresh (unfitted) linear regression estimator.

    Parameters
    ----------
    solver : str
        One of 'linear', 'ridge', 'lasso' or 'elastic'.
    folds : int, optional
        Number of KFold splits for the cross-validated estimators.
    alphas : int, optional
        Number of regularization strengths to search over.

    Returns
    -------
    clf : sklearn estimator

    Raises
    ------
    ValueError
        For an unknown ``solver`` name.  (The original fell through the
        if-chain and raised UnboundLocalError on ``return clf``.)
    """
    kf = KFold(n_splits=folds, shuffle=False)
    if "linear" == solver:
        clf = linear_model.LinearRegression(fit_intercept=False)
    elif "ridge" == solver:
        # `alphas` (a count) is replaced by an evenly spaced grid in (0, 10].
        alphas = np.arange(1 / alphas, 10 + 1 / alphas, 10 / alphas)
        clf = linear_model.RidgeCV(alphas=alphas, fit_intercept=False, cv=kf)
    elif "lasso" == solver:
        clf = linear_model.LassoCV(n_alphas=alphas, fit_intercept=False, cv=kf)
    elif "elastic" == solver:
        clf = linear_model.ElasticNetCV(n_alphas=alphas, fit_intercept=False, cv=kf)
    else:
        raise ValueError("Unknown solver: {}".format(solver))
    return clf
示例6: _ridge
# 需要導入模塊: from sklearn import linear_model [as 別名]
# 或者: from sklearn.linear_model import RidgeCV [as 別名]
def _ridge(self):
    """Fit a cross-validated ridge regression; optionally score test predictions."""
    # Train a linear ridge model with built-in alpha selection.
    estimator = RidgeCV(fit_intercept=True, normalize=True)
    fitted = estimator.fit(X=self.train_matrix, y=self.train_target)
    coeff = estimator.coef_
    pred = None
    # When requested, evaluate the model on the held-out test split.
    if self.predict:
        predictions = fitted.predict(self.test_matrix)
        pred = get_error(prediction=predictions,
                         target=self.test_target)['average']
    return coeff, pred
示例7: lambda_to_alpha
# 需要導入模塊: from sklearn import linear_model [as 別名]
# 或者: from sklearn.linear_model import RidgeCV [as 別名]
def lambda_to_alpha(lambda_value, samples):
    """Convert a ridge lambda penalty into the sklearn alpha for `samples` rows."""
    return 0.5 * lambda_value * samples
# Convert RidgeCV alpha back into a lambda value
示例8: calculate_rapm
# 需要導入模塊: from sklearn import linear_model [as 別名]
# 或者: from sklearn.linear_model import RidgeCV [as 別名]
def calculate_rapm(train_x, train_y, possessions, lambdas, name, players):
    """
    Estimate regularized adjusted plus-minus (RAPM) coefficients.

    Fits a possession-weighted, 5-fold cross-validated ridge regression and
    returns a per-player DataFrame of offensive/defensive coefficients,
    their sum, and ranks, together with the model intercept.
    """
    # Translate each lambda into the equivalent sklearn alpha.
    alphas = [lambda_to_alpha(lam, train_x.shape[0]) for lam in lambdas]
    # The target data is not centered at 0, so fit an intercept.
    estimator = RidgeCV(alphas=alphas, cv=5, fit_intercept=True, normalize=False)
    # Weight each stint by the number of possessions it covers.
    model = estimator.fit(train_x, train_y, sample_weight=possessions)
    intercept = model.intercept_
    n_players = len(players)
    # Players as an m x 1 column vector.
    player_arr = np.array(players).reshape(1, n_players).T
    # Split the coefficient row into offensive / defensive halves (m x 1 each).
    coef_offensive_array = model.coef_[:, 0:n_players].T
    coef_defensive_array = model.coef_[:, n_players:].T
    # m x 3 matrix: player id, offensive coefficient, defensive coefficient.
    player_id_with_coef = np.concatenate(
        [player_arr, coef_offensive_array, coef_defensive_array], axis=1)
    players_coef = pd.DataFrame(player_id_with_coef)
    off_col = '{0}__Off'.format(name)
    def_col = '{0}__Def'.format(name)
    players_coef.columns = ['playerId', off_col, def_col]
    # Total impact = offense + defense (ideally this would be weighted by the
    # number of offensive and defensive possessions, which are rarely equal).
    players_coef[name] = players_coef[off_col] + players_coef[def_col]
    # Rank each metric, best first.
    players_coef['{0}_Rank'.format(name)] = players_coef[name].rank(ascending=False)
    players_coef['{0}__Off_Rank'.format(name)] = players_coef[off_col].rank(ascending=False)
    players_coef['{0}__Def_Rank'.format(name)] = players_coef[def_col].rank(ascending=False)
    return players_coef, intercept
示例9: calculate_rapm
# 需要導入模塊: from sklearn import linear_model [as 別名]
# 或者: from sklearn.linear_model import RidgeCV [as 別名]
def calculate_rapm(train_x, train_y, possessions, lambdas, name, players):
    """
    Estimate regularized adjusted plus-minus (RAPM) coefficients.

    Parameters
    ----------
    train_x : array, shape (stints, 2 * len(players))
        Stint design matrix; assumes offensive player indicator columns
        followed by defensive ones — TODO confirm against the caller.
    train_y : array
        Per-stint regression target.
    possessions : array
        Per-stint possession counts, used as sample weights.
    lambdas : list of float
        Ridge lambda values to cross-validate over.
    name : str
        Metric name used to build the output column names.
    players : list
        Player ids, in the same order as the design-matrix columns.

    Returns
    -------
    (players_coef, intercept) : (pandas.DataFrame, array)
        One row per player with offensive/defensive coefficients, their
        sum, ranks, and the intercept repeated in a reference column.
    """
    # convert our lambdas to alphas
    alphas = [lambda_to_alpha(l, train_x.shape[0]) for l in lambdas]
    # create a 5 fold CV ridgeCV model. Our target data is not centered at 0, so we want to fit to an intercept.
    clf = RidgeCV(alphas=alphas, cv=5, fit_intercept=True, normalize=False)
    # fit our training data, weighting each stint by its possessions
    model = clf.fit(train_x, train_y, sample_weight=possessions)
    # convert our list of players into a mx1 matrix
    player_arr = np.transpose(np.array(players).reshape(1, len(players)))
    # extract our coefficients into the offensive and defensive parts
    coef_offensive_array = np.transpose(model.coef_[:, 0:len(players)])
    coef_defensive_array = np.transpose(model.coef_[:, len(players):])
    # concatenate the offensive and defensive values with the player ids into a mx3 matrix
    player_id_with_coef = np.concatenate([player_arr, coef_offensive_array, coef_defensive_array], axis=1)
    # build a dataframe from our matrix
    players_coef = pd.DataFrame(player_id_with_coef)
    intercept = model.intercept_
    # apply new column names
    players_coef.columns = ['playerId', '{0}__Off'.format(name), '{0}__Def'.format(name)]
    # Add the offensive and defensive components together (we should really be weighing this
    # by the number of offensive and defensive possessions played, as they are often not equal).
    players_coef[name] = players_coef['{0}__Off'.format(name)] + players_coef['{0}__Def'.format(name)]
    # rank the values, best first
    players_coef['{0}_Rank'.format(name)] = players_coef[name].rank(ascending=False)
    players_coef['{0}__Off_Rank'.format(name)] = players_coef['{0}__Off'.format(name)].rank(ascending=False)
    players_coef['{0}__Def_Rank'.format(name)] = players_coef['{0}__Def'.format(name)].rank(ascending=False)
    # add the intercept for reference
    players_coef['{0}__intercept'.format(name)] = intercept[0]
    return players_coef, intercept
# a list of lambdas for cross validation
示例10: load_default
# 需要導入模塊: from sklearn import linear_model [as 別名]
# 或者: from sklearn.linear_model import RidgeCV [as 別名]
def load_default(self, machine_list='basic'):
    """
    Fit a default collection of scikit-learn regressors.

    The 'basic' list loads 4 regressors; the 'advanced' list adds more
    machines.  Each is fitted on ``self.X_k_`` / ``self.y_k_`` and stored
    in ``self.estimators_`` under its name.

    Parameters
    ----------
    machine_list: optional, 'basic', 'advanced', or list of strings
        Named preset or explicit list of machine names to be loaded.

    Returns
    -------
    self : returns an instance of self.
    """
    if machine_list == 'basic':
        machine_list = ['tree', 'ridge', 'random_forest', 'svm']
    if machine_list == 'advanced':
        machine_list=['lasso', 'tree', 'ridge', 'random_forest', 'svm', 'bayesian_ridge', 'sgd']
    self.estimators_ = {}
    for machine in machine_list:
        # Best-effort loading: a machine whose fit raises ValueError is skipped.
        try:
            if machine == 'lasso':
                self.estimators_['lasso'] = linear_model.LassoCV(random_state=self.random_state).fit(self.X_k_, self.y_k_)
            if machine == 'tree':
                self.estimators_['tree'] = DecisionTreeRegressor(random_state=self.random_state).fit(self.X_k_, self.y_k_)
            if machine == 'ridge':
                self.estimators_['ridge'] = linear_model.RidgeCV().fit(self.X_k_, self.y_k_)
            if machine == 'random_forest':
                self.estimators_['random_forest'] = RandomForestRegressor(random_state=self.random_state).fit(self.X_k_, self.y_k_)
            if machine == 'svm':
                # NOTE: this variant uses LinearSVR (the sibling example uses SVR).
                self.estimators_['svm'] = LinearSVR(random_state=self.random_state).fit(self.X_k_, self.y_k_)
            if machine == 'sgd':
                self.estimators_['sgd'] = linear_model.SGDRegressor(random_state=self.random_state).fit(self.X_k_, self.y_k_)
            if machine == 'bayesian_ridge':
                self.estimators_['bayesian_ridge'] = linear_model.BayesianRidge().fit(self.X_k_, self.y_k_)
        except ValueError:
            continue
    return self
示例11: load_default
# 需要導入模塊: from sklearn import linear_model [as 別名]
# 或者: from sklearn.linear_model import RidgeCV [as 別名]
def load_default(self, machine_list='basic'):
    """
    Fit a default collection of scikit-learn regressors.

    The 'basic' list loads 4 regressors; the 'advanced' list adds more
    machines.  Each is fitted on ``self.X_k_`` / ``self.y_k_`` and stored
    in ``self.estimators_`` under its name.

    Parameters
    ----------
    machine_list: optional, 'basic', 'advanced', or list of strings
        Named preset or explicit list of machine names to be loaded.
        Default is 'basic'.

    Returns
    -------
    self : returns an instance of self.
    """
    if machine_list == 'basic':
        machine_list = ['tree', 'ridge', 'random_forest', 'svm']
    if machine_list == 'advanced':
        machine_list=['lasso', 'tree', 'ridge', 'random_forest', 'svm', 'bayesian_ridge', 'sgd']
    self.estimators_ = {}
    for machine in machine_list:
        # Best-effort loading: a machine whose fit raises ValueError is skipped.
        try:
            if machine == 'lasso':
                self.estimators_['lasso'] = linear_model.LassoCV(random_state=self.random_state).fit(self.X_k_, self.y_k_)
            if machine == 'tree':
                self.estimators_['tree'] = DecisionTreeRegressor(random_state=self.random_state).fit(self.X_k_, self.y_k_)
            if machine == 'ridge':
                self.estimators_['ridge'] = linear_model.RidgeCV().fit(self.X_k_, self.y_k_)
            if machine == 'random_forest':
                self.estimators_['random_forest'] = RandomForestRegressor(random_state=self.random_state).fit(self.X_k_, self.y_k_)
            if machine == 'svm':
                self.estimators_['svm'] = SVR().fit(self.X_k_, self.y_k_)
            if machine == 'sgd':
                self.estimators_['sgd'] = linear_model.SGDRegressor(random_state=self.random_state).fit(self.X_k_, self.y_k_)
            if machine == 'bayesian_ridge':
                self.estimators_['bayesian_ridge'] = linear_model.BayesianRidge().fit(self.X_k_, self.y_k_)
        except ValueError:
            continue
    return self
示例12: test_objectmapper
# 需要導入模塊: from sklearn import linear_model [as 別名]
# 或者: from sklearn.linear_model import RidgeCV [as 別名]
def test_objectmapper(self):
    """Verify ModelFrame.linear_model exposes the sklearn.linear_model classes."""
    df = pdml.ModelFrame([])
    estimator_names = [
        'ARDRegression', 'BayesianRidge', 'ElasticNet', 'ElasticNetCV',
        'HuberRegressor', 'Lars', 'LarsCV', 'Lasso', 'LassoCV',
        'LassoLars', 'LassoLarsCV', 'LassoLarsIC', 'LinearRegression',
        'LogisticRegression', 'LogisticRegressionCV', 'MultiTaskLasso',
        'MultiTaskElasticNet', 'MultiTaskLassoCV', 'MultiTaskElasticNetCV',
        'OrthogonalMatchingPursuit', 'OrthogonalMatchingPursuitCV',
        'PassiveAggressiveClassifier', 'PassiveAggressiveRegressor',
        'Perceptron', 'RandomizedLasso', 'RandomizedLogisticRegression',
        'RANSACRegressor', 'Ridge', 'RidgeClassifier', 'RidgeClassifierCV',
        'RidgeCV', 'SGDClassifier', 'SGDRegressor', 'TheilSenRegressor',
    ]
    # Each accessor attribute must be the sklearn class itself, not a copy.
    for est in estimator_names:
        self.assertIs(getattr(df.linear_model, est), getattr(lm, est))
示例13: calc_optimization_features
# 需要導入模塊: from sklearn import linear_model [as 別名]
# 或者: from sklearn.linear_model import RidgeCV [as 別名]
def calc_optimization_features(optimization_method, freqs_bins, cond, meg_data_dic, elec_data, electrodes, from_t, to_t, optimization_params={}):
    """
    Fit per-frequency weights mapping MEG data to electrode signals.

    For the linear-model methods, a single regression is fitted on the
    horizontally stacked data of all electrodes, followed by a scalar
    offset fit; for the direct-optimization methods, a generic optimizer
    minimizes the corresponding error function.

    Returns
    -------
    (p, cv_parameters) : (array, list or scalar)
        Fitted weights (offset first for the linear methods) and any
        cross-validated hyperparameters.
    """
    # scorer = make_scorer(rol_corr, False)
    cv_parameters = []
    if optimization_method in ['Ridge', 'RidgeCV', 'Lasso', 'LassoCV', 'ElasticNet', 'ElasticNetCV']:
        # Stack all MEG data such that X.shape = F x (T*n), where n is the
        # electrodes num; Y is the concatenated electrode signal (T*n,).
        # BUGFIX: np.hstack requires a sequence — passing a generator raises
        # TypeError on modern numpy — so use list comprehensions.
        X = np.hstack([meg_data_dic[electrode][:, from_t:to_t] for electrode in electrodes])
        Y = np.hstack([elec_data[electrode][cond][from_t:to_t] for electrode in electrodes])
        # NOTE(review): 'ElasticNet' passes the membership test above but has
        # no entry in funcs_dic, so selecting it raises KeyError — confirm
        # whether it should be removed from the list or given an estimator.
        funcs_dic = {'Ridge': Ridge(alpha=0.1), 'RidgeCV': RidgeCV(np.logspace(0, -10, 11)),  # scoring=scorer
                     'Lasso': Lasso(alpha=1.0/X.shape[0]), 'LassoCV': LassoCV(alphas=np.logspace(0, -10, 11), max_iter=1000),
                     'ElasticNetCV': ElasticNetCV(alphas=np.logspace(0, -10, 11), l1_ratio=np.linspace(0, 1, 11))}
        clf = funcs_dic[optimization_method]
        clf.fit(X.T, Y)
        p = clf.coef_
        if len(p) != len(freqs_bins):
            raise Exception('{} (len(clf.coef)) != {} (len(freqs_bin))!!!'.format(len(p), len(freqs_bins)))
        # Keep the cross-validated hyperparameters for reporting.
        if optimization_method in ['RidgeCV', 'LassoCV']:
            cv_parameters = clf.alpha_
        elif optimization_method == 'ElasticNetCV':
            cv_parameters = [clf.alpha_, clf.l1_ratio_]
        # Fit a scalar offset on top of the linear prediction.
        args = [(meg_pred(p, meg_data_dic[electrode][:, from_t:to_t]), elec_data[electrode][cond][from_t:to_t]) for electrode in electrodes]
        p0 = leastsq(post_ridge_err_func, [1], args=args, maxfev=0)[0]
        p = np.hstack((p0, p))
    elif optimization_method in ['leastsq', 'dtw', 'minmax', 'diff_rms', 'rol_corr']:
        # Direct optimization of an error function over weights + offset.
        args = ([(meg_data_dic[electrode][:, from_t:to_t], elec_data[electrode][cond][from_t:to_t]) for electrode in electrodes], optimization_params)
        p0 = np.ones((1, len(freqs_bins)+1))
        funcs_dic = {'leastsq': partial(leastsq, func=err_func, x0=p0, args=args),
                     'dtw': partial(minimize, fun=dtw_err_func, x0=p0, args=args),
                     'minmax': partial(minimize, fun=minmax_err_func, x0=p0, args=args),
                     'diff_rms': partial(minimize, fun=min_diff_rms_err_func, x0=p0, args=args),
                     'rol_corr': partial(minimize, fun=max_rol_corr, x0=p0, args=args)}
        res = funcs_dic[optimization_method]()
        # leastsq returns a tuple; the scipy minimize result stores x on .x.
        p = res[0] if optimization_method=='leastsq' else res.x
        cv_parameters = optimization_params
    else:
        raise Exception('Unknown optimization_method! {}'.format(optimization_method))
    return p, cv_parameters
示例14: find_best_freqs_subset
# 需要導入模塊: from sklearn import linear_model [as 別名]
# 或者: from sklearn.linear_model import RidgeCV [as 別名]
def find_best_freqs_subset(event_id, bipolar, freqs_bins, from_t, to_t, time_split, combs,
                           optimization_method='RidgeCV', optimization_params={}, k=3, gk_sigma=3, njobs=6):
    """
    Search, in parallel over a shuffled superset of ``freqs_bins``, for the
    frequency-bin subset that best reconstructs the electrode signals, and
    pickle the aggregated results.

    Parameters mirror the project's loading helpers; ``k`` and ``combs``
    are forwarded to the per-chunk workers, ``njobs`` controls parallelism.
    """
    freqs_bins = sorted(freqs_bins)
    all_electrodes = get_all_electrodes_names(bipolar)
    elec_data = load_electrodes_data(event_id, bipolar, all_electrodes, from_t, to_t,
                                     subtract_min=False, normalize_data=False)
    meg_data_dic = load_all_dics(freqs_bins, event_id, bipolar, all_electrodes, from_t, to_t, gk_sigma,
                                 dont_calc_new_csd=True, njobs=njobs)
    # Unique run id so concurrent searches don't clobber each other's folders.
    uuid = utils.rand_letters(5)
    results_fol = get_results_fol(optimization_method)
    partial_results_fol = os.path.join(results_fol, 'best_freqs_subset_{}'.format(uuid))
    utils.make_dir(results_fol)
    utils.make_dir(partial_results_fol)
    cond = utils.first_key(event_id)
    # Shuffle so each parallel chunk gets a representative mix of subset sizes.
    all_freqs_bins_subsets = list(utils.superset(freqs_bins))
    random.shuffle(all_freqs_bins_subsets)
    N = len(all_freqs_bins_subsets)
    print('There are {} freqs subsets'.format(N))
    all_freqs_bins_subsets_chunks = utils.chunks(all_freqs_bins_subsets, int(len(all_freqs_bins_subsets) / njobs))
    params = [Bunch(event_id=event_id, bipolar=bipolar, freqs_bins_chunks=freqs_bins_subsets_chunk, cond=cond,
                    from_t=from_t, to_t=to_t, freqs_bins=freqs_bins, partial_results_fol=partial_results_fol,
                    time_split=time_split, only_sig_electrodes=False, only_from_same_lead=True, electrodes_positive=False,
                    electrodes_normalize=False, gk_sigma=gk_sigma, k=k, do_plot_results=False, do_save_partial_results=False,
                    optimization_params=optimization_params, check_only_pred_score=True, njobs=1, N=int(N / njobs),
                    elec_data=elec_data, meg_data_dic=meg_data_dic, all_electrodes=all_electrodes,
                    optimization_method=optimization_method, error_calc_method='rol_corr', error_threshold=30, combs=combs) for
              freqs_bins_subsets_chunk in all_freqs_bins_subsets_chunks]
    results = utils.run_parallel(_find_best_freqs_subset_parallel, params, njobs)
    all_results = []
    for chunk_results in results:
        all_results.extend(chunk_results)
    params_suffix = utils.params_suffix(optimization_params)
    output_file = os.path.join(results_fol, 'best_freqs_subset_{}_{}_{}{}.pkl'.format(cond, uuid, k, params_suffix))
    print('saving results to {}'.format(output_file))
    # BUGFIX: save the aggregated results; the original saved `chunk_results`,
    # i.e. only the last worker's chunk.
    utils.save((all_results, freqs_bins), output_file)
示例15: regularization_m
# 需要導入模塊: from sklearn import linear_model [as 別名]
# 或者: from sklearn.linear_model import RidgeCV [as 別名]
def regularization_m(X_re, y_re, predFeat=False):
    """
    Compare ridge / lasso / elastic-net fits on (X_re, y_re), plot the
    ridge coefficient path, and optionally predict for new features.

    Parameters
    ----------
    X_re, y_re : array-like
        Training features and target.
    predFeat : numpy.ndarray, optional
        Feature matrix to predict on; when omitted, nothing is returned.

    Returns
    -------
    numpy.ndarray or None
        RidgeCV predictions for ``predFeat`` when it is an ndarray.
    """
    n_alphas = 200
    alphas = np.logspace(1, 8, n_alphas)
    # Trace the ridge coefficient path across the alpha grid.
    coefs = []
    for a in alphas:
        ridge = Ridge(alpha=a, fit_intercept=False)
        ridge.fit(X_re, y_re)
        coefs.append(ridge.coef_)
    ax = plt.gca()
    ax.plot(alphas, coefs)
    ax.set_xscale('log')
    ax.set_xlim(ax.get_xlim()[::-1])  # reverse axis
    plt.xlabel('alpha')
    plt.ylabel('weights')
    plt.title('Ridge coefficients as a function of the regularization')
    plt.axis('tight')
    plt.show()
    ridge = Ridge(alpha=28.6)  # Ridge with a pre-chosen alpha value
    ridge.fit(X_re, y_re)
    print(ridge.coef_, ridge.intercept_, ridge.alpha)
    redgecv = RidgeCV(alphas=alphas)  # RidgeCV selects the best alpha from the grid
    redgecv.fit(X_re, y_re)
    print(redgecv.coef_, redgecv.intercept_, redgecv.alpha_)
    lasso = Lasso(alpha=0.01)
    lasso.fit(X_re, y_re)
    print(lasso.coef_, lasso.intercept_, lasso.alpha)
    elasticnet = ElasticNet(alpha=1.0, l1_ratio=0.5)
    elasticnet.fit(X_re, y_re)
    print(elasticnet.coef_, elasticnet.intercept_, elasticnet.alpha)
    # Use isinstance instead of the fragile type(x).__module__ == 'numpy'
    # check (which also matched numpy scalars, not just arrays).
    if isinstance(predFeat, np.ndarray):
        return redgecv.predict(predFeat)