This article collects typical usage examples of sklearn.linear_model.BayesianRidge in Python. If you have been wondering what linear_model.BayesianRidge does, how to use it, or what real code that uses it looks like, the curated examples below may help. You can also explore the sklearn.linear_model module, in which this estimator is defined, for further usage examples.
The following shows 15 code examples of linear_model.BayesianRidge, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
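Before the collected examples, here is a minimal, self-contained sketch of the estimator itself on synthetic data (default hyperparameters; the data and coefficients are purely illustrative):

import numpy as np
from sklearn.linear_model import BayesianRidge

# toy regression problem
rng = np.random.RandomState(0)
X = rng.randn(100, 3)
y = X @ np.array([0.5, -1.0, 2.0]) + 0.1 * rng.randn(100)

reg = BayesianRidge(compute_score=True)
reg.fit(X, y)

# BayesianRidge can also return a per-sample predictive standard deviation
mean, std = reg.predict(X[:5], return_std=True)
print(reg.coef_, reg.alpha_, reg.lambda_)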
Example 1: test_iterative_imputer_estimators
# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import BayesianRidge [as alias]
def test_iterative_imputer_estimators(estimator):
    rng = np.random.RandomState(0)

    n = 100
    d = 10
    X = sparse_random_matrix(n, d, density=0.10, random_state=rng).toarray()

    imputer = IterativeImputer(missing_values=0,
                               max_iter=1,
                               estimator=estimator,
                               random_state=rng)
    imputer.fit_transform(X)

    # check that types are correct for estimators
    hashes = []
    for triplet in imputer.imputation_sequence_:
        expected_type = (type(estimator) if estimator is not None
                         else type(BayesianRidge()))
        assert isinstance(triplet.estimator, expected_type)
        hashes.append(id(triplet.estimator))

    # check that each estimator is unique
    assert len(set(hashes)) == len(hashes)
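The test above receives `estimator` as a parameter, so it is presumably driven by a parametrize decorator in the original test module. Here is a hedged sketch of how that wiring might look; the decorator, the estimator list, and the extra imports are assumptions based on common scikit-learn test conventions, not copied from the source:

import numpy as np
import pytest
from sklearn.experimental import enable_iterative_imputer  # noqa: F401  (enables IterativeImputer)
from sklearn.impute import IterativeImputer
from sklearn.linear_model import BayesianRidge, Ridge
from sklearn.tree import DecisionTreeRegressor
# sparse_random_matrix lived in sklearn.random_projection in the scikit-learn
# versions this test targets
from sklearn.random_projection import sparse_random_matrix

# Illustrative parametrization: None exercises the BayesianRidge default.
@pytest.mark.parametrize(
    "estimator",
    [None, BayesianRidge(), DecisionTreeRegressor(), Ridge()])
def test_iterative_imputer_estimators(estimator):
    ...  # body as in Example 1 above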
Example 2: getModels
# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import BayesianRidge [as alias]
def getModels():
    result = []
    result.append("LinearRegression")
    result.append("BayesianRidge")
    result.append("ARDRegression")
    result.append("ElasticNet")
    result.append("HuberRegressor")
    result.append("Lasso")
    result.append("LassoLars")
    result.append("Ridge")
    result.append("SGDRegressor")
    result.append("SVR")
    result.append("MLPClassifier")
    result.append("KNeighborsClassifier")
    result.append("SVC")
    result.append("GaussianProcessClassifier")
    result.append("DecisionTreeClassifier")
    result.append("RandomForestClassifier")
    result.append("AdaBoostClassifier")
    result.append("GaussianNB")
    result.append("LogisticRegression")
    result.append("QuadraticDiscriminantAnalysis")
    return result
Example 3: build_model
# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import BayesianRidge [as alias]
def build_model(self):
    # Model parameters can be passed directly here
    return linear_model.BayesianRidge(normalize=True, verbose=True, compute_score=True)

# ----- END first stage stacking model -----

# ----- Second stage stacking model -----
Example 4: __init__
# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import BayesianRidge [as alias]
def __init__(self):
    self.reg = linear_model.BayesianRidge()
Example 5: load_default
# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import BayesianRidge [as alias]
def load_default(self, machine_list='basic'):
    """
    Loads 4 different scikit-learn regressors by default. The advanced list adds more machines.

    Parameters
    ----------
    machine_list: optional, list of strings
        List of default machine names to be loaded.

    Returns
    -------
    self : returns an instance of self.
    """
    if machine_list == 'basic':
        machine_list = ['tree', 'ridge', 'random_forest', 'svm']
    if machine_list == 'advanced':
        machine_list = ['lasso', 'tree', 'ridge', 'random_forest', 'svm', 'bayesian_ridge', 'sgd']

    self.estimators_ = {}

    for machine in machine_list:
        try:
            if machine == 'lasso':
                self.estimators_['lasso'] = linear_model.LassoCV(random_state=self.random_state).fit(self.X_k_, self.y_k_)
            if machine == 'tree':
                self.estimators_['tree'] = DecisionTreeRegressor(random_state=self.random_state).fit(self.X_k_, self.y_k_)
            if machine == 'ridge':
                self.estimators_['ridge'] = linear_model.RidgeCV().fit(self.X_k_, self.y_k_)
            if machine == 'random_forest':
                self.estimators_['random_forest'] = RandomForestRegressor(random_state=self.random_state).fit(self.X_k_, self.y_k_)
            if machine == 'svm':
                self.estimators_['svm'] = LinearSVR(random_state=self.random_state).fit(self.X_k_, self.y_k_)
            if machine == 'sgd':
                self.estimators_['sgd'] = linear_model.SGDRegressor(random_state=self.random_state).fit(self.X_k_, self.y_k_)
            if machine == 'bayesian_ridge':
                self.estimators_['bayesian_ridge'] = linear_model.BayesianRidge().fit(self.X_k_, self.y_k_)
        except ValueError:
            continue
    return self
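Example 5 (and the nearly identical Example 6 below) appears to come from an aggregation-style regressor that keeps a held-out split in `X_k_` / `y_k_` and stores one fitted machine per name in `estimators_`. Here is a hedged, self-contained sketch of the same idea; the `TinyAggregator` class and its attribute names are hypothetical stand-ins, not a real library API:

import numpy as np
from sklearn import linear_model
from sklearn.tree import DecisionTreeRegressor

class TinyAggregator:
    # Hypothetical stand-in; X_k_, y_k_, random_state and estimators_ only
    # mirror the snippet above.
    def __init__(self, X_k, y_k, random_state=0):
        self.X_k_, self.y_k_ = X_k, y_k
        self.random_state = random_state
        self.estimators_ = {}

    def load_default(self, machine_list=('ridge', 'tree', 'bayesian_ridge')):
        for machine in machine_list:
            if machine == 'ridge':
                self.estimators_['ridge'] = linear_model.RidgeCV().fit(self.X_k_, self.y_k_)
            if machine == 'tree':
                self.estimators_['tree'] = DecisionTreeRegressor(
                    random_state=self.random_state).fit(self.X_k_, self.y_k_)
            if machine == 'bayesian_ridge':
                self.estimators_['bayesian_ridge'] = linear_model.BayesianRidge().fit(
                    self.X_k_, self.y_k_)
        return self

rng = np.random.RandomState(0)
X, y = rng.randn(80, 5), rng.randn(80)
agg = TinyAggregator(X, y).load_default()
print(sorted(agg.estimators_))  # ['bayesian_ridge', 'ridge', 'tree']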
Example 6: load_default
# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import BayesianRidge [as alias]
def load_default(self, machine_list='basic'):
    """
    Loads 4 different scikit-learn regressors by default. The advanced list adds more machines.

    Parameters
    ----------
    machine_list: optional, list of strings
        List of default machine names to be loaded.
        Default is 'basic'.

    Returns
    -------
    self : returns an instance of self.
    """
    if machine_list == 'basic':
        machine_list = ['tree', 'ridge', 'random_forest', 'svm']
    if machine_list == 'advanced':
        machine_list = ['lasso', 'tree', 'ridge', 'random_forest', 'svm', 'bayesian_ridge', 'sgd']

    self.estimators_ = {}

    for machine in machine_list:
        try:
            if machine == 'lasso':
                self.estimators_['lasso'] = linear_model.LassoCV(random_state=self.random_state).fit(self.X_k_, self.y_k_)
            if machine == 'tree':
                self.estimators_['tree'] = DecisionTreeRegressor(random_state=self.random_state).fit(self.X_k_, self.y_k_)
            if machine == 'ridge':
                self.estimators_['ridge'] = linear_model.RidgeCV().fit(self.X_k_, self.y_k_)
            if machine == 'random_forest':
                self.estimators_['random_forest'] = RandomForestRegressor(random_state=self.random_state).fit(self.X_k_, self.y_k_)
            if machine == 'svm':
                self.estimators_['svm'] = SVR().fit(self.X_k_, self.y_k_)
            if machine == 'sgd':
                self.estimators_['sgd'] = linear_model.SGDRegressor(random_state=self.random_state).fit(self.X_k_, self.y_k_)
            if machine == 'bayesian_ridge':
                self.estimators_['bayesian_ridge'] = linear_model.BayesianRidge().fit(self.X_k_, self.y_k_)
        except ValueError:
            continue
    return self
Example 7: test_model_bayesian_ridge
# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import BayesianRidge [as alias]
def test_model_bayesian_ridge(self):
    model, X = fit_regression_model(linear_model.BayesianRidge())
    model_onnx = convert_sklearn(
        model, "bayesian ridge",
        [("input", FloatTensorType([None, X.shape[1]]))])
    self.assertIsNotNone(model_onnx)
    dump_data_and_model(
        X,
        model,
        model_onnx,
        basename="SklearnBayesianRidge-Dec4",
        allow_failure="StrictVersion("
                      "onnxruntime.__version__)"
                      "<= StrictVersion('0.2.1')",
    )
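`fit_regression_model` and `dump_data_and_model` are helpers from the skl2onnx test suite and are not shown here. The following is a minimal sketch of the same round trip with plain scikit-learn and onnxruntime calls, assuming skl2onnx and onnxruntime are installed; the dataset is illustrative:

import numpy as np
import onnxruntime as rt
from sklearn.datasets import make_regression
from sklearn.linear_model import BayesianRidge
from skl2onnx import convert_sklearn
from skl2onnx.common.data_types import FloatTensorType

X, y = make_regression(n_samples=200, n_features=4, random_state=0)
X = X.astype(np.float32)
model = BayesianRidge().fit(X, y)

# convert the fitted estimator to an ONNX graph
onnx_model = convert_sklearn(
    model, "bayesian ridge",
    [("input", FloatTensorType([None, X.shape[1]]))])

# run the ONNX graph and compare against the scikit-learn prediction
sess = rt.InferenceSession(onnx_model.SerializeToString(),
                           providers=["CPUExecutionProvider"])
onnx_pred = sess.run(None, {"input": X[:5]})[0].ravel()
print(np.allclose(onnx_pred, model.predict(X[:5]), atol=1e-3))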
Example 8: test_objectmapper
# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import BayesianRidge [as alias]
def test_objectmapper(self):
    df = pdml.ModelFrame([])
    self.assertIs(df.linear_model.ARDRegression, lm.ARDRegression)
    self.assertIs(df.linear_model.BayesianRidge, lm.BayesianRidge)
    self.assertIs(df.linear_model.ElasticNet, lm.ElasticNet)
    self.assertIs(df.linear_model.ElasticNetCV, lm.ElasticNetCV)
    self.assertIs(df.linear_model.HuberRegressor, lm.HuberRegressor)
    self.assertIs(df.linear_model.Lars, lm.Lars)
    self.assertIs(df.linear_model.LarsCV, lm.LarsCV)
    self.assertIs(df.linear_model.Lasso, lm.Lasso)
    self.assertIs(df.linear_model.LassoCV, lm.LassoCV)
    self.assertIs(df.linear_model.LassoLars, lm.LassoLars)
    self.assertIs(df.linear_model.LassoLarsCV, lm.LassoLarsCV)
    self.assertIs(df.linear_model.LassoLarsIC, lm.LassoLarsIC)
    self.assertIs(df.linear_model.LinearRegression, lm.LinearRegression)
    self.assertIs(df.linear_model.LogisticRegression, lm.LogisticRegression)
    self.assertIs(df.linear_model.LogisticRegressionCV, lm.LogisticRegressionCV)
    self.assertIs(df.linear_model.MultiTaskLasso, lm.MultiTaskLasso)
    self.assertIs(df.linear_model.MultiTaskElasticNet, lm.MultiTaskElasticNet)
    self.assertIs(df.linear_model.MultiTaskLassoCV, lm.MultiTaskLassoCV)
    self.assertIs(df.linear_model.MultiTaskElasticNetCV, lm.MultiTaskElasticNetCV)
    self.assertIs(df.linear_model.OrthogonalMatchingPursuit, lm.OrthogonalMatchingPursuit)
    self.assertIs(df.linear_model.OrthogonalMatchingPursuitCV, lm.OrthogonalMatchingPursuitCV)
    self.assertIs(df.linear_model.PassiveAggressiveClassifier, lm.PassiveAggressiveClassifier)
    self.assertIs(df.linear_model.PassiveAggressiveRegressor, lm.PassiveAggressiveRegressor)
    self.assertIs(df.linear_model.Perceptron, lm.Perceptron)
    self.assertIs(df.linear_model.RandomizedLasso, lm.RandomizedLasso)
    self.assertIs(df.linear_model.RandomizedLogisticRegression, lm.RandomizedLogisticRegression)
    self.assertIs(df.linear_model.RANSACRegressor, lm.RANSACRegressor)
    self.assertIs(df.linear_model.Ridge, lm.Ridge)
    self.assertIs(df.linear_model.RidgeClassifier, lm.RidgeClassifier)
    self.assertIs(df.linear_model.RidgeClassifierCV, lm.RidgeClassifierCV)
    self.assertIs(df.linear_model.RidgeCV, lm.RidgeCV)
    self.assertIs(df.linear_model.SGDClassifier, lm.SGDClassifier)
    self.assertIs(df.linear_model.SGDRegressor, lm.SGDRegressor)
    self.assertIs(df.linear_model.TheilSenRegressor, lm.TheilSenRegressor)
Example 9: __init__
# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import BayesianRidge [as alias]
def __init__(self, descriptor: Union[BaseFeaturizer, BaseDescriptor], *, targets={}, **estimators: BaseEstimator):
    """
    Gaussian log-likelihood.

    Parameters
    ----------
    descriptor: BaseFeaturizer or BaseDescriptor
        Descriptor calculator.
    estimators: BaseEstimator
        Gaussian estimators following the scikit-learn style.
        These estimators must provide a method named ``predict`` which
        accepts descriptors as input and returns ``(mean, std)`` in that order.
        By default, BayesianRidge_ will be used.

        .. _BayesianRidge: https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.BayesianRidge.html#sklearn-linear-model-bayesianridge

    targets: dictionary
        Upper and lower bounds for each property, used to calculate the Gaussian CDF probability.
    """
    if estimators:
        self._mdl = deepcopy(estimators)
    else:
        self._mdl = {}

    if not isinstance(descriptor, (BaseFeaturizer, BaseDescriptor)):
        raise TypeError('<descriptor> must be a subclass of <BaseFeaturizer> or <BaseDescriptor>')
    self._descriptor = descriptor
    self._descriptor.on_errors = 'nan'
    self._targets = deepcopy(targets)
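The docstring above requires each estimator's ``predict`` to return a ``(mean, std)`` pair. A short sketch of two scikit-learn estimators that already satisfy this contract when called with ``return_std=True`` (the data is synthetic and purely illustrative):

import numpy as np
from sklearn.linear_model import BayesianRidge
from sklearn.gaussian_process import GaussianProcessRegressor

rng = np.random.RandomState(0)
X = rng.randn(50, 3)
y = X @ np.array([1.0, -2.0, 0.5]) + 0.1 * rng.randn(50)

# BayesianRidge: predictive mean and standard deviation per sample
br = BayesianRidge().fit(X, y)
mean, std = br.predict(X[:5], return_std=True)

# GaussianProcessRegressor exposes the same (mean, std) signature
gp = GaussianProcessRegressor().fit(X, y)
mean_gp, std_gp = gp.predict(X[:5], return_std=True)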
Example 10: predict
# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import BayesianRidge [as alias]
def predict(self, smiles, **kwargs):
    fps = self._descriptor.transform(smiles, return_type='df')
    fps_ = fps.dropna()

    tmp = {}
    for k, v in self._mdl.items():
        if isinstance(v, BayesianRidge):
            tmp[k + ': mean'], tmp[k + ': std'] = v.predict(fps_, return_std=True)
        else:
            tmp[k + ': mean'], tmp[k + ': std'] = v.predict(fps_, **kwargs)

    tmp = pd.DataFrame(data=tmp, index=fps_.index)
    return pd.DataFrame(data=tmp, index=fps.index)

# todo: implement scale function
Example 11: test_gaussian_ll_1
# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import BayesianRidge [as alias]
def test_gaussian_ll_1(data):
    bre = deepcopy(data['bre'])
    bre2 = data['bre2']
    X, y = data['pg']

    assert 'bandgap' in bre._mdl
    assert 'glass_transition_temperature' in bre._mdl
    assert 'refractive_index' in bre2._mdl
    assert 'density' in bre2._mdl

    ll = bre.log_likelihood(X.sample(10),
                            bandgap=(7, 8),
                            glass_transition_temperature=(300, 400))
    assert ll.shape == (10, 2)

    assert isinstance(bre['bandgap'], BayesianRidge)
    assert isinstance(bre['glass_transition_temperature'], BayesianRidge)

    with pytest.raises(KeyError):
        bre['other']

    with pytest.raises(TypeError):
        bre['other'] = 1

    bre['other'] = BayesianRidge()

    bre.remove_estimator()
    assert bre._mdl == {}
Example 12: _get_learner
# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import BayesianRidge [as alias]
def _get_learner(self):
    # xgboost
    if self.learner_name in ["reg_xgb_linear", "reg_xgb_tree", "reg_xgb_tree_best_single_model"]:
        return XGBRegressor(**self.param_dict)
    if self.learner_name in ["clf_xgb_linear", "clf_xgb_tree"]:
        return XGBClassifier(**self.param_dict)
    # sklearn
    if self.learner_name == "reg_skl_lasso":
        return Lasso(**self.param_dict)
    if self.learner_name == "reg_skl_ridge":
        return Ridge(**self.param_dict)
    if self.learner_name == "reg_skl_random_ridge":
        return RandomRidge(**self.param_dict)
    if self.learner_name == "reg_skl_bayesian_ridge":
        return BayesianRidge(**self.param_dict)
    if self.learner_name == "reg_skl_svr":
        return SVR(**self.param_dict)
    if self.learner_name == "reg_skl_lsvr":
        return LinearSVR(**self.param_dict)
    if self.learner_name == "reg_skl_knn":
        return KNNRegressor(**self.param_dict)
    if self.learner_name == "reg_skl_etr":
        return ExtraTreesRegressor(**self.param_dict)
    if self.learner_name == "reg_skl_rf":
        return RandomForestRegressor(**self.param_dict)
    if self.learner_name == "reg_skl_gbm":
        return GradientBoostingRegressor(**self.param_dict)
    if self.learner_name == "reg_skl_adaboost":
        return AdaBoostRegressor(**self.param_dict)
    # keras
    if self.learner_name == "reg_keras_dnn":
        try:
            return KerasDNNRegressor(**self.param_dict)
        except:
            return None
    # rgf
    if self.learner_name == "reg_rgf":
        return RGFRegressor(**self.param_dict)
    # ensemble
    if self.learner_name == "reg_ensemble":
        return EnsembleLearner(**self.param_dict)
    return None
Example 13: getSKLearnModel
# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import BayesianRidge [as alias]
def getSKLearnModel(modelName):
    if modelName == 'LinearRegression':
        model = linear_model.LinearRegression()
    elif modelName == 'BayesianRidge':
        model = linear_model.BayesianRidge()
    elif modelName == 'ARDRegression':
        model = linear_model.ARDRegression()
    elif modelName == 'ElasticNet':
        model = linear_model.ElasticNet()
    elif modelName == 'HuberRegressor':
        model = linear_model.HuberRegressor()
    elif modelName == 'Lasso':
        model = linear_model.Lasso()
    elif modelName == 'LassoLars':
        model = linear_model.LassoLars()
    elif modelName == 'Ridge':
        model = linear_model.Ridge()
    elif modelName == 'SGDRegressor':
        model = linear_model.SGDRegressor()
    elif modelName == 'SVR':
        model = SVR()
    elif modelName == 'MLPClassifier':
        model = MLPClassifier()
    elif modelName == 'KNeighborsClassifier':
        model = KNeighborsClassifier()
    elif modelName == 'SVC':
        model = SVC()
    elif modelName == 'GaussianProcessClassifier':
        model = GaussianProcessClassifier()
    elif modelName == 'DecisionTreeClassifier':
        model = DecisionTreeClassifier()
    elif modelName == 'RandomForestClassifier':
        model = RandomForestClassifier()
    elif modelName == 'AdaBoostClassifier':
        model = AdaBoostClassifier()
    elif modelName == 'GaussianNB':
        model = GaussianNB()
    elif modelName == 'LogisticRegression':
        model = linear_model.LogisticRegression()
    elif modelName == 'QuadraticDiscriminantAnalysis':
        model = QuadraticDiscriminantAnalysis()
    return model
Example 14: lets_try
# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import BayesianRidge [as alias]
def lets_try(train, labels):
    results = {}

    def test_model(clf):
        cv = KFold(n_splits=5, shuffle=True, random_state=45)
        r2 = make_scorer(r2_score)
        r2_val_score = cross_val_score(clf, train, labels, cv=cv, scoring=r2)
        scores = [r2_val_score.mean()]
        return scores

    clf = linear_model.LinearRegression()
    results["Linear"] = test_model(clf)

    clf = linear_model.Ridge()
    results["Ridge"] = test_model(clf)

    clf = linear_model.BayesianRidge()
    results["Bayesian Ridge"] = test_model(clf)

    clf = linear_model.HuberRegressor()
    results["Huber"] = test_model(clf)

    clf = linear_model.Lasso(alpha=1e-4)
    results["Lasso"] = test_model(clf)

    clf = BaggingRegressor()
    results["Bagging"] = test_model(clf)

    clf = RandomForestRegressor()
    results["RandomForest"] = test_model(clf)

    clf = AdaBoostRegressor()
    results["AdaBoost"] = test_model(clf)

    clf = svm.SVR()
    results["SVM RBF"] = test_model(clf)

    clf = svm.SVR(kernel="linear")
    results["SVM Linear"] = test_model(clf)

    results = pd.DataFrame.from_dict(results, orient='index')
    results.columns = ["R Square Score"]
    # results = results.sort(columns=["R Square Score"], ascending=False)
    results.plot(kind="bar", title="Model Scores")
    axes = plt.gca()
    axes.set_ylim([0.5, 1])
    return results
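A toy invocation of the comparison above on synthetic data. It assumes the imports the snippet relies on (pandas as pd, matplotlib.pyplot as plt, sklearn's linear_model, svm, the ensemble regressors, KFold, cross_val_score, make_scorer and r2_score) are already available, and the dataset is purely illustrative:

import numpy as np
from sklearn.datasets import make_regression

# synthetic regression problem with mild noise
X, y = make_regression(n_samples=300, n_features=8, noise=5.0, random_state=0)
scores = lets_try(X, y)
print(scores.sort_values("R Square Score", ascending=False))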
Example 15: fit
# Required import: from sklearn import linear_model [as alias]
# Or: from sklearn.linear_model import BayesianRidge [as alias]
def fit(self, smiles, y=None, *, X_scaler=None, y_scaler=None, **kwargs):
    """
    By default, rows containing NaN are removed automatically.

    Parameters
    ----------
    smiles: list[str]
        SMILES for training.
    y: pandas.DataFrame
        Target properties for training.
    X_scaler: Scaler (optional, not implemented)
        Scaler for transforming X.
    y_scaler: Scaler (optional, not implemented)
        Scaler for transforming y.
    kwargs: dict
        Parameters passed to BayesianRidge initialization.
    """
    if self._mdl:
        raise RuntimeError('estimators have been set. '
                           'If you want to re-train these estimators, '
                           'please use the `remove_estimator()` method first.')
    if not isinstance(y, (pd.DataFrame, pd.Series)):
        raise TypeError('please package all properties into a pd.DataFrame or pd.Series')

    # remove NaN from X
    desc = self._descriptor.transform(smiles, return_type='df').reset_index(drop=True)
    y = y.reset_index(drop=True)
    desc.dropna(inplace=True)
    y = pd.DataFrame(y.loc[desc.index])

    for c in y:
        y_ = y[c]  # get target property.
        # remove NaN from y_
        y_.dropna(inplace=True)
        desc_ = desc.loc[y_.index]
        desc_ = desc_.values

        # one BayesianRidge model is fitted per target property
        mdl = BayesianRidge(compute_score=True, **kwargs)
        mdl.fit(desc_, y_)
        self._mdl[c] = mdl

# log_likelihood returns a dataframe of log-likelihood values for each property & sample