This article collects typical usage examples of Python's sklearn.svm.LinearSVR. If you are unsure how to use svm.LinearSVR in practice, or are looking for concrete working examples, the curated code samples below should help. You can also explore further usage examples for the containing module, sklearn.svm.
Fifteen code examples of svm.LinearSVR are shown below, sorted by popularity by default.
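Before the project-specific examples, here is a minimal, self-contained usage sketch of LinearSVR. It is not taken from any of the examples below; the synthetic make_regression data and the hyperparameter values are illustrative assumptions only.

from sklearn.datasets import make_regression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVR

# Synthetic regression data (illustrative only).
X, y = make_regression(n_samples=200, n_features=10, noise=0.5, random_state=0)

# LinearSVR is not scale-invariant, so standardizing the features first is usually advisable.
reg = make_pipeline(StandardScaler(),
                    LinearSVR(C=1.0, epsilon=0.0, random_state=0, max_iter=10000))
reg.fit(X, y)
print(reg.predict(X[:5]))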
Example 1: ensure_many_models

# Required import: from sklearn import svm  [as alias]
# Or: from sklearn.svm import LinearSVR  [as alias]
def ensure_many_models(self):
    from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
    from sklearn.neural_network import MLPRegressor
    from sklearn.linear_model import ElasticNet, RANSACRegressor, HuberRegressor, PassiveAggressiveRegressor
    from sklearn.neighbors import KNeighborsRegressor
    from sklearn.svm import SVR, LinearSVR

    import warnings
    from sklearn.exceptions import ConvergenceWarning
    warnings.filterwarnings('ignore', category=ConvergenceWarning)

    for learner in [GradientBoostingRegressor, RandomForestRegressor, MLPRegressor,
                    ElasticNet, RANSACRegressor, HuberRegressor, PassiveAggressiveRegressor,
                    KNeighborsRegressor, SVR, LinearSVR]:
        learner = learner()
        learner_name = str(learner).split("(", maxsplit=1)[0]
        with self.subTest("Test fit using {learner}".format(learner=learner_name)):
            model = self.estimator.__class__(learner)
            model.fit(self.data_lin["X"], self.data_lin["a"], self.data_lin["y"])
            self.assertTrue(True)  # Fit did not crash
Example 2: test_15_linearsvr

# Required import: from sklearn import svm  [as alias]
# Or: from sklearn.svm import LinearSVR  [as alias]
def test_15_linearsvr(self):
    print("\ntest 15 (linear svr without preprocessing)\n")
    X, X_test, y, features, target, test_file = self.data_utility.get_data_for_regression()
    model = LinearSVR()
    pipeline_obj = Pipeline([
        ("model", model)
    ])
    pipeline_obj.fit(X, y)
    file_name = 'test15sklearn.pmml'
    skl_to_pmml(pipeline_obj, features, target, file_name)
    model_name = self.adapa_utility.upload_to_zserver(file_name)
    predictions, _ = self.adapa_utility.score_in_zserver(model_name, test_file)
    model_pred = pipeline_obj.predict(X_test)
    self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
Example 3: test_16_linearsvr

# Required import: from sklearn import svm  [as alias]
# Or: from sklearn.svm import LinearSVR  [as alias]
def test_16_linearsvr(self):
    print("\ntest 16 (linear svr with preprocessing)\n")
    X, X_test, y, features, target, test_file = self.data_utility.get_data_for_regression()
    model = LinearSVR()
    pipeline_obj = Pipeline([
        ("scaler", MinMaxScaler()),
        ("model", model)
    ])
    pipeline_obj.fit(X, y)
    file_name = 'test16sklearn.pmml'
    skl_to_pmml(pipeline_obj, features, target, file_name)
    model_name = self.adapa_utility.upload_to_zserver(file_name)
    predictions, _ = self.adapa_utility.score_in_zserver(model_name, test_file)
    model_pred = pipeline_obj.predict(X_test)
    self.assertEqual(self.adapa_utility.compare_predictions(predictions, model_pred), True)
Example 4: meta_model_fit

# Required import: from sklearn import svm  [as alias]
# Or: from sklearn.svm import LinearSVR  [as alias]
def meta_model_fit(X_train, y_train, svm_hardness, fit_intercept, number_of_threads, regressor_type="LinearSVR"):
    """
    Trains a meta-labeler for predicting the number of labels for each user.

    Based on: Tang, L., Rajan, S., & Narayanan, V. K. (2009, April).
    Large scale multi-label classification via metalabeler.
    In Proceedings of the 18th International Conference on World Wide Web (pp. 211-220). ACM.
    """
    if regressor_type == "LinearSVR":
        # liblinear recommends solving the primal problem (dual=False) when n_samples > n_features.
        if X_train.shape[0] > X_train.shape[1]:
            dual = False
        else:
            dual = True

        model = LinearSVR(C=svm_hardness, random_state=0, dual=dual,
                          fit_intercept=fit_intercept)

        # The regression target is the number of labels assigned to each sample.
        y_train_meta = y_train.sum(axis=1)

        model.fit(X_train, y_train_meta)
    else:
        print("Invalid regressor type.")
        raise RuntimeError
    return model
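A minimal, hypothetical invocation of meta_model_fit is sketched below; the dense random arrays and the parameter values are assumptions made for illustration (the original project may pass sparse feature matrices instead):

import numpy as np

rng = np.random.RandomState(0)
X_train = rng.rand(100, 20)                     # feature matrix (assumed dense here)
Y_train = (rng.rand(100, 5) > 0.5).astype(int)  # multi-label indicator matrix

meta_model = meta_model_fit(X_train, Y_train, svm_hardness=1.0,
                            fit_intercept=True, number_of_threads=1)
label_counts = meta_model.predict(X_train)      # estimated number of labels per sample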
Example 5: predict_features

# Required import: from sklearn import svm  [as alias]
# Or: from sklearn.svm import LinearSVR  [as alias]
def predict_features(self, df_features, df_target, idx=0, C=.1, **kwargs):
    """For one variable, predict its neighbouring nodes.

    Args:
        df_features (pandas.DataFrame): candidate explanatory features
        df_target (pandas.Series): target variable
        idx (int): (optional) for printing purposes
        kwargs (dict): additional options for algorithms
        C (float): penalty parameter of the error term

    Returns:
        list: scores of each feature relative to the target
    """
    lsvc = LinearSVR(C=C).fit(df_features.values, np.ravel(df_target.values))
    return np.abs(lsvc.coef_)
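Note on the design choice above: the absolute values of the LinearSVR coefficients are used as a proxy for how relevant each feature is to the target, so the resulting ranking is only meaningful when the candidate features are on comparable scales (or have been standardized beforehand).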
Example 6: test_svr

# Required import: from sklearn import svm  [as alias]
# Or: from sklearn.svm import LinearSVR  [as alias]
def test_svr():
    # Test Support Vector Regression
    diabetes = datasets.load_diabetes()
    for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
                svm.NuSVR(kernel='linear', nu=.4, C=10.),
                svm.SVR(kernel='linear', C=10.),
                svm.LinearSVR(C=10.),
                svm.LinearSVR(C=10.),
                ):
        clf.fit(diabetes.data, diabetes.target)
        assert_greater(clf.score(diabetes.data, diabetes.target), 0.02)

    # non-regression test; previously, BaseLibSVM would check that
    # len(np.unique(y)) < 2, which must only be done for SVC
    svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
    svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data)))
Example 7: test_glm_regressor

# Required import: from sklearn import svm  [as alias]
# Or: from sklearn.svm import LinearSVR  [as alias]
def test_glm_regressor(self):
    X, y = make_regression(n_features=4, random_state=0)

    lr = LinearRegression()
    lr.fit(X, y)
    lr_coreml = coremltools.converters.sklearn.convert(lr)
    lr_onnx = convert(lr_coreml.get_spec())
    self.assertTrue(lr_onnx is not None)
    dump_data_and_model(X.astype(numpy.float32), lr, lr_onnx, basename="CmlLinearRegression-Dec4")

    svr = LinearSVR()
    svr.fit(X, y)
    svr_coreml = coremltools.converters.sklearn.convert(svr)
    svr_onnx = convert(svr_coreml.get_spec())
    self.assertTrue(svr_onnx is not None)
    dump_data_and_model(X.astype(numpy.float32), svr, svr_onnx, basename="CmlLinearSvr-Dec4")
Example 8: test_svr

# Required import: from sklearn import svm  [as alias]
# Or: from sklearn.svm import LinearSVR  [as alias]
def test_svr():
    # Test Support Vector Regression
    diabetes = datasets.load_diabetes()
    for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
                svm.NuSVR(kernel='linear', nu=.4, C=10.),
                svm.SVR(kernel='linear', C=10.),
                svm.LinearSVR(C=10.),
                svm.LinearSVR(C=10.),
                ):
        clf.fit(diabetes.data, diabetes.target)
        assert_greater(clf.score(diabetes.data, diabetes.target), 0.02)

    # non-regression test; previously, BaseLibSVM would check that
    # len(np.unique(y)) < 2, which must only be done for SVC
    svm.SVR(gamma='scale').fit(diabetes.data, np.ones(len(diabetes.data)))
    svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data)))
Example 9: test_linearsvr

# Required import: from sklearn import svm  [as alias]
# Or: from sklearn.svm import LinearSVR  [as alias]
def test_linearsvr():
    # check that SVR(kernel='linear') and LinearSVR() give
    # comparable results
    diabetes = datasets.load_diabetes()
    lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
    score1 = lsvr.score(diabetes.data, diabetes.target)

    svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target)
    score2 = svr.score(diabetes.data, diabetes.target)

    assert_allclose(np.linalg.norm(lsvr.coef_),
                    np.linalg.norm(svr.coef_), 1, 0.0001)
    assert_almost_equal(score1, score2, 2)
Example 10: test_linearsvr_fit_sampleweight

# Required import: from sklearn import svm  [as alias]
# Or: from sklearn.svm import LinearSVR  [as alias]
def test_linearsvr_fit_sampleweight():
    # check correct result when sample_weight is 1
    # check that SVR(kernel='linear') and LinearSVR() give
    # comparable results
    diabetes = datasets.load_diabetes()
    n_samples = len(diabetes.target)
    unit_weight = np.ones(n_samples)
    lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target,
                                    sample_weight=unit_weight)
    score1 = lsvr.score(diabetes.data, diabetes.target)

    lsvr_no_weight = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
    score2 = lsvr_no_weight.score(diabetes.data, diabetes.target)

    assert_allclose(np.linalg.norm(lsvr.coef_),
                    np.linalg.norm(lsvr_no_weight.coef_), 1, 0.0001)
    assert_almost_equal(score1, score2, 2)

    # check that fit(X) = fit([X1, X2, X3], sample_weight=[n1, n2, n3]) where
    # X = X1 repeated n1 times, X2 repeated n2 times and so forth
    random_state = check_random_state(0)
    random_weight = random_state.randint(0, 10, n_samples)
    lsvr_unflat = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target,
                                           sample_weight=random_weight)
    score3 = lsvr_unflat.score(diabetes.data, diabetes.target,
                               sample_weight=random_weight)

    X_flat = np.repeat(diabetes.data, random_weight, axis=0)
    y_flat = np.repeat(diabetes.target, random_weight, axis=0)
    lsvr_flat = svm.LinearSVR(C=1e3).fit(X_flat, y_flat)
    score4 = lsvr_flat.score(X_flat, y_flat)

    assert_almost_equal(score3, score4, 2)
Example 11: test_linearsvx_loss_penalty_deprecations

# Required import: from sklearn import svm  [as alias]
# Or: from sklearn.svm import LinearSVR  [as alias]
def test_linearsvx_loss_penalty_deprecations():
    X, y = [[0.0], [1.0]], [0, 1]

    msg = ("loss='%s' has been deprecated in favor of "
           "loss='%s' as of 0.16. Backward compatibility"
           " for the %s will be removed in %s")

    # LinearSVC
    # loss l1 --> hinge
    assert_warns_message(DeprecationWarning,
                         msg % ("l1", "hinge", "loss='l1'", "1.0"),
                         svm.LinearSVC(loss="l1").fit, X, y)

    # loss l2 --> squared_hinge
    assert_warns_message(DeprecationWarning,
                         msg % ("l2", "squared_hinge", "loss='l2'", "1.0"),
                         svm.LinearSVC(loss="l2").fit, X, y)

    # LinearSVR
    # loss l1 --> epsilon_insensitive
    assert_warns_message(DeprecationWarning,
                         msg % ("l1", "epsilon_insensitive", "loss='l1'",
                                "1.0"),
                         svm.LinearSVR(loss="l1").fit, X, y)

    # loss l2 --> squared_epsilon_insensitive
    assert_warns_message(DeprecationWarning,
                         msg % ("l2", "squared_epsilon_insensitive",
                                "loss='l2'", "1.0"),
                         svm.LinearSVR(loss="l2").fit, X, y)
Example 12: test_linear_svm_convergence_warnings

# Required import: from sklearn import svm  [as alias]
# Or: from sklearn.svm import LinearSVR  [as alias]
def test_linear_svm_convergence_warnings():
    # Test that warnings are raised if the model does not converge
    lsvc = svm.LinearSVC(random_state=0, max_iter=2)
    assert_warns(ConvergenceWarning, lsvc.fit, X, Y)
    assert_equal(lsvc.n_iter_, 2)

    lsvr = svm.LinearSVR(random_state=0, max_iter=2)
    assert_warns(ConvergenceWarning, lsvr.fit, iris.data, iris.target)
    assert_equal(lsvr.n_iter_, 2)
Example 13: fit

# Required import: from sklearn import svm  [as alias]
# Or: from sklearn.svm import LinearSVR  [as alias]
def fit(self, X, y, sample_weight=None, eval_set=None, sample_weight_eval_set=None, **kwargs):
    X = dt.Frame(X)
    orig_cols = list(X.names)

    if self.num_classes >= 2:
        mod = linsvc(random_state=self.random_state, C=self.params["C"], penalty=self.params["penalty"],
                     loss=self.params["loss"], dual=self.params["dual"])
        kf = StratifiedKFold(n_splits=3, shuffle=True, random_state=self.random_state)
        model = CalibratedClassifierCV(base_estimator=mod, method='isotonic', cv=kf)
        lb = LabelEncoder()
        lb.fit(self.labels)
        y = lb.transform(y)
    else:
        model = LinearSVR(epsilon=self.params["epsilon"], C=self.params["C"], loss=self.params["loss"],
                          dual=self.params["dual"], random_state=self.random_state)

    self.means = dict()
    self.standard_scaler = StandardScaler()

    # Impute missing values with the column mean before scaling.
    for col in X.names:
        XX = X[:, col]
        self.means[col] = XX.mean1()
        if self.means[col] is None:
            self.means[col] = 0
        XX.replace(None, self.means[col])
        X[:, col] = XX
        assert X[dt.isna(dt.f[col]), col].nrows == 0

    X = X.to_numpy()
    X = self.standard_scaler.fit_transform(X)
    model.fit(X, y, sample_weight=sample_weight)

    importances = np.array([0.0 for k in range(len(orig_cols))])
    if self.num_classes >= 2:
        for classifier in model.calibrated_classifiers_:
            importances += np.array(abs(classifier.base_estimator.get_coeff()))
    else:
        importances += np.array(abs(model.coef_[0]))

    self.set_model_properties(model=model,
                              features=orig_cols,
                              importances=importances.tolist(),  # abs(model.coef_[0])
                              iterations=0)
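A brief note on this recipe's structure: the classification branch wraps a linear SVC in CalibratedClassifierCV to obtain calibrated probabilities, while the regression branch uses LinearSVR directly; both share the same preprocessing (mean imputation followed by standard scaling) and export the absolute linear coefficients as feature importances.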
Example 14: test_validate_sklearn_linarsvr_models_regression

# Required import: from sklearn import svm  [as alias]
# Or: from sklearn.svm import LinearSVR  [as alias]
def test_validate_sklearn_linarsvr_models_regression(self):
    model = LinearSVR()
    pipe = Pipeline([
        ('model', model)
    ])
    pipe.fit(self.X_reg, self.y_reg)
    file_name = 'linearsvr_model_regression.pmml'
    skl_to_pmml(pipe, self.features_reg, 'target', file_name)
    self.assertEqual(self.schema.is_valid(file_name), True)
Example 15: test_sklearn_21

# Required import: from sklearn import svm  [as alias]
# Or: from sklearn.svm import LinearSVR  [as alias]
def test_sklearn_21(self):
    df = pd.read_csv('nyoka/tests/auto-mpg.csv')
    X = df.drop(['mpg', 'car name'], axis=1)
    y = df['mpg']
    features = X.columns
    target = 'mpg'
    f_name = "linearsvr_pmml.pmml"

    model = LinearSVR()
    pipeline_obj = Pipeline([
        ('model', model)
    ])
    pipeline_obj.fit(X, y)
    skl_to_pmml(pipeline_obj, features, target, f_name)
    pmml_obj = pml.parse(f_name, True)

    # 1: the PMML file was written to disk
    self.assertEqual(os.path.isfile(f_name), True)

    # 2: the intercept in the PMML matches the fitted model
    self.assertEqual("{:.16f}".format(model.intercept_[0]),
                     "{:.16f}".format(pmml_obj.RegressionModel[0].RegressionTable[0].intercept))

    # 3: every coefficient in the PMML matches the fitted model
    reg_tab = pmml_obj.RegressionModel[0].RegressionTable[0].NumericPredictor
    for model_val, pmml_val in zip(model.coef_, reg_tab):
        self.assertEqual("{:.16f}".format(model_val), "{:.16f}".format(pmml_val.coefficient))