This article collects typical usage examples of Python's sklearn.clone. If you have been wondering what sklearn.clone does, how to call it, or what it looks like in real code, the hand-picked samples below may help. You can also explore further usage examples from the sklearn module itself.
The following 15 code examples of sklearn.clone are sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
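Before diving into the examples, here is a minimal standalone sketch of what sklearn.clone does: it constructs a new, unfitted estimator carrying the same constructor parameters as the original (the Lasso estimator below is used purely for illustration).

from sklearn import clone
from sklearn.linear_model import Lasso

original = Lasso(alpha=20)
copy = clone(original)

assert copy.get_params() == original.get_params()  # same hyperparameters
assert copy is not original                         # but a separate, unfitted object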
Example 1: __init__

# Required import: import sklearn [as alias]
# Or: from sklearn import clone [as alias]
def __init__(self,
             test_indices=None,
             estimator={'object': sklearn.linear_model.Lasso(alpha=20),
                        'fit': {}},
             **kwargs):
    self.test_indices = np.asarray(test_indices)
    self.estimator = sklearn.clone(estimator['object'])
    self.estimator_fit = estimator.get('fit', {})
    self.models = []  # leave empty, fill in during `fit`

    self.n_record = 0
    self.record = []

    self.n_series, self.n_features = 0, 0
    self.px = kwargs.get('px', 0)
    self.py = kwargs.get('py', 0)
Example 2: fit

# Required import: import sklearn [as alias]
# Or: from sklearn import clone [as alias]
def fit(self, X, y, **kwargs):
    """Fit the estimator.

    If `prefit` is set to `True`, the base estimator is kept as is.
    Otherwise it is fitted from the provided arguments.
    """
    if self.estimator is None:
        raise ValueError(BASE_ESTIMATOR_NONE_ERROR_MESSAGE)

    if not self.prefit:
        self.estimator_ = clone(self.estimator).fit(X, y, **kwargs)
    else:
        try:
            check_is_fitted(self.estimator)
        except NotFittedError:
            warn(BASE_ESTIMATOR_NOT_FITTED_WARNING.format(type(self).__name__))
        self.estimator_ = self.estimator

    return self
Example 3: fit

# Required import: import sklearn [as alias]
# Or: from sklearn import clone [as alias]
def fit(self, X, y):
    """
    Fit the estimator on the data, weighting samples with exponentially decaying weights.

    :param X: array-like, shape=(n_samples, n_features,) training data.
    :param y: array-like, shape=(n_samples,) training data.
    :return: Returns an instance of self.
    """
    X, y = check_X_y(X, y, estimator=self, dtype=FLOAT_DTYPES)
    self.weights_ = np.cumprod(np.ones(X.shape[0]) * self.decay)[::-1]
    self.estimator_ = clone(self.model)
    try:
        self.estimator_.fit(X, y, sample_weight=self.weights_)
    except TypeError as e:
        if "sample_weight" in str(e):
            raise TypeError(
                f"Model {type(self.model).__name__}.fit() does not have 'sample_weight'"
            )
    if self._is_classifier():
        self.classes_ = self.estimator_.classes_
    return self
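As a side note on the weight computation above, here is a small worked illustration (assuming decay=0.9 and five samples, values chosen only for demonstration) showing that more recent samples receive larger weights:

import numpy as np

decay = 0.9
weights = np.cumprod(np.ones(5) * decay)[::-1]
# array([0.59049, 0.6561, 0.729, 0.81, 0.9])
# the last (most recent) sample gets weight decay**1, the first gets decay**5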
Example 4: fit

# Required import: import sklearn [as alias]
# Or: from sklearn import clone [as alias]
def fit(self, X, y):
    """
    Fit the underlying estimator on the data.

    :param X: array-like, shape=(n_samples, n_features,) training data.
    :param y: array-like, shape=(n_samples,) training data.
    :return: Returns an instance of self.
    """
    X, y = check_X_y(X, y, estimator=self, dtype=FLOAT_DTYPES)
    self.estimator_ = clone(self.model)
    if not isinstance(self.estimator_, ProbabilisticClassifier):
        raise ValueError(
            "The Thresholder meta model only works on classification models with .predict_proba."
        )
    self.estimator_.fit(X, y)
    self.classes_ = self.estimator_.classes_
    if len(self.classes_) != 2:
        raise ValueError(
            "The Thresholder meta model only works on models with two classes."
        )
    return self
Example 5: test_auto_init

# Required import: import sklearn [as alias]
# Or: from sklearn import clone [as alias]
def test_auto_init(n_samples, n_features, n_classes, n_components):
    # Test that 'auto' chooses the init as expected for every configuration
    # of the relative order of n_samples, n_features, n_classes and n_components.
    rng = np.random.RandomState(42)
    nca_base = NeighborhoodComponentsAnalysis(init='auto',
                                              n_components=n_components,
                                              max_iter=1,
                                              random_state=rng)
    if n_classes >= n_samples:
        pass
        # n_classes > n_samples is impossible, and n_classes == n_samples
        # throws an error from lda but is an absurd case
    else:
        X = rng.randn(n_samples, n_features)
        y = np.tile(range(n_classes), n_samples // n_classes + 1)[:n_samples]
        if n_components > n_features:
            # this would raise a ValueError, which is already tested in
            # test_params_validation
            pass
        else:
            nca = clone(nca_base)
            nca.fit(X, y)
            if n_components <= min(n_classes - 1, n_features):
                nca_other = clone(nca_base).set_params(init='lda')
            elif n_components < min(n_features, n_samples):
                nca_other = clone(nca_base).set_params(init='pca')
            else:
                nca_other = clone(nca_base).set_params(init='identity')
            nca_other.fit(X, y)
            assert_array_almost_equal(nca.components_, nca_other.components_)
Example 6: fit

# Required import: import sklearn [as alias]
# Or: from sklearn import clone [as alias]
def fit(self, X, y):
    try:
        check_is_fitted(self.logistic_regression_estimator)
        self.logistic_regression_estimator_ = self.logistic_regression_estimator
    except NotFittedError:
        self.logistic_regression_estimator_ = clone(
            self.logistic_regression_estimator
        ).fit(X, y)
    return self
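The try/except around check_is_fitted is a common way to reuse an already fitted estimator and only clone-and-fit otherwise. A small standalone illustration of that mechanism (LogisticRegression is used purely as an example, not taken from the class above):

from sklearn.exceptions import NotFittedError
from sklearn.linear_model import LogisticRegression
from sklearn.utils.validation import check_is_fitted

lr = LogisticRegression()
try:
    check_is_fitted(lr)          # raises NotFittedError: lr has not been fitted yet
    print("reusing fitted estimator")
except NotFittedError:
    print("not fitted yet, would clone and fit here")  # this branch runs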
Example 7: __fit_grouped_estimator

# Required import: import sklearn [as alias]
# Or: from sklearn import clone [as alias]
def __fit_grouped_estimator(self, X, y, value_columns, group_columns):
    # Reset indices such that they are the same in X and y
    X, y = X.reset_index(drop=True), y.reset_index(drop=True)
    group_indices = X.groupby(group_columns).indices
    grouped_estimations = {
        group: clone(self.estimator).fit(
            X.loc[indices, value_columns], y.loc[indices]
        )
        for group, indices in group_indices.items()
    }
    return grouped_estimations
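The method above fits one cloned copy of the estimator per group. A minimal sketch of the same idea with made-up data (the DataFrame, column names, and LinearRegression below are assumptions for illustration, not part of the original project):

import pandas as pd
from sklearn import clone
from sklearn.linear_model import LinearRegression

X = pd.DataFrame({"g": ["a", "a", "b", "b"], "x": [1.0, 2.0, 3.0, 4.0]})
y = pd.Series([1.0, 2.0, 6.0, 8.0])

group_indices = X.groupby("g").indices  # e.g. {"a": array([0, 1]), "b": array([2, 3])}
models = {
    group: clone(LinearRegression()).fit(X.loc[indices, ["x"]], y.loc[indices])
    for group, indices in group_indices.items()
}

Because the indices are reset first, the positional indices returned by groupby(...).indices line up with the row labels, so they can be passed directly to .loc.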
Example 8: fit

# Required import: import sklearn [as alias]
# Or: from sklearn import clone [as alias]
def fit(self, X, y):
    """Fits the estimator."""
    X, y = check_X_y(X, y, estimator=self, dtype=FLOAT_DTYPES)
    self.estimator_ = clone(self.estimator)
    self.estimator_.fit(X, y)
    return self
Example 9: fit

# Required import: import sklearn [as alias]
# Or: from sklearn import clone [as alias]
def fit(self, X, y=None):
    self.estimator_ = clone(self.outlier_detector)
    if self.refit:
        super().fit(X, y)
        self.estimator_.fit(X, y)
    return self
Example 10: test_values_uniform

# Required import: import sklearn [as alias]
# Or: from sklearn import clone [as alias]
def test_values_uniform(random_xy_dataset_clf):
    X, y = random_xy_dataset_clf
    X, y = check_X_y(X, y)
    clf = DummyClassifier(strategy="most_frequent")
    transformer = EstimatorTransformer(clone(clf))
    transformed = transformer.fit(X, y).transform(X)
    assert transformed.shape == (y.shape[0], 1)
    assert np.all(transformed == clf.fit(X, y).predict(X))
Example 11: test_splits_not_fitted

# Required import: import sklearn [as alias]
# Or: from sklearn import clone [as alias]
def test_splits_not_fitted(cluster_method, random_xy_dataset_regr):
    cluster_method = clone(cluster_method)
    X, y = random_xy_dataset_regr
    kf = KlusterFoldValidation(cluster_method=cluster_method)
    for train_index, test_index in kf.split(X):
        assert len(train_index) > 0
        assert len(test_index) > 0
Example 12: test_splits_fitted

# Required import: import sklearn [as alias]
# Or: from sklearn import clone [as alias]
def test_splits_fitted(cluster_method, random_xy_dataset_regr):
    cluster_method = clone(cluster_method)
    X, y = random_xy_dataset_regr
    cluster_method = cluster_method.fit(X)
    kf = KlusterFoldValidation(cluster_method=cluster_method)
    for train_index, test_index in kf.split(X):
        assert len(train_index) > 0
        assert len(test_index) > 0
Example 13: check_clone

# Required import: import sklearn [as alias]
# Or: from sklearn import clone [as alias]
def check_clone(Estimator):
    # Check that we can call clone from scikit-learn on the estimator
    estimator = _construct_instance(Estimator)
    clone(estimator)
Example 14: test_estimators_compatibility

# Required import: import sklearn [as alias]
# Or: from sklearn import clone [as alias]
def test_estimators_compatibility(self):
    """Tests that dislib estimators are compatible with GridSearchCV.

    GridSearchCV uses sklearn.clone(estimator), which requires estimators to
    have working get_params() and set_params() methods. This is what this
    test checks, and it can easily be achieved by making the estimators
    inherit from sklearn's BaseEstimator."""
    estimators = (CascadeSVM, RandomForestClassifier,
                  DBSCAN, KMeans, GaussianMixture,
                  PCA, NearestNeighbors, ALS, LinearRegression)
    for estimator_class in estimators:
        self.assertIsInstance(estimator_class, type)
        est = estimator_class()

        # test __repr__
        repr(est)

        # test cloning
        cloned = clone(est)

        # test that set_params returns self
        self.assertIs(cloned.set_params(), cloned)

        # check that get_params(deep=False) is a subset of
        # get_params(deep=True)
        shallow_params = est.get_params(deep=False)
        deep_params = est.get_params(deep=True)
        self.assertTrue(all(item in deep_params.items()
                            for item in shallow_params.items()))
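The docstring above notes that clone compatibility boils down to working get_params()/set_params() methods. A minimal sketch of why inheriting from sklearn's BaseEstimator is enough (MyEstimator is a hypothetical class, not part of dislib):

from sklearn import clone
from sklearn.base import BaseEstimator

class MyEstimator(BaseEstimator):
    def __init__(self, alpha=1.0, max_iter=100):
        # BaseEstimator derives get_params()/set_params() from these __init__ arguments
        self.alpha = alpha
        self.max_iter = max_iter

est = MyEstimator(alpha=0.5)
cloned = clone(est)                        # works because get_params() is available
assert cloned.get_params()["alpha"] == 0.5
assert cloned.set_params(max_iter=10) is cloned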
Example 15: setup_dummy_YATSM

# Required import: import sklearn [as alias]
# Or: from sklearn import clone [as alias]
def setup_dummy_YATSM(X, Y, dates, i_breaks):
    """ Setup a dummy YATSM model

    Args:
        X (np.ndarray): n x p features
        Y (np.ndarray): n_series x n independent data
        dates (np.ndarray): n dates
        i_breaks (iterable): indices of ``dates`` representing break dates
            (can be zero or nonzero, but len(i_breaks) must equal
            len(yatsm.record))

    Returns:
        YATSM model
    """
    n = dates.size
    yatsm = YATSM()
    yatsm.X, yatsm.Y, yatsm.dates = X, Y, dates
    yatsm.n_coef, yatsm.n_series = X.shape[1], Y.shape[0]
    yatsm.models = np.array([sklearn.clone(yatsm.estimator)
                             for i in range(yatsm.n_series)])
    yatsm.test_indices = np.arange(yatsm.n_series)

    n_models = len(i_breaks)
    yatsm.record = np.hstack([yatsm.record_template] * n_models)

    def populate_record(yatsm, i_rec, i_start, i_end, i_break):
        yatsm.record[i_rec]['start'] = yatsm.dates[i_start]
        yatsm.record[i_rec]['end'] = yatsm.dates[i_end]
        yatsm.record[i_rec]['break'] = (yatsm.dates[i_break] if i_break
                                        else i_break)
        yatsm.fit_models(X[i_start:i_end, :], Y[:, i_start:i_end])
        for i, m in enumerate(yatsm.models):
            yatsm.record[i_rec]['coef'][:, i] = m.coef
            yatsm.record[i_rec]['rmse'][i] = m.rmse
        return yatsm

    i_start = 0
    i_end = i_breaks[0] - 1 if i_breaks[0] else n - 1
    i_break = i_breaks[0]
    yatsm = populate_record(yatsm, 0, i_start, i_end, i_break)

    for idx, i_break in enumerate(i_breaks[1:]):
        i_start = i_breaks[idx] + 1
        i_end = i_break - 1 if i_break else n - 1
        yatsm = populate_record(yatsm, idx + 1, i_start, i_end, i_break)

    return yatsm