This page collects typical usage examples of Python's sklearn.model_selection.LeavePOut. If you are unsure what model_selection.LeavePOut does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore the other members of the sklearn.model_selection module.
Six code examples of model_selection.LeavePOut are shown below, ordered by popularity.
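Before the collected examples, a minimal sketch of how LeavePOut itself behaves may be useful (the toy data below is made up purely for illustration): with p=2, the splitter enumerates every combination of two samples as the test set, so four samples yield C(4, 2) = 6 splits.
import numpy as np
from sklearn.model_selection import LeavePOut

# Toy data, purely illustrative
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([0, 0, 1, 1])

lpo = LeavePOut(p=2)
print(lpo.get_n_splits(X))          # 6, i.e. C(4, 2)
for train_index, test_index in lpo.split(X, y):
    print("TRAIN:", train_index, "TEST:", test_index)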
Example 1: test_2d_y
# Required import: from sklearn import model_selection [as alias]
# Or: from sklearn.model_selection import LeavePOut [as alias]
def test_2d_y():
    # smoke test for 2d y and multi-label
    n_samples = 30
    rng = np.random.RandomState(1)
    X = rng.randint(0, 3, size=(n_samples, 2))
    y = rng.randint(0, 3, size=(n_samples,))
    y_2d = y.reshape(-1, 1)
    y_multilabel = rng.randint(0, 2, size=(n_samples, 3))
    groups = rng.randint(0, 3, size=(n_samples,))
    splitters = [LeaveOneOut(), LeavePOut(p=2), KFold(), StratifiedKFold(),
                 RepeatedKFold(), RepeatedStratifiedKFold(),
                 ShuffleSplit(), StratifiedShuffleSplit(test_size=.5),
                 GroupShuffleSplit(), LeaveOneGroupOut(),
                 LeavePGroupsOut(n_groups=2), GroupKFold(), TimeSeriesSplit(),
                 PredefinedSplit(test_fold=groups)]
    for splitter in splitters:
        list(splitter.split(X, y, groups))
        list(splitter.split(X, y_2d, groups))
        try:
            list(splitter.split(X, y_multilabel, groups))
        except ValueError as e:
            allowed_target_types = ('binary', 'multiclass')
            msg = "Supported target types are: {}. Got 'multilabel".format(
                allowed_target_types)
            assert msg in str(e)
Example 2: test_leave_p_out_empty_trainset
# Required import: from sklearn import model_selection [as alias]
# Or: from sklearn.model_selection import LeavePOut [as alias]
def test_leave_p_out_empty_trainset():
    # No need to check LeavePGroupsOut
    cv = LeavePOut(p=2)
    X, y = [[1], [2]], [0, 3]  # 2 samples
    with pytest.raises(
            ValueError,
            match='p=2 must be strictly less than the number of samples=2'):
        next(cv.split(X, y, groups=[1, 2]))
Example 3: test_objectmapper
# Required import: from sklearn import model_selection [as alias]
# Or: from sklearn.model_selection import LeavePOut [as alias]
def test_objectmapper(self):
    df = pdml.ModelFrame([])
    # Splitter Classes
    self.assertIs(df.model_selection.KFold, ms.KFold)
    self.assertIs(df.model_selection.GroupKFold, ms.GroupKFold)
    self.assertIs(df.model_selection.StratifiedKFold, ms.StratifiedKFold)
    self.assertIs(df.model_selection.LeaveOneGroupOut, ms.LeaveOneGroupOut)
    self.assertIs(df.model_selection.LeavePGroupsOut, ms.LeavePGroupsOut)
    self.assertIs(df.model_selection.LeaveOneOut, ms.LeaveOneOut)
    self.assertIs(df.model_selection.LeavePOut, ms.LeavePOut)
    self.assertIs(df.model_selection.ShuffleSplit, ms.ShuffleSplit)
    self.assertIs(df.model_selection.GroupShuffleSplit,
                  ms.GroupShuffleSplit)
    # self.assertIs(df.model_selection.StratifiedShuffleSplit,
    #               ms.StratifiedShuffleSplit)
    self.assertIs(df.model_selection.PredefinedSplit, ms.PredefinedSplit)
    self.assertIs(df.model_selection.TimeSeriesSplit, ms.TimeSeriesSplit)
    # Splitter Functions
    # Hyper-parameter optimizers
    self.assertIs(df.model_selection.GridSearchCV, ms.GridSearchCV)
    self.assertIs(df.model_selection.RandomizedSearchCV, ms.RandomizedSearchCV)
    self.assertIs(df.model_selection.ParameterGrid, ms.ParameterGrid)
    self.assertIs(df.model_selection.ParameterSampler, ms.ParameterSampler)
    # Model validation
Example 4: test_objectmapper_abbr
# Required import: from sklearn import model_selection [as alias]
# Or: from sklearn.model_selection import LeavePOut [as alias]
def test_objectmapper_abbr(self):
    df = pdml.ModelFrame([])
    # Splitter Classes
    self.assertIs(df.ms.KFold, ms.KFold)
    self.assertIs(df.ms.GroupKFold, ms.GroupKFold)
    self.assertIs(df.ms.StratifiedKFold, ms.StratifiedKFold)
    self.assertIs(df.ms.LeaveOneGroupOut, ms.LeaveOneGroupOut)
    self.assertIs(df.ms.LeavePGroupsOut, ms.LeavePGroupsOut)
    self.assertIs(df.ms.LeaveOneOut, ms.LeaveOneOut)
    self.assertIs(df.ms.LeavePOut, ms.LeavePOut)
    self.assertIs(df.ms.ShuffleSplit, ms.ShuffleSplit)
    self.assertIs(df.ms.GroupShuffleSplit,
                  ms.GroupShuffleSplit)
    # self.assertIs(df.ms.StratifiedShuffleSplit,
    #               ms.StratifiedShuffleSplit)
    self.assertIs(df.ms.PredefinedSplit, ms.PredefinedSplit)
    self.assertIs(df.ms.TimeSeriesSplit, ms.TimeSeriesSplit)
    # Splitter Functions
    # Hyper-parameter optimizers
    self.assertIs(df.ms.GridSearchCV, ms.GridSearchCV)
    self.assertIs(df.ms.RandomizedSearchCV, ms.RandomizedSearchCV)
    self.assertIs(df.ms.ParameterGrid, ms.ParameterGrid)
    self.assertIs(df.ms.ParameterSampler, ms.ParameterSampler)
    # Model validation
Example 5: test_cross_validator_with_default_params
# Required import: from sklearn import model_selection [as alias]
# Or: from sklearn.model_selection import LeavePOut [as alias]
def test_cross_validator_with_default_params():
    n_samples = 4
    n_unique_groups = 4
    n_splits = 2
    p = 2
    n_shuffle_splits = 10  # (the default value)
    X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    X_1d = np.array([1, 2, 3, 4])
    y = np.array([1, 1, 2, 2])
    groups = np.array([1, 2, 3, 4])
    loo = LeaveOneOut()
    lpo = LeavePOut(p)
    kf = KFold(n_splits)
    skf = StratifiedKFold(n_splits)
    lolo = LeaveOneGroupOut()
    lopo = LeavePGroupsOut(p)
    ss = ShuffleSplit(random_state=0)
    ps = PredefinedSplit([1, 1, 2, 2])  # n_splits = number of unique folds = 2
    loo_repr = "LeaveOneOut()"
    lpo_repr = "LeavePOut(p=2)"
    kf_repr = "KFold(n_splits=2, random_state=None, shuffle=False)"
    skf_repr = "StratifiedKFold(n_splits=2, random_state=None, shuffle=False)"
    lolo_repr = "LeaveOneGroupOut()"
    lopo_repr = "LeavePGroupsOut(n_groups=2)"
    ss_repr = ("ShuffleSplit(n_splits=10, random_state=0, "
               "test_size=None, train_size=None)")
    ps_repr = "PredefinedSplit(test_fold=array([1, 1, 2, 2]))"
    n_splits_expected = [n_samples, comb(n_samples, p), n_splits, n_splits,
                         n_unique_groups, comb(n_unique_groups, p),
                         n_shuffle_splits, 2]

    for i, (cv, cv_repr) in enumerate(zip(
            [loo, lpo, kf, skf, lolo, lopo, ss, ps],
            [loo_repr, lpo_repr, kf_repr, skf_repr, lolo_repr, lopo_repr,
             ss_repr, ps_repr])):
        # Test if get_n_splits works correctly
        assert_equal(n_splits_expected[i], cv.get_n_splits(X, y, groups))

        # Test if the cross-validator works as expected even if
        # the data is 1d
        np.testing.assert_equal(list(cv.split(X, y, groups)),
                                list(cv.split(X_1d, y, groups)))

        # Test that train, test indices returned are integers
        for train, test in cv.split(X, y, groups):
            assert_equal(np.asarray(train).dtype.kind, 'i')
            assert_equal(np.asarray(test).dtype.kind, 'i')

        # Test if the repr works without any errors
        assert_equal(cv_repr, repr(cv))

    # ValueError for get_n_splits methods
    msg = "The 'X' parameter should not be None."
    assert_raise_message(ValueError, msg,
                         loo.get_n_splits, None, y, groups)
    assert_raise_message(ValueError, msg,
                         lpo.get_n_splits, None, y, groups)
Example 6: test_cross_validator_with_default_params
# Required import: from sklearn import model_selection [as alias]
# Or: from sklearn.model_selection import LeavePOut [as alias]
def test_cross_validator_with_default_params():
    n_samples = 4
    n_unique_groups = 4
    n_splits = 2
    p = 2
    n_shuffle_splits = 10  # (the default value)
    X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    X_1d = np.array([1, 2, 3, 4])
    y = np.array([1, 1, 2, 2])
    groups = np.array([1, 2, 3, 4])
    loo = LeaveOneOut()
    lpo = LeavePOut(p)
    kf = KFold(n_splits)
    skf = StratifiedKFold(n_splits)
    lolo = LeaveOneGroupOut()
    lopo = LeavePGroupsOut(p)
    ss = ShuffleSplit(random_state=0)
    ps = PredefinedSplit([1, 1, 2, 2])  # n_splits = number of unique folds = 2
    loo_repr = "LeaveOneOut()"
    lpo_repr = "LeavePOut(p=2)"
    kf_repr = "KFold(n_splits=2, random_state=None, shuffle=False)"
    skf_repr = "StratifiedKFold(n_splits=2, random_state=None, shuffle=False)"
    lolo_repr = "LeaveOneGroupOut()"
    lopo_repr = "LeavePGroupsOut(n_groups=2)"
    ss_repr = ("ShuffleSplit(n_splits=10, random_state=0, "
               "test_size='default',\n train_size=None)")
    ps_repr = "PredefinedSplit(test_fold=array([1, 1, 2, 2]))"
    n_splits_expected = [n_samples, comb(n_samples, p), n_splits, n_splits,
                         n_unique_groups, comb(n_unique_groups, p),
                         n_shuffle_splits, 2]

    for i, (cv, cv_repr) in enumerate(zip(
            [loo, lpo, kf, skf, lolo, lopo, ss, ps],
            [loo_repr, lpo_repr, kf_repr, skf_repr, lolo_repr, lopo_repr,
             ss_repr, ps_repr])):
        # Test if get_n_splits works correctly
        assert_equal(n_splits_expected[i], cv.get_n_splits(X, y, groups))

        # Test if the cross-validator works as expected even if
        # the data is 1d
        np.testing.assert_equal(list(cv.split(X, y, groups)),
                                list(cv.split(X_1d, y, groups)))

        # Test that train, test indices returned are integers
        for train, test in cv.split(X, y, groups):
            assert_equal(np.asarray(train).dtype.kind, 'i')
            assert_equal(np.asarray(test).dtype.kind, 'i')

        # Test if the repr works without any errors
        assert_equal(cv_repr, repr(cv))

    # ValueError for get_n_splits methods
    msg = "The 'X' parameter should not be None."
    assert_raise_message(ValueError, msg,
                         loo.get_n_splits, None, y, groups)
    assert_raise_message(ValueError, msg,
                         lpo.get_n_splits, None, y, groups)