本文整理汇总了Python中sklearn.model_selection.BaseCrossValidator方法的典型用法代码示例。如果您正苦于以下问题:Python model_selection.BaseCrossValidator方法的具体用法?Python model_selection.BaseCrossValidator怎么用?Python model_selection.BaseCrossValidator使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块sklearn.model_selection
的用法示例。
在下文中一共展示了model_selection.BaseCrossValidator方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: build_split_dict
# 需要导入模块: from sklearn import model_selection [as 别名]
# 或者: from sklearn.model_selection import BaseCrossValidator [as 别名]
def build_split_dict(X: pd.DataFrame, split_obj: BaseCrossValidator) -> dict:
    """
    Get dictionary of cross-validation training dataset split metadata.

    Parameters
    ----------
    X: pd.DataFrame
        The training dataset that will be split during cross-validation.
    split_obj: sklearn.model_selection.BaseCrossValidator
        The cross-validation splitter *instance* whose ``split`` method
        yields (train, test) index arrays.  (The previous annotation
        ``Type[BaseCrossValidator]`` was incorrect: ``split`` is called on
        an instance below, not on the class.)

    Returns
    -------
    split_metadata: Dict[str, Any]
        For each fold: the index labels at the start/end of the train and
        test segments, plus the number of samples in each segment.
    """
    split_metadata: Dict[str, Any] = {}
    for i, (train_ind, test_ind) in enumerate(split_obj.split(X)):
        fold = f"fold-{i + 1}"
        # Record boundary index labels and segment sizes in a single update.
        split_metadata.update(
            {
                f"{fold}-train-start": X.index[train_ind[0]],
                f"{fold}-train-end": X.index[train_ind[-1]],
                f"{fold}-test-start": X.index[test_ind[0]],
                f"{fold}-test-end": X.index[test_ind[-1]],
                f"{fold}-n-train": len(train_ind),
                f"{fold}-n-test": len(test_ind),
            }
        )
    return split_metadata
示例2: check_cv
# 需要导入模块: from sklearn import model_selection [as 别名]
# 或者: from sklearn.model_selection import BaseCrossValidator [as 别名]
def check_cv(cv: Union[int, Iterable, BaseCrossValidator] = 5,
             y: Optional[Union[pd.Series, np.ndarray]] = None,
             stratified: bool = False,
             random_state: int = 0):
    """Normalize *cv* into a concrete cross-validation splitter.

    ``None`` falls back to 5 folds.  An integral *cv* becomes a shuffled
    K-fold splitter — stratified only when requested and *y* is a
    binary/multiclass target.  Anything else (an iterable of splits or a
    splitter object) is delegated to ``sklearn.model_selection.check_cv``.
    """
    if cv is None:
        cv = 5
    if not isinstance(cv, numbers.Integral):
        # Not a fold count — let sklearn validate/wrap it.
        return model_selection.check_cv(cv, y, stratified)
    wants_stratified = (
        stratified
        and y is not None
        and type_of_target(y) in ('binary', 'multiclass')
    )
    splitter_cls = StratifiedKFold if wants_stratified else KFold
    return splitter_cls(cv, shuffle=True, random_state=random_state)
示例3: __init__
# 需要导入模块: from sklearn import model_selection [as 别名]
# 或者: from sklearn.model_selection import BaseCrossValidator [as 别名]
def __init__(self, n: int, base_validator: BaseCrossValidator):
    """Store the count ``n`` and the wrapped cross-validation splitter.

    What ``n`` limits/repeats is determined by the enclosing class —
    this initializer only records the two values.
    """
    self.n = n
    self.base_validator = base_validator
示例4: __init__
# 需要导入模块: from sklearn import model_selection [as 别名]
# 或者: from sklearn.model_selection import BaseCrossValidator [as 别名]
def __init__(self, base_transformer: BaseEstimator,
             cv: Optional[Union[int, Iterable, BaseCrossValidator]] = None, return_same_type: bool = True,
             groups: Optional[pd.Series] = None):
    """Configure cross-validated application of *base_transformer*.

    ``cv`` and ``groups`` describe how the data will be split;
    ``return_same_type`` controls whether outputs mirror the input type.
    """
    # Caller-supplied configuration, stored verbatim.
    self.base_transformer = base_transformer
    self.cv = cv
    self.groups = groups
    self.return_same_type = return_same_type
    # Populated later (presumably during fitting) — TODO confirm in class.
    self.n_splits = None
    self.transformers = None
示例5: __init__
# 需要导入模块: from sklearn import model_selection [as 别名]
# 或者: from sklearn.model_selection import BaseCrossValidator [as 别名]
def __init__(
    self,
    params: Dict[str, Any],
    train_set: "lgb.Dataset",
    num_boost_round: int = 1000,
    folds: Optional[
        Union[
            Generator[Tuple[int, int], None, None],
            Iterator[Tuple[int, int]],
            "BaseCrossValidator",
        ]
    ] = None,
    nfold: int = 5,
    stratified: bool = True,
    shuffle: bool = True,
    fobj: Optional[Callable[..., Any]] = None,
    feval: Optional[Callable[..., Any]] = None,
    feature_name: str = "auto",
    categorical_feature: str = "auto",
    early_stopping_rounds: Optional[int] = None,
    fpreproc: Optional[Callable[..., Any]] = None,
    verbose_eval: Optional[Union[bool, int]] = True,
    show_stdv: bool = True,
    seed: int = 0,
    callbacks: Optional[List[Callable[..., Any]]] = None,
    time_budget: Optional[int] = None,
    sample_size: Optional[int] = None,
    study: Optional[optuna.study.Study] = None,
    optuna_callbacks: Optional[List[Callable[[Study, FrozenTrial], None]]] = None,
    verbosity: int = 1,
) -> None:
    """Set up a cross-validated LightGBM tuner.

    Tuner-wide arguments are forwarded to the base tuner via
    ``super().__init__``; the cross-validation-specific arguments
    (``folds``, ``nfold``, ``stratified``, ``shuffle``, ``show_stdv``,
    ``seed``, ``fpreproc``) are stashed in ``self.lgbm_kwargs`` —
    presumably consumed later when ``lgb.cv`` is invoked (TODO confirm
    against the base class, which is outside this view).
    """
    # Forward everything the base tuner understands, unchanged.
    super(LightGBMTunerCV, self).__init__(
        params,
        train_set,
        num_boost_round,
        fobj=fobj,
        feval=feval,
        feature_name=feature_name,
        categorical_feature=categorical_feature,
        early_stopping_rounds=early_stopping_rounds,
        verbose_eval=verbose_eval,
        callbacks=callbacks,
        time_budget=time_budget,
        sample_size=sample_size,
        study=study,
        optuna_callbacks=optuna_callbacks,
        verbosity=verbosity,
    )
    # CV-specific keyword arguments, kept for the eventual CV call.
    # NOTE(review): ``self.lgbm_kwargs`` is assumed to be created by the
    # base-class __init__ above — confirm.
    self.lgbm_kwargs["folds"] = folds
    self.lgbm_kwargs["nfold"] = nfold
    self.lgbm_kwargs["stratified"] = stratified
    self.lgbm_kwargs["shuffle"] = shuffle
    self.lgbm_kwargs["show_stdv"] = show_stdv
    self.lgbm_kwargs["seed"] = seed
    self.lgbm_kwargs["fpreproc"] = fpreproc