This page collects typical usage examples of Python's sklearn.model_selection.BaseCrossValidator. If you are unsure how to use model_selection.BaseCrossValidator — what it is for, how to call it, or what real code that uses it looks like — the curated examples below may help. You can also read further about the enclosing module, sklearn.model_selection.
Below are 5 code examples of model_selection.BaseCrossValidator, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: build_split_dict
# Required import: from sklearn import model_selection [as alias]
# Or: from sklearn.model_selection import BaseCrossValidator [as alias]
def build_split_dict(X: pd.DataFrame, split_obj: Type[BaseCrossValidator]) -> dict:
    """
    Build a flat dictionary describing each cross-validation fold of ``X``.

    Parameters
    ----------
    X: pd.DataFrame
        The training dataset that will be split during cross-validation.
    split_obj: Type[sklearn.model_selection.BaseCrossValidator]
        The cross-validation object that returns train, test indices for splitting.

    Returns
    -------
    split_metadata: Dict[str,Any]
        Dictionary of cross-validation train/test split metadata, keyed
        ``fold-<k>-{train,test}-{start,end}`` and ``fold-<k>-n-{train,test}``.
    """
    metadata: Dict[str, Any] = {}
    # Folds are numbered from 1 in the emitted keys.
    for fold, (train_idx, test_idx) in enumerate(split_obj.split(X), start=1):
        prefix = f"fold-{fold}"
        # Boundary index labels of each fold, taken from X's index.
        metadata[f"{prefix}-train-start"] = X.index[train_idx[0]]
        metadata[f"{prefix}-train-end"] = X.index[train_idx[-1]]
        metadata[f"{prefix}-test-start"] = X.index[test_idx[0]]
        metadata[f"{prefix}-test-end"] = X.index[test_idx[-1]]
        # Fold sizes.
        metadata[f"{prefix}-n-train"] = len(train_idx)
        metadata[f"{prefix}-n-test"] = len(test_idx)
    return metadata
Example 2: check_cv
# Required import: from sklearn import model_selection [as alias]
# Or: from sklearn.model_selection import BaseCrossValidator [as alias]
def check_cv(cv: Union[int, Iterable, BaseCrossValidator] = 5,
             y: Optional[Union[pd.Series, np.ndarray]] = None,
             stratified: bool = False,
             random_state: int = 0):
    """
    Normalize ``cv`` into a concrete scikit-learn cross-validation splitter.

    A ``None`` value falls back to 5 folds.  An integer fold count is turned
    into a shuffled ``StratifiedKFold`` when stratification is requested and
    the target looks like a classification target, otherwise into a shuffled
    ``KFold``.  Anything else (an existing splitter or an iterable of splits)
    is delegated to ``sklearn.model_selection.check_cv`` for validation.
    """
    if cv is None:
        cv = 5
    if not isinstance(cv, numbers.Integral):
        # Already a splitter (or an iterable of splits): let sklearn validate it.
        return model_selection.check_cv(cv, y, stratified)
    # Stratify only when asked for AND the target is a classification target.
    use_stratified = (
        stratified
        and y is not None
        and type_of_target(y) in ('binary', 'multiclass')
    )
    if use_stratified:
        return StratifiedKFold(cv, shuffle=True, random_state=random_state)
    return KFold(cv, shuffle=True, random_state=random_state)
Example 3: __init__
# Required import: from sklearn import model_selection [as alias]
# Or: from sklearn.model_selection import BaseCrossValidator [as alias]
def __init__(self, n: int, base_validator: BaseCrossValidator):
    """Store ``n`` and the wrapped cross-validator for later use."""
    self.n = n
    self.base_validator = base_validator
Example 4: __init__
# Required import: from sklearn import model_selection [as alias]
# Or: from sklearn.model_selection import BaseCrossValidator [as alias]
def __init__(self, base_transformer: BaseEstimator,
             cv: Optional[Union[int, Iterable, BaseCrossValidator]] = None, return_same_type: bool = True,
             groups: Optional[pd.Series] = None):
    """Record the wrapped transformer and cross-validation configuration."""
    self.base_transformer = base_transformer
    self.cv = cv
    self.return_same_type = return_same_type
    self.groups = groups
    # Filled in later — presumably when the transformer is fitted; TODO confirm.
    self.n_splits = None
    self.transformers = None
Example 5: __init__
# Required import: from sklearn import model_selection [as alias]
# Or: from sklearn.model_selection import BaseCrossValidator [as alias]
def __init__(
    self,
    params: Dict[str, Any],
    train_set: "lgb.Dataset",
    num_boost_round: int = 1000,
    folds: Optional[
        Union[
            Generator[Tuple[int, int], None, None],
            Iterator[Tuple[int, int]],
            "BaseCrossValidator",
        ]
    ] = None,
    nfold: int = 5,
    stratified: bool = True,
    shuffle: bool = True,
    fobj: Optional[Callable[..., Any]] = None,
    feval: Optional[Callable[..., Any]] = None,
    feature_name: str = "auto",
    categorical_feature: str = "auto",
    early_stopping_rounds: Optional[int] = None,
    fpreproc: Optional[Callable[..., Any]] = None,
    verbose_eval: Optional[Union[bool, int]] = True,
    show_stdv: bool = True,
    seed: int = 0,
    callbacks: Optional[List[Callable[..., Any]]] = None,
    time_budget: Optional[int] = None,
    sample_size: Optional[int] = None,
    study: Optional[optuna.study.Study] = None,
    optuna_callbacks: Optional[List[Callable[[Study, FrozenTrial], None]]] = None,
    verbosity: int = 1,
) -> None:
    """Initialize the CV tuner: forward shared options to the base tuner, then stash the cv-only ones."""
    # Options common to both train- and cv-based tuning go to the base class.
    super(LightGBMTunerCV, self).__init__(
        params,
        train_set,
        num_boost_round,
        fobj=fobj,
        feval=feval,
        feature_name=feature_name,
        categorical_feature=categorical_feature,
        early_stopping_rounds=early_stopping_rounds,
        verbose_eval=verbose_eval,
        callbacks=callbacks,
        time_budget=time_budget,
        sample_size=sample_size,
        study=study,
        optuna_callbacks=optuna_callbacks,
        verbosity=verbosity,
    )
    # Options specific to lgb.cv (as opposed to lgb.train) are merged into
    # the keyword arguments the base class forwards to LightGBM.
    cv_only_kwargs = {
        "folds": folds,
        "nfold": nfold,
        "stratified": stratified,
        "shuffle": shuffle,
        "show_stdv": show_stdv,
        "seed": seed,
        "fpreproc": fpreproc,
    }
    self.lgbm_kwargs.update(cv_only_kwargs)