本文整理汇总了Python中sklearn.pipeline._name_estimators方法的典型用法代码示例。如果您正苦于以下问题:Python pipeline._name_estimators方法的具体用法?Python pipeline._name_estimators怎么用?Python pipeline._name_estimators使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块sklearn.pipeline
的用法示例。
在下文中一共展示了pipeline._name_estimators方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: make_sparkunion
# Required import: from sklearn import pipeline [as alias]
# Or: from sklearn.pipeline import _name_estimators [as alias]
def make_sparkunion(*transformers):
    """Construct a FeatureUnion from the given transformers.

    Shorthand for the FeatureUnion constructor: each transformer is named
    automatically after its type, so explicit naming is neither required
    nor permitted, and per-transformer weighting is not supported.

    Examples
    --------
    >>> from sklearn.decomposition import PCA, TruncatedSVD
    >>> make_union(PCA(), TruncatedSVD()) # doctest: +NORMALIZE_WHITESPACE
    FeatureUnion(n_jobs=1,
           transformer_list=[('pca', PCA(copy=True, n_components=None,
                                         whiten=False)),
                             ('truncatedsvd',
                              TruncatedSVD(algorithm='randomized',
                                           n_components=2, n_iter=5,
                                           random_state=None, tol=0.0))],
           transformer_weights=None)

    Returns
    -------
    f : FeatureUnion
    """
    # Generate (name, estimator) pairs from the transformer types.
    named_transformers = _name_estimators(transformers)
    return SparkFeatureUnion(named_transformers)
示例2: __init__
# Required import: from sklearn import pipeline [as alias]
# Or: from sklearn.pipeline import _name_estimators [as alias]
def __init__(self, classifiers, vote='classlabel', weights=None):
    """Store the ensemble configuration.

    Parameters
    ----------
    classifiers : list of estimators
        Base classifiers making up the ensemble; also exposed as a
        name -> estimator mapping via ``named_classifiers``.
    vote : str, default 'classlabel'
        Voting strategy, stored as given.
    weights : list or None, default None
        Optional per-classifier weights, stored as given.
    """
    self.classifiers = classifiers
    # _name_estimators yields (name, estimator) pairs, so dict() builds
    # the same mapping the hand-written comprehension would.
    self.named_classifiers = dict(_name_estimators(classifiers))
    self.vote = vote
    self.weights = weights
示例3: make_pipeline
# Required import: from sklearn import pipeline [as alias]
# Or: from sklearn.pipeline import _name_estimators [as alias]
def make_pipeline(*steps):
    """Build a Pipeline whose step names are generated automatically.

    Shorthand for the Pipeline constructor: each estimator is named
    after the lowercase of its type, so explicit naming is neither
    required nor allowed.

    Parameters
    ----------
    *steps : list
        List of estimators.

    Returns
    -------
    p : Pipeline

    Examples
    --------
    >>> from kenchi.outlier_detection import MiniBatchKMeans
    >>> from kenchi.pipeline import make_pipeline
    >>> from sklearn.preprocessing import StandardScaler
    >>> scaler = StandardScaler()
    >>> det = MiniBatchKMeans()
    >>> pipeline = make_pipeline(scaler, det)
    """
    named_steps = _name_estimators(steps)
    return Pipeline(named_steps)
示例4: make_union_mp
# Required import: from sklearn import pipeline [as alias]
# Or: from sklearn.pipeline import _name_estimators [as alias]
def make_union_mp(*transformers, **kwargs):
    """Construct a FeatureUnion from the given transformers.

    This is a shorthand for the FeatureUnion constructor; it does not
    require, and does not permit, naming the transformers. Instead, they
    will be given names automatically based on their types. It also does
    not allow weighting.

    Parameters
    ----------
    *transformers : list of estimators
    n_jobs : int, optional
        Number of jobs to run in parallel (default 1).

    Returns
    -------
    f : FeatureUnion

    Raises
    ------
    TypeError
        If any keyword argument other than ``n_jobs`` is passed.

    Examples
    --------
    >>> from sklearn.decomposition import PCA, TruncatedSVD
    >>> from sklearn.pipeline import make_union
    >>> make_union(PCA(), TruncatedSVD()) # doctest: +NORMALIZE_WHITESPACE
    FeatureUnion(n_jobs=1,
           transformer_list=[('pca',
                              PCA(copy=True, iterated_power='auto',
                                  n_components=None, random_state=None,
                                  svd_solver='auto', tol=0.0, whiten=False)),
                             ('truncatedsvd',
                              TruncatedSVD(algorithm='randomized',
                                           n_components=2, n_iter=5,
                                           random_state=None, tol=0.0))],
           transformer_weights=None)
    """
    n_jobs = kwargs.pop('n_jobs', 1)
    if kwargs:
        # We do not currently support `transformer_weights` as we may want
        # to change its type spec in make_union. Report *every* unknown
        # keyword (sorted, for a deterministic message) instead of an
        # arbitrary single one, so callers can fix all mistakes at once.
        raise TypeError('Unknown keyword arguments: "{}"'
                        .format('", "'.join(sorted(kwargs))))
    return FeatureUnionMP(_name_estimators(transformers), n_jobs=n_jobs)
示例5: make_union
# Required import: from sklearn import pipeline [as alias]
# Or: from sklearn.pipeline import _name_estimators [as alias]
def make_union(*transformers, **kwargs):
    """Construct a FeatureUnion from the given transformers.

    This is a shorthand for the FeatureUnion constructor; it does not
    require, and does not permit, naming the transformers. Instead, they
    will be given names automatically based on their types. It also does
    not allow weighting.

    Parameters
    ----------
    *transformers : list of estimators
    n_jobs : int, optional
        Number of jobs to run in parallel (default 1).
    concatenate : bool, optional
        Forwarded to the FeatureUnion constructor (default True).

    Returns
    -------
    f : FeatureUnion

    Raises
    ------
    TypeError
        If any keyword argument other than ``n_jobs`` or ``concatenate``
        is passed.

    Examples
    --------
    >>> from sklearn.decomposition import PCA, TruncatedSVD
    >>> from sklearn.pipeline import make_union
    >>> make_union(PCA(), TruncatedSVD()) # doctest: +NORMALIZE_WHITESPACE
    FeatureUnion(n_jobs=1,
           transformer_list=[('pca',
                              PCA(copy=True, iterated_power='auto',
                                  n_components=None, random_state=None,
                                  svd_solver='auto', tol=0.0, whiten=False)),
                             ('truncatedsvd',
                              TruncatedSVD(algorithm='randomized',
                                           n_components=2, n_iter=5,
                                           random_state=None, tol=0.0))],
           transformer_weights=None)
    """
    n_jobs = kwargs.pop('n_jobs', 1)
    concatenate = kwargs.pop('concatenate', True)
    if kwargs:
        # We do not currently support `transformer_weights` as we may want
        # to change its type spec in make_union. Report *every* unknown
        # keyword (sorted, for a deterministic message) instead of an
        # arbitrary single one, so callers can fix all mistakes at once.
        raise TypeError('Unknown keyword arguments: "{}"'
                        .format('", "'.join(sorted(kwargs))))
    # PEP 8: no space after `=` in keyword arguments (original had
    # `n_jobs= n_jobs, concatenate= concatenate`).
    return FeatureUnion(_name_estimators(transformers), n_jobs=n_jobs,
                        concatenate=concatenate)
示例6: make_debug_pipeline
# Required import: from sklearn import pipeline [as alias]
# Or: from sklearn.pipeline import _name_estimators [as alias]
def make_debug_pipeline(*steps, **kwargs):
    """Construct a DebugPipeline from the given estimators.

    This is a shorthand for the DebugPipeline constructor; it does not
    require, and does not permit, naming the estimators. Instead, their
    names will be set to the lowercase of their types automatically.

    Parameters
    ----------
    *steps : list of estimators.
    memory : None, str or object with the joblib.Memory interface, optional
        Used to cache the fitted transformers of the pipeline. By default,
        no caching is performed. If a string is given, it is the path to
        the caching directory. Enabling caching triggers a clone of
        the transformers before fitting. Therefore, the transformer
        instance given to the pipeline cannot be inspected
        directly. Use the attribute ``named_steps`` or ``steps`` to
        inspect estimators within the pipeline. Caching the
        transformers is advantageous when fitting is time consuming.
    verbose : boolean, default=False
        If True, the time elapsed while fitting each step will be printed
        as it is completed.
    log_callback : string, default=None
        The callback function that logs information in between each
        intermediate step. Defaults to None. If set to `'default'`,
        :func:`default_log_callback` is used.
        See :func:`default_log_callback` for an example.

    Returns
    -------
    p : DebugPipeline

    Raises
    ------
    TypeError
        If any keyword argument other than ``memory``, ``verbose`` or
        ``log_callback`` is passed.

    See Also
    --------
    sklego.pipeline.DebugPipeline : Class for creating a pipeline of
        transforms with a final estimator.

    Examples
    --------
    >>> from sklearn.naive_bayes import GaussianNB
    >>> from sklearn.preprocessing import StandardScaler
    >>> make_debug_pipeline(StandardScaler(), GaussianNB(priors=None))
    DebugPipeline(steps=[('standardscaler', StandardScaler()),
                    ('gaussiannb', GaussianNB())])
    """
    memory = kwargs.pop('memory', None)
    verbose = kwargs.pop('verbose', False)
    log_callback = kwargs.pop('log_callback', None)
    if kwargs:
        # Report *every* unknown keyword (sorted, for a deterministic
        # message) instead of an arbitrary single one, so callers can fix
        # all mistakes at once.
        raise TypeError('Unknown keyword arguments: "{}"'
                        .format('", "'.join(sorted(kwargs))))
    return DebugPipeline(_name_estimators(steps), memory=memory,
                         verbose=verbose, log_callback=log_callback)