

Python neighbors.KernelDensity usage examples

This article collects typical usage examples of the sklearn.neighbors.KernelDensity estimator in Python. If you are wondering what neighbors.KernelDensity does, how to use it, or want to see it in real code, the curated examples below should help. You can also browse further usage examples from the sklearn.neighbors module, where KernelDensity lives.


The sections below present 15 code examples of neighbors.KernelDensity, sorted by popularity by default.
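
Before the project-specific examples, here is a minimal, self-contained sketch of the basic KernelDensity workflow (fit on training data, query log-densities with score_samples, draw new samples with sample); the data and bandwidth below are purely illustrative.

import numpy as np
from sklearn.neighbors import KernelDensity

# Illustrative 1-D data drawn from two Gaussian bumps
rng = np.random.RandomState(0)
X = np.concatenate([rng.normal(-2, 0.5, 300),
                    rng.normal(3, 1.0, 700)]).reshape(-1, 1)

# Fit a Gaussian KDE; bandwidth is the main smoothing parameter
kde = KernelDensity(kernel='gaussian', bandwidth=0.4).fit(X)

# score_samples returns log-densities; exponentiate to get the density itself
grid = np.linspace(-5, 7, 200).reshape(-1, 1)
density = np.exp(kde.score_samples(grid))

# Draw new points from the estimated density
new_points = kde.sample(n_samples=5, random_state=0)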

Example 1: __init__

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KernelDensity [as alias]
def __init__(self, D_d_sample, D_delta_t_sample, kde_type='scipy_gaussian', bandwidth=1):
        """

        :param D_d_sample: 1-d numpy array of angular diameter distances to the lens plane
        :param D_delta_t_sample: 1-d numpy array of time-delay distances
        :param kde_type: string; the kernel to use. Either 'scipy_gaussian'
            (scipy.stats.gaussian_kde) or one of the sklearn kernels
            ['gaussian'|'tophat'|'epanechnikov'|'exponential'|'linear'|'cosine'].
            Default is 'scipy_gaussian'.
        :param bandwidth: width of kernel (in same units as the angular diameter quantities)
        """
        values = np.vstack([D_d_sample, D_delta_t_sample])
        if kde_type == 'scipy_gaussian':
            self._PDF_kernel = stats.gaussian_kde(values)
        else:
            from sklearn.neighbors import KernelDensity
            self._kde = KernelDensity(bandwidth=bandwidth, kernel=kde_type)
            # `values` is already stacked above; sklearn expects shape (n_samples, n_features)
            self._kde.fit(values.T)
        self._kde_type = kde_type 
Author: sibirrer, Project: lenstronomy, Lines: 23, Source: kde_likelihood.py
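
For context, here is a hedged sketch of how a two-dimensional KDE fitted with the sklearn branch above (samples stacked row-wise, then transposed for sklearn) can be queried afterwards; the distance values are made up and the helper code is not taken from lenstronomy.

import numpy as np
from sklearn.neighbors import KernelDensity

# Hypothetical posterior samples of the two distances (illustrative values only)
D_d_sample = np.random.normal(1000.0, 50.0, size=5000)
D_delta_t_sample = np.random.normal(3000.0, 120.0, size=5000)

values = np.vstack([D_d_sample, D_delta_t_sample])   # shape (2, n)
kde = KernelDensity(bandwidth=20.0, kernel='gaussian')
kde.fit(values.T)                                    # sklearn expects (n_samples, n_features)

# Log-probability density at a single (D_d, D_delta_t) point
log_p = kde.score_samples(np.array([[1010.0, 2990.0]]))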

Example 2: __init__

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KernelDensity [as alias]
def __init__(self, hybrid=False, kernel='gaussian', n_jobs=-1, seed=None, **kwargs):
        """Init Kernel Density Estimation instance."""
        self.kernel = kernel
        self.n_jobs = n_jobs
        self.seed = seed

        self.model = KernelDensity(kernel=kernel, **kwargs)
        self.bandwidth = self.model.bandwidth

        self.hybrid = hybrid
        self.ae_net = None  # autoencoder network for the case of a hybrid model

        self.results = {
            'train_time': None,
            'test_time': None,
            'test_auc': None,
            'test_scores': None
        } 
Author: lukasruff, Project: Deep-SAD-PyTorch, Lines: 20, Source: kde.py
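
A common way to use such a KDE wrapper for anomaly detection is to take the negative log-density from score_samples as the anomaly score (higher means more anomalous). The sketch below shows that idea directly on a KernelDensity instance; it does not reproduce the Deep-SAD-PyTorch training/test pipeline or its hybrid autoencoder mode.

import numpy as np
from sklearn.neighbors import KernelDensity

X_train = np.random.randn(500, 8)                     # illustrative "normal" data
X_test = np.vstack([np.random.randn(10, 8),
                    np.random.randn(5, 8) + 6.0])     # a few shifted outliers

model = KernelDensity(kernel='gaussian', bandwidth=1.0)
model.fit(X_train)

# Negative log-density: large for points far from the training mass
scores = -model.score_samples(X_test)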

Example 3: gen_exp_name

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KernelDensity [as alias]
def gen_exp_name(model_class, model_kwargs):
    """Generates experiment name from model class and parameters.

    :param model_class: (type) the class, one of GaussianMixture, PCAPreDensity or KernelDensity.
    :param model_kwargs: (dict) constructor arguments to the class.
    :return A string succinctly encoding the class and parameters."""
    if model_class == GaussianMixture:
        n_components = model_kwargs.get("n_components", 1)
        covariance_type = model_kwargs.get("covariance_type", "full")
        return f"gmm_{n_components}_components_{covariance_type}"
    elif model_class == PCAPreDensity:
        if model_kwargs["density_class"] == KernelDensity:
            return "pca_kde"
        elif model_kwargs["density_class"] == GaussianMixture:
            return "pca_gmm"
        else:
            return "pca_unknown"
    elif model_class == KernelDensity:
        return "kde"
    else:
        return "default" 
Author: HumanCompatibleAI, Project: adversarial-policies, Lines: 23, Source: fit_density.py
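
A quick usage illustration of gen_exp_name, assuming the function from the snippet above is in scope (PCAPreDensity is specific to the adversarial-policies project and is not exercised here):

from sklearn.mixture import GaussianMixture
from sklearn.neighbors import KernelDensity

gen_exp_name(GaussianMixture, {"n_components": 3})   # -> "gmm_3_components_full"
gen_exp_name(KernelDensity, {})                      # -> "kde"
gen_exp_name(dict, {})                               # any other class falls through to "default"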

Example 4: test_kde_badargs

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KernelDensity [as alias]
def test_kde_badargs():
    assert_raises(ValueError, KernelDensity,
                  algorithm='blah')
    assert_raises(ValueError, KernelDensity,
                  bandwidth=0)
    assert_raises(ValueError, KernelDensity,
                  kernel='blah')
    assert_raises(ValueError, KernelDensity,
                  metric='blah')
    assert_raises(ValueError, KernelDensity,
                  algorithm='kd_tree', metric='blah')
    kde = KernelDensity()
    assert_raises(ValueError, kde.fit, np.random.random((200, 10)),
                  sample_weight=np.random.random((200, 10)))
    assert_raises(ValueError, kde.fit, np.random.random((200, 10)),
                  sample_weight=-np.random.random(200)) 
Author: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines: 18, Source: test_kde.py
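
For contrast with the invalid arguments exercised above, a minimal sketch of a valid construction and a correctly shaped sample_weight (one non-negative weight per sample):

import numpy as np
from sklearn.neighbors import KernelDensity

X = np.random.random((200, 10))
weights = np.random.random(200)          # shape (n_samples,), non-negative

kde = KernelDensity(algorithm='kd_tree', kernel='gaussian',
                    metric='euclidean', bandwidth=0.5)
kde.fit(X, sample_weight=weights)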

Example 5: test_pickling

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KernelDensity [as alias]
def test_pickling(tmpdir, sample_weight):
    # Make sure that predictions are the same before and after pickling. Used
    # to be a bug because sample_weights wasn't pickled and the resulting tree
    # would miss some info.

    kde = KernelDensity()
    data = np.reshape([1., 2., 3.], (-1, 1))
    kde.fit(data, sample_weight=sample_weight)

    X = np.reshape([1.1, 2.1], (-1, 1))
    scores = kde.score_samples(X)

    file_path = str(tmpdir.join('dump.pkl'))
    _joblib.dump(kde, file_path)
    kde = _joblib.load(file_path)
    scores_pickled = kde.score_samples(X)

    assert_allclose(scores, scores_pickled) 
Author: PacktPublishing, Project: Mastering-Elasticsearch-7.0, Lines: 20, Source: test_kde.py
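
Outside scikit-learn's test suite (which goes through the private _joblib shim), the same persistence pattern with the public joblib package looks roughly like this; the file name is arbitrary:

import numpy as np
import joblib
from sklearn.neighbors import KernelDensity

kde = KernelDensity(bandwidth=0.5).fit(np.random.randn(100, 2))
joblib.dump(kde, 'kde_model.pkl')

restored = joblib.load('kde_model.pkl')
assert np.allclose(kde.score_samples(np.zeros((1, 2))),
                   restored.score_samples(np.zeros((1, 2))))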

Example 6: kde_sklearn

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KernelDensity [as alias]
def kde_sklearn(data, grid, **kwargs):
    """
    Kernel Density Estimation with Scikit-learn

    Parameters
    ----------
    data : numpy.array
        Data points used to compute a density estimator. It
        has `n x p` dimensions, representing n points and p
        variables.
    grid : numpy.array
        Data points at which the density will be estimated. It
        has `m x p` dimensions, representing m points and p
        variables.

    Returns
    -------
    out : numpy.array
        Density estimate. Has `m x 1` dimensions
    """
    kde_skl = KernelDensity(**kwargs)
    kde_skl.fit(data)
    # score_samples() returns the log-likelihood of the samples
    log_pdf = kde_skl.score_samples(grid)
    return np.exp(log_pdf) 
Author: has2k1, Project: plotnine, Lines: 27, Source: density.py
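
A small usage sketch of kde_sklearn, assuming the function defined above is in scope, with illustrative 1-D data reshaped to the expected n x p and m x p layout:

import numpy as np

data = np.random.randn(300, 1)                  # n x p, here p = 1
grid = np.linspace(-4, 4, 100).reshape(-1, 1)   # m x p query points

dens = kde_sklearn(data, grid, kernel='gaussian', bandwidth=0.3)
# dens holds one density value per grid point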

Example 7: test_optuna_search_invalid_param_dist

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KernelDensity [as alias]
def test_optuna_search_invalid_param_dist():
    # type: () -> None

    X, y = make_blobs(n_samples=10)
    est = KernelDensity()
    param_dist = ["kernel", distributions.CategoricalDistribution(("gaussian", "linear"))]
    optuna_search = integration.OptunaSearchCV(
        est,
        param_dist,  # type: ignore
        cv=3,
        error_score="raise",
        random_state=0,
        return_train_score=True,
    )

    with pytest.raises(ValueError, match="param_distributions must be a dictionary."):
        optuna_search.fit(X) 
Author: optuna, Project: optuna, Lines: 19, Source: test_sklearn.py
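
The error above comes from passing a list; the accepted form is a dictionary mapping parameter names to optuna distributions. A hedged sketch, assuming the optuna version used by these tests (where OptunaSearchCV lives in optuna.integration):

from sklearn.datasets import make_blobs
from sklearn.neighbors import KernelDensity
from optuna import distributions, integration

X, _ = make_blobs(n_samples=10)
est = KernelDensity()
param_dist = {
    "kernel": distributions.CategoricalDistribution(("gaussian", "linear")),
}
optuna_search = integration.OptunaSearchCV(est, param_dist, cv=3, random_state=0)
optuna_search.fit(X)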

Example 8: test_optuna_search_pruning_without_partial_fit

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KernelDensity [as alias]
def test_optuna_search_pruning_without_partial_fit():
    # type: () -> None

    X, y = make_blobs(n_samples=10)
    est = KernelDensity()
    param_dist = {}  # type: ignore
    optuna_search = integration.OptunaSearchCV(
        est,
        param_dist,
        cv=3,
        enable_pruning=True,
        error_score="raise",
        random_state=0,
        return_train_score=True,
    )

    with pytest.raises(ValueError, match="estimator must support partial_fit."):
        optuna_search.fit(X) 
Author: optuna, Project: optuna, Lines: 20, Source: test_sklearn.py

Example 9: test_optuna_search_negative_max_iter

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KernelDensity [as alias]
def test_optuna_search_negative_max_iter():
    # type: () -> None

    X, y = make_blobs(n_samples=10)
    est = KernelDensity()
    param_dist = {}  # type: ignore
    optuna_search = integration.OptunaSearchCV(
        est,
        param_dist,
        cv=3,
        max_iter=-1,
        error_score="raise",
        random_state=0,
        return_train_score=True,
    )

    with pytest.raises(ValueError, match="max_iter must be > 0"):
        optuna_search.fit(X) 
Author: optuna, Project: optuna, Lines: 20, Source: test_sklearn.py

Example 10: test_optuna_search_tuple_instead_of_distribution

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KernelDensity [as alias]
def test_optuna_search_tuple_instead_of_distribution():
    # type: () -> None

    X, y = make_blobs(n_samples=10)
    est = KernelDensity()
    param_dist = {"kernel": ("gaussian", "linear")}
    optuna_search = integration.OptunaSearchCV(
        est,
        param_dist,  # type: ignore
        cv=3,
        error_score="raise",
        random_state=0,
        return_train_score=True,
    )

    with pytest.raises(ValueError, match="must be a optuna distribution."):
        optuna_search.fit(X) 
Author: optuna, Project: optuna, Lines: 19, Source: test_sklearn.py

Example 11: test_optuna_search_verbosity

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KernelDensity [as alias]
def test_optuna_search_verbosity(verbose):
    # type: (int) -> None

    X, y = make_blobs(n_samples=10)
    est = KernelDensity()
    param_dist = {}  # type: ignore
    optuna_search = integration.OptunaSearchCV(
        est,
        param_dist,
        cv=3,
        error_score="raise",
        random_state=0,
        return_train_score=True,
        verbose=verbose,
    )
    optuna_search.fit(X) 
Author: optuna, Project: optuna, Lines: 18, Source: test_sklearn.py

Example 12: test_optuna_search_subsample

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KernelDensity [as alias]
def test_optuna_search_subsample():
    # type: () -> None

    X, y = make_blobs(n_samples=10)
    est = KernelDensity()
    param_dist = {}  # type: ignore
    optuna_search = integration.OptunaSearchCV(
        est,
        param_dist,
        cv=3,
        error_score="raise",
        random_state=0,
        return_train_score=True,
        subsample=5,
    )
    optuna_search.fit(X) 
Author: optuna, Project: optuna, Lines: 18, Source: test_sklearn.py

Example 13: test_objectmapper

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KernelDensity [as alias]
def test_objectmapper(self):
        df = pdml.ModelFrame([])
        self.assertIs(df.neighbors.NearestNeighbors,
                      neighbors.NearestNeighbors)
        self.assertIs(df.neighbors.KNeighborsClassifier,
                      neighbors.KNeighborsClassifier)
        self.assertIs(df.neighbors.RadiusNeighborsClassifier,
                      neighbors.RadiusNeighborsClassifier)
        self.assertIs(df.neighbors.KNeighborsRegressor,
                      neighbors.KNeighborsRegressor)
        self.assertIs(df.neighbors.RadiusNeighborsRegressor,
                      neighbors.RadiusNeighborsRegressor)
        self.assertIs(df.neighbors.NearestCentroid, neighbors.NearestCentroid)
        self.assertIs(df.neighbors.BallTree, neighbors.BallTree)
        self.assertIs(df.neighbors.KDTree, neighbors.KDTree)
        self.assertIs(df.neighbors.DistanceMetric, neighbors.DistanceMetric)
        self.assertIs(df.neighbors.KernelDensity, neighbors.KernelDensity) 
Author: pandas-ml, Project: pandas-ml, Lines: 19, Source: test_neighbors.py

Example 14: display

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KernelDensity [as alias]
def display(self, output_filename):
        fig, (self.ax) = plt.subplots(1, 1)
        self.kde = KernelDensity(kernel='gaussian', bandwidth=self.bandwidth)
        has_legend = False
        for dataset in self.datasets:
            self._display_dataset(dataset)
            if dataset.label is not None:
                has_legend = True
        if self.title is not None:
            self.ax.set_xlabel(self.title)
        self.ax.set_ylabel('Density')
        if has_legend:
            self.ax.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=3,
                           mode='expand', borderaxespad=0.)
        fig.savefig(output_filename)
        plt.close(fig) 
Author: ANSSI-FR, Project: SecuML, Lines: 18, Source: density.py
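
_display_dataset is not shown in this excerpt; below is a hedged sketch of what plotting a single dataset's density curve with the KernelDensity instance created above could look like (the function name, the 1-D value layout, and the axis handling are assumptions, not SecuML code):

import numpy as np

def plot_density_curve(ax, kde, values, label=None):
    # Fit the given KernelDensity on 1-D values and plot exp(log-density) over a grid
    values = np.asarray(values).reshape(-1, 1)
    kde.fit(values)
    grid = np.linspace(values.min(), values.max(), 200).reshape(-1, 1)
    ax.plot(grid.ravel(), np.exp(kde.score_samples(grid)), label=label)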

Example 15: test_kde_algorithm_metric_choice

# Required import: from sklearn import neighbors [as alias]
# Or: from sklearn.neighbors import KernelDensity [as alias]
def test_kde_algorithm_metric_choice():
    # Smoke test for various metrics and algorithms
    rng = np.random.RandomState(0)
    X = rng.randn(10, 2)    # 2 features required for haversine dist.
    Y = rng.randn(10, 2)

    for algorithm in ['auto', 'ball_tree', 'kd_tree']:
        for metric in ['euclidean', 'minkowski', 'manhattan',
                       'chebyshev', 'haversine']:
            if algorithm == 'kd_tree' and metric not in KDTree.valid_metrics:
                assert_raises(ValueError, KernelDensity,
                              algorithm=algorithm, metric=metric)
            else:
                kde = KernelDensity(algorithm=algorithm, metric=metric)
                kde.fit(X)
                y_dens = kde.score_samples(Y)
                assert_equal(y_dens.shape, Y.shape[:1]) 
Author: alvarobartt, Project: twitter-stock-recommendation, Lines: 19, Source: test_kde.py
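
As the test's comment notes, the haversine metric needs exactly two features (latitude and longitude, in radians) and pairs with the ball_tree algorithm; a minimal hedged sketch with made-up coordinates:

import numpy as np
from sklearn.neighbors import KernelDensity

# Illustrative latitude/longitude points, converted to radians
latlon_deg = np.array([[48.85, 2.35], [51.51, -0.13], [40.71, -74.01]])
latlon_rad = np.radians(latlon_deg)

kde = KernelDensity(bandwidth=0.05, metric='haversine',
                    kernel='gaussian', algorithm='ball_tree')
kde.fit(latlon_rad)
log_dens = kde.score_samples(np.radians([[52.52, 13.40]]))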


Note: the sklearn.neighbors.KernelDensity examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors. Please refer to the corresponding project's license before distributing or using the code; do not reproduce this article without permission.