

Python validation.FLOAT_DTYPES Attribute Code Examples

This article collects typical usage examples of the sklearn.utils.validation.FLOAT_DTYPES attribute in Python. If you are wondering what FLOAT_DTYPES is for, how to use it, or where to find working examples, the curated snippets below should help. You can also explore further usage examples from the sklearn.utils.validation module.


The sections below show 15 code examples of the validation.FLOAT_DTYPES attribute, sorted by popularity by default.
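Before the examples, a quick orientation: FLOAT_DTYPES is a module-level constant in sklearn.utils.validation listing the floating-point dtypes that check_array and check_X_y accept or convert to. In recent scikit-learn releases it is the tuple (numpy.float64, numpy.float32, numpy.float16). A minimal sketch of the common validation pattern (output comments assume a recent scikit-learn version):

import numpy as np
from sklearn.utils.validation import FLOAT_DTYPES, check_array

print(FLOAT_DTYPES)  # (numpy.float64, numpy.float32, numpy.float16) in recent releases

# Integer input is converted to float64, the first entry of FLOAT_DTYPES ...
X_int = np.array([[1, 2], [3, 4]])
print(check_array(X_int, dtype=FLOAT_DTYPES).dtype)  # float64

# ... while input that already has one of the listed float dtypes is kept as-is.
X_f32 = np.array([[1.0, 2.0]], dtype=np.float32)
print(check_array(X_f32, dtype=FLOAT_DTYPES).dtype)  # float32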

Example 1: fit

# Required import: from sklearn.utils import validation [as alias]
# Or: from sklearn.utils.validation import FLOAT_DTYPES [as alias]
def fit(self, X, y=None):
        """
        Fit the model using X as training data.

        :param X: array-like, shape=(n_samples, n_features) training data.
        :param y: ignored but kept in for pipeline support
        :return: Returns an instance of self.
        """
        X = check_array(X, estimator=self, dtype=FLOAT_DTYPES)
        if self.n_components < 2:
            raise ValueError("Number of components must be at least two.")
        if not self.threshold:
            raise ValueError(f"The `threshold` value cannot be `None`.")

        self.umap_ = umap.UMAP(
            n_components=self.n_components,
            n_neighbors=self.n_neighbors,
            min_dist=self.min_dist,
            metric=self.metric,
            random_state=self.random_state,
        )
        self.umap_.fit(X, y)
        self.offset_ = -self.threshold
        return self 
Author: koaning, Project: scikit-lego, Lines of code: 26, Source file: umap_reconstruction.py

Example 2: predict_proba

# Required import: from sklearn.utils import validation [as alias]
# Or: from sklearn.utils.validation import FLOAT_DTYPES [as alias]
def predict_proba(self, X: np.array):
        check_is_fitted(self, ["gmms_", "classes_", "num_fit_cols_"])
        X = check_array(X, estimator=self, dtype=FLOAT_DTYPES)
        if self.num_fit_cols_ != X.shape[1]:
            raise ValueError(
                f"number of columns {X.shape[1]} does not match fit size {self.num_fit_cols_}"
            )
        check_is_fitted(self, ["gmms_", "classes_"])
        probs = np.zeros((X.shape[0], len(self.classes_)))
        for k, v in self.gmms_.items():
            class_idx = int(np.argwhere(self.classes_ == k))
            probs[:, class_idx] = np.array(
                [
                    m.score_samples(np.expand_dims(X[:, idx], 1))
                    for idx, m in enumerate(v)
                ]
            ).sum(axis=0)
        likelihood = np.exp(probs)
        return likelihood / likelihood.sum(axis=1).reshape(-1, 1) 
Author: koaning, Project: scikit-lego, Lines of code: 21, Source file: naive_bayes.py
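The final normalization in this example exponentiates the summed log-likelihoods directly, which can overflow or underflow for very large or very small scores. Below is a NumPy-only sketch of the same row-wise normalization done with the log-sum-exp trick; it is an illustration, not part of the scikit-lego source:

import numpy as np

def normalize_log_probs(log_probs: np.ndarray) -> np.ndarray:
    """Row-wise softmax over per-class log-likelihoods, computed stably."""
    # Subtracting each row's maximum before exponentiating prevents overflow;
    # the result equals np.exp(log_probs) / np.exp(log_probs).sum(axis=1, keepdims=True).
    shifted = log_probs - log_probs.max(axis=1, keepdims=True)
    expd = np.exp(shifted)
    return expd / expd.sum(axis=1, keepdims=True)

log_probs = np.array([[-1000.0, -1001.0], [-2.0, -1.0]])
print(normalize_log_probs(log_probs))  # each row sums to 1, no overflow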

Example 3: _preprocess_data

# Required import: from sklearn.utils import validation [as alias]
# Or: from sklearn.utils.validation import FLOAT_DTYPES [as alias]
def _preprocess_data(X, y, fit_intercept, epsilon=1.0, bounds_X=None, bounds_y=None, copy=True, check_input=True,
                     **unused_args):
    warn_unused_args(unused_args)

    if check_input:
        X = check_array(X, copy=copy, accept_sparse=False, dtype=FLOAT_DTYPES)
    elif copy:
        X = X.copy(order='K')

    y = np.asarray(y, dtype=X.dtype)
    X_scale = np.ones(X.shape[1], dtype=X.dtype)

    if fit_intercept:
        bounds_X = check_bounds(bounds_X, X.shape[1])
        bounds_y = check_bounds(bounds_y, y.shape[1] if y.ndim > 1 else 1)

        X = clip_to_bounds(X, bounds_X)
        y = clip_to_bounds(y, bounds_y)

        X_offset = mean(X, axis=0, bounds=bounds_X, epsilon=epsilon, accountant=BudgetAccountant())
        X -= X_offset
        y_offset = mean(y, axis=0, bounds=bounds_y, epsilon=epsilon, accountant=BudgetAccountant())
        y = y - y_offset
    else:
        X_offset = np.zeros(X.shape[1], dtype=X.dtype)
        if y.ndim == 1:
            y_offset = X.dtype.type(0)
        else:
            y_offset = np.zeros(y.shape[1], dtype=X.dtype)

    return X, y, X_offset, y_offset, X_scale


# noinspection PyPep8Naming,PyAttributeOutsideInit 
Author: IBM, Project: differential-privacy-library, Lines of code: 36, Source file: linear_regression.py
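For readers unfamiliar with the differential-privacy helpers used above (check_bounds, clip_to_bounds, the epsilon-budgeted mean, BudgetAccountant), the non-private analogue of the centering step is plain mean subtraction, much like scikit-learn's own linear-model preprocessing. A simplified NumPy sketch of that analogue, for illustration only; it is not the diffprivlib implementation:

import numpy as np
from sklearn.utils.validation import FLOAT_DTYPES, check_array

def preprocess_data_nonprivate(X, y, fit_intercept=True):
    """Center X and y by their ordinary (non-private) means when fitting an intercept."""
    X = check_array(X, dtype=FLOAT_DTYPES)
    y = np.asarray(y, dtype=X.dtype)
    if fit_intercept:
        X_offset = X.mean(axis=0)
        y_offset = y.mean(axis=0)
        X = X - X_offset
        y = y - y_offset
    else:
        X_offset = np.zeros(X.shape[1], dtype=X.dtype)
        y_offset = X.dtype.type(0) if y.ndim == 1 else np.zeros(y.shape[1], dtype=X.dtype)
    X_scale = np.ones(X.shape[1], dtype=X.dtype)
    return X, y, X_offset, y_offset, X_scale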

Example 4: transform

# Required import: from sklearn.utils import validation [as alias]
# Or: from sklearn.utils.validation import FLOAT_DTYPES [as alias]
def transform(self, X=None, copy=True, is_query=False):
        """
        Parameters
        ----------
        X : sparse matrix, [n_samples, n_features]
            document-term query matrix
        copy : boolean, optional (default=True)
        is_query : boolean (default=False)
            whether to transform a query or the documents database

        Returns
        -------
        vectors : sparse matrix, [n_samples, n_features]

        """
        if is_query:
            X = check_array(X, accept_sparse="csr", dtype=FLOAT_DTYPES, copy=copy)
            if not sp.issparse(X):
                X = sp.csr_matrix(X, dtype=np.float64)

            n_samples, n_features = X.shape

            expected_n_features = self._doc_matrix.shape[1]
            if n_features != expected_n_features:
                raise ValueError(
                    "Input has n_features=%d while the model"
                    " has been trained with n_features=%d"
                    % (n_features, expected_n_features)
                )

            if self.use_idf:
                check_is_fitted(self, "_idf_diag", "idf vector is not fitted")
                X = sp.csr_matrix(X.toarray() * self._idf_diag.diagonal())

            return X

        else:
            return self._doc_matrix 
Author: cdqa-suite, Project: cdQA, Lines of code: 40, Source file: text_transformers.py
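Note that the query branch above densifies the sparse matrix (X.toarray()) before applying the idf weights, which defeats the purpose of the CSR format for large vocabularies. A short scipy sketch of an equivalent step that stays sparse; this is an illustration, not the cdQA source:

import numpy as np
import scipy.sparse as sp

# Assume X is a CSR query-term matrix and idf is a 1-D array of per-term weights.
X = sp.csr_matrix(np.array([[1.0, 0.0, 2.0], [0.0, 3.0, 0.0]]))
idf = np.array([1.5, 2.0, 0.5])

# Right-multiplying by a diagonal matrix scales each column without densifying.
X_weighted = X @ sp.diags(idf)
print(X_weighted.toarray())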

Example 5: _validate_input

# Required import: from sklearn.utils import validation [as alias]
# Or: from sklearn.utils.validation import FLOAT_DTYPES [as alias]
def _validate_input(self, X):
        allowed_strategies = ["mean", "median", "most_frequent", "constant"]
        if self.strategy not in allowed_strategies:
            raise ValueError("Can only use these strategies: {0} "
                             " got strategy={1}".format(allowed_strategies,
                                                        self.strategy))

        if self.strategy in ("most_frequent", "constant"):
            dtype = None
        else:
            dtype = FLOAT_DTYPES

        if not is_scalar_nan(self.missing_values):
            force_all_finite = True
        else:
            force_all_finite = False # "allow-nan"

        try:
            X = check_array(X, accept_sparse='csc', dtype=dtype,
                            force_all_finite=force_all_finite, copy=self.copy)
        except ValueError as ve:
            if "could not convert" in str(ve):
                raise ValueError("Cannot use {0} strategy with non-numeric "
                                 "data. Received datatype :{1}."
                                 "".format(self.strategy, X.dtype.kind))
            else:
                raise ve

        _check_inputs_dtype(X, self.missing_values)
        if X.dtype.kind not in ("i", "u", "f", "O"):
            raise ValueError("_SimpleImputer does not support data with dtype "
                             "{0}. Please provide either a numeric array (with"
                             " a floating point or integer dtype) or "
                             "categorical data represented either as an array "
                             "with integer dtype or an array of string values "
                             "with an object dtype.".format(X.dtype))

        return X 
Author: YyzHarry, Project: ME-Net, Lines of code: 40, Source file: iterative_imputer.py
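The force_all_finite switch is what lets an imputer accept the very NaNs it is meant to fill. This copy falls back to False (which also lets inf through), while upstream scikit-learn's imputers use the stricter string value "allow-nan". A minimal sketch of the difference, assuming a scikit-learn version where check_array still accepts the force_all_finite keyword:

import numpy as np
from sklearn.utils.validation import FLOAT_DTYPES, check_array

X = np.array([[1.0, np.nan], [3.0, 4.0]])

# Default validation (force_all_finite=True) rejects NaN outright.
try:
    check_array(X, dtype=FLOAT_DTYPES)
except ValueError as err:
    print("rejected:", err)

# "allow-nan" keeps NaN (so the imputer can fill it) but still rejects inf.
print(check_array(X, dtype=FLOAT_DTYPES, force_all_finite="allow-nan"))

# False disables the finiteness check entirely: both NaN and inf pass through.
print(check_array(X, dtype=FLOAT_DTYPES, force_all_finite=False))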

Example 6: fit

# Required import: from sklearn.utils import validation [as alias]
# Or: from sklearn.utils.validation import FLOAT_DTYPES [as alias]
def fit(self, X, y=None):
        """Fit RobustStandardScaler to X.

        If the input is sparse, `fit` overrides `self.with_mean` to standardize without subtracting the mean
        (subtracting the mean would break sparsity).

        If the data is dense, the learned mean is zeroed for mostly-zero features and the data is centered and scaled.

        Parameters
        ----------
        X : array-like, shape [n_samples, n_features]
            The data to standardize.

        Returns
        -------
        self : RobustStandardScaler
        """
        X = check_array(
            X, accept_sparse=("csr", "csc"), estimator=self, dtype=FLOAT_DTYPES, force_all_finite="allow-nan"
        )

        with_mean = True
        if issparse(X):
            with_mean = False

        self.scaler_ = StandardScaler(with_mean=with_mean, with_std=True, copy=self.copy)
        self.scaler_.fit(X)

        if self.scaler_.with_mean:
            nnz_mean_mask = np.where(np.count_nonzero(X, axis=0) / X.shape[0] > 0.3, 1, 0)
            self.scaler_.mean_ = self.scaler_.mean_ * nnz_mean_mask

        return self 
Author: aws, Project: sagemaker-scikit-learn-extension, Lines of code: 35, Source file: data.py
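The post-fit adjustment above zeroes the learned mean for columns where 30% or fewer of the values are non-zero, so mostly-zero features are scaled but not centered. A tiny standalone NumPy illustration of that mask (not the sagemaker-scikit-learn-extension code):

import numpy as np

X = np.array([
    [0.0, 1.0],
    [0.0, 2.0],
    [5.0, 3.0],
    [0.0, 4.0],
])
# Fraction of non-zero entries per column: [0.25, 1.0].
nnz_fraction = np.count_nonzero(X, axis=0) / X.shape[0]
nnz_mean_mask = np.where(nnz_fraction > 0.3, 1, 0)
print(nnz_mean_mask)  # [0 1] -> column 0 gets mean_ zeroed, column 1 keeps its mean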

Example 7: fit

# Required import: from sklearn.utils import validation [as alias]
# Or: from sklearn.utils.validation import FLOAT_DTYPES [as alias]
def fit(self, X: np.ndarray, y: np.ndarray):
        """
        Fit the model using X, y as training data.

        :param X: array-like, shape=(n_samples, n_features)
        :param y: array-like, shape=(n_samples,)
        :return: Returns an instance of self
        """
        X, y = check_X_y(X, y, estimator=self, dtype=FLOAT_DTYPES)

        self.classes_ = unique_labels(y)
        self.models_, self.priors_logp_ = {}, {}
        for target_label in self.classes_:
            x_subset = X[y == target_label]

            # Computing joint distribution
            self.models_[target_label] = KernelDensity(
                bandwidth=self.bandwidth,
                kernel=self.kernel,
                algorithm=self.algorithm,
                metric=self.metric,
                atol=self.atol,
                rtol=self.rtol,
                breadth_first=self.breath_first,
                leaf_size=self.leaf_size,
                metric_params=self.metric_params,
            ).fit(x_subset)

            # Computing target class prior
            self.priors_logp_[target_label] = np.log(len(x_subset) / len(X))

        return self 
Author: koaning, Project: scikit-lego, Lines of code: 34, Source file: neighbors.py

Example 8: predict_proba

# Required import: from sklearn.utils import validation [as alias]
# Or: from sklearn.utils.validation import FLOAT_DTYPES [as alias]
def predict_proba(self, X):
        """
        Probability estimates.

        The returned estimates for all classes are in the same order found in the `.classes_` attribute.

        :param X: array-like of shape (n_samples, n_features)
        :return: array-like of shape (n_samples, n_classes)
            Returns the probability of the sample for each class in the model,
            where classes are ordered as they are in self.classes_.
        """
        check_is_fitted(self)
        X = check_array(X, estimator=self, dtype=FLOAT_DTYPES)

        log_prior = np.array(
            [self.priors_logp_[target_label] for target_label in self.classes_]
        )

        log_likelihood = np.array(
            [
                self.models_[target_label].score_samples(X)
                for target_label in self.classes_
            ]
        ).T

        log_likelihood_and_prior = np.exp(log_likelihood + log_prior)
        evidence = log_likelihood_and_prior.sum(axis=1, keepdims=True)
        posterior = log_likelihood_and_prior / evidence
        return posterior 
Author: koaning, Project: scikit-lego, Lines of code: 31, Source file: neighbors.py

Example 9: predict

# Required import: from sklearn.utils import validation [as alias]
# Or: from sklearn.utils.validation import FLOAT_DTYPES [as alias]
def predict(self, X):
        """
        Predict class labels for samples in X.

        :param X: array_like, shape (n_samples, n_features)
        :return: array, shape (n_samples)
        """
        check_is_fitted(self)
        X = check_array(X, estimator=self, dtype=FLOAT_DTYPES)

        return self.classes_[np.argmax(self.predict_proba(X), 1)] 
Author: koaning, Project: scikit-lego, Lines of code: 13, Source file: neighbors.py

Example 10: score_samples

# Required import: from sklearn.utils import validation [as alias]
# Or: from sklearn.utils.validation import FLOAT_DTYPES [as alias]
def score_samples(self, X):
        X = check_array(X, estimator=self, dtype=FLOAT_DTYPES)
        check_is_fitted(self, ["gmm_", "likelihood_threshold_"])
        if len(X.shape) == 1:
            X = np.expand_dims(X, 1)

        return -self.gmm_.score_samples(X) 
Author: koaning, Project: scikit-lego, Lines of code: 9, Source file: gmm_outlier_detector.py

Example 11: fit

# Required import: from sklearn.utils import validation [as alias]
# Or: from sklearn.utils.validation import FLOAT_DTYPES [as alias]
def fit(self, X: np.array, y: np.array) -> "GMMClassifier":
        """
        Fit the model using X, y as training data.

        :param X: array-like, shape=(n_samples, n_features) training data.
        :param y: array-like, shape=(n_samples,) training labels.
        :return: Returns an instance of self.
        """
        X, y = check_X_y(X, y, estimator=self, dtype=FLOAT_DTYPES)
        if X.ndim == 1:
            X = np.expand_dims(X, 1)

        self.gmms_ = {}
        self.classes_ = unique_labels(y)
        for c in self.classes_:
            subset_x, subset_y = X[y == c], y[y == c]
            mixture = GaussianMixture(
                n_components=self.n_components,
                covariance_type=self.covariance_type,
                tol=self.tol,
                reg_covar=self.reg_covar,
                max_iter=self.max_iter,
                n_init=self.n_init,
                init_params=self.init_params,
                weights_init=self.weights_init,
                means_init=self.means_init,
                precisions_init=self.precisions_init,
                random_state=self.random_state,
                warm_start=self.warm_start,
                verbose=self.verbose,
                verbose_interval=self.verbose_interval,
            )
            self.gmms_[c] = mixture.fit(subset_x, subset_y)
        return self 
Author: koaning, Project: scikit-lego, Lines of code: 36, Source file: gmm_classifier.py

Example 12: predict_proba

# Required import: from sklearn.utils import validation [as alias]
# Or: from sklearn.utils.validation import FLOAT_DTYPES [as alias]
def predict_proba(self, X):
        X = check_array(X, estimator=self, dtype=FLOAT_DTYPES)
        check_is_fitted(self, ["gmms_", "classes_"])
        res = np.zeros((X.shape[0], self.classes_.shape[0]))
        for idx, c in enumerate(self.classes_):
            res[:, idx] = self.gmms_[c].score_samples(X)
        return np.exp(res) / np.exp(res).sum(axis=1)[:, np.newaxis] 
Author: koaning, Project: scikit-lego, Lines of code: 9, Source file: gmm_classifier.py

Example 13: score_samples

# Required import: from sklearn.utils import validation [as alias]
# Or: from sklearn.utils.validation import FLOAT_DTYPES [as alias]
def score_samples(self, X):
        X = check_array(X, estimator=self, dtype=FLOAT_DTYPES)
        check_is_fitted(self, ["gmm_", "likelihood_threshold_"])
        if len(X.shape) == 1:
            X = np.expand_dims(X, 1)

        return self.gmm_.score_samples(X) * -1 
Author: koaning, Project: scikit-lego, Lines of code: 9, Source file: bayesian_gmm_detector.py

Example 14: fit

# Required import: from sklearn.utils import validation [as alias]
# Or: from sklearn.utils.validation import FLOAT_DTYPES [as alias]
def fit(self, X: np.array, y: np.array) -> "BayesianGMMClassifier":
        """
        Fit the model using X, y as training data.

        :param X: array-like, shape=(n_samples, n_features) training data.
        :param y: array-like, shape=(n_samples,) training labels.
        :return: Returns an instance of self.
        """
        X, y = check_X_y(X, y, estimator=self, dtype=FLOAT_DTYPES)
        if X.ndim == 1:
            X = np.expand_dims(X, 1)

        self.gmms_ = {}
        self.classes_ = unique_labels(y)
        for c in self.classes_:
            subset_x, subset_y = X[y == c], y[y == c]
            mixture = BayesianGaussianMixture(
                n_components=self.n_components,
                covariance_type=self.covariance_type,
                tol=self.tol,
                reg_covar=self.reg_covar,
                max_iter=self.max_iter,
                n_init=self.n_init,
                init_params=self.init_params,
                weight_concentration_prior_type=self.weight_concentration_prior_type,
                weight_concentration_prior=self.weight_concentration_prior,
                mean_precision_prior=self.mean_precision_prior,
                mean_prior=self.mean_prior,
                degrees_of_freedom_prior=self.degrees_of_freedom_prior,
                covariance_prior=self.covariance_prior,
                random_state=self.random_state,
                warm_start=self.warm_start,
                verbose=self.verbose,
                verbose_interval=self.verbose_interval,
            )
            self.gmms_[c] = mixture.fit(subset_x, subset_y)
        return self 
Author: koaning, Project: scikit-lego, Lines of code: 39, Source file: bayesian_gmm_classifier.py

Example 15: predict

# Required import: from sklearn.utils import validation [as alias]
# Or: from sklearn.utils.validation import FLOAT_DTYPES [as alias]
def predict(self, X):
        check_is_fitted(self, ["gmms_", "classes_"])
        X = check_array(X, estimator=self, dtype=FLOAT_DTYPES)
        return self.classes_[self.predict_proba(X).argmax(axis=1)] 
Author: koaning, Project: scikit-lego, Lines of code: 6, Source file: bayesian_gmm_classifier.py
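Taken together, the 15 examples follow one recurring pattern: validate inputs with check_array / check_X_y using dtype=FLOAT_DTYPES, store fitted state in trailing-underscore attributes, and guard prediction methods with check_is_fitted. A minimal template of that pattern for a custom estimator; the MeanClassifier name and its nearest-centroid logic are illustrative only and do not come from any of the projects above:

import numpy as np
from sklearn.base import BaseEstimator, ClassifierMixin
from sklearn.utils.multiclass import unique_labels
from sklearn.utils.validation import FLOAT_DTYPES, check_array, check_is_fitted, check_X_y


class MeanClassifier(BaseEstimator, ClassifierMixin):
    """Toy classifier: predicts the class whose feature-mean centroid is closest."""

    def fit(self, X, y):
        X, y = check_X_y(X, y, estimator=self, dtype=FLOAT_DTYPES)
        self.classes_ = unique_labels(y)
        self.centroids_ = np.stack([X[y == c].mean(axis=0) for c in self.classes_])
        return self

    def predict(self, X):
        check_is_fitted(self, ["classes_", "centroids_"])
        X = check_array(X, estimator=self, dtype=FLOAT_DTYPES)
        # Euclidean distance from every sample to every class centroid; pick the nearest.
        dists = np.linalg.norm(X[:, None, :] - self.centroids_[None, :, :], axis=2)
        return self.classes_[dists.argmin(axis=1)]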


Note: The sklearn.utils.validation.FLOAT_DTYPES examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors. Refer to each project's license before distributing or using the code; do not reproduce this compilation without permission.