

Python sklearn.metrics Code Examples

This article collects typical usage examples of the sklearn.metrics module in Python. If you are wondering how sklearn.metrics is used in practice, or are looking for concrete examples, the curated code samples below may help. You can also explore further usage examples from the sklearn package that contains it.


Fifteen code examples of sklearn.metrics are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
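Before the fifteen examples, here is a minimal, self-contained sketch of calling sklearn.metrics functions directly; the sample arrays are made up for illustration:

import numpy as np
import sklearn.metrics

# Classification: fraction of exactly matching labels.
y_true = np.array([0, 1, 1, 0, 1])
y_pred = np.array([0, 1, 0, 0, 1])
print(sklearn.metrics.accuracy_score(y_true, y_pred))  # 0.8

# Regression: mean squared and mean absolute error.
y = np.array([2.0, 1.5, 3.0])
y_hat = np.array([2.1, 1.4, 2.7])
print(sklearn.metrics.mean_squared_error(y, y_hat))
print(sklearn.metrics.mean_absolute_error(y, y_hat))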

Example 1: accuracy_score

# Required import: import sklearn [as alias]
# Or: from sklearn import metrics [as alias]
def accuracy_score(y, y_pred):
  """Compute accuracy score

  Computes accuracy score for classification tasks. Works for both
  binary and multiclass classification.

  Parameters
  ----------
  y: np.ndarray
    Of shape `(N_samples,)`
  y_pred: np.ndarray
    Of shape `(N_samples,)`

  Returns
  -------
  score: float
    The fraction of correctly classified samples. A number between 0
    and 1.
  """
  y = _ensure_class_labels(y)
  y_pred = _ensure_class_labels(y_pred)
  return sklearn.metrics.accuracy_score(y, y_pred) 
Developer: deepchem | Project: deepchem | Lines: 24 | Source file: __init__.py

Example 2: calculate_regression_metrics

# Required import: import sklearn [as alias]
# Or: from sklearn import metrics [as alias]
def calculate_regression_metrics(trained_sklearn_estimator, x_test, y_test):
    """
    Given a trained estimator, calculate metrics.

    Args:
        trained_sklearn_estimator (sklearn.base.BaseEstimator): a scikit-learn estimator that has been `.fit()`
        x_test (numpy.ndarray): A 2d numpy array of the x_test set (features)
        y_test (numpy.ndarray): A 1d numpy array of the y_test set (true target values)

    Returns:
        dict: A dictionary of metrics objects
    """
    # Get predictions
    predictions = trained_sklearn_estimator.predict(x_test)

    # Calculate individual metrics
    mean_squared_error = skmetrics.mean_squared_error(y_test, predictions)
    mean_absolute_error = skmetrics.mean_absolute_error(y_test, predictions)

    result = {'mean_squared_error': mean_squared_error, 'mean_absolute_error': mean_absolute_error}

    return result 
Developer: HealthCatalyst | Project: healthcareai-py | Lines: 24 | Source file: model_eval.py
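A usage sketch for the function above, assuming the module binds `import sklearn.metrics as skmetrics` as the snippet implies; the toy data is made up:

import numpy as np
from sklearn.linear_model import LinearRegression

# Any fitted regressor with a .predict() method works here.
x_train = np.array([[1.0], [2.0], [3.0], [4.0]])
y_train = np.array([1.1, 1.9, 3.2, 3.9])
x_test = np.array([[5.0], [6.0]])
y_test = np.array([5.1, 5.8])

model = LinearRegression().fit(x_train, y_train)
print(calculate_regression_metrics(model, x_test, y_test))
# -> {'mean_squared_error': ..., 'mean_absolute_error': ...}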

Example 3: log

# Required import: import sklearn [as alias]
# Or: from sklearn import metrics [as alias]
def log(self, main_keys, metric_keys, values):
        """
        Actually log new values in csv and Progress Saver dict internally.
        Args:
            main_keys:      Main key in which data will be stored. Normally is either 'train' for training metrics or 'val' for validation metrics.
            metric_keys:    Needs to follow the list length of self.progress_saver[main_key(s)]. List of metric keys that are extended with new values.
            values:         Needs to be a list of the same structure as metric_keys. Actual values that are appended.
        """
        if not isinstance(main_keys, list):   main_keys = [main_keys]
        if not isinstance(metric_keys, list): metric_keys = [metric_keys]
        if not isinstance(values, list):      values = [values]

        # Log data to progress saver dict.
        for main_key in main_keys:
            for value, metric_key in zip(values, metric_keys):
                self.progress_saver[main_key][metric_key].append(value)

            # Append data to the csv logger for this main key.
            self.csv_loggers[main_key].log(values)
Developer: Confusezius | Project: Deep-Metric-Learning-Baselines | Lines: 21 | Source file: auxiliaries.py

Example 4: optimize_model

# Required import: import sklearn [as alias]
# Or: from sklearn import metrics [as alias]
def optimize_model(task, param_name, test_size: float, binary=False) -> None:
    x, y = task.create_train_data()

    def objective(trial):
        train_x, test_x, train_y, test_y = train_test_split(x, y, test_size=test_size)
        param = redshells.factory.get_optuna_param(param_name, trial)
        model = task.create_model()
        model.set_params(**param)
        model.fit(train_x, train_y)
        predictions = model.predict(test_x)

        if binary:
            predictions = np.rint(predictions)

        return 1.0 - sklearn.metrics.accuracy_score(test_y, predictions)

    study = optuna.create_study()
    study.optimize(objective, n_trials=100)
    task.dump(dict(best_params=study.best_params, best_value=study.best_value)) 
Developer: m3dev | Project: redshells | Lines: 21 | Source file: utils.py
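`task` and `redshells.factory.get_optuna_param` are repo-specific; a minimal standalone sketch of the same pattern, with an explicit Optuna search space substituted for the parameter factory:

import optuna
import sklearn.metrics
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

x, y = make_classification(n_samples=200, random_state=0)

def objective(trial):
    train_x, test_x, train_y, test_y = train_test_split(x, y, test_size=0.2)
    model = RandomForestClassifier(
        n_estimators=trial.suggest_int('n_estimators', 10, 100),
        max_depth=trial.suggest_int('max_depth', 2, 10),
    )
    model.fit(train_x, train_y)
    # Optuna minimizes by default, so return 1 - accuracy.
    return 1.0 - sklearn.metrics.accuracy_score(test_y, model.predict(test_x))

study = optuna.create_study()
study.optimize(objective, n_trials=20)
print(study.best_params, study.best_value)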

Example 5: compute_perf_metrics

# Required import: import sklearn [as alias]
# Or: from sklearn import metrics [as alias]
def compute_perf_metrics(self, per_task=False):
        """Returns the ROC_AUC metrics for each task based on the accumulated predictions. If
        per_task is False, returns the average ROC AUC over tasks.
        
        Args:
            per_task (bool): Whether to return individual ROC AUC scores for each task

        Returns:
            A tuple (roc_auc, std):
                roc_auc: A numpy array of ROC AUC scores, if per_task is True. Otherwise,
                         a float giving the mean ROC AUC score over tasks.

                std:     Placeholder for an array of standard deviations. Always None for this class.

        """
        roc_auc_scores = self.perf_metrics[0]
        if per_task or self.num_tasks == 1:
            return (roc_auc_scores, None)
        else:
            return (roc_auc_scores.mean(), None) 
Developer: ATOMconsortium | Project: AMPL | Lines: 22 | Source file: perf_data.py
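The per-task ROC AUC values themselves are accumulated elsewhere in this class; as a hedged sketch, scores of that shape can be computed with sklearn.metrics.roc_auc_score on each task column:

import numpy as np
import sklearn.metrics

y_true = np.array([[0, 1], [1, 0], [1, 1], [0, 0]])                  # (N_samples, n_tasks)
y_prob = np.array([[0.2, 0.9], [0.8, 0.3], [0.7, 0.6], [0.1, 0.4]])

roc_auc_scores = np.array([
    sklearn.metrics.roc_auc_score(y_true[:, t], y_prob[:, t])
    for t in range(y_true.shape[1])
])
print(roc_auc_scores, roc_auc_scores.mean())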

Example 6: trash_small_cluster

# Required import: import sklearn [as alias]
# Or: from sklearn import metrics [as alias]
def trash_small_cluster(self, **kargs):
        cleancluster.trash_small_cluster(self, **kargs)

    #~ def compute_spike_waveforms_similarity(self, method='cosine_similarity', size_max = 1e7):
        #~ """This compute the similarity spike by spike.
        #~ """
        #~ spike_waveforms_similarity = None
        #~ if self.some_waveforms is not None:
            #~ wf = self.some_waveforms
            #~ wf = wf.reshape(wf.shape[0], -1)
            #~ if wf.size<size_max:
                #~ spike_waveforms_similarity = metrics.compute_similarity(wf, method)
        
        #~ if spike_waveforms_similarity is None:
            #~ self.arrays.detach_array('spike_waveforms_similarity')
            #~ self.spike_waveforms_similarity = None
        #~ else:
            #~ self.arrays.add_array('spike_waveforms_similarity', spike_waveforms_similarity.astype('float32'), self.memory_mode)

        #~ return self.spike_waveforms_similarity 
Developer: tridesclous | Project: tridesclous | Lines: 22 | Source file: catalogueconstructor.py

Example 7: compute_cluster_similarity

# Required import: import sklearn [as alias]
# Or: from sklearn import metrics [as alias]
def compute_cluster_similarity(self, method='cosine_similarity_with_max'):
        if self.centroids_median is None:
            self.compute_all_centroid()
        
        #~ t1 = time.perf_counter()
        
        labels = self.cluster_labels
        mask = labels>=0
        
        wfs = self.centroids_median[mask, :,  :]
        wfs = wfs.reshape(wfs.shape[0], -1)
        
        if wfs.size == 0:
            cluster_similarity = None
        else:
            cluster_similarity = metrics.cosine_similarity_with_max(wfs)

        if cluster_similarity is None:
            self.arrays.detach_array('cluster_similarity')
            self.cluster_similarity = None
        else:
            self.arrays.add_array('cluster_similarity', cluster_similarity.astype('float32'), self.memory_mode)

        #~ t2 = time.perf_counter()
        #~ print('compute_cluster_similarity', t2-t1) 
Developer: tridesclous | Project: tridesclous | Lines: 27 | Source file: catalogueconstructor.py
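`metrics.cosine_similarity_with_max` is a tridesclous helper; the core idea (pairwise cosine similarity between flattened centroid waveforms) can be sketched with sklearn directly:

import numpy as np
from sklearn.metrics.pairwise import cosine_similarity

rng = np.random.default_rng(0)
centroids = rng.normal(size=(4, 30, 8))    # (n_clusters, n_samples, n_channels)
wfs = centroids.reshape(centroids.shape[0], -1)
print(cosine_similarity(wfs).shape)        # (4, 4) pairwise similarity matrix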

Example 8: compute_spike_silhouette

# Required import: import sklearn [as alias]
# Or: from sklearn import metrics [as alias]
def compute_spike_silhouette(self, size_max=1e7):
        #~ t1 = time.perf_counter()
        
        spike_silhouette = None
        #~ wf = self.some_waveforms
        if self.some_peaks_index is not None:
            wf = self.get_some_waveforms(peaks_index=self.some_peaks_index)
            wf = wf.reshape(wf.shape[0], -1)
            labels = self.all_peaks['cluster_label'][self.some_peaks_index]
            if wf.size<size_max:
                spike_silhouette = metrics.compute_silhouette(wf, labels, metric='euclidean')

        if spike_silhouette is None:
            self.arrays.detach_array('spike_silhouette')
            self.spike_silhouette = None
        else:
            self.arrays.add_array('spike_silhouette', spike_silhouette.astype('float32'), self.memory_mode)


        #~ t2 = time.perf_counter()
        #~ print('compute_spike_silhouette', t2-t1) 
Developer: tridesclous | Project: tridesclous | Lines: 23 | Source file: catalogueconstructor.py
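`metrics.compute_silhouette` is likewise a tridesclous helper; the underlying measure is available directly in sklearn as silhouette_score (one float) or silhouette_samples (one value per spike):

import numpy as np
import sklearn.metrics

rng = np.random.default_rng(0)
wf = rng.normal(size=(100, 24))            # flattened waveforms
labels = rng.integers(0, 3, size=100)      # cluster labels
print(sklearn.metrics.silhouette_score(wf, labels, metric='euclidean'))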

Example 9: convert_sklearn_metric_function

# Required import: import sklearn [as alias]
# Or: from sklearn import metrics [as alias]
def convert_sklearn_metric_function(scoring):
    """If ``scoring`` is a sklearn metric function, convert it to a
    sklearn scorer and return it. Otherwise, return ``scoring`` unchanged."""
    if callable(scoring):
        module = getattr(scoring, '__module__', None)

        # those are scoring objects returned by make_scorer starting
        # from sklearn 0.22
        scorer_names = ('_PredictScorer', '_ProbaScorer', '_ThresholdScorer')
        if (
                hasattr(module, 'startswith') and
                module.startswith('sklearn.metrics.') and
                not module.startswith('sklearn.metrics.scorer') and
                not module.startswith('sklearn.metrics.tests.') and
                scoring.__class__.__name__ not in scorer_names
        ):
            return make_scorer(scoring)
    return scoring 
Developer: skorch-dev | Project: skorch | Lines: 20 | Source file: scoring.py
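Usage sketch: a bare metric function gets wrapped into a scorer, while anything else (a string name, an existing scorer object) passes through unchanged:

import sklearn.metrics

scorer = convert_sklearn_metric_function(sklearn.metrics.f1_score)
# -> make_scorer(sklearn.metrics.f1_score), usable wherever a scoring callable is expected

unchanged = convert_sklearn_metric_function('accuracy')
# -> 'accuracy', returned as-is because it is not callable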

Example 10: apply_lens

# Required import: import sklearn [as alias]
# Or: from sklearn import metrics [as alias]
def apply_lens(df, lens='pca', dist='euclidean', n_dim=2, **kwargs):
    """
    input: N x F dataframe of observations
    output: N x n_dim image of input data under lens function
    """
    if n_dim != 2:
        raise ValueError('image of data set must be two-dimensional')
    if dist not in ['euclidean', 'correlation']:
        raise ValueError('only euclidean and correlation distance metrics are supported')
    if lens == 'pca' and dist != 'euclidean':
        raise ValueError('PCA requires the use of euclidean distance metric')

    if lens == 'pca':
        df_lens = pd.DataFrame(decomposition.PCA(n_components=n_dim, **kwargs).fit_transform(df), df.index)
    elif lens == 'mds':
        D = metrics.pairwise.pairwise_distances(df, metric=dist)
        df_lens = pd.DataFrame(manifold.MDS(n_components=n_dim, **kwargs).fit_transform(D), df.index)
    elif lens == 'neighbor':
        D = metrics.pairwise.pairwise_distances(df, metric=dist)
        df_lens = pd.DataFrame(manifold.SpectralEmbedding(n_components=n_dim, **kwargs).fit_transform(D), df.index)
    else:
        raise ValueError('only PCA, MDS, neighborhood lenses are supported')
    
    return df_lens 
Developer: szairis | Project: sakmapper | Lines: 26 | Source file: lens.py
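A usage sketch, assuming the module-level imports the snippet relies on (pandas as pd, plus decomposition, manifold, and metrics from sklearn):

import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.rand(50, 6))                      # N x F observations
print(apply_lens(df, lens='pca').shape)                       # (50, 2)
print(apply_lens(df, lens='mds', dist='correlation').shape)   # (50, 2)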

Example 11: score

# Required import: import sklearn [as alias]
# Or: from sklearn import metrics [as alias]
def score(metrics, pred, ref):
    """ Function to score and print custom metrics """
    score_dict = OrderedDict()
    if metrics:
        for metric in metrics:
            if metric == 'pc':
                score_dict[metric] = pearson_correlation(pred, ref)
            elif metric == 'mae':
                score_dict[metric] = mean_absolute_error(pred, ref)
            elif metric == 'mse':
                score_dict[metric] = mean_squared_error(pred, ref)
            elif metric == 'rmse':
                score_dict[metric] = root_mean_squared_error(pred, ref)
            else:
                logger.error('Invalid metric: %s', metric)

    return score_dict 
Developer: nusnlp | Project: neuqe | Lines: 19 | Source file: metric_utils.py

Example 12: __init__

# Required import: import sklearn [as alias]
# Or: from sklearn import metrics [as alias]
def __init__(
            self,
            metric_name: str,
            reduce_group: Any = group.WORLD,
            reduce_op: Any = ReduceOp.SUM,
            **kwargs,
    ):
        """
        Args:
            metric_name: the metric name to import and compute from sklearn.metrics
            reduce_group: the process group for DDP reduces (only needed for DDP training).
                Defaults to all processes (world)
            reduce_op: the operation to perform during reduction within DDP (only needed for DDP training).
                Defaults to sum.
            **kwargs: additional keyword arguments (will be forwarded to metric call)
        """
        super().__init__(name=metric_name,
                         reduce_group=reduce_group,
                         reduce_op=reduce_op)

        self.metric_kwargs = kwargs
        lightning_logger.debug(
            f'Metric {self.__class__.__name__} is using Sklearn as backend, meaning that'
            ' every metric call will cause a GPU synchronization, which may slow down your code'
        ) 
Developer: PyTorchLightning | Project: pytorch-lightning | Lines: 27 | Source file: sklearns.py

Example 13: analyze

# Required import: import sklearn [as alias]
# Or: from sklearn import metrics [as alias]
def analyze(probas, target):
  """Analyzes predictions and returns results.

  Computes different metrics (specified by `constants.METRICS`) comparing
  predictions to true labels.

  Args:
    probas: `np.array` with predicted probabilities.
    target: `np.array` of `int` with true labels.

  Returns:
    Dictionary of `str` to `float` mapping metric names to the corresponding
      scores.
  """

  results = {}
  for metric_type, sub_metrics in _METRICS.items():
    for metric_name in sub_metrics:
      metric = getattr(metrics, metric_name)

      results[metric_name] = metric(
          target,
          (probas if metric_type == _CONTINUOUS_TYPE
           else probas > _ACCURACY_THRESHOLD))
  return results 
Developer: GoogleCloudPlatform | Project: professional-services | Lines: 27 | Source file: scoring.py
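The getattr-based lookup resolves metric names to sklearn functions at run time; a minimal sketch of the same dispatch pattern:

import numpy as np
from sklearn import metrics

probas = np.array([0.1, 0.8, 0.4, 0.9])
target = np.array([0, 1, 0, 1])

for name in ('roc_auc_score', 'log_loss'):       # metrics that take probabilities
    metric = getattr(metrics, name)
    print(name, metric(target, probas))

accuracy = getattr(metrics, 'accuracy_score')    # needs thresholded labels
print(accuracy(target, probas > 0.5))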

Example 14: compute_metrics_cv

# Required import: import sklearn [as alias]
# Or: from sklearn import metrics [as alias]
def compute_metrics_cv(self, X, Y):
        """Compute cross-validated metrics.

        Trains this model on data X with labels Y.

        Returns a MetricList with the name, scoring type, and value for each
        Metric. Note that these values may be numpy floating points, and should
        be converted prior to insertion in a database.

        Parameters
        ----------
        X : numpy array-like or pd.DataFrame
            data
        Y : numpy array-like or pd.DataFrame or pd.DataSeries
            labels
        """

        scorings, scorings_ = self._get_scorings()

        # compute scores
        scores = self.cv_score_mean(X, Y, scorings_)

        # unpack into MetricList
        metric_list = self.scores_to_metriclist(scorings, scores)
        return metric_list 
Developer: HDI-Project | Project: FeatureHub | Lines: 27 | Source file: model.py
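`cv_score_mean` and `_get_scorings` are FeatureHub internals; sklearn itself covers the same ground with sklearn.model_selection.cross_validate and multiple scorers, e.g.:

from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_validate

X, Y = make_classification(n_samples=120, random_state=0)
cv_results = cross_validate(LogisticRegression(), X, Y, cv=5,
                            scoring=['accuracy', 'roc_auc'])
# As the docstring above notes, convert numpy floats before database insertion.
print({k: float(v.mean()) for k, v in cv_results.items() if k.startswith('test_')})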

Example 15: compute_metrics_train_test

# Required import: import sklearn [as alias]
# Or: from sklearn import metrics [as alias]
def compute_metrics_train_test(self, X, Y, n):
        """Compute metrics on test set.
        """

        X, Y = Model._format_matrices(X, Y)

        X_train, Y_train = X[:n], Y[:n]
        X_test, Y_test = X[n:], Y[n:]

        scorings, scorings_ = self._get_scorings()

        # Determine binary/multiclass classification
        classes = np.unique(Y)
        params = self._get_params(classes)

        # fit model on entire training set
        self.model.fit(X_train, Y_train)

        scores = {}
        for scoring in scorings_:
            scores[scoring] = self._do_scoring(scoring, params, self.model,
                    X_test, Y_test)

        metric_list = self.scores_to_metriclist(scorings, scores)
        return metric_list 
Developer: HDI-Project | Project: FeatureHub | Lines: 27 | Source file: model.py


Note: The sklearn.metrics examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are drawn from open-source projects contributed by various developers; copyright remains with the original authors, and distribution and use are subject to each project's license. Do not reproduce without permission.