當前位置: 首頁>>代碼示例>>Python>>正文


Python stats.entropy方法代碼示例

本文整理匯總了Python中scipy.stats.entropy方法的典型用法代碼示例。如果您正苦於以下問題:Python stats.entropy方法的具體用法?Python stats.entropy怎麽用?Python stats.entropy使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在scipy.stats的用法示例。


在下文中一共展示了stats.entropy方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。

示例1: test_consensus_entropy

# 需要導入模塊: from scipy import stats [as 別名]
# 或者: from scipy.stats import entropy [as 別名]
def test_consensus_entropy(self):
    """Consensus entropy of a committee: matches the analytic entropy of a
    uniform distribution for the fitted case and is all zeros when the
    committee is unfitted."""
    for sample_count in range(1, 10):
        for class_count in range(2, 10):
            for query_idx in range(sample_count):
                # fitted committee: every sample is predicted one-hot except
                # one, whose predicted distribution is uniform over classes
                probabilities = np.zeros(shape=(sample_count, class_count))
                probabilities[:, 0] = 1.0
                probabilities[query_idx] = np.ones(class_count) / class_count
                fitted = mock.MockCommittee(predict_proba_return=probabilities)
                observed = modAL.disagreement.consensus_entropy(
                    fitted, np.random.rand(sample_count, class_count)
                )
                expected = np.zeros(shape=(sample_count,))
                expected[query_idx] = entropy(np.ones(class_count) / class_count)
                np.testing.assert_array_almost_equal(observed, expected)

                # unfitted committee: consensus entropy must be all zeros
                unfitted = mock.MockCommittee(fitted=False)
                observed = modAL.disagreement.consensus_entropy(
                    unfitted, np.random.rand(sample_count, class_count)
                )
                np.testing.assert_almost_equal(observed, np.zeros(shape=(sample_count,)))
開發者ID:modAL-python,項目名稱:modAL,代碼行數:25,代碼來源:core_tests.py

示例2: text_entropy

# 需要導入模塊: from scipy import stats [as 別名]
# 或者: from scipy.stats import entropy [as 別名]
def text_entropy(text):
    """Compute the Shannon entropy (base 2) of a string.

    Only characters in the first 256 Unicode code points (the Latin-1
    range) are counted; all other characters are ignored.  Note the
    original comment said "UTF8 alphabet", but ``chr(i) for i in
    range(256)`` enumerates Latin-1 code points, not UTF-8 bytes.

    Args:
        text: string to compute the text entropy for

    Returns:
        text_entropy: entropy in bits, or None when no character of the
        string falls into the counted range (including the empty string)
    """
    from collections import Counter

    # Single pass over the text instead of 256 separate str.count() scans.
    counts = Counter(ch for ch in text if ord(ch) < 256)
    pk = [counts[chr(i)] for i in range(256)]
    if sum(pk) == 0:
        text_entropy = None
    else:
        text_entropy = entropy(pk, base=2)
    return text_entropy
開發者ID:gotec,項目名稱:git2net,代碼行數:18,代碼來源:extraction.py

示例3: paga_expression_entropies

# 需要導入模塊: from scipy import stats [as 別名]
# 或者: from scipy.stats import entropy [as 別名]
def paga_expression_entropies(adata) -> List[float]:
    """Compute the median expression entropy for each node-group.

    Parameters
    ----------
    adata : AnnData
        Annotated data matrix.

    Returns
    -------
    Entropies of median expressions for each node.
    """
    from scipy.stats import entropy
    groups_order, groups_masks = _utils.select_groups(
        adata, key=adata.uns['paga']['groups']
    )
    result = []
    for group_mask in groups_masks:
        # assumes adata.X is a sparse matrix — todense() materialises the group slice
        dense_block = adata.X[group_mask].todense()
        medians = np.nanmedian(dense_block, axis=1, overwrite_input=True)
        # rescale medians to [0, 1]; scipy's entropy renormalises internally
        lo, hi = np.nanmin(medians), np.nanmax(medians)
        result.append(entropy((medians - lo) / (hi - lo)))
    return result
開發者ID:theislab,項目名稱:scanpy,代碼行數:25,代碼來源:_paga.py

示例4: _nll_stats

# 需要導入模塊: from scipy import stats [as 別名]
# 或者: from scipy.stats import entropy [as 別名]
def _nll_stats(self, sampled_nlls, validation_nlls, training_nlls):
    """Log NLL histograms, mean/variance scalars, and the joint
    Jensen-Shannon divergence of the three NLL distributions."""
    nll_sets = {
        "sampled": sampled_nlls,
        "validation": validation_nlls,
        "training": training_nlls
    }

    for set_name, values in nll_sets.items():
        self._add_histogram("nll_plot/{}".format(set_name), values)

    self._add_scalars("nll/avg", {name: values.mean() for name, values in nll_sets.items()})
    self._add_scalars("nll/var", {name: values.var() for name, values in nll_sets.items()})

    def jensen_shannon(distributions):
        # Trim all distributions to a common length, then average the KL
        # divergences of each one against their mean.
        shortest = min(len(d) for d in distributions)
        trimmed = [d[:shortest] for d in distributions]
        mean_dist = np.sum(trimmed, axis=0) / len(trimmed)
        divergences = [sps.entropy(d, mean_dist) for d in trimmed]
        return np.sum(divergences) / len(trimmed)

    self._add_scalar("nll_plot/jsd_joined",
                     jensen_shannon([sampled_nlls, training_nlls, validation_nlls]))
開發者ID:undeadpixel,項目名稱:reinvent-randomized,代碼行數:27,代碼來源:actions.py

示例5: test_genextreme_entropy

# 需要導入模塊: from scipy import stats [as 別名]
# 或者: from scipy.stats import entropy [as 別名]
def test_genextreme_entropy():
    # Regression test for gh-5181: the entropy of the generalized extreme
    # value distribution involves the Euler-Mascheroni constant.
    euler = 0.5772156649015329

    # (shape args, kwargs, expected value), checked to tight relative tolerance
    approx_cases = [
        ((-1.0,), {}, 2 * euler + 1),
        ((0,), {}, euler + 1),
        ((-2.0,), {'scale': 10}, euler * 3 + np.log(10) + 1),
        ((10,), {}, -9 * euler + 1),
        ((-10,), {}, 11 * euler + 1),
    ]
    for shape_args, kwargs, expected in approx_cases:
        h = stats.genextreme.entropy(*shape_args, **kwargs)
        assert_allclose(h, expected, rtol=1e-14)

    # c = 1 has the exact closed-form entropy 1
    assert_equal(stats.genextreme.entropy(1.0), 1)
開發者ID:Relph1119,項目名稱:GraphicDesignPatternByPython,代碼行數:23,代碼來源:test_distributions.py

示例6: select

# 需要導入模塊: from scipy import stats [as 別名]
# 或者: from scipy.stats import entropy [as 別名]
def select(self, features, freq_table):
    """ Select features via some criteria

    :type features: dict
    :param features: features vocab

    :type freq_table: 2-D numpy.array
    :param freq_table: frequency table with rows as features,
                      columns as frequency values
    """
    # Map the configured method name onto the scoring callable's attribute
    # name; unknown methods raise KeyError exactly as before.
    scorer_attr = {
        'frequency': 'frequency',
        'entropy': 'entropy',
        'freq-entropy': 'freq_entropy',
    }.get(self.method)
    if scorer_attr is None:
        raise KeyError("Unrecognized method")
    feat_vals = getattr(self, scorer_attr)(features, freq_table)
    return self.rank(feat_vals)
開發者ID:yizhongw,項目名稱:StageDP,代碼行數:22,代碼來源:selection.py

示例7: _run

# 需要導入模塊: from scipy import stats [as 別名]
# 或者: from scipy.stats import entropy [as 別名]
def _run(self):
    """
    Run qGAN training

    Returns:
        dict: with generator(discriminator) parameters & loss, relative entropy
    Raises:
        AquaError: invalid backend
    """
    # BUG FIX: the original wrote
    #   backend_name == ('unitary_simulator' or 'clifford_simulator')
    # but `('a' or 'b')` short-circuits to 'a', so 'clifford_simulator'
    # was never rejected. A membership test checks both backends.
    if self._quantum_instance.backend_name in ('unitary_simulator', 'clifford_simulator'):
        raise AquaError(
            'Chosen backend not supported - '
            'Set backend either to statevector_simulator, qasm_simulator'
            ' or actual quantum hardware')
    self.train()

    return self._ret
開發者ID:Qiskit,項目名稱:qiskit-aqua,代碼行數:19,代碼來源:qgan.py

示例8: consensus_entropy

# 需要導入模塊: from scipy import stats [as 別名]
# 或者: from scipy.stats import entropy [as 別名]
def consensus_entropy(committee: BaseCommittee, X: modALinput, **predict_proba_kwargs) -> np.ndarray:
    """
    Consensus entropy of a Committee for the given samples.

    The Committee's :meth:`predict_proba` averages the learners' class
    probabilities into a consensus distribution per sample; this function
    returns the entropy of that consensus distribution for each sample.

    Args:
        committee: The :class:`modAL.models.BaseCommittee` instance for which the consensus entropy is to be calculated.
        X: The data for which the consensus entropy is to be calculated.
        **predict_proba_kwargs: Keyword arguments for the :meth:`predict_proba` of the Committee.

    Returns:
        Consensus entropy of the Committee for the samples in X.
    """
    try:
        consensus_proba = committee.predict_proba(X, **predict_proba_kwargs)
    except NotFittedError:
        # An unfitted committee carries no information: zero entropy everywhere.
        return np.zeros(shape=(X.shape[0],))

    # scipy's entropy reduces along axis 0, so put samples into columns first.
    per_sample = entropy(np.transpose(consensus_proba))
    return np.transpose(per_sample)
開發者ID:modAL-python,項目名稱:modAL,代碼行數:24,代碼來源:disagreement.py

示例9: classifier_entropy

# 需要導入模塊: from scipy import stats [as 別名]
# 或者: from scipy.stats import entropy [as 別名]
def classifier_entropy(classifier: BaseEstimator, X: modALinput, **predict_proba_kwargs) -> np.ndarray:
    """
    Entropy of the classifier's class-probability predictions for each sample.

    Args:
        classifier: The classifier for which the prediction entropy is to be measured.
        X: The samples for which the prediction entropy is to be measured.
        **predict_proba_kwargs: Keyword arguments to be passed for the :meth:`predict_proba` of the classifier.

    Returns:
        Entropy of the class probabilities, one value per sample.
    """
    try:
        class_probabilities = classifier.predict_proba(X, **predict_proba_kwargs)
    except NotFittedError:
        # No fitted model, no information: report zero entropy for every sample.
        return np.zeros(shape=(X.shape[0], ))

    # scipy's entropy reduces along axis 0, so put samples into columns first.
    per_sample = entropy(np.transpose(class_probabilities))
    return np.transpose(per_sample)
開發者ID:modAL-python,項目名稱:modAL,代碼行數:20,代碼來源:uncertainty.py

示例10: test_vote_entropy

# 需要導入模塊: from scipy import stats [as 別名]
# 或者: from scipy.stats import entropy [as 別名]
def test_vote_entropy(self):
    """Vote entropy of a committee: matches the analytic entropy of a
    uniform vote for the fitted case and is all zeros when unfitted."""
    for sample_count in range(1, 10):
        for class_count in range(1, 10):
            for query_idx in range(sample_count):
                # fitted committee: all learners agree on every sample except
                # one, where the votes are spread over all distinct classes
                votes = np.zeros(shape=(sample_count, class_count), dtype=np.int16)
                votes[query_idx] = np.asarray(range(class_count), dtype=np.int16)
                fitted = mock.MockCommittee(
                    classes_=np.asarray(range(class_count)), vote_return=votes
                )
                observed = modAL.disagreement.vote_entropy(
                    fitted, np.random.rand(sample_count, class_count)
                )
                expected = np.zeros(shape=(sample_count, ))
                expected[query_idx] = entropy(np.ones(class_count) / class_count)
                np.testing.assert_array_almost_equal(observed, expected)

                # unfitted committee: vote entropy must be all zeros
                unfitted = mock.MockCommittee(fitted=False)
                observed = modAL.disagreement.vote_entropy(
                    unfitted, np.random.rand(sample_count, class_count)
                )
                np.testing.assert_almost_equal(observed, np.zeros(shape=(sample_count,)))
開發者ID:modAL-python,項目名稱:modAL,代碼行數:24,代碼來源:core_tests.py

示例11: jensen_shannon_divergence

# 需要導入模塊: from scipy import stats [as 別名]
# 或者: from scipy.stats import entropy [as 別名]
def jensen_shannon_divergence(P, Q):
    """Jensen-Shannon divergence (base 2) between two non-negative vectors.

    Inputs are normalised to probability distributions first; the result is
    cross-checked against the independent `_jsdiv` implementation and a
    warning is emitted if the two disagree.
    """
    if np.any(P < 0) or np.any(Q < 0):
        raise ValueError('Negative values.')
    if len(P) != len(Q):
        raise ValueError('Non equal size.')

    # Normalise so both vectors sum to one.
    p = P / np.sum(P)
    q = Q / np.sum(Q)

    # JSD = H((p+q)/2) - (H(p) + H(q)) / 2
    mixture_entropy = entropy((p + q) / 2.0, base=2)
    mean_entropy = (entropy(p, base=2) + entropy(q, base=2)) / 2.0
    divergence = mixture_entropy - mean_entropy

    cross_check = _jsdiv(p, q)
    if not np.allclose(divergence, cross_check, atol=10e-5, rtol=0):
        warnings.warn('Numerical values of two JSD methods don\'t agree.')

    return divergence
開發者ID:stevenygd,項目名稱:PointFlow,代碼行數:22,代碼來源:evaluation_metrics.py

示例12: js_divergence

# 需要導入模塊: from scipy import stats [as 別名]
# 或者: from scipy.stats import entropy [as 別名]
def js_divergence(P, Q):
    """Jensen-Shannon divergence between `P` and `Q`.

    Parameters
    ----------

    P, Q (np.ndarray)
        Two discrete distributions represented as 1D arrays. They are
        assumed to have the same support

    Returns
    -------

    float
        The Jensen-Shannon divergence between `P` and `Q`.

    """
    # JSD(P, Q) = (KL(P || M) + KL(Q || M)) / 2 with M the midpoint mixture.
    mixture = 0.5 * (P + Q)
    kl_p = sp_entropy(P, mixture, base=2)
    kl_q = sp_entropy(Q, mixture, base=2)
    return 0.5 * (kl_p + kl_q)
開發者ID:netsiphd,項目名稱:netrd,代碼行數:21,代碼來源:entropy.py

示例13: _get_optimal_thresholds

# 需要導入模塊: from scipy import stats [as 別名]
# 或者: from scipy.stats import entropy [as 別名]
def _get_optimal_thresholds(nd_dict, num_bins=8001, num_quantized_bins=255, logger=None):
    """Given a ndarray dict, find the optimal threshold for quantizing each value of the key."""
    # `stats` is imported guardedly at module level; None means scipy is absent.
    if stats is None:
        raise ImportError('scipy.stats is required for running entropy mode of calculating'
                          ' the optimal thresholds for quantizing FP32 ndarrays into int8.'
                          ' Please check if the scipy python bindings are installed.')
    assert isinstance(nd_dict, dict)
    if logger is not None:
        logger.info('Calculating optimal thresholds for quantization using KL divergence'
                    ' with num_bins=%d and num_quantized_bins=%d' % (num_bins, num_quantized_bins))
    th_dict = {}
    # Snapshot the keys up front: in python3 keys() is a live view and the
    # loop below deletes entries as it goes.
    for name in list(nd_dict.keys()):
        assert name in nd_dict
        min_val, max_val, min_divergence, opt_th = _get_optimal_threshold(
            nd_dict[name], num_bins=num_bins, num_quantized_bins=num_quantized_bins)
        # Release the ndarray's memory as soon as it has been processed.
        del nd_dict[name]
        # Symmetric range when negatives are present, otherwise [0, opt_th].
        th_dict[name] = (-opt_th, opt_th) if min_val < 0 else (0, opt_th)
        if logger is not None:
            logger.info('layer=%s, min_val=%f, max_val=%f, min_divergence=%f, optimal_threshold=%f'
                        % (name, min_val, max_val, min_divergence, opt_th))
    return th_dict
開發者ID:awslabs,項目名稱:dynamic-training-with-apache-mxnet-on-aws,代碼行數:29,代碼來源:quantization.py

示例14: get_entropy

# 需要導入模塊: from scipy import stats [as 別名]
# 或者: from scipy.stats import entropy [as 別名]
def get_entropy(self, vbcodeSeries):
    """Return the Shannon entropy of a series' value distribution.

    :param vbcodeSeries: pandas series of values
    :return: entropy of the set of values.
    """
    # Relative frequency of each distinct value; scipy normalises anyway,
    # but dividing by the length keeps the intent explicit.
    frequencies = vbcodeSeries.value_counts() / len(vbcodeSeries)
    return stats.entropy(frequencies)
開發者ID:egaus,項目名稱:MaliciousMacroBot,代碼行數:11,代碼來源:mmbot.py

示例15: entroy_test

# 需要導入模塊: from scipy import stats [as 別名]
# 或者: from scipy.stats import entropy [as 別名]
def entroy_test(v1, v2):
    """Similarity metric between two vectors: the smaller, the more similar.

    Returns the Wilcoxon signed-rank test statistic of ``v1`` vs ``v2``;
    a NaN statistic is mapped to 0.

    NOTE: the original body first computed ``stats.entropy(v1, v2)`` and
    then immediately overwrote the result with the Wilcoxon statistic, so
    the entropy call was dead code; it has been removed without changing
    the returned value.
    """
    result = stats.wilcoxon(v1, v2).statistic
    # NaN is the only value not equal to itself; replace it with 0.
    if result != result:
        result = 0
    return result
開發者ID:RoyZhengGao,項目名稱:edge2vec,代碼行數:8,代碼來源:transition.py


注:本文中的scipy.stats.entropy方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。