

Python special.psi Method Code Examples

This article collects typical usage examples of the Python method scipy.special.psi. If you are unsure what special.psi does, how to call it, or want to see concrete uses, the curated code examples below should help. You can also explore further usage examples from the scipy.special module.


The following presents 15 code examples of special.psi, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.

Example 1: knn_entropy

# Required import: from scipy import special [as alias]
# Or: from scipy.special import psi [as alias]
def knn_entropy(*args, k=None):
    """Entropy calculation

    Parameters
    ----------
    args : numpy.ndarray, shape = (n_samples, ) or (n_samples, n_dims)
        Data of which to calculate entropy. Each array must have the same
        number of samples.
    k : int
        Number of nearest neighbors (defaults to max(3, 1% of n_samples)).

    Returns
    -------
    entropy : float
    """
    data = vstack(args).T
    n_samples, n_dims = data.shape
    k = k if k else max(3, int(n_samples * 0.01))

    nneighbor = nearest_distances(data, k=k)
    const = psi(n_samples) - psi(k) + n_dims * log(2.)

    return (const + n_dims * log(nneighbor).mean()) 
Developer ID: msmbuilder, Project: mdentropy, Lines of code: 25, Source file: entropy.py
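
For readers without mdentropy's project-internal nearest_distances helper, the following is a minimal self-contained sketch of the same Kozachenko-Leonenko k-NN entropy estimator, using scipy.spatial.cKDTree for the max-norm k-th-neighbor distances (an illustration under these assumptions, not mdentropy's exact code):

import numpy as np
from scipy.spatial import cKDTree
from scipy.special import psi

def knn_entropy_sketch(data, k=3):
    """Kozachenko-Leonenko entropy estimate (in nats) for (n_samples, n_dims) data."""
    n_samples, n_dims = data.shape
    # Distance to the k-th nearest neighbor under the max-norm;
    # query k + 1 neighbors because each point's nearest "neighbor" is itself.
    dist, _ = cKDTree(data).query(data, k=k + 1, p=np.inf)
    nneighbor = dist[:, -1]
    const = psi(n_samples) - psi(k) + n_dims * np.log(2.)
    return const + n_dims * np.mean(np.log(nneighbor))

rng = np.random.default_rng(0)
print(knn_entropy_sketch(rng.normal(size=(2000, 1))))  # ~1.42 for a unit Gaussian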

Example 2: grassberger

# Required import: from scipy import special [as alias]
# Or: from scipy.special import psi [as alias]
def grassberger(counts):
    """Entropy calculation using Grassberger correction.
    doi:10.1016/0375-9601(88)90193-4

    Parameters
    ----------
    counts : list
        bin counts

    Returns
    -------
    entropy : float
    """
    n_samples = npsum(counts)
    return npsum(counts * (log(n_samples) -
                           nan_to_num(psi(counts)) -
                           ((-1.) ** counts / (counts + 1.)))) / n_samples 
Developer ID: msmbuilder, Project: mdentropy, Lines of code: 19, Source file: entropy.py
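
As a quick usage sketch with hypothetical counts (npsum, log, and nan_to_num in the excerpt are numpy functions imported under those names; nan_to_num guards against zero counts), the correction can be compared against the naive plug-in estimate:

import numpy as np
from scipy.special import psi

counts = np.array([8, 4, 2, 1, 1])        # hypothetical bin counts
n = counts.sum()
plugin = -np.sum((counts / n) * np.log(counts / n))
corrected = np.sum(counts * (np.log(n) - psi(counts) -
                             ((-1.) ** counts / (counts + 1.)))) / n
print(plugin, corrected)                  # the corrected estimate is slightly larger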

Example 3: entropy

# Required import: from scipy import special [as alias]
# Or: from scipy.special import psi [as alias]
def entropy(self, alpha):
        """
        Compute the differential entropy of the Dirichlet distribution.

        Parameters
        ----------
        %(_dirichlet_doc_default_callparams)s

        Returns
        -------
        h : scalar
            Entropy of the Dirichlet distribution

        """

        alpha = _dirichlet_check_parameters(alpha)

        alpha0 = np.sum(alpha)
        lnB = _lnB(alpha)
        K = alpha.shape[0]

        out = lnB + (alpha0 - K) * scipy.special.psi(alpha0) - np.sum(
            (alpha - 1) * scipy.special.psi(alpha))
        return _squeeze_output(out) 
Developer ID: ryfeus, Project: lambda-packs, Lines of code: 26, Source file: _multivariate.py
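
Since SciPy also exposes this quantity through scipy.stats.dirichlet, a minimal sanity check of the formula above might look like this (a sketch, not part of the excerpted module):

import numpy as np
from scipy.special import psi, gammaln
from scipy.stats import dirichlet

alpha = np.array([2.0, 3.0, 5.0])
alpha0, K = alpha.sum(), alpha.size
lnB = gammaln(alpha).sum() - gammaln(alpha0)   # log multivariate beta function
h = lnB + (alpha0 - K) * psi(alpha0) - np.sum((alpha - 1) * psi(alpha))
print(h, dirichlet(alpha).entropy())           # the two values agree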

Example 4: update_expectations

# Required import: from scipy import special [as alias]
# Or: from scipy.special import psi [as alias]
def update_expectations(self):
        """
        Since we're doing lazy updates on lambda, at any given moment
        the current state of lambda may not be accurate. This function
        updates all of the elements of lambda and Elogbeta
        so that if (for example) we want to print out the
        topics we've learned we'll get the correct behavior.
        """
        for w in range(self.m_W):
            self.m_lambda[:, w] *= np.exp(self.m_r[-1] -
                                          self.m_r[self.m_timestamp[w]])
        self.m_Elogbeta = sp.psi(self.m_eta + self.m_lambda) - \
            sp.psi(self.m_W * self.m_eta + self.m_lambda_sum[:, np.newaxis])

        self.m_timestamp[:] = self.m_updatect
        self.m_status_up_to_date = True 
Developer ID: largelymfs, Project: topical_word_embeddings, Lines of code: 18, Source file: hdpmodel.py
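
The Elogbeta update relies on the standard Dirichlet identity E[log theta_w] = psi(lambda_w) - psi(sum_w lambda_w). A small Monte Carlo check of that identity (independent of the model class above, with made-up parameters) is sketched below:

import numpy as np
from scipy.special import psi

lam = np.array([2.0, 3.0, 7.5])               # hypothetical Dirichlet parameters
analytic = psi(lam) - psi(lam.sum())
samples = np.random.default_rng(0).dirichlet(lam, size=200_000)
print(analytic)
print(np.log(samples).mean(axis=0))           # close to the analytic values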

Example 5: test_polygamma

# Required import: from scipy import special [as alias]
# Or: from scipy.special import psi [as alias]
def test_polygamma(self):
        poly2 = special.polygamma(2,1)
        poly3 = special.polygamma(3,1)
        assert_almost_equal(poly2,-2.4041138063,10)
        assert_almost_equal(poly3,6.4939394023,10)

        # Test polygamma(0, x) == psi(x)
        x = [2, 3, 1.1e14]
        assert_almost_equal(special.polygamma(0, x), special.psi(x))

        # Test broadcasting
        n = [0, 1, 2]
        x = [0.5, 1.5, 2.5]
        expected = [-1.9635100260214238, 0.93480220054467933,
                    -0.23620405164172739]
        assert_almost_equal(special.polygamma(n, x), expected)
        expected = np.vstack([expected]*2)
        assert_almost_equal(special.polygamma(n, np.vstack([x]*2)),
                            expected)
        assert_almost_equal(special.polygamma(np.vstack([n]*2), x),
                            expected)
Developer ID: ktraunmueller, Project: Computable, Lines of code: 23, Source file: test_basic.py
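
The relation under test, polygamma(0, x) == psi(x), holds because psi is the zeroth member of the polygamma family (polygamma(n, x) is the n-th derivative of psi(x)). A quick demonstration, including a finite-difference check that polygamma(1, x) is the derivative of psi:

import numpy as np
from scipy import special

x = np.linspace(0.5, 10.0, 5)
print(np.allclose(special.polygamma(0, x), special.psi(x)))      # True
h = 1e-6  # central finite difference for psi'(x)
print(np.allclose(special.polygamma(1, x),
                  (special.psi(x + h) - special.psi(x - h)) / (2 * h)))  # True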

Example 6: expected_log_p

# Required import: from scipy import special [as alias]
# Or: from scipy.special import psi [as alias]
def expected_log_p(self):
        """
        Compute the expected log probability of a connection, averaging over c
        :return:
        """
        if self.fixed:
            E_ln_p = np.log(self.P)
        else:
            E_ln_p = np.zeros((self.K, self.K))
            for c1 in range(self.C):
                for c2 in range(self.C):
                    # Get the KxK matrix of joint class assignment probabilities
                    pc1c2 = self.mf_m[:,c1][:, None] * self.mf_m[:,c2][None, :]

                    # Accumulate the expected log probability of a connection for this pair of classes
                    E_ln_p += pc1c2 * (psi(self.mf_tau1[c1,c2])
                                       - psi(self.mf_tau0[c1,c2] + self.mf_tau1[c1,c2]))

        if not self.allow_self_connections:
            np.fill_diagonal(E_ln_p, -np.inf)

        return E_ln_p 
Developer ID: slinderman, Project: pyhawkes, Lines of code: 24, Source file: network.py
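
The psi terms come from the standard Beta expectation: if p ~ Beta(tau1, tau0), then E[ln p] = psi(tau1) - psi(tau1 + tau0). A small Monte Carlo sanity check with made-up parameters:

import numpy as np
from scipy.special import psi

tau1, tau0 = 3.0, 5.0
analytic = psi(tau1) - psi(tau1 + tau0)
p = np.random.default_rng(0).beta(tau1, tau0, size=200_000)
print(analytic, np.log(p).mean())   # the two values agree closely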

Example 7: expected_log_notp

# Required import: from scipy import special [as alias]
# Or: from scipy.special import psi [as alias]
def expected_log_notp(self):
        """
        Compute the expected log probability of NO connection, averaging over c
        :return:
        """
        if self.fixed:
            E_ln_notp = np.log(1.0 - self.P)
        else:
            E_ln_notp = np.zeros((self.K, self.K))
            for c1 in range(self.C):
                for c2 in range(self.C):
                    # Get the KxK matrix of joint class assignment probabilities
                    pc1c2 = self.mf_m[:,c1][:, None] * self.mf_m[:,c2][None, :]

                    # Accumulate the expected log probability of NO connection for this pair of classes
                    E_ln_notp += pc1c2 * (psi(self.mf_tau0[c1,c2])
                                          - psi(self.mf_tau0[c1,c2] + self.mf_tau1[c1,c2]))

        if not self.allow_self_connections:
            np.fill_diagonal(E_ln_notp, 0.0)

        return E_ln_notp 
Developer ID: slinderman, Project: pyhawkes, Lines of code: 24, Source file: network.py

Example 8: expected_log_v

# Required import: from scipy import special [as alias]
# Or: from scipy.special import psi [as alias]
def expected_log_v(self):
        """
        Compute the expected log scale of a connection, averaging over c
        :return:
        """
        if self.fixed:
            return np.log(self.V)

        E_log_v = np.zeros((self.K, self.K))
        for c1 in range(self.C):
            for c2 in range(self.C):
                # Get the KxK matrix of joint class assignment probabilities
                pc1c2 = self.mf_m[:,c1][:, None] * self.mf_m[:,c2][None, :]

                # Accumulate the expected log scale for this pair of classes
                E_log_v += pc1c2 * (psi(self.mf_alpha[c1,c2])
                                    - np.log(self.mf_beta[c1,c2]))
        return E_log_v 
Developer ID: slinderman, Project: pyhawkes, Lines of code: 20, Source file: network.py
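
Here the corresponding Gamma expectation is E[ln v] = psi(alpha) - ln(beta) for v ~ Gamma(alpha, rate beta); a sketched check (note that numpy parameterizes the Gamma by scale = 1/beta):

import numpy as np
from scipy.special import psi

alpha, beta = 4.0, 2.5
analytic = psi(alpha) - np.log(beta)
v = np.random.default_rng(0).gamma(alpha, scale=1.0 / beta, size=200_000)
print(analytic, np.log(v).mean())   # the two values agree closely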

Example 9: weighted_log_likelihood

# Required import: from scipy import special [as alias]
# Or: from scipy.special import psi [as alias]
def weighted_log_likelihood(v_hat, m, n, reads, diff_test):
    '''This function returns the derivative of the log likelihood
       of the current and adjacent windows using a triangular weight.'''
    equation = 0
    n_window = len(reads)
    baseline = numpy.mean(reads[int(n_window/2),0:m])
    for idx in range(n_window):
        x = reads[idx, 0:m]  # x and m refer to the test sample
        y = reads[idx, m:(m+n)]  # y and n refer to the control sample
        weight_x = numpy.mean(x)/baseline
        weight_y = numpy.mean(y)/baseline
        if n == 1: 
            weight_y = 0
        log_likelihood = (-(m*weight_x+n*weight_y)*psi(v_hat) +
                numpy.sum(psi(v_hat+x))*weight_x + 
                numpy.sum(psi(v_hat+y))*weight_y + 
                m*numpy.log(v_hat/(v_hat+numpy.mean(x)))*weight_x + 
                n*numpy.log(v_hat/(v_hat+numpy.mean(y)))*weight_y)
        equation = (equation + 
                log_likelihood*(1-(abs(float(n_window)/2-idx-0.5)/(float(n_window)/2+1))))
    return equation 
Developer ID: shawnzhangyx, Project: PePr, Lines of code: 23, Source file: sigTests.py

Example 10: normalized_entropy

# Required import: from scipy import special [as alias]
# Or: from scipy.special import psi [as alias]
def normalized_entropy(x, tx, m=2):
    x = normalize(x, tx)
    try:
        cx = Counter(x)
    except TypeError:
        cx = Counter(x.flat)

    if len(cx) < 2:
        return 0
    xk = np.array(list(cx.keys()), dtype=float)
    xk.sort()
    delta = (xk[1:] - xk[:-1]) / m
    counter = np.array([cx[i] for i in xk], dtype=float)
    hx = np.sum(counter[1:] * np.log(delta / counter[1:])) / len(x)
    hx += (psi(len(delta)) - np.log(len(delta)))
    hx += np.log(len(x))
    hx -= (psi(m) - np.log(m))
    return hx 
Developer ID: FenTechSolutions, Project: CausalDiscoveryToolbox, Lines of code: 20, Source file: features.py

Example 11: uniform_divergence

# Required import: from scipy import special [as alias]
# Or: from scipy.special import psi [as alias]
def uniform_divergence(x, tx, m=2):
    x = normalize(x, tx)
    try:
        cx = Counter(x)
    except TypeError:
        cx = Counter(x.flat)
    xk = np.array(list(cx.keys()), dtype=float)
    xk.sort()
    delta = np.zeros(len(xk))
    if len(xk) > 1:
        delta[0] = xk[1] - xk[0]
        delta[1:-1] = (xk[m:] - xk[:-m]) / m
        delta[-1] = xk[-1] - xk[-2]
    else:
        delta = np.array(np.sqrt(12))
    counter = np.array([cx[i] for i in xk], dtype=float)
    delta = delta / np.sum(delta)
    hx = np.sum(counter * np.log(counter / delta)) / len(x)
    hx -= np.log(len(x))
    hx += (psi(m) - np.log(m))
    return hx 
Developer ID: FenTechSolutions, Project: CausalDiscoveryToolbox, Lines of code: 23, Source file: features.py

Example 12: chaowangjost

# Required import: from scipy import special [as alias]
# Or: from scipy.special import psi [as alias]
def chaowangjost(counts):
    """Entropy calculation using Chao, Wang, Jost correction.
    doi: 10.1111/2041-210X.12108

    Parameters
    ----------
    counts : list
        bin counts

    Returns
    -------
    entropy : float
    """
    n_samples = npsum(counts)
    bcbc = bincount(counts.astype(int))
    if len(bcbc) < 3:
        return grassberger(counts)
    if bcbc[2] == 0:
        if bcbc[1] == 0:
            A = 1.
        else:
            A = 2. / ((n_samples - 1.) * (bcbc[1] - 1.) + 2.)
    else:
        A = 2. * bcbc[2] / ((n_samples - 1.) * (bcbc[1] - 1.) +
                            2. * bcbc[2])
    pr = arange(1, int(n_samples))
    pr = 1. / pr * (1. - A) ** pr
    entropy = npsum(counts / n_samples * (psi(n_samples) -
                    nan_to_num(psi(counts))))

    if bcbc[1] > 0 and A != 1.:
        entropy += nan_to_num(bcbc[1] / n_samples *
                              (1 - A) ** (1 - n_samples *
                                          (-log(A) - npsum(pr))))
    return entropy 
Developer ID: msmbuilder, Project: mdentropy, Lines of code: 37, Source file: entropy.py
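
The bcbc array above is a count-of-counts: bcbc[1] is the number of singleton bins (f1) and bcbc[2] the number of doubletons (f2), which drive the coverage adjustment A. A tiny illustration with hypothetical counts:

import numpy as np

counts = np.array([4, 1, 1, 2, 1, 3])   # hypothetical bin counts
f = np.bincount(counts)                  # f[c] = number of bins observed exactly c times
print(f[1], f[2])                        # 3 singletons, 1 doubleton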

Example 13: knn_mutinf

# Required import: from scipy import special [as alias]
# Or: from scipy.special import psi [as alias]
def knn_mutinf(x, y, k=None, boxsize=None):
    """k-NN mutual information calculation

    Parameters
    ----------
    x : array_like, shape = (n_samples, n_dim)
        Independent variable
    y : array_like, shape = (n_samples, n_dim)
        Independent variable
    k : int
        Number of nearest neighbors.
    boxsize : float (or None)
        Wrap space between [0., boxsize)

    Returns
    -------
    mi : float
    """
    data = hstack((x, y))

    k = k if k else max(3, int(data.shape[0] * 0.01))

    # Find nearest neighbors in joint space, p=inf means max-norm
    dvec = nearest_distances(data, k=k)
    a, b, c, d = (avgdigamma(atleast_2d(x).reshape(data.shape[0], -1), dvec),
                  avgdigamma(atleast_2d(y).reshape(data.shape[0], -1), dvec),
                  psi(k), psi(data.shape[0]))
    return max((-a - b + c + d), 0.) 
Developer ID: msmbuilder, Project: mdentropy, Lines of code: 30, Source file: information.py
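
For readers without the project-internal nearest_distances and avgdigamma helpers, here is a self-contained sketch of the same KSG estimator (Kraskov et al. 2004, algorithm 1) built directly on scipy.spatial.cKDTree; it illustrates the technique under these assumptions rather than reproducing mdentropy's code:

import numpy as np
from scipy.spatial import cKDTree
from scipy.special import psi

def ksg_mutinf_sketch(x, y, k=3):
    x, y = x.reshape(len(x), -1), y.reshape(len(y), -1)
    xy, n = np.hstack((x, y)), len(x)
    # Distance to the k-th neighbor in the joint space (max-norm);
    # query k + 1 neighbors because each point's nearest "neighbor" is itself.
    eps = cKDTree(xy).query(xy, k + 1, p=np.inf)[0][:, -1]
    tree_x, tree_y = cKDTree(x), cKDTree(y)
    # Count strictly closer neighbors in each marginal space (minus the point itself).
    nx = np.array([len(tree_x.query_ball_point(xi, ei - 1e-12, p=np.inf)) - 1
                   for xi, ei in zip(x, eps)])
    ny = np.array([len(tree_y.query_ball_point(yi, ei - 1e-12, p=np.inf)) - 1
                   for yi, ei in zip(y, eps)])
    return max(psi(k) + psi(n) - np.mean(psi(nx + 1) + psi(ny + 1)), 0.)

rng = np.random.default_rng(0)
x = rng.normal(size=1000)
y = x + 0.5 * rng.normal(size=1000)   # dependent pair: MI well above zero
print(ksg_mutinf_sketch(x, y), ksg_mutinf_sketch(x, rng.normal(size=1000)))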

Example 14: knn_cmutinf

# Required import: from scipy import special [as alias]
# Or: from scipy.special import psi [as alias]
def knn_cmutinf(x, y, z, k=None, boxsize=None):
    """Entropy calculation

    Parameters
    ----------
    x : array_like, shape = (n_samples, n_dim)
        Conditioned variable
    y : array_like, shape = (n_samples, n_dim)
        Conditioned variable
    z : array_like, shape = (n_samples, n_dim)
        Conditional variable
    k : int
        Number of nearest neighbors.
    boxsize : float (or None)
        Wrap space between [0., boxsize)

    Returns
    -------
    cmi : float
    """
    data = hstack((x, y, z))

    k = k if k else max(3, int(data.shape[0] * 0.01))

    # Find nearest neighbors in joint space, p=inf means max-norm
    dvec = nearest_distances(data, k=k)
    a, b, c, d = (avgdigamma(hstack((x, z)), dvec),
                  avgdigamma(hstack((y, z)), dvec),
                  avgdigamma(atleast_2d(z).reshape(data.shape[0], -1), dvec),
                  psi(k))
    return max((-a - b + c + d), 0.) 
Developer ID: msmbuilder, Project: mdentropy, Lines of code: 33, Source file: information.py

Example 15: _beta_mle_a

# Required import: from scipy import special [as alias]
# Or: from scipy.special import psi [as alias]
def _beta_mle_a(a, b, n, s1):
    # The zeros of this function give the MLE for `a`, with
    # `b`, `n` and `s1` given.  `s1` is the sum of the logs of
    # the data. `n` is the number of data points.
    psiab = sc.psi(a + b)
    func = s1 - n * (-psiab + sc.psi(a))
    return func 
Developer ID: ryfeus, Project: lambda-packs, Lines of code: 9, Source file: _continuous_distns.py
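
To show how such a function is typically used (a hedged sketch; in SciPy itself this equation is wrapped inside the beta distribution's fitting routine), one can hand it to a root finder with b held fixed:

import numpy as np
from scipy import special as sc
from scipy.optimize import brentq

rng = np.random.default_rng(0)
data = rng.beta(2.5, 4.0, size=100_000)
b = 4.0                                 # suppose b is known
n, s1 = len(data), np.log(data).sum()
# Zero of _beta_mle_a in a: s1 - n * (psi(a) - psi(a + b)) = 0
a_hat = brentq(lambda a: s1 - n * (sc.psi(a) - sc.psi(a + b)), 1e-3, 1e3)
print(a_hat)                            # close to the true a = 2.5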


Note: The scipy.special.psi examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many programmers; copyright remains with the original authors. Please consult each project's license before distributing or using the code, and do not reproduce this article without permission.