

Python norm.cdf Method Code Examples

This article compiles typical usage examples of the Python method scipy.stats.distributions.norm.cdf. If you are wondering what norm.cdf does, how to call it, or where to find working examples of it, the curated code samples below should help. You can also explore further usage examples from the containing module, scipy.stats.distributions.norm.


The following presents 15 code examples of the norm.cdf method, ordered by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code samples.

Example 1: _ecdf

# Required import: from scipy.stats.distributions import norm [as alias]
# The cdf method is then used as norm.cdf(...)
# Also uses: import numpy as np
def _ecdf(x):
    '''no frills empirical cdf used in fdrcorrection
    '''
    nobs = len(x)
    return np.arange(1, nobs + 1) / float(nobs) 
Developer: int-brain-lab, Project: ibllib, Lines of code: 7, Source file: _statsmodels.py
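
A minimal sketch (not taken from ibllib) of how the ranks returned by _ecdf are typically combined with norm.cdf-derived p-values in fdrcorrection's Benjamini-Hochberg step; the z-scores below are illustrative assumptions.

import numpy as np
from scipy.stats.distributions import norm

z = np.array([2.5, 1.1, 0.3, 3.0])               # assumed z-scores
pvals = 2 * norm.cdf(-np.abs(z))                 # two-sided p-values
pvals_sorted = np.sort(pvals)
ecdf_factor = np.arange(1, len(pvals) + 1) / float(len(pvals))  # what _ecdf returns
bh_scaled = pvals_sorted / ecdf_factor           # Benjamini-Hochberg scaling step
print(bh_scaled)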

Example 2: compare_medians_ms

# Required import: from scipy.stats.distributions import norm [as alias]
# The cdf method is then used as norm.cdf(...)
# Also uses: numpy as np, numpy.ma as ma, and mstats.stde_median (mstats_extras.py imports mstats_basic as mstats)
def compare_medians_ms(group_1, group_2, axis=None):
    """
    Compares the medians from two independent groups along the given axis.

    The comparison is performed using the McKean-Schrader estimate of the
    standard error of the medians.

    Parameters
    ----------
    group_1 : array_like
        First dataset.  Has to be of size >=7.
    group_2 : array_like
        Second dataset.  Has to be of size >=7.
    axis : int, optional
        Axis along which the medians are estimated. If None, the arrays are
        flattened.  If `axis` is not None, then `group_1` and `group_2`
        should have the same shape.

    Returns
    -------
    compare_medians_ms : {float, ndarray}
        If `axis` is None, then returns a float, otherwise returns a 1-D
        ndarray of floats with a length equal to the length of `group_1`
        along `axis`.

    """
    (med_1, med_2) = (ma.median(group_1,axis=axis), ma.median(group_2,axis=axis))
    (std_1, std_2) = (mstats.stde_median(group_1, axis=axis),
                      mstats.stde_median(group_2, axis=axis))
    W = np.abs(med_1 - med_2) / ma.sqrt(std_1**2 + std_2**2)
    return 1 - norm.cdf(W) 
Developer: ryfeus, Project: lambda-packs, Lines of code: 33, Source file: mstats_extras.py
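
A minimal usage sketch with assumed data, calling the public wrapper scipy.stats.mstats.compare_medians_ms that corresponds to the function above.

import numpy as np
from scipy.stats import mstats

rng = np.random.RandomState(0)
group_1 = rng.normal(loc=0.0, size=50)           # assumed sample 1
group_2 = rng.normal(loc=0.5, size=50)           # assumed sample 2
p = mstats.compare_medians_ms(group_1, group_2)  # returns 1 - norm.cdf(W)
print(p)                                         # small p suggests the medians differ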

Example 3: generate_logistic

# Required import: from scipy.stats.distributions import norm [as alias]
# The cdf method is then used as norm.cdf(...)
# Also uses: import numpy as np
def generate_logistic():

    # Number of clusters
    nclust = 100

    # Regression coefficients
    beta = np.array([1,-2,1], dtype=np.float64)

    ## Covariate correlations
    r = 0.4

    ## Cluster effects of covariates
    rx = 0.5

    ## Within-cluster outcome dependence
    re = 0.3

    p = len(beta)

    OUT = open("gee_logistic_1.csv", "w")

    for i in range(nclust):

        n = np.random.randint(3, 6) # Cluster size

        x = np.random.normal(size=(n,p))
        x = rx*np.random.normal() + np.sqrt(1-rx**2)*x
        x[:,2] = r*x[:,1] + np.sqrt(1-r**2)*x[:,2]
        pr = 1/(1+np.exp(-np.dot(x, beta)))
        z = re*np.random.normal() +\
            np.sqrt(1-re**2)*np.random.normal(size=n)
        u = norm.cdf(z)
        y = 1*(u < pr)

        for j in range(n):
            OUT.write("%d,%d," % (i, y[j]))
            OUT.write(",".join(["%.3f" % b for b in x[j,:]]) + "\n")

    OUT.close() 
Developer: birforce, Project: vnpy_crypto, Lines of code: 41, Source file: gee_generate_tests.py
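
A minimal sketch (assumed values, not part of the statsmodels test generator) isolating the trick used above: norm.cdf maps a correlated normal draw to Uniform(0, 1) by the probability integral transform, and thresholding those uniforms against the logistic probabilities yields within-cluster dependent binary outcomes.

import numpy as np
from scipy.stats.distributions import norm

re = 0.3                                  # within-cluster outcome dependence
pr = np.array([0.2, 0.5, 0.8])            # assumed per-observation success probabilities
z = re*np.random.normal() + np.sqrt(1-re**2)*np.random.normal(size=3)
u = norm.cdf(z)                           # correlated uniforms on (0, 1)
y = 1*(u < pr)                            # correlated Bernoulli outcomes
print(u, y)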

Example 4: generate_ordinal

# Required import: from scipy.stats.distributions import norm [as alias]
# The cdf method is then used as norm.cdf(...)
# Also uses: import numpy as np
def generate_ordinal():

    ## Regression coefficients
    beta = np.zeros(5, dtype=np.float64)
    beta[2] = 1
    beta[4] = -1

    rz = 0.5

    OUT = open("gee_ordinal_1.csv", "w")

    for i in range(200):

        n = np.random.randint(3, 6) # Cluster size

        x = np.random.normal(size=(n,5))
        for j in range(5):
            x[:,j] += np.random.normal()
        pr = np.dot(x, beta)
        pr = np.array([1,0,-0.5]) + pr[:,None]
        pr = 1 / (1 + np.exp(-pr))

        z = rz*np.random.normal() +\
            np.sqrt(1-rz**2)*np.random.normal(size=n)
        u = norm.cdf(z)

        y = (u[:,None] > pr).sum(1)

        for j in range(n):
            OUT.write("%d,%d," % (i, y[j]))
            OUT.write(",".join(["%.3f" % b for b in x[j,:]]) + "\n")

    OUT.close() 
Developer: birforce, Project: vnpy_crypto, Lines of code: 35, Source file: gee_generate_tests.py

Example 5: generate_nominal

# Required import: from scipy.stats.distributions import norm [as alias]
# The cdf method is then used as norm.cdf(...)
# Also uses: import numpy as np
def generate_nominal():

    ## Regression coefficients
    beta1 = np.r_[0.5, 0.5]
    beta2 = np.r_[-1, -0.5]
    p = len(beta1)

    rz = 0.5

    OUT = open("gee_nominal_1.csv", "w")

    for i in range(200):

        n = np.random.randint(3, 6) # Cluster size

        x = np.random.normal(size=(n,p))
        x[:,0] = 1
        for j in range(1,x.shape[1]):
            x[:,j] += np.random.normal()
        pr1 = np.exp(np.dot(x, beta1))[:,None]
        pr2 = np.exp(np.dot(x, beta2))[:,None]
        den = 1 + pr1 + pr2
        pr = np.hstack((pr1/den, pr2/den, 1/den))
        cpr = np.cumsum(pr, 1)

        z = rz*np.random.normal() +\
            np.sqrt(1-rz**2)*np.random.normal(size=n)
        u = norm.cdf(z)

        y = (u[:,None] > cpr).sum(1)

        for j in range(n):
            OUT.write("%d,%d," % (i, y[j]))
            OUT.write(",".join(["%.3f" % b for b in x[j,:]]) + "\n")

    OUT.close() 
Developer: birforce, Project: vnpy_crypto, Lines of code: 38, Source file: gee_generate_tests.py
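
A minimal sketch (assumed probabilities) of the sampling step shared by the two generators above: comparing a norm.cdf-derived uniform with the cumulative category probabilities draws a categorical outcome.

import numpy as np
from scipy.stats.distributions import norm

cpr = np.array([[0.2, 0.7, 1.0],          # assumed cumulative category probabilities per row
                [0.5, 0.8, 1.0]])
u = norm.cdf(np.random.normal(size=2))    # uniforms via the probability integral transform
y = (u[:, None] > cpr).sum(1)             # index of the drawn category (0, 1 or 2)
print(u, y)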

Example 6: _ecdf

# Required import: from scipy.stats.distributions import norm [as alias]
# The cdf method is then used as norm.cdf(...)
# Also uses: import numpy as np
def _ecdf(x):
    '''no frills empirical cdf used in fdrcorrection
    '''
    nobs = len(x)
    return np.arange(1,nobs+1)/float(nobs) 
Developer: birforce, Project: vnpy_crypto, Lines of code: 7, Source file: multitest.py

Example 7: compare_medians_ms

# Required import: from scipy.stats.distributions import norm [as alias]
# The cdf method is then used as norm.cdf(...)
# Also uses: numpy as np, numpy.ma as ma, and mstats.stde_median (mstats_extras.py imports mstats_basic as mstats)
def compare_medians_ms(group_1, group_2, axis=None):
    """
    Compares the medians from two independent groups along the given axis.

    The comparison is performed using the McKean-Schrader estimate of the
    standard error of the medians.

    Parameters
    ----------
    group_1 : array_like
        First dataset.
    group_2 : array_like
        Second dataset.
    axis : int, optional
        Axis along which the medians are estimated. If None, the arrays are
        flattened.  If `axis` is not None, then `group_1` and `group_2`
        should have the same shape.

    Returns
    -------
    compare_medians_ms : {float, ndarray}
        If `axis` is None, then returns a float, otherwise returns a 1-D
        ndarray of floats with a length equal to the length of `group_1`
        along `axis`.

    """
    (med_1, med_2) = (ma.median(group_1,axis=axis), ma.median(group_2,axis=axis))
    (std_1, std_2) = (mstats.stde_median(group_1, axis=axis),
                      mstats.stde_median(group_2, axis=axis))
    W = np.abs(med_1 - med_2) / ma.sqrt(std_1**2 + std_2**2)
    return 1 - norm.cdf(W) 
Developer: ktraunmueller, Project: Computable, Lines of code: 33, Source file: mstats_extras.py

Example 8: get_candidate_window2

# Required import: from scipy.stats.distributions import norm [as alias]
# The cdf method is then used as norm.cdf(...)
# Also uses: import numpy
def get_candidate_window2( x, y, repx, repy, threshold):
    # using PHI = 1e6 to prescreen the genome 
    PHI = 1e6
    GAMMA_HAT = 1.0
    tau = numpy.sqrt( y*( repx*x*(PHI+y) + repy*y*(PHI+x))/repx/repy/PHI/x**3)
    gamma = y/x
    z = (numpy.log(gamma)-numpy.log(GAMMA_HAT))*gamma/tau
    pvalue = norm.cdf(-z)
    pre_idx_list = numpy.where(pvalue[10:-10]<threshold)[0]+10
    return numpy.array(pre_idx_list) 
Developer: shawnzhangyx, Project: PePr, Lines of code: 12, Source file: sigTests.py
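
A minimal usage sketch with synthetic count vectors (not PePr data); it assumes the get_candidate_window2 function defined above is in scope, together with its numpy and norm imports.

import numpy
from scipy.stats.distributions import norm

x = numpy.random.poisson(20, size=100).astype(float) + 1   # assumed control window counts
y = numpy.random.poisson(25, size=100).astype(float) + 1   # assumed treatment window counts
idx = get_candidate_window2(x, y, 2, 2, 0.01)
print(idx)   # indices (first and last 10 windows excluded) with norm.cdf(-z) below the cutoff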

Example 9: hdquantiles_sd

# Required import: from scipy.stats.distributions import norm [as alias]
# The cdf method is then used as norm.cdf(...)
# Also uses: numpy as np, numpy.ma as ma, beta from scipy.stats.distributions, and float_/int_ from numpy
def hdquantiles_sd(data, prob=list([.25,.5,.75]), axis=None):
    """
    The standard error of the Harrell-Davis quantile estimates by jackknife.

    Parameters
    ----------
    data : array_like
        Data array.
    prob : sequence, optional
        Sequence of quantiles to compute.
    axis : int, optional
        Axis along which to compute the quantiles. If None, use a flattened
        array.

    Returns
    -------
    hdquantiles_sd : MaskedArray
        Standard error of the Harrell-Davis quantile estimates.

    See Also
    --------
    hdquantiles

    """
    def _hdsd_1D(data, prob):
        "Computes the std error for 1D arrays."
        xsorted = np.sort(data.compressed())
        n = len(xsorted)

        hdsd = np.empty(len(prob), float_)
        if n < 2:
            hdsd.flat = np.nan

        vv = np.arange(n) / float(n-1)
        betacdf = beta.cdf

        for (i,p) in enumerate(prob):
            _w = betacdf(vv, (n+1)*p, (n+1)*(1-p))
            w = _w[1:] - _w[:-1]
            mx_ = np.fromiter([np.dot(w,xsorted[np.r_[list(range(0,k)),
                                                      list(range(k+1,n))].astype(int_)])
                                  for k in range(n)], dtype=float_)
            mx_var = np.array(mx_.var(), copy=False, ndmin=1) * n / float(n-1)
            hdsd[i] = float(n-1) * np.sqrt(np.diag(mx_var).diagonal() / float(n))
        return hdsd

    # Initialization & checks
    data = ma.array(data, copy=False, dtype=float_)
    p = np.array(prob, copy=False, ndmin=1)
    # Computes quantiles along axis (or globally)
    if (axis is None):
        result = _hdsd_1D(data, p)
    else:
        if data.ndim > 2:
            raise ValueError("Array 'data' must be at most two dimensional, "
                             "but got data.ndim = %d" % data.ndim)
        result = ma.apply_along_axis(_hdsd_1D, axis, data, p)

    return ma.fix_invalid(result, copy=False).ravel() 
Developer: ryfeus, Project: lambda-packs, Lines of code: 61, Source file: mstats_extras.py
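
A minimal usage sketch with assumed data, calling the public wrapper scipy.stats.mstats.hdquantiles_sd that corresponds to the function above.

import numpy as np
from scipy.stats import mstats

data = np.random.RandomState(42).normal(size=200)   # assumed sample
se = mstats.hdquantiles_sd(data, prob=[0.25, 0.5, 0.75])
print(se)   # jackknife standard errors of the three Harrell-Davis quantile estimates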

Example 10: mjci

# Required import: from scipy.stats.distributions import norm [as alias]
# The cdf method is then used as norm.cdf(...)
# Also uses: numpy as np, numpy.ma as ma, beta from scipy.stats.distributions, and float_/int_ from numpy
def mjci(data, prob=[0.25,0.5,0.75], axis=None):
    """
    Returns the Maritz-Jarrett estimators of the standard error of selected
    experimental quantiles of the data.

    Parameters
    ----------
    data : ndarray
        Data array.
    prob : sequence, optional
        Sequence of quantiles to compute.
    axis : int or None, optional
        Axis along which to compute the quantiles. If None, use a flattened
        array.

    """
    def _mjci_1D(data, p):
        data = np.sort(data.compressed())
        n = data.size
        prob = (np.array(p) * n + 0.5).astype(int_)
        betacdf = beta.cdf

        mj = np.empty(len(prob), float_)
        x = np.arange(1,n+1, dtype=float_) / n
        y = x - 1./n
        for (i,m) in enumerate(prob):
            W = betacdf(x,m-1,n-m) - betacdf(y,m-1,n-m)
            C1 = np.dot(W,data)
            C2 = np.dot(W,data**2)
            mj[i] = np.sqrt(C2 - C1**2)
        return mj

    data = ma.array(data, copy=False)
    if data.ndim > 2:
        raise ValueError("Array 'data' must be at most two dimensional, "
                         "but got data.ndim = %d" % data.ndim)

    p = np.array(prob, copy=False, ndmin=1)
    # Computes quantiles along axis (or globally)
    if (axis is None):
        return _mjci_1D(data, p)
    else:
        return ma.apply_along_axis(_mjci_1D, axis, data, p) 
Developer: ryfeus, Project: lambda-packs, Lines of code: 45, Source file: mstats_extras.py
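
A minimal usage sketch with assumed data for scipy.stats.mstats.mjci, the public counterpart of the function above.

import numpy as np
from scipy.stats import mstats

data = np.random.RandomState(1).normal(size=150)   # assumed sample
se = mstats.mjci(data, prob=[0.25, 0.5, 0.75])
print(se)   # Maritz-Jarrett standard errors of the three quantile estimates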

Example 11: median_cihs

# Required import: from scipy.stats.distributions import norm [as alias]
# The cdf method is then used as norm.cdf(...)
# Also uses: numpy as np, numpy.ma as ma, and binom from scipy.stats.distributions
def median_cihs(data, alpha=0.05, axis=None):
    """
    Computes the alpha-level confidence interval for the median of the data.

    Uses the Hettmansperger-Sheather method.

    Parameters
    ----------
    data : array_like
        Input data. Masked values are discarded. The input should be 1D only,
        or `axis` should be set to None.
    alpha : float, optional
        Confidence level of the intervals.
    axis : int or None, optional
        Axis along which to compute the quantiles. If None, use a flattened
        array.

    Returns
    -------
    median_cihs
        Alpha level confidence interval.

    """
    def _cihs_1D(data, alpha):
        data = np.sort(data.compressed())
        n = len(data)
        alpha = min(alpha, 1-alpha)
        k = int(binom._ppf(alpha/2., n, 0.5))
        gk = binom.cdf(n-k,n,0.5) - binom.cdf(k-1,n,0.5)
        if gk < 1-alpha:
            k -= 1
            gk = binom.cdf(n-k,n,0.5) - binom.cdf(k-1,n,0.5)
        gkk = binom.cdf(n-k-1,n,0.5) - binom.cdf(k,n,0.5)
        I = (gk - 1 + alpha)/(gk - gkk)
        lambd = (n-k) * I / float(k + (n-2*k)*I)
        lims = (lambd*data[k] + (1-lambd)*data[k-1],
                lambd*data[n-k-1] + (1-lambd)*data[n-k])
        return lims
    data = ma.array(data, copy=False)
    # Computes quantiles along axis (or globally)
    if (axis is None):
        result = _cihs_1D(data, alpha)
    else:
        if data.ndim > 2:
            raise ValueError("Array 'data' must be at most two dimensional, "
                             "but got data.ndim = %d" % data.ndim)
        result = ma.apply_along_axis(_cihs_1D, axis, data, alpha)

    return result 
Developer: ryfeus, Project: lambda-packs, Lines of code: 51, Source file: mstats_extras.py
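
A minimal usage sketch with assumed data for scipy.stats.mstats.median_cihs, the public counterpart of the function above.

import numpy as np
from scipy.stats import mstats

data = np.random.RandomState(7).normal(loc=10.0, size=60)   # assumed sample
lo, hi = mstats.median_cihs(data, alpha=0.05)
print(lo, hi)   # 95% confidence interval for the median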

Example 12: test_scoretest

# Required import: from scipy.stats.distributions import norm [as alias]
# The cdf method is then used as norm.cdf(...)
# Also uses: numpy as np, assert_almost_equal/assert_array_less from numpy.testing, and GEE, Gaussian, Independence from statsmodels.genmod
def test_scoretest(self):
        # Regression tests

        np.random.seed(6432)
        n = 200  # Must be divisible by 4
        exog = np.random.normal(size=(n, 4))
        endog = exog[:, 0] + exog[:, 1] + exog[:, 2]
        endog += 3 * np.random.normal(size=n)
        group = np.kron(np.arange(n / 4), np.ones(4))

        # Test under the null.
        L = np.array([[1., -1, 0, 0]])
        R = np.array([0., ])
        family = Gaussian()
        va = Independence()
        mod1 = GEE(endog, exog, group, family=family,
                   cov_struct=va, constraint=(L, R))
        mod1.fit()
        assert_almost_equal(mod1.score_test_results["statistic"],
                            1.08126334)
        assert_almost_equal(mod1.score_test_results["p-value"],
                            0.2984151086)

        # Test under the alternative.
        L = np.array([[1., -1, 0, 0]])
        R = np.array([1.0, ])
        family = Gaussian()
        va = Independence()
        mod2 = GEE(endog, exog, group, family=family,
                   cov_struct=va, constraint=(L, R))
        mod2.fit()
        assert_almost_equal(mod2.score_test_results["statistic"],
                            3.491110965)
        assert_almost_equal(mod2.score_test_results["p-value"],
                            0.0616991659)

        # Compare to Wald tests
        exog = np.random.normal(size=(n, 2))
        L = np.array([[1, -1]])
        R = np.array([0.])
        f = np.r_[1, -1]
        for i in range(10):
            endog = exog[:, 0] + (0.5 + i / 10.) * exog[:, 1] +\
                np.random.normal(size=n)
            family = Gaussian()
            va = Independence()
            mod0 = GEE(endog, exog, group, family=family,
                       cov_struct=va)
            rslt0 = mod0.fit()
            family = Gaussian()
            va = Independence()
            mod1 = GEE(endog, exog, group, family=family,
                       cov_struct=va, constraint=(L, R))
            mod1.fit()
            se = np.sqrt(np.dot(f, np.dot(rslt0.cov_params(), f)))
            wald_z = np.dot(f, rslt0.params) / se
            wald_p = 2 * norm.cdf(-np.abs(wald_z))
            score_p = mod1.score_test_results["p-value"]
            assert_array_less(np.abs(wald_p - score_p), 0.02) 
Developer: birforce, Project: vnpy_crypto, Lines of code: 61, Source file: test_gee.py
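
A minimal sketch with an assumed z-statistic, isolating the norm.cdf call from the loop above: a two-sided Wald p-value is twice the lower normal tail evaluated at minus the absolute z-score.

import numpy as np
from scipy.stats.distributions import norm

wald_z = 1.96                            # assumed Wald z-statistic
wald_p = 2 * norm.cdf(-np.abs(wald_z))   # two-sided tail probability
print(wald_p)                            # approximately 0.05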

Example 13: hdquantiles_sd

# Required import: from scipy.stats.distributions import norm [as alias]
# The cdf method is then used as norm.cdf(...)
# Also uses: numpy as np, numpy.ma as ma, beta from scipy.stats.distributions, and float_/int_ from numpy
def hdquantiles_sd(data, prob=list([.25,.5,.75]), axis=None):
    """
    The standard error of the Harrell-Davis quantile estimates by jackknife.

    Parameters
    ----------
    data : array_like
        Data array.
    prob : sequence
        Sequence of quantiles to compute.
    axis : int
        Axis along which to compute the quantiles. If None, use a flattened
        array.

    Returns
    -------
    hdquantiles_sd : MaskedArray
        Standard error of the Harrell-Davis quantile estimates.

    """
    def _hdsd_1D(data,prob):
        "Computes the std error for 1D arrays."
        xsorted = np.sort(data.compressed())
        n = len(xsorted)
        #.........
        hdsd = np.empty(len(prob), float_)
        if n < 2:
            hdsd.flat = np.nan
        #.........
        vv = np.arange(n) / float(n-1)
        betacdf = beta.cdf
        #
        for (i,p) in enumerate(prob):
            _w = betacdf(vv, (n+1)*p, (n+1)*(1-p))
            w = _w[1:] - _w[:-1]
            mx_ = np.fromiter([np.dot(w,xsorted[np.r_[list(range(0,k)),
                                                      list(range(k+1,n))].astype(int_)])
                                  for k in range(n)], dtype=float_)
            mx_var = np.array(mx_.var(), copy=False, ndmin=1) * n / float(n-1)
            hdsd[i] = float(n-1) * np.sqrt(np.diag(mx_var).diagonal() / float(n))
        return hdsd
    # Initialization & checks ---------
    data = ma.array(data, copy=False, dtype=float_)
    p = np.array(prob, copy=False, ndmin=1)
    # Computes quantiles along axis (or globally)
    if (axis is None):
        result = _hdsd_1D(data, p)
    else:
        if data.ndim > 2:
            raise ValueError("Array 'data' must be at most two dimensional, but got data.ndim = %d" % data.ndim)
        result = ma.apply_along_axis(_hdsd_1D, axis, data, p)
    #
    return ma.fix_invalid(result, copy=False).ravel()


#####--------------------------------------------------------------------------
#---- --- Confidence intervals ---
#####-------------------------------------------------------------------------- 
Developer: ktraunmueller, Project: Computable, Lines of code: 60, Source file: mstats_extras.py

Example 14: mjci

# Required import: from scipy.stats.distributions import norm [as alias]
# The cdf method is then used as norm.cdf(...)
# Also uses: numpy as np, numpy.ma as ma, beta from scipy.stats.distributions, and float_/int_ from numpy
def mjci(data, prob=[0.25,0.5,0.75], axis=None):
    """
    Returns the Maritz-Jarrett estimators of the standard error of selected
    experimental quantiles of the data.

    Parameters
    ----------
    data: ndarray
        Data array.
    prob: sequence
        Sequence of quantiles to compute.
    axis : int
        Axis along which to compute the quantiles. If None, use a flattened
        array.

    """
    def _mjci_1D(data, p):
        data = np.sort(data.compressed())
        n = data.size
        prob = (np.array(p) * n + 0.5).astype(int_)
        betacdf = beta.cdf
        #
        mj = np.empty(len(prob), float_)
        x = np.arange(1,n+1, dtype=float_) / n
        y = x - 1./n
        for (i,m) in enumerate(prob):
            (m1,m2) = (m-1, n-m)
            W = betacdf(x,m-1,n-m) - betacdf(y,m-1,n-m)
            C1 = np.dot(W,data)
            C2 = np.dot(W,data**2)
            mj[i] = np.sqrt(C2 - C1**2)
        return mj
    #
    data = ma.array(data, copy=False)
    if data.ndim > 2:
        raise ValueError("Array 'data' must be at most two dimensional, but got data.ndim = %d" % data.ndim)
    p = np.array(prob, copy=False, ndmin=1)
    # Computes quantiles along axis (or globally)
    if (axis is None):
        return _mjci_1D(data, p)
    else:
        return ma.apply_along_axis(_mjci_1D, axis, data, p)

#.............................................................................. 
Developer: ktraunmueller, Project: Computable, Lines of code: 46, Source file: mstats_extras.py

Example 15: median_cihs

# Required import: from scipy.stats.distributions import norm [as alias]
# The cdf method is then used as norm.cdf(...)
# Also uses: numpy as np, numpy.ma as ma, and binom from scipy.stats.distributions
def median_cihs(data, alpha=0.05, axis=None):
    """
    Computes the alpha-level confidence interval for the median of the data.

    Uses the Hettmansperger-Sheather method.

    Parameters
    ----------
    data : array_like
        Input data. Masked values are discarded. The input should be 1D only,
        or `axis` should be set to None.
    alpha : float
        Confidence level of the intervals.
    axis : integer
        Axis along which to compute the quantiles. If None, use a flattened
        array.

    Returns
    -------
    median_cihs :
        Alpha level confidence interval.

    """
    def _cihs_1D(data, alpha):
        data = np.sort(data.compressed())
        n = len(data)
        alpha = min(alpha, 1-alpha)
        k = int(binom._ppf(alpha/2., n, 0.5))
        gk = binom.cdf(n-k,n,0.5) - binom.cdf(k-1,n,0.5)
        if gk < 1-alpha:
            k -= 1
            gk = binom.cdf(n-k,n,0.5) - binom.cdf(k-1,n,0.5)
        gkk = binom.cdf(n-k-1,n,0.5) - binom.cdf(k,n,0.5)
        I = (gk - 1 + alpha)/(gk - gkk)
        lambd = (n-k) * I / float(k + (n-2*k)*I)
        lims = (lambd*data[k] + (1-lambd)*data[k-1],
                lambd*data[n-k-1] + (1-lambd)*data[n-k])
        return lims
    data = ma.array(data, copy=False)
    # Computes quantiles along axis (or globally)
    if (axis is None):
        result = _cihs_1D(data, alpha)
    else:
        if data.ndim > 2:
            raise ValueError("Array 'data' must be at most two dimensional, but got data.ndim = %d" % data.ndim)
        result = ma.apply_along_axis(_cihs_1D, axis, data, alpha)
    #
    return result

#.............................................................................. 
Developer: ktraunmueller, Project: Computable, Lines of code: 52, Source file: mstats_extras.py


Note: The scipy.stats.distributions.norm.cdf method examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other open-source code and documentation platforms. The code snippets are selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors; please consult each project's License before distributing or using it. Do not reproduce this article without permission.