

Python t.ppf Method Code Examples

This article collects typical usage examples of the Python method scipy.stats.t.ppf. If you are wondering what t.ppf does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore the other usage examples for scipy.stats.t.


The following presents 15 code examples of t.ppf, ordered by popularity by default.
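As a quick orientation before the examples, here is a minimal, self-contained sketch of what t.ppf computes: the percent-point function (inverse CDF) of Student's t distribution, most often used to obtain critical values. The alpha and df values below are illustrative, not taken from any project.

from scipy.stats import t

alpha = 0.05
df = 10
# Two-sided critical value: the point with 1 - alpha/2 probability below it.
t_crit = t.ppf(1 - alpha / 2, df)
print(t_crit)             # ~2.228
# ppf inverts cdf (up to floating point):
print(t.cdf(t_crit, df))  # ~0.975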

Example 1: _t_value

# Required import: from scipy.stats import t [as alias]
# Or: from scipy.stats.t import ppf [as alias]
def _t_value(self):
        r"""
        Returns the critical t-statistic given the input alpha-level (defaults to 0.05).

        Returns
        -------
        tval : float
            The critical t-value for use in computing the Least Significant Difference.

        Notes
        -----
        SciPy's :code:`t.ppf` method is used to compute the critical t-value.

        """
        tval = t.ppf(1 - self.alpha / 2, self.n - self.k)

        return tval 
开发者ID:aschleg,项目名称:hypothetical,代码行数:19,代码来源:nonparametric.py
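To make Example 1 concrete, a hypothetical standalone call with illustrative values of alpha, n, and k (these are assumptions, not values from the hypothetical project):

from scipy.stats import t

alpha, n, k = 0.05, 30, 3  # illustrative: 30 observations, 3 groups
tval = t.ppf(1 - alpha / 2, n - k)  # two-sided critical value on n - k dof
print(tval)  # ~2.052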

Example 2: _t

# Required import: from scipy.stats import t [as alias]
# Or: from scipy.stats.t import ppf [as alias]
# Imports added so the snippet runs standalone; numpy's lower-triangular
# Cholesky factor is assumed, matching the determinant and solve steps below.
import numpy as np
from numpy.linalg import cholesky, solve, LinAlgError
from scipy.special import gammaln
from scipy.stats import t

def _t(u, rho, nu):
    d = u.shape[1]
    nu = float(nu)

    try:
        R = cholesky(rho)
    except LinAlgError:
        raise ValueError('Provided Rho matrix is not Positive Definite!')

    # Map the uniform marginals to t quantiles, then de-correlate them.
    ticdf = t.ppf(u, nu)
    z = solve(R, ticdf.T).T

    # Log normalising constant of the d-dimensional t-copula density.
    logSqrtDetRho = np.sum(np.log(np.diag(R)))
    const = (gammaln((nu + d) / 2.0) + (d - 1) * gammaln(nu / 2.0)
             - d * gammaln((nu + 1) / 2.0) - logSqrtDetRho)
    summer = np.sum(np.power(z, 2), axis=1)
    numer = -((nu + d) / 2.0) * np.log(1.0 + summer / nu)
    denom = np.sum(-((nu + 1) / 2.0) * np.log(1 + np.power(ticdf, 2) / nu), axis=1)
    y = np.exp(const + numer - denom)

    return y
Author: stochasticresearch | Project: copula-py | Source: copulapdf.py
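A hypothetical usage of _t above, with an illustrative correlation matrix and degrees of freedom; u holds pseudo-observations inside the open unit square:

import numpy as np

u = np.array([[0.3, 0.7],
              [0.5, 0.5],
              [0.9, 0.1]])
rho = np.array([[1.0, 0.5],
                [0.5, 1.0]])
print(_t(u, rho, nu=4))  # one copula-density value per row of u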

Example 3: isThresholdSimple

# Required import: from scipy.stats import t [as alias]
# Or: from scipy.stats.t import ppf [as alias]
def isThresholdSimple(N,M,p,c,l,myRho):
    # Importance sampling for the one-factor Gaussian threshold model:
    # shift the mean of the systematic factor G, then undo the shift with
    # the likelihood ratio rn when averaging.
    mu = getOptimalMeanShift(c,p,l,myRho)
    theta = np.zeros(M)
    cgf = np.zeros(M)
    qZ = np.zeros([M,N])
    e = np.random.normal(0,1,[M,N])
    G = np.transpose(np.tile(np.random.normal(mu,1,M),(N,1)))
    num = (norm.ppf(p)*np.ones((M,1)))-np.sqrt(myRho)*G
    pZ = norm.cdf(np.divide(num,np.sqrt(1-myRho)))  # conditional default probabilities
    for n in range(0,M):
        theta[n] = vc.getSaddlePoint(pZ[n,:],c,l,0.0)
        qZ[n,:] = getQ(theta[n],c,pZ[n,:])
        cgf[n] = vc.computeCGF(theta[n],pZ[n,:],c)
    I = np.transpose(1*np.less(e,norm.ppf(qZ)))  # default indicators under the twisted measure
    L = np.dot(c,I)                              # portfolio losses
    rn = np.exp(-mu*G[:,0]+0.5*(mu**2))*computeRND(theta,L,cgf)
    tailProb = np.mean(np.multiply(L>l,rn))
    eShortfall = np.mean(np.multiply(L*(L>l),rn))/tailProb
    return tailProb,eShortfall
Author: djbolder | Project: credit-risk-modelling | Source: varianceReduction.py

Example 4: mcThresholdTDecomposition

# Required import: from scipy.stats import t [as alias]
# Or: from scipy.stats.t import ppf [as alias]
def mcThresholdTDecomposition(N,M,S,p,c,rho,nu,isT,myAlpha):
    contributions = np.zeros([N,S,2])
    var = np.zeros(S)
    es = np.zeros(S)
    K = myT.ppf(p,nu)*np.ones((M,1))  # default threshold; myT aliases scipy.stats.t
    for s in range(0,S):
        print("Iteration: %d" % (s+1))
        Y = th.getY(N,M,p,rho,nu,isT)
        myD = 1*np.less(Y,K)     
        myLoss = np.sort(np.dot(myD,c),axis=None)
        el,ul,var[s],es[s]=util.computeRiskMeasures(M,myLoss,np.array([myAlpha]))
        varVector = c*myD[np.dot(myD,c)==var[s],:]
        esVector = c*myD[np.dot(myD,c)>=var[s],:]
        contributions[:,s,0] = np.sum(varVector,0)/varVector.shape[0]
        contributions[:,s,1] = np.sum(esVector,0)/esVector.shape[0]
    return contributions,var,es 
Author: djbolder | Project: credit-risk-modelling | Source: varContributions.py
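Examples 4 and 5 depend on the project's th and util helper modules, so they cannot run standalone, but the core thresholding step is easy to isolate. A minimal sketch with toy, i.i.d. latent variables (the real th.getY produces correlated ones):

import numpy as np
from scipy.stats import t

p, nu = 0.01, 4
Y = np.random.standard_t(nu, size=(1000, 10))  # toy latent draws
K = t.ppf(p, nu)                               # default threshold
myD = 1 * np.less(Y, K)                        # default indicators
print(myD.mean())                              # close to p on average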

Example 5: mcThresholdGDecomposition

# Required import: from scipy.stats import t [as alias]
# Or: from scipy.stats.t import ppf [as alias]
def mcThresholdGDecomposition(N,M,S,p,c,rho,nu,isT,myAlpha):
    contributions = np.zeros([N,S,2])
    var = np.zeros(S)
    es = np.zeros(S)
    K = norm.ppf(p)*np.ones((M,1))  # Gaussian default threshold
    for s in range(0,S):
        print("Iteration: %d" % (s+1))
        Y = th.getY(N,M,p,rho,nu,isT)
        myD = 1*np.less(Y,K)     
        myLoss = np.sort(np.dot(myD,c),axis=None)
        el,ul,var[s],es[s]=util.computeRiskMeasures(M,myLoss,np.array([myAlpha]))
        varVector = c*myD[np.dot(myD,c)==var[s],:]
        esVector = c*myD[np.dot(myD,c)>=var[s],:]
        contributions[:,s,0] = np.sum(varVector,0)/varVector.shape[0]
        contributions[:,s,1] = np.sum(esVector,0)/esVector.shape[0]
    return contributions,var,es 
Author: djbolder | Project: credit-risk-modelling | Source: varContributions.py

Example 6: getPy

# Required import: from scipy.stats import t [as alias]
# Or: from scipy.stats.t import ppf [as alias]
def getPy(p,y,p1,p2,whichModel,v=0):
    if whichModel==0: # Gaussian threshold
        return th.computeP(p,p1,y)
    elif whichModel==1: # beta
        return y*np.ones(len(p))
    elif whichModel==2: # CreditRisk+
        v = p*(1-p1+p1*y)
        return np.maximum(np.minimum(1-np.exp(-v),0.999),0.0001)
    elif whichModel==3: # logit
        return np.reciprocal(1+np.exp(-(p1+p2*y)))
    elif whichModel==4: # probit
        # The probit link maps to a probability via the normal CDF;
        # the original source called norm.ppf here, which is a bug.
        return norm.cdf(p1+p2*y)
    elif whichModel==5: # Weibull
        return np.maximum(np.minimum(1-np.exp(-y),0.999),0.0001)*np.ones(len(p))
    elif whichModel==6: # t threshold
        return th.computeP_t(p,p1,y,v,p2)
Author: djbolder | Project: credit-risk-modelling | Source: varContributions.py
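For instance, the logit branch (whichModel == 3) maps the systematic factor y to a conditional default probability; a standalone check with illustrative parameters p1 and p2:

import numpy as np

p1, p2 = -2.0, 0.8  # illustrative intercept and factor loading
y = np.linspace(-3.0, 3.0, 7)
pY = np.reciprocal(1 + np.exp(-(p1 + p2 * y)))
print(pY)  # increases with y and stays strictly inside (0, 1)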

Example 7: _clopper_pearson_interval

# Required import: from scipy.stats import t [as alias]
# Or: from scipy.stats.t import ppf [as alias]
def _clopper_pearson_interval(self):
        r"""
        Computes the Clopper-Pearson 'exact' confidence interval.

        References
        ----------
        Wikipedia contributors. (2018, July 14). Binomial proportion confidence interval.
            In Wikipedia, The Free Encyclopedia. Retrieved 00:40, August 15, 2018,
            from https://en.wikipedia.org/w/index.php?title=Binomial_proportion_confidence_interval&oldid=850256725

        """
        p = self.x / self.n

        if self.alternative == 'less':
            lower_bound = 0.0
            upper_bound = beta.ppf(1 - self.alpha, self.x + 1, self.n - self.x)
        elif self.alternative == 'greater':
            upper_bound = 1.0
            lower_bound = beta.ppf(self.alpha, self.x, self.n - self.x + 1)
        else:
            lower_bound = beta.ppf(self.alpha / 2, self.x, self.n - self.x + 1)
            upper_bound = beta.ppf(1 - self.alpha / 2, self.x + 1, self.n - self.x)

        clopper_pearson_interval = {
            'probability of success': p,
            'conf level': 1 - self.alpha,
            'interval': (lower_bound, upper_bound)
        }

        return clopper_pearson_interval 
Author: aschleg | Project: hypothetical | Source: hypothesis.py
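A self-contained two-sided Clopper-Pearson computation mirroring the else branch above (x, n, and alpha are illustrative):

from scipy.stats import beta

x, n, alpha = 12, 50, 0.05
lower = beta.ppf(alpha / 2, x, n - x + 1)
upper = beta.ppf(1 - alpha / 2, x + 1, n - x)
print(lower, upper)  # roughly (0.13, 0.38) for 12 successes in 50 trials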

Example 8: _normal_scores

# Required import: from scipy.stats import t [as alias]
# Or: from scipy.stats.t import ppf [as alias]
def _normal_scores(self):
        r"""
        Calculates the normal scores used in the Van der Waerden test.

        Returns
        -------
        score_matrix : array-like
            Numpy ndarray representing the data matrix with ranked observations and computed normal test scores.

        Notes
        -----
        Let :math:`n_j`, be the number of samples for each of the :math:`k` groups where :math:`j` is the j-th group.
        :math:`N` is the number of total samples in all groups, while :math:`X_{ij}` is the i-th value of the j-th
        group. The normal scores used in the Van der Waerden test are calculated as:

        .. math::

            A_{ij} = \Phi^{-1} \left( \frac{R \left( X_{ij} \right)}{N + 1} \right)

        References
        ----------
        Conover, W. J. (1999). Practical Nonparametric Statistics (Third ed.). Wiley.

        Wikipedia contributors. "Van der Waerden test." Wikipedia, The Free Encyclopedia.
            Wikipedia, The Free Encyclopedia, 8 Feb. 2017. Web. 8 Mar. 2020.

        """
        aij = norm.ppf(list(self.ranked_matrix[:, 2] / (self.n + 1)))
        score_matrix = np.column_stack([self.ranked_matrix, aij])

        return score_matrix 
Author: aschleg | Project: hypothetical | Source: nonparametric.py
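A standalone computation of the same normal scores for a small illustrative sample, using scipy.stats.rankdata in place of the class's precomputed ranked matrix:

import numpy as np
from scipy.stats import norm, rankdata

x = np.array([2.0, 3.1, 1.4, 5.9, 4.2])
aij = norm.ppf(rankdata(x) / (len(x) + 1))
print(aij)  # scores symmetric around 0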

Example 9: _bca

# Required import: from scipy.stats import t [as alias]
# Or: from scipy.stats.t import ppf [as alias]
# Imports added so the snippet runs standalone.
import numpy as np
from scipy.stats import norm

def _bca(ab_estimates, sample_point, n_boot, alpha=0.05):
    """Get (1 - alpha) * 100 bias-corrected confidence interval estimate.

    Note that this is similar to the "cper" module implemented in
    :py:func:`pingouin.compute_bootci`.

    Parameters
    ----------
    ab_estimates : 1d array-like
        Array with bootstrap estimates for each sample.
    sample_point : float
        Indirect effect point estimate based on full sample.
    n_boot : int
        Number of bootstrap samples
    alpha : float
        Alpha for confidence interval

    Returns
    -------
    CI : 1d array-like
        Lower limit and upper limit bias-corrected confidence interval
        estimates.
    """
    # Bias of bootstrap estimates
    z0 = norm.ppf(np.sum(ab_estimates < sample_point) / n_boot)
    # Adjusted intervals
    adjusted_ll = norm.cdf(2 * z0 + norm.ppf(alpha / 2)) * 100
    adjusted_ul = norm.cdf(2 * z0 + norm.ppf(1 - alpha / 2)) * 100
    ll = np.percentile(ab_estimates, q=adjusted_ll)
    ul = np.percentile(ab_estimates, q=adjusted_ul)
    return np.array([ll, ul]) 
Author: raphaelvallat | Project: pingouin | Source: regression.py
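A hypothetical call to _bca above, using simulated normal draws to stand in for real bootstrap estimates:

import numpy as np

rng = np.random.default_rng(0)
ab_boot = rng.normal(loc=0.5, scale=0.1, size=2000)  # fake bootstrap draws
print(_bca(ab_boot, sample_point=0.5, n_boot=2000))  # CI near (0.3, 0.7)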

Example 10: CVaR

# Required import: from scipy.stats import t [as alias]
# Or: from scipy.stats.t import ppf [as alias]
from scipy.stats import norm  # import added so the snippet runs standalone

def CVaR(mu, sig, alpha=0.01):
    # Gaussian expected shortfall (CVaR) at level alpha for N(mu, sig^2) returns.
    return alpha ** -1 * norm.pdf(norm.ppf(alpha)) * sig - mu


开发者ID:naripok,项目名称:cryptotrader,代码行数:7,代码来源:risk.py

Example 11: TCVaR

# Required import: from scipy.stats import t [as alias]
# Or: from scipy.stats.t import ppf [as alias]
from scipy.stats import t  # import added so the snippet runs standalone

def TCVaR(mu, sig, nu, h=1, alpha=0.01):
    # Student-t expected shortfall (CVaR) at level alpha over horizon h.
    xanu = t.ppf(alpha, nu)
    return -1 / alpha * (1 - nu) ** (-1) * (nu - 2 + xanu ** 2) * t.pdf(xanu, nu) * sig - h * mu
Author: naripok | Project: cryptotrader | Source: risk.py
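An illustrative comparison of the two tail measures above for the same mean and volatility (parameter values are arbitrary; assumes both snippets have been executed):

print(CVaR(mu=0.001, sig=0.02, alpha=0.01))
print(TCVaR(mu=0.001, sig=0.02, nu=4, alpha=0.01))  # larger, reflecting the heavier t tail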

Example 12: cdf

# Required import: from scipy.stats import t [as alias]
# Or: from scipy.stats.t import ppf [as alias]
def cdf(self, x):
		self._check_dimension(x)
		return multivariate_normal.cdf([ norm.ppf(u) for u in x ], cov=self.R) 
Author: blent-ai | Project: pycopula | Source: copula.py

Example 13: pdf

# Required import: from scipy.stats import t [as alias]
# Or: from scipy.stats.t import ppf [as alias]
def pdf(self, x):
		self._check_dimension(x)
		u_i = norm.ppf(x)
		return self._R_det**(-0.5) * np.exp(-0.5 * np.dot(u_i, np.dot(self._R_inv - np.identity(self.dim), u_i))) 
Author: blent-ai | Project: pycopula | Source: copula.py
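A standalone check of the Gaussian-copula density formula used in the pdf method above (2-d case; the correlation matrix R and evaluation point are illustrative):

import numpy as np
from scipy.stats import norm

R = np.array([[1.0, 0.6], [0.6, 1.0]])
R_inv, R_det = np.linalg.inv(R), np.linalg.det(R)
u = norm.ppf(np.array([0.3, 0.8]))  # map uniforms to normal quantiles
dens = R_det ** -0.5 * np.exp(-0.5 * u @ (R_inv - np.eye(2)) @ u)
print(dens)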

Example 14: pdf_param

# Required import: from scipy.stats import t [as alias]
# Or: from scipy.stats.t import ppf [as alias]
def pdf_param(self, x, R):
		self._check_dimension(x)
		if self.dim == 2 and not(hasattr(R, '__len__')):
			R = [R]
		if len(np.asarray(R).shape) == 2 and len(R) != self.dim:
			raise ValueError("Expected covariance matrix of dimension {0}.".format(self.dim))
		u = norm.ppf(x)
		
		cov = np.ones([ self.dim, self.dim ])
		idx = 0
		if len(np.asarray(R).shape) <= 1:
			if len(R) == self.dim * (self.dim - 1) / 2:
				# Fill the symmetric matrix from the flat upper-triangle values.
				for j in range(self.dim):
					for i in range(j + 1, self.dim):
						cov[j][i] = R[idx]
						cov[i][j] = R[idx]
						idx += 1
			else:
				raise ValueError("Expected covariance matrix, got an array.")
		else:
			cov = np.asarray(R)  # R was already a full matrix; the original left cov as ones here
		
		if self.dim == 2:
			RDet = cov[0][0] * cov[1][1] - cov[0][1]**2
			RInv = 1. / RDet * np.asarray([[ cov[1][1], -cov[0][1]], [ -cov[0][1], cov[0][0] ]])
		else:
			RDet = np.linalg.det(cov)
			RInv = np.linalg.inv(cov)
		return [ RDet**(-0.5) * np.exp(-0.5 * np.dot(u_i, np.dot(RInv - np.identity(self.dim), u_i))) for u_i in u ]
Author: blent-ai | Project: pycopula | Source: copula.py

Example 15: quantile

# Required import: from scipy.stats import t [as alias]
# Or: from scipy.stats.t import ppf [as alias]
def quantile(self, x):
		# NOTE: scipy.stats.multivariate_normal has no ppf method, so this call
		# raises AttributeError as written in the source project.
		return multivariate_normal.ppf([ norm.ppf(u) for u in x ], cov=self.R)
Author: blent-ai | Project: pycopula | Source: copula.py


Note: the scipy.stats.t.ppf examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects by their respective contributors, and copyright remains with the original authors; consult each project's License before redistributing or reusing the code. Please do not repost without permission.