This page collects typical usage examples of the Python method scipy.stats.norm.isf. If you are wondering what norm.isf does and how to use it in practice, the curated code examples below may help. For more context, you can also look at the class it belongs to, scipy.stats.norm.
Eight code examples of norm.isf are shown below, sorted by popularity by default.
Example 1: z_score
# Required import: from scipy.stats import norm [as alias]
# or: from scipy.stats.norm import isf [as alias]
# also needs: import numpy as np
def z_score(pvalue):
    """ Return the z-score corresponding to a given p-value.
    """
    # clip to avoid the degenerate endpoints p = 0 (z = inf) and p = 1 (z = -inf)
    pvalue = np.minimum(np.maximum(pvalue, 1.e-300), 1. - 1.e-16)
    return norm.isf(pvalue)
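As a quick aside (not part of the original source): norm.isf is the inverse survival function, so sf and isf round-trip, and the clipping above guards against the degenerate endpoints.

from scipy.stats import norm

print(norm.isf(0.05))            # ≈ 1.6449, the one-sided 5% critical value
print(norm.sf(norm.isf(0.05)))   # ≈ 0.05: sf and isf are inverses
print(norm.isf(0.0))             # inf -- why p is clipped away from 0 above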
Example 2: test_fdr
# Required import: from scipy.stats import norm [as alias]
# or: from scipy.stats.norm import isf [as alias]
# also needs: numpy, pytest, numpy.testing.assert_almost_equal,
# and the fdr_threshold function under test
def test_fdr():
    n = 100
    x = np.linspace(.5 / n, 1. - .5 / n, n)
    x[:10] = .0005
    x = norm.isf(x)
    np.random.shuffle(x)
    assert_almost_equal(fdr_threshold(x, .1), norm.isf(.0005))
    assert fdr_threshold(x, .001) == np.inf
    with pytest.raises(ValueError):
        fdr_threshold(x, -.1)
    with pytest.raises(ValueError):
        fdr_threshold(x, 1.5)
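The fdr_threshold under test is not shown on this page. For orientation, here is a minimal Benjamini-Hochberg sketch consistent with the assertions above (a simplified stand-in, not the library's actual implementation):

import numpy as np
from scipy.stats import norm

def fdr_threshold(z_vals, alpha):
    """Smallest z-value surviving Benjamini-Hochberg FDR control at alpha."""
    if alpha < 0 or alpha > 1:
        raise ValueError('alpha should be between 0 and 1')
    z_sorted = np.sort(z_vals)[::-1]               # largest z first
    p_vals = norm.sf(z_sorted)                     # one-sided p-values, ascending
    n = len(p_vals)
    critical = alpha * np.arange(1, n + 1) / n     # BH critical line
    below = np.where(p_vals <= critical)[0]
    if len(below) == 0:
        return np.inf                              # nothing survives
    return z_sorted[below.max()]                   # z of the last rejection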
Example 3: test_z_score
# Required import: from scipy.stats import norm [as alias]
# or: from scipy.stats.norm import isf [as alias]
# also needs: numpy and numpy.testing.assert_array_almost_equal
def test_z_score():
    p = np.random.rand(10)
    assert_array_almost_equal(norm.sf(z_score(p)), p)
    # check the numerical precision
    for p in [1.e-250, 1 - 1.e-16]:
        assert_array_almost_equal(z_score(p), norm.isf(p))
    assert_array_almost_equal(z_score(np.float32(1.e-100)),
                              norm.isf(1.e-300))
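The last assertion exercises an underflow edge case. A brief illustration (an aside, not part of the original test file):

import numpy as np

# float32 cannot represent 1e-100; it silently underflows to 0.0,
# and z_score's lower clip at 1e-300 then keeps norm.isf finite.
print(np.float32(1.e-100))   # 0.0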
Example 4: h2_obs_to_liab
# Required import: from scipy.stats import norm [as alias]
# or: from scipy.stats.norm import isf [as alias]
# also needs: import numpy as np
def h2_obs_to_liab(h2_obs, P, K):
    '''
    Converts heritability on the observed scale in an ascertained sample
    to heritability on the liability scale in the population.

    Parameters
    ----------
    h2_obs : float
        Heritability on the observed scale in an ascertained sample.
    P : float in (0, 1)
        Prevalence of the phenotype in the sample.
    K : float in (0, 1)
        Prevalence of the phenotype in the population.

    Returns
    -------
    h2_liab : float
        Heritability of liability in the population.
    '''
    if np.isnan(P) and np.isnan(K):
        return h2_obs
    if K <= 0 or K >= 1:
        raise ValueError('K must be in the range (0,1)')
    if P <= 0 or P >= 1:
        raise ValueError('P must be in the range (0,1)')
    thresh = norm.isf(K)  # liability threshold for population prevalence K
    conversion_factor = K ** 2 * \
        (1 - K) ** 2 / (P * (1 - P) * norm.pdf(thresh) ** 2)
    return h2_obs * conversion_factor
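A hedged usage sketch with made-up numbers: a phenotype at 1% population prevalence (K), ascertained to 50% in the sample (P), with an observed-scale heritability of 0.2.

from scipy.stats import norm

h2_obs, P, K = 0.2, 0.5, 0.01
thresh = norm.isf(K)              # liability threshold, ≈ 2.326
factor = K**2 * (1 - K)**2 / (P * (1 - P) * norm.pdf(thresh)**2)
print(h2_obs * factor)            # ≈ 0.11 on the liability scale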
Example 5: draw_two_gauss
# Required import: from scipy.stats import norm [as alias]
# or: from scipy.stats.norm import isf [as alias]
# also needs: numpy and PIL (Image, ImageDraw); draw_curve, draw_trtmt_hist,
# draw_alpha_beta_curve and basedir are module-level helpers not shown here
def draw_two_gauss(ix=0, extrema=500, std=50, h1=0, h2=0, gap=50,
                   alpha=0.15865525393145707):  # default alpha = norm.sf(1)
    im = Image.new("RGB", (512, 512), "black")
    draw = ImageDraw.Draw(im, 'RGBA')
    # null-hypothesis curve centred at x = 250
    fn = lambda x: 300 - norm.pdf(x - 250, 0, std) * 7000
    draw_curve(fn, draw)
    # alternative curve shifted right by `gap`
    fn2 = lambda x: 300 - norm.pdf(x - 250, gap, std) * 7000
    draw_curve(fn2, draw, rgba=(138, 43, 226))
    #draw.line((250,0,250,512),fill=(0,120,230),width=1)
    #draw.line((250,0,250,512),fill=(255,255,0),width=1)
    # critical value: right-tail cutoff at significance level alpha
    delta = norm.isf(alpha, 0, std)
    x1 = 250 + delta
    draw.line((x1, 0, x1, 512), fill=(255, 20, 147, 150), width=1)
    y1 = fn(x1)
    # shade the rejection region under the null curve
    pts = [(x1, y1), (x1, 300), (extrema, fn(extrema))]
    for xx in np.arange(extrema - 1, x1, -1):
        yx = fn(xx)
        pts.append((xx, yx))
    draw.polygon(pts, (255, 255, 0, 100))
    # shade the Type II error region under the alternative curve
    y2 = fn2(x1)
    pts = [(x1, y2), (x1, 300), (180, fn2(180))]
    for xx in np.arange(180, x1, 1):
        yx = fn2(xx)
        pts.append((xx, yx))
    draw.polygon(pts, (138, 43, 226, 100))
    draw_trtmt_hist(draw, h1=h1, h2=h2)
    draw_alpha_beta_curve(draw, alpha, std=std, effect=gap)
    im.save(basedir + 'im' + str(ix) + '.png')
Example 6: betafn
# Required import: from scipy.stats import norm [as alias]
# or: from scipy.stats.norm import isf [as alias]
def betafn(alpha, effect, std):
    # Type II error rate of a one-sided z-test: the probability that a
    # draw from N(effect, std^2) falls below the critical value
    # norm.isf(alpha, 0, std) set under the null N(0, std^2).
    return norm.cdf(-effect + norm.isf(alpha, 0, std), 0, std)
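betafn returns the Type II error rate β; power is 1 − β. A quick check using the default alpha from Example 5, which equals norm.sf(1):

from scipy.stats import norm

alpha, effect, std = norm.sf(1), 50, 50
beta = norm.cdf(-effect + norm.isf(alpha, 0, std), 0, std)
print(beta, 1 - beta)   # 0.5 0.5: with alpha = sf(1) and effect = std,
                        # the critical value lands exactly on the
                        # alternative's mean, so power is exactly one half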
Example 7: extreme_values
# Required import: from scipy.stats import norm [as alias]
# or: from scipy.stats.norm import isf [as alias]
# also needs: numpy and genextreme from scipy.stats
def extreme_values(weighted_residuals, confidence_interval):
    '''
    This function uses extreme value theory to calculate the number of
    standard deviations away from the mean at which we should expect to
    bracket *all* of our n data points at a certain confidence level.

    It then uses that value to identify which (if any) of the data points
    lie outside that region, and calculates the corresponding probabilities
    of finding a data point at least that many standard deviations away.

    Parameters
    ----------
    weighted_residuals : array of floats
        Array of residuals weighted by the square root of their
        variances, wr_i = r_i/sqrt(var_i).
    confidence_interval : float
        Probability at which all the weighted residuals lie
        within the confidence bounds.

    Returns
    -------
    confidence_bound : float
        Number of standard deviations at which we should expect to
        encompass all data at the user-defined confidence interval.
    indices : array of ints
        Indices of weighted residuals exceeding the confidence bound
        defined by the user.
    probabilities : array of floats
        The probabilities that the extreme data point of the distribution
        lies further from the mean than the observed position wr_i for
        each i in the "indices" output array.
    '''
    n = len(weighted_residuals)
    mean = norm.isf(1. / n)
    scale = 0.8 / np.power(np.log(n), 1. / 2.)  # good approximation for > 10 data points
    c = 0.33 / np.power(np.log(n), 3. / 4.)     # good approximation for > 10 data points
    # We need a one-tailed probability from the given confidence_interval:
    # p_total = 1. - confidence_interval = p_upper + p_lower - p_upper*p_lower
    # p_total = 1. - confidence_interval = 2p - p^2, therefore:
    p = 1. - np.sqrt(confidence_interval)
    confidence_bound = genextreme.isf(p, c, loc=mean, scale=scale)
    indices = [i for i, r in enumerate(weighted_residuals)
               if np.abs(r) > confidence_bound]
    # Convert the one-tailed survival probabilities back to two-tailed ones.
    probabilities = 1. - np.power(
        genextreme.sf(np.abs(weighted_residuals[indices]),
                      c, loc=mean, scale=scale) - 1., 2.)
    return confidence_bound, indices, probabilities
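A hedged usage sketch on synthetic residuals (random data, purely illustrative; assumes extreme_values as defined above):

import numpy as np
from scipy.stats import norm, genextreme

np.random.seed(0)
wr = np.random.normal(size=100)   # well-behaved weighted residuals
wr[0] = 5.                        # plant a single outlier
bound, idx, probs = extreme_values(wr, 0.9)
print(bound)    # ≈ 3.3: for n = 100, even the most extreme well-behaved
                # point should stay within roughly 3.3 standard deviations
print(idx)      # [0] -- only the planted outlier exceeds the bound
print(probs)    # small: an extreme value this far out is unlikely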
Example 8: get_score_df
# Required import: from scipy.stats import norm [as alias]
# or: from scipy.stats.norm import isf [as alias]
# also needs: numpy, pandas, and mannwhitneyu from scipy.stats; this is a
# method of a term-scoring class, which supplies self._get_X,
# self._get_cat_and_ncat and self.corpus_
def get_score_df(self, correction_method=None):
    '''
    Computes Mann-Whitney corrected p- and z-values. Falls back to a
    normal approximation when numerical limits are reached.

    :param correction_method: str or None, correction method from
        statsmodels.stats.multitest.multipletests; 'fdr_bh' is recommended.
    :return: pd.DataFrame
    '''
    X = self._get_X().astype(np.float64)
    X = X / X.sum(axis=1)
    cat_X, ncat_X = self._get_cat_and_ncat(X)

    def normal_apx(u, x, y):
        # from https://stats.stackexchange.com/questions/116315/problem-with-mann-whitney-u-test-in-scipy
        m_u = len(x) * len(y) / 2
        sigma_u = np.sqrt(len(x) * len(y) * (len(x) + len(y) + 1) / 12)
        z = (u - m_u) / sigma_u
        return 2 * norm.cdf(z)

    scores = []
    for i in range(cat_X.shape[1]):
        cat_list = cat_X.T[i].A1
        ncat_list = ncat_X.T[i].A1
        try:
            if cat_list.mean() > ncat_list.mean():
                mw = mannwhitneyu(cat_list, ncat_list, alternative='greater')
                p_val = mw.pvalue
                if p_val in (0, 1):
                    # the exact p-value hit a numerical limit; fall back
                    p_val = normal_apx(mw.statistic, cat_list, ncat_list)
                scores.append({'mwu': mw.statistic, 'mwu_p': p_val,
                               'mwu_z': norm.isf(float(p_val)), 'valid': True})
            else:
                mw = mannwhitneyu(ncat_list, cat_list, alternative='greater')
                p_val = mw.pvalue
                if p_val in (0, 1):
                    p_val = normal_apx(mw.statistic, ncat_list, cat_list)
                scores.append({'mwu': -mw.statistic, 'mwu_p': 1 - p_val,
                               'mwu_z': 1. - norm.isf(float(p_val)),
                               'valid': True})
        except Exception:
            scores.append({'mwu': 0, 'mwu_p': 0, 'mwu_z': 0, 'valid': False})

    score_df = pd.DataFrame(scores, index=self.corpus_.get_terms()).fillna(0)
    if correction_method is not None:
        from statsmodels.stats.multitest import multipletests
        for method in ['mwu']:
            valid_pvals = score_df[score_df.valid].mwu_p
            # fold the p-values so both tails are corrected together
            valid_pvals_abs = np.min([valid_pvals, 1 - valid_pvals], axis=0)
            valid_pvals_abs_corr = multipletests(
                valid_pvals_abs, method=correction_method)[1]
            score_df[method + '_p_corr'] = 0.5
            # unfold: restore the original direction of each p-value
            valid_pvals_abs_corr[valid_pvals > 0.5] = \
                1. - valid_pvals_abs_corr[valid_pvals > 0.5]
            score_df.loc[score_df.valid, method + '_p_corr'] = valid_pvals_abs_corr
            score_df[method + '_z'] = -norm.ppf(score_df[method + '_p_corr'])
    return score_df
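The normal-approximation fallback exists because, with large and well-separated samples, the exact Mann-Whitney p-value can underflow to exactly 0, at which point norm.isf returns inf. A standalone toy illustration (synthetic data; not the corpus API used above):

import numpy as np
from scipy.stats import mannwhitneyu, norm

np.random.seed(0)
x = np.random.normal(10, 1, 1000)   # completely separated from y
y = np.random.normal(0, 1, 1000)
mw = mannwhitneyu(x, y, alternative='greater')
print(mw.pvalue)            # underflows to 0.0 at this sample size
print(norm.isf(mw.pvalue))  # inf -- hence the normal_apx fallback above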