This article collects typical usage examples of the scipy.stats.beta method in Python: what stats.beta does, how it is called, and what it looks like in real code. The curated examples below may help answer those questions, and you can also explore further usage of the containing module, scipy.stats.
The 15 code examples of stats.beta shown below are sorted by popularity by default.
Example 1: gen_x_draws
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import beta [as alias]
def gen_x_draws(k):
    """
    Returns a flat array containing k independent draws from the
    distribution of X, the underlying random variable. This distribution is
    itself a convex combination of three beta distributions.
    """
    bdraws = beta_dist.rvs((3, k))
    # == Transform rows, so each represents a different distribution == #
    bdraws[0, :] -= 0.5
    bdraws[1, :] += 0.6
    bdraws[2, :] -= 1.1
    # == Set X[i] = bdraws[j, i], where j is a random draw from {0, 1, 2} == #
    # np.random.random_integers is deprecated; randint(0, 3, size=k) draws the
    # same values {0, 1, 2}.
    js = np.random.randint(0, 3, size=k)
    X = bdraws[js, np.arange(k)]
    # == Rescale, so that the random variable is zero mean == #
    m, sigma = X.mean(), X.std()
    return (X - m) / sigma
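To run this snippet on its own, the module-level beta_dist and the NumPy import have to exist. The excerpt does not show the shape parameters of beta_dist, so the values in the sketch below are purely an assumption for illustration.

import numpy as np
from scipy.stats import beta

# Hypothetical frozen distribution; (2, 2) is a placeholder, not the original values.
beta_dist = beta(2, 2)

draws = gen_x_draws(10_000)
print(draws.mean(), draws.std())  # approximately 0 and 1 after the rescaling step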
Example 2: test_pickling
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import beta [as alias]
def test_pickling(self):
    # test that a frozen instance pickles and unpickles
    # (this method is a clone of common_tests.check_pickling)
    beta = stats.beta(2.3098496451481823, 0.62687954300963677)
    poiss = stats.poisson(3.)
    sample = stats.rv_discrete(values=([0, 1, 2, 3],
                                       [0.1, 0.2, 0.3, 0.4]))
    for distfn in [beta, poiss, sample]:
        distfn.random_state = 1234
        distfn.rvs(size=8)
        s = pickle.dumps(distfn)
        r0 = distfn.rvs(size=8)
        unpickled = pickle.loads(s)
        r1 = unpickled.rvs(size=8)
        assert_equal(r0, r1)

        # also smoke test some methods
        medians = [distfn.ppf(0.5), unpickled.ppf(0.5)]
        assert_equal(medians[0], medians[1])
        assert_equal(distfn.cdf(medians[0]),
                     unpickled.cdf(medians[1]))
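Outside of the SciPy test suite this method needs only a few standard imports; a minimal header that makes the body runnable (with the class wrapper omitted) would be:

import pickle

from numpy.testing import assert_equal
from scipy import stats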
Example 3: test_beta
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import beta [as alias]
def test_beta(self):
    # case with finite support interval
    v = stats.beta.expect(lambda x: (x - 19/3.)*(x - 19/3.), args=(10, 5),
                          loc=5, scale=2)
    assert_almost_equal(v, 1./18., decimal=13)

    m = stats.beta.expect(lambda x: x, args=(10, 5), loc=5., scale=2.)
    assert_almost_equal(m, 19/3., decimal=13)

    ub = stats.beta.ppf(0.95, 10, 10, loc=5, scale=2)
    lb = stats.beta.ppf(0.05, 10, 10, loc=5, scale=2)
    prob90 = stats.beta.expect(lambda x: 1., args=(10, 10), loc=5.,
                               scale=2., lb=lb, ub=ub, conditional=False)
    assert_almost_equal(prob90, 0.9, decimal=13)

    prob90c = stats.beta.expect(lambda x: 1, args=(10, 10), loc=5,
                                scale=2, lb=lb, ub=ub, conditional=True)
    assert_almost_equal(prob90c, 1., decimal=13)
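The reference values follow directly from the Beta(10, 5) moments: the unscaled mean is 10/15 = 2/3 and the unscaled variance is 10*5 / (15**2 * 16) = 1/72, so with loc=5 and scale=2 the mean becomes 5 + 2*(2/3) = 19/3 and the variance 2**2 * 1/72 = 1/18. A quick check:

from scipy import stats

mean, var = stats.beta.stats(10, 5, loc=5, scale=2, moments='mv')
print(mean, var)  # 6.333... and 0.0555..., i.e. 19/3 and 1/18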
Example 4: testBetaSample
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import beta [as alias]
def testBetaSample(self):
    with self.test_session():
        a = 1.
        b = 2.
        beta = tf.contrib.distributions.Beta(a, b)
        n = tf.constant(100000)
        samples = beta.sample(n)
        sample_values = samples.eval()
        self.assertEqual(sample_values.shape, (100000,))
        self.assertFalse(np.any(sample_values < 0.0))
        self.assertLess(
            stats.kstest(
                # Beta is a univariate distribution.
                sample_values, stats.beta(a=1., b=2.).cdf)[0],
            0.01)
        # The standard error of the sample mean is 1 / (sqrt(18 * n))
        self.assertAllClose(sample_values.mean(axis=0),
                            stats.beta.mean(a, b),
                            atol=1e-2)
        self.assertAllClose(np.cov(sample_values, rowvar=0),
                            stats.beta.var(a, b),
                            atol=1e-1)

# Test that sampling with the same seed twice gives the same results.
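The standard-error comment comes straight from the Beta(1, 2) moments: the variance is ab / ((a+b)**2 * (a+b+1)) = 2/36 = 1/18, so the standard error of the mean of n samples is sqrt(1/18)/sqrt(n) = 1/sqrt(18*n), which motivates the atol=1e-2 tolerance at n = 100000.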
Example 5: __init__
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import beta [as alias]
def __init__(self, lower=None, upper=None, shape_A=None, shape_B=None):
    self.shape_A = shape_A
    self.shape_B = shape_B
    self.lower = lower
    self.upper = upper
    if (self.shape_A is not None) and (self.shape_B is not None):
        if self.shape_A >= 1. and self.shape_B >= 1.0:
            self.mean = self.shape_A / (self.shape_A + self.shape_B)
            self.variance = (self.shape_A * self.shape_B) / ((self.shape_A + self.shape_B)**2 * (self.shape_A + self.shape_B + 1.0))
            self.skewness = 2.0 * (self.shape_B - self.shape_A) * np.sqrt(self.shape_A + self.shape_B + 1.0) / ((self.shape_A + self.shape_B + 2.0) * np.sqrt(self.shape_A * self.shape_B))
            self.kurtosis = 6.0 * ((self.shape_A - self.shape_B)**2 * (self.shape_A + self.shape_B + 1.0) - self.shape_A * self.shape_B * (self.shape_A + self.shape_B + 2.0)) / ((self.shape_A * self.shape_B) * (self.shape_A + self.shape_B + 2.0) * (self.shape_A + self.shape_B + 3.0)) + 3.0
            self.bounds = np.array([0, 1])
            self.shape_parameter_A = self.shape_B - 1.0
            self.shape_parameter_B = self.shape_A - 1.0
            self.parent = beta(self.shape_A, self.shape_B)
    if (self.lower is not None) and (self.upper is not None):
        self.x_range_for_pdf = np.linspace(self.lower, self.upper, RECURRENCE_PDF_SAMPLES)
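This constructor comes from a distribution-wrapper class and relies on module-level names the excerpt does not show: np, beta imported from scipy.stats, and a constant RECURRENCE_PDF_SAMPLES that controls how finely the PDF grid is sampled. A minimal header that would make it importable, with an assumed value for that constant, is:

import numpy as np
from scipy.stats import beta

# Assumed value; the original module defines its own RECURRENCE_PDF_SAMPLES.
RECURRENCE_PDF_SAMPLES = 8000

With shape_A=2.0 and shape_B=3.0, the computed moments come out as mean 0.4 and variance 0.04, matching the closed-form Beta(2, 3) values.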
Example 6: test_beta_binomial_two_identical_models
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import beta [as alias]
def test_beta_binomial_two_identical_models(db_path, sampler):
    binomial_n = 5

    def model_fun(args):
        return {"result": st.binom(binomial_n, args.theta).rvs()}

    models = [model_fun for _ in range(2)]
    models = list(map(SimpleModel, models))
    population_size = ConstantPopulationSize(800)
    parameter_given_model_prior_distribution = [Distribution(theta=st.beta(1, 1))
                                                for _ in range(2)]
    abc = ABCSMC(models, parameter_given_model_prior_distribution,
                 MinMaxDistance(measures_to_use=["result"]),
                 population_size,
                 eps=MedianEpsilon(.1),
                 sampler=sampler)
    abc.new(db_path, {"result": 2})

    minimum_epsilon = .2
    history = abc.run(minimum_epsilon, max_nr_populations=3)
    mp = history.get_model_probabilities(history.max_t)
    assert abs(mp.p[0] - .5) + abs(mp.p[1] - .5) < .08
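Examples 6 through 8 are pyabc test functions; only the scipy.stats usage is shown here. The alias st refers to scipy.stats, while the remaining names are pyabc classes whose exact import paths depend on the installed pyabc version:

import scipy.stats as st  # the tests refer to scipy.stats under the alias st

# ABCSMC, RV, Distribution, SimpleModel, MedianEpsilon, MinMaxDistance,
# ConstantPopulationSize, and AdaptivePopulationSize come from pyabc; their
# precise module locations vary between pyabc releases.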
Example 7: test_all_in_one_model
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import beta [as alias]
def test_all_in_one_model(db_path, sampler):
    models = [AllInOneModel() for _ in range(2)]
    population_size = ConstantPopulationSize(800)
    parameter_given_model_prior_distribution = [Distribution(theta=RV("beta", 1, 1))
                                                for _ in range(2)]
    abc = ABCSMC(models, parameter_given_model_prior_distribution,
                 MinMaxDistance(measures_to_use=["result"]),
                 population_size,
                 eps=MedianEpsilon(.1),
                 sampler=sampler)
    abc.new(db_path, {"result": 2})

    minimum_epsilon = .2
    history = abc.run(minimum_epsilon, max_nr_populations=3)
    mp = history.get_model_probabilities(history.max_t)
    assert abs(mp.p[0] - .5) + abs(mp.p[1] - .5) < .08
Example 8: test_beta_binomial_two_identical_models_adaptive
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import beta [as alias]
def test_beta_binomial_two_identical_models_adaptive(db_path, sampler):
    binomial_n = 5

    def model_fun(args):
        return {"result": st.binom(binomial_n, args.theta).rvs()}

    models = [model_fun for _ in range(2)]
    models = list(map(SimpleModel, models))
    population_size = AdaptivePopulationSize(800)
    parameter_given_model_prior_distribution = [
        Distribution(theta=st.beta(1, 1)) for _ in range(2)]
    abc = ABCSMC(models, parameter_given_model_prior_distribution,
                 MinMaxDistance(measures_to_use=["result"]),
                 population_size,
                 eps=MedianEpsilon(.1),
                 sampler=sampler)
    abc.new(db_path, {"result": 2})

    minimum_epsilon = .2
    history = abc.run(minimum_epsilon, max_nr_populations=3)
    mp = history.get_model_probabilities(history.max_t)
    assert abs(mp.p[0] - .5) + abs(mp.p[1] - .5) < .08
Example 9: plot_dist
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import beta [as alias]
def plot_dist(alpha_value: float, beta_value: float, data: np.ndarray = None):
    beta_dist = beta(alpha_value, beta_value)
    xs = np.linspace(0, 1, 1000)
    ys = beta_dist.pdf(xs)

    fig, ax = plt.subplots(figsize=(7, 3))
    ax.plot(xs, ys)
    ax.set_xlim(0, 1)
    ax.set_xlabel("x")
    ax.set_ylabel("P(x)")

    if data is not None:
        likelihoods = beta_dist.pdf(data)
        sum_log_likelihoods = np.sum(beta_dist.logpdf(data))
        ax.vlines(data, ymin=0, ymax=likelihoods)
        ax.scatter(data, likelihoods, color="black")
        # The original formatted module-level alpha_slider/beta_slider here;
        # the function's own parameters are used so the snippet is self-contained.
        st.write(
            f"""
            _Under your alpha={alpha_value:.2f} and beta={beta_value:.2f},
            the sum of log likelihoods is {sum_log_likelihoods:.2f}_
            """
        )

    st.pyplot(fig)
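Here st is the Streamlit module (st.write, st.pyplot) and plt is matplotlib.pyplot. A minimal sketch of how the function might be wired into a Streamlit app, with hypothetical slider ranges and made-up data, is:

import numpy as np
import matplotlib.pyplot as plt
import streamlit as st
from scipy.stats import beta

# Hypothetical sliders; the labels, ranges, and defaults are assumptions.
alpha_slider = st.slider("alpha", min_value=0.1, max_value=10.0, value=2.0)
beta_slider = st.slider("beta", min_value=0.1, max_value=10.0, value=2.0)

observed = np.array([0.2, 0.4, 0.7])  # illustrative data only
plot_dist(alpha_slider, beta_slider, data=observed)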
Example 10: get_score_df
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import beta [as alias]
def get_score_df(self):
    '''
    :return: pd.DataFrame
    '''
    term_freq_df = self.term_ranker_.get_ranks('')
    cat_freq_df = pd.DataFrame({
        'cat': term_freq_df[self.category_name],
        'ncat': term_freq_df[self.not_category_names].sum(axis=1),
    })
    if self.neutral_category_names:
        cat_freq_df['neut'] = term_freq_df[self.neutral_category_names].sum(axis=1)
    cat_freq_df['all'] = cat_freq_df.sum(axis=1)
    N = cat_freq_df['all'].sum()
    catN = cat_freq_df['cat'].sum()
    ncatN = cat_freq_df['ncat'].sum()
    cat_freq_df['cat_pct'] = cat_freq_df['cat'] * 1. / catN
    cat_freq_df['ncat_pct'] = cat_freq_df['ncat'] * 1. / ncatN

    def row_beta_posterior(row):
        return pd.Series({
            'cat_p': beta(row['all'], N - row['all']).sf(row['cat'] * 1. / catN),
            'ncat_p': beta(row['all'], N - row['all']).sf(row['ncat'] * 1. / ncatN),
        })

    p_val_df = cat_freq_df.apply(row_beta_posterior, axis=1)
    cat_freq_df['cat_p'] = p_val_df['cat_p']
    cat_freq_df['ncat_p'] = p_val_df['ncat_p']
    cat_freq_df['cat_z'] = norm.ppf(p_val_df['cat_p'])
    cat_freq_df['ncat_z'] = norm.ppf(p_val_df['ncat_p'])
    # Assign scores via .loc rather than chained indexing, which is unreliable
    # in recent pandas versions; the logic is unchanged.
    cat_freq_df['score'] = None
    cat_freq_df.loc[cat_freq_df['cat_pct'] == cat_freq_df['ncat_pct'], 'score'] = 0
    cat_freq_df.loc[cat_freq_df['cat_pct'] < cat_freq_df['ncat_pct'], 'score'] = cat_freq_df['ncat_z']
    cat_freq_df.loc[cat_freq_df['cat_pct'] > cat_freq_df['ncat_pct'], 'score'] = -cat_freq_df['cat_z']
    return cat_freq_df
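In this method, beta and norm are the scipy.stats distribution objects and pd is pandas, so the imports the snippet assumes are simply:

import pandas as pd
from scipy.stats import beta, norm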
Example 11: momentcondquant
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import beta [as alias]
def momentcondquant(distfn, params, mom2, quantile=None, shape=None):
    '''moment conditions for estimating distribution parameters by matching
    quantiles; defines as many moment conditions as quantiles

    Returns
    -------
    difference : array
        difference between theoretical and empirical quantiles

    Notes
    -----
    This can be used for method of moments or for generalized method of
    moments.

    '''
    # this check looks redundant/unused now
    if len(params) == 2:
        loc, scale = params
    elif len(params) == 3:
        shape, loc, scale = params
    else:
        #raise NotImplementedError
        pass  # see whether this might work, seems to work for beta with 2 shape args

    #mom2diff = np.array(distfn.stats(*params)) - mom2
    #if not quantile is None:
    pq, xq = quantile
    #ppfdiff = distfn.ppf(pq, alpha)
    cdfdiff = distfn.cdf(xq, *params) - pq
    #return np.concatenate([mom2diff, cdfdiff[:1]])
    return cdfdiff
    #return mom2diff
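Since the function ultimately returns distfn.cdf(xq, *params) - pq, evaluating it with quantiles generated from the same distribution should give values near zero. A minimal sketch:

import numpy as np
from scipy import stats

pq = np.linspace(0.05, 0.95, 10)            # probability levels
xq = stats.beta.ppf(pq, 2.0, 3.0)           # matching "empirical" quantiles
diff = momentcondquant(stats.beta, [2.0, 3.0], None, quantile=(pq, xq))
print(np.max(np.abs(diff)))                 # ~0 when params match the quantiles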
Example 12: test_logpdf
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import beta [as alias]
def test_logpdf(self):
    # Regression test for Ticket #1326: avoid nan with 0*log(0) situation
    logpdf = stats.beta.logpdf(0, 1, 0.5)
    assert_almost_equal(logpdf, -0.69314718056)

    logpdf = stats.beta.logpdf(0, 0.5, 1)
    assert_almost_equal(logpdf, np.inf)
Example 13: test_logpdf_ticket_1866
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import beta [as alias]
def test_logpdf_ticket_1866(self):
    alpha, beta = 267, 1472
    x = np.array([0.2, 0.5, 0.6])
    b = stats.beta(alpha, beta)
    assert_allclose(b.logpdf(x).sum(), -1201.699061824062)
    assert_allclose(b.pdf(x), np.exp(b.logpdf(x)))
Example 14: test_fix_fit
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import beta [as alias]
def test_fix_fit(self):
    def check(func, dist, args, alpha):
        # Not sure why 'ncf' and 'beta' are failing
        # frechet has different len(args) than distfunc.numargs
        if dist in self.skip + ['frechet']:
            raise SkipTest("%s fit known to fail" % dist)
        distfunc = getattr(stats, dist)
        with np.errstate(all='ignore'):
            res = distfunc.rvs(*args, **{'size': 200})
            vals = distfunc.fit(res, floc=0)
            vals2 = distfunc.fit(res, fscale=1)
            assert_(len(vals) == 2 + len(args))
            assert_(vals[-2] == 0)
            assert_(vals2[-1] == 1)
            assert_(len(vals2) == 2 + len(args))
            if len(args) > 0:
                vals3 = distfunc.fit(res, f0=args[0])
                assert_(len(vals3) == 2 + len(args))
                assert_(vals3[0] == args[0])
            if len(args) > 1:
                vals4 = distfunc.fit(res, f1=args[1])
                assert_(len(vals4) == 2 + len(args))
                assert_(vals4[1] == args[1])
            if len(args) > 2:
                vals5 = distfunc.fit(res, f2=args[2])
                assert_(len(vals5) == 2 + len(args))
                assert_(vals5[2] == args[2])

    for func, dist, args, alpha in test_all_distributions():
        yield check, func, dist, args, alpha
Example 15: __init__
# Required import: from scipy import stats [as alias]
# Or: from scipy.stats import beta [as alias]
def __init__(self, A=1.4, α=0.6, β=0.96, grid_size=50,
             G=None, π=np.sqrt, F=stats.beta(2, 2)):
    self.A, self.α, self.β = A, α, β

    # === set defaults for G, π and F === #
    self.G = G if G is not None else lambda x, ϕ: A * (x * ϕ)**α
    self.π = π
    self.F = F

    # === Set up grid over the state space for DP === #
    # Max of grid is the max of a large quantile value for F and the
    # fixed point y = G(y, 1).
    grid_max = max(A**(1 / (1 - α)), self.F.ppf(1 - ϵ))
    self.x_grid = np.linspace(ϵ, grid_max, grid_size)
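The grid construction refers to a module-level constant ϵ that is not part of the excerpt; it acts as a small positive cutoff keeping the grid inside the support of F. A hypothetical value and the surrounding imports could look like this:

import numpy as np
from scipy import stats

# Assumed small grid cutoff; the original module defines its own value of ϵ.
ϵ = 1e-4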