This page collects typical usage examples of the scipy.stats.beta.pdf method in Python. If you are unsure what beta.pdf does, how to call it, or what idiomatic usage looks like, the curated examples below should help. You can also explore further usage of scipy.stats.beta, the class this method belongs to.
Twelve code examples of beta.pdf are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
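Before the examples, a minimal standalone sketch of what beta.pdf computes; the grid and shape parameters below are illustrative:

import numpy as np
from scipy.stats import beta

# Evaluate the Beta(2, 5) density on a grid over its support [0, 1].
x = np.linspace(0, 1, 5)
print(beta.pdf(x, a=2, b=5))              # pointwise density values
print(beta.pdf(x, 2, 5, loc=0, scale=1))  # loc/scale default to the unit interval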
Example 1: pdf
# Required import: from scipy.stats import beta [as alias]
# Or: from scipy.stats.beta import pdf [as alias]
def pdf(self, u: Array, log=False):
    assert self.smoothing == "beta", "Empirical Copula only has density (PDF) for smoothing = 'beta'"
    assert isinstance(self.data, np.ndarray), "data is still undefined for EmpiricalCopula"

    u = self.pobs(u, self._ties)
    data_rank = rank_data(self.data, 1, self._ties)
    n = len(self.data)

    if log:
        return np.array([
            log_sum(
                np.array([
                    sum(beta.logpdf(row, a=row_rank, b=n + 1 - row_rank))
                    for row_rank in data_rank
                ])
            ) for row in u]) - np.log(n + self._offset)
    else:
        return np.array([
            sum([
                np.prod(beta.pdf(row, a=row_rank, b=n + 1 - row_rank))
                for row_rank in data_rank
            ]) for row in u]) / (n + self._offset)
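Note that the two branches agree: the sum of beta.logpdf values is the log of np.prod(beta.pdf(...)), so the log path only changes the arithmetic, not the result. A quick check with illustrative inputs (np and beta imported as above):

row = np.array([0.3, 0.7])
row_rank = np.array([2, 5])
n = 6
assert np.isclose(sum(beta.logpdf(row, a=row_rank, b=n + 1 - row_rank)),
                  np.log(np.prod(beta.pdf(row, a=row_rank, b=n + 1 - row_rank))))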
Example 2: betaNLL
# Required import: from scipy.stats import beta [as alias]
# Or: from scipy.stats.beta import pdf [as alias]
def betaNLL(param, *args):
    '''Negative log-likelihood function for the beta distribution.

    <param>: list of parameters to be fitted (a, b).
    <args>: 1-element tuple containing the sample data.

    Return <nll>: negative log-likelihood to be minimized.
    '''
    a, b = param
    data = args[0]
    pdf = beta.pdf(data, a, b, loc=0, scale=1)
    lg = np.log(pdf)
    # -----Replace -inf with 0s------
    # (log(0) = -inf terms are dropped rather than propagated)
    lg = np.where(lg == -np.inf, 0, lg)
    nll = -1 * np.sum(lg)
    return nll
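betaNLL is written to be driven by a generic optimizer. A hedged sketch of one way to use it, with synthetic data and an illustrative starting point (Nelder-Mead avoids gradients, which helps since the NLL is undefined for non-positive a or b):

import numpy as np
from scipy.optimize import minimize
from scipy.stats import beta

data = beta.rvs(2.0, 5.0, size=1000)   # synthetic sample with known shape parameters
result = minimize(betaNLL, x0=[1.0, 1.0], args=(data,), method='Nelder-Mead')
print(result.x)                        # fitted (a, b), roughly (2, 5)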
Example 3: hpd
# Required import: from scipy.stats import beta [as alias]
# Or: from scipy.stats.beta import pdf [as alias]
def hpd(self, posterior, var, *parameters):
    """
    Find the High Posterior Density (HPD) region.
    """
    bins = posterior[var][1]
    x = 0.5 * (bins[0:-1] + bins[1:])   # bin centers
    pdf = posterior[var][0]
    # Scan horizontal cuts k through the density and keep the one whose
    # enclosed area matches self.alpha (the "waterline" construction).
    k = np.linspace(0, max(pdf), 1000)
    area_above = np.array([np.trapz(pdf[pdf >= kk], x[pdf >= kk]) for kk in k])
    index = np.argwhere(np.abs(area_above - self.alpha) == np.min(np.abs(area_above - self.alpha)))[0]
    if self.plot:
        self.plot_rope_posterior(index, k, x, posterior, var, *parameters)
    return x[pdf >= k[index]]
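The same waterline idea works on any known density. A self-contained sketch on a Beta density (grid size and credibility level are illustrative):

import numpy as np
from scipy.stats import beta

x = np.linspace(0, 1, 1001)
pdf = beta.pdf(x, 3, 9)
k = np.linspace(0, max(pdf), 1000)
area = np.array([np.trapz(pdf[pdf >= kk], x[pdf >= kk]) for kk in k])
cut = k[np.argmin(np.abs(area - 0.95))]
hpd_points = x[pdf >= cut]   # grid points inside the (approximate) 95% HPD region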
Example 4: set_prior
# Required import: from scipy.stats import beta [as alias]
# Or: from scipy.stats.beta import pdf [as alias]
def set_prior(self, theta_min, theta_max, num_theta, dist_name):
    self.theta_val = np.linspace(theta_min, theta_max, num=num_theta)
    self.num_theta = num_theta
    if dist_name == 'uniform':
        # uniform density over the grid
        self.theta_density = np.ones(num_theta) / num_theta
    elif dist_name == 'beta':
        # centered Beta(2, 2) prior;
        # rescale so the grid stays away from the upper boundary of [0, 1]
        self.theta_density = beta.pdf((self.theta_val - theta_min) / (theta_max - theta_min + 0.1), 2, 2)
        # renormalize to a discrete probability mass
        self.theta_density = self.theta_density / sum(self.theta_density)
    else:
        raise Exception('Unknown prior distribution.')
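A hedged usage sketch, assuming a hypothetical model object exposing this method; it checks that the discretized prior is a proper probability mass:

model.set_prior(theta_min=-4.0, theta_max=4.0, num_theta=11, dist_name='beta')
assert np.isclose(model.theta_density.sum(), 1.0)   # renormalization makes the mass sum to 1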
Example 5: posterior_analytic
# Required import: from scipy.stats import beta [as alias]
# Or: from scipy.stats.beta import pdf [as alias]
def posterior_analytic(self, data):
    """
    Find the posterior distribution for the analytic method of solution.
    """
    ca = np.sum(data[0])
    na = len(data[0])
    cb = np.sum(data[1])
    nb = len(data[1])
    # Posteriors of A and B from the analytic (conjugate) solution.
    x = np.linspace(0, 1, self.resolution - 1)
    dx = x[1] - x[0]
    pa = (np.array([beta.pdf(xx, self.alpha_prior + ca, self.beta_prior + na - ca) for xx in x]),
          np.append(x, x[-1] + dx) - 0.5 * dx)
    pb = (np.array([beta.pdf(xx, self.alpha_prior + cb, self.beta_prior + nb - cb) for xx in x]),
          np.append(x, x[-1] + dx) - 0.5 * dx)
    # Bootstrap the derived quantities from posterior draws.
    a_rvs = beta.rvs(self.alpha_prior + ca, self.beta_prior + na - ca, size=400 * self.resolution)
    b_rvs = beta.rvs(self.alpha_prior + cb, self.beta_prior + nb - cb, size=400 * self.resolution)
    rvs = b_rvs - a_rvs
    bins = np.linspace(np.min(rvs) - 0.2 * abs(np.min(rvs)), np.max(rvs) + 0.2 * abs(np.max(rvs)), self.resolution)
    # Note: np.histogram's `normed` argument has been removed from NumPy;
    # `density=True` is the current equivalent.
    lift = np.histogram(rvs, bins=bins, density=True)
    bins = np.linspace(0, 1, self.resolution)
    sigma_a_rvs = np.sqrt(a_rvs * (1 - a_rvs))
    sigma_b_rvs = np.sqrt(b_rvs * (1 - b_rvs))
    psigma_a = np.histogram(sigma_a_rvs, bins=bins, density=True)
    psigma_b = np.histogram(sigma_b_rvs, bins=bins, density=True)
    rvs = (b_rvs - a_rvs) / np.sqrt(0.5 * (sigma_a_rvs**2 + sigma_b_rvs**2))
    bins = np.linspace(np.min(rvs) - 0.2 * abs(np.min(rvs)), np.max(rvs) + 0.2 * abs(np.max(rvs)), self.resolution)
    pes = np.histogram(rvs, bins=bins, density=True)
    posterior = {'muA': pa, 'muB': pb, 'psigma_a': psigma_a, 'psigma_b': psigma_b,
                 'lift': lift, 'es': pes, 'prior': self.prior()}
    return posterior
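The analytic step relies on Beta-Binomial conjugacy: with a Beta(alpha, beta) prior and c successes in n trials, the posterior is Beta(alpha + c, beta + n - c), which is exactly what the beta.pdf calls above evaluate. A minimal sketch with illustrative numbers:

import numpy as np
from scipy.stats import beta

alpha_prior, beta_prior = 1.0, 1.0   # flat prior
c, n = 40, 100                       # successes and trials
x = np.linspace(0, 1, 500)
posterior_pdf = beta.pdf(x, alpha_prior + c, beta_prior + n - c)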
Example 6: prior
# Required import: from scipy.stats import beta [as alias]
# Or: from scipy.stats.beta import pdf [as alias]
def prior(self):
    """
    Return the prior distribution evaluated on a unit grid.
    """
    return [beta.pdf(x, self.alpha_prior, self.beta_prior) for x in np.linspace(0, 1, self.resolution)]
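Since beta.pdf broadcasts over array inputs, the comprehension can be collapsed into a single vectorized call; a behavior-preserving sketch (returning an array rather than a list):

def prior(self):
    """Vectorized equivalent: beta.pdf broadcasts over the grid."""
    return beta.pdf(np.linspace(0, 1, self.resolution), self.alpha_prior, self.beta_prior)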
Example 7: test_normal
# Required import: from scipy.stats import beta [as alias]
# Or: from scipy.stats.beta import pdf [as alias]
def test_normal(self):
    # Create 5 normal distributions (2 parameters (mean and stddev) each).
    param_space = Tuple(
        FloatBox(shape=(5,)),  # mean
        FloatBox(shape=(5,)),  # stddev
        add_batch_rank=True
    )
    values_space = FloatBox(shape=(5,), add_batch_rank=True)
    input_spaces = dict(
        parameters=param_space,
        values=values_space,
        deterministic=bool,
    )
    # The Component to test.
    normal = Normal(switched_off_apis={"kl_divergence"})
    test = ComponentTest(component=normal, input_spaces=input_spaces)

    # Batch of size=2 and deterministic (True).
    input_ = [param_space.sample(2), True]
    expected = input_[0][0]  # 0 = mean
    # Sample n times, expect always mean value (deterministic draw).
    for _ in range(50):
        test.test(("draw", input_), expected_outputs=expected)
        test.test(("sample_deterministic", tuple([input_[0]])), expected_outputs=expected)

    # Batch of size=1 and non-deterministic -> expect roughly the mean.
    input_ = [param_space.sample(1), False]
    expected = input_[0][0]  # 0 = mean
    outs = []
    for _ in range(50):
        out = test.test(("draw", input_))
        outs.append(out)
        out = test.test(("sample_stochastic", tuple([input_[0]])))
        outs.append(out)
    recursive_assert_almost_equal(np.mean(outs), expected.mean(), decimals=1)

    # Test log-likelihood outputs.
    means = np.array([[0.1, 0.2, 0.3, 0.4, 100.0]])
    stds = np.array([[0.8, 0.2, 0.3, 2.0, 50.0]])
    values = np.array([[1.0, 2.0, 0.4, 10.0, 5.4]])
    test.test(
        ("log_prob", [tuple([means, stds]), values]),
        expected_outputs=np.log(norm.pdf(values, means, stds)), decimals=4
    )
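For reference values like the expected_outputs above, scipy also exposes the log-density directly; norm.logpdf is equivalent to np.log(norm.pdf(...)) but works in log space, which avoids underflow for extreme values:

from scipy.stats import norm

expected = norm.logpdf(values, loc=means, scale=stds)   # same numbers as np.log(norm.pdf(...))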
Example 8: test_multivariate_normal
# Required import: from scipy.stats import beta [as alias]
# Or: from scipy.stats.beta import pdf [as alias]
def test_multivariate_normal(self):
    # Create batch0=n (batch-rank), batch1=2 (can be used for m mixed Gaussians),
    # num-events=3 (trivariate) distributions (2 parameters (mean and stddev) each).
    num_events = 3  # 3 = trivariate Gaussian
    num_mixed_gaussians = 2  # 2x trivariate Gaussians (mixed)
    param_space = Tuple(
        FloatBox(shape=(num_mixed_gaussians, num_events)),  # mean
        FloatBox(shape=(num_mixed_gaussians, num_events)),  # diag (variance)
        add_batch_rank=True
    )
    values_space = FloatBox(shape=(num_mixed_gaussians, num_events), add_batch_rank=True)
    input_spaces = dict(
        parameters=param_space,
        values=values_space,
        deterministic=bool,
    )
    # The Component to test.
    multivariate_normal = MultivariateNormal(switched_off_apis={"kl_divergence"})
    test = ComponentTest(component=multivariate_normal, input_spaces=input_spaces)

    input_ = [input_spaces["parameters"].sample(4), True]
    expected = input_[0][0]  # 0 = mean
    # Sample n times, expect always mean value (deterministic draw).
    for _ in range(50):
        test.test(("draw", input_), expected_outputs=expected)
        test.test(("sample_deterministic", tuple([input_[0]])), expected_outputs=expected)

    # Batch of size=1 and non-deterministic -> expect roughly the mean.
    input_ = [input_spaces["parameters"].sample(1), False]
    expected = input_[0][0]  # 0 = mean
    outs = []
    for _ in range(50):
        out = test.test(("draw", input_))
        outs.append(out)
        out = test.test(("sample_stochastic", tuple([input_[0]])))
        outs.append(out)
    recursive_assert_almost_equal(np.mean(outs), expected.mean(), decimals=1)

    # Test log-likelihood outputs (against scipy).
    means = values_space.sample(2)
    stds = values_space.sample(2)
    values = values_space.sample(2)
    test.test(
        ("log_prob", [tuple([means, stds]), values]),
        # Sum up the individual log-probs as we have a diag (independent) covariance matrix.
        expected_outputs=np.sum(np.log(norm.pdf(values, means, stds)), axis=-1), decimals=4
    )
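With a diagonal covariance the trivariate density factorizes into independent normals, so the summed reference above matches scipy's multivariate_normal on any single (batch, mixture) slice; a hedged check for index (0, 0) of the arrays defined above:

from scipy.stats import multivariate_normal

ref = multivariate_normal.logpdf(values[0, 0], mean=means[0, 0], cov=np.diag(stds[0, 0] ** 2))
# ref equals np.sum(np.log(norm.pdf(values[0, 0], means[0, 0], stds[0, 0])))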
Example 9: test_beta
# Required import: from scipy.stats import beta [as alias]
# Or: from scipy.stats.beta import pdf [as alias]
def test_beta(self):
    # Create 5 beta distributions (2 parameters (alpha and beta) each).
    param_space = Tuple(
        FloatBox(shape=(5,)),  # alpha
        FloatBox(shape=(5,)),  # beta
        add_batch_rank=True
    )
    values_space = FloatBox(shape=(5,), add_batch_rank=True)
    input_spaces = dict(
        parameters=param_space,
        values=values_space,
        deterministic=bool,
    )
    # The Component to test.
    low, high = -1.0, 2.0
    beta_distribution = Beta(low=low, high=high, switched_off_apis={"kl_divergence"})
    test = ComponentTest(component=beta_distribution, input_spaces=input_spaces)

    # Batch of size=2 and deterministic (True).
    input_ = [input_spaces["parameters"].sample(2), True]
    # Mean for a Beta distribution: 1 / [1 + (beta/alpha)]
    expected = (1.0 / (1.0 + input_[0][1] / input_[0][0])) * (high - low) + low
    # Sample n times, expect always mean value (deterministic draw).
    for _ in range(50):
        test.test(("draw", input_), expected_outputs=expected, decimals=5)
        test.test(("sample_deterministic", tuple([input_[0]])), expected_outputs=expected, decimals=5)

    # Batch of size=1 and non-deterministic -> expect roughly the mean.
    input_ = [input_spaces["parameters"].sample(1), False]
    expected = (1.0 / (1.0 + input_[0][1] / input_[0][0])) * (high - low) + low
    outs = []
    for _ in range(50):
        out = test.test(("draw", input_))
        outs.append(out)
        out = test.test(("sample_stochastic", tuple([input_[0]])))
        outs.append(out)
    recursive_assert_almost_equal(np.mean(outs), expected.mean(), decimals=1)

    # Test log-likelihood outputs (against scipy).
    alpha_ = values_space.sample(1)
    beta_ = values_space.sample(1)
    values = values_space.sample(1)
    values_scaled = values * (high - low) + low
    test.test(
        ("log_prob", [tuple([alpha_, beta_]), values_scaled]),
        expected_outputs=np.log(beta.pdf(values, alpha_, beta_)), decimals=4
    )
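Note that the scipy reference evaluates the density on the unscaled values in [0, 1]; beta.logpdf gives the same numbers directly in log space:

expected = beta.logpdf(values, alpha_, beta_)   # equivalent to np.log(beta.pdf(values, alpha_, beta_))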
Example 10: test_squashed_normal
# Required import: from scipy.stats import beta [as alias]
# Or: from scipy.stats.beta import pdf [as alias]
def test_squashed_normal(self):
    param_space = Tuple(
        FloatBox(shape=(5,)),
        FloatBox(shape=(5,)),
        add_batch_rank=True
    )
    values_space = FloatBox(shape=(5,), add_batch_rank=True)
    input_spaces = dict(
        parameters=param_space,
        deterministic=bool,
        values=values_space
    )
    low, high = -2.0, 1.0
    squashed_distribution = SquashedNormal(switched_off_apis={"kl_divergence"}, low=low, high=high)
    test = ComponentTest(component=squashed_distribution, input_spaces=input_spaces)

    # Batch of size=2 and deterministic (True).
    input_ = [param_space.sample(2), True]
    expected = ((np.tanh(input_[0][0]) + 1.0) / 2.0) * (high - low) + low  # [0] = mean
    # Sample n times, expect always mean value (deterministic draw).
    for _ in range(50):
        test.test(("draw", input_), expected_outputs=expected, decimals=5)
        test.test(("sample_deterministic", tuple([input_[0]])), expected_outputs=expected, decimals=5)

    # Batch of size=1 and non-deterministic -> expect roughly the mean.
    input_ = [param_space.sample(1), False]
    expected = ((np.tanh(input_[0][0]) + 1.0) / 2.0) * (high - low) + low  # [0] = mean
    outs = []
    for _ in range(500):
        out = test.test(("draw", input_))
        outs.append(out)
        self.assertTrue(out.max() <= high)
        self.assertTrue(out.min() >= low)
        out = test.test(("sample_stochastic", tuple([input_[0]])))
        outs.append(out)
        self.assertTrue(out.max() <= high)
        self.assertTrue(out.min() >= low)
    recursive_assert_almost_equal(np.mean(outs), expected.mean(), decimals=1)

    # Test log-likelihood outputs.
    means = np.array([[0.1, 0.2, 0.3, 0.4, 5.0]])
    stds = np.array([[0.8, 0.2, 0.3, 2.0, 4.0]])
    # Make sure values are within low and high.
    values = np.array([[0.9, 0.2, 0.4, -0.1, -1.05]])
    # Change of variables: for y = squash(x) with the tanh squash used here,
    # log p(y) = log p(x) - sum log|dy/dx|, where dy/dx is proportional to
    # 1 - tanh(x)^2 (the constant (high - low) / 2 factor is omitted below,
    # matching the implementation under test).
    # Unsquash values, then get the log-likelihood from a regular Gaussian.
    unsquashed_values = np.arctanh((values - low) / (high - low) * 2.0 - 1.0)
    log_prob_unsquashed = np.log(norm.pdf(unsquashed_values, means, stds))
    log_prob = log_prob_unsquashed - np.sum(np.log(1 - np.tanh(unsquashed_values) ** 2), axis=-1, keepdims=True)
    test.test(("log_prob", [tuple([means, stds]), values]), expected_outputs=log_prob, decimals=4)
Example 11: test_gumbel_softmax_distribution
# Required import: from scipy.stats import beta [as alias]
# Or: from scipy.stats.beta import pdf [as alias]
def test_gumbel_softmax_distribution(self):
    # 5-categorical Gumbel-Softmax.
    param_space = Tuple(FloatBox(shape=(5,)), add_batch_rank=True)
    values_space = FloatBox(shape=(5,), add_batch_rank=True)
    input_spaces = dict(parameters=param_space, deterministic=bool, values=values_space)
    gumbel_softmax_distribution = GumbelSoftmax(switched_off_apis={"kl_divergence", "entropy"}, temperature=1.0)
    test = ComponentTest(component=gumbel_softmax_distribution, input_spaces=input_spaces)

    # Batch of size=2 and deterministic (True).
    input_ = [param_space.sample(2), True]
    expected = np.argmax(input_[0], axis=-1)
    # Sample n times, expect always argmax value (deterministic draw).
    for _ in range(50):
        test.test(("draw", input_), expected_outputs=expected, decimals=5)
        test.test(("sample_deterministic", tuple([input_[0]])), expected_outputs=expected, decimals=5)

    # TODO: finish this test case, using an actual Gumbel-Softmax distribution from the
    # paper: https://arxiv.org/pdf/1611.01144.pdf.
    return

    # NOTE: everything below is unreachable scaffolding copied from the squashed-normal
    # test above; `low`, `high`, and the "???" placeholder are undefined in this scope.
    # Batch of size=1 and non-deterministic -> expect roughly the mean.
    input_ = [param_space.sample(1), False]
    expected = "???"
    outs = []
    for _ in range(100):
        out = test.test(("draw", input_))
        outs.append(np.argmax(out, axis=-1))
        out = test.test(("sample_stochastic", tuple([input_[0]])))
        outs.append(np.argmax(out, axis=-1))
    recursive_assert_almost_equal(np.mean(outs), expected.mean(), decimals=1)
    # Test log-likelihood outputs.
    means = np.array([[0.1, 0.2, 0.3, 0.4, 5.0]])
    stds = np.array([[0.8, 0.2, 0.3, 2.0, 4.0]])
    # Make sure values are within low and high.
    values = np.array([[0.9, 0.2, 0.4, -0.1, -1.05]])
    # Unsquash values, then get the log-likelihood from a regular Gaussian.
    unsquashed_values = np.arctanh((values - low) / (high - low) * 2.0 - 1.0)
    log_prob_unsquashed = np.log(norm.pdf(unsquashed_values, means, stds))
    log_prob = log_prob_unsquashed - np.sum(np.log(1 - np.tanh(unsquashed_values) ** 2), axis=-1, keepdims=True)
    test.test(("log_prob", [tuple([means, stds]), values]), expected_outputs=log_prob, decimals=4)
Example 12: test_beta
# Required import: from scipy.stats import beta [as alias]
# Or: from scipy.stats.beta import pdf [as alias]
def test_beta(self):
    input_space = Box(-2.0, 1.0, shape=(200, 10))
    low, high = -1.0, 2.0
    plain_beta_value_space = Box(0.0, 1.0, shape=(200, 5))

    for fw, sess in framework_iterator(session=True):
        cls = TorchBeta if fw == "torch" else Beta
        inputs = input_space.sample()
        beta_distribution = cls(inputs, {}, low=low, high=high)

        inputs = beta_distribution.inputs
        if sess:
            inputs = sess.run(inputs)
        else:
            inputs = inputs.numpy()
        alpha, beta_ = np.split(inputs, 2, axis=-1)

        # Mean for a Beta distribution: 1 / [1 + (beta/alpha)]
        expected = (1.0 / (1.0 + beta_ / alpha)) * (high - low) + low
        # Sample n times, expect always mean value (deterministic draw).
        out = beta_distribution.deterministic_sample()
        check(out, expected, rtol=0.01)

        # Batch of size=n and non-deterministic -> expect roughly the mean.
        values = beta_distribution.sample()
        if sess:
            values = sess.run(values)
        else:
            values = values.numpy()
        self.assertTrue(np.max(values) <= high)
        self.assertTrue(np.min(values) >= low)
        check(np.mean(values), expected.mean(), decimals=1)

        # Test log-likelihood outputs (against scipy).
        inputs = input_space.sample()
        beta_distribution = cls(inputs, {}, low=low, high=high)
        inputs = beta_distribution.inputs
        if sess:
            inputs = sess.run(inputs)
        else:
            inputs = inputs.numpy()
        alpha, beta_ = np.split(inputs, 2, axis=-1)
        values = plain_beta_value_space.sample()
        values_scaled = values * (high - low) + low
        if fw == "torch":
            values_scaled = torch.Tensor(values_scaled)
        out = beta_distribution.logp(values_scaled)
        check(
            out,
            np.sum(np.log(beta.pdf(values, alpha, beta_)), -1),
            rtol=0.001)
        # TODO(sven): Test entropy outputs (against scipy).