This article collects typical usage examples of the pymc3.Normal method in Python. If you have been wondering how pymc3.Normal works, how to call it, or what real uses of it look like, the curated code examples below may help. You can also explore further usage examples from the pymc3 module that this method belongs to.
The following presents 15 code examples of pymc3.Normal, ordered by popularity by default.
Example 1: model
# Required import: import pymc3 [as alias]
# Or: from pymc3 import Normal [as alias]
def model(profiles, comparisons, selections, sample=2500, alpha_prior_std=10):
    all_attributes = pd.get_dummies(profiles).columns
    profiles_dummies = pd.get_dummies(profiles, drop_first=True)
    choices = pd.concat({profile: profiles_dummies.loc[comparisons[profile]].reset_index(drop=True) for profile in comparisons.columns}, axis=1)

    respondants = selections.columns
    n_attributes_in_model = profiles_dummies.shape[1]
    n_participants = selections.shape[1]

    with pm.Model():
        # https://www.sawtoothsoftware.com/download/ssiweb/CBCHB_Manual.pdf
        # need to include the covariance matrix as a parent of `partsworth`
        alpha = pm.Normal('alpha', 0, sd=alpha_prior_std, shape=n_attributes_in_model, testval=np.random.randn(n_attributes_in_model))
        partsworth = pm.MvNormal("partsworth", alpha, tau=np.eye(n_attributes_in_model), shape=(n_participants, n_attributes_in_model))

        cs = [_create_observation_variable(selection, choices, partsworth[i, :]) for i, (_, selection) in enumerate(selections.iteritems())]

        trace = pm.sample(sample)

    return transform_trace_to_individual_summary_statistics(trace, respondants, profiles_dummies.columns, all_attributes)
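The helper functions referenced above are not shown, so the snippet is not runnable on its own. Below is a stripped-down, self-contained sketch of its hierarchical core: a Normal hyperprior over attribute partworths shared across respondents, with per-respondent MvNormal draws. The shapes and prior scales are made-up illustration values, not the original module's defaults.

import numpy as np
import pymc3 as pm

n_respondents, n_attributes = 20, 4  # hypothetical sizes

with pm.Model():
    # shared "population" partworth mean for each attribute level
    alpha = pm.Normal("alpha", 0, sd=10, shape=n_attributes)
    # per-respondent partworths centred on alpha
    partsworth = pm.MvNormal("partsworth", mu=alpha,
                             tau=np.eye(n_attributes),
                             shape=(n_respondents, n_attributes))
    trace = pm.sample(500)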
Example 2: fit
# Required import: import pymc3 [as alias]
# Or: from pymc3 import Normal [as alias]
def fit(self, X, Y, n_samples=10000, tune_steps=1000, n_jobs=4):
    with pm.Model() as self.model:
        # Priors
        std = pm.Uniform("std", 0, self.sps, testval=X.std())
        beta = pm.StudentT("beta", mu=0, lam=self.sps, nu=self.nu)
        alpha = pm.StudentT("alpha", mu=0, lam=self.sps, nu=self.nu, testval=Y.mean())
        # Deterministic model
        mean = pm.Deterministic("mean", alpha + beta * X)
        # Likelihood of the observations
        obs = pm.Normal("obs", mu=mean, sd=std, observed=Y)

        ## Run MCMC
        # Find search start value with maximum a posteriori estimation
        start = pm.find_MAP()
        # Sample posterior distribution for latent variables
        trace = pm.sample(n_samples, njobs=n_jobs, tune=tune_steps, start=start)

        # Recover posterior samples
        self.burned_trace = trace[int(n_samples / 2):]
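For readers who want to try the same structure outside the class, here is a self-contained sketch with synthetic data; `sps` and `nu` are assumptions standing in for the class attributes `self.sps` and `self.nu`.

import numpy as np
import pymc3 as pm

X = np.random.randn(200)
Y = 2.0 + 1.5 * X + 0.5 * np.random.randn(200)  # synthetic linear data
sps, nu = 100.0, 3  # assumed hyperparameters

with pm.Model():
    std = pm.Uniform("std", 0, sps, testval=X.std())
    beta = pm.StudentT("beta", mu=0, lam=sps, nu=nu)
    alpha = pm.StudentT("alpha", mu=0, lam=sps, nu=nu, testval=Y.mean())
    mean = pm.Deterministic("mean", alpha + beta * X)
    obs = pm.Normal("obs", mu=mean, sd=std, observed=Y)
    trace = pm.sample(2000, tune=1000)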
Example 3: tfp_schools_model
# Required import: import pymc3 [as alias]
# Or: from pymc3 import Normal [as alias]
def tfp_schools_model(num_schools, treatment_stddevs):
    """Non-centered eight schools model for tfp."""
    import tensorflow_probability.python.edward2 as ed
    import tensorflow as tf

    if int(tf.__version__[0]) > 1:
        import tensorflow.compat.v1 as tf  # pylint: disable=import-error

        tf.disable_v2_behavior()
    avg_effect = ed.Normal(loc=0.0, scale=10.0, name="avg_effect")  # `mu`
    avg_stddev = ed.Normal(loc=5.0, scale=1.0, name="avg_stddev")  # `log(tau)`
    school_effects_standard = ed.Normal(
        loc=tf.zeros(num_schools), scale=tf.ones(num_schools), name="school_effects_standard"
    )  # `eta`
    school_effects = avg_effect + tf.exp(avg_stddev) * school_effects_standard  # `theta`
    treatment_effects = ed.Normal(
        loc=school_effects, scale=treatment_stddevs, name="treatment_effects"
    )  # `y`
    return treatment_effects
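Since this page is about pymc3.Normal, here is a hedged sketch of the same non-centered eight-schools model written directly in PyMC3; the data values are the standard eight-schools numbers, used only for illustration.

import numpy as np
import pymc3 as pm

J = 8
y = np.array([28., 8., -3., 7., -1., 1., 18., 12.])
sigma = np.array([15., 10., 16., 11., 9., 11., 10., 18.])

with pm.Model():
    mu = pm.Normal("mu", mu=0., sd=5.)            # average effect
    log_tau = pm.Normal("log_tau", mu=5., sd=1.)  # log of between-school stddev
    eta = pm.Normal("eta", mu=0., sd=1., shape=J) # standardized school effects
    theta = pm.Deterministic("theta", mu + pm.math.exp(log_tau) * eta)
    obs = pm.Normal("obs", mu=theta, sd=sigma, observed=y)
    trace = pm.sample(1000, tune=1000)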
Example 4: apply_parameters
# Required import: import pymc3 [as alias]
# Or: from pymc3 import Normal [as alias]
def apply_parameters(self, g, df, initialization_trace=None):
    for node in nx.topological_sort(g):
        parent_names = g.nodes()[node]["parent_names"]
        if parent_names:
            if not initialization_trace:
                sd = np.array([df[node].std()] + (df[node].std() / df[parent_names].std()).tolist())
                mu = np.array([df[node].std()] + (df[node].std() / df[parent_names].std()).tolist())
                node_sd = df[node].std()
            else:
                node_sd = initialization_trace["{}_sd".format(node)].mean()
                mu = initialization_trace["beta_{}".format(node)].mean(axis=0)
                sd = initialization_trace["beta_{}".format(node)].std(axis=0)
            g.nodes()[node]["parameters"] = pm.Normal("beta_{}".format(node), mu=mu, sd=sd,
                                                      shape=len(parent_names) + 1)
            g.nodes()[node]["sd"] = pm.Exponential("{}_sd".format(node), lam=node_sd)
    return g
Example 5: build_bayesian_network
# Required import: import pymc3 [as alias]
# Or: from pymc3 import Normal [as alias]
def build_bayesian_network(self, g, df):
    for node in nx.topological_sort(g):
        if g.nodes()[node]["parent_names"]:
            mu = g.nodes()[node]["parameters"][0]  # intercept
            mu += pm.math.dot(df[g.nodes()[node]["parent_names"]],
                              g.nodes()[node]["parameters"][1:])
            if g.nodes()[node]["variable_type"] == 'c':
                sd = g.nodes()[node]["sd"]
                g.nodes()[node]["variable"] = pm.Normal("{}".format(node),
                                                        mu=mu, sd=sd,
                                                        observed=df[node])
            elif g.nodes()[node]["variable_type"] == 'b':
                g.nodes()[node]["variable"] = pm.Bernoulli("{}".format(node),
                                                           logit_p=mu,
                                                           observed=df[node])
            else:
                raise Exception("Unrecognized variable type: {}".format(g.nodes()[node]["variable_type"]))
    return g
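Examples 4-5 together build, per graph node, a Normal coefficient vector, a linear predictor via pm.math.dot, and a Normal or Bernoulli observed variable. A self-contained sketch of that per-node pattern with synthetic data (column names and priors are assumptions):

import numpy as np
import pandas as pd
import pymc3 as pm

df = pd.DataFrame(np.random.randn(200, 2), columns=["x1", "x2"])
df["z"] = (df["x1"] - df["x2"] + np.random.randn(200) > 0).astype(int)  # binary node

with pm.Model():
    beta = pm.Normal("beta_z", mu=0., sd=1., shape=3)  # intercept + one coefficient per parent
    logit_p = beta[0] + pm.math.dot(df[["x1", "x2"]].values, beta[1:])
    z = pm.Bernoulli("z", logit_p=logit_p, observed=df["z"])
    trace = pm.sample(1000, tune=1000)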
Example 6: __get_weights
# Required import: import pymc3 [as alias]
# Or: from pymc3 import Normal [as alias]
def __get_weights(self, index, shape, scale=None):
    return pm.Normal('w%d' % index, self.weight_loc, self.weight_scale, shape=shape)
Example 7: __get_biases
# Required import: import pymc3 [as alias]
# Or: from pymc3 import Normal [as alias]
def __get_biases(self, index, shape, scale=None):
    return pm.Normal('b%d' % index, self.weight_loc, self.weight_scale, shape=shape)
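Examples 6-7 are the typical building blocks of a Bayesian neural network: Normal priors over each layer's weights and biases. A self-contained sketch of that pattern, where the layer sizes, data, and prior location/scale (0 and 1) are all assumptions for illustration:

import numpy as np
import pymc3 as pm
import theano.tensor as tt

X = np.random.randn(100, 3)
y = np.random.randn(100)

with pm.Model():
    w0 = pm.Normal("w0", 0., 1., shape=(3, 5))  # first-layer weights
    b0 = pm.Normal("b0", 0., 1., shape=5)       # first-layer biases
    w1 = pm.Normal("w1", 0., 1., shape=5)       # output weights
    hidden = tt.tanh(tt.dot(X, w0) + b0)
    mu = tt.dot(hidden, w1)
    obs = pm.Normal("obs", mu=mu, sd=1., observed=y)
    trace = pm.sample(500, tune=500)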
Example 8: test_shape
# Required import: import pymc3 [as alias]
# Or: from pymc3 import Normal [as alias]
def test_shape(self):
    spec = DistSpec(Normal, mu=0, sd=1)
    spec2 = DistSpec(Normal, mu=0, sd=DistSpec(Lognormal, 0, 1))
    with Model('layer'):
        var = spec((100, 100), 'var')
        var2 = spec2((100, 100), 'var2')
        assert (var.init_value.shape == (100, 100))
        assert (var.name.endswith('var'))
        assert (var2.init_value.shape == (100, 100))
        assert (var2.name.endswith('var2'))
Example 9: test_expressions
# Required import: import pymc3 [as alias]
# Or: from pymc3 import Normal [as alias]
def test_expressions(expr):
    with Model() as model:
        var = expr((10, 10))
        Normal('obs', observed=var)
        assert var.tag.test_value.shape == (10, 10)
        assert len(model.free_RVs) == 3
        fit(1)
Example 10: test_workflow
# Required import: import pymc3 [as alias]
# Or: from pymc3 import Normal [as alias]
def test_workflow(self):
    inp = InputLayer(self.x.shape)
    out = DenseLayer(inp, 1, W=NormalSpec(sd=LognormalSpec()), nonlinearity=to.identity)
    out = DenseLayer(out, 1, W=NormalSpec(sd=LognormalSpec()), nonlinearity=to.identity)
    assert out.root is inp
    with out:
        pm.Normal('y', mu=get_output(out),
                  sd=self.sd,
                  observed=self.y)
Example 11: from_posterior
# Required import: import pymc3 [as alias]
# Or: from pymc3 import Normal [as alias]
def from_posterior(param, samples, distribution=None, half=False, freedom=10):
    if len(samples.shape) > 1:
        shape = samples.shape[1:]
    else:
        shape = None

    if distribution is None:
        smin, smax = np.min(samples), np.max(samples)
        width = smax - smin
        x = np.linspace(smin, smax, 1000)
        y = stats.gaussian_kde(samples)(x)
        if half:
            x = np.concatenate([x, [x[-1] + 0.1 * width]])
            y = np.concatenate([y, [0]])
        else:
            x = np.concatenate([[x[0] - 0.1 * width], x, [x[-1] + 0.1 * width]])
            y = np.concatenate([[0], y, [0]])
        return pm.distributions.Interpolated(param, x, y)
    elif distribution == 'normal':
        temp = stats.norm.fit(samples)
        if shape is None:
            return pm.Normal(param, mu=temp[0], sigma=freedom * temp[1])
        else:
            return pm.Normal(param, mu=temp[0], sigma=freedom * temp[1], shape=shape)
    elif distribution == 'hnormal':
        temp = stats.halfnorm.fit(samples)
        if shape is None:
            return pm.HalfNormal(param, sigma=freedom * temp[1])
        else:
            return pm.HalfNormal(param, sigma=freedom * temp[1], shape=shape)
    elif distribution == 'hcauchy':
        temp = stats.halfcauchy.fit(samples)
        if shape is None:
            return pm.HalfCauchy(param, freedom * temp[1])
        else:
            return pm.HalfCauchy(param, freedom * temp[1], shape=shape)
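A hedged usage sketch for from_posterior: re-using posterior samples of a parameter as the prior of a new model (sequential updating). The parameter name and data below are made up; from_posterior is assumed to be in scope as defined above.

import numpy as np
import pymc3 as pm

old_samples = np.random.normal(1.0, 0.3, size=5000)  # stand-in for trace["theta"] from a previous fit
new_data = np.random.normal(1.2, 1.0, size=50)

with pm.Model():
    theta = from_posterior("theta", old_samples)  # KDE-based Interpolated prior
    obs = pm.Normal("obs", mu=theta, sd=1.0, observed=new_data)
    trace = pm.sample(1000, tune=1000)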
Example 12: fit
# Required import: import pymc3 [as alias]
# Or: from pymc3 import Normal [as alias]
def fit(self, X, y):
    """Fit the Imputer to the dataset by fitting a bayesian model.

    Args:
        X (pd.Dataframe): dataset to fit the imputer.
        y (pd.Series): response, which is eventually imputed.

    Returns:
        self. Instance of the class.
    """
    _not_num_series(self.strategy, y)
    nc = len(X.columns)

    # initialize model for bayesian linear reg. Default vals for priors
    # assume data is scaled and centered. Convergence can struggle or fail
    # if not the case and proper values for the priors are not specified
    # separately, also assumes each beta is normal and "independent"
    # while betas likely not independent, this is technically a rule of OLS
    with pm.Model() as fit_model:
        alpha = pm.Normal("alpha", self.am, sd=self.asd)
        beta = pm.Normal("beta", self.bm, sd=self.bsd, shape=nc)
        sigma = pm.HalfCauchy("σ", self.sig)
        mu = alpha + beta.dot(X.T)
        score = pm.Normal("score", mu, sd=sigma, observed=y)
    self.statistics_ = {"param": fit_model, "strategy": self.strategy}
    return self
Example 13: fit
# Required import: import pymc3 [as alias]
# Or: from pymc3 import Normal [as alias]
def fit(self, X, y):
    """Fit the Imputer to the dataset by fitting a bayesian and an LS model.

    Args:
        X (pd.Dataframe): dataset to fit the imputer.
        y (pd.Series): response, which is eventually imputed.

    Returns:
        self. Instance of the class.
    """
    _not_num_series(self.strategy, y)
    nc = len(X.columns)

    # get predictions for the data, which will be used for "closest" vals
    y_pred = self.lm.fit(X, y).predict(X)
    y_df = DataFrame({"y": y, "y_pred": y_pred})

    # calculate bayes and use appropriate means for alpha and beta priors
    # here we specify the point estimates from the linear regression as the
    # means for the priors. This will greatly speed up posterior sampling
    # and help ensure that convergence occurs
    if self.am is None:
        self.am = self.lm.intercept_
    if self.bm is None:
        self.bm = self.lm.coef_

    # initialize model for bayesian linear reg. Default vals for priors
    # assume data is scaled and centered. Convergence can struggle or fail
    # if not the case and proper values for the priors are not specified
    # separately, also assumes each beta is normal and "independent"
    # while betas likely not independent, this is technically a rule of OLS
    with pm.Model() as fit_model:
        alpha = pm.Normal("alpha", self.am, sd=self.asd)
        beta = pm.Normal("beta", self.bm, sd=self.bsd, shape=nc)
        sigma = pm.HalfCauchy("σ", self.sig)
        mu = alpha + beta.dot(X.T)
        score = pm.Normal("score", mu, sd=sigma, observed=y)
    params = {"model": fit_model, "y_obs": y_df}
    self.statistics_ = {"param": params, "strategy": self.strategy}
    return self
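Both fit() methods above only build the model; posterior sampling happens elsewhere in the imputer. A hedged, self-contained sketch of the same regression model with synthetic data, where the prior hyperparameters (0, 10, 1) stand in for self.am, self.asd, self.bm, self.bsd, and self.sig:

import numpy as np
import pandas as pd
import pymc3 as pm

X = pd.DataFrame(np.random.randn(100, 2), columns=["x1", "x2"])
y = pd.Series(1.0 + X["x1"] - 2.0 * X["x2"] + 0.3 * np.random.randn(100))
nc = len(X.columns)

with pm.Model() as fit_model:
    alpha = pm.Normal("alpha", 0, sd=10)
    beta = pm.Normal("beta", 0, sd=10, shape=nc)
    sigma = pm.HalfCauchy("sigma", 1)
    mu = alpha + beta.dot(X.values.T)
    score = pm.Normal("score", mu, sd=sigma, observed=y)
    trace = pm.sample(1000, tune=1000)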
Example 14: _pyro_noncentered_model
# Required import: import pymc3 [as alias]
# Or: from pymc3 import Normal [as alias]
def _pyro_noncentered_model(J, sigma, y=None):
    import pyro
    import pyro.distributions as dist

    mu = pyro.sample("mu", dist.Normal(0, 5))
    tau = pyro.sample("tau", dist.HalfCauchy(5))
    with pyro.plate("J", J):
        eta = pyro.sample("eta", dist.Normal(0, 1))
        theta = mu + tau * eta
        return pyro.sample("obs", dist.Normal(theta, sigma), obs=y)
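A hedged sketch of running this Pyro model with NUTS; the data values are the classic eight-schools numbers, used here purely for illustration.

import torch
from pyro.infer import MCMC, NUTS

J = 8
sigma = torch.tensor([15., 10., 16., 11., 9., 11., 10., 18.])
y = torch.tensor([28., 8., -3., 7., -1., 1., 18., 12.])

mcmc = MCMC(NUTS(_pyro_noncentered_model), num_samples=500, warmup_steps=500)
mcmc.run(J, sigma, y=y)
posterior = mcmc.get_samples()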
Example 15: _numpyro_noncentered_model
# Required import: import pymc3 [as alias]
# Or: from pymc3 import Normal [as alias]
def _numpyro_noncentered_model(J, sigma, y=None):
    import numpyro
    import numpyro.distributions as dist

    mu = numpyro.sample("mu", dist.Normal(0, 5))
    tau = numpyro.sample("tau", dist.HalfCauchy(5))
    with numpyro.plate("J", J):
        eta = numpyro.sample("eta", dist.Normal(0, 1))
        theta = mu + tau * eta
        return numpyro.sample("obs", dist.Normal(theta, sigma), obs=y)
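And a matching hedged sketch for the NumPyro version, again with the standard eight-schools data for illustration.

import jax.numpy as jnp
from jax import random
from numpyro.infer import MCMC, NUTS

J = 8
sigma = jnp.array([15., 10., 16., 11., 9., 11., 10., 18.])
y = jnp.array([28., 8., -3., 7., -1., 1., 18., 12.])

mcmc = MCMC(NUTS(_numpyro_noncentered_model), num_warmup=500, num_samples=500)
mcmc.run(random.PRNGKey(0), J, sigma, y=y)
posterior = mcmc.get_samples()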