This article collects typical usage examples of the pymc3.Uniform method in Python. If you are wondering what pymc3.Uniform does, how exactly to use it, or want worked examples of it, the curated code examples below may help. You can also explore further usage examples of the pymc3 module it belongs to.
The following presents 13 code examples of the pymc3.Uniform method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
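Before diving in, here is a minimal, self-contained sketch of the basic pattern most of the examples below follow: declare pm.Uniform priors inside a model context, attach a likelihood, and sample. The toy data and bounds are made up for illustration.

import numpy as np
import pymc3 as pm

# Toy data: noisy observations around an unknown constant
data = 2.0 + 0.5 * np.random.randn(100)

with pm.Model():
    # Uniform priors over bounded intervals
    mu = pm.Uniform("mu", lower=0.0, upper=10.0)
    sigma = pm.Uniform("sigma", lower=0.0, upper=5.0)
    # Likelihood of the observed data
    pm.Normal("obs", mu=mu, sd=sigma, observed=data)
    trace = pm.sample(1000, tune=1000)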
Example 1: fit
# Required import: import pymc3 [as alias]
# Or: from pymc3 import Uniform [as alias]
def fit(self, X, Y, n_samples=10000, tune_steps=1000, n_jobs=4):
    with pm.Model() as self.model:
        # Priors
        std = pm.Uniform("std", 0, self.sps, testval=X.std())
        beta = pm.StudentT("beta", mu=0, lam=self.sps, nu=self.nu)
        alpha = pm.StudentT("alpha", mu=0, lam=self.sps, nu=self.nu, testval=Y.mean())
        # Deterministic model
        mean = pm.Deterministic("mean", alpha + beta * X)
        # Likelihood of the observations
        obs = pm.Normal("obs", mu=mean, sd=std, observed=Y)

        ## Run MCMC
        # Find a starting value via maximum a posteriori estimation
        start = pm.find_MAP()
        # Sample the posterior distribution of the latent variables
        # (njobs= was renamed to cores= in later PyMC3 releases)
        trace = pm.sample(n_samples, njobs=n_jobs, tune=tune_steps, start=start)
        # Discard the first half of the trace as burn-in
        self.burned_trace = trace[int(n_samples / 2):]
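A hedged usage sketch of the method above. The host class and its sps (prior precision) and nu (Student-t degrees of freedom) attributes are assumptions inferred from the method body, not taken from the original source.

import numpy as np

class RobustLinearRegression:
    # Hypothetical host class; fit is the module-level method defined above
    def __init__(self, sps=100.0, nu=3):
        self.sps = sps
        self.nu = nu

    fit = fit

X = np.linspace(0.0, 1.0, 50)
Y = 1.0 + 2.0 * X + 0.1 * np.random.randn(50)
reg = RobustLinearRegression()
reg.fit(X, Y, n_samples=2000, tune_steps=500, n_jobs=1)
print(reg.burned_trace["beta"].mean())  # posterior mean of the slope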
Example 2: ln_prob
# Required import: import pymc3 [as alias]
# Or: from pymc3 import Uniform [as alias]
def ln_prob(self, sampler=None):
    """
    Change the ln_prob method to take in a Sampler and return a PyMC3
    distribution.
    """
    from bilby.core.sampler import Pymc3

    if not isinstance(sampler, Pymc3):
        raise ValueError("Sampler is not a bilby Pymc3 sampler object")
    return pm.Uniform(self.name, lower=self.minimum,
                      upper=self.maximum)

# From here on, the syntax is exactly equivalent to other bilby examples.
# We make a prior
Example 3: __init__
# Required import: import pymc3 [as alias]
# Or: from pymc3 import Uniform [as alias]
def __init__(self):
    super(FlatSpec, self).__init__(testval=init.Uniform(1))
Example 4: fit
# Required import: import pymc3 [as alias]
# Or: from pymc3 import Uniform [as alias]
def fit(self, X, y):
    """
    Fits a Gaussian Process regressor using MCMC.

    Parameters
    ----------
    X: np.ndarray, shape=(nsamples, nfeatures)
        Training instances to fit the GP.
    y: np.ndarray, shape=(nsamples,)
        Corresponding continuous target values to `X`.
    """
    self.X = X
    self.n = self.X.shape[0]
    self.y = y
    self.model = pm.Model()
    with self.model as model:
        # Uniform prior on the length-scale; uniform priors on the log
        # variances, i.e. log-uniform priors on the original scale
        l = pm.Uniform('l', 0, 10)
        log_s2_f = pm.Uniform('log_s2_f', lower=-7, upper=5)
        s2_f = pm.Deterministic('sigmaf', tt.exp(log_s2_f))
        log_s2_n = pm.Uniform('log_s2_n', lower=-7, upper=5)
        s2_n = pm.Deterministic('sigman', tt.exp(log_s2_n))
        f_cov = s2_f * covariance_equivalence[type(self.covfunc).__name__](1, l)
        Sigma = f_cov(self.X) + tt.eye(self.n) * s2_n ** 2
        y_obs = pm.MvNormal('y_obs', mu=np.zeros(self.n), cov=Sigma, observed=self.y)
    with self.model as model:
        if self.step is not None:
            self.trace = pm.sample(self.niter, step=self.step())[self.burnin:]
        else:
            self.trace = pm.sample(self.niter, init=self.init)[self.burnin:]
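A hedged usage sketch: the attributes the method reads (covfunc, niter, burnin, init, step) and the covariance_equivalence lookup belong to the surrounding library and are assumed to be configured beforehand; gp stands for an already-constructed instance.

import numpy as np

# gp = GaussianProcessMCMC(covfunc=squaredExponential(), niter=2000, burnin=500)  # hypothetical constructor
X = np.random.uniform(0.0, 1.0, size=(30, 1))
y = np.sin(6.0 * X).ravel() + 0.1 * np.random.randn(30)
gp.fit(X, y)           # builds the model and draws niter posterior samples
print(gp.trace['l'])   # posterior samples of the length-scale l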
Example 5: fit
# Required import: import pymc3 [as alias]
# Or: from pymc3 import Uniform [as alias]
def fit(self, X, y):
    """
    Fits a Student-t regressor using MCMC.

    Parameters
    ----------
    X: np.ndarray, shape=(nsamples, nfeatures)
        Training instances to fit the GP.
    y: np.ndarray, shape=(nsamples,)
        Corresponding continuous target values to `X`.
    """
    self.X = X
    self.n = self.X.shape[0]
    self.y = y
    self.model = pm.Model()
    with self.model as model:
        l = pm.Uniform('l', 0, 10)
        log_s2_f = pm.Uniform('log_s2_f', lower=-7, upper=5)
        s2_f = pm.Deterministic('sigmaf', tt.exp(log_s2_f))
        log_s2_n = pm.Uniform('log_s2_n', lower=-7, upper=5)
        s2_n = pm.Deterministic('sigman', tt.exp(log_s2_n))
        f_cov = s2_f * covariance_equivalence[type(self.covfunc).__name__](1, l)
        Sigma = f_cov(self.X) + tt.eye(self.n) * s2_n ** 2
        y_obs = pm.MvStudentT('y_obs', nu=self.nu, mu=np.zeros(self.n), Sigma=Sigma, observed=self.y)
    with self.model as model:
        if self.step is not None:
            self.trace = pm.sample(self.niter, step=self.step())[self.burnin:]
        else:
            self.trace = pm.sample(self.niter, init=self.init)[self.burnin:]
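Note that Example 5 is identical to Example 4 except for the likelihood: the multivariate normal is replaced by a multivariate Student-t with self.nu degrees of freedom, whose heavier tails make the regression more robust to outlying observations.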
Example 6: sample
# Required import: import pymc3 [as alias]
# Or: from pymc3 import Uniform [as alias]
def sample(
        self, n_samples: int, beta: float = 1.):
    problem = self.problem
    log_post_fun = TheanoLogProbability(problem, beta)
    trace = self.trace

    x0 = None
    if self.x0 is not None:
        x0 = {x_name: val
              for x_name, val in zip(self.problem.x_names, self.x0)}

    # create model context
    with pm.Model() as model:
        # uniform bounds
        k = [pm.Uniform(x_name, lower=lb, upper=ub)
             for x_name, lb, ub in
             zip(problem.get_reduced_vector(problem.x_names),
                 problem.lb, problem.ub)]

        # convert to tensor vector
        theta = tt.as_tensor_variable(k)

        # use a DensityDist for the log-posterior
        pm.DensityDist('log_post', logp=lambda v: log_post_fun(v),
                       observed={'v': theta})

        # step, by default automatically determined by pymc3
        step = None
        if self.step_function:
            step = self.step_function()

        # perform the actual sampling
        trace = pm.sample(
            draws=int(n_samples), trace=trace, start=x0, step=step,
            **self.options)

        # convert trace to inference data object
        data = az.from_pymc3(trace=trace, model=model)

    self.trace = trace
    self.data = data
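The core pattern here, wrapping a black-box log-posterior with pm.DensityDist behind a vector of pm.Uniform priors, also works outside this sampler class. A minimal sketch with a toy log-posterior; all names below are ours.

import pymc3 as pm
import theano.tensor as tt

def log_post_fun(v):
    # Toy log-posterior: an isotropic Gaussian over two parameters
    return -0.5 * tt.sum(v ** 2)

with pm.Model() as model:
    # One bounded Uniform per free parameter
    k = [pm.Uniform(name, lower=-5.0, upper=5.0) for name in ("x0", "x1")]
    theta = tt.as_tensor_variable(k)
    # DensityDist feeds the stacked parameters into logp as its 'observed' input
    pm.DensityDist("log_post", logp=log_post_fun, observed={"v": theta})
    trace = pm.sample(500, tune=500)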
Example 7: __init__
# Required import: import pymc3 [as alias]
# Or: from pymc3 import Uniform [as alias]
def __init__(self, minimum, maximum, name=None, latex_label=None):
    """
    Uniform prior with bounds (should be equivalent to bilby.prior.Uniform)
    """
    bilby.core.prior.Prior.__init__(self, name, latex_label,
                                    minimum=minimum,
                                    maximum=maximum)
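Examples 2 and 7 appear to be two methods of the same custom bilby prior class; assembled here into one sketch for clarity (the class name is ours):

import bilby
import pymc3 as pm

class PyMC3UniformPrior(bilby.core.prior.Prior):
    """Uniform prior that hands bilby's Pymc3 sampler a pm.Uniform."""

    def __init__(self, minimum, maximum, name=None, latex_label=None):
        bilby.core.prior.Prior.__init__(self, name, latex_label,
                                        minimum=minimum, maximum=maximum)

    def ln_prob(self, sampler=None):
        from bilby.core.sampler import Pymc3
        if not isinstance(sampler, Pymc3):
            raise ValueError("Sampler is not a bilby Pymc3 sampler object")
        return pm.Uniform(self.name, lower=self.minimum, upper=self.maximum)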
Example 8: __init__
# Required import: import pymc3 [as alias]
# Or: from pymc3 import Uniform [as alias]
def __init__(
    self,
    learner_cls,
    parameter_keys,
    model_params,
    fit_params,
    model_path,
    **kwargs,
):
    self.priors = [
        [pm.Normal, {"mu": 0, "sd": 10}],
        [pm.Laplace, {"mu": 0, "b": 10}],
    ]
    self.uniform_prior = [pm.Uniform, {"lower": -20, "upper": 20}]
    self.prior_indices = np.arange(len(self.priors))
    self.parameter_f = [
        (pm.Normal, {"mu": 0, "sd": 5}),
        (pm.Cauchy, {"alpha": 0, "beta": 1}),
        0,
        -5,
        5,
    ]
    self.parameter_s = [
        (pm.HalfCauchy, {"beta": 1}),
        (pm.HalfNormal, {"sd": 0.5}),
        (pm.Exponential, {"lam": 0.5}),
        (pm.Uniform, {"lower": 1, "upper": 10}),
        10,
    ]
    # ,(pm.HalfCauchy, {'beta': 2}), (pm.HalfNormal, {'sd': 1}),(pm.Exponential, {'lam': 1.0})]
    self.learner_cls = learner_cls
    self.model_params = model_params
    self.fit_params = fit_params
    self.parameter_keys = parameter_keys
    self.parameters = list(product(self.parameter_f, self.parameter_s))
    pf_arange = np.arange(len(self.parameter_f))
    ps_arange = np.arange(len(self.parameter_s))
    self.parameter_ind = list(product(pf_arange, ps_arange))
    self.model_path = model_path
    self.models = dict()
    self.logger = logging.getLogger(ModelSelector.__name__)
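The constructor above only stores a grid of candidate priors; a standalone sketch of how the (location-like, scale-like) grid expands into the 25 configurations that fit (Example 12) iterates over:

from itertools import product

import pymc3 as pm

parameter_f = [(pm.Normal, {"mu": 0, "sd": 5}),
               (pm.Cauchy, {"alpha": 0, "beta": 1}), 0, -5, 5]
parameter_s = [(pm.HalfCauchy, {"beta": 1}), (pm.HalfNormal, {"sd": 0.5}),
               (pm.Exponential, {"lam": 0.5}),
               (pm.Uniform, {"lower": 1, "upper": 10}), 10]

# Each configuration pairs one location-like with one scale-like setting
for pf, ps in product(parameter_f, parameter_s):
    print(pf, ps)  # 5 x 5 = 25 combinations in total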
Example 9: construct_model
# Required import: import pymc3 [as alias]
# Or: from pymc3 import Uniform [as alias]
def construct_model(self, X, Y):
    """
    Constructs the nested logit model by applying priors on the weight
    vector **weights** as per :meth:`model_configuration`. Then we apply
    a uniform prior to the :math:`\\lambda s`, i.e.
    :math:`\\lambda s \\sim Uniform(\\text{alpha}, 1.0)`. The probability
    of choosing the object :math:`x_i` from the query set
    :math:`Q = \\{x_1, \\ldots, x_n\\}` is evaluated in
    :meth:`get_probabilities`.

    Parameters
    ----------
    X : numpy array
        (n_instances, n_objects, n_features)
        Feature vectors of the objects
    Y : numpy array
        (n_instances, n_objects)
        Preferences in the form of discrete choices for given objects

    Returns
    -------
    model : pymc3 Model :class:`pm.Model`
    """
    self.loss_function_ = likelihood_dict.get(self.loss_function, None)
    with pm.Model() as self.model:
        self.Xt = theano.shared(X)
        self.Yt = theano.shared(Y)
        shapes = {"weights": self.n_object_features_fit_}
        weights_dict = create_weight_dictionary(self.model_configuration, shapes)

        lambda_k = pm.Uniform("lambda_k", self.alpha, 1.0, shape=self.n_nests)
        utility = tt.dot(self.Xt, weights_dict["weights"])
        self.p = self.get_probabilities(utility, lambda_k)

        LogLikelihood(
            "yl", loss_func=self.loss_function_, p=self.p, observed=self.Yt
        )
    self.logger.info("Model construction completed")
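A hedged usage sketch; the host class, its constructor, and helpers such as create_weight_dictionary and LogLikelihood come from the surrounding library and are assumptions here, with learner standing for an already-configured instance.

import numpy as np

n_instances, n_objects, n_features = 200, 5, 3
X = np.random.randn(n_instances, n_objects, n_features)
# One-hot discrete choices: one chosen object per instance
Y = np.eye(n_objects)[np.random.randint(n_objects, size=n_instances)]

# learner = NestedLogitModel(n_nests=2, alpha=0.1)  # hypothetical constructor
learner.construct_model(X, Y)  # builds self.model with the Uniform lambda_k prior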
Example 10: fit_cross_cov
# Required import: import pymc3 [as alias]
# Or: from pymc3 import Uniform [as alias]
def fit_cross_cov(self, n_exp=2, n_gauss=2, range_mu=None):
    """
    Fit an analytical covariance to the experimental data.

    Args:
        n_exp (int): number of exponential basis functions
        n_gauss (int): number of Gaussian basis functions
        range_mu: prior mean of the range. Defaults to the mean of the lags

    Returns:
        pymc.Model: PyMC3 model to be sampled using MCMC
    """
    self.n_exp = n_exp
    self.n_gauss = n_gauss

    n_var = self.n_properties
    df = self.exp_var
    lags = self.lags

    # Prior standard deviation for the error of the regression
    prior_std_reg = df.std(0).max() * 10

    # Prior value for the mean of the ranges
    if not range_mu:
        range_mu = lags.mean()

    # pymc3 Model
    with pm.Model() as model:  # model specifications in PyMC3 are wrapped in a with statement
        # Define priors
        sigma = pm.HalfCauchy('sigma', beta=prior_std_reg, testval=1., shape=n_var)
        psill = pm.Normal('sill', prior_std_reg, sd=.5 * prior_std_reg, shape=(n_exp + n_gauss))
        range_ = pm.Normal('range', range_mu, sd=range_mu * .3, shape=(n_exp + n_gauss))
        lambda_ = pm.Uniform('weights', 0, 1, shape=(n_var * (n_exp + n_gauss)))

        # Exponential covariance
        exp = pm.Deterministic('exp',
                               # (lambda_[:n_exp*n_var]*
                               psill[:n_exp] *
                               (1. - T.exp(T.dot(-lags.values.reshape((len(lags), 1)),
                                                 (range_[:n_exp].reshape((1, n_exp)) / 3.) ** -1))))

        # Gaussian covariance
        gauss = pm.Deterministic('gaus',
                                 psill[n_exp:] *
                                 (1. - T.exp(T.dot(-lags.values.reshape((len(lags), 1)) ** 2,
                                                   (range_[n_exp:].reshape((1, n_gauss)) * 4 / 7.) ** -2))))

        # We stack the basis functions in the same matrix and tile it to match
        # the number of properties we have
        func = pm.Deterministic('func', T.tile(T.horizontal_stack(exp, gauss), (n_var, 1, 1)))

        # We weight each basis function and sum them
        func_w = pm.Deterministic("func_w", T.sum(func * lambda_.reshape((n_var, 1, (n_exp + n_gauss))), axis=2))

        for e, cross in enumerate(df.columns):
            # Likelihoods
            pm.Normal(cross + "_like", mu=func_w[e], sd=sigma[e], observed=df[cross].values)

    return model
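Note that this method returns the model rather than sampling it, so sampling is a separate step; a hedged sketch where variogram stands for an already-prepared instance of the host class:

import pymc3 as pm

model = variogram.fit_cross_cov(n_exp=2, n_gauss=2)
with model:
    trace = pm.sample(1000, tune=1000)
# trace['func_w'] then holds posterior draws of the weighted covariance curves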
Example 11: construct_model
# Required import: import pymc3 [as alias]
# Or: from pymc3 import Uniform [as alias]
def construct_model(self, X, Y):
    """
    Constructs the nested logit model by applying priors on the weight
    vectors **weights** and **weights_ik** as per :meth:`model_configuration`.
    Then we apply a uniform prior to the :math:`\\lambda s`, i.e.
    :math:`\\lambda s \\sim Uniform(\\text{alpha}, 1.0)`. The probability of
    choosing the object :math:`x_i` from the query set
    :math:`Q = \\{x_1, \\ldots, x_n\\}` is evaluated in :meth:`get_probabilities`.

    Parameters
    ----------
    X : numpy array
        (n_instances, n_objects, n_features)
        Feature vectors of the objects
    Y : numpy array
        (n_instances, n_objects)
        Preferences in the form of discrete choices for given objects

    Returns
    -------
    model : pymc3 Model :class:`pm.Model`
    """
    self.random_state_ = check_random_state(self.random_state)
    self.loss_function_ = likelihood_dict.get(self.loss_function, None)

    # Subsample the training data if it exceeds the size threshold
    if np.prod(X.shape) > self.threshold:
        upper_bound = int(self.threshold / np.prod(X.shape[1:]))
        indices = self.random_state_.choice(X.shape[0], upper_bound, replace=False)
        X = X[indices, :, :]
        Y = Y[indices, :]
    self.logger.info(
        "Train Set instances {} objects {} features {}".format(*X.shape)
    )

    with pm.Model() as self.model:
        self.Xt = theano.shared(X)
        self.Yt = theano.shared(Y)
        shapes = {
            "weights": self.n_object_features_fit_,
            "weights_ik": (self.n_object_features_fit_, self.n_nests),
        }
        weights_dict = create_weight_dictionary(self.model_configuration, shapes)

        alpha_ik = tt.dot(self.Xt, weights_dict["weights_ik"])
        alpha_ik = ttu.softmax(alpha_ik, axis=2)
        utility = tt.dot(self.Xt, weights_dict["weights"])
        lambda_k = pm.Uniform("lambda_k", self.alpha, 1.0, shape=self.n_nests)
        self.p = self.get_probabilities(utility, lambda_k, alpha_ik)

        LogLikelihood(
            "yl", loss_func=self.loss_function_, p=self.p, observed=self.Yt
        )
    self.logger.info("Model construction completed")
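The subsampling guard at the top keeps the shared theano tensors bounded in size; a standalone sketch with an illustrative threshold:

import numpy as np

threshold = 100_000
X = np.random.randn(10_000, 5, 3)          # 150,000 entries > threshold
if np.prod(X.shape) > threshold:
    # Keep only as many instances as the threshold allows
    upper_bound = int(threshold / np.prod(X.shape[1:]))
    indices = np.random.RandomState(42).choice(X.shape[0], upper_bound,
                                               replace=False)
    X = X[indices, :, :]
print(X.shape)  # (6666, 5, 3)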
Example 12: fit
# Required import: import pymc3 [as alias]
# Or: from pymc3 import Uniform [as alias]
def fit(self, X, Y):
    model_args = dict()
    for param_key in self.parameter_keys:
        model_args[param_key] = self.uniform_prior
    self.logger.info("Uniform Prior")
    self.model_params["model_args"] = model_args
    key = "{}_uniform_prior".format(self.parameter_keys)
    self.fit_learner(X, Y, key)

    for j, param in enumerate(self.parameters):
        self.logger.info("mu: {}, sd/b: {}".format(*self.parameter_ind[j]))
        if len(self.parameter_keys) == 2:
            for i1, i2 in product(self.prior_indices, self.prior_indices):
                prior1 = self.priors[i1]
                prior2 = self.priors[i2]
                self.logger.info("Priors {}, {}".format(i1, i2))
                model_args = dict()
                k1 = list(prior1[1].keys())
                k2 = list(prior2[1].keys())
                prior1[1] = dict(zip(k1, param))
                prior2[1] = dict(zip(k2, param))
                model_args[self.parameter_keys[0]] = prior1
                model_args[self.parameter_keys[1]] = prior2
                key = "{}_{}_{}_{}_mu_{}_sd_{}".format(
                    self.parameter_keys[0],
                    i1,
                    self.parameter_keys[1],
                    i2,
                    self.parameter_ind[j][0],
                    self.parameter_ind[j][1],
                )
                self.model_params["model_args"] = model_args
                self.fit_learner(X, Y, key)
        else:
            for i, prior in enumerate(self.priors):
                self.logger.info("Prior {}".format(i))
                model_args = dict()
                k1 = list(prior[1].keys())
                prior[1] = dict(zip(k1, param))
                model_args[self.parameter_keys[0]] = prior
                self.model_params["model_args"] = model_args
                key = "{}_{}_mu_{}_sd_{}".format(
                    self.parameter_keys[0],
                    i,
                    self.parameter_ind[j][0],
                    self.parameter_ind[j][1],
                )
                self.fit_learner(X, Y, key)
Example 13: construct_model
# Required import: import pymc3 [as alias]
# Or: from pymc3 import Uniform [as alias]
def construct_model(self, X, Y):
    """
    Constructs the nested logit model by applying priors on the weight
    vectors **weights** and **weights_k** as per :meth:`model_configuration`.
    Then we apply a uniform prior to the :math:`\\lambda s`, i.e.
    :math:`\\lambda s \\sim Uniform(\\text{alpha}, 1.0)`. The probability of
    choosing the object :math:`x_i` from the query set
    :math:`Q = \\{x_1, \\ldots, x_n\\}` is evaluated in :meth:`get_probabilities`.

    Parameters
    ----------
    X : numpy array
        (n_instances, n_objects, n_features)
        Feature vectors of the objects
    Y : numpy array
        (n_instances, n_objects)
        Preferences in the form of discrete choices for given objects

    Returns
    -------
    model : pymc3 Model :class:`pm.Model`
    """
    self.loss_function_ = likelihood_dict.get(self.loss_function, None)

    # Subsample the training data if it exceeds the size threshold
    if np.prod(X.shape) > self.threshold:
        upper_bound = int(self.threshold / np.prod(X.shape[1:]))
        indices = self.random_state_.choice(X.shape[0], upper_bound, replace=False)
        X = X[indices, :, :]
        Y = Y[indices, :]
    self.logger.info(
        "Train Set instances {} objects {} features {}".format(*X.shape)
    )
    y_nests = self.create_nests(X)

    with pm.Model() as self.model:
        self.Xt = theano.shared(X)
        self.Yt = theano.shared(Y)
        self.y_nests = theano.shared(y_nests)
        shapes = {
            "weights": self.n_object_features_fit_,
            "weights_k": self.n_object_features_fit_,
        }
        weights_dict = create_weight_dictionary(self.model_configuration, shapes)

        lambda_k = pm.Uniform("lambda_k", self.alpha, 1.0, shape=self.n_nests)
        weights = weights_dict["weights"] / lambda_k[:, None]
        utility = self._eval_utility(weights)
        utility_k = tt.dot(self.features_nests, weights_dict["weights_k"])
        self.p = self.get_probabilities(utility, lambda_k, utility_k)

        LogLikelihood(
            "yl", loss_func=self.loss_function_, p=self.p, observed=self.Yt
        )
    self.logger.info("Model construction completed")
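To close, a minimal standalone version of the vectorized prior that Examples 9, 11 and 13 all rely on: shape= draws one Uniform(alpha, 1) variable per nest in a single call.

import pymc3 as pm

n_nests, alpha = 3, 0.2
with pm.Model():
    # lambda_k[i] ~ Uniform(alpha, 1) for each of the n_nests nests
    lambda_k = pm.Uniform("lambda_k", alpha, 1.0, shape=n_nests)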