This article collects typical usage examples of the pymc3.find_MAP function in Python: what find_MAP does, how it is called, and what it is typically used for.
The 15 code examples below are sorted by popularity. Unless shown otherwise, they assume the usual imports: import pymc3 as pm, import numpy as np, import scipy as sp, and import theano.tensor as T.
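Before the examples, a minimal sketch of the basic pattern (the model and data here are illustrative, not taken from any example below): find_MAP maximizes the log-posterior and returns a point estimate, which is commonly used as the starting point for a sampler.

import numpy as np
import pymc3 as pm

# Minimal sketch: estimate the mean of noisy data, then start NUTS at the MAP.
data = np.random.randn(100)
with pm.Model():
    mu = pm.Normal('mu', mu=0, sd=10)
    pm.Normal('obs', mu=mu, sd=1, observed=data)
    start = pm.find_MAP()            # optimize the log-posterior
    step = pm.NUTS(scaling=start)    # scale the sampler with the MAP point
    trace = pm.sample(500, step, start=start)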
Example 1: _fit_time_series_model
def _fit_time_series_model(self, signal, target, samples):
    model_randomwalk = pm.Model()
    with model_randomwalk:
        sigma_alpha = pm.Exponential('sigma_alpha', 1. / .02, testval=.1)
        sigma_beta = pm.Exponential('sigma_beta', 1. / .02, testval=.1)
        alpha = GaussianRandomWalk('alpha', sigma_alpha ** -2, shape=len(target))
        beta = GaussianRandomWalk('beta', sigma_beta ** -2, shape=len(target))

        # Define regression
        regression = alpha + beta * signal.values

        # Assume prices are normally distributed; the mean comes from the regression.
        sd = pm.Uniform('sd', 0, 20)
        likelihood = pm.Normal('y',
                               mu=regression,
                               sd=sd,
                               observed=target.values)

        # First optimize the random walk
        start = pm.find_MAP(vars=[alpha, beta], fmin=optimize.fmin_l_bfgs_b)
        step = pm.NUTS(scaling=start)
        trace = pm.sample(10, step, start)

        # Sample, restarting from the last point of the short warm-up trace
        start2 = trace.point(-1)
        step = pm.NUTS(scaling=start2)
        trace_rw = pm.sample(samples, step, start=start2)
    return trace_rw
Example 2: init_nuts
def init_nuts(init='advi', n_init=500000, model=None, **kwargs):
    """Initialize and sample from the posterior of a continuous model.

    This is a convenience function. NUTS convergence and sampling speed depend
    strongly on the choice of mass/scaling matrix. In our experience, using ADVI
    to estimate a diagonal covariance matrix and using this as the scaling matrix
    produces robust results over a wide class of continuous models.

    Parameters
    ----------
    init : str {'advi', 'advi_map', 'map', 'nuts'}
        Initialization method to use.
        * advi : Run ADVI to estimate posterior mean and diagonal covariance matrix.
        * advi_map : Initialize ADVI with MAP and use MAP as starting point.
        * map : Use the MAP as starting point.
        * nuts : Run NUTS and estimate posterior mean and covariance matrix.
    n_init : int
        Number of iterations of the initializer.
        If 'advi', number of iterations; if 'nuts', number of draws.
    model : Model (optional if in `with` context)
    **kwargs : keyword arguments
        Extra keyword arguments are forwarded to pymc3.NUTS.

    Returns
    -------
    start : pymc3.model.Point
        Starting point for the sampler.
    nuts_sampler : pymc3.step_methods.NUTS
        Instantiated and initialized NUTS sampler object.
    """
    model = pm.modelcontext(model)

    pm._log.info('Initializing NUTS using {}...'.format(init))

    if init == 'advi':
        v_params = pm.variational.advi(n=n_init)
        start = pm.variational.sample_vp(v_params, 1, progressbar=False)[0]
        cov = np.power(model.dict_to_array(v_params.stds), 2)
    elif init == 'advi_map':
        start = pm.find_MAP()
        v_params = pm.variational.advi(n=n_init, start=start)
        cov = np.power(model.dict_to_array(v_params.stds), 2)
    elif init == 'map':
        start = pm.find_MAP()
        cov = pm.find_hessian(point=start)
    elif init == 'nuts':
        init_trace = pm.sample(step=pm.NUTS(), draws=n_init)
        cov = pm.trace_cov(init_trace[n_init // 2:])
        start = {varname: np.mean(init_trace[varname]) for varname in init_trace.varnames}
    else:
        raise NotImplementedError('Initializer {} is not supported.'.format(init))

    step = pm.NUTS(scaling=cov, is_cov=True, **kwargs)

    return start, step
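A short usage sketch (the model is illustrative): call init_nuts inside a model context to get a starting point and a tuned NUTS step, then hand both to pm.sample.

import numpy as np
import pymc3 as pm

# Illustrative model; the helper requires an active model context.
data = np.random.randn(200)
with pm.Model():
    mu = pm.Normal('mu', mu=0, sd=1)
    pm.Normal('obs', mu=mu, sd=1, observed=data)
    start, step = init_nuts(init='advi', n_init=10000)
    trace = pm.sample(1000, step=step, start=start)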
Example 3: fit
def fit(self, x, y, mcmc_samples=1000):
    t = x.shape[0] - 1  # number of additive components
    varnames = ["xc", "w", "decay", "sigma", "b", "lam"]
    with pm.Model() as model:
        # Priors for the additive predictor
        w = pm.Normal("w", mu=0, sd=1, shape=t)
        decay = pm.HalfNormal("decay", sd=200, shape=t)
        # Priors for the likelihood
        sigma = pm.Uniform("sigma", 0, 0.3)
        b = pm.Normal("b", mu=0, sd=20)
        lam = pm.Uniform("lam", 0, 0.3)
        # Build the linear predictor
        lin_pred = 0
        for ii in range(1, t + 1):
            lin_pred += self.bias(w[ii - 1], decay[ii - 1])(x[ii, :])
        # phi is assumed to be a standard normal CDF defined elsewhere in the module
        phi2 = pm.Deterministic("phi2", 0.5 * lam + (1 - lam) * phi(b + lin_pred + x[0, :] / sigma))
        y = pm.Bernoulli("y", p=phi2, observed=y)
    with model:
        # Inference
        start = pm.find_MAP()  # find a starting value by optimization
        print("MAP found:")
        # step = pm.NUTS(scaling=start)
        # step = pm.Slice()
        step = pm.NUTS(scaling=start)
        trace = pm.sample(mcmc_samples, step, start=start, progressbar=True)  # draw posterior samples
    return trace, model
Example 4: model_returns_t
def model_returns_t(data, samples=500):
    """Run Bayesian model assuming returns are Student-T distributed.

    Parameters
    ----------
    data : pandas.Series
        Series of simple returns of an algorithm or stock.
    samples : int, optional
        Number of posterior samples to draw.

    Returns
    -------
    pymc3.sampling.BaseTrace object
        A PyMC3 trace object that contains samples for each parameter
        of the posterior.
    """
    with pm.Model():
        mu = pm.Normal('mean returns', mu=0, sd=.01, testval=data.mean())
        sigma = pm.HalfCauchy('volatility', beta=1, testval=data.std())
        nu = pm.Exponential('nu_minus_two', 1. / 10., testval=3.)

        returns = pm.T('returns', nu=nu + 2, mu=mu, sd=sigma, observed=data)
        pm.Deterministic('annual volatility',
                         returns.distribution.variance ** .5 * np.sqrt(252))
        pm.Deterministic('sharpe', returns.distribution.mean /
                         returns.distribution.variance ** .5 *
                         np.sqrt(252))

        start = pm.find_MAP(fmin=sp.optimize.fmin_powell)
        step = pm.NUTS(scaling=start)
        trace = pm.sample(samples, step, start=start)
    return trace
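A usage sketch with synthetic data (the series is illustrative, not a real return stream):

import numpy as np
import pandas as pd

# Hypothetical daily simple returns for one trading year
returns = pd.Series(np.random.normal(0.0005, 0.01, 252))
trace = model_returns_t(returns, samples=500)
print(trace['sharpe'].mean())  # posterior mean of the annualized Sharpe ratio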
Example 5: fit
def fit(self, xdata, ydata, yerr, arange=[-100., 100.], brange=[-100., 100.]):
    trace = None
    with pm.Model() as model:
        # alpha = pm.Normal('alpha', mu=1.0e7, sd=1.0e6)
        # beta = pm.Normal('beta', mu=1.0e7, sd=1.0e6)
        # sigma = pm.Uniform('sigma', lower=0, upper=20)
        alpha = pm.Uniform('alpha', lower=arange[0], upper=arange[1])
        beta = pm.Uniform('beta', lower=brange[0], upper=brange[1])
        sigma = yerr
        y_est = alpha + beta * xdata
        likelihood = pm.Normal('y', mu=y_est, sd=sigma, observed=ydata)

        # Obtain starting values via MAP
        start = pm.find_MAP()
        step = pm.NUTS(scaling=start)
        trace = pm.sample(2000, step, start=start, progressbar=False)
        # pm.traceplot(trace)
        # plt.show()
        # pprint(trace['alpha'].mean())
        # pprint(trace['alpha'].std())
        # print pm.summary(trace)
        # print pm.summary(trace, ['alpha'])
        # print pm.stats()
        # print(trace.__dict__)

    # Return the traces
    return [trace['alpha'], trace['beta']]
Example 6: sample_pymc3
def sample_pymc3(d, samples=2000, njobs=2):
    with pm.Model() as model:
        dfc = pm.Normal(mu=0.0, sd=d['sigma_fc'], name='dfc')
        Q = pm.Gamma(mu=d['mu_Q'], sd=d['sigma_Q'], name='Q')
        Pdet = pm.Gamma(mu=d['mu_Pdet'], sd=d['sigma_Pdet'], name='Pdet')
        kc = pm.Gamma(mu=d['mu_kc'], sd=d['sigma_kc'], name='kc')
        M = d['M']
        T = d['T']
        scale = d['scale']
        mu_fc = d['mu_fc']
        f = d['f']
        like = pm.Gamma(alpha=M,
                        beta=(M / (((2 * 1.381e-5 * T) / (np.pi * Q * kc)) / scale * (dfc + mu_fc)**3 /
                                   ((f * f - (dfc + mu_fc)**2) * (f * f - (dfc + mu_fc)**2) + f * f * (dfc + mu_fc)**2 / Q**2)
                                   + Pdet)),
                        observed=d['y'],
                        name='like')
        start = pm.find_MAP()
        step = pm.NUTS(scaling=start)
        trace = pm.sample(samples, step=step, start=start, progressbar=True, njobs=njobs)
    return trace
Example 7: lin_fit
def lin_fit(t, y, yerr=None, samples=10000, sampler="NUTS", alphalims=[-100, 100]):
    """
    Bayesian linear fitting function.

    See Jake Vanderplas' blog post on how to be a Bayesian in Python
    for more details. Uses pymc3 MCMC sampling.

    Inputs:
        t :: Vector of values at which the function is evaluated ("x" values)
        y :: Vector of dependent values (observed y(t))
        yerr (optional = None) :: Errors on y values. If not provided, errors are
            taken to be the same for each data point, with a 1/sigma (Jeffreys) prior.
        samples (optional = 10000) :: Number of samples to draw from MCMC
        sampler (optional = "NUTS") :: Type of MCMC sampler to use. "NUTS" or "Metropolis"
        alphalims (optional = [-100, 100]) :: Length-2 vector of endpoints for the
            uniform prior on the intercept of the line
    """
    with pm.Model() as model:
        # Uninformative priors on the slope/intercept of the line
        alpha = pm.Uniform('alpha', alphalims[0], alphalims[1])
        # An uninformative prior on the slope; see Jake's blog post
        beta = pm.DensityDist('beta', lambda value: -1.5 * T.log(1 + value**2.), testval=0)
        # If yerr is not given, assume all values have the same error bar
        if yerr is None:
            sigma = pm.DensityDist('sigma', lambda value: -T.log(T.abs_(value)), testval=1)
        else:
            sigma = yerr
        like = pm.Normal('likelihood', mu=alpha + beta * t, sd=sigma, observed=y)
        # Start the sampler at the maximum a posteriori value
        start = pm.find_MAP()
        step = select_sampler(sampler, start)
        trace = pm.sample(draws=samples, start=start, step=step)
    return trace
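A usage sketch with synthetic data (assumes the select_sampler helper referenced above is in scope; the line parameters are illustrative):

import numpy as np

# Synthetic line y = 2 + 0.5 t with Gaussian noise
t = np.linspace(0, 10, 50)
y = 2.0 + 0.5 * t + np.random.normal(0, 0.3, t.size)
trace = lin_fit(t, y, yerr=0.3 * np.ones_like(t), samples=2000)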
Example 8: test_linear_component
def test_linear_component(self):
    vars_to_create = {
        'sigma',
        'sigma_interval__',
        'y_obs',
        'lm_x0',
        'lm_Intercept'
    }
    with Model() as model:
        lm = LinearComponent(
            self.data_linear['x'],
            self.data_linear['y'],
            name='lm'
        )  # yields lm_x0, lm_Intercept
        sigma = Uniform('sigma', 0, 20)  # yields sigma_interval__
        Normal('y_obs', mu=lm.y_est, sigma=sigma, observed=self.y_linear)  # yields y_obs
        start = find_MAP(vars=[sigma])
        step = Slice(model.vars)
        trace = sample(500, tune=0, step=step, start=start,
                       progressbar=False, random_seed=self.random_seed)

    assert round(abs(np.mean(trace['lm_Intercept']) - self.intercept), 1) == 0
    assert round(abs(np.mean(trace['lm_x0']) - self.slope), 1) == 0
    assert round(abs(np.mean(trace['sigma']) - self.sd), 1) == 0
    assert vars_to_create == set(model.named_vars.keys())
Example 9: run
def run(self, samples=1000, find_map=True, verbose=True, step='nuts',
        burn=0.5, **kwargs):
    '''Run the model.

    Args:
        samples (int): Number of MCMC samples to generate.
        find_map (bool): if True, use pm.find_MAP() to compute the starting
            point for sampling.
        verbose (bool): if True, print additional information.
        step (str or PyMC3 sampler): either an instantiated PyMC3 sampler,
            or the name of the sampler to use (either 'nuts' or 'metropolis').
        start: Optional starting point to pass on to the sampler.
        burn (int or float): Number or proportion of samples to treat as
            burn-in; passed on to the BayesianModelResults instance returned
            by this method.
        kwargs (dict): Optional keyword arguments passed on to the sampler.

    Returns: an instance of class BayesianModelResults.
    '''
    with self.model:
        njobs = kwargs.pop('njobs', 1)
        start = kwargs.pop('start', pm.find_MAP() if find_map else None)
        chain = kwargs.pop('chain', 0)
        if isinstance(step, string_types):
            step = {
                'nuts': pm.NUTS,
                'metropolis': pm.Metropolis
            }[step.lower()](**kwargs)
        self.start = start
        trace = pm.sample(
            samples, start=start, step=step, progressbar=verbose,
            njobs=njobs, chain=chain)
    self.last_trace = trace  # for convenience
    return BayesianModelResults(trace)
Example 10: run
def run(n=5000):
    with model_1:
        xstart = pm.find_MAP()
        xstep = pm.Slice()
        trace = pm.sample(n, xstep, xstart, random_seed=123, progressbar=True)
        pm.summary(trace)
Example 11: model_returns_t_alpha_beta
def model_returns_t_alpha_beta(data, bmark, samples=2000):
    """Run a Bayesian alpha-beta model with T-distributed returns.

    This model estimates the intercept (alpha) and slope (beta) of two
    return sets. Usually these will be algorithm returns and benchmark
    returns (e.g. the S&P 500). The data is assumed to be T-distributed
    and is thus robust to outliers and takes tail events into account.

    Parameters
    ----------
    data : pandas.Series
        Series of simple returns of an algorithm or stock.
    bmark : pandas.Series
        Series of simple returns of a benchmark like the S&P 500.
        If bmark has more recent returns than data, those dates will be
        treated as missing values and predictions will be generated for
        them, taking market correlations into account.
    samples : int, optional
        Number of posterior samples to draw.

    Returns
    -------
    pymc3.sampling.BaseTrace object
        A PyMC3 trace object that contains samples for each parameter
        of the posterior.
    """
    if len(data) != len(bmark):
        # pad missing data
        data = pd.Series(data, index=bmark.index)

    data_no_missing = data.dropna()

    with pm.Model():
        sigma = pm.HalfCauchy(
            'sigma',
            beta=1,
            testval=data_no_missing.values.std())
        nu = pm.Exponential('nu_minus_two', 1. / 10., testval=.3)

        # alpha and beta
        beta_init, alpha_init = sp.stats.linregress(
            bmark.loc[data_no_missing.index],
            data_no_missing)[:2]

        alpha_reg = pm.Normal('alpha', mu=0, sd=.1, testval=alpha_init)
        beta_reg = pm.Normal('beta', mu=0, sd=1, testval=beta_init)

        pm.T('returns',
             nu=nu + 2,
             mu=alpha_reg + beta_reg * bmark,
             sd=sigma,
             observed=data)

        start = pm.find_MAP(fmin=sp.optimize.fmin_powell)
        step = pm.NUTS(scaling=start)
        trace = pm.sample(samples, step, start=start)
    return trace
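A usage sketch with synthetic aligned return series (all figures are illustrative):

import numpy as np
import pandas as pd

idx = pd.date_range('2015-01-01', periods=252)
bmark = pd.Series(np.random.normal(0.0003, 0.01, 252), index=idx)
algo = pd.Series(0.0005 + 0.8 * bmark + np.random.normal(0, 0.005, 252), index=idx)
trace = model_returns_t_alpha_beta(algo, bmark, samples=500)
print(trace['alpha'].mean(), trace['beta'].mean())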
Example 12: run
def run(n=1000):
    if n == "short":
        n = 50
    with model:
        start = pm.find_MAP()
        step = pm.NUTS(scaling=start)
        trace = pm.sample(n, step=step, start=start)
    return trace
Example 13: _inference
def _inference(self, reinit=True):
    with self.cached_model:
        if reinit or (self.cached_start is None) or (self.cached_sampler is None):
            self.cached_start = pm.find_MAP(fmin=sp.optimize.fmin_powell)
            self.cached_sampler = pm.NUTS(scaling=self.cached_start)
        trace = pm.sample(self.samples, self.cached_sampler, start=self.cached_start)
    return trace
Example 14: learn_model
def learn_model(model, draws=50000):
    with model:
        start = pm.find_MAP()
        # step = pm.Slice()  # It is very slow when the model has many parameters
        # step = pm.HamiltonianMC(scaling=start)  # It leads to constant samples
        # step = pm.NUTS(scaling=start)  # It leads to constant samples
        step = pm.Metropolis()
        trace = pm.sample(draws, step, start=start)
    return trace
Example 15: fit
def fit(self, X, y, sampling_iterations):
    X = self._force_shape(X)
    self.input_data_dimension = len(X[0])
    model, w, b = self._build_model(X, y)
    with model:
        self.map_estimate = pymc3.find_MAP(model=model, vars=[w, b])
        step = pymc3.NUTS(scaling=self.map_estimate)
        trace = pymc3.sample(sampling_iterations, step, start=self.map_estimate)
    self.samples = trace