本文整理汇总了Python中pymc3.summary函数的典型用法代码示例。如果您正苦于以下问题:Python summary函数的具体用法?Python summary怎么用?Python summary使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了summary函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: run
def run(n=5000):
    """Sample the module-level ``model_1`` and print a summary table.

    Starts a Slice sampler from the MAP estimate.

    Parameters
    ----------
    n : int
        Number of posterior draws. (Bug fix: the original hard-coded
        5000 and silently ignored this argument.)
    """
    with model_1:
        xstart = pm.find_MAP()
        xstep = pm.Slice()
        # Pass step/start by keyword: the positional order of pm.sample's
        # optional arguments changed across pymc3 releases.
        trace = pm.sample(n, step=xstep, start=xstart,
                          random_seed=123, progressbar=True)
        pm.summary(trace)
示例2: test_value_n_eff_rhat
def test_value_n_eff_rhat(self):
    """The ``n_eff`` and ``Rhat`` columns of ``pm.summary`` must agree with
    ``pm.effective_n`` and ``pm.gelman_rubin`` for 0d through 3d variables."""
    loc, prec = -2.1, 1.3
    with Model():
        Normal('x0', loc, prec, testval=floatX_array(.1))                 # scalar
        Normal('x1', loc, prec, shape=2,
               testval=floatX_array([.1, .1]))                            # vector
        Normal('x2', loc, prec, shape=(2, 2),
               testval=floatX_array(np.tile(.1, (2, 2))))                 # matrix
        Normal('x3', loc, prec, shape=(2, 2, 3),
               testval=floatX_array(np.tile(.1, (2, 2, 3))))              # 3-tensor
        trace = pm.sample(100, step=pm.Metropolis())
    for name in trace.varnames:
        # Effective sample size: summary column vs. direct computation.
        ess = pm.effective_n(trace, varnames=[name])[name]
        ess_column = np.asarray(
            pm.summary(trace, varnames=[name])['n_eff']
        ).reshape(ess.shape)
        npt.assert_equal(ess, ess_column)
        # Rhat: summary column vs. gelman_rubin.
        rhat = pm.gelman_rubin(trace, varnames=[name])[name]
        rhat_column = np.asarray(
            pm.summary(trace, varnames=[name])['Rhat']
        ).reshape(rhat.shape)
        npt.assert_equal(rhat, rhat_column)
示例3: test_summary_1d_variable_model
def test_summary_1d_variable_model():
    """Smoke test: ``pm.summary`` runs on a trace from a 1d (shape=2) Normal."""
    location, precision = -2.1, 1.3
    with Model() as model:
        x = Normal('x', location, precision, shape=2, testval=[.1, .1])
        sampler = Metropolis(model.vars, np.diag([1.]), blocked=True)
        draws = pm.sample(100, step=sampler)
        pm.summary(draws)
示例4: test_summary_2d_variable_model
def test_summary_2d_variable_model(self):
    """Smoke test: ``pm.summary`` runs on a trace from a 2x2 matrix variable."""
    location, precision = -2.1, 1.3
    with Model() as model:
        Normal('x', location, precision, shape=(2, 2),
               testval=floatX_array(np.tile(.1, (2, 2))))
        sampler = Metropolis(model.vars, np.diag([1.]), blocked=True)
        draws = pm.sample(100, step=sampler)
        pm.summary(draws)
示例5: __init__
def __init__(self, X_train, y_train, n_hidden, lam=1):
    """Fit a radial-basis-function network by MCMC.

    Builds a PyMC3 model with ``n_hidden`` RBF centres ``C`` and output
    weights ``w`` (last entry acts as a bias), finds a MAP starting point,
    runs a short NUTS burn-in, then a long NUTS run, prints a summary and
    dumps each variable's samples to ``<name>.txt``.

    Parameters
    ----------
    X_train : array, shape (n_train, n_dim)
        Training inputs (iterated row by row).
    y_train : array, shape (n_train,)
        Training targets.
    n_hidden : int
        Number of RBF centres.
    lam : float
        RBF width parameter (inverse length-scale).

    Note: the original used Python 2 ``print`` statements; converted to
    the print() function for consistency with the rest of the file.
    """
    print(X_train.shape)
    with pm.Model() as rbfnn:
        C = pm.Normal('C', mu=0, sd=10, shape=(n_hidden))
        w = pm.Normal('w', mu=0, sd=10, shape=(n_hidden + 1))
        y_out = []
        for x in X_train:
            # 1d speed-up: squared distance to each centre without an axis sum
            rbf_out = T.exp(-lam * (C - x) ** 2)
            # Append a constant-1 input so the last weight acts as a bias.
            rbf_out_biased = T.concatenate([rbf_out, T.alloc(1, 1)], 0)
            y_out.append(T.dot(w, rbf_out_biased))
        y = pm.Normal('y', mu=y_out, sd=0.01, observed=y_train)

        start = pm.find_MAP(fmin=scipy.optimize.fmin_l_bfgs_b)
        print(start)
        # Short NUTS run for burn-in, then restart from its last point.
        step = pm.NUTS(scaling=start)
        trace = pm.sample(2000, step, progressbar=False)
        step = pm.NUTS(scaling=trace[-1])
        trace = pm.sample(20000, step, start=trace[-1])
        print(summary(trace, vars=['C', 'w']))

    # Dump every variable's (squeezed) samples, one value per line.
    # (renamed from `vars` to avoid shadowing the builtin)
    varnames = trace.varnames
    for i, v in enumerate(varnames):
        for d in trace.get_values(v, combine=False, squeeze=False):
            d = np.squeeze(d)
            with open(str(v) + ".txt", "w+") as thefile:
                for item in d:
                    print(item, file=thefile)
    traceplot(trace)
    plt.show()
示例6: run
def run(n=1500):
    """Sample the module-level model ``m`` and print the observed data,
    the true ranking, the estimated latent means and the implied ranking.

    ``n='short'`` is a shortcut for a quick 50-draw run.
    """
    draws = 50 if n == 'short' else n
    with m:
        trace = pm.sample(draws)
    pm.traceplot(trace, varnames=['mu_hat'])
    print('Example observed data: ')
    print(y[:30, :].T)
    print('The true ranking is: ')
    print(yreal.flatten())
    print('The Latent mean is: ')
    # First item is pinned to 0 as the reference level.
    posterior_means = pm.summary(trace, varnames=['mu_hat'])['mean'].values
    latent_mean = np.concatenate(([0], posterior_means))
    print(np.round(latent_mean, 2))
    print('The estimated ranking is: ')
    print(np.argsort(latent_mean))
示例7: print
# Demo: one variable from each of four common priors, sampled together,
# with a rounded summary table and trace plots.
import pymc3 as pm
import seaborn as sn
import matplotlib.pyplot as plt

with pm.Model() as model:
    uniform = pm.Uniform('uniform', lower=0, upper=1)
    normal = pm.Normal('normal', mu=0, sd=1)
    beta = pm.Beta('beta', alpha=0.5, beta=0.5)
    exponential = pm.Exponential('exponential', 1.0)
    trace = pm.sample(2000)

summary_table = pm.summary(trace)
print(summary_table.round(2))
pm.traceplot(trace)
plt.show()
示例8: get_garch_model
}
"""
def get_garch_model():
    """Build and return a GARCH(1,1)-style volatility model on a small
    fixed return series.

    Per-observation priors: ``alpha1`` ~ U(0,1), ``beta1`` ~ U(0, 1-alpha1)
    (keeps alpha1+beta1 < 1), ``mu`` ~ N(0, 100).  The observation sd is
    sqrt(alpha0 + alpha1*(r-mu)^2 + beta1*sigma1^2).
    """
    returns = np.array([28, 8, -3, 7, -1, 1, 18, 12], dtype=np.float64)
    sigma1 = np.array([15, 10, 16, 11, 9, 11, 10, 18], dtype=np.float64)
    alpha0 = np.array([10, 10, 16, 8, 9, 11, 12, 18], dtype=np.float64)
    shape = returns.shape
    with Model() as garch:
        alpha1 = Uniform('alpha1', 0., 1., shape=shape)
        beta1 = Uniform('beta1', 0., 1 - alpha1, shape=shape)
        mu = Normal('mu', mu=0., sd=100., shape=shape)
        theta = tt.sqrt(alpha0
                        + alpha1 * tt.pow(returns - mu, 2)
                        + beta1 * tt.pow(sigma1, 2))
        Normal('obs', mu, sd=theta, observed=returns)
    return garch
def run(n=1000):
    """Sample the GARCH model for ``n`` draws after 1000 tuning steps.

    ``n='short'`` is a shortcut for a quick 50-draw run.
    """
    draws = 50 if n == "short" else n
    with get_garch_model():
        return sample(draws, tune=1000)


if __name__ == '__main__':
    summary(run())
示例9: mixed_effects
#.........这里部分代码省略.........
# NOTE(review): fragment of a larger mixed-effects model — the enclosing
# function signature and the construction of `mu`, `alpha`, `covariates`,
# `y`, `NSamples`, `burn`, `thin`, `red`, `blue`, `purple` are omitted
# upstream, so claims below are limited to what this excerpt shows.
)
# Dependent Variable
# Bound the NegativeBinomial below at 1 — presumably counts of at least
# one event; TODO confirm against the omitted data-prep code.
BoundedNegativeBinomial = pm.Bound(pm.NegativeBinomial, lower=1)
y_est = BoundedNegativeBinomial('y_est', mu=mu, alpha=alpha, observed=y)
# Unobserved copy with the same likelihood, used for posterior predictive draws.
y_pred = BoundedNegativeBinomial('y_pred', mu=mu, alpha=alpha, shape=y.shape)
# y_est = pm.NegativeBinomial('y_est', mu=mu, alpha=alpha, observed=y)
# y_pred = pm.NegativeBinomial('y_pred', mu=mu, alpha=alpha, shape=y.shape)
# y_est = pm.Poisson('y_est', mu=mu, observed=data)
# y_pred = pm.Poisson('y_pred', mu=mu, shape=data.shape)
# Metropolis sampling initialised at the MAP estimate.
start = pm.find_MAP()
step = pm.Metropolis(start=start)
# step = pm.NUTS()
# backend = pm.backends.Text('test')
# trace = pm.sample(NSamples, step, start=start, chain=1, njobs=2, progressbar=True, trace=backend)
trace = pm.sample(NSamples, step, start=start, njobs=1, progressbar=True)
# Keep the full trace around, then discard burn-in and thin the working copy.
trace2 = trace
trace = trace[-burn::thin]
# waic = pm.waic(trace)
# dic = pm.dic(trace)
# with pm.Model() as model:
# trace_loaded = pm.backends.sqlite.load('FF49_industry.sqlite')
# y_pred.dump('FF49_industry_missing/y_pred')
## POSTERIOR PREDICTIVE CHECKS
y_pred = trace.get_values('y_pred')
pm.summary(trace, vars=covariates)
# PARAMETER POSTERIORS
# Shared matplotlib annotate() keyword sets: rotated (for coefficients)
# and horizontal (for intercepts).
anno_kwargs = {'xycoords': 'data', 'textcoords': 'offset points',
'rotation': 90, 'va': 'bottom', 'fontsize': 'large'}
anno_kwargs2 = {'xycoords': 'data', 'textcoords': 'offset points',
'rotation': 0, 'va': 'bottom', 'fontsize': 'large'}
n0, n1, n2, n3 = 1, 5, 9, 14 # numbering for posterior plots
# intercepts
# mn = pm.df_summary(trace)['mean']['Intercept_log__0']
# ax[0,0].annotate('{:.3f}'.format(mn), xy=(mn,0), xytext=(0,15), color=blue, **anno_kwargs2)
# mn = pm.df_summary(trace)['mean']['Intercept_log__1']
# ax[0,0].annotate('{:.3f}'.format(mn), xy=(mn,0), xytext=(0,15), color=purple, **anno_kwargs2)
# coeffs
# mn = pm.df_summary(trace)['mean'][2]
# ax[1,0].annotate('{:.3f}'.format(mn), xy=(mn,0), xytext=(5, 10), color=red, **anno_kwargs)
# mn = pm.df_summary(trace)['mean'][3]
# ax[2,0].annotate('{:.3f}'.format(mn), xy=(mn,0), xytext=(5,10), color=red, **anno_kwargs)
# mn = pm.df_summary(trace)['mean'][4]
# ax[3,0].annotate('{:.3f}'.format(mn), xy=(mn,0), xytext=(5,10), color=red, **anno_kwargs)
# plt.savefig('figure1_mixed.png')
# Trace plots for the intercept and slope block [n0:n1), with posterior
# means overlaid as reference lines and annotated on each axis.
ax = pm.traceplot(trace, vars=['Intercept']+trace.varnames[n0:n1],
lines={k: v['mean'] for k, v in pm.df_summary(trace).iterrows()}
)
for i, mn in enumerate(pm.df_summary(trace)['mean'][n0:n1]): # +1 because up and down intercept
ax[i,0].annotate('{:.3f}'.format(mn), xy=(mn,0), xytext=(5,10), color=red, **anno_kwargs)
plt.savefig('figure1_mixed.png')
示例10: posterior_summary
def posterior_summary(self, **kwargs):
    """Return ``pm.summary`` of the stored posterior trace.

    All keyword arguments are forwarded to ``pm.summary`` unchanged.
    """
    table = pm.summary(self.posterior_, **kwargs)
    return table
示例11: print
# Sample the OLS model: MAP initialisation via Powell, then NUTS, with
# wall-clock timing around each stage.
with mdl_ols:
    ## find MAP using Powell, seems to be more robust
    tic = time.time()
    start_MAP = pm.find_MAP(fmin=optimize.fmin_powell)
    toc = time.time()
    print("Found MAP, took %f seconds" % (toc - tic))
    ## take samples
    tic = time.time()
    traces_ols = pm.sample(2000, start=start_MAP, step=pm.NUTS(), progressbar=True)
    print()
    toc = time.time()
    print("Done sampling, took %f seconds" % (toc - tic))

pm.summary(traces_ols)

## plot the samples and the marginal distributions, with posterior means
## overlaid as reference lines
_ = pm.traceplot(
    traces_ols,
    figsize=(12, len(traces_ols.varnames) * 1.5),
    lines={name: row["mean"] for name, row in pm.df_summary(traces_ols).iterrows()},
)
plt.show()

do_tstudent = False
if do_tstudent:
    print("Robust Student-t analysis...")
示例12: print
# Continuation of the getting-started example: print the MAP estimate
# computed above, draw NUTS samples from `basic_model`, and summarise.
print(map_estimate)

from pymc3 import NUTS, sample
from pymc3 import traceplot

with basic_model:
    # obtain starting values via MAP
    start = find_MAP(fmin=optimize.fmin_powell)
    # instantiate sampler
    step = NUTS(scaling=start)
    # draw 2000 posterior samples
    trace = sample(2000, step, start=start)

trace['alpha'][-5:]
traceplot(trace)
plt.show()

from pymc3 import summary
summary(trace)

n = 500
p = 0.3
with Model():
    x = Normal('alpha', mu=0, sd=10)
    # Bug fix: `print type(x)` is Python 2 syntax — a SyntaxError under
    # Python 3, which the rest of this fragment clearly targets.
    print(type(x))
示例13: print
# NOTE(review): fragment of a larger fitting loop — the matching `if`
# branch and the definitions of `out`, `sm`, `data`, `alpha_B`, `beta_B`,
# `alpha_A`, `beta_A`, `delta_t`, `N`, `data_results` and `result_array`
# are cut off above this excerpt.
else:
# Collect the lmfit point estimates and their standard errors
# (sqrt of the covariance diagonal), with decay scaled by delta_t.
fit_results = np.array([out.values['decay']*delta_t,
np.sqrt(out.covar[0,0])*delta_t,
out.values['amplitude'],
np.sqrt(out.covar[1,1])])
print(out.fit_report(min_correl=0.25))
# Run the Bayesian model on the same data and summarise the trace.
trace = sm.run(x=data,
aB=alpha_B,
bB=beta_B,
aA=alpha_A,
bA=beta_A,
delta_t=delta_t,
N=N)
pm.summary(trace)
# For each of B and A: the 2.5/25/50/75/97.5 percentiles, then the
# posterior std and mean appended at the end.
traceB_results = np.percentile(trace['B'],(2.5,25,50,75,97.5))
traceB_results = np.concatenate((traceB_results, [np.std(trace['B'])], [np.mean(trace['B'])]))
traceA_results=np.percentile(trace['A'],(2.5,25,50,75,97.5))
traceA_results = np.concatenate((traceA_results, [np.std(trace['A'])], [np.mean(trace['A'])]))
# One flat result row per iteration, stacked into result_array.
results = np.concatenate((data_results, fit_results, traceB_results, traceA_results))
print(results)
if result_array is None:
result_array = results
else:
result_array = np.vstack((result_array, results))
示例14: run
def run(n=5000):
    """Draw ``n`` samples from the module-level ``model_1`` and print a
    summary table of the posterior."""
    with model_1:
        posterior = pm.sample(n)
        pm.summary(posterior)
示例15: two_gaussians
# NOTE(review): tail of `two_gaussians(x)` — the def line and the
# definitions of `log_like1`, `w1`, `w2`, `mu2`, `isigma`, `dsigma`, `n`
# sit above this excerpt.  This is the second component's Gaussian
# log-density (do not split the backslash continuations below).
log_like2 = - 0.5 * n * tt.log(2 * np.pi) \
- 0.5 * tt.log(dsigma) \
- 0.5 * (x - mu2).T.dot(isigma).dot(x - mu2)
# Log of the weighted two-component mixture likelihood.
return tt.log(w1 * tt.exp(log_like1) + w2 * tt.exp(log_like2))
# Express the two-Gaussian mixture as a Potential over a uniform prior on
# X, then sample it with the transitional MCMC (ATMCMC) sampler and show
# a summary plus combined trace plots.
with pm.Model() as ATMIP_test:
    X = pm.Uniform('X',
                   shape=n,
                   lower=-2. * np.ones_like(mu1),
                   upper=2. * np.ones_like(mu1),
                   testval=-1. * np.ones_like(mu1),
                   transform=None)
    like = pm.Deterministic('like', two_gaussians(X))
    llk = pm.Potential('like', like)

with ATMIP_test:
    sampler = atmcmc.ATMCMC(
        n_chains=n_chains,
        tune_interval=tune_interval,
        likelihood_name=ATMIP_test.deterministics[0].name,
    )
    trcs = atmcmc.ATMIP_sample(
        n_steps=n_steps,
        step=sampler,
        njobs=njobs,
        progressbar=True,
        trace=test_folder,
        model=ATMIP_test,
    )

pm.summary(trcs)
axes = pm.traceplot(trcs, combined=True)
plt.show(axes[0][0])