This article collects typical usage examples of the Python method pymc.MCMC.trace. If you are unsure what MCMC.trace does, how to call it, or what it looks like in real code, the curated examples below should help. You can also read more about the class it belongs to, pymc.MCMC.
Fifteen code examples of MCMC.trace are shown below, ordered by popularity by default.
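Before the examples, here is a minimal sketch of the PyMC 2 pattern that every snippet below follows: build stochastic nodes, hand them to MCMC, call sample(), then pull the posterior draws back out with trace(). The one-parameter model ('x') is made up purely for illustration and is not taken from any of the projects below.

import numpy as np
from pymc import Normal, MCMC

# Hypothetical one-parameter model, only to show the trace() call pattern
x = Normal('x', mu=0.0, tau=1.0)

M = MCMC([x])
M.sample(iter=5000, burn=1000, thin=5, progress_bar=0)

# trace() returns a Trace object; slicing with [:] gives the retained samples
draws = M.trace('x')[:]
print(draws.shape, np.mean(draws))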
Example 1: estimate_failures
# Required import: from pymc import MCMC [as alias]
# Or alternatively: from pymc.MCMC import trace [as alias]
def estimate_failures(samples,          # samples from noisy labelers
                      n_samples=10000,  # number of samples to run MCMC for
                      burn=None,        # burn-in. Defaults to n_samples/2
                      thin=10,          # thinning rate. Sample every k samples from markov chain
                      alpha_p=1, beta_p=1,   # beta parameters for true positive rate
                      alpha_e=1, beta_e=10   # beta parameters for noise rates
                      ):
    if burn is None:
        burn = n_samples // 2

    S, N = samples.shape
    p = Beta('p', alpha=alpha_p, beta=beta_p)  # prior on true label
    l = Bernoulli('l', p=p, size=S)
    e_pos = Beta('e_pos', alpha_e, beta_e, size=N)  # error rate if label = 1
    e_neg = Beta('e_neg', alpha_e, beta_e, size=N)  # error rate if label = 0

    @deterministic(plot=False)
    def noise_rate(l=l, e_pos=e_pos, e_neg=e_neg):
        # probability that a noisy labeler puts a label 1
        return np.outer(l, 1-e_pos) + np.outer(1-l, e_neg)

    noisy_label = Bernoulli('noisy_label', p=noise_rate, size=samples.shape, value=samples, observed=True)
    variables = [l, e_pos, e_neg, p, noisy_label, noise_rate]
    model = MCMC(variables, verbose=3)
    model.sample(iter=n_samples, burn=burn, thin=thin)
    model.write_csv('out.csv', ['p', 'e_pos', 'e_neg'])

    p = np.median(model.trace('p')[:])
    e_pos = np.median(model.trace('e_pos')[:], 0)
    e_neg = np.median(model.trace('e_neg')[:], 0)
    return p, e_pos, e_neg
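A possible way to call the function above, assuming Beta, Bernoulli, deterministic and numpy are imported at module level as the header comments suggest; the simulated labeler data below is made up purely for illustration and is not from the original project.

import numpy as np

# Hypothetical data: 200 items labeled by 3 noisy labelers (shape S x N)
rng = np.random.RandomState(0)
true_labels = rng.rand(200) < 0.3
samples = np.column_stack([
    np.where(true_labels, rng.rand(200) > 0.1, rng.rand(200) < 0.1)
    for _ in range(3)
]).astype(int)

p_hat, e_pos_hat, e_neg_hat = estimate_failures(samples, n_samples=2000, thin=5)
print(p_hat, e_pos_hat, e_neg_hat)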
Example 2: analizeMwm
# Required import: from pymc import MCMC [as alias]
# Or alternatively: from pymc.MCMC import trace [as alias]
def analizeMwm():
    masked_values = np.ma.masked_equal(x, value=None)
    print("m v: ", masked_values)
    print("dmwm da: ", dmwm.disasters_array)

    Mwm = MCMC(dmwm)
    Mwm.sample(iter=10000, burn=1000, thin=10)

    print("Mwm t: ", Mwm.trace('switchpoint')[:])
    hist(Mwm.trace('late_mean')[:])
    # show()
    plot(Mwm)
Example 3: test_nd
# Required import: from pymc import MCMC [as alias]
# Or alternatively: from pymc.MCMC import trace [as alias]
def test_nd(self):
    M = MCMC([self.NDstoch()], db=self.name, dbname=os.path.join(testdir, 'ND.' + self.name), dbmode='w')
    M.sample(10, progress_bar=0)
    a = M.trace('nd')[:]
    assert_equal(a.shape, (10, 2, 2))
    db = getattr(pymc.database, self.name).load(os.path.join(testdir, 'ND.' + self.name))
    assert_equal(db.trace('nd')[:], a)
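The test above checks that a trace written to a database backend can be read back after the sampler object is gone. A minimal sketch of the same round trip with the pickle backend follows; the file name and the variable name are made up for illustration.

import pymc
from pymc import Normal, MCMC

x = Normal('x', mu=0.0, tau=1.0)
M = MCMC([x], db='pickle', dbname='example_trace.pickle')
M.sample(iter=1000, burn=500, progress_bar=0)
M.db.close()

# Reload the stored traces later without re-running the sampler
db = pymc.database.pickle.load('example_trace.pickle')
print(db.trace('x')[:].shape)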
Example 4: test_simple
# Required import: from pymc import MCMC [as alias]
# Or alternatively: from pymc.MCMC import trace [as alias]
def test_simple(self):

    # Priors
    mu = Normal('mu', mu=0, tau=0.0001)
    s = Uniform('s', lower=0, upper=100, value=10)
    tau = s ** -2

    # Likelihood with missing data
    x = Normal('x', mu=mu, tau=tau, value=m, observed=True)

    # Instantiate sampler
    M = MCMC([mu, s, tau, x])

    # Run sampler
    M.sample(10000, 5000, progress_bar=0)

    # Check length of value
    assert_equal(len(x.value), 100)

    # Check size of trace
    tr = M.trace('x')()
    assert_equal(shape(tr), (5000, 2))

    sd2 = [-2 < i < 2 for i in ravel(tr)]

    # Check for standard normal output
    assert_almost_equal(sum(sd2) / 10000., 0.95, decimal=1)
Example 5: bimodal_gauss
# Required import: from pymc import MCMC [as alias]
# Or alternatively: from pymc.MCMC import trace [as alias]
def bimodal_gauss(data, pm, dmin=0.3):
    '''run MCMC to get regression on bimodal normal distribution'''
    size = len(data[pm])

    ### set up model
    p = Uniform("p", 0.2, 0.8)  # this is the fraction that comes from mean1 vs mean2
    # p = distributions.truncated_normal_like('p', mu=0.5, tau=0.001, a=0., b=1.)
    # p = Normal('p', mu=(1.*sum(comp0==1))/size, tau=1./0.1**2)  # attention: wings!, tau = 1/sig^2
    # p = Normal('p', mu=0.5, tau=1./0.1**2)  # attention: wings!, tau = 1/sig^2

    ber = Bernoulli("ber", p=p, size=size)  # produces 1 with proportion p
    precision = Gamma('precision', alpha=0.01, beta=0.01)

    mean1 = Uniform("mean1", -0.5, 1.0)  # if not truncated
    sig1 = Uniform('sig1', 0.01, 1.)
    mean2 = Uniform("mean2", mean1 + dmin, 1.5)
    sig2 = Uniform('sig2', 0.01, 1.)
    pop1 = Normal('pop1', mean1, 1. / sig1**2)  # tau is 1/sig^2
    pop2 = Normal('pop2', mean2, 1. / sig2**2)

    @deterministic
    def bimod(ber=ber, pop1=pop1, pop2=pop2):  # value determined from parents completely
        return ber * pop1 + (1 - ber) * pop2

    obs = Normal("obs", bimod, precision, value=data[pm], observed=True)
    model = Model({"p": p, "precision": precision, "mean1": mean1, 'sig1': sig1, "mean2": mean2, 'sig2': sig2, "obs": obs})

    from pymc import MCMC, Matplot
    M = MCMC(locals(), db='pickle', dbname='metals.pickle')
    iter = 10000; burn = 9000; thin = 10
    M.sample(iter=iter, burn=burn, thin=thin)
    M.db.commit()

    mu1 = np.mean(M.trace('mean1')[:])
    sig1 = np.mean(M.trace('sig1')[:])
    mu2 = np.mean(M.trace('mean2')[:])
    sig2 = np.mean(M.trace('sig2')[:])
    p = np.mean(M.trace('p')[:])
    return p, mu1, sig1, mu2, sig2, M
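A possible call for the function above, assuming the pymc names (Uniform, Bernoulli, Gamma, Normal, deterministic, Model) and numpy are imported at module level. The two-component sample and the 'feh' column name are invented here only to show the expected data layout: a mapping from the column name pm to a 1-D array.

import numpy as np

# Synthetic bimodal data keyed by a hypothetical column name
rng = np.random.RandomState(1)
data = {'feh': np.concatenate([rng.normal(0.0, 0.1, 300),
                               rng.normal(0.6, 0.1, 200)])}

p, mu1, sig1, mu2, sig2, M = bimodal_gauss(data, 'feh')
print(p, mu1, sig1, mu2, sig2)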
Example 6: analizeM
# Required import: from pymc import MCMC [as alias]
# Or alternatively: from pymc.MCMC import trace [as alias]
def analizeM():
    M = MCMC(dm)
    print("M: ", M)
    M.sample(iter=10000, burn=1000, thin=10)

    print("M t: ", M.trace('switchpoint')[:])
    hist(M.trace('late_mean')[:])
    # show()
    plot(M)
    # show()

    print("M smd dm sp: ", M.step_method_dict[dm.switchpoint])
    print("M smd dm em: ", M.step_method_dict[dm.early_mean])
    print("M smd dm lm: ", M.step_method_dict[dm.late_mean])
    M.use_step_method(Metropolis, dm.late_mean, proposal_sd=2.)
Example 7: bimodal_gauss
# Required import: from pymc import MCMC [as alias]
# Or alternatively: from pymc.MCMC import trace [as alias]
def bimodal_gauss(data, pm):
    '''run MCMC to get regression on bimodal normal distribution'''
    m1 = np.mean(data[pm]) / 2.
    m2 = np.mean(data[pm]) * 2.
    dm = m2 - m1
    size = len(data[pm])

    ### set up model
    p = Uniform("p", 0.2, 0.8)  # this is the fraction that comes from mean1 vs mean2
    # p = distributions.truncated_normal_like('p', mu=0.5, tau=0.001, a=0., b=1.)
    # p = Normal('p', mu=(1.*sum(comp0==1))/size, tau=1./0.1**2)  # attention: wings!, tau = 1/sig^2
    # p = Normal('p', mu=0.5, tau=1./0.1**2)  # attention: wings!, tau = 1/sig^2

    ber = Bernoulli("ber", p=p, size=size)  # produces 1 with proportion p
    precision = Gamma('precision', alpha=0.01, beta=0.01)

    dmu = Normal('dmu', dm, tau=1. / 0.05**2)  # [PS] give difference between means, finite
    # dmu = Lognormal('dmu', 0.3, tau=1./0.1**2)

    mean1 = Normal("mean1", mu=m1, tau=1. / 0.1**2)  # better to use Normals versus Uniforms, if not truncated
    mean2 = Normal("mean2", mu=mean1 + dmu, tau=1. / 0.1**2)  # tau is 1/sig^2

    @deterministic
    def mean(ber=ber, mean1=mean1, mean2=mean2):
        return ber * mean1 + (1 - ber) * mean2

    obs = Normal("obs", mean, precision, value=data[pm], observed=True)
    model = Model({"p": p, "precision": precision, "mean1": mean1, "mean2": mean2, "obs": obs})

    from pymc import MCMC, Matplot
    M = MCMC(locals(), db='pickle', dbname='metals.pickle')
    iter = 3000; burn = 2000; thin = 10
    M.sample(iter=iter, burn=burn, thin=thin)
    M.db.commit()

    mu1 = np.mean(M.trace('mean1')[:])
    mu2 = np.mean(M.trace('mean2')[:])
    p = np.mean(M.trace('p')[:])
    return p, mu1, 0.1, mu2, 0.1, M
Example 8: test_zcompression
# Required import: from pymc import MCMC [as alias]
# Or alternatively: from pymc.MCMC import trace [as alias]
def test_zcompression(self):
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        db = pymc.database.hdf5.Database(dbname=os.path.join(testdir, 'disaster_modelCompressed.hdf5'),
                                         dbmode='w',
                                         dbcomplevel=5)
        S = MCMC(disaster_model, db=db)
        S.sample(45, 10, 1, progress_bar=0)
        assert_array_equal(S.trace('early_mean')[:].shape, (35,))
        S.db.close()
        db.close()
        del S
Example 9: estimate_failures_from_counts
# Required import: from pymc import MCMC [as alias]
# Or alternatively: from pymc.MCMC import trace [as alias]
def estimate_failures_from_counts(counts,           # pattern counts from noisy labelers
                                  n_samples=10000,  # number of samples to run MCMC for
                                  burn=None,        # burn-in. Defaults to n_samples/2
                                  thin=10,          # thinning rate. Sample every k samples from markov chain
                                  alpha_p=1, beta_p=1,   # beta parameters for true positive rate
                                  alpha_e=1, beta_e=10   # beta parameters for noise rates
                                  ):
    if burn is None:
        burn = n_samples // 2

    S = counts.sum()
    N = len(counts.shape)

    p_label = Beta('p_label', alpha=alpha_p, beta=beta_p)  # prior on true label
    e_pos = Beta('e_pos', alpha_e, beta_e, size=N)  # error rate if label = 1
    e_neg = Beta('e_neg', alpha_e, beta_e, size=N)  # error rate if label = 0
    print(counts)

    @deterministic(plot=False)
    def patterns(p_label=p_label, e_pos=e_pos, e_neg=e_neg):
        # probability that the noisy labelers output pattern p
        P = np.zeros((2,) * N)
        for pat in itertools.product([0, 1], repeat=N):
            P[pat] = p_label * np.product([1 - e_pos[i] if pat[i] == 1 else e_pos[i] for i in range(N)])
            P[pat] += (1 - p_label) * np.product([e_neg[i] if pat[i] == 1 else 1 - e_neg[i] for i in range(N)])
        assert np.abs(P.sum() - 1) < 1e-6
        return P.ravel()

    pattern_counts = Multinomial('pattern_counts', n=S, p=patterns, value=counts.ravel(), observed=True)
    variables = [p_label, e_pos, e_neg, patterns]
    model = MCMC(variables, verbose=3)
    model.sample(iter=n_samples, burn=burn, thin=thin)
    model.write_csv('out.csv', ['p_label', 'e_pos', 'e_neg'])

    p = np.median(model.trace('p_label')[:])
    e_pos = np.median(model.trace('e_pos')[:], 0)
    e_neg = np.median(model.trace('e_neg')[:], 0)
    return p, e_pos, e_neg
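A small usage sketch for the counts-based variant above, again with invented numbers: counts is an N-dimensional array with one cell per possible label pattern of the N labelers (here N = 2), and each cell holds how many items received that pattern.

import numpy as np

# Pattern counts for 2 noisy labelers: index [a, b] = number of items
# labeled a by labeler 0 and b by labeler 1 (values are made up).
counts = np.array([[50,  5],
                   [ 4, 41]])

p_hat, e_pos_hat, e_neg_hat = estimate_failures_from_counts(counts, n_samples=2000)
print(p_hat, e_pos_hat, e_neg_hat)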
Example 10: test_zcompression
# Required import: from pymc import MCMC [as alias]
# Or alternatively: from pymc.MCMC import trace [as alias]
def test_zcompression(self):
    original_filters = warnings.filters[:]
    warnings.simplefilter("ignore")
    try:
        db = pymc.database.hdf5.Database(dbname=os.path.join(testdir, 'disaster_modelCompressed.hdf5'),
                                         dbmode='w',
                                         dbcomplevel=5)
        S = MCMC(disaster_model, db=db)
        S.sample(45, 10, 1, progress_bar=0)
        assert_array_equal(S.trace('early_mean')[:].shape, (35,))
        S.db.close()
        db.close()
        del S
    finally:
        warnings.filters = original_filters
Example 11: fit_std_curve_by_pymc
# Required import: from pymc import MCMC [as alias]
# Or alternatively: from pymc.MCMC import trace [as alias]
def fit_std_curve_by_pymc(i_vals, i_sds, dpx_concs):
    import pymc
    from pymc import Uniform, stochastic, deterministic, MCMC
    from pymc import Matplot

    # Define prior distributions for both Ka and Kd
    ka = Uniform('ka', lower=0, upper=1000)
    kd = Uniform('kd', lower=0, upper=1000)

    @stochastic(plot=True, observed=True)
    def quenching_model(ka=ka, kd=kd, value=i_vals):
        pred_i = quenching_func(ka, kd, dpx_concs)
        # The first concentration in dpx_concs should always be zero
        # (that is, the first point in the titration should be the
        # unquenched fluorescence), so we assert that here:
        assert dpx_concs[0] == 0
        # The reason this is necessary is that in the likelihood calculation
        # we skip the error for the first point, since (when the std. err
        # is calculated by well) the error is 0 (the I / I_0 ratio is
        # always 1 for each well, so the variance/SD across the wells is 0).
        # If we don't skip this first point, we get nan for the likelihood.
        # In addition, the model always predicts 1 for the I / I_0 ratio
        # when the DPX concentration is 0, so it contributes nothing to
        # the overall fit.
        return -np.sum((value[1:] - pred_i[1:])**2 / (2 * i_sds[1:]**2))

    pymc_model = pymc.Model([ka, kd, quenching_model])
    mcmc = MCMC(pymc_model)
    mcmc.sample(iter=155000, burn=5000, thin=150)
    Matplot.plot(mcmc)

    plt.figure()
    num_to_plot = 1000
    ka_vals = mcmc.trace('ka')[:]
    kd_vals = mcmc.trace('kd')[:]
    if num_to_plot > len(ka_vals):
        num_to_plot = len(ka_vals)

    for i in range(num_to_plot):
        plt.plot(dpx_concs, quenching_func(ka_vals[i], kd_vals[i], dpx_concs),
                 alpha=0.01, color='r')
    plt.errorbar(dpx_concs, i_vals, yerr=i_sds, linestyle='', marker='o',
                 color='k', linewidth=2)
    return (ka_vals, kd_vals)
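The function above relies on a quenching_func helper and on titration data defined elsewhere in its module, so only the call and a quick posterior summary are sketched here; the array values are placeholders, not real measurements.

import numpy as np

# Placeholder titration data; real values come from the plate-reader analysis.
# The first DPX concentration must be zero, as asserted in the model above.
dpx_concs = np.array([0.0, 0.005, 0.01, 0.02, 0.04])
i_vals = np.array([1.0, 0.8, 0.65, 0.45, 0.3])
i_sds = np.array([0.0, 0.02, 0.02, 0.03, 0.03])

ka_vals, kd_vals = fit_std_curve_by_pymc(i_vals, i_sds, dpx_concs)
print(np.mean(ka_vals), np.mean(kd_vals))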
Example 12: Outliers_Krough
# Required import: from pymc import MCMC [as alias]
# Or alternatively: from pymc.MCMC import trace [as alias]
def Outliers_Krough(self):
    fit_dict = OrderedDict()
    fit_dict['methodology'] = r'Outliers Krough'

    # Initial guess for the fitting
    Bces_guess = self.bces_regression()
    m_0, n_0 = Bces_guess['m'][0], Bces_guess['n'][0]
    Spread_vector = ones(len(self.x_array))

    # Model for outliers detection
    Outliers_dect_dict = self.inference_outliers(self.x_array, self.y_array, m_0, n_0, Spread_vector)
    mcmc = MCMC(Outliers_dect_dict)
    mcmc.sample(100000, 20000)

    # Extract the data with the outliers coordinates
    probability_of_points = mcmc.trace('inlier')[:].astype(float).mean(0)
    fit_dict['x_coords_outliers'] = self.x_array[probability_of_points < self.prob_threshold]
    fit_dict['y_coords_outliers'] = self.y_array[probability_of_points < self.prob_threshold]

    return fit_dict
Example 13: range
# Required import: from pymc import MCMC [as alias]
# Or alternatively: from pymc.MCMC import trace [as alias]
Ibetas = np.empty((Ni, int((iters - burns) / thins)))
Iig = np.empty((Ni, int((iters - burns) / thins)))
Iil = np.empty((Ni, int((iters - burns) / thins)))
Iia = np.empty((Ni, int((iters - burns) / thins)))
Iiw = np.empty((Ni, int((iters - burns) / thins)))
Iae = np.empty((Ni, int((iters - burns) / thins)))

for i in range(0, 1):
    print('-------------')
    print('Processing class ' + str(i + 1) + ' out of ' + str(Ni))
    print('-------------')
    np.save('terribleHackClass.npy', np.array([i]))
    imp.reload(decayModelAlign)

    M = MCMC(decayModelAlign)
    M.sample(iter=iters, burn=burns, thin=thins, verbose=0)

    Irhos[i, :] = M.trace('rho_s')[:]
    Ialphas[i, :] = M.trace('alpha')[:]
    Ibetas[i, :] = M.trace('beta')[:]
    Iig[i, :] = M.trace('ignore_length')[:]
    Iil[i, :] = M.trace('attract_length')[:]
    Iia[i, :] = M.trace('attract_angle')[:]
    Iiw[i, :] = M.trace('align_weight')[:]
    Iae[i, :] = M.trace('attract_exponent')[:]

np.save('Irhos0.npy', Irhos)
np.save('Ialphas0.npy', Ialphas)
np.save('Ibetas0.npy', Ibetas)
np.save('Iig0.npy', Iig)
np.save('Iil0.npy', Iil)
np.save('Iia0.npy', Iia)
np.save('Iiw0.npy', Iiw)
Example 14: bayes_plotter
# Required import: from pymc import MCMC [as alias]
# Or alternatively: from pymc.MCMC import trace [as alias]
class bayes_plotter():

    def __init__(self):
        self.Fig = None
        self.Axis = None
        self.Valid_Traces = None
        self.pymc_database = None
        self.dbMCMC = None
        self.Traces_filter = None
        self.pymc_stats_keys = ['mean', '95% HPD interval', 'standard deviation', 'mc error', 'quantiles', 'n']

    def load_pymc_database(self, Database_address):
        # In case the database is open from a previous use
        if self.pymc_database != None:
            self.pymc_database.close()

        # Load the pymc output textfile database
        self.pymc_database = database.pickle.load(Database_address)

        # Create a dictionary with the traces
        self.Traces_dict = {}
        self.traces_list = self.pymc_database.trace_names[0]  # This variable contains all the traces from the MCMC (stochastic and deterministic)
        for trace in self.traces_list:
            self.Traces_dict[trace] = self.pymc_database.trace(trace)

        # Generate a MCMC object to recover all the data from the run
        self.dbMCMC = MCMC(self.Traces_dict, self.pymc_database)
        return

    def extract_traces_statistics(self, traces_list = None):
        # traces_yplus = bp.pymc_database.trace('He_abud')[:]
        # print 'The y_plus trace evolution\n'
        # print 'Mean inf', statistics_dict['He_abud']['mean']
        # print 'Median numpy', median(traces_yplus)
        # print 'Mean numpy', mean(traces_yplus), '\n'
        #
        # print 'percentiles: 25, 50, 75'
        # print percentile(traces_yplus,25), percentile(traces_yplus,50), percentile(traces_yplus,75),'\n'
        # print 'percentiles: 16, 84'
        # print percentile(traces_yplus,16), percentile(traces_yplus,84),'\n'
        # print 'percentiles: 37.73, 68.27'
        # print percentile(traces_yplus,37.73), percentile(traces_yplus,68.27),'\n'
        # print 'Standard deviation'
        # print percentile(traces_yplus,4.55), percentile(traces_yplus,95.45)
        # print 'HUD 95', statistics_dict['He_abud']['95% HPD interval'],'\n'
        #
        # print 'PYMC std', statistics_dict['He_abud']['standard deviation']
        # print 'Numpy std', std(traces_yplus), '\n'

        self.statistics_dict = OrderedDict()

        # If no list input we extract all the traces from the analysis
        if traces_list == None:
            traces_list = self.traces_list

        for trace in traces_list:
            self.statistics_dict[trace] = OrderedDict()
            for stat in self.pymc_stats_keys:
                self.statistics_dict[trace][stat] = self.dbMCMC.trace(trace).stats()[stat]

            Trace_array = self.pymc_database.trace(trace)[:]
            self.statistics_dict[trace]['16th_p'] = percentile(Trace_array, 16)
            self.statistics_dict[trace]['84th_p'] = percentile(Trace_array, 84)

        return self.statistics_dict

    def close_database(self):
        self.pymc_database.close()
        self.pymc_database = None
        return

    def Import_FigConf(self, Fig = None, Axis = None):
        if Fig != None:
            self.Fig = Fig
        if Axis != None:
            self.Axis = Axis

    def FigConf(self, Figtype = 'Single', FigWidth = 16, FigHeight = 9, AxisFormat = 111, fontsize = 8, PlotStyle = 'Night', n_columns = None, n_rows = None, n_colors = None, color_map = 'colorblind'):
        self.Triangle_Saver = False

        if Figtype == 'Single':
            self.Fig = plt.figure(figsize = (FigWidth, FigHeight))
            self.Axis1 = self.Fig.add_subplot(AxisFormat)
            # fig.subplots_adjust(hspace = .5, wspace=.001)
        elif Figtype == 'Posteriors':
            # ......... the rest of the code is omitted here .........
Example 15: MCMC
# Required import: from pymc import MCMC [as alias]
# Or alternatively: from pymc.MCMC import trace [as alias]
iterations = 1000000
burn = 900000
thin = 10

hmod = kq1.generate_model(HISTORICAL)
cmod = kq1.generate_model(CONCURRENT)

H = MCMC(hmod, db="pickle", dbname="historical_model.pickle")
H.sample(iterations, burn, thin=thin, verbose=2)

C = MCMC(cmod, db="pickle", dbname="concurrent_model.pickle")
C.sample(iterations, burn, thin=thin, verbose=2)

conc = np.sum(C.trace("pred")[:], 0) / float((iterations - burn) / thin)
historical = np.sum(H.trace("pred")[:], 0) / float((iterations - burn) / thin)

x = np.arange(200, 3200, 200)
colors = ('0.7', 'black')
markers = (False, True)

for i, p in enumerate((70, 85)):
    plot(x, conc[i*-1][C.crit_pred==1], color=colors[i], marker='^'*markers[i])
    plot(x, historical[i*-1][H.crit_pred==1], color=colors[i], marker='o'*markers[i])
    plot(x, conc[i*-1][C.crit_pred==0], color=colors[i], marker='^'*markers[i], linestyle="dashed")
    plot(x, historical[i*-1][H.crit_pred==0], color=colors[i], marker='o'*markers[i], linestyle="dashed")

xlim(150, 3050)
ylim(0, 1)