This article collects typical usage examples of the Python pymc.MCMC attribute. If you are wondering how to use pymc.MCMC, how it works, or what calling it looks like in practice, the selected code examples below may help. You can also explore further usage examples from the pymc module itself.
Six code examples of the pymc.MCMC attribute are shown below, ordered by popularity by default.
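Before the examples, here is a minimal sketch of the pymc 2.x workflow the examples build on. It is not taken from the examples below; the model variables (mu, x_obs) and the fake data are purely illustrative.

# Minimal pymc 2.x sketch (hypothetical model; variable names are illustrative only).
import numpy as np
import pymc

data = np.random.randn(100) + 1.0             # fake observations
mu = pymc.Normal('mu', mu=0.0, tau=1.0)       # prior on the mean
x_obs = pymc.Normal('x_obs', mu=mu, tau=1.0, value=data, observed=True)

model = pymc.Model([mu, x_obs])
pymc.MAP(model).fit()                         # optional: start from the MAP estimate
mcmc = pymc.MCMC(model)                       # the attribute the examples below revolve around
mcmc.sample(iter=20000, burn=10000, thin=10, progress_bar=False)
print(mcmc.trace('mu')[:].mean())             # posterior mean of mu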
Example 1: show_summary
# Required import: import pymc [as alias]
# Or: from pymc import MCMC [as alias]
# Also requires: import numpy as np
def show_summary(pymc_model, mcmc, map):
    """
    Show summary statistics of MCMC and MAP estimates.

    Parameters
    ----------
    pymc_model : pymc model
        The pymc model that was sampled.
    mcmc : pymc.MCMC
        The MCMC samples.
    map : pymc.MAP
        The MAP fit.

    TODO
    ----
    * Automatically determine appropriate number of decimal places from statistical uncertainty.
    * Automatically adjust concentration units (e.g. pM, nM, uM) depending on estimated affinity.

    """
    # Compute summary statistics.
    DeltaG = map.DeltaG.value
    dDeltaG = mcmc.DeltaG.trace().std()
    Kd = np.exp(map.DeltaG.value)
    dKd = np.exp(mcmc.DeltaG.trace()).std()
    text = "DeltaG = %.1f +- %.1f kT\n" % (DeltaG, dDeltaG)
    if (Kd < 1e-12):
        text += "Kd = %.1f fM +- %.1f fM" % (Kd/1e-15, dKd/1e-15)
    elif (Kd < 1e-9):
        text += "Kd = %.1f pM +- %.1f pM" % (Kd/1e-12, dKd/1e-12)
    elif (Kd < 1e-6):
        text += "Kd = %.1f nM +- %.1f nM" % (Kd/1e-9, dKd/1e-9)
    elif (Kd < 1e-3):
        text += "Kd = %.1f uM +- %.1f uM" % (Kd/1e-6, dKd/1e-6)
    elif (Kd < 1):
        text += "Kd = %.1f mM +- %.1f mM" % (Kd/1e-3, dKd/1e-3)
    else:
        text += "Kd = %.3e M +- %.3e M" % (Kd, dKd)
    text += '\n'
    print(text)
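A hypothetical call sketch, assuming `model` is a pymc model that exposes a DeltaG stochastic (which show_summary reads from both the MAP and MCMC objects):

# Hypothetical usage; `model` is assumed to define a `DeltaG` stochastic.
map = pymc.MAP(model)
map.fit()
mcmc = pymc.MCMC(model)
mcmc.sample(iter=20000, burn=10000, thin=10)
show_summary(model, mcmc, map)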
Example 2: t_test
# Required import: import pymc [as alias]
# Or: from pymc import MCMC [as alias]
# Also requires: the TTest class used below (presumably defined elsewhere in the source module)
def t_test(df, groups=None, groupcol='group', valuecol='value', pooling='default', samples=40000, burns=10000, thin=1, *args, **kwargs):
    """Bayesian t-Test

    Given a dataframe of the form:

    |Group  |Observed Value|
    |-------|--------------|
    |<group>|       <float>|
    ...

    perform pairwise t-Tests on the groups.

    Inputs:
        df -- pandas dataframe of the form above
        groups -- (optional) list of groups to compare; if omitted, all groups are used
        groupcol -- name of the dataframe column holding the group labels
        valuecol -- name of the dataframe column holding the observed values
        pooling -- strategy for using pooled data in the test:
            * 'default' -- uses pairwise pooled data
            * 'all' -- uses pooled data from all groups
        samples -- number of samples to use in MCMC
        burns -- number of burn-in samples to use in MCMC
        thin -- thinning interval to use in MCMC
        progress_bar -- boolean, show progress bar of the sampler (PyMC progress bar)

    Returns:
        (description, raw_data)
        description: table describing the output data
        raw_data: dictionary of output data
    """
    test = TTest(df, groups, groupcol, valuecol,
                 pooling, samples, burns, thin, *args, **kwargs)
    return test.summary()
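A sketch of how t_test might be called, assuming the TTest class it relies on is importable and that test.summary() returns the (description, raw_data) pair the docstring describes; the dataframe contents are made up.

# Hypothetical usage; the data are made up.
import pandas as pd

df = pd.DataFrame({
    'group': ['A'] * 5 + ['B'] * 5,
    'value': [1.1, 0.9, 1.3, 1.0, 1.2, 1.9, 2.1, 2.0, 1.8, 2.2],
})
description, raw_data = t_test(df, groupcol='group', valuecol='value',
                               samples=20000, burns=5000, thin=1)
print(description)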
Example 3: run_mcmc
# Required import: import pymc [as alias]
# Or: from pymc import MCMC [as alias]
def run_mcmc(self, dbfilename='output'):
    """
    Sample the model with pymc using sensible defaults.

    Parameters
    ----------
    dbfilename : str, optional, default='output'
        Name of the storage filename for the database.

    Returns
    -------
    mcmc : pymc.MCMC
        The MCMC samples.

    """
    # DEBUG: Write the moralized model graph to a PostScript file.
    print('Writing graph...')
    pymc.graph.moral_graph(self.model, format='ps')

    # Sample the model with pymc.
    # TODO: Allow
    mcmc = pymc.MCMC(self.model, db='sqlite', name=dbfilename, verbose=True)
    nthin = 10
    nburn = nthin*100
    niter = nthin*100

    # Specify initial proposal standard deviations to apply to specific classes of parameters.
    keywords = {
        'concentration' : 0.1,
        'affinity' : 0.1,
        'volume' : 0.01,
        }
    print('Assigning initial guesses for Metropolis step method proposal standard deviations:')
    for stochastic in self.model.stochastics:
        if hasattr(stochastic, '__name__'):
            sigma = 1.0  # default proposal standard deviation
            parameter_name = stochastic.__name__
            # See if we have specified a special standard deviation for this parameter class.
            for keyword in keywords:
                if keyword in parameter_name:
                    sigma = keywords[keyword]
            print('%-64s : %8.5f' % (parameter_name, sigma))
            mcmc.use_step_method(pymc.Metropolis, stochastic, proposal_sd=sigma, proposal_distribution=None)

    print('Running MCMC...')
    mcmc.sample(iter=(nburn+niter), burn=nburn, thin=nthin, progress_bar=True, tune_throughout=True)

    # Close the database.
    #mcmc.db.close()

    return mcmc
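A hypothetical follow-up, assuming the object that defines run_mcmc is available as `experiment` and that its model contains a stochastic named 'DeltaG' (both names are illustrative only):

# Hypothetical usage; `experiment` and the 'DeltaG' stochastic are assumptions.
mcmc = experiment.run_mcmc(dbfilename='output')
trace = mcmc.trace('DeltaG')[:]           # thinned production samples
print(trace.mean(), trace.std())
mcmc.db.close()                           # close the sqlite backend when done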
Example 4: run_mcmc
# Required import: import pymc [as alias]
# Or: from pymc import MCMC [as alias]
def run_mcmc(pymc_model, nthin=20, nburn=0, niter=20000, map=True, db='ram', dbname=None):
    """
    Sample the model with pymc. Initial values of the parameters can be chosen with a maximum a posteriori estimate.

    Parameters
    ----------
    pymc_model : pymc model
        The pymc model to sample.
    nthin : int
        The number of MCMC steps that constitute 1 iteration.
    nburn : int
        The number of MCMC iterations during the burn-in.
    niter : int
        The number of production iterations.
    map : bool
        Whether to initialize the parameters before MCMC with the maximum a posteriori estimate.
    db : str
        Database backend that stores the values of the stochastics and deterministics sampled during the MCMC
        loop. The default 'ram' keeps samples in memory without storing them; use e.g. db='pickle' together with
        dbname to store them on disk.
    dbname : str
        Name of the storage object, default = None. To store the samples use e.g. dbname = 'my_mcmc.pickle'.

    Returns
    -------
    mcmc : pymc.MCMC
        The MCMC samples.

    """
    # Find MAP:
    if map:
        pymc.MAP(pymc_model).fit()

    # Sample the model with pymc
    mcmc = pymc.MCMC(pymc_model, db=db, dbname=dbname, name='Sampler', verbose=True)

    step_methods = 'AdaptiveMetropolis'  # NOTE: currently unused

    mcmc.use_step_method(pymc.Metropolis, getattr(pymc_model, 'DeltaG'), proposal_sd=0.1, proposal_distribution='Normal')
    mcmc.use_step_method(pymc.Metropolis, getattr(pymc_model, 'log_F_PL'), proposal_sd=0.1, proposal_distribution='Normal')
    mcmc.use_step_method(pymc.Metropolis, getattr(pymc_model, 'log_F_P'), proposal_sd=0.1, proposal_distribution='Normal')
    mcmc.use_step_method(pymc.Metropolis, getattr(pymc_model, 'log_F_L'), proposal_sd=0.1, proposal_distribution='Normal')
    mcmc.use_step_method(pymc.Metropolis, getattr(pymc_model, 'log_F_plate'), proposal_sd=0.1, proposal_distribution='Normal')
    mcmc.use_step_method(pymc.Metropolis, getattr(pymc_model, 'log_F_buffer'), proposal_sd=0.1, proposal_distribution='Normal')
    mcmc.use_step_method(pymc.Metropolis, getattr(pymc_model, 'log_F_buffer_control'), proposal_sd=0.1, proposal_distribution='Normal')

    if hasattr(pymc_model, 'epsilon_ex'):
        mcmc.use_step_method(pymc.Metropolis, getattr(pymc_model, 'epsilon_ex'), proposal_sd=10000.0, proposal_distribution='Normal')
    if hasattr(pymc_model, 'epsilon_em'):
        mcmc.use_step_method(pymc.Metropolis, getattr(pymc_model, 'epsilon_em'), proposal_sd=10000.0, proposal_distribution='Normal')

    # Uncomment below to sample log_F_PL and DeltaG (which are highly correlated) jointly with AdaptiveMetropolis to improve sampling somewhat.
    #mcmc.use_step_method(pymc.AdaptiveMetropolis, [pymc_model.log_F_PL, pymc_model.DeltaG], scales={ pymc_model.log_F_PL : 0.1, pymc_model.DeltaG : 0.1 })

    mcmc.sample(iter=(nburn+niter), burn=nburn, thin=nthin, progress_bar=False, tune_throughout=True)

    return mcmc
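A hypothetical call, assuming a pymc model that defines the DeltaG and log_F_* stochastics this function wires step methods for:

# Hypothetical usage; `pymc_model` is assumed to expose DeltaG and the log_F_* stochastics.
mcmc = run_mcmc(pymc_model, nthin=20, nburn=1000, niter=20000, map=True,
                db='pickle', dbname='my_mcmc.pickle')
DeltaG_trace = mcmc.DeltaG.trace()
print('DeltaG = %.2f +- %.2f kT' % (DeltaG_trace.mean(), DeltaG_trace.std()))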
Example 5: show_summary
# Required import: import pymc [as alias]
# Or: from pymc import MCMC [as alias]
def show_summary(self, mcmc, map_fit=None):
    """
    Show summary statistics of MCMC and (optionally) MAP estimates.

    Parameters
    ----------
    mcmc : pymc.MCMC
        The MCMC samples.
    map_fit : pymc.MAP, optional, default=None
        The MAP fit.

    TODO
    ----
    * Automatically determine appropriate number of decimal places from statistical uncertainty.
    * Automatically adjust concentration units (e.g. pM, nM, uM) depending on estimated affinity.

    """
    # Compute summary statistics
    alpha = 0.95  # confidence level for the credible interval
    from scipy.stats import bayes_mvs
    for group in self.parameter_names:
        print(group)
        for name in self.parameter_names[group]:
            try:
                if map_fit:
                    mle = getattr(map_fit, name).value
                else:
                    mle = getattr(mcmc, name).trace().mean()
                trace = getattr(mcmc, name).trace()
                mean_cntr, var_cntr, std_cntr = bayes_mvs(trace, alpha=alpha)
                (center, (lower, upper)) = mean_cntr
                if trace.std() == 0.0:
                    lower = upper = trace[0]
                if ('concentration' in name) or ('volume' in name):
                    print("%-64s : initial %7.1e final %7.1e : %7.1e [%7.1e, %7.1e]" % (name, trace[0], trace[-1], mle, lower, upper))
                else:
                    print("%-64s : initial %7.1f final %7.1f : %7.1f [%7.1f, %7.1f]" % (name, trace[0], trace[-1], mle, lower, upper))
            except AttributeError:
                # Skip observed stochastics, which have no trace of this name.
                pass
        print('')
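The credible-interval bookkeeping above relies on scipy.stats.bayes_mvs. As a quick illustration of what it returns (toy data, not taken from the examples):

# Toy illustration of bayes_mvs, the helper used above (data are made up).
import numpy as np
from scipy.stats import bayes_mvs

trace = np.random.normal(loc=-10.0, scale=0.5, size=1000)   # pretend MCMC trace
mean_cntr, var_cntr, std_cntr = bayes_mvs(trace, alpha=0.95)
(center, (lower, upper)) = mean_cntr
print('mean %.2f, 95%% interval [%.2f, %.2f]' % (center, lower, upper))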
Example 6: generate_plots
# Required import: import pymc [as alias]
# Or: from pymc import MCMC [as alias]
# Also requires: import matplotlib.pyplot as plt and from matplotlib.backends.backend_pdf import PdfPages
def generate_plots(self, mcmc, map_fit=None, pdf_filename=None):
    """
    Generate interactive or PDF plots from the MCMC trace.

    Parameters
    ----------
    mcmc : pymc.MCMC
        MCMC samples to plot.
    map_fit : pymc.MAP, optional, default=None
        Plot the maximum a posteriori (MAP) estimate if provided.
    pdf_filename : str, optional, default=None
        If specified, generate a PDF containing plots.
    """
    alpha = 0.95  # confidence level for the credible interval
    print('')
    print('Generating plots...')
    from scipy.stats import bayes_mvs
    # NOTE: as written, a pdf_filename must be supplied; PdfPages(None) will fail.
    with PdfPages(pdf_filename) as pdf:
        for group in self.parameter_names:
            print(group)
            for name in self.parameter_names[group]:
                try:
                    if map_fit:
                        mle = getattr(map_fit, name).value
                    else:
                        mle = getattr(mcmc, name).trace().mean()
                    trace = getattr(mcmc, name).trace()
                    mean_cntr, var_cntr, std_cntr = bayes_mvs(trace, alpha=alpha)
                    (center, (lower, upper)) = mean_cntr
                    if trace.std() == 0.0:
                        lower = upper = trace[0]
                    # Plot the trace with the MLE (or posterior mean) as a horizontal reference line.
                    plt.figure(figsize=(12, 8))
                    # plt.hold(True)  # removed from modern matplotlib; overplotting is now the default
                    niterations = len(trace)
                    plt.plot([0, niterations], [mle, mle], 'r-')
                    plt.plot(trace, 'k.')
                    plt.xlabel('iteration')
                    plt.ylabel(name)
                    plt.title(name)
                    pdf.savefig()
                    plt.close()
                except AttributeError:
                    # Skip observed stochastics, which have no trace of this name.
                    pass
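For reference, the same PdfPages trace-plot pattern can be exercised standalone; this sketch uses a fake trace and an arbitrary output filename rather than the class's parameter_names bookkeeping.

# Standalone sketch of the PdfPages pattern used above (fake trace; names illustrative).
import numpy as np
import matplotlib
matplotlib.use('Agg')                     # render off-screen so no display is needed
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages

trace = np.cumsum(np.random.randn(2000)) * 0.01 - 10.0   # pretend MCMC trace
with PdfPages('traces.pdf') as pdf:
    plt.figure(figsize=(12, 8))
    plt.plot([0, len(trace)], [trace.mean(), trace.mean()], 'r-')   # reference line at the mean
    plt.plot(trace, 'k.')
    plt.xlabel('iteration')
    plt.ylabel('DeltaG')
    pdf.savefig()
    plt.close()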