本文整理汇总了Python中pymultinest.run函数的典型用法代码示例。如果您正苦于以下问题:Python run函数的具体用法?Python run怎么用?Python run使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了run函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: main
def main():
    """Estimate the binary logit model with MultiNest and save run metadata.

    Relies on module-level globals: ``binary_logit_log_likelihood``,
    ``uniform_prior``, ``num_dimensions``, ``relative_output_folder``,
    ``num_live_points``, ``desired_sampling_efficiency`` and
    ``explanatory_vars``.
    """
    # Begin timing the estimation process
    start_time = time.time()
    # Run the MultiNest software
    pmn.run(binary_logit_log_likelihood, uniform_prior, num_dimensions,
            outputfiles_basename=relative_output_folder,
            n_live_points=num_live_points,
            sampling_efficiency=desired_sampling_efficiency,
            log_zero=-1e200,
            mode_tolerance=-1e180,
            null_log_evidence=-1e180,
            resume=False, verbose=True, init_MPI=False)
    # Record the ending time of the estimation process
    end_time = time.time()
    tot_minutes = (end_time - start_time) / 60.0
    # Save the parameter names.  Text mode: json.dump writes str, so 'wb'
    # would raise a TypeError on Python 3.
    with open(relative_output_folder + "parameter_names.json", 'w') as f:
        json.dump(explanatory_vars, f)
    # Save the number of live points used and the total estimation time
    model_run_params = {"n_live_points": num_live_points,
                        "sampling_efficiency": desired_sampling_efficiency,
                        "estimation_minutes": tot_minutes}
    with open(relative_output_folder + "model_run_parameters.json", "w") as f:
        json.dump(model_run_params, f)
    # Report how long the estimation took (parenthesized print works under
    # both Python 2 and Python 3).
    print("Estimation process took {:.2f} minutes".format(tot_minutes))
示例2: multinest
def multinest(parameter_names, transform, loglikelihood, output_basename, **problem):
    """Run MultiNest on ``loglikelihood`` with a unit-cube ``transform`` prior.

    Parameters
    ----------
    parameter_names : list of str
        Names of the model parameters (defines the dimensionality).
    transform : callable
        Maps a point in the unit hypercube to physical parameter values.
    loglikelihood : callable
        Returns the log-likelihood of a physical parameter vector.
    output_basename : str
        Prefix for all MultiNest output files.
    **problem
        Optional keys: ``resume``, ``n_live_points``, ``seed``.

    Returns
    -------
    pymultinest.Analyzer
        Analyzer attached to the finished run.
    """
    parameters = parameter_names
    n_params = len(parameters)

    def myprior(cube, ndim, nparams):
        # Transform the unit cube in place into physical parameters.
        params = transform([cube[i] for i in range(ndim)])
        for i in range(ndim):
            cube[i] = params[i]

    def myloglike(cube, ndim, nparams):
        return loglikelihood([cube[i] for i in range(ndim)])

    # run MultiNest
    mn_args = dict(
        outputfiles_basename=output_basename,
        resume=problem.get('resume', False),
        verbose=True,
        n_live_points=problem.get('n_live_points', 400))
    if 'seed' in problem:
        mn_args['seed'] = problem['seed']
    pymultinest.run(myloglike, myprior, n_params, **mn_args)
    import json
    # Store the parameter names, always useful.  Use open() -- the
    # Python-2-only file() builtin was removed in Python 3 (and the
    # function already uses open() for the stats file below).
    with open('%sparams.json' % output_basename, 'w') as f:
        json.dump(parameters, f, indent=2)
    # analyse
    a = pymultinest.Analyzer(n_params=n_params,
                             outputfiles_basename=output_basename)
    s = a.get_stats()
    with open('%sstats.json' % a.outputfiles_basename, mode='w') as f:
        json.dump(s, f, indent=2)
    return a
示例3: test
def test():
    """Smoke-test pymultinest.run: verify the prior, likelihood and dumper
    callbacks all fire during a short 2-D run."""
    test.prior_was_called = False
    test.loglike_was_called = False
    test.dumper_was_called = False

    def myprior(cube, ndim, nparams):
        # Scale the unit cube to [0, 10*pi] in every dimension.
        for i in range(ndim):
            cube[i] *= 10 * math.pi
        test.prior_was_called = True

    def myloglike(cube, ndim, nparams):
        # Product of cosines -> (2 + chi)^5, a smooth multimodal surface.
        chi = 1.
        for i in range(ndim):
            chi *= math.cos(cube[i] / 2.)
        test.loglike_was_called = True
        return math.pow(2. + chi, 5)

    def mydumper(nSamples, nlive, nPar,
                 physLive, posterior, paramConstr,
                 maxLogLike, logZ, logZerr, nullcontext):
        print("calling dumper")
        test.dumper_was_called = True

    # Two-dimensional toy problem.
    parameters = ["x", "y"]
    n_params = len(parameters)
    # run MultiNest
    pymultinest.run(myloglike, myprior, n_params,
                    resume=True, verbose=True,
                    dump_callback=mydumper)
    assert test.prior_was_called
    assert test.loglike_was_called
    assert test.dumper_was_called
示例4: fit_multinest
def fit_multinest(self, n_live_points=1000, basename="chains/1-", verbose=True, overwrite=True, **kwargs):
    """Fit the model with MultiNest.

    Parameters
    ----------
    n_live_points : int
        Number of MultiNest live points.
    basename : str
        Prefix for MultiNest output files; the directory part is created
        if it does not exist.
    verbose : bool
        Forwarded to ``pymultinest.run``.
    overwrite : bool
        Accepted for interface compatibility; currently unused.
    **kwargs
        Extra keyword arguments forwarded to ``pymultinest.run``.
    """
    self._mnest_basename = basename
    # Create the directory for the output files; MultiNest aborts if the
    # directory in the basename prefix is missing.  (The earlier
    # commented-out version tested the basename itself, not its folder.)
    folder = os.path.dirname(os.path.abspath(self._mnest_basename))
    if not os.path.exists(folder):
        os.makedirs(folder)
    # NOTE(review): "which" presumably selects an extended parameterization
    # (9 + 6 per planet instead of 5 + 6 per planet) -- confirm against the
    # enclosing class.
    if hasattr(self, "which"):
        self.n_params = 9 + 6 * self.lc.n_planets
    else:
        self.n_params = 5 + 6 * self.lc.n_planets
    pymultinest.run(
        self.mnest_loglike,
        self.mnest_prior,
        self.n_params,
        n_live_points=n_live_points,
        outputfiles_basename=self._mnest_basename,
        verbose=verbose,
        **kwargs
    )
    self._make_samples()
示例5: multinest
def multinest(optimizer, nprocs=1):
    """Run MultiNest over the free parameters of ``optimizer``.

    The prior maps unit-cube samples to physical values using log-uniform
    (and angular) ranges keyed off each parameter's name; the likelihood is
    a Gaussian chi-square over the photometric and RV data.

    Parameters
    ----------
    optimizer : object
        Must expose ``params``, ``model``, ``photo_data``, ``rv_data``,
        ``chain``, ``maxlnp`` and ``iterout``.
    nprocs : int
        Number of processes passed to ``optimizer.model``.
    """
    # Hoist the free-parameter list: get_all(True) was previously
    # re-evaluated on every iteration of every prior call.
    free_params = optimizer.params.get_all(True)
    parameters = ["{0}".format(i) for i in range(len(free_params))]
    nparams = len(parameters)
    if not os.path.exists('chains'):
        os.mkdir('chains')

    def lnprior(cube, ndim, nparams):
        theta = np.array([cube[i] for i in range(ndim)])
        for i, param in enumerate(free_params):
            # Name-based prior ranges (log-uniform unless noted).
            if "mass_" in param.name:
                theta[i] = 10 ** (theta[i] * 8 - 9)
            elif "radius_" in param.name:
                theta[i] = 10 ** (theta[i] * 4 - 4)
            elif "flux_" in param.name:
                theta[i] = 10 ** (theta[i] * 4 - 4)
            elif "a_" in param.name:
                theta[i] = 10 ** (theta[i] * 2 - 2)
            elif "e_" in param.name:
                theta[i] = 10 ** (theta[i] * 3 - 3)
            elif "inc_" in param.name:
                # Uniform angle in [0, 2*pi).
                theta[i] *= 2.0 * np.pi
            elif "om_" in param.name:
                theta[i] = 2.0 * np.pi * 10 ** (theta[i] * 2 - 2)
            elif "ln_" in param.name:
                theta[i] = 2.0 * np.pi * 10 ** (theta[i] * 8 - 8)
            elif "ma_" in param.name:
                theta[i] = 2.0 * np.pi * 10 ** (theta[i] * 2 - 2)
        for i in range(ndim):
            cube[i] = theta[i]

    def lnlike(cube, ndim, nparams):
        theta = np.array([cube[i] for i in range(ndim)])
        optimizer.params.update(theta)
        mod_flux, mod_rv = optimizer.model(nprocs)
        flnl = -(0.5 * ((mod_flux - optimizer.photo_data[1]) /
                        optimizer.photo_data[2]) ** 2)
        rvlnl = -(0.5 * ((mod_rv - optimizer.rv_data[1]) /
                         optimizer.rv_data[2]) ** 2)
        # Compute the total log-likelihood once and reuse it (it was
        # previously summed three separate times).
        tlnl = np.sum(flnl) + np.sum(rvlnl)
        nobj = np.append(tlnl, theta)
        optimizer.chain = np.vstack([optimizer.chain, nobj])
        if tlnl > optimizer.maxlnp:
            optimizer.iterout(tlnl, theta, mod_flux)
        return tlnl

    # run MultiNest
    pymultinest.run(lnlike, lnprior, nparams, n_live_points=1000)
示例6: main
def main():
    """Launch a constant-efficiency MultiNest scan of the stellar sample.

    Changes the working directory to the (hard-coded) sample folder so the
    ``DMco_`` output files land there.
    """
    cube = [0.9, 0.5, 0.1]  # initial values, not used by MultiNest
    # nparams is the full dimensionality; the unused `ndim` alias is gone.
    nparams = len(cube)
    os.chdir('/home/jordi/allst/sample')
    pm.run(F_calc_Likelihood4stmd, F_allpriors, nparams,
           importance_nested_sampling=False,
           resume=False, verbose=True,
           n_live_points=32,
           outputfiles_basename="DMco_",
           sampling_efficiency=0.02,
           const_efficiency_mode=True,
           init_MPI=False)
示例7: run_multinest
def run_multinest(posterior, save_file):
    """Use the MultiNest sampler to calculate the evidence instead of emcee.

    posterior : object exposing ``prior``, ``lik`` and ``get_dim`` methods.
    save_file : output path; MultiNest resumes from it if it already exists.
    """
    # Only the MPI master rank creates the output directory.
    if not os.path.exists(save_file) and mpi.COMM_WORLD.rank == 0:
        os.mkdir(save_file)
    assert hasattr(posterior, 'prior') and hasattr(posterior, 'lik'), 'must have prior and lik methods'
    # Hand the posterior's callbacks straight to the sampler.
    pymultinest.run(posterior.lik, posterior.prior, posterior.get_dim(),
                    outputfiles_basename=save_file)
示例8: generate
def generate(lmod_pars, lparams, lphoto_data, lrv_data, lncores, lfname):
    """Run MultiNest for the system and plot the posterior model family.

    Copies the arguments into module-level globals (consumed by the
    ``lnlike`` / ``lnprior`` callbacks defined elsewhere in this module),
    runs the sampler, saves the parameter names, and overlays a thinned
    sample of posterior models on the photometric data.
    """
    global mod_pars, params, photo_data, rv_data, ncores, fname
    mod_pars, params, photo_data, rv_data, ncores, fname = \
        lmod_pars, lparams, lphoto_data, lrv_data, lncores, lfname
    # number of dimensions our problem has
    parameters = ["{0}".format(i) for i in range(mod_pars[0] * 5 + (mod_pars[0] - 1) * 6)]
    nparams = len(parameters)
    # make sure the output directories exist
    if not os.path.exists("./output/{0}/multinest".format(fname)):
        os.makedirs(os.path.join("./", "output", "{0}".format(fname), "multinest"))
    if not os.path.exists("./output/{0}/plots".format(fname)):
        os.makedirs(os.path.join("./", "output", "{0}".format(fname), "plots"))
    if not os.path.exists("chains"):
        os.makedirs("chains")
    # we want to see some output while it is running
    progress_plot = pymultinest.ProgressPlotter(n_params=nparams,
                                                outputfiles_basename='output/{0}/multinest/'.format(fname))
    progress_plot.start()
    # run MultiNest
    pymultinest.run(lnlike, lnprior, nparams, outputfiles_basename=u'./output/{0}/multinest/'.format(fname),
                    resume=True, verbose=True,
                    sampling_efficiency='parameter', n_live_points=1000)
    # run has completed
    progress_plot.stop()
    json.dump(parameters, open('./output/{0}/multinest/params.json'.format(fname), 'w'))  # save parameter names
    # plot the distribution of a posteriori possible models
    plt.figure()
    plt.plot(photo_data[0], photo_data[1], '+ ', color='red', label='data')
    # BUG FIX: the Analyzer must read the same basename the run wrote to
    # ("multinest/", not "reports/") or it finds no output files.
    a = pymultinest.Analyzer(outputfiles_basename="./output/{0}/multinest/".format(fname), n_params=nparams)
    for theta in a.get_equal_weighted_posterior()[::100, :-1]:
        params = utilfuncs.split_parameters(theta, mod_pars[0])
        mod_flux, mod_rv = utilfuncs.model(mod_pars, params, photo_data[0], rv_data[0])
        # BUG FIX: these are posterior model curves, not the data.
        plt.plot(photo_data[0], mod_flux, '-', color='blue', alpha=0.3, label='model')
    utilfuncs.report_as_input(params, fname)
    plt.savefig('./output/{0}/plots/posterior.pdf'.format(fname))
    plt.close()
示例9: perform_scan_multinest
def perform_scan_multinest(self, chains_dir, nlive=100):
    """Perform a scan with MultiNest.

    chains_dir : directory for the MultiNest output files (created first).
    nlive : number of live points.
    """
    self.make_dirs([chains_dir])
    n_params = len(self.floated_params)
    # Fixed sampler settings for this scan, passed directly as keywords.
    pymultinest.run(self.ll, self.prior_cube, n_params,
                    outputfiles_basename=chains_dir,
                    n_live_points=nlive,
                    importance_nested_sampling=False,
                    resume=False,
                    verbose=True,
                    sampling_efficiency='model',
                    init_MPI=False,
                    evidence_tolerance=0.5,
                    const_efficiency_mode=False)
示例10: main
def main():
    """Run a MultiNest scan configured by the module-level ``rp`` dict.

    Under MPI only the master rank prints the runtime parameters and
    copies the config file into the output directory.

    Returns
    -------
    int
        0 on completion.
    """
    # Set up MPI variables
    world = MPI.COMM_WORLD
    rank = world.rank
    size = world.size
    master = rank == 0
    if master:
        # Parenthesized print works under both Python 2 and Python 3.
        print("Runtime parameters")
        pprint.pprint(rp)
        time.sleep(2)
    if not os.path.exists(rp["outdir"]):
        try:
            os.mkdir(rp["outdir"])
        except OSError:
            # Another MPI rank may create the directory between the
            # exists() check and mkdir(); ignore only that failure
            # instead of the previous bare `except:` which hid all errors.
            pass
    n_params = rp["nc_fit"] + 3
    pymultinest.run(loglike, logprior, n_params, resume=False, verbose=True,
                    multimodal=rp["multimodal"], max_modes=rp["max_modes"],
                    write_output=True,
                    n_live_points=rp["n_live_points"],
                    evidence_tolerance=rp["evidence_tolerance"],
                    mode_tolerance=rp["mode_tolerance"],
                    seed=rp["seed"],
                    max_iter=rp["max_iter"],
                    importance_nested_sampling=rp["do_ins"],
                    outputfiles_basename=rp["outputfiles_basename"],
                    init_MPI=False)
    if master:
        # Copy the config.ini file to the output dir
        shutil.copy(param_file, rp["outdir"])
    return 0
示例11: multinest
def multinest(self, *args, **kwargs):
    """Sample the free parameters with MultiNest and store the posterior.

    Positional/keyword arguments are forwarded to ``pymultinest.run``;
    ``verbose``, ``resume`` and ``outputfiles_basename`` receive defaults
    if not supplied, and ``log_zero`` is always forced to -1e9.  Fills
    ``self.samples`` and ``self.posteriors`` from the equal-weighted
    posterior.
    """
    import pymultinest
    self.freeParameters = self.modelManager.getFreeParameters()

    def prior(cube, ndim, nparams):
        # Map each unit-cube coordinate through its parameter's prior.
        for i, p in enumerate(self.freeParameters.values()):
            cube[i] = p.prior.multinestCall(cube[i])

    def loglike(cube, ndim, nparams):
        # MultiNest maximizes log L; guard against NaN likelihoods.
        logL = -self.minusLogLike(cube)
        if numpy.isnan(logL):
            logL = -1e10
        return logL

    kwargs.setdefault('verbose', True)
    kwargs.setdefault('resume', False)
    kwargs.setdefault('outputfiles_basename', '_1_')
    kwargs['log_zero'] = -1e9
    pymultinest.run(loglike, prior, len(self.freeParameters), *args, **kwargs)
    print("done")
    # Collect the samples
    analyzer = pymultinest.Analyzer(n_params=len(self.freeParameters),
                                    outputfiles_basename=kwargs['outputfiles_basename'])
    eqw = analyzer.get_equal_weighted_posterior()
    self.samples = eqw[:, :-1]
    self.posteriors = eqw[:, -1]
示例12: run
def run(gp):
    """Launch MultiNest on the externally-defined ``myprior``/``myloglike``
    callbacks, with all sampler settings taken from the ``gp`` configuration
    object."""
    pymultinest.run(myloglike, myprior, gp.ndim, n_params = gp.ndim+1,
                    n_clustering_params = gp.nrho, # cluster on the rho parameters only
                    wrapped_params = [ gp.pops, gp.nipol, gp.nrho],
                    importance_nested_sampling = False, # INS disabled
                    multimodal = False, # do NOT separate modes
                    const_efficiency_mode = True, # use const sampling efficiency
                    n_live_points = gp.nlive,
                    evidence_tolerance = 0.0, # 0 to keep algorithm working indefinitely
                    sampling_efficiency = 0.05, # MultiNest README recommends ~0.05 for >30 params
                    n_iter_before_update = 2, # output after this many iterations
                    null_log_evidence = -1e100,
                    max_modes = gp.nlive, # preallocation of modes: max=number of live points
                    mode_tolerance = -1.e100, # highly negative: no special mode cutoff
                    outputfiles_basename = gp.files.outdir,
                    seed = -1, verbose = True,
                    resume = gp.restart,
                    context = 0, write_output = True,
                    log_zero = -1e500, # NOTE(review): -1e500 overflows a Python float to -inf; points with log L below it are neglected -- confirm intended
                    max_iter = 0, # 0 = never stop on iteration count
                    init_MPI = False, dump_callback = None)
示例13: run
def run():
    """Load (or re-bin) the data files via ``gl_file``, then launch a
    multimodal, importance-sampled MultiNest scan over the rho/nu/beta
    parameterization defined by the module-level ``gp`` object."""
    import gl_file as gfile
    if gp.getnewdata:
        gfile.bin_data()
    gfile.get_data()
    ## number of dimensions: rho, plus (nu, beta) per population
    n_dims = gp.nepol + gp.pops*gp.nepol + gp.pops*gp.nbeta #rho, (nu, beta)_i
    parameters = stringlist(gp.pops, gp.nepol)
    # show live progress
    # progress = pymultinest.ProgressPlotter(n_params = n_dims)
    # progress.start()
    # threading.Timer(2, show, [gp.files.outdir+'/phys_live.points.pdf']).start()
    # print(str(len(gp.files.outdir))+': len of gp.files.outdir')
    pymultinest.run(myloglike, myprior,
                    n_dims, n_params = n_dims, # None beforehands
                    n_clustering_params = gp.nepol, # separate modes on the rho parameters only
                    wrapped_params = None, # do not wrap-around parameters
                    importance_nested_sampling = True, # INS enabled
                    multimodal = True, # separate modes
                    const_efficiency_mode = True, # use const sampling efficiency
                    n_live_points = gp.nlive,
                    evidence_tolerance = 0.0, # set to 0 to keep algorithm working indefinitely
                    sampling_efficiency = 0.80,
                    n_iter_before_update = gp.nlive, # output after this many iterations
                    null_log_evidence = -1, # separate modes if logevidence > this param.
                    max_modes = gp.nlive, # preallocation of modes: maximum = number of live points
                    mode_tolerance = -1.,
                    outputfiles_basename = gp.files.outdir,
                    seed = -1,
                    verbose = True,
                    resume = False,
                    context = 0,
                    write_output = True,
                    log_zero = -1e6,
                    max_iter = 10000000,
                    init_MPI = True, # NOTE(review): True lets MultiNest initialize MPI itself; other runs in this file pass False -- confirm intended
                    dump_callback = None)
示例14: run
def run(self, clean_up=None, **kwargs):
    """Run the MultiNest fit in a (possibly temporary) directory.

    clean_up : bool or None
        If None, clean up exactly when a temporary directory was used
        (i.e. ``self.run_dir`` is None).  Extra keyword arguments are
        forwarded to ``pymultinest.run``.  Stores and returns the
        MultiNestResult, and writes the median posterior values back into
        the non-fixed likelihood parameters.
    """
    if clean_up is None:
        # Default: temporary directories are deleted, user-chosen ones kept.
        clean_up = self.run_dir is None
    run_dir = tempfile.mkdtemp() if self.run_dir is None else self.run_dir
    basename = self.prepare_fit_directory(run_dir, self.prefix)
    start_time = time.time()
    logger.info('Starting fit in {0} with prefix {1}'.format(run_dir, self.prefix))
    pymultinest.run(self.likelihood.multinest_evaluate,
                    self.priors.prior_transform,
                    self.n_params,
                    outputfiles_basename='{0}_'.format(basename),
                    **kwargs)
    logger.info("Fit finished - took {0:.2f} s"
                .format(time.time() - start_time))
    # Only parameters that were actually floated appear in the result.
    fitted_parameter_names = [name for name in self.likelihood.param_names
                              if not self.likelihood.fixed[name]]
    self.result = MultiNestResult.from_multinest_basename(
        basename, fitted_parameter_names)
    if clean_up:
        logger.info("Cleaning up - deleting {0}".format(run_dir))
        shutil.rmtree(run_dir)
    else:
        logger.info("Multinest files can be found in {0}".format(run_dir))
    self.likelihood.parameters[~self.likelihood.fixed_mask()] = (
        self.result.median.values)
    return self.result
示例15: run
def run(self, clean_up=None, **kwargs):
    """Run the MultiNest fit in a (possibly temporary) directory.

    clean_up : bool or None
        If None, clean up exactly when a temporary directory was used
        (i.e. ``self.run_dir`` is None).  Extra keyword arguments are
        forwarded to ``pymultinest.run``.  Stores and returns the
        MultinestResult over all likelihood parameters.
    """
    if clean_up is None:
        if self.run_dir is None:
            clean_up = True
        else:
            clean_up = False
    if self.run_dir is None:
        run_dir = tempfile.mkdtemp()
    else:
        run_dir = self.run_dir
    basename = self.prepare_fit_directory(run_dir, self.prefix)
    start_time = time.time()
    logger.info('Starting fit in {0} with prefix {1}'.format(run_dir, self.prefix))
    pymultinest.run(self.likelihood, self.priors.prior_transform,
                    self.n_params,
                    outputfiles_basename='{0}_'.format(basename),
                    **kwargs)
    logger.info("Fit finished - took {0:.2f} s"
                .format(time.time() - start_time))
    self.result = MultinestResult.from_multinest_basename(
        basename, self.likelihood.param_names)
    # Fixed: plain truthiness test instead of the non-idiomatic "== True".
    if clean_up:
        logger.info("Cleaning up - deleting {0}".format(run_dir))
        shutil.rmtree(run_dir)
    else:
        # Consistent with the sibling implementation above: tell the user
        # where the MultiNest output files were left.
        logger.info("Multinest files can be found in {0}".format(run_dir))
    return self.result