This article collects and summarizes typical usage examples of the Python method emcee.utils.MPIPool.close. If you are wondering what exactly MPIPool.close does, or how and when to use it, the curated code examples below should help. You can also explore further usage of the containing class, emcee.utils.MPIPool.
The following shows 15 code examples of MPIPool.close, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
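All the examples below share the same basic skeleton. Here is a minimal, self-contained sketch of that pattern, assuming the legacy emcee 2.x API (where MPIPool lived in emcee.utils; in emcee 3.x the MPI pool moved to the schwimmbad package) and a placeholder log-probability: worker processes block in pool.wait() and exit when released, the master runs the sampler, and pool.close() shuts the workers down.

# Minimal MPIPool.close usage sketch (legacy emcee 2.x; launch under MPI,
# e.g. `mpirun -np 4 python script.py`). log_prob is an illustrative placeholder.
import sys
import numpy as np
import emcee
from emcee.utils import MPIPool

def log_prob(theta):
    return -0.5 * np.sum(theta ** 2)  # toy standard-normal posterior

pool = MPIPool()
if not pool.is_master():
    pool.wait()    # workers serve tasks sent by the master...
    sys.exit(0)    # ...and exit once the master closes the pool

ndim, nwalkers = 5, 32
p0 = np.random.randn(nwalkers, ndim)
sampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob, pool=pool)
sampler.run_mcmc(p0, 1000)
pool.close()       # always called by the master once sampling is done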
Example 1: fit_bim_bh3_curves
# Required import: from emcee.utils import MPIPool [as alias]
# Or: from emcee.utils.MPIPool import close [as alias]
def fit_bim_bh3_curves(p0=None):
# Choose initial position
if p0 is None:
p0 = np.zeros((nwalkers, ndim))
for walk_ix in range(nwalkers):
for d_ix in range(len(data)):
p0[walk_ix, d_ix*3] = np.random.uniform(1, 6)
p0[walk_ix, d_ix*3 + 1] = np.random.uniform(6e-5, 1e-3)
p0[walk_ix, d_ix*3 + 2] = np.random.uniform(2, 3)
hp_ix = len(data)*3
p0[walk_ix, hp_ix] = np.random.uniform(1,6) # fmax mean
p0[walk_ix, hp_ix + 1] = np.random.uniform(0,1) # fmax sd
p0[walk_ix, hp_ix + 2] = np.random.uniform(6e-5, 1e-3) # k mean
p0[walk_ix, hp_ix + 3] = np.random.uniform(0,1e-1) # k sd
p0[walk_ix, hp_ix + 4] = np.random.uniform(2,3) # f0 mean
p0[walk_ix, hp_ix + 5] = np.random.uniform(0,1) # f0 sd
#plt.figure()
#for d_ix, data_i in enumerate(data):
# plt.plot(time, data_i, color=colors[d_ix])
# plt.plot(time, fit_func(p0[0, d_ix*3:(d_ix+1)*3]), color='k')
# Initialize the MPI pool
pool = MPIPool()
if not pool.is_master():
pool.wait()
sys.exit(0)
# Get the sampler
sampler = emcee.EnsembleSampler(nwalkers, ndim, posterior, pool=pool)
# Burn-in
print("Burn-in sampling...")
pos, prob, state = sampler.run_mcmc(p0, burn_steps, storechain=False)
sampler.reset()
# Main sampling
print("Main sampling...")
sampler.run_mcmc(pos, sample_steps)
# Close the pool!
pool.close()
# Pickle the sampler
sampler.pool = None
    with open('bimbh3_141125_2.pck', 'wb') as f:
        pickle.dump(sampler, f)
return sampler
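For completeness, a sketch of reading the pickled sampler back later (assuming the same filename as above; the file must be opened in binary mode to match 'wb'):

# Hypothetical reload of the sampler pickled above.
import pickle
with open('bimbh3_141125_2.pck', 'rb') as f:
    sampler = pickle.load(f)
print(sampler.flatchain.shape)  # (nwalkers * sample_steps, ndim)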
Example 2: run_pool
# Required import: from emcee.utils import MPIPool [as alias]
# Or: from emcee.utils.MPIPool import close [as alias]
def run_pool(pC, pW, walk, step): #pCenter and pWidths
steps = step
nwalkers = walk
ndim = len(pC)
## r in, del r, i, PA
p0 = [pC[0], pC[1], pC[2], pC[3], pC[4]]
widths = [pW[0], pW[1], pW[2], pW[3], pW[4]]
p = emcee.utils.sample_ball(p0,widths,size=nwalkers)
pool = MPIPool()
if not pool.is_master():
pool.wait()
sys.exit(0)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnlike_visonly, live_dangerously=True, pool=pool)
    print('Beginning the MCMC run.')
    start = time.time()
    sampler.run_mcmc(p, steps)
    stop = time.time()
    pool.close()
    print('MCMC finished successfully.\n')
    print('This was a visibility-only run with {} walkers and {} steps'.format(nwalkers, steps))
    print("Mean acor time: " + str(np.mean(sampler.acor)))
    print("\nMean acceptance fraction: " + str(np.mean(sampler.acceptance_fraction)))
    print('Run took %r minutes' % ((stop - start) / 60.))
chain = sampler.chain
chi = (sampler.lnprobability)/(-0.5)
whatbywhat = str(nwalkers)+'x'+str(steps)
os.system('mkdir MCMCRUNS/vis_only/'+whatbywhat)
chainFile = 'MCMCRUNS/vis_only/'+whatbywhat+'/'+whatbywhat+'.chain.fits'
chiFile = 'MCMCRUNS/vis_only/'+whatbywhat+'/'+whatbywhat+'.chi.fits'
infoFile = 'MCMCRUNS/vis_only/'+whatbywhat+'/'+whatbywhat+'.runInfo.txt'
fits.writeto(chainFile,chain)
fits.writeto(chiFile,chi)
f = open(infoFile,'w')
f.write('run took %r minutes\n' % ((stop - start)/60.))
f.write('walkers: %r\n' % nwalkers)
f.write('steps: %r\n' % steps)
f.write('initial model: %r\n' % p0)
f.write('widths: %r\n' % widths)
f.write("mean acor time: "+str(np.mean(sampler.acor)))
f.write("\nmean acceptance fraction: "+str(np.mean(sampler.acceptance_fraction)))
f.close()
    print('Data written to: \n' + chainFile + '\n' + chiFile + '\n' + infoFile)
Example 3: tdelay_dt_mcmc
# Required import: from emcee.utils import MPIPool [as alias]
# Or: from emcee.utils.MPIPool import close [as alias]
def tdelay_dt_mcmc(run, theta, Niter=20, Nwalkers=10, Ndim=2, sigma_smhm=0.2, nsnap0=15, downsampled='14', flag=None, continue_chain=False):
'''
'''
if Ndim == 2:
tdelay_range = [0., 3.]#np.arange(0., 3., 0.5)
dt_range = [0.1, 4.]
# new chain
chain_file = ''.join([UT.fig_dir(), run, '.tdelay_dt_mcmc.chain.dat'])
if os.path.isfile(chain_file) and continue_chain:
        print('Continuing previous MCMC chain!')
        sample = np.loadtxt(chain_file)
        Niter = Niter - int(len(sample) / Nwalkers)  # number of iterations left to finish
        if Niter <= 0:
            raise ValueError("chain already has the requested number of iterations")
        print(Niter, 'iterations left to finish')
else:
f = open(chain_file, 'w')
f.close()
# Initializing Walkers
pos0 = [np.array([np.random.uniform(tdelay_range[0], tdelay_range[1]), np.random.uniform(dt_range[0], dt_range[1])]) for i in range(Nwalkers)]
pool = MPIPool()
if not pool.is_master():
pool.wait()
sys.exit(0)
# Initializing the emcee sampler
kwargs = {
'theta': theta,
'sigma_smhm': 0.2,
'nsnap0': 15,
'downsampled': '14',
}
sampler = emcee.EnsembleSampler(Nwalkers, Ndim, sigM, pool=pool, kwargs=kwargs)
for result in sampler.sample(pos0, iterations=Niter, storechain=False):
position = result[0]
#print position
f = open(chain_file, 'a')
for k in range(position.shape[0]):
output_str = '\t'.join(position[k].astype('str')) + '\n'
f.write(output_str)
f.close()
pool.close()
return None
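A small companion sketch: reading an incrementally written chain file like this back in and reshaping it per iteration, assuming the same Nwalkers and Ndim as above (each iteration appends one block of Nwalkers rows):

# Hypothetical read-back of the chain file written above.
import numpy as np
sample = np.loadtxt(chain_file)             # shape: (Nwalkers * n_iterations, Ndim)
chain = sample.reshape(-1, Nwalkers, Ndim)  # one (Nwalkers, Ndim) block per iteration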
Example 4: lnPost
# Required import: from emcee.utils import MPIPool [as alias]
# Or: from emcee.utils.MPIPool import close [as alias]
def lnPost(theta):
'''log-posterior
'''
# prior calculations
if prior_min[0] < theta[0] < prior_max[0] and \
prior_min[1] < theta[1] < prior_max[1] and \
prior_min[2] < theta[2] < prior_max[2] and \
prior_min[3] < theta[3] < prior_max[3] and \
prior_min[4] < theta[4] < prior_max[4]:
lnPrior = 0.0
else:
lnPrior = -np.inf
if not np.isfinite(lnPrior):
return -np.inf
return lnPrior + lnLike(theta)
"""Initializing Walkers"""
pos = [np.array([11. , np.log(.4) , 11.5 , 1.0 , 13.5]) + 1e-3*np.random.randn(Ndim) for i in range(Nwalkers)]
"""Initializing MPIPool"""
pool = MPIPool(loadbalance=True)
if not pool.is_master():
pool.wait()
sys.exit(0)
"""Initializing the emcee sampler"""
sampler = emcee.EnsembleSampler(Nwalkers, Ndim, lnprob, pool=pool)
# Burn in + Production
sampler.run_mcmc(pos, Nchains_burn + Nchains_pro)
# Production.
samples = sampler.chain[:, Nchains_burn:, :].reshape((-1, Ndim))
#closing the pool
pool.close()
np.savetxt("mcmc_sample.dat" , samples)
Example 5: main
# Required import: from emcee.utils import MPIPool [as alias]
# Or: from emcee.utils.MPIPool import close [as alias]
def main():
'''
A parallel run.
'''
pool = MPIPool(loadbalance=True)
if not pool.is_master():
pool.wait()
sys.exit(0)
clf = hbsgc.HBSGC(pool=pool)
# save start time
clf.last_clock = time.clock()
clf.filter_calcs()
clf.data_calcs()
clf.star_model_calcs()
# if clf.calc_model_mags:
# clf.star_model_mags()
clf.gal_model_calcs()
# if clf.calc_model_mags:
# clf.gal_model_mags()
clf.fit_calcs()
clf.count_tot = 0
clf.sample()
clf.save_proba()
if clf.min_chi2_write:
clf.save_min_chi2()
pool.close()
Example 6: ens_mpi_sample
# Required import: from emcee.utils import MPIPool [as alias]
# Or: from emcee.utils.MPIPool import close [as alias]
def ens_mpi_sample(gf, nwalkers, burn_steps, sample_steps, pos=None,
random_state=None):
pool = MPIPool(loadbalance=True)
if not pool.is_master():
pool.wait()
sys.exit(0)
# Initialize the parameter array with initial values (in log10 units)
# Number of parameters to estimate
ndim = (len(gf.builder.global_params) +
(len(gf.data) * len(gf.builder.local_params)))
# Initialize the walkers with starting positions drawn from the priors
# Note that the priors are in log10 scale already, so they don't
# need to be transformed here
if pos is None:
p0 = np.zeros((nwalkers, ndim))
for walk_ix in range(nwalkers):
for p_ix in range(ndim):
p0[walk_ix, p_ix] = gf.priors[p_ix].random()
else:
p0 = pos
# Create the sampler object
sampler = emcee.EnsembleSampler(nwalkers, ndim, posterior,
args=[gf], pool=pool)
if random_state is not None:
sampler.random_state = random_state
print "Burn in sampling..."
pos, prob, state = sampler.run_mcmc(p0, burn_steps, storechain=False)
sampler.reset()
print "Main sampling..."
sampler.run_mcmc(pos, sample_steps)
# Close the pool!
pool.close()
print "Done sampling."
return sampler
Example 7: mcmc
# Required import: from emcee.utils import MPIPool [as alias]
# Or: from emcee.utils.MPIPool import close [as alias]
def mcmc(zmin , zmax , iteration):
ndim, nwalkers = 16, 50
bounds = [(-0.2, 0.2), (-0.2, 0.2),(-0.2,0.2),\
(1.5, 3.0) , (0.7, 2.0) , (0.3, 1.0),\
(-20.0,-1.0),(-20.0,-2.0),(-20.0,-2.0),\
(0, 1),(0.0, 4.0), (0,2.0), (0,1),\
(-7.2, 10.0),(-7.2, 10.0),(-7.2, 10.0)]
p0 = np.array([0.0, 0.0, 0.0, 2.0, 1.0, 0.5 , np.log(0.1), np.log(0.1),np.log(0.1),
0.7,1.5,1.0,0.4,np.log(2.0),np.log(2.0),np.log(2.0)])
p0 = [p0 + 1e-5 * np.random.randn(ndim) for k in range(nwalkers)]
pool = MPIPool(loadbalance=True)
if not pool.is_master():
pool.wait()
sys.exit(0)
# Set up the sampler.
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob , pool=pool)
#sampler.sample(p0, 1000)
#print sampler.chain.shape
it = 0
for result in sampler.sample(p0, iterations = 1000):
        print(it)
it += 1
# Run a burn-in chain and save the final location.
#pos, _, _, _ = sampler.run_mcmc(p0, 3000)
#pos, _, _, _ = sampler.sample(p0, 3000)
from matplotlib.ticker import MaxNLocator
sample = sampler.chain
npars = sample.shape[2]
fig , axes = plt.subplots(npars , 1 , sharex=True, figsize=(10, 12))
    for i in range(npars):
axes[i].plot(sample[:, :, i].T, color="b", alpha=.4 , lw = .5)
axes[i].yaxis.set_major_locator(MaxNLocator(5))
axes[i].set_ylim([bounds[i][0], bounds[i][1]])
axes[i].set_xlim(0, 5000)
#axes[i].set_ylabel(labels[i], fontsize=25)
axes[-1].set_xlabel("Step Number", fontsize=25)
fig.tight_layout(h_pad=0.0)
fig_file = "/home/vakili/public_html/files/redsequence_all_temp/"+str(zmin)+"_z_"+str(zmax)+"burn_iter"+str(iteration)+".png"
plt.savefig(fig_file)
plt.close()
# Run the production chain.
#sampler.reset()
#sampler.run_mcmc(pos, 1000)
"""
import corner
labels = ["$m$", "$b$", "\ln f", "$Q$", "$M$", "$\ln V$"]
#truths = true_params + [true_frac, true_outliers[0], np.log(true_outliers[1])]
bounds = [(-0.2, 0.2), (0.7,2.0), (-20.0, -2.0), (0, 1), (0.0, 2.0), (-7.2, 5.2)]
#corner.corner(sampler.flatchain, bins=35, range=bounds, labels=labels)
#plt.savefig("/home/vakili/public_html/files/mcmc.png")
#plt.close()
"""
sample = sampler.chain
pool.close()
npars = sample.shape[2]
fig , axes = plt.subplots(npars , 1 , sharex=True, figsize=(10, 12))
    for i in range(npars):
axes[i].plot(sample[:, :, i].T, color="b", alpha=.4 , lw = .5)
axes[i].yaxis.set_major_locator(MaxNLocator(5))
axes[i].set_ylim([bounds[i][0], bounds[i][1]])
axes[i].set_xlim(0, 1500)
#axes[i].set_ylabel(labels[i], fontsize=25)
axes[-1].set_xlabel("Step Number", fontsize=25)
fig.tight_layout(h_pad=0.0)
fig_file = "/home/vakili/public_html/files/redsequence_all_temp/"+str(zmin)+"_z_"+str(zmax)+"chain_iter"+str(iteration)+".png"
plt.savefig(fig_file)
plt.close()
"""
est = np.median(sampler.flatchain , axis = 0)
est[2] = np.median(np.exp(sampler.flatchain)**.5 , axis = 0)[2]
est_err = np.std(sampler.flatchain , axis = 0)
est_err[2] = np.std(np.exp(sampler.flatchain)**.5 , axis = 0)[2]
xx = np.linspace(14.5 , 25.5 , 1000)
pred = est[1] + est[0]*(xx - 19)
"""
return None
"""
norm = 0.0
post_prob = np.zeros(len(x))
for i in range(sampler.chain.shape[1]):
for j in range(sampler.chain.shape[0]):
ll_fg, ll_bg = sampler.blobs[i][j]
#......... part of the code omitted here .........
Example 8: emceeinit
# Required import: from emcee.utils import MPIPool [as alias]
# Or: from emcee.utils.MPIPool import close [as alias]
def emceeinit(w0, inclin, nbins, nthreads, nsteps, savename, data, dbins, MPI=0, allbinseq=0):
"""Emcee driver function"""
#HARDCODED - Warning. Also bins.
global incl
incl = inclin
#Initialize the MPI-based pool used for parallelization.
if MPI:
        print(MPI)
pool = MPIPool()
if not pool.is_master():
# Wait for instructions from the master process.
pool.wait()
sys.exit(0)
#Setup
ndim = nbins #Removing inclination as a variable.
nwalkers = 4*ndim
p0 = np.zeros((nwalkers, ndim))
    print('Nbins is now', nbins)
#Needed for fixing unresolved starting balls
global b1
global rin
rin, b1 = dbins
#Initialize walkers
radii = np.arange(nbins)
sizecorr = 1 #Currently Hardcoded; Scaling factor to treat different radii differently
scale = 0.2 #Currently hardcoded; Fraction of parameter by which it can vary
for walker in range(nwalkers):
for rs in radii:
rand = np.random.uniform(-(w0[rs]*scale*sizecorr), (w0[rs]*scale*sizecorr))
if (b1[rs] <= res) and (allbinseq <1) :
rand = np.random.uniform(0, 2.*w0[rs])
p0[walker][rs] = w0[rs] + rand #Make it rs+2, if a & l vary
# #Initialize a & l
# p0[walker][0] = np.random.uniform(.0001, .5) #When adding back in, make prev statement rs+1
# while True:
# p0[walker][1] = np.random.gamma(2., 2.)*np.amax(dbins[1:])/20. + np.amin(np.diff(dbins[1:]))
# if (p0[walker][1]>=np.amin(dbins[1:]) or p0[walker][1]<=np.amax(dbins[1:])):
# break
#THIS IS A PROBLEM FOR THE 1st BIN WITH rin. Also the normalization
# p0[walker][0] = incl+np.random.uniform(0.85*incl,1.15*incl) #When adding back in, make prev statement rs+1
#Write emcee perturbation params to log file
f = open('emceerand.log', 'a')
FORMAT = '%m-%d-%Y-%H%M'
f.write(savename+', '+str(nbins)+', '+str(nsteps)+', '+str(scale)+', '+str(sizecorr)+', '+datetime.now().strftime(FORMAT))
#Model initialization
u, v, dreal, dimag, dwgt = data
udeproj = u * np.cos(incl) #Deproject
rho = 1e3*np.sqrt(udeproj**2+v**2)
indices = np.arange(b1.size)
global gpbins
gpbins = dbins
#rin, indices
global rbin
rbin = np.concatenate([np.array([rin]), b1])
jarg = np.outer(2.*np.pi*rbin, rho/206264.806427)
global jinc
jinc = sc.j1(jarg)/jarg
# pool = mp.Pool(nthreads-1)
#Initialize sampler using MPI if necessary
if MPI:
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, pool=pool)
else:
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, threads=nthreads)
    print('Nbins, Ndim', nbins, ndim)
    print('Dbins', dbins)
#Run emcee, and time it
tic = time.time()
print "I'm line 110, before the threads"
sampler.run_mcmc(p0, nsteps)
print "I'm line 112, after the threads"
toc = time.time()
#Display and record run information
    print('Elapsed emcee run time:', ((toc - tic) / 60.))
    print('Acceptance:', sampler.acceptance_fraction)
f.write(' ,'+str(round((toc-tic)/60., 2))+', '+str(np.round(np.mean(sampler.acceptance_fraction),2))+'\n')
f.close()
#Save the results in a binary file
np.save('mc_'+savename,sampler.chain)
if MPI:
#Close the processes.
pool.close()
    print('Done with this emcee run')
#......... part of the code omitted here .........
Example 9: EnsembleSampler
# Required import: from emcee.utils import MPIPool [as alias]
# Or: from emcee.utils.MPIPool import close [as alias]
#......... part of the code omitted here .........
The amount of seconds between saving the chain to file. Setting to ``None``
disables serialisation.
"""
self.logger = logging.getLogger(__name__)
import emcee
self.chain = None
self.pool = None
self.master = True
self.num_steps = num_steps
self.num_burn = num_burn
self.temp_dir = temp_dir
if temp_dir is not None and not os.path.exists(temp_dir):
os.makedirs(temp_dir)
self.save_interval = save_interval
self.num_walkers = num_walkers
def fit(self, kwargs):
""" Runs the sampler over the model and returns the flat chain of results
Parameters
----------
kwargs : dict
Containing the following information at a minimum:
- log_posterior : function
A function which takes a list of parameters and returns
the log posterior
- start : function|list|ndarray
Either a starting position, or a function that can be called
to generate a starting position
- save_dims : int, optional
Only return values for the first ``save_dims`` parameters.
Useful to remove numerous marginalisation parameters if running
low on memory or hard drive space.
- uid : str, optional
A unique identifier used to differentiate different fits
if two fits both serialise their chains and use the
same temporary directory
Returns
-------
dict
            A dictionary with key "chain" containing the final
flattened chain of dimensions
``(num_dimensions, num_walkers * (num_steps - num_burn))``
"""
log_posterior = kwargs.get("log_posterior")
start = kwargs.get("start")
save_dims = kwargs.get("save_dims")
uid = kwargs.get("uid")
assert log_posterior is not None
assert start is not None
from emcee.utils import MPIPool
import emcee
try: # pragma: no cover
self.pool = MPIPool()
if not self.pool.is_master():
self.logger.info("Slave waiting")
self.master = False
self.pool.wait()
sys.exit(0)
else:
self.logger.info("MPIPool successful initialised and master found. "
"Running with %d cores." % self.pool.size)
except ImportError:
self.logger.info("mpi4py is not installed or not configured properly. "
"Ignore if running through python, not mpirun")
except ValueError as e: # pragma: no cover
self.logger.info("Unable to start MPI pool, expected normal python execution")
self.logger.info(str(e))
if callable(start):
num_dim = start().size
else:
num_dim = start.size
if self.num_walkers is None:
self.num_walkers = num_dim * 4
self.num_walkers = max(self.num_walkers, 20)
self.logger.debug("Fitting framework with %d dimensions" % num_dim)
self.logger.info("Using Ensemble Sampler")
sampler = emcee.EnsembleSampler(self.num_walkers, num_dim,
log_posterior,
pool=self.pool, live_dangerously=True)
emcee_wrapper = EmceeWrapper(sampler)
flat_chain = emcee_wrapper.run_chain(self.num_steps, self.num_burn,
self.num_walkers, num_dim,
start=start,
save_dim=save_dims,
temp_dir=self.temp_dir,
uid=uid,
save_interval=self.save_interval)
self.logger.debug("Fit finished")
if self.pool is not None: # pragma: no cover
self.pool.close()
self.logger.debug("Pool closed")
return {"chain": flat_chain}
Example 10: main
# Required import: from emcee.utils import MPIPool [as alias]
# Or: from emcee.utils.MPIPool import close [as alias]
#......... part of the code omitted here .........
features_filename = os.path.join(feature_loader.options.get("analysis","save_path"),"troubleshoot","features{0}_{1}.npy".format(nreal+1,output_string(feature_loader.feature_string)))
logging.info("Saving features for realization {0} to {1}...".format(nreal+1,features_filename))
np.save(features_filename,observed_feature[nreal])
#Find the maximum of the likelihood using ContourPlot functionality
contour = ContourPlot()
contour.getLikelihood(likelihood_cube)
contour.getUnitsFromOptions(feature_loader.options)
parameters_maximum = contour.getMaximum()
        parameter_keys = list(parameters_maximum.keys())
        parameter_keys.sort(key=contour.parameter_axes.get)
#Display the new best fit before exiting
best_fit_parameters = np.array([ parameters_maximum[par_key] for par_key in parameter_keys ])
best_fit_chi2 = analysis.chi2(best_fit_parameters,features_covariance=features_covariance,observed_feature=observed_feature[nreal])[0]
chi2_from_expected = analysis.chi2(np.array([0.26,-1.0,0.800]),features_covariance=features_covariance,observed_feature=observed_feature[nreal])[0]
logging.info("Best fit for realization {4} is [ {0[0]:.2f} {0[1]:.2f} {0[2]:.2f} ], chi2_best={1:.3f}({2} dof), chi2_expected={3:.3f}({2} dof)".format(best_fit_parameters,best_fit_chi2,analysis.training_set.shape[1],chi2_from_expected,nreal+1))
#Update global array with best fit parameters and corresponding chi2
best_fit_all[nreal-first_realization+1,:] = best_fit_parameters.copy()
chi2_all[nreal-first_realization+1] = best_fit_chi2
chi2_from_expected_all[nreal-first_realization+1] = chi2_from_expected
#######################################################################################################################################################################
#If option was selected, append the observation results to the mock ones, for comparison
if cmd_args.observation:
observed_feature = feature_loader.load_features(CFHTLens(root_path=feature_loader.options.get("observations","root_path")))[0]
chi_squared = analysis.chi2(points,observed_feature=observed_feature,features_covariance=features_covariance,pool=pool,split_chunks=split_chunks)
now = time.time()
logging.info("actual observation, chi2 calculations completed in {0:.1f}s".format(now-last_timestamp))
last_timestamp = now
#After chi2, compute the likelihood
likelihood_cube = analysis.likelihood(chi_squared.reshape(Om.shape + w.shape + si8.shape))
#Maybe save the likelihood cube?
if cmd_args.likelihood:
likelihood_filename = os.path.join(feature_loader.options.get("analysis","save_path"),"troubleshoot","likelihood_obs_{0}.npy".format(output_string(feature_loader.feature_string)))
logging.info("Saving likelihood cube to {0}...".format(likelihood_filename))
np.save(likelihood_filename,likelihood_cube)
#Maybe save the feature profiles?
if cmd_args.save_features:
features_filename = os.path.join(feature_loader.options.get("analysis","save_path"),"troubleshoot","features_obs_{0}.npy".format(output_string(feature_loader.feature_string)))
logging.info("Saving observed features to {0}...".format(features_filename))
np.save(features_filename,observed_feature)
#Find the maximum of the likelihood using ContourPlot functionality
contour = ContourPlot()
contour.getLikelihood(likelihood_cube)
contour.getUnitsFromOptions(feature_loader.options)
parameters_maximum = contour.getMaximum()
        parameter_keys = list(parameters_maximum.keys())
        parameter_keys.sort(key=contour.parameter_axes.get)
#Display the new best fit before exiting
best_fit_parameters = np.array([ parameters_maximum[par_key] for par_key in parameter_keys ])
best_fit_chi2 = analysis.chi2(best_fit_parameters,features_covariance=features_covariance,observed_feature=observed_feature)[0]
chi2_from_expected = analysis.chi2(np.array([0.26,-1.0,0.800]),features_covariance=features_covariance,observed_feature=observed_feature)[0]
logging.info("Best fit for observation is [ {0[0]:.2f} {0[1]:.2f} {0[2]:.2f} ], chi2_best={1:.3f}({2} dof), chi2_expected={3:.3f}({2} dof)".format(best_fit_parameters,best_fit_chi2,analysis.training_set.shape[1],chi2_from_expected))
#Update global array with best fit parameters and corresponding chi2
best_fit_all[-1,:] = best_fit_parameters.copy()
chi2_all[-1] = best_fit_chi2
chi2_from_expected_all[-1] = chi2_from_expected
#######################################################################################################################################################################
#Close MPI Pool
if pool is not None:
pool.close()
logging.info("Closed MPI Pool.")
if cmd_args.save:
#Save the best fit parameters for all realizations
best_fit_filename = os.path.join(feature_loader.options.get("analysis","save_path"),"troubleshoot","best_fit_all_{0}.npy".format(output_string(feature_loader.feature_string)))
logging.info("Saving best fit to {0}...".format(best_fit_filename))
np.save(best_fit_filename,best_fit_all)
#Save the best fit chi2 for all realizations
chi2_filename = os.path.join(feature_loader.options.get("analysis","save_path"),"troubleshoot","chi2_all_{0}.npy".format(output_string(feature_loader.feature_string)))
logging.info("Saving best fit chi2 to {0}...".format(chi2_filename))
np.save(chi2_filename,chi2_all)
#Save also the chi2 for the expected best fit
chi2_filename = os.path.join(feature_loader.options.get("analysis","save_path"),"troubleshoot","chi2_all_expected_{0}.npy".format(output_string(feature_loader.feature_string)))
logging.info("Saving expected chi2 to {0}...".format(chi2_filename))
np.save(chi2_filename,chi2_from_expected_all)
end = time.time()
logging.info("DONE!!")
logging.info("Completed in {0:.1f}s".format(end-start))
Example 11: main
# Required import: from emcee.utils import MPIPool [as alias]
# Or: from emcee.utils.MPIPool import close [as alias]
#......... part of the code omitted here .........
emulator_file = os.path.join(emulators_dir,"emulator{0}_{1}.p".format(cmd_args.prefix,output_string(feature_loader.feature_string)))
logging.info("Unpickling emulator from {0}...".format(emulator_file))
analysis = LikelihoodAnalysis.load(emulator_file)
#timestamp
now = time.time()
logging.info("emulator unpickled in {0:.1f}s".format(now-last_timestamp))
last_timestamp = now
##################################################################################################################################################
#Initialize random seed
np.random.seed(cmd_args.random)
realization = np.random.randint(0,1000)
#Treat the removed model as data
model_to_remove = all_simulated_models[cmd_args.remove]
parameters_to_remove = model_to_remove.squeeze()
logging.info("Treating model {0}, realization {1} as data, loading features...".format(model_to_remove,realization+1))
observed_feature = feature_loader.load_features(model_to_remove)[np.random.randint(0,1000)]
#Compute the chi2 for this observed feature without removing it from the emulator (must be close to 0)
logging.info("Chi2 before removal: {0[0]:.3f} ({1} dof)".format(analysis.chi2(parameters_to_remove,features_covariance=features_covariance,observed_feature=observed_feature),analysis.training_set.shape[1]))
#Remove the model from the emulator
remove_index = analysis.find(parameters_to_remove)[0]
logging.info("Removing model {0} with parameters {1} from emulator...".format(remove_index,analysis.parameter_set[remove_index]))
analysis.remove_model(remove_index)
#Retrain without the removed model
analysis.train()
#Compute the chi2 for this observed feature after removing it from the emulator (likely it's not 0 anymore)
logging.info("Chi2 after removal: {0[0]:.3f} ({1} dof)".format(analysis.chi2(parameters_to_remove,features_covariance=features_covariance,observed_feature=observed_feature),analysis.training_set.shape[1]))
####################################################################################################################
######################################Compute the chi2 cube#########################################################
####################################################################################################################
logging.info("Initializing chi2 meshgrid...")
#Set the points in parameter space on which to compute the chi2 (read from options)
Om = np.ogrid[feature_loader.options.getfloat("Omega_m","min"):feature_loader.options.getfloat("Omega_m","max"):feature_loader.options.getint("Omega_m","num_points")*1j]
w = np.ogrid[feature_loader.options.getfloat("w","min"):feature_loader.options.getfloat("w","max"):feature_loader.options.getint("w","num_points")*1j]
si8 = np.ogrid[feature_loader.options.getfloat("sigma8","min"):feature_loader.options.getfloat("sigma8","max"):feature_loader.options.getint("sigma8","num_points")*1j]
num_points = len(Om) * len(w) * len(si8)
points = np.array(np.meshgrid(Om,w,si8,indexing="ij")).reshape(3,num_points).transpose()
if cmd_args.save_points is not None:
logging.info("Saving points to {0}.npy".format(cmd_args.save_points.rstrip(".npy")))
np.save(cmd_args.save_points.rstrip(".npy")+".npy",points)
#Now compute the chi2 at each of these points
if pool:
split_chunks = pool.size
logging.info("Computing chi squared for {0} parameter combinations using {1} cores...".format(points.shape[0],pool.size))
else:
split_chunks = None
logging.info("Computing chi squared for {0} parameter combinations using 1 core...".format(points.shape[0]))
chi_squared = analysis.chi2(points,observed_feature=observed_feature,features_covariance=features_covariance,pool=pool,split_chunks=split_chunks)
#Close MPI Pool
if pool is not None:
pool.close()
logging.info("Closed MPI Pool.")
now = time.time()
logging.info("chi2 calculations completed in {0:.1f}s".format(now-last_timestamp))
last_timestamp = now
#Save output
likelihood_file = "likelihood_remove{0}_{1}.npy".format(cmd_args.remove,output_string(feature_loader.feature_string))
chi2_file = "chi2_remove{0}_{1}.npy".format(cmd_args.remove,output_string(feature_loader.feature_string))
logging.info("Saving chi2 to {0}".format(chi2_file))
np.save(chi2_file,chi_squared.reshape(Om.shape + w.shape + si8.shape))
logging.info("Saving full likelihood to {0}".format(likelihood_file))
likelihood_cube = analysis.likelihood(chi_squared.reshape(Om.shape + w.shape + si8.shape))
np.save(likelihood_file,likelihood_cube)
#Find the maximum of the likelihood using ContourPlot functionality
contour = ContourPlot()
contour.getLikelihood(likelihood_cube)
contour.getUnitsFromOptions(feature_loader.options)
parameters_maximum = contour.getMaximum()
    parameter_keys = list(parameters_maximum.keys())
    parameter_keys.sort(key=contour.parameter_axes.get)
#Display the new best fit before exiting
best_fit_parameters = [ parameters_maximum[par_key] for par_key in parameter_keys ]
logging.info("New best fit is [ {0[0]:.2f} {0[1]:.2f} {0[2]:.2f} ], chi2={1[0]:.3f}".format(best_fit_parameters,analysis.chi2(np.array(best_fit_parameters),features_covariance=features_covariance,observed_feature=observed_feature)))
#End
end = time.time()
logging.info("DONE!!")
logging.info("Completed in {0:.1f}s".format(end-start))
Example 12: runModel
# Required import: from emcee.utils import MPIPool [as alias]
# Or: from emcee.utils.MPIPool import close [as alias]
def runModel():
pool = MPIPool()
if not pool.is_master():
pool.wait()
sys.exit(0)
# pool=None
observation = simulateData()
nTrans = len(observation["spectype"])
ndim, nwalkers = 8 + nTrans, 1000
# mns = numpy.concatenate(([inputs.Om0, inputs.w0, inputs.rate_II_r, inputs.logL_snIa, inputs.sigma_snIa, \
# inputs.logL_snII,inputs.sigma_snII,inputs.Z], -.35*numpy.zeros(nTrans)))
sigs = numpy.concatenate(
(
[
0.1,
0.2,
0.1,
uncertainties.logL_snIa,
uncertainties.sigma_snIa,
uncertainties.logL_snII,
uncertainties.sigma_snII,
uncertainties.Z,
],
0.05 + numpy.zeros(nTrans),
)
)
p0 = []
for i in range(nwalkers):
dum = numpy.random.rand(nTrans)
dum = numpy.array(numpy.round(dum), dtype="int")
lnL_init = dum + (1 - dum) * 0.5
lnL_init = numpy.log(lnL_init)
mns = numpy.concatenate(
(
[
inputs.Om0,
inputs.w0,
inputs.rate_II_r,
inputs.logL_snIa,
inputs.sigma_snIa,
inputs.logL_snII,
inputs.sigma_snII,
inputs.Z,
],
lnL_init,
)
)
p0.append((numpy.random.rand(ndim) - 0.5) * sigs + mns)
# p0 = [numpy.random.randn(ndim)*sigs + mns for i in range(nwalkers)]
dco = 1e-11 # measurement error very small
sampler = emcee.EnsembleSampler(
nwalkers,
ndim,
lnprob,
args=[
observation["counts"],
observation["specz"],
numpy.zeros(nTrans) + dco,
observation["zprob"],
observation["spectype"],
],
pool=pool,
)
sampler.run_mcmc(p0, 2000)
pool.close()
output = open("data.pkl", "wb")
pickle.dump(sampler.chain, output)
output.close()
Example 13: main
# Required import: from emcee.utils import MPIPool [as alias]
# Or: from emcee.utils.MPIPool import close [as alias]
#......... part of the code omitted here .........
b_over_a = 0.107213
c = -0.821158
d = 0.828957
rc1 = 74.4
rc2 = 1.79
rcfrac = 0.992
det.SetTransferFunction(b_over_a, c, d, rc1, rc2, rcfrac)
tempIdx = -7
#and the remaining 4 are for the transfer function
fig_size = (20,10)
#Create a decent start guess by fitting waveform-by-waveform
wfFileName = "P42574A_12_fastandslow_oldwfs.npz"
# wfFileName = "P42574A_5_fast.npz"
if os.path.isfile(wfFileName):
data = np.load(wfFileName)
results = data['results']
wfs = data['wfs']
# wfs = wfs[::3]
# results = results[::3]
numWaveforms = wfs.size
else:
print "No saved waveforms available. Exiting."
exit(0)
#prep holders for each wf-specific param
r_arr = np.empty(numWaveforms)
phi_arr = np.empty(numWaveforms)
z_arr = np.empty(numWaveforms)
scale_arr = np.empty(numWaveforms)
t0_arr = np.empty(numWaveforms)
smooth_arr = np.ones(numWaveforms)*7.
simWfArr = np.empty((1,numWaveforms, fitSamples))
#Prepare the initial value arrays
for (idx, wf) in enumerate(wfs):
wf.WindowWaveformTimepoint(fallPercentage=.97, rmsMult=2,)
r_arr[idx], phi_arr[idx], z_arr[idx], scale_arr[idx], t0_arr[idx], smooth_arr[idx] = results[idx]['x']
# t0_arr[idx] -= 15
#Initialize the multithreading
# p = Pool(numThreads, initializer=initializeDetectorAndWaveforms, initargs=[det, wfs])
initializeDetectorAndWaveforms(det, wfs)
p = MPIPool()
if not p.is_master():
p.wait()
sys.exit(0)
#Do the MCMC
mcmc_startguess = np.hstack((r_arr[:], phi_arr[:], z_arr[:], scale_arr[:], t0_arr[:], smooth_arr[:], # waveform-specific params
tempGuess, b_over_a, c, d, rc1, rc2, rcfrac)) # detector-specific
#number of walkers _must_ be even
if nwalkers % 2:
nwalkers +=1
pos0 = [mcmc_startguess + 1e-2*np.random.randn(ndim)*mcmc_startguess for i in range(nwalkers)]
rc1idx = -3
rc2idx = -2
rcfracidx = -1
#Make sure everything in the initial guess is within bounds
for pos in pos0:
pos[:numWaveforms] = np.clip( pos[:numWaveforms], 0, np.floor(det.detector_radius*10.)/10.)
pos[numWaveforms:2*numWaveforms] = np.clip(pos[numWaveforms:2*numWaveforms], 0, np.pi/4)
pos[2*numWaveforms:3*numWaveforms] = np.clip(pos[2*numWaveforms:3*numWaveforms], 0, np.floor(det.detector_length*10.)/10.)
pos[4*numWaveforms:5*numWaveforms] = np.clip(pos[4*numWaveforms:5*numWaveforms], 0, fitSamples)
pos[5*numWaveforms:6*numWaveforms] = np.clip(pos[5*numWaveforms:6*numWaveforms], 0, 20.)
pos[tempIdx] = np.clip(pos[tempIdx], 40, 120)
pos[rcfracidx] = np.clip(pos[rcfracidx], 0, 1)
pos[rc2idx] = np.clip(pos[rc2idx], 0, np.inf)
pos[rc1idx] = np.clip(pos[rc1idx], 0, np.inf)
prior = lnprior(pos,)
if not np.isfinite(prior) :
print "BAD PRIOR WITH START GUESS YOURE KILLING ME SMALLS"
print pos
exit(0)
#Initialize, run the MCMC
sampler = emcee.EnsembleSampler( nwalkers, ndim, lnprob, pool=p)
#w/ progress bar, & time the thing
bar = ProgressBar(widgets=[Percentage(), Bar(), ETA()], maxval=iter).start()
for (idx,result) in enumerate(sampler.sample(pos0, iterations=iter, storechain=True)):
bar.update(idx+1)
bar.finish()
p.close()
print "Dumping chain to file..."
np.save("mpisampler_%dwfs.npy" % numWaveforms, sampler.chain)
Example 14: run_emcee_seeded
# Required import: from emcee.utils import MPIPool [as alias]
# Or: from emcee.utils.MPIPool import close [as alias]
def run_emcee_seeded(light_curve, transit_params, spot_parameters, n_steps,
n_walkers, output_path, burnin=0.7,
n_extra_spots=1, skip_priors=False):
"""
Fit for transit depth and spot parameters given initial guess informed by
results from `peak_finder`
Parameters
----------
light_curve : `friedrich.lightcurve.TransitLightCurve`
Light curve to fit
transit_params : `~batman.TransitParams`
Transit light curve parameters
spot_parameters : list
List of all spot parameters in [amp, t0, sig, amp, t0, sig, ...] order
n_steps : int
Number of MCMC steps to take
n_walkers : int
Number of MCMC walkers to initialize (must be even, more than twice the
number of free params in fit)
output_path : str
Path to HDF5 archive output for storing results
burnin : float
Fraction of total number of steps to save to output (will truncate
the first `burnin` of the light curve)
n_extra_spots : int
Add `n_extra_spots` extra spots to the fit to soak up spots not
predicted by `peak_finder`
skip_priors : bool
Should a prior be applied to the depth parameter?
Returns
-------
sampler : `emcee.EnsembleSampler`
Sampler object returned by `emcee`
"""
times = light_curve.times.jd
fluxes = light_curve.fluxes
errors = light_curve.errors
lower_t_bound, upper_t_bound = get_in_transit_bounds(times, transit_params)
amps = spot_parameters[::3]
init_depth = transit_params.rp**2
extra_spot_params = [0.1*np.min(amps), np.mean(times),
0.05*(upper_t_bound-lower_t_bound)]
fit_params = np.concatenate([spot_parameters,
n_extra_spots*extra_spot_params])
ndim, nwalkers = len(fit_params), n_walkers
pos = []
while len(pos) < nwalkers:
realization = fit_params + 1e-5*np.random.randn(ndim)
if not np.isinf(lnprior(realization, fluxes, lower_t_bound,
upper_t_bound, transit_params, skip_priors)):
pos.append(realization)
print('Begin MCMC...')
pool = MPIPool(loadbalance=True)
if not pool.is_master():
pool.wait()
sys.exit(0)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob,
args=(times, fluxes, errors, lower_t_bound,
upper_t_bound, transit_params,
skip_priors),
pool=pool)
sampler.run_mcmc(pos, n_steps)
print('Finished MCMC...')
pool.close()
burnin_len = int(burnin*n_steps)
from .storage import create_results_archive
create_results_archive(output_path, light_curve, sampler, burnin_len, ndim)
return sampler
Example 15: len
# Required import: from emcee.utils import MPIPool [as alias]
# Or: from emcee.utils.MPIPool import close [as alias]
    while jj < len(y):
        print('icount, jj', icount, jj)
iipix_mask,iipix = WLanalysis.coords2grid(x[jj:jj+istep], y[jj:jj+istep], idata.flatten().reshape(1,-1)[:,jj:jj+istep], size=sizes[Wx-1])
ipix_mask += iipix_mask
ipix += iipix
jj+=istep
    print(icount, 'W%i done coords2grid %s' % (Wx, icount))  # ,time.strftime("%Y-%m-%d %H:%M")
save(mask_dir+'smaller/weight0_W%i_%i_numpix'%(Wx,icount), ipix)
save(mask_dir+'smaller/weight0_W%i_%i_nummask'%(Wx,icount), ipix_mask)
#ipix is the num. of pixels fall in that big pix, ipix_mask is the mask
return ipix, ipix_mask
p = MPIPool()
if not p.is_master():
p.wait()
sys.exit(0)
#p.map(partialdata2grid, range(63))
ismall_map=p.map(partialdata2grid, range(63))
small_map = sum(array(ismall_map),axis=0)
save(mask_dir+'weight0_W%i_smaller_mask.npy'%(Wx),small_map)
weight=1-small_map[1]/small_map[0]
weight[isnan(weight)]=0
save(mask_dir+'ludoweight_weight0_W%i.npy'%Wx, weight)
mask=weight/weight
mask[isnan(mask)]=0
save(mask_dir+'ludomask_weight0_W%i.npy'%Wx, mask)
p.close()