This page collects typical usage examples of the Python method lmfit.Minimizer.emcee. If you are unsure what Minimizer.emcee does or how to use it, the curated code examples below may help. You can also explore further usage examples of the class this method belongs to, lmfit.Minimizer.
Six code examples of Minimizer.emcee are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
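Before the individual examples, the following minimal, self-contained sketch shows the typical Minimizer.emcee call pattern. The model, data, and parameter names (amp, decay) are invented for illustration only, and the emcee package must be installed for the sampler to run:

import numpy as np
from lmfit import Minimizer, Parameters

# Synthetic data: a single decaying exponential with Gaussian noise
np.random.seed(0)
x = np.linspace(0, 10, 200)
y = 2.5 * np.exp(-0.4 * x) + np.random.normal(scale=0.05, size=x.size)

def residual(params, x, data):
    # Returning a residual array: Minimizer.emcee then uses
    # -0.5 * sum(residual**2) as the log-posterior probability
    v = params.valuesdict()
    return (v['amp'] * np.exp(-v['decay'] * x) - data) / 0.05

params = Parameters()
params.add('amp', value=1.0, min=0, max=10)    # finite bounds act as uniform priors
params.add('decay', value=0.1, min=0, max=2)

mini = Minimizer(residual, params, fcn_args=(x, y))
result = mini.emcee(nwalkers=50, steps=500, burn=100, thin=5)
result.params.pretty_print()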
Example 1: fit_single_line
# Required module import: from lmfit import Minimizer [as alias]
# Or: from lmfit.Minimizer import emcee [as alias]
def fit_single_line(self, x, y, zero_lev, err_continuum, fitting_parameters, bootstrap_iterations=1000):
    # Simple fit
    if self.fit_dict['MC_iterations'] == 1:
        fit_output = lmfit_minimize(residual_gauss, fitting_parameters, args=(x, y, zero_lev, err_continuum))
        self.fit_dict['area_intg'] = simps(y, x) - simps(zero_lev, x)
        self.fit_dict['area_intg_err'] = 0.0
    # Bootstrap
    else:
        mini_posterior = Minimizer(lnprob_gaussCurve, fitting_parameters, fcn_args=([x, y, zero_lev, err_continuum]))
        fit_output = mini_posterior.emcee(steps=200, params=fitting_parameters)
        # Bootstrap for the area of the lines
        area_array = empty(bootstrap_iterations)
        len_x_array = len(x)
        for i in range(bootstrap_iterations):
            y_new = y + np_normal_dist(0.0, err_continuum, len_x_array)
            area_array[i] = simps(y_new, x) - simps(zero_lev, x)
        self.fit_dict['area_intg'] = mean(area_array)
        self.fit_dict['area_intg_err'] = std(area_array)
    # Store the fitting parameters
    output_params = fit_output.params
    for key in self.fit_dict['parameters_list']:
        self.fit_dict[key + '_norm'] = output_params[key].value
        self.fit_dict[key + '_norm_er'] = output_params[key].stderr
    return
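The helper lnprob_gaussCurve passed to Minimizer above is not reproduced on this page. A hypothetical sketch of such a log-probability function, assuming a single Gaussian (parameters A, mu, sigma) on top of the continuum zero_lev, could look like this; Minimizer.emcee treats a returned float as the log-posterior probability:

import numpy as np

def lnprob_gauss_curve(params, x, y, zero_lev, err_continuum):
    # Hypothetical stand-in for lnprob_gaussCurve; the parameter names
    # A, mu, sigma are assumptions, not taken from the original source
    v = params.valuesdict()
    model = zero_lev + v['A'] * np.exp(-0.5 * ((x - v['mu']) / v['sigma'])**2)
    # Gaussian log-likelihood returned as a float
    # (interpreted by Minimizer.emcee as the log-posterior)
    return -0.5 * np.sum(((y - model) / err_continuum)**2)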
Example 2: __call__
# Required module import: from lmfit import Minimizer [as alias]
# Or: from lmfit.Minimizer import emcee [as alias]
def __call__(self):
    # out = minimize(self.residual,
    #                self.params,
    #                scale_covar = False
    #                # method = 'cg'
    #                )
    mini = Minimizer(self.residual, self.params)
    out = mini.emcee(burn=10000, steps=60000, thin=1, workers=1, params=self.params)
    self.H0 = 10**(out.params['a_nu'].value + 5 +
                   0.2 * (out.params['m04258'].value - 5*log10(out.params['mu_geometric'].value) - 25))
    # print 5*log10(out.params['mu_geometric'].value) + 25
    self.e_H0 = model.H0 * sqrt((out.params['a_nu'].stderr * log(10))**2
                                + (log(10)/5 * out.params['m04258'].stderr)**2
                                + (out.params['mu_geometric'].stderr/out.params['mu_geometric'].value)**2)
    return out
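Example 2 propagates uncertainties through the stderr attributes of the fitted parameters. As an alternative, the posterior chain returned by Minimizer.emcee can be summarized directly; the sketch below assumes an emcee result object such as out above (result.flatchain is a pandas DataFrame of the flattened walker chain):

import numpy as np

def posterior_summary(result):
    # Median and approximate 1-sigma half-width for each varying parameter,
    # taken from the flattened emcee chain
    summary = {}
    for name in result.var_names:
        samples = result.flatchain[name]
        lo, med, hi = np.percentile(samples, [15.87, 50.0, 84.13])
        summary[name] = (med, 0.5 * (hi - lo))
    return summary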
Example 3: setup
# Required module import: from lmfit import Minimizer [as alias]
# Or: from lmfit.Minimizer import emcee [as alias]
class MinimizerClassSuite:
    """
    Benchmarks for the Minimizer class
    """
    def setup(self):
        self.x = np.linspace(1, 10, 250)
        np.random.seed(0)
        self.y = (3.0 * np.exp(-self.x / 2)
                  - 5.0 * np.exp(-(self.x - 0.1) / 10.)
                  + 0.1 * np.random.randn(len(self.x)))

        self.p = Parameters()
        self.p.add_many(('a1', 4., True, 0., 10.),
                        ('a2', 4., True, -10., 10.),
                        ('t1', 3., True, 0.01, 10.),
                        ('t2', 3., True, 0.01, 20.))

        self.p_emcee = deepcopy(self.p)
        self.p_emcee.add('noise', 0.2, True, 0.001, 1.)

        self.mini_de = Minimizer(Minimizer_Residual,
                                 self.p,
                                 fcn_args=(self.x, self.y),
                                 kws={'seed': 1,
                                      'polish': False,
                                      'maxiter': 100})

        self.mini_emcee = Minimizer(Minimizer_lnprob,
                                    self.p_emcee,
                                    fcn_args=(self.x, self.y))

    def time_differential_evolution(self):
        self.mini_de.minimize(method='differential_evolution')

    def time_emcee(self):
        self.mini_emcee.emcee(self.p_emcee, steps=100, seed=1)
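The objective functions Minimizer_Residual and Minimizer_lnprob used by this benchmark are defined elsewhere and not shown. Rough sketches consistent with the synthetic double-exponential data built in setup() might look like the following; the exact model form and the normalization of the log-likelihood are assumptions:

import numpy as np

def minimizer_residual(params, x, data):
    # Double-exponential model; the roles of a1, a2, t1, t2 mirror setup(),
    # but the exact functional form is an assumption
    v = params.valuesdict()
    model = v['a1'] * np.exp(-x / v['t1']) + v['a2'] * np.exp(-(x - 0.1) / v['t2'])
    return model - data

def minimizer_lnprob(params, x, data):
    # Gaussian log-likelihood with a fitted noise scale, returned as a float
    # so that Minimizer.emcee interprets it as the log-posterior
    noise = params['noise'].value
    resid = minimizer_residual(params, x, data)
    return -0.5 * np.sum((resid / noise)**2 + np.log(2 * np.pi * noise**2))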
Example 4: CommonMinimizerTest
# Required module import: from lmfit import Minimizer [as alias]
# Or: from lmfit.Minimizer import emcee [as alias]
#......... part of the code omitted here .........
        major, minor, _micro = scipy_version.split('.', 2)
        for method in SCALAR_METHODS:
            if (method == 'differential_evolution' and int(major) > 0 and
                    int(minor) >= 2):
                pytest.raises(RuntimeError, self.mini.scalar_minimize,
                              SCALAR_METHODS[method])
            else:
                pytest.raises(ValueError, self.mini.scalar_minimize,
                              SCALAR_METHODS[method])

        pytest.raises(ValueError, self.mini.minimize)

        # now check that the fit proceeds if nan_policy is 'omit'
        self.mini.nan_policy = 'omit'
        res = self.mini.minimize()
        assert_equal(res.ndata, np.size(self.data, 0) - 1)

        for para, true_para in zip(res.params.values(), self.p_true.values()):
            check_wo_stderr(para, true_para.value, sig=0.15)

    def test_nan_policy_function(self):
        a = np.array([0, 1, 2, 3, np.nan])
        pytest.raises(ValueError, _nan_policy, a)
        assert_(np.isnan(_nan_policy(a, nan_policy='propagate')[-1]))
        assert_equal(_nan_policy(a, nan_policy='omit'), [0, 1, 2, 3])

        a[-1] = np.inf
        pytest.raises(ValueError, _nan_policy, a)
        assert_(np.isposinf(_nan_policy(a, nan_policy='propagate')[-1]))
        assert_equal(_nan_policy(a, nan_policy='omit'), [0, 1, 2, 3])
        assert_equal(_nan_policy(a, handle_inf=False), a)

    @dec.slow
    def test_emcee(self):
        # test emcee
        if not HAS_EMCEE:
            return True
        np.random.seed(123456)
        out = self.mini.emcee(nwalkers=100, steps=200, burn=50, thin=10)
        check_paras(out.params, self.p_true, sig=3)

    @dec.slow
    def test_emcee_method_kwarg(self):
        # test with emcee as method keyword argument
        if not HAS_EMCEE:
            return True
        np.random.seed(123456)
        out = self.mini.minimize(method='emcee', nwalkers=100, steps=200,
                                 burn=50, thin=10)
        assert out.method == 'emcee'
        assert out.nfev == 100*200
        check_paras(out.params, self.p_true, sig=3)

    @dec.slow
    def test_emcee_PT(self):
        # test emcee with parallel tempering
        if not HAS_EMCEE:
            return True
        np.random.seed(123456)
        self.mini.userfcn = residual_for_multiprocessing
        out = self.mini.emcee(ntemps=4, nwalkers=50, steps=200,
Example 5: fit_blended_line_emcee
# Required module import: from lmfit import Minimizer [as alias]
# Or: from lmfit.Minimizer import emcee [as alias]
def fit_blended_line_emcee(self, x, y, zero_lev, err_continuum, Ncomps, fitting_parameters, add_wide_component, fitting_parameters_wide, bootstrap_iterations=1000, MC_iterations=200):
    # ---First we integrate the brute area with all the components
    if self.fit_dict['MC_iterations'] == 1:
        self.fit_dict['area_intg'] = simps(y, x) - simps(zero_lev, x)
        self.fit_dict['area_intg_err'] = 0.0
    else:
        area_array = empty(bootstrap_iterations)
        len_x_array = len(x)
        for i in range(bootstrap_iterations):
            y_new = y + np_normal_dist(0.0, err_continuum, len_x_array)
            area_array[i] = simps(y_new, x) - simps(zero_lev, x)
        self.fit_dict['area_intg'] = mean(area_array)
        self.fit_dict['area_intg_err'] = std(area_array)

    # ---Second we proceed to analyze as gaussian components
    idcs_components = map(str, range(Ncomps))
    mini_posterior = Minimizer(lnprob_gaussMix, fitting_parameters, fcn_args=([x, y, zero_lev, err_continuum, idcs_components]), method='powell')
    fit_output = mini_posterior.emcee(steps=MC_iterations, params=fitting_parameters)
    output_params = fit_output.params

    if add_wide_component:  # This is currently only valid for Halpha
        sigma_limit = output_params['sigma1'].value
        limit_0, limit_1 = 6548.05 - self.fit_dict['x_scaler'] - sigma_limit * 1.5, 6548.05 - self.fit_dict['x_scaler'] + sigma_limit * 1.5
        limit_2, limit_3 = 0 - sigma_limit * 4, 0 + sigma_limit * 4
        limit_4, limit_5 = 6583.46 - self.fit_dict['x_scaler'] - sigma_limit * 3, 6583.46 - self.fit_dict['x_scaler'] + sigma_limit * 3

        # Get the wide component area
        indeces = ((x >= limit_0) & (x <= limit_1)) + ((x >= limit_2) & (x <= limit_3)) + ((x >= limit_4) & (x <= limit_5))
        mask = invert(indeces)
        x_wide, y_wide, zero_wide = x[mask], y[mask], zero_lev[mask]
        Ncomps_wide = ['3']

        # Fit the wide component without the narrow component
        mini_posterior_wide = Minimizer(lnprob_gaussMix, fitting_parameters_wide, fcn_args=([x_wide, y_wide, zero_wide, err_continuum, Ncomps_wide]), method='powell')
        fit_output_wide = mini_posterior_wide.emcee(steps=MC_iterations, params=fitting_parameters_wide)
        output_params_wide = fit_output_wide.params

        # Calculate the wide component curve
        y_wide_fit = gaussian_mixture(output_params_wide.valuesdict(), x, zero_lev, Ncomps_wide)

        # Calculate the emission line region again
        y_pure_narrow = y - y_wide_fit + zero_lev

        # Fit the narrow components again
        mini_posterior = Minimizer(lnprob_gaussMix, fitting_parameters, fcn_args=([x, y_pure_narrow, zero_lev, err_continuum, idcs_components]), method='powell')
        fit_output_narrow = mini_posterior.emcee(steps=MC_iterations, params=fitting_parameters)
        output_params_narrow = fit_output_narrow.params

        # Combine the results from both fits
        output_params = output_params_narrow + output_params_wide

        # Add the wide component to the fit we are performing
        self.fit_dict.line_number = self.fit_dict.line_number + 1

    for key in self.fit_dict['parameters_list']:
        self.fit_dict[key + '_norm'] = output_params[key].value if output_params[key].value is not None else np_nan
        self.fit_dict[key + '_norm_er'] = output_params[key].stderr if output_params[key].stderr is not None else np_nan
    return
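The helpers lnprob_gaussMix and gaussian_mixture used above are not reproduced on this page. A hypothetical sketch, assuming each component index i carries parameters named A<i>, mu<i> and sigma<i> (consistent with the sigma1 lookup above), could be written as:

import numpy as np

def gaussian_mixture_sketch(param_values, x, zero_lev, component_ids):
    # Sum of Gaussian components on top of the continuum; the A/mu/sigma
    # naming convention is an assumption made for illustration
    model = np.asarray(zero_lev, dtype=float)
    for i in component_ids:
        A, mu, sigma = (param_values['A' + i], param_values['mu' + i],
                        param_values['sigma' + i])
        model = model + A * np.exp(-0.5 * ((x - mu) / sigma)**2)
    return model

def lnprob_gauss_mix_sketch(params, x, y, zero_lev, err_continuum, component_ids):
    # Float return value: treated by Minimizer.emcee as the log-posterior
    model = gaussian_mixture_sketch(params.valuesdict(), x, zero_lev, component_ids)
    return -0.5 * np.sum(((y - model) / err_continuum)**2)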
Example 6: CommonMinimizerTest
# Required module import: from lmfit import Minimizer [as alias]
# Or: from lmfit.Minimizer import emcee [as alias]
class CommonMinimizerTest(unittest.TestCase):

    def setUp(self):
        """
        test scalar minimizers except newton-cg (needs jacobian) and
        anneal (doesn't work out of the box).
        """
        p_true = Parameters()
        p_true.add('amp', value=14.0)
        p_true.add('period', value=5.33)
        p_true.add('shift', value=0.123)
        p_true.add('decay', value=0.010)
        self.p_true = p_true

        n = 2500
        xmin = 0.
        xmax = 250.0
        noise = np.random.normal(scale=0.7215, size=n)
        self.x = np.linspace(xmin, xmax, n)
        self.data = self.residual(p_true, self.x) + noise

        fit_params = Parameters()
        fit_params.add('amp', value=11.0, min=5, max=20)
        fit_params.add('period', value=5., min=1., max=7)
        fit_params.add('shift', value=.10, min=0.0, max=0.2)
        fit_params.add('decay', value=6.e-3, min=0, max=0.1)
        self.fit_params = fit_params

        self.mini = Minimizer(self.residual, fit_params, [self.x, self.data])

    def residual(self, pars, x, data=None):
        amp = pars['amp'].value
        per = pars['period'].value
        shift = pars['shift'].value
        decay = pars['decay'].value

        if abs(shift) > pi/2:
            shift = shift - np.sign(shift) * pi
        model = amp*np.sin(shift + x/per) * np.exp(-x*x*decay*decay)
        if data is None:
            return model
        return model - data

    def test_diffev_bounds_check(self):
        # You need finite (min, max) for each parameter if you're using
        # differential_evolution.
        self.fit_params['decay'].min = None
        self.minimizer = 'differential_evolution'
        np.testing.assert_raises(ValueError, self.scalar_minimizer)

    def test_scalar_minimizers(self):
        # test all the scalar minimizers
        for method in SCALAR_METHODS:
            if method in ['newton', 'dogleg', 'trust-ncg']:
                continue
            self.minimizer = SCALAR_METHODS[method]
            if method == 'Nelder-Mead':
                sig = 0.2
            else:
                sig = 0.15
            self.scalar_minimizer(sig=sig)

    def scalar_minimizer(self, sig=0.15):
        try:
            from scipy.optimize import minimize as scipy_minimize
        except ImportError:
            raise SkipTest

        print(self.minimizer)
        out = self.mini.scalar_minimize(method=self.minimizer)
        self.residual(out.params, self.x)

        for name, par in out.params.items():
            nout = "%s:%s" % (name, ' '*(20-len(name)))
            print("%s: %s (%s) " % (nout, par.value, self.p_true[name].value))

        for para, true_para in zip(out.params.values(),
                                    self.p_true.values()):
            check_wo_stderr(para, true_para.value, sig=sig)

    @decorators.slow
    def test_emcee(self):
        # test emcee
        if not HAS_EMCEE:
            return True
        np.random.seed(123456)
        out = self.mini.emcee(nwalkers=100, steps=200,
                              burn=50, thin=10)
        check_paras(out.params, self.p_true, sig=3)

    @decorators.slow
    def test_emcee_PT(self):
        # test emcee with parallel tempering
        if not HAS_EMCEE:
            return True
        np.random.seed(123456)
#......... part of the code omitted here .........