This page collects typical usage examples of the Python method lmfit.Minimizer.leastsq. If you have been wondering what Minimizer.leastsq does and how to use it, the curated code examples below may help. You can also read more about the class it belongs to, lmfit.Minimizer.
Below are 15 code examples of Minimizer.leastsq, sorted by popularity by default.
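Before the examples, a minimal sketch of the typical Minimizer.leastsq workflow may be useful: write a residual function of a Parameters object, build a Minimizer, and call leastsq(). The straight-line model, the parameter names slope and offset, and the synthetic data below are illustrative assumptions, not taken from the examples that follow. Note also that, depending on the lmfit version, leastsq() either returns a result object (as assumed here) or updates the Parameters in place; both patterns appear in the examples below.
import numpy as np
from lmfit import Minimizer, Parameters, report_fit

def residual(params, x, data):
    # model minus data; leastsq minimizes the sum of squares of this array
    model = params['slope'].value * x + params['offset'].value
    return model - data

x = np.linspace(0, 10, 101)
data = 2.5 * x + 1.0 + np.random.normal(scale=0.3, size=x.size)

params = Parameters()
params.add('slope', value=1.0)
params.add('offset', value=0.0)

mini = Minimizer(residual, params, fcn_args=(x, data))
result = mini.leastsq()      # Levenberg-Marquardt least-squares fit
report_fit(result.params)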
Example 1: LmModel
# Required import: from lmfit import Minimizer
# Method used: Minimizer.leastsq
from lmfit import Minimizer, Parameters


class LmModel(object):
    """
    Base class for all models.
    Models take x and y arrays and return a fitted model of y.
    """
    def __init__(self, x, y):
        self.x, self.y = x, y
        self.parameters = Parameters()
        self.min = Minimizer(self.residual, self.parameters)

    def print_para(self):
        for para in self.parameters.values():
            print(para)

    def func(self, paras):
        raise NotImplementedError

    def est_startvals(self):
        raise NotImplementedError

    def residual(self, paras):
        return self.func(paras) - self.y

    def fit(self):
        self.min.leastsq()
        self.y_model = self.func(self.parameters)
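As a usage illustration that is not part of the original example, a hypothetical subclass of LmModel might look like the sketch below; the LineModel name, the straight-line model, and the parameter names m and b are assumptions chosen here for demonstration.
class LineModel(LmModel):
    """Hypothetical concrete model: y = m*x + b."""
    def est_startvals(self):
        # parameters are added to the same Parameters object the Minimizer holds
        self.parameters.add('m', value=1.0)
        self.parameters.add('b', value=0.0)

    def func(self, paras):
        return paras['m'].value * self.x + paras['b'].value

# model = LineModel(x, y)
# model.est_startvals()
# model.fit()          # runs Minimizer.leastsq and stores model.y_model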
Example 2: NIST_Test
# Required import: from lmfit import Minimizer
# Method used: Minimizer.leastsq
def NIST_Test(DataSet, start='start2', plot=True):
    NISTdata = ReadNistData(DataSet)
    resid, npar, dimx = Models[DataSet]
    y = NISTdata['y']
    x = NISTdata['x']

    params = Parameters()
    for i in range(npar):
        pname = 'b%i' % (i+1)
        cval = NISTdata['cert_values'][i]
        cerr = NISTdata['cert_stderr'][i]
        pval1 = NISTdata[start][i]
        params.add(pname, value=pval1)

    myfit = Minimizer(resid, params, fcn_args=(x,), fcn_kws={'y': y},
                      scale_covar=True)
    myfit.prepare_fit()
    myfit.leastsq()

    digs = Compare_NIST_Results(DataSet, myfit, params, NISTdata)

    if plot and HASPYLAB:
        fit = -resid(params, x)
        pylab.plot(x, y, 'r+')
        pylab.plot(x, fit, 'ko--')
        pylab.show()

    return digs > 2
Example 3: polyfit
# Required import: from lmfit import Minimizer
# Method used: Minimizer.leastsq
import numpy as np
from lmfit import Minimizer, Parameters


def polyfit(x, y, u=None):
    '''Determine the weighted least-squares fit for a 2nd-order polynomial.'''
    x = np.asarray(x)
    y = np.asarray(y)
    if u is not None:
        u[u == 0] = 1
        weight = 1./np.asarray(u)
    else:
        weight = np.ones_like(x)

    params = Parameters()
    params.add('a', value=0)
    params.add('b', value=(y.max()-y.min())/(x.max()-x.min()))
    params.add('c', value=0.0)

    def residual(pars, x, data=None, w=None):
        model = pars['a'].value + pars['b'].value*x + pars['c'].value*x**2
        if data is None:
            return model
        return (model - data)  # * w

    myfit = Minimizer(residual, params, fcn_args=(x,),
                      fcn_kws={'data': y, 'w': weight})
    myfit.leastsq()
    return [params['c'].value, params['b'].value, params['a'].value]
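A brief usage sketch for the polyfit function above; the synthetic quadratic data are illustrative assumptions, and note that the returned coefficients are ordered [c, b, a] for the model a + b*x + c*x**2.
x = np.linspace(-3, 3, 50)
y = 0.5*x**2 - 1.2*x + 2.0 + np.random.normal(scale=0.1, size=x.size)
c, b, a = polyfit(x, y)   # no uncertainties given, so an unweighted fit
print(a, b, c)            # expected to be close to 2.0, -1.2, 0.5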
Example 4: autobk
# Required import: from lmfit import Minimizer
# Method used: Minimizer.leastsq
def autobk(energy, mu, rbkg=1, nknots=None, group=None, e0=None,
           kmin=0, kmax=None, kw=1, dk=0, win=None, vary_e0=True,
           chi_std=None, nfft=2048, kstep=0.05, _larch=None):
    if _larch is None:
        raise Warning("cannot calculate autobk spline -- larch broken?")

    # get array indices for rbkg and e0: irbkg, ie0
    rgrid = np.pi/(kstep*nfft)
    if rbkg < 2*rgrid:
        rbkg = 2*rgrid
    irbkg = int(1.01 + rbkg/rgrid)
    if e0 is None:
        e0 = find_e0(energy, mu, group=group, _larch=_larch)
    ie0 = _index_nearest(energy, e0)

    # save ungridded k (kraw) and gridded k (kout)
    # and ftwin (*k-weighting) for FT in residual
    kraw = np.sqrt(ETOK*(energy[ie0:] - e0))
    if kmax is None:
        kmax = max(kraw)
    kout = kstep * np.arange(int(1.01 + kmax/kstep))
    ftwin = kout**kw * ftwindow(kout, xmin=kmin, xmax=kmax,
                                window=win, dx=dk)

    # calc k-values and initial guesses for y-values of spline params
    nspline = max(4, min(60, 2*int(rbkg*(kmax-kmin)/np.pi) + 1))
    spl_y = np.zeros(nspline)
    spl_k = np.zeros(nspline)
    for i in range(nspline):
        q = kmin + i*(kmax-kmin)/(nspline - 1)
        ik = _index_nearest(kraw, q)
        i1 = min(len(kraw)-1, ik + 5)
        i2 = max(0, ik - 5)
        spl_k[i] = kraw[ik]
        spl_y[i] = (2*mu[ik] + mu[i1] + mu[i2]) / 4.0

    # get spline representation: knots, coefs, order=3
    # coefs will be varied in fit.
    knots, coefs, order = splrep(spl_k, spl_y)

    # set fit parameters from initial coefficients
    fparams = Parameters()
    for i, v in enumerate(coefs):
        fparams.add("c%i" % i, value=v, vary=i < len(spl_y))

    fitkws = dict(knots=knots, order=order, kraw=kraw, mu=mu[ie0:],
                  irbkg=irbkg, kout=kout, ftwin=ftwin, nfft=nfft)
    # do fit
    fit = Minimizer(__resid, fparams, fcn_kws=fitkws)
    fit.leastsq()

    # write final results
    coefs = [p.value for p in fparams.values()]
    bkg, chi = spline_eval(kraw, mu[ie0:], knots, coefs, order, kout)
    obkg = np.zeros(len(mu))
    obkg[:ie0] = mu[:ie0]
    obkg[ie0:] = bkg
    if _larch.symtable.isgroup(group):
        setattr(group, 'bkg', obkg)
        setattr(group, 'chie', mu-obkg)
        setattr(group, 'k', kout)
        setattr(group, 'chi', chi)
Example 5: test_peakfit
# Required import: from lmfit import Minimizer
# Method used: Minimizer.leastsq
def test_peakfit():
    from lmfit.utilfuncs import gaussian

    def residual(pars, x, data=None):
        g1 = gaussian(x, pars['a1'].value, pars['c1'].value, pars['w1'].value)
        g2 = gaussian(x, pars['a2'].value, pars['c2'].value, pars['w2'].value)
        model = g1 + g2
        if data is None:
            return model
        return (model - data)

    n = 601
    xmin = 0.
    xmax = 15.0
    noise = np.random.normal(scale=.65, size=n)
    x = np.linspace(xmin, xmax, n)

    org_params = Parameters()
    org_params.add_many(('a1', 12.0, True, None, None, None),
                        ('c1', 5.3, True, None, None, None),
                        ('w1', 1.0, True, None, None, None),
                        ('a2', 9.1, True, None, None, None),
                        ('c2', 8.1, True, None, None, None),
                        ('w2', 2.5, True, None, None, None))

    data = residual(org_params, x) + noise

    fit_params = Parameters()
    fit_params.add_many(('a1', 8.0, True, None, 14., None),
                        ('c1', 5.0, True, None, None, None),
                        ('w1', 0.7, True, None, None, None),
                        ('a2', 3.1, True, None, None, None),
                        ('c2', 8.8, True, None, None, None))
    fit_params.add('w2', expr='2.5*w1')

    myfit = Minimizer(residual, fit_params,
                      fcn_args=(x,), fcn_kws={'data': data})
    myfit.prepare_fit()
    init = residual(fit_params, x)
    myfit.leastsq()

    print(' N fev = ', myfit.nfev)
    print(myfit.chisqr, myfit.redchi, myfit.nfree)

    report_fit(fit_params)
    fit = residual(fit_params, x)
    check_paras(fit_params, org_params)
Example 6: __FitEvent
# Required import: from lmfit import Minimizer
# Method used: Minimizer.leastsq
def __FitEvent(self):
    try:
        dt = 1000./self.Fs  # time-step in ms.
        # edat = np.asarray( np.abs(self.eventData), dtype='float64' )
        edat = self.dataPolarity * np.asarray(self.eventData, dtype='float64')

        # control numpy error reporting
        np.seterr(invalid='ignore', over='ignore', under='ignore')

        ts = np.array([t*dt for t in range(0, len(edat))], dtype='float64')

        # estimate initial guess for events
        initguess = self._characterizeevent(edat, np.abs(util.avg(edat[:10])),
                                            self.baseSD, self.InitThreshold, 6.)
        self.nStates = len(initguess) - 1

        # setup fit params
        params = Parameters()
        for i in range(1, len(initguess)):
            params.add('a'+str(i-1), value=initguess[i][0]-initguess[i-1][0])
            params.add('mu'+str(i-1), value=initguess[i][1]*dt)
            params.add('tau'+str(i-1), value=dt*7.5)
        params.add('b', value=initguess[0][0])

        optfit = Minimizer(self.__objfunc, params, fcn_args=(ts, edat,))
        optfit.prepare_fit()
        optfit.leastsq(xtol=self.FitTol, ftol=self.FitTol, maxfev=self.FitIters)

        if optfit.success:
            self.__recordevent(optfit)
        else:
            # print(optfit.message, optfit.lmdif_message)
            self.rejectEvent('eFitConvergence')
    except KeyboardInterrupt:
        self.rejectEvent('eFitUserStop')
        raise
    except InvalidEvent:
        self.rejectEvent('eInvalidEvent')
    except:
        self.rejectEvent('eFitFailure')
        raise
Example 7: test_constraints1
# Required import: from lmfit import Minimizer
# Method used: Minimizer.leastsq
def test_constraints1():
    def residual(pars, x, sigma=None, data=None):
        yg = gaussian(x, pars['amp_g'], pars['cen_g'], pars['wid_g'])
        yl = lorentzian(x, pars['amp_l'], pars['cen_l'], pars['wid_l'])

        model = yg + yl + pars['line_off'] + x * pars['line_slope']
        if data is None:
            return model
        if sigma is None:
            return (model - data)
        return (model - data)/sigma

    n = 601
    xmin = 0.
    xmax = 20.0
    x = linspace(xmin, xmax, n)
    data = (gaussian(x, 21, 8.1, 1.2) +
            lorentzian(x, 10, 9.6, 2.4) +
            random.normal(scale=0.23, size=n) +
            x*0.5)

    pfit = Parameters()
    pfit.add(name='amp_g', value=10)
    pfit.add(name='cen_g', value=9)
    pfit.add(name='wid_g', value=1)
    pfit.add(name='amp_tot', value=20)
    pfit.add(name='amp_l', expr='amp_tot - amp_g')
    pfit.add(name='cen_l', expr='1.5+cen_g')
    pfit.add(name='wid_l', expr='2*wid_g')
    pfit.add(name='line_slope', value=0.0)
    pfit.add(name='line_off', value=0.0)

    sigma = 0.021  # estimate of data error (for all data points)

    myfit = Minimizer(residual, pfit,
                      fcn_args=(x,), fcn_kws={'sigma': sigma, 'data': data},
                      scale_covar=True)
    myfit.prepare_fit()
    init = residual(myfit.params, x)
    result = myfit.leastsq()

    print(' Nfev = ', result.nfev)
    print(result.chisqr, result.redchi, result.nfree)

    report_fit(result.params)
    pfit = result.params
    fit = residual(result.params, x)

    assert(pfit['cen_l'].value == 1.5 + pfit['cen_g'].value)
    assert(pfit['amp_l'].value == pfit['amp_tot'].value - pfit['amp_g'].value)
    assert(pfit['wid_l'].value == 2 * pfit['wid_g'].value)
Example 8: test_derive
# Required import: from lmfit import Minimizer
# Method used: Minimizer.leastsq
def test_derive():
    def func(pars, x, data=None):
        model = pars['a'] * np.exp(-pars['b'] * x) + pars['c']
        if data is None:
            return model
        return model - data

    def dfunc(pars, x, data=None):
        v = np.exp(-pars['b']*x)
        return np.array([v, -pars['a']*x*v, np.ones(len(x))])

    def f(var, x):
        return var[0] * np.exp(-var[1] * x) + var[2]

    params1 = Parameters()
    params1.add('a', value=10)
    params1.add('b', value=10)
    params1.add('c', value=10)

    params2 = Parameters()
    params2.add('a', value=10)
    params2.add('b', value=10)
    params2.add('c', value=10)

    a, b, c = 2.5, 1.3, 0.8
    x = np.linspace(0, 4, 50)
    y = f([a, b, c], x)
    data = y + 0.15*np.random.normal(size=len(x))

    # fit without analytic derivative
    min1 = Minimizer(func, params1, fcn_args=(x,), fcn_kws={'data': data})
    out1 = min1.leastsq()

    # fit with analytic derivative
    min2 = Minimizer(func, params2, fcn_args=(x,), fcn_kws={'data': data})
    out2 = min2.leastsq(Dfun=dfunc, col_deriv=1)

    check_wo_stderr(out1.params['a'], out2.params['a'].value, 0.00005)
    check_wo_stderr(out1.params['b'], out2.params['b'].value, 0.00005)
    check_wo_stderr(out1.params['c'], out2.params['c'].value, 0.00005)
Example 9: fitevent
# Required import: from lmfit import Minimizer
# Method used: Minimizer.leastsq
def fitevent(self, edat, initguess):
    try:
        dt = 1000./self.Fs  # time-step in ms.

        # control numpy error reporting
        np.seterr(invalid='ignore', over='ignore', under='ignore')

        ts = np.array([t*dt for t in range(0, len(edat))], dtype='float64')

        self.nStates = len(initguess)
        initRCConst = dt*5.

        # setup fit params
        params = Parameters()
        for i in range(0, len(initguess)):
            params.add('a'+str(i), value=initguess[i][0])
            params.add('mu'+str(i), value=initguess[i][1])
            if self.LinkRCConst:
                if i == 0:
                    params.add('tau'+str(i), value=initRCConst)
                else:
                    params.add('tau'+str(i), value=initRCConst, expr='tau0')
            else:
                params.add('tau'+str(i), value=initRCConst)
        params.add('b', value=self.baseMean)

        igdict = params.valuesdict()

        optfit = Minimizer(self._objfunc, params, fcn_args=(ts, edat,))
        optfit.prepare_fit()
        result = optfit.leastsq(xtol=self.FitTol, ftol=self.FitTol, maxfev=self.FitIters)

        if result.success:
            tt = [init[0] for init, final in zip(igdict.items(),
                                                 (result.params.valuesdict()).items())
                  if init == final]
            if len(tt) > 0:
                self.flagEvent('wInitGuessUnchanged')
            self._recordevent(result)
        else:
            # print(optfit.message, optfit.lmdif_message)
            self.rejectEvent('eFitConvergence')
    except KeyboardInterrupt:
        self.rejectEvent('eFitUserStop')
        raise
    except InvalidEvent:
        self.rejectEvent('eInvalidEvent')
    except:
        self.rejectEvent('eFitFailure')
Example 10: time_confinterval
# Required import: from lmfit import Minimizer
# Method used: Minimizer.leastsq
def time_confinterval(self):
    np.random.seed(0)
    x = np.linspace(0.3, 10, 100)
    y = 1/(0.1*x) + 2 + 0.1*np.random.randn(x.size)

    p = Parameters()
    p.add_many(('a', 0.1), ('b', 1))

    def residual(p):
        a = p['a'].value
        b = p['b'].value
        return 1/(a*x) + b - y

    minimizer = Minimizer(residual, p)
    out = minimizer.leastsq()
    return conf_interval(minimizer, out)
Example 11: test_ci_report
# Required import: from lmfit import Minimizer
# Method used: Minimizer.leastsq
def test_ci_report():
    """test confidence interval report"""

    def residual(pars, x, data=None):
        argu = (x*pars['decay'])**2
        shift = pars['shift']
        if abs(shift) > np.pi/2:
            shift = shift - np.sign(shift)*np.pi
        model = pars['amp']*np.sin(shift + x/pars['period']) * np.exp(-argu)
        if data is None:
            return model
        return model - data

    p_true = Parameters()
    p_true.add('amp', value=14.0)
    p_true.add('period', value=5.33)
    p_true.add('shift', value=0.123)
    p_true.add('decay', value=0.010)

    n = 2500
    xmin = 0.
    xmax = 250.0
    x = np.linspace(xmin, xmax, n)
    data = residual(p_true, x) + np.random.normal(scale=0.7215, size=n)

    fit_params = Parameters()
    fit_params.add('amp', value=13.0)
    fit_params.add('period', value=2)
    fit_params.add('shift', value=0.0)
    fit_params.add('decay', value=0.02)

    mini = Minimizer(residual, fit_params, fcn_args=(x,),
                     fcn_kws={'data': data})
    out = mini.leastsq()

    report = fit_report(out)
    assert(len(report) > 500)

    ci, tr = conf_interval(mini, out, trace=True)
    report = ci_report(ci)
    assert(len(report) > 250)
Example 12: test_peakfit
# Required import: from lmfit import Minimizer
# Method used: Minimizer.leastsq
def test_peakfit():
    def residual(pars, x, data=None):
        g1 = gaussian(x, pars['a1'], pars['c1'], pars['w1'])
        g2 = gaussian(x, pars['a2'], pars['c2'], pars['w2'])
        model = g1 + g2
        if data is None:
            return model
        return (model - data)

    n = 601
    xmin = 0.
    xmax = 15.0
    noise = np.random.normal(scale=.65, size=n)
    x = np.linspace(xmin, xmax, n)

    org_params = Parameters()
    org_params.add_many(('a1', 12.0, True, None, None, None),
                        ('c1', 5.3, True, None, None, None),
                        ('w1', 1.0, True, None, None, None),
                        ('a2', 9.1, True, None, None, None),
                        ('c2', 8.1, True, None, None, None),
                        ('w2', 2.5, True, None, None, None))

    data = residual(org_params, x) + noise

    fit_params = Parameters()
    fit_params.add_many(('a1', 8.0, True, None, 14., None),
                        ('c1', 5.0, True, None, None, None),
                        ('w1', 0.7, True, None, None, None),
                        ('a2', 3.1, True, None, None, None),
                        ('c2', 8.8, True, None, None, None))
    fit_params.add('w2', expr='2.5*w1')

    myfit = Minimizer(residual, fit_params, fcn_args=(x,),
                      fcn_kws={'data': data})
    myfit.prepare_fit()
    out = myfit.leastsq()
    check_paras(out.params, org_params)
Example 13: fit with an inequality constraint (excerpt from fit_with_inequality.py)
# Required import: from lmfit import Minimizer
# Method used: Minimizer.leastsq
# the opening lines of residual() are truncated in the source excerpt;
# reconstructed here from the parameters and model data used below
def residual(pars, x, data):
    model = (gaussian(x, pars['amp_g'].value, pars['cen_g'].value,
                      pars['wid_g'].value) +
             lorentzian(x, pars['amp_l'].value, pars['cen_l'].value,
                        pars['wid_l'].value))
    return (model - data)

n = 601
random.seed(0)
x = linspace(0, 20.0, n)
data = (gaussian(x, 21, 6.1, 1.2) +
        lorentzian(x, 10, 9.6, 1.3) +
        random.normal(scale=0.1, size=n))

pfit = Parameters()
pfit.add(name='amp_g', value=10)
pfit.add(name='amp_l', value=10)
pfit.add(name='cen_g', value=5)
pfit.add(name='peak_split', value=2.5, min=0, max=5, vary=True)
pfit.add(name='cen_l', expr='peak_split+cen_g')
pfit.add(name='wid_g', value=1)
pfit.add(name='wid_l', expr='wid_g')

mini = Minimizer(residual, pfit, fcn_args=(x, data))
out = mini.leastsq()

report_fit(out.params)

best_fit = data + out.residual
plt.plot(x, data, 'bo')
plt.plot(x, best_fit, 'r--')
plt.show()
Developer: DiamondLightSource | Project: auto_tomo_calibration-experimental | Lines: 32 | Source file: fit_with_inequality.py
Example 14: extractfeatures_inside
# Required import: from lmfit import Minimizer
# Method used: Minimizer.leastsq
#......... (part of the code omitted here) .........
data = array(data_deltaS)

print("\n================\nMean and SE (i.e. VOI sample data)")
print(mean_deltaS)
print(se_deltaS)

# create a set of Parameters
params = Parameters()
params.add("amp", value=10, min=0)
params.add("alpha", value=1, min=0)
params.add("beta", value=0.05, min=0.0001, max=0.9)

# do fit, here with the leastsq method
# define objective function: returns the array to be minimized
def fcn2min(params, t, data):
    global model, model_res, x
    """ model EMM for bilateral DCE-MRI, subtract data"""
    # unpack parameters: extract the .value attribute for each parameter
    amp = params["amp"].value      # upper limit of deltaS
    alpha = params["alpha"].value  # rate of signal increase (min^-1)
    beta = params["beta"].value    # rate of signal decrease (min^-1)
    model = amp * (1 - exp(-alpha * t)) * exp(-beta * t)

    x = linspace(0, t[4], 101)
    model_res = amp * (1 - exp(-alpha * x)) * exp(-beta * x)

    return model - data

#####
myfit = Minimizer(fcn2min, params, fcn_args=(t,), fcn_kws={"data": data})
myfit.prepare_fit()
myfit.leastsq()

# On a successful fit using the leastsq method, several goodness-of-fit statistics
# and values related to the uncertainty in the fitted variables will be calculated.
print("myfit.success")
print(myfit.success)
print("myfit.residual")
print(myfit.residual)
print("myfit.chisqr")
print(myfit.chisqr)
print("myfit.redchi")
print(myfit.redchi)

# calculate final result
final = data + myfit.residual

# write error report
report_errors(params)

# Calculate R-square:
# R_square = sum((y_fitted - y_mean)**2) / sum((y_data - y_mean)**2)
R_square = sum((model - mean(data)) ** 2) / sum((data - mean(data)) ** 2)
print("R^2")
print(R_square)

self.amp = params["amp"].value
self.alpha = params["alpha"].value
self.beta = params["beta"].value

##################################################
# Now calculate/extract parameters from the model
self.iAUC1 = params["amp"].value * (
    ((1 - exp(-params["beta"].value * t[1])) / params["beta"].value)
    + (exp((-params["alpha"].value + params["beta"].value) * t[1]) - 1)
#......... (remainder of the method omitted) .........
Example 15: feffit
# Required import: from lmfit import Minimizer
# Method used: Minimizer.leastsq
def feffit(paramgroup, datasets, rmax_out=10, path_outputs=True, _larch=None, **kws):
    """execute a Feffit fit: a fit of feff paths to a list of datasets

    Parameters:
    ------------
      paramgroup:    group containing parameters for fit
      datasets:      Feffit Dataset group or list of Feffit Dataset groups.
      rmax_out:      maximum R value to calculate output arrays.
      path_outputs:  flag to set whether all Path outputs should be written.

    Returns:
    ---------
      a fit results group.  This will contain subgroups of:

        datasets: an array of FeffitDataSet groups used in the fit.
        params:   This will be identical to the input parameter group.
        fit:      an object which points to the low-level fit.

      Statistical parameters will be put into the params group.  Each
      dataset will have a 'data' and 'model' subgroup, each with arrays:
         k            wavenumber array of k
         chi          chi(k).
         kwin         window Omega(k) (length of input chi(k)).
         r            uniform array of R, out to rmax_out.
         chir         complex array of chi(R).
         chir_mag     magnitude of chi(R).
         chir_pha     phase of chi(R).
         chir_re      real part of chi(R).
         chir_im      imaginary part of chi(R).
    """
    def _resid(params, datasets=None, paramgroup=None, **kwargs):
        """ this is the residual function"""
        params2group(params, paramgroup)
        return concatenate([d._residual(paramgroup) for d in datasets])

    if isNamedClass(datasets, FeffitDataSet):
        datasets = [datasets]

    params = group2params(paramgroup, _larch=_larch)
    for ds in datasets:
        if not isNamedClass(ds, FeffitDataSet):
            print("feffit needs a list of FeffitDataSets")
            return
        ds.prepare_fit()

    fit = Minimizer(_resid, params,
                    fcn_kws=dict(datasets=datasets,
                                 paramgroup=paramgroup),
                    scale_covar=True, **kws)

    result = fit.leastsq()
    params2group(result.params, paramgroup)
    dat = concatenate([d._residual(paramgroup, data_only=True) for d in datasets])

    n_idp = 0
    for ds in datasets:
        n_idp += ds.n_idp

    # here we rescale chi-square and reduced chi-square to n_idp
    npts = len(result.residual)
    chi_square = result.chisqr * n_idp*1.0 / npts
    chi_reduced = chi_square/(n_idp*1.0 - result.nvarys)
    rfactor = (result.residual**2).sum() / (dat**2).sum()

    # calculate 'aic', 'bic' rescaled to n_idp
    # note that neg2_loglikel is -2*log(likelihood)
    neg2_loglikel = n_idp * np.log(chi_square / n_idp)
    aic = neg2_loglikel + 2 * result.nvarys
    bic = neg2_loglikel + np.log(n_idp) * result.nvarys

    # With scale_covar=True, Minimizer() scales the uncertainties
    # by reduced chi-square assuming params.nfree is the correct value
    # for degrees-of-freedom.  But n_idp - params.nvarys is a better measure,
    # so we rescale uncertainties here.
    covar = getattr(result, 'covar', None)
    # print("COVAR ", covar)
    if covar is not None:
        err_scale = (result.nfree / (n_idp - result.nvarys))
        for name in result.var_names:
            p = result.params[name]
            if isParameter(p) and p.vary:
                p.stderr *= sqrt(err_scale)

        # next, propagate uncertainties to constraints and path parameters.
        result.covar *= err_scale

        vsave, vbest = {}, []
        # 1. save current params
        for vname in result.var_names:
            par = result.params[vname]
            vsave[vname] = par
            vbest.append(par.value)

        # 2. get correlated uncertainties, set params accordingly
        uvars = correlated_values(vbest, result.covar)
#......... (part of the code omitted here) .........