This article collects typical usage examples of the scipy.odr.ODR class in Python. If you have been wondering what the ODR class is for, how to use it, or what real-world ODR code looks like, the curated examples below may help.
The following presents 15 code examples of the ODR class, listed roughly in order of popularity.
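Before the examples, here is a minimal, self-contained sketch of the typical scipy.odr workflow: wrap the measurements in RealData (or Data), wrap the model function in Model, build an ODR object with an initial guess beta0, then call run(). The synthetic data and the linear model function below are illustrative assumptions, not taken from any of the examples that follow.

import numpy as np
from scipy.odr import ODR, Model, RealData

def linear(beta, x):
    # scipy.odr model functions take the parameter vector first, then x
    return beta[0] * x + beta[1]

# Synthetic data, assumed for illustration only
rng = np.random.default_rng(0)
x = np.linspace(0.0, 10.0, 25)
y = 2.0 * x + 1.0 + rng.normal(scale=0.3, size=x.size)

data = RealData(x, y, sx=np.full_like(x, 0.1), sy=np.full_like(y, 0.3))  # errors on both x and y
model = Model(linear)
odr = ODR(data, model, beta0=[1.0, 0.0])   # beta0 is the initial parameter guess
output = odr.run()

print(output.beta)      # fitted parameters
print(output.sd_beta)   # standard errors of the parameters
print(output.cov_beta)  # parameter covariance matrix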
Example 1: do_bestfit
def do_bestfit(self):
    """
    do bestfit using scipy.odr
    """
    self.check_important_variables()
    x = np.array(self.args["x"])
    y = np.array(self.args["y"])

    if self.args.get("use_RealData", True):
        realdata_kwargs = self.args.get("RealData_kwargs", {})
        data = RealData(x, y, **realdata_kwargs)
    else:
        data_kwargs = self.args.get("Data_kwargs", {})
        data = Data(x, y, **data_kwargs)

    model = self.args.get("Model", None)
    if model is None:
        if "func" not in self.args:
            raise KeyError("Need fitting function")
        model_kwargs = self.args.get("Model_kwargs", {})
        model = Model(self.args["func"], **model_kwargs)

    odr_kwargs = self.args.get("ODR_kwargs", {})
    odr = ODR(data, model, **odr_kwargs)
    self.output = odr.run()
    if self.args.get("pprint", False):
        self.output.pprint()
    self.fit_args = self.output.beta
    return self.fit_args
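The enclosing class is not shown in this excerpt, so the exact calling convention is unknown. The sketch below is only a hypothetical illustration of the kind of self.args dictionary that do_bestfit() reads; the quadratic fitting function and every value in the dict are assumptions.

import numpy as np

def quadratic(beta, x):
    # Hypothetical fitting function; do_bestfit() passes it to scipy.odr.Model
    return beta[0] * x**2 + beta[1] * x + beta[2]

x = np.linspace(0.0, 5.0, 20)
args = {
    "x": x,
    "y": 2.0 * x**2 - x + 0.5,                 # assumed sample data
    "use_RealData": True,
    "RealData_kwargs": {"sy": 0.1},            # forwarded to RealData
    "func": quadratic,                         # used when no "Model" is supplied
    "Model_kwargs": {},
    "ODR_kwargs": {"beta0": [1.0, 0.0, 0.0]},  # ODR needs an initial guess
    "pprint": True,
}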
Example 2: test_explicit
def test_explicit(self):
    explicit_mod = Model(
        self.explicit_fcn,
        fjacb=self.explicit_fjb,
        fjacd=self.explicit_fjd,
        meta=dict(name='Sample Explicit Model',
                  ref='ODRPACK UG, pg. 39'),
    )
    explicit_dat = Data([0., 0., 5., 7., 7.5, 10., 16., 26., 30., 34., 34.5, 100.],
                        [1265., 1263.6, 1258., 1254., 1253., 1249.8, 1237., 1218.,
                         1220.6, 1213.8, 1215.5, 1212.])
    explicit_odr = ODR(explicit_dat, explicit_mod, beta0=[1500.0, -50.0, -0.1],
                       ifixx=[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0])
    explicit_odr.set_job(deriv=2)

    out = explicit_odr.run()
    assert_array_almost_equal(
        out.beta,
        np.array([1.2646548050648876e+03, -5.4018409956678255e+01,
                  -8.7849712165253724e-02]),
    )
    assert_array_almost_equal(
        out.sd_beta,
        np.array([1.0349270280543437, 1.583997785262061, 0.0063321988657267]),
    )
    assert_array_almost_equal(
        out.cov_beta,
        np.array([[4.4949592379003039e-01, -3.7421976890364739e-01,
                   -8.0978217468468912e-04],
                  [-3.7421976890364739e-01, 1.0529686462751804e+00,
                   -1.9453521827942002e-03],
                  [-8.0978217468468912e-04, -1.9453521827942002e-03,
                   1.6827336938454476e-05]]),
    )
Example 3: test_pearson
def test_pearson(self):
    p_x = np.array([0.0, 0.9, 1.8, 2.6, 3.3, 4.4, 5.2, 6.1, 6.5, 7.4])
    p_y = np.array([5.9, 5.4, 4.4, 4.6, 3.5, 3.7, 2.8, 2.8, 2.4, 1.5])
    p_sx = np.array([0.03, 0.03, 0.04, 0.035, 0.07, 0.11, 0.13, 0.22, 0.74, 1.0])
    p_sy = np.array([1.0, 0.74, 0.5, 0.35, 0.22, 0.22, 0.12, 0.12, 0.1, 0.04])

    p_dat = RealData(p_x, p_y, sx=p_sx, sy=p_sy)
    # Reverse the data to test invariance of results
    pr_dat = RealData(p_y, p_x, sx=p_sy, sy=p_sx)

    p_mod = Model(self.pearson_fcn, meta=dict(name="Uni-linear Fit"))
    p_odr = ODR(p_dat, p_mod, beta0=[1.0, 1.0])
    pr_odr = ODR(pr_dat, p_mod, beta0=[1.0, 1.0])

    out = p_odr.run()
    assert_array_almost_equal(out.beta, np.array([5.4767400299231674, -0.4796082367610305]))
    assert_array_almost_equal(out.sd_beta, np.array([0.3590121690702467, 0.0706291186037444]))
    assert_array_almost_equal(
        out.cov_beta,
        np.array([[0.0854275622946333, -0.0161807025443155],
                  [-0.0161807025443155, 0.003306337993922]]),
    )

    rout = pr_odr.run()
    assert_array_almost_equal(rout.beta, np.array([11.4192022410781231, -2.0850374506165474]))
    assert_array_almost_equal(rout.sd_beta, np.array([0.9820231665657161, 0.3070515616198911]))
    assert_array_almost_equal(
        rout.cov_beta,
        np.array([[0.6391799462548782, -0.1955657291119177],
                  [-0.1955657291119177, 0.0624888159223392]]),
    )
Example 4: ortho_regress
def ortho_regress(x, y):
    linreg = linregress(x, y)
    mod = Model(f)
    dat = Data(x, y)
    od = ODR(dat, mod, beta0=linreg[0:2])
    out = od.run()
    # print(list(out.beta))
    # return list(out.beta) + [np.nan, np.nan, np.nan]
    return list(out.beta)
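The model function f is not shown in this excerpt. Since the fit is seeded with the slope and intercept from scipy.stats.linregress, f is presumably a straight line in beta; the sketch below is an assumed reconstruction of the missing pieces together with a usage call on made-up data.

import numpy as np
from scipy.odr import ODR, Data, Model
from scipy.stats import linregress

def f(B, x):
    # Assumed definition of the model function used by ortho_regress
    return B[0] * x + B[1]

x = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
y = np.array([0.1, 0.9, 2.2, 2.8, 4.1])
print(ortho_regress(x, y))   # -> [slope, intercept]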
Example 5: orth_regression
def orth_regression(model, obs):
    linear = Model(f)
    mydata = RealData(obs, model)
    myodr = ODR(mydata, linear, beta0=[1., 0.])
    myoutput = myodr.run()
    params = myoutput.beta
    gradient = params[0]
    y_intercept = params[1]
    res_var = myoutput.res_var
    return np.around(gradient, 2), np.around(y_intercept, 2), np.around(res_var, 2)
Example 6: _run_odr
def _run_odr(self):
    """Run an ODR regression"""
    linear = Model(self._modelODR)
    mydata = Data(ravel(self._datax), ravel(self._datay), 1)
    myodr = ODR(mydata, linear, beta0=self._guess, maxit=10000)
    myoutput = myodr.run()
    self._result = myoutput.beta
    self._stdev = myoutput.sd_beta
    self._covar = myoutput.cov_beta
    self._odr = myoutput
Example 7: test_ifixx
def test_ifixx(self):
    x1 = [-2.01, -0.99, -0.001, 1.02, 1.98]
    x2 = [3.98, 1.01, 0.001, 0.998, 4.01]
    fix = np.vstack((np.zeros_like(x1, dtype=int), np.ones_like(x2, dtype=int)))
    data = Data(np.vstack((x1, x2)), y=1, fix=fix)
    model = Model(lambda beta, x: x[1, :] - beta[0] * x[0, :]**2., implicit=True)

    odr1 = ODR(data, model, beta0=np.array([1.]))
    sol1 = odr1.run()
    odr2 = ODR(data, model, beta0=np.array([1.]), ifixx=fix)
    sol2 = odr2.run()
    assert_equal(sol1.beta, sol2.beta)
Example 8: odrlin
def odrlin(x, y, sx, sy):
    """
    Linear fit of a 2-D data set using Orthogonal Distance Regression.

    @param x, y: data to fit
    @param sx, sy: respective errors of the data to fit
    """
    model = models.unilinear  # defines the model as beta[0]*x + beta[1]
    data = RealData(x, y, sx=sx, sy=sy)
    kinit = (y[-1] - y[0]) / (x[-1] - x[0])
    init = (kinit, y[0] - kinit * x[0])
    linodr = ODR(data, model, init)
    return linodr.run()
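The excerpt does not show its imports; it presumably relies on something like `from scipy.odr import ODR, RealData, models` at module level. A hedged usage sketch with made-up data:

import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
y = np.array([2.1, 3.9, 6.2, 8.1, 9.8])
sx = 0.05 * np.ones_like(x)
sy = 0.2 * np.ones_like(y)

out = odrlin(x, y, sx, sy)
print(out.beta)     # [slope, intercept] of the unilinear model
print(out.sd_beta)  # their standard errors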
Example 9: test_ticket_1253
def test_ticket_1253(self):
    def linear(c, x):
        return c[0]*x + c[1]

    c = [2.0, 3.0]
    x = np.linspace(0, 10)
    y = linear(c, x)

    model = Model(linear)
    data = Data(x, y, wd=1.0, we=1.0)
    job = ODR(data, model, beta0=[1.0, 1.0])
    result = job.run()
    assert_equal(result.info, 2)
Example 10: slope
def slope(x, y, xerr, yerr, verbose=True, null=np.double(-9.99999488e+08)):
    """
    Calculates the slope of a color trajectory.

    Uses scipy's ODRPACK wrapper and a total least squares algorithm.

    Parameters
    ----------
    x, y : array-like
        Colors or magnitudes to calculate slopes of.
    xerr, yerr : array-like
        Corresponding errors (stdev) on `x` and `y`.
        Make sure that the HMKPNT errors are properly adjusted!
    verbose : bool, optional
        Whether to print verbose output. Default True.

    Returns
    -------
    slope : float
        Slope (in rise/run) of the linear fit.
    intercept : float
        Y-value where the linear fit intercepts the Y-axis.
    slope_error : float
        The standard error on the fitted slope: an indication of fit quality.

    Notes
    -----
    Much of this code is borrowed from the docstring example found
    at https://github.com/scipy/scipy/blob/master/scipy/odr/odrpack.py#L27 .
    See http://docs.scipy.org/doc/scipy/reference/odr.html and
    http://stackoverflow.com/questions/9376886/orthogonal-regression-fitting-in-scipy-least-squares-method
    for further discussion.
    """
    if (len(x) != len(y)) or len(x) == 0 or len(y) == 0:
        return null, null, null

    mydata = RealData(x, y, sx=xerr, sy=yerr)
    # Someday, we may want to improve the "initial guess" with
    # a leastsq first-pass.
    myodr = ODR(mydata, linear, beta0=[1., 2.])
    myoutput = myodr.run()

    if verbose:
        myoutput.pprint()

    return myoutput.beta[0], myoutput.beta[1], myoutput.sd_beta[0]
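The `linear` model is not defined in this excerpt; given the two-element beta0 and the returned slope/intercept, it is presumably scipy.odr's ready-made unilinear model or an equivalent two-parameter Model. A minimal sketch of what the missing definition might look like:

from scipy.odr import Model

def _linear_fcn(beta, x):
    # Assumed straight-line model: beta[0] is the slope, beta[1] the intercept
    return beta[0] * x + beta[1]

linear = Model(_linear_fcn)   # scipy.odr.unilinear would serve the same purpose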
Example 11: fit_odr
def fit_odr(self, t_scale, xmin=-sys.maxsize, xmax=sys.maxsize):
    # initial guess for the parameters
    params_initial = [1e-4, 1.0]  # a, b

    # function to fit
    def f(B, x):
        return B[0] * pow(x / t_scale, -B[1])

    powerlaw = Model(f)

    def powerlaw_err(x, a, b, aerr, berr, cov):
        val = f([a, b], x)
        err = np.sqrt(pow(aerr / a, 2) + pow(np.log(x / t_scale), 2) * cov)  # *val
        return err

    xdata = []
    ydata = []
    yerr = []
    for ipoint, xpoint in enumerate(self.get_xdata()):
        if xpoint >= xmin and xpoint < xmax:
            xdata.append(xpoint)
            ydata.append(self.get_ydata()[ipoint])
            yerr.append(np.sqrt(self.get_yerr()[0][ipoint] * self.get_yerr()[1][ipoint]))
    xdata = np.array(xdata)
    ydata = np.array(ydata)
    yerr = np.array(yerr)
    if len(xdata) < 3:
        logging.info('Only {0} data points. Fitting is skipped.'.format(len(xdata)))
        return 1

    mydata = RealData(x=xdata, y=ydata, sy=yerr)
    myodr = ODR(mydata, powerlaw, beta0=params_initial)
    myoutput = myodr.run()
    myoutput.pprint()
    params_optimal = myoutput.beta
    cov = myoutput.cov_beta
    params_err = myoutput.sd_beta
    for iparam, param in enumerate(params_optimal):
        logging.info("""Parameter {0}: {1} +/- {2}""".format(iparam, params_optimal[iparam], params_err[iparam]))

    x_draw = np.linspace(min(xdata), max(xdata), 100)
    y_draw = np.zeros_like(x_draw)
    yerr_draw = np.zeros_like(y_draw)
    for ix, x in enumerate(x_draw):
        y_draw[ix] = f(params_optimal, x)
        yerr_draw[ix] = powerlaw_err(x, params_optimal[0], params_optimal[1],
                                     params_err[0], params_err[1], cov[1][1])
    return ((params_optimal, params_err), (x_draw, y_draw, yerr_draw))
Example 12: calculate_ortho_regression
def calculate_ortho_regression(x, y, samples):
    sx = numpy.cov(x, y)[0][0]
    sy = numpy.cov(x, y)[1][1]

    linear = Model(func)
    # data = Data(x, y, wd=1./pow(sx, 2), we=1./pow(sy, 2))
    data = Data(x, y)
    odr = ODR(data, linear, beta0=[1., 2.])
    out = odr.run()

    print('\n')
    out.pprint()
    return (out.beta[0], out.beta[1], out.res_var)
Example 13: scipyODR
def scipyODR(recipe, *args, **kwargs):
    from scipy.odr import Data, Model, ODR, RealData, odr_stop

    # FIXME
    # temporarily change _weights to _weights**2 to fit the ODR fits
    a = [w ** 2 for w in recipe._weights]
    recipe._weights = a

    model = Model(recipe.evaluateODR,
                  # implicit=1,
                  meta=dict(name='ODR fit'),
                  )

    cont = list(recipe._contributions.values())
    x = [cont[0].profile.x]
    y = [cont[0].profile.y]
    dy = [cont[0].profile.dy]

    for i in range(1, len(cont)):
        xplus = x[-1][-1] - x[-1][-2] + x[-1][-1]
        x.append(cont[i].profile.x + x[-1][-1] + xplus)
        y.append(cont[i].profile.y)
        dy.append(cont[i].profile.dy)

    x.append(np.arange(len(recipe._restraintlist))
             * (x[-1][-1] - x[-1][-2]) + x[-1][-1])
    y.append(np.zeros_like(recipe._restraintlist))
    dy = np.concatenate(dy)
    dy = np.concatenate(
        [dy, np.ones_like(recipe._restraintlist) + np.average(dy)])

    data = RealData(x=np.concatenate(x), y=np.concatenate(y), sy=dy)
    odr_kwargs = {}
    if 'maxiter' in kwargs:
        odr_kwargs['maxit'] = kwargs['maxiter']
    odr = ODR(data, model, beta0=recipe.getValues(), **odr_kwargs)
    odr.set_job(deriv=1)
    out = odr.run()
    # out.pprint()

    # FIXME
    # revert back
    a = [np.sqrt(w) for w in recipe._weights]
    recipe._weights = a

    return {'x': out.beta,
            'esd': out.sd_beta,
            'cov': out.cov_beta,
            'raw': out,
            }
Example 14: test_implicit
def test_implicit(self):
    implicit_mod = Model(
        self.implicit_fcn,
        implicit=1,
        meta=dict(name='Sample Implicit Model',
                  ref='ODRPACK UG, pg. 49'),
    )
    implicit_dat = Data([
        [0.5, 1.2, 1.6, 1.86, 2.12, 2.36, 2.44, 2.36, 2.06, 1.74, 1.34, 0.9, -0.28,
         -0.78, -1.36, -1.9, -2.5, -2.88, -3.18, -3.44],
        [-0.12, -0.6, -1., -1.4, -2.54, -3.36, -4., -4.75, -5.25, -5.64, -5.97, -6.32,
         -6.44, -6.44, -6.41, -6.25, -5.88, -5.5, -5.24, -4.86]],
        1,
    )
    implicit_odr = ODR(implicit_dat, implicit_mod,
                       beta0=[-1.0, -3.0, 0.09, 0.02, 0.08])

    out = implicit_odr.run()
    assert_array_almost_equal(
        out.beta,
        np.array([-0.9993809167281279, -2.9310484652026476, 0.0875730502693354,
                  0.0162299708984738, 0.0797537982976416]),
    )
    assert_array_almost_equal(
        out.sd_beta,
        np.array([0.1113840353364371, 0.1097673310686467, 0.0041060738314314,
                  0.0027500347539902, 0.0034962501532468]),
    )
    assert_array_almost_equal(
        out.cov_beta,
        np.array([[2.1089274602333052e+00, -1.9437686411979040e+00,
                   7.0263550868344446e-02, -4.7175267373474862e-02,
                   5.2515575927380355e-02],
                  [-1.9437686411979040e+00, 2.0481509222414456e+00,
                   -6.1600515853057307e-02, 4.6268827806232933e-02,
                   -5.8822307501391467e-02],
                  [7.0263550868344446e-02, -6.1600515853057307e-02,
                   2.8659542561579308e-03, -1.4628662260014491e-03,
                   1.4528860663055824e-03],
                  [-4.7175267373474862e-02, 4.6268827806232933e-02,
                   -1.4628662260014491e-03, 1.2855592885514335e-03,
                   -1.2692942951415293e-03],
                  [5.2515575927380355e-02, -5.8822307501391467e-02,
                   1.4528860663055824e-03, -1.2692942951415293e-03,
                   2.0778813389755596e-03]]),
    )
Example 15: test_lorentz
def test_lorentz(self):
    l_sy = np.array([.29] * 18)
    l_sx = np.array([.000972971, .000948268, .000707632, .000706679,
                     .000706074, .000703918, .000698955, .000456856,
                     .000455207, .000662717, .000654619, .000652694,
                     .000000859202, .00106589, .00106378, .00125483,
                     .00140818, .00241839])

    l_dat = RealData(
        [3.9094, 3.85945, 3.84976, 3.84716, 3.84551, 3.83964, 3.82608,
         3.78847, 3.78163, 3.72558, 3.70274, 3.6973, 3.67373, 3.65982,
         3.6562, 3.62498, 3.55525, 3.41886],
        [652, 910.5, 984, 1000, 1007.5, 1053, 1160.5, 1409.5, 1430, 1122,
         957.5, 920, 777.5, 709.5, 698, 578.5, 418.5, 275.5],
        sx=l_sx,
        sy=l_sy,
    )
    l_mod = Model(self.lorentz, meta=dict(name='Lorentz Peak'))
    l_odr = ODR(l_dat, l_mod, beta0=(1000., .1, 3.8))

    out = l_odr.run()
    assert_array_almost_equal(
        out.beta,
        np.array([1.4306780846149925e+03, 1.3390509034538309e-01,
                  3.7798193600109009e+00]),
    )
    assert_array_almost_equal(
        out.sd_beta,
        np.array([7.3621186811330963e-01, 3.5068899941471650e-04,
                  2.4451209281408992e-04]),
    )
    assert_array_almost_equal(
        out.cov_beta,
        np.array([[2.4714409064597873e-01, -6.9067261911110836e-05,
                   -3.1236953270424990e-05],
                  [-6.9067261911110836e-05, 5.6077531517333009e-08,
                   3.6133261832722601e-08],
                  [-3.1236953270424990e-05, 3.6133261832722601e-08,
                   2.7261220025171730e-08]]),
    )