本文整理汇总了Python中scipy.optimize.least_squares方法的典型用法代码示例。如果您正苦于以下问题：Python optimize.least_squares方法的具体用法？Python optimize.least_squares怎么用？Python optimize.least_squares使用的例子？那么，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块 scipy.optimize 的用法示例。
在下文中一共展示了optimize.least_squares方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: fit_line_width
# 需要导入模块: from scipy import optimize [as 别名]
# 或者: from scipy.optimize import least_squares [as 别名]
def fit_line_width(self, bl=2.5, nl=1.1, z0=1.9367, max_nfev=100, tol=1.e-3, verbose=False):
    """Fit for emission line width.

    Parameters
    ----------
    bl, nl, z0 : float
        Initial guesses for the three fitted parameters; their meaning is
        defined by ``self._objective_line_width`` (presumably broad width,
        narrow width and redshift — TODO confirm against the objective).
    max_nfev : int
        Maximum number of function evaluations for the optimizer.
    tol : float
        Used for ``ftol``, ``xtol`` and ``gtol`` of ``least_squares``.
    verbose : bool
        Forwarded to the objective function.

    Returns
    -------
    list
        ``[x0, x1, x2, nfev, hit_limit]`` — the three best-fit parameter
        values, the number of function evaluations, and a flag that is
        True when ``nfev`` reached ``max_nfev``.
        NOTE(review): the original docstring advertised
        ``width/(1000 km/s), z, nfev, (nfev==max_nfev)`` but the code
        returns all three fitted values — confirm intended contract.
    """
    from scipy.optimize import least_squares

    init = [bl, nl, z0]
    args = (self, verbose)
    # Levenberg-Marquardt; non-default options spelled out explicitly so
    # the solver configuration is visible at the call site.
    out = least_squares(self._objective_line_width, init, jac='2-point',
                        method='lm', ftol=tol, xtol=tol, gtol=tol,
                        x_scale=1.0, loss='linear', f_scale=1.0,
                        diff_step=None, tr_solver=None, tr_options={},
                        jac_sparsity=None, max_nfev=max_nfev, verbose=0,
                        args=args, kwargs={})
    # NOTE(review): the original assigned an unused ``params = out.x``;
    # the dead local has been removed.
    return [out.x[0], out.x[1], out.x[2], out.nfev, out.nfev == max_nfev]
示例2: test_args_kwargs
# 需要导入模块: from scipy import optimize [as 别名]
# 或者: from scipy.optimize import least_squares [as 别名]
def test_args_kwargs(self):
    """Positional ``args`` and keyword ``kwargs`` must reach the
    objective/jacobian, and mismatched signatures must raise TypeError."""
    a = 3.0
    for jac in ('2-point', '3-point', 'cs', jac_trivial):
        with suppress_warnings() as sup:
            sup.filter(UserWarning,
                       "jac='(3-point|cs)' works equivalently to '2-point' for method='lm'")
            res_positional = least_squares(fun_trivial, 2.0, jac,
                                           args=(a,), method=self.method)
            res_keyword = least_squares(fun_trivial, 2.0, jac,
                                        kwargs={'a': a}, method=self.method)

        for res in (res_positional, res_keyword):
            assert_allclose(res.x, a, rtol=1e-4)

        # Wrong arity / unknown keyword must be rejected.
        assert_raises(TypeError, least_squares, fun_trivial, 2.0,
                      args=(3, 4,), method=self.method)
        assert_raises(TypeError, least_squares, fun_trivial, 2.0,
                      kwargs={'kaboom': 3}, method=self.method)
示例3: test_full_result
# 需要导入模块: from scipy import optimize [as 别名]
# 或者: from scipy.optimize import least_squares [as 别名]
def test_full_result(self):
    """Every field of the returned OptimizeResult is checked on the
    trivial problem."""
    # MINPACK doesn't work very well with factor=100 on this problem,
    # thus using low 'atol'.
    res = least_squares(fun_trivial, 2.0, method=self.method)
    for value, target, tol in ((res.x, 0, 1e-4),
                               (res.jac, 0, 1e-4),
                               (res.grad, 0, 1e-2),
                               (res.optimality, 0, 1e-2)):
        assert_allclose(value, target, atol=tol)
    assert_allclose(res.cost, 12.5)
    assert_allclose(res.fun, 5)
    assert_equal(res.active_mask, 0)
    if self.method == 'lm':
        # MINPACK does not report Jacobian evaluations separately.
        assert_(res.nfev < 30)
        assert_(res.njev is None)
    else:
        assert_(res.nfev < 10)
        assert_(res.njev < 10)
    assert_(res.status > 0)
    assert_(res.success)
示例4: test_full_result_single_fev
# 需要导入模块: from scipy import optimize [as 别名]
# 或者: from scipy.optimize import least_squares [as 别名]
def test_full_result_single_fev(self):
    """With ``max_nfev=1`` the solver stops immediately; all result
    fields must describe the single evaluation at the starting point."""
    # MINPACK checks the number of nfev after the iteration,
    # so it's hard to tell what it is going to compute — skip 'lm'.
    if self.method == 'lm':
        return
    res = least_squares(fun_trivial, 2.0, method=self.method,
                        max_nfev=1)
    expected = {
        'x': np.array([2]),
        'cost': 40.5,
        'fun': np.array([9]),
        'jac': np.array([[4]]),
        'grad': np.array([36]),
        'optimality': 36,
        'active_mask': np.array([0]),
        'nfev': 1,
        'njev': 1,
        'status': 0,
        'success': 0,
    }
    for attr, value in expected.items():
        assert_equal(getattr(res, attr), value)
示例5: test_rosenbrock_bounds
# 需要导入模块: from scipy import optimize [as 别名]
# 或者: from scipy.optimize import least_squares [as 别名]
def test_rosenbrock_bounds(self):
    """Bounded Rosenbrock from several starting points, over every
    combination of jacobian choice, x_scale and trust-region solver."""
    starts = (np.array([-2.0, 1.0]),
              np.array([2.0, 2.0]),
              np.array([-2.0, 2.0]),
              np.array([0.0, 2.0]),
              np.array([-1.2, 1.0]))
    problems = [
        (starts[0], ([-np.inf, -1.5], np.inf)),
        (starts[1], ([-np.inf, 1.5], np.inf)),
        (starts[2], ([-np.inf, 1.5], np.inf)),
        (starts[3], ([-np.inf, 1.5], [1.0, np.inf])),
        (starts[1], ([1.0, 1.5], [3.0, 3.0])),
        (starts[4], ([-50.0, 0.0], [0.5, 100])),
    ]
    jac_options = ['2-point', '3-point', 'cs', jac_rosenbrock]
    scale_options = [1.0, [1.0, 0.5], 'jac']
    solver_options = ['exact', 'lsmr']
    for x0, bounds in problems:
        for jac, x_scale, tr_solver in product(jac_options,
                                               scale_options,
                                               solver_options):
            res = least_squares(fun_rosenbrock, x0, jac, bounds,
                                x_scale=x_scale, tr_solver=tr_solver,
                                method=self.method)
            assert_allclose(res.optimality, 0.0, atol=1e-5)
示例6: test_with_bounds
# 需要导入模块: from scipy import optimize [as 别名]
# 或者: from scipy.optimize import least_squares [as 别名]
def test_with_bounds(self):
    """BroydenTridiagonal with lower-only, upper-only and two-sided
    bounds, across jacobian choices and sparsity patterns."""
    p = BroydenTridiagonal()
    bound_cases = ((p.lb, np.inf),      # lower bound only
                   (-np.inf, p.ub),     # upper bound only
                   (p.lb, p.ub))        # both sides
    for jac, jac_sparsity in product(
            [p.jac, '2-point', '3-point', 'cs'], [None, p.sparsity]):
        for bounds in bound_cases:
            res = least_squares(p.fun, p.x0, jac, bounds=bounds,
                                method=self.method,
                                jac_sparsity=jac_sparsity)
            assert_allclose(res.optimality, 0, atol=1e-10)
示例7: test_robustness
# 需要导入模块: from scipy import optimize [as 别名]
# 或者: from scipy.optimize import least_squares [as 别名]
def test_robustness(self):
    """Robust loss functions must land closer to the true parameters
    than the plain linear loss on a noisy exponential fit."""
    for noise in (0.1, 1.0):
        problem = ExponentialFittingProblem(1, 0.1, noise, random_seed=0)
        for jac in ('2-point', '3-point', 'cs', problem.jac):
            res_linear = least_squares(problem.fun, problem.p0, jac=jac,
                                       method=self.method)
            assert_allclose(res_linear.optimality, 0, atol=1e-2)
            robust_losses = [loss for loss in LOSSES if loss != 'linear']
            for loss in robust_losses:
                res_robust = least_squares(problem.fun, problem.p0,
                                           jac=jac, loss=loss,
                                           f_scale=noise,
                                           method=self.method)
                assert_allclose(res_robust.optimality, 0, atol=1e-2)
                # The robust estimate should beat the linear-loss one.
                assert_(norm(res_robust.x - problem.p_opt) <
                        norm(res_linear.x - problem.p_opt))
示例8: solve
# 需要导入模块: from scipy import optimize [as 别名]
# 或者: from scipy.optimize import least_squares [as 别名]
def solve(self):
    """Run the least-squares fit from four rotated starting points and
    keep the lowest-cost solution.

    The element at index 6 of the initial guess is set to each multiple
    ``k * pi/2`` for ``k`` in ``(-2, -1, 0, 1)``; the fit with the
    smallest residual cost wins and is stored on ``self.result``.

    Returns
    -------
    ndarray
        The best-fit parameter vector.
    """
    guess = self._initial_guess()
    fits = []
    for quadrant in (-2, -1, 0, 1):
        guess[6] = quadrant * np.pi / 2
        fits.append(least_squares(self._residuals, guess, jac="3-point"))
    # min() returns the first minimum on ties, matching np.argmin.
    self.result = min(fits, key=lambda fit: fit.cost)
    return self.result.x
示例9: __init__
# 需要导入模块: from scipy import optimize [as 别名]
# 或者: from scipy.optimize import least_squares [as 别名]
def __init__(self, ml, pcov=None, nfev=None, **kwargs):
    """Solver wrapping Scipy's least_squares method [scipy_ref]_.

    Notes
    -----
    This class is the default solve method called by the pastas Model
    solve method. All kwargs provided to Model.solve() are forwarded to
    this solver and from there to the Scipy least_squares solver.

    Examples
    --------
    >>> ml.solve(solver=ps.LeastSquares)

    References
    ----------
    .. [scipy_ref] https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.least_squares.html
    """
    # Delegate all state handling to the shared solver base class.
    BaseSolver.__init__(self, ml=ml, pcov=pcov, nfev=nfev, **kwargs)
示例10: fit_beta_lsq
# 需要导入模块: from scipy import optimize [as 别名]
# 或者: from scipy.optimize import least_squares [as 别名]
def fit_beta_lsq(t, l, bounds=(0, np.inf), fix_l0=False, beta_0=None):
    """Least-squares estimate of the rate ``beta`` (and optionally ``l0``).

    Parameters
    ----------
    t : array_like
        Time points of the measurements.
    l : ndarray
        Measured values; columns are selected by ``tau == 0``, so the
        second axis is assumed to align with ``t`` — TODO confirm.
    bounds : 2-tuple
        Bounds forwarded to ``scipy.optimize.least_squares``.
    fix_l0 : bool
        If True, pin ``l0`` to the mean of the ``tau == 0`` columns and
        fit only ``beta``; otherwise fit ``beta`` and ``l0`` jointly.
    beta_0 : float, optional
        Initial guess for ``beta``; defaults to 1.

    Returns
    -------
    beta, l0
        Fitted rate and initial value.  When ``fix_l0`` is True, ``beta``
        is the raw 1-element solution array from the optimizer (kept for
        backward compatibility).
    """
    tau = t - np.min(t)
    # Empirical initial value: average over all measurements at tau == 0.
    l0 = np.mean(l[:, tau == 0])
    if beta_0 is None:
        beta_0 = 1

    # Named functions instead of name-bound lambdas (PEP 8 E731).
    if fix_l0:
        def f_lsq(b):
            # Residuals with l0 held fixed.
            return (sol_u(tau, l0, 0, b) - l).flatten()

        ret = least_squares(f_lsq, beta_0, bounds=bounds)
        beta = ret.x
    else:
        def f_lsq(p):
            # p = [beta, l0]; both parameters free.
            return (sol_u(tau, p[1], 0, p[0]) - l).flatten()

        ret = least_squares(f_lsq, np.array([beta_0, l0]), bounds=bounds)
        beta = ret.x[0]
        l0 = ret.x[1]
    return beta, l0
示例11: leastsq
# 需要导入模块: from scipy import optimize [as 别名]
# 或者: from scipy.optimize import least_squares [as 别名]
def leastsq(self, xs, ys, params=None, method='leastsq'):
    """Fit the model's error function to (xs, ys).

    When ``method == 'least_squares'`` the bounded scipy solver is used;
    any other value falls back to the legacy MINPACK ``leastsq`` wrapper.
    Returns a 1-tuple containing the fitted parameter array.
    """
    if params is None:
        params = self.guess(xs, ys)
    if method != 'least_squares':
        # Legacy unbounded path via MINPACK.
        fitted = leastsq(self.error, params, args=(xs, ys))
        return fitted[0],
    bounds = self.bounds(params)
    params = self.adapt_params_to_bounds(params, bounds)
    fitted = least_squares(self.error, params, bounds=bounds, args=(xs, ys))
    # OptimizeResult supports item access; .x is the same object.
    return fitted.x,
示例12: run_min
# 需要导入模块: from scipy import optimize [as 别名]
# 或者: from scipy.optimize import least_squares [as 别名]
def run_min(args, initial_guess):  # (np.random.random(6)).tolist()
    """Minimize ``cost_func`` with Levenberg-Marquardt.

    Parameters
    ----------
    args : tuple
        Extra positional arguments forwarded to ``cost_func``.
    initial_guess : array_like
        Starting point for the optimization.

    Returns
    -------
    scipy.optimize.OptimizeResult
        The full least-squares result.
    """
    # NOTE(review): the original body contained a bare ``print`` statement
    # (a Python-2 leftover that is a no-op expression under Python 3);
    # it has been removed.
    res = least_squares(cost_func, initial_guess, args=args, method="lm",
                        ftol=1e-15, max_nfev=100000)  # 1e-10
    return res
示例13: scale_to_photometry
# 需要导入模块: from scipy import optimize [as 别名]
# 或者: from scipy.optimize import least_squares [as 别名]
def scale_to_photometry(self, tfit=None, tol=1.e-4, order=0, init=None, fit_background=True, Rspline=50, use_fit=True, **kwargs):
    """Compute scale factor between spectra and photometry.

    New implementation of Levenberg-Marquardt minimization
    (earlier versions used 'Powell' or 'BFGS' via ``minimize``).

    Parameters
    ----------
    tfit : dict, optional
        Existing template-fit result; if None (and ``fit_background``),
        a spline-continuum fit at z=0 is computed internally.
    tol : float
        Used for ``ftol``, ``xtol`` and ``gtol`` of ``least_squares``.
    order : int
        Polynomial order of the scale factor (``order+1`` coefficients).
    init : array_like, optional
        Initial coefficients; defaults to ``[10, 0, ...]``.
    fit_background : bool
        Forwarded to the internal template fit.
    Rspline : float
        Spectral resolution of the internal B-spline continuum basis.
    use_fit : bool
        If True, scale against the clipped best-fit 1D model template;
        otherwise scale against the extracted 1D spectrum directly.

    Returns
    -------
    scipy.optimize.OptimizeResult
        Full fit result; the scale coefficients are in ``.x``.
    """
    from scipy.optimize import minimize, least_squares
    # Nothing to scale against without photometry; return unit-ish scale.
    if self.Nphot == 0:
        return np.array([10.])
    # No template fit supplied: build a log-spaced B-spline basis over
    # 4200 Å – 2.5 µm and fit it at z=0 for a smooth continuum model.
    if (tfit is None) & (fit_background):
        wspline = np.arange(4200, 2.5e4)
        #Rspline = 50
        df_spl = len(utils.log_zgrid(zr=[wspline[0], wspline[-1]], dz=1./Rspline))
        tspline = utils.bspline_templates(wspline, df=df_spl+2, log=True, clip=0.0001)
        tfit = self.template_at_z(z=0, templates=tspline, include_photometry=False, fit_background=fit_background, draws=1000)
    if use_fit:
        # Restrict the best-fit 1D model to the observed wavelength range
        # and to points with positive uncertainties.
        oned = self.oned_spectrum(tfit=tfit, loglam=False)
        wmi = np.min([oned[k]['wave'].min() for k in oned])
        wma = np.max([oned[k]['wave'].max() for k in oned])
        clip = (tfit['line1d'].wave > wmi) & (tfit['line1d'].wave < wma) & (tfit['line1d_err'] > 0)
        spl_temp = utils.SpectrumTemplate(wave=tfit['line1d'].wave[clip], flux=tfit['line1d'].flux[clip], err=tfit['line1d_err'][clip])
        args = (self, {'spl': spl_temp})
    else:
        # Scale directly against the extracted 1D spectrum.
        oned = self.oned_spectrum(tfit=tfit, loglam=False)
        args = (self, oned)
    # Initial scale polynomial: order+1 coefficients, leading term 10.
    if init is None:
        init = np.zeros(order+1)
        init[0] = 10.
    # Levenberg-Marquardt on the direct-scaling objective.
    scale_fit = least_squares(self._objective_scale_direct, init, jac='2-point', method='lm', ftol=tol, xtol=tol, gtol=tol, x_scale=1.0, loss='linear', f_scale=1.0, diff_step=None, tr_solver=None, tr_options={}, jac_sparsity=None, max_nfev=None, verbose=0, args=args, kwargs={})
    # pscale = scale_fit.x
    return scale_fit
示例14: _old_scale_to_photometry
# 需要导入模块: from scipy import optimize [as 别名]
# 或者: from scipy.optimize import least_squares [as 别名]
def _old_scale_to_photometry(self, z=0, templates={}, tol=1.e-4, order=0, init=None, method='lm', fit_background=True):
    """Compute scale factor between spectra and photometry (old version).

    method : 'lm' uses the Levenberg-Marquardt ``least_squares`` path on
        residuals; any other value ('Powell' or 'BFGS' work well, the
        latter a bit faster but less robust) goes through ``minimize``
        on the chi2 objective.

    NOTE(review): ``templates={}`` is a mutable default argument; it is
    only passed through to ``xfit_at_z`` here, but confirm it is never
    mutated downstream.
    """
    from scipy.optimize import minimize, least_squares
    # Nothing to scale against without photometry.
    if self.Nphot == 0:
        return np.array([10.])
    # Design matrix and data vector from an NNLS template fit at z.
    AxT, data = self.xfit_at_z(z=z, templates=templates, fitter='nnls',
                               fit_background=fit_background,
                               get_uncertainties=False,
                               get_design_matrix=True)
    # Initial scale polynomial: order+1 coefficients, leading term 10.
    if init is None:
        init = np.zeros(order+1)
        init[0] = 10.
    if method == 'lm':
        # Levenberg-Marquardt on the 'resid' form of the objective.
        scale_fit = least_squares(self.objfun_scale, init, jac='2-point', method='lm', ftol=tol, xtol=tol, gtol=tol, x_scale=1.0, loss='linear', f_scale=1.0, diff_step=None, tr_solver=None, tr_options={}, jac_sparsity=None, max_nfev=None, verbose=0, args=(AxT, data, self, 'resid'), kwargs={})
    else:
        # Scalar minimization of the 'chi2' form with the chosen method.
        scale_fit = minimize(self.objfun_scale, init, args=(AxT, data, self, 'chi2'), method=method, jac=None, hess=None, hessp=None, tol=tol, callback=None, options=None)
    # pscale = scale_fit.x
    return scale_fit
示例15: test_basic
# 需要导入模块: from scipy import optimize [as 别名]
# 或者: from scipy.optimize import least_squares [as 别名]
def test_basic(self):
    """Smoke test: the minimal calling sequence converges and the
    reported residual matches the objective at the solution."""
    result = least_squares(fun_trivial, 2., method=self.method)
    assert_allclose(result.x, 0, atol=1e-4)
    assert_allclose(result.fun, fun_trivial(result.x))