This page collects typical usage examples of the Python method scipy.optimize.OptimizeResult.nit. If you are wondering what OptimizeResult.nit does, how to use it, or want to see it in real code, the curated examples below may help. You can also read further about the containing class, scipy.optimize.OptimizeResult.
The following shows 6 code examples of OptimizeResult.nit, sorted by popularity by default.
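As a quick orientation before the examples: OptimizeResult is the result object that SciPy's optimizers return, and nit is the attribute recording how many iterations the solver performed. A minimal sketch (not taken from the examples below) of reading it from scipy.optimize.minimize:

from scipy.optimize import minimize, OptimizeResult

# Minimize a simple quadratic; the returned object is an OptimizeResult.
res = minimize(lambda x: (x[0] - 1.0) ** 2 + (x[1] + 2.0) ** 2, x0=[0.0, 0.0])
assert isinstance(res, OptimizeResult)
print(res.nit)  # number of iterations performed by the solver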
Example 1: result
# Required import: from scipy.optimize import OptimizeResult [as alias]
# Or: from scipy.optimize.OptimizeResult import nit [as alias]
def result(self):
    """ The OptimizeResult """
    res = OptimizeResult()
    res.x = self.es.xbest          # best point found by the underlying optimizer
    res.fun = self.es.ebest        # objective value at the best point
    res.nit = self._iter           # number of iterations performed
    res.ncall = self.owf.nb_fun_call
    return res
Example 2: result
# Required import: from scipy.optimize import OptimizeResult [as alias]
# Or: from scipy.optimize.OptimizeResult import nit [as alias]
def result(self):
    """ The OptimizeResult """
    res = OptimizeResult()
    res.x = self._xmin
    res.fun = self._fvalue
    res.message = self._message
    res.nit = self._step_record    # number of steps recorded
    return res
Example 3: steepest_decent
# Required import: from scipy.optimize import OptimizeResult [as alias]
# Or: from scipy.optimize.OptimizeResult import nit [as alias]
def steepest_decent(fun, x0, fprime, args, tol=1.0e-4, maxiter=1000,
                    callback=None):
    '''Steepest descent method.'''
    # Requires `import numpy`; armijo_stepsize is a line-search helper
    # defined elsewhere in the source module (see the sketch after this example).
    x = numpy.array(x0)
    for itr in range(maxiter):
        direction = -1 * fprime(x, *args)
        alpha, obj_current, obj_next = armijo_stepsize(fun, x, fprime, direction, args=args)
        if numpy.linalg.norm(obj_current - obj_next) < tol:
            break
        x = x + alpha * direction
        if callback is not None:
            callback(x)
    result = OptimizeResult()
    result.x = x
    result.fun = fun(x, *args)
    result.nit = itr    # iterations completed before convergence or maxiter
    return result
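The armijo_stepsize helper used in Examples 3 and 4 is not shown on this page. Below is a minimal backtracking line-search sketch with the same return signature (step size, current objective value, objective value after the step); the parameter names alpha0, rho and c1 are illustrative assumptions, not taken from the original source:

import numpy

def armijo_stepsize(fun, x, fprime, direction, args=(), alpha0=1.0, rho=0.5, c1=1e-4):
    """Backtracking line search satisfying the Armijo (sufficient decrease) condition."""
    obj_current = fun(x, *args)
    slope = numpy.dot(fprime(x, *args), direction)
    alpha = alpha0
    obj_next = fun(x + alpha * direction, *args)
    # Shrink the step until f(x + alpha*d) <= f(x) + c1 * alpha * grad(f)^T d.
    while obj_next > obj_current + c1 * alpha * slope:
        alpha *= rho
        obj_next = fun(x + alpha * direction, *args)
    return alpha, obj_current, obj_next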
Example 4: newton_method
# Required import: from scipy.optimize import OptimizeResult [as alias]
# Or: from scipy.optimize.OptimizeResult import nit [as alias]
def newton_method(fun, x0, fprime, args, tol=1.0e-4, maxiter=1000,
                  callback=None):
    '''Newton's method, with the step size chosen by the Armijo condition.'''
    # Requires `import numpy`; expects args = (A, b), where A plays the role
    # of the (constant) Hessian in the Newton step.
    x = numpy.array(x0)
    A, b = args
    for itr in range(maxiter):
        direction = -1 * numpy.linalg.solve(A, fprime(x, *args))
        alpha, obj_current, obj_next = armijo_stepsize(fun, x, fprime, direction, args=args)
        if numpy.linalg.norm(obj_current - obj_next) < tol:
            break
        x = x + alpha * direction
        if callback is not None:
            callback(x)
    result = OptimizeResult()
    result.x = x
    result.fun = fun(x, *args)
    result.nit = itr
    return result
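A hedged usage sketch for Example 4, assuming the armijo_stepsize sketch above and a quadratic test problem f(x) = 0.5 x^T A x - b^T x (the matrices below are made up for illustration):

import numpy

A = numpy.array([[3.0, 1.0],
                 [1.0, 2.0]])
b = numpy.array([1.0, -1.0])

def fun(x, A, b):
    return 0.5 * x @ A @ x - b @ x

def fprime(x, A, b):
    return A @ x - b          # gradient of the quadratic

res = newton_method(fun, numpy.zeros(2), fprime, args=(A, b))
print(res.x, res.fun, res.nit)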
Example 5: dual_annealing
# Required import: from scipy.optimize import OptimizeResult [as alias]
# Or: from scipy.optimize.OptimizeResult import nit [as alias]
def dual_annealing(func, x0, bounds, args=(), maxiter=1000,
                   local_search_options={}, initial_temp=5230.,
                   restart_temp_ratio=2.e-5, visit=2.62, accept=-5.0,
                   maxfun=1e7, seed=None, no_local_search=False,
                   callback=None):
    """
    Find the global minimum of a function using Dual Annealing.

    Parameters
    ----------
    func : callable
        The objective function to be minimized. Must be in the form
        ``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
        and ``args`` is a tuple of any additional fixed parameters needed to
        completely specify the function.
    x0 : ndarray, shape(n,)
        Coordinates of a single initial starting point. If ``None`` is provided,
        initial coordinates are automatically generated (using the ``reset``
        method from the internal ``EnergyState`` class).
    bounds : sequence, shape (n, 2)
        Bounds for variables. ``(min, max)`` pairs for each element in ``x``,
        defining bounds for the objective function parameter.
    args : tuple, optional
        Any additional fixed parameters needed to completely specify the
        objective function.
    maxiter : int, optional
        The maximum number of global search iterations. Default value is 1000.
    local_search_options : dict, optional
        Extra keyword arguments to be passed to the local minimizer
        (`minimize`). Some important options could be:
        ``method`` for the minimizer method to use and ``args`` for
        objective function additional arguments.
    initial_temp : float, optional
        The initial temperature; use higher values to facilitate a wider
        search of the energy landscape, allowing dual_annealing to escape
        local minima that it is trapped in. Default value is 5230. Range is
        (0.01, 5.e4].
    restart_temp_ratio : float, optional
        During the annealing process, the temperature is decreasing; when it
        reaches ``initial_temp * restart_temp_ratio``, the reannealing process
        is triggered. Default value of the ratio is 2e-5. Range is (0, 1).
    visit : float, optional
        Parameter for the visiting distribution. Default value is 2.62. Higher
        values give the visiting distribution a heavier tail, which makes
        the algorithm jump to a more distant region. The value range is (0, 3].
    accept : float, optional
        Parameter for the acceptance distribution. It is used to control the
        probability of acceptance. The lower the acceptance parameter, the
        smaller the probability of acceptance. Default value is -5.0 with
        a range (-1e4, -5].
    maxfun : int, optional
        Soft limit for the number of objective function calls. If the
        algorithm is in the middle of a local search, this number will be
        exceeded, and the algorithm will stop just after the local search is
        done. Default value is 1e7.
    seed : {int or `numpy.random.RandomState` instance}, optional
        If `seed` is not specified, the `numpy.random.RandomState` singleton is
        used.
        If `seed` is an int, a new ``RandomState`` instance is used,
        seeded with `seed`.
        If `seed` is already a ``RandomState`` instance, then that
        instance is used.
        Specify `seed` for repeatable minimizations. The random numbers
        generated with this seed only affect the visiting distribution
        function and new coordinates generation.
    no_local_search : bool, optional
        If `no_local_search` is set to True, a traditional Generalized
        Simulated Annealing will be performed with no local search
        strategy applied.
    callback : callable, optional
        A callback function with signature ``callback(x, f, context)``,
        which will be called for all minima found.
        ``x`` and ``f`` are the coordinates and function value of the
        latest minimum found, and ``context`` has value in [0, 1, 2], with the
        following meaning:

        - 0: minimum detected in the annealing process.
        - 1: detection occurred in the local search process.
        - 2: detection done in the dual annealing process.

        If the callback implementation returns True, the algorithm will stop.

    Returns
    -------
    res : OptimizeResult
        The optimization result represented as an `OptimizeResult` object.
        Important attributes are: ``x`` the solution array, ``fun`` the value
        of the function at the solution, and ``message`` which describes the
        cause of the termination.
        See `OptimizeResult` for a description of other attributes.

    Notes
    -----
    This function implements the Dual Annealing optimization. This stochastic
    approach derived from [3]_ combines the generalization of CSA (Classical
    Simulated Annealing) and FSA (Fast Simulated Annealing) [1]_ [2]_ coupled
    to a strategy for applying a local search on accepted locations [4]_.
    An alternative implementation of this same algorithm is described in [5]_
    and benchmarks are presented in [6]_. This approach introduces an advanced
    method to refine the solution found by the generalized annealing
#......... remainder of the code omitted here .........
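A hedged usage sketch, assuming the released scipy.optimize.dual_annealing (whose public signature takes bounds as the second argument, unlike the development snapshot shown above); the Rastrigin test function and the seed are illustrative choices:

import numpy as np
from scipy.optimize import dual_annealing

def rastrigin(x):
    # Classic multimodal test function; the global minimum is 0 at the origin.
    return np.sum(x * x - 10.0 * np.cos(2.0 * np.pi * x)) + 10.0 * x.size

bounds = [(-5.12, 5.12)] * 2
res = dual_annealing(rastrigin, bounds, maxiter=500, seed=1234)
print(res.x, res.fun, res.nit)   # res.nit holds the number of global iterations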
Example 6: optimize_stiefel
# Required import: from scipy.optimize import OptimizeResult [as alias]
# Or: from scipy.optimize.OptimizeResult import nit [as alias]
def optimize_stiefel(func, X0, args=(), tau_max=.5, max_it=1, tol=1e-6,
                     disp=False, tau_find_freq=100):
    """
    Optimize a function over a Stiefel manifold.

    :param func: Function to be optimized
    :param X0: Initial point for line search
    :param tau_max: Maximum step size
    :param max_it: Maximum number of iterations
    :param tol: Tolerance criteria to terminate line search
    :param disp: Choose whether to display output
    :param args: Extra arguments passed to the function
    """
    # Requires `import numpy as np`; LSFunc, compute_A, Y_func and the
    # `pybgo` package are defined in the source project.
    tol = float(tol)
    assert tol > 0, 'Tolerance must be positive'
    max_it = int(max_it)
    assert max_it > 0, 'The maximum number of iterations must be a positive '\
                       + 'integer'
    tau_max = float(tau_max)
    assert tau_max > 0, 'The parameter `tau_max` must be positive.'
    k = 0
    X = X0.copy()
    nit = 0
    nfev = 0
    success = False
    if disp:
        print('Stiefel Optimization'.center(80))
        print('{0:4s} {1:11s} {2:5s}'.format('It', 'F', '(F - F_old) / F_old'))
        print('-' * 30)
    ls_func = LSFunc()
    ls_func.func = func
    decrease_tau = False
    tau_max0 = tau_max
    while nit <= max_it:
        nit += 1
        F, G = func(X, *args)
        F_old = F
        nfev += 1
        A = compute_A(G, X)
        ls_func.A = A
        ls_func.X = X
        ls_func.func_args = args
        ls_func.tau_max = tau_max
        increased_tau = False
        if nit == 1 or decrease_tau or nit % tau_find_freq == 0:
            # Need to minimize ls_func with respect to each argument
            tau_init = np.linspace(-10, 0., 3)[:, None]
            tau_d = np.linspace(-10, 0., 50)[:, None]
            tau_all, F_all = pybgo.minimize(ls_func, tau_init, tau_d, fixed_noise=1e-16,
                                            add_at_least=1, tol=1e-2, scale=True,
                                            train_every=1)[:2]
            nfev += tau_all.shape[0]
            idx = np.argmin(F_all)
            tau = np.exp(tau_all[idx, 0]) * tau_max
            if tau_max - tau <= 1e-6:
                tau_max = 1.2 * tau_max
                if disp:
                    print('increasing tau_max to {0:1.5e}'.format(tau_max))
                increased_tau = True
            if decrease_tau:
                tau_max = .8 * tau_max
                if disp:
                    print('decreasing max_tau to {0:1.5e}'.format(tau_max))
                decrease_tau = False
            F = F_all[idx, 0]
        else:
            F = ls_func([np.log(tau / tau_max)])
        delta_F = (F_old - F) / np.abs(F_old)
        if delta_F < 0:
            if disp:
                print('*** backtracking')
            nit -= 1
            decrease_tau = True
            continue
        X_old = X
        X = Y_func(tau, X, A)
        if disp:
            print('{0:4s} {1:1.5e} {2:5e} tau = {3:1.3e}, tau_max = {4:1.3e}'.format(
                str(nit).zfill(4), F, delta_F, tau, tau_max))
        if delta_F <= tol:
            if disp:
                print('*** Converged ***')
            success = True
            break
    res = OptimizeResult()
    res.tau_max = tau_max
    res.X = X
    res.nfev = nfev
    res.nit = nit          # number of accepted iterations performed
    res.fun = F
    res.success = success
    return res
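The helpers compute_A and Y_func used above are not shown on this page. Below is a hedged sketch following the standard Cayley-transform curvilinear search on the Stiefel manifold (Wen & Yin, 2013); the original helpers in the source project may differ in detail:

import numpy as np

def compute_A(G, X):
    # Skew-symmetric matrix A = G X^T - X G^T built from the gradient G at X.
    return G @ X.T - X @ G.T

def Y_func(tau, X, A):
    # Cayley retraction Y(tau) = (I + tau/2 A)^{-1} (I - tau/2 A) X,
    # which stays on the Stiefel manifold when X^T X = I.
    n = X.shape[0]
    I = np.eye(n)
    return np.linalg.solve(I + 0.5 * tau * A, (I - 0.5 * tau * A) @ X)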