This article collects typical usage examples of the Python method scipy.optimize.OptimizeResult.x. If you have been wondering how OptimizeResult.x is used in practice, the curated code samples below may help. You can also read more about the containing class, scipy.optimize.OptimizeResult.
Below are 15 code examples of OptimizeResult.x, sorted by popularity by default.
Example 1: result
# Required import: from scipy.optimize import OptimizeResult [as alias]
# Or: from scipy.optimize.OptimizeResult import x [as alias]
def result(self):
    """ The OptimizeResult """
    res = OptimizeResult()
    res.x = self._xmin
    res.fun = self._fvalue
    res.message = self._message
    res.nit = self._step_record
    return res
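OptimizeResult is a dict subclass with attribute access, so a result like the one built in this property can be constructed and inspected directly. A minimal sketch (the field values here are made up for illustration):

import numpy as np
from scipy.optimize import OptimizeResult

res = OptimizeResult()
res.x = np.array([1.0, 2.0])   # hypothetical minimizer
res.fun = 0.5                  # hypothetical objective value at res.x
res.message = 'converged'
res.nit = 42
print(res.x, res.fun)          # attribute-style access
print(res['x'], res['fun'])    # dict-style access works as well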
Example 2: result
# Required import: from scipy.optimize import OptimizeResult [as alias]
# Or: from scipy.optimize.OptimizeResult import x [as alias]
def result(self):
    """ The OptimizeResult """
    res = OptimizeResult()
    res.x = self.es.xbest
    res.fun = self.es.ebest
    res.nit = self._iter
    res.ncall = self.owf.nb_fun_call
    return res
Example 3: scipy_nlopt_cobyla
# Required import: from scipy.optimize import OptimizeResult [as alias]
# Or: from scipy.optimize.OptimizeResult import x [as alias]
def scipy_nlopt_cobyla(*args, **kwargs):
    """Wrap the nlopt library's COBYLA optimizer in a scipy.optimize-compatible interface.
    parameters:
        args[0]: target, the function to be minimized
        args[1]: x0, starting point for the minimization
        bounds: list of bounds for the movement
            [[min, max], [min, max], ...]
        ftol_rel: same as in nlopt
        xtol_rel: same as in nlopt
            at least one of the *tol_rel values should be specified
    returns:
        OptimizeResult() object with x, fun, and success set.
        status is not set when nlopt.RoundoffLimited is raised
    """
    answ = OptimizeResult()
    bounds = kwargs['bounds']
    opt = nlopt.opt(nlopt.LN_COBYLA, len(args[1]))
    opt.set_lower_bounds([i[0] for i in bounds])
    opt.set_upper_bounds([i[1] for i in bounds])
    if 'ftol_rel' in kwargs:
        opt.set_ftol_rel(kwargs['ftol_rel'])
    if 'xtol_rel' in kwargs:
        # the original called set_ftol_rel here, which looks like a copy-paste slip
        opt.set_xtol_rel(kwargs['xtol_rel'])
    opt.set_min_objective(args[0])
    x0 = list(args[1])
    try:
        x1 = opt.optimize(x0)
    except nlopt.RoundoffLimited:
        answ.x = x0
        answ.fun = args[0](x0)
        answ.success = False
        answ.message = 'nlopt.RoundoffLimited'
        return answ
    answ.x = x1
    answ.fun = args[0](x1)
    # nlopt return codes: 3 = FTOL_REACHED, 4 = XTOL_REACHED
    answ.success = opt.last_optimize_result() in (3, 4)
    answ.status = opt.last_optimize_result()
    if answ.fun != opt.last_optimum_value():
        print("Something's wrong,", answ.fun, opt.last_optimum_value())
    return answ
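A hypothetical call to this wrapper could look as follows. The objective accepts an optional grad argument because nlopt invokes it as f(x, grad), while the wrapper also calls it with x alone when reporting the final value; all names and values here are illustrative only.

import nlopt  # required by the wrapper above

def target(x, grad=None):
    # simple bowl; COBYLA is derivative-free, so grad is ignored
    return (x[0] - 1.0) ** 2 + (x[1] + 2.0) ** 2

res = scipy_nlopt_cobyla(target, [0.0, 0.0],
                         bounds=[[-5, 5], [-5, 5]],
                         ftol_rel=1e-8)
print(res.x, res.fun, res.success)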
Example 4: setup_method
# Required import: from scipy.optimize import OptimizeResult [as alias]
# Or: from scipy.optimize.OptimizeResult import x [as alias]
def setup_method(self):
    self.x0 = np.array(1)
    self.f0 = 0
    minres = OptimizeResult()
    minres.x = self.x0
    minres.fun = self.f0
    self.storage = Storage(minres)
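Storage here is the small helper from scipy's basinhopping internals that remembers the lowest minimum found so far; examples 4, 6 and 7 all exercise it. A minimal sketch of the assumed behaviour, not the actual scipy source:

from scipy.optimize import OptimizeResult

class Storage:
    """Keep the OptimizeResult with the lowest ``fun`` seen so far."""
    def __init__(self, minres):
        self._minres = minres

    def update(self, minres):
        # accept the candidate only if it improves on the stored minimum
        if minres.fun < self._minres.fun:
            self._minres = minres
            return True
        return False

    def get_lowest(self):
        return self._minres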
Example 5: _tree_minimize
# Required import: from scipy.optimize import OptimizeResult [as alias]
# Or: from scipy.optimize.OptimizeResult import x [as alias]
def _tree_minimize(func, dimensions, base_estimator, n_calls,
                   n_points, n_random_starts, random_state=None):
    rng = check_random_state(random_state)
    space = Space(dimensions)

    # Initialize with random points
    if n_random_starts <= 0:
        raise ValueError(
            "Expected n_random_starts > 0, got %d" % n_random_starts)
    if n_calls <= 0:
        raise ValueError(
            "Expected n_calls > 0, got %d" % n_calls)
    if n_calls < n_random_starts:
        raise ValueError(
            "Expected n_calls >= %d, got %d" % (n_random_starts, n_calls))

    Xi = space.rvs(n_samples=n_random_starts, random_state=rng)
    yi = [func(x) for x in Xi]
    if np.ndim(yi) != 1:
        raise ValueError(
            "The function to be optimized should return a scalar")

    # Tree-based optimization loop
    models = []
    n_model_iter = n_calls - n_random_starts
    for i in range(n_model_iter):
        rgr = clone(base_estimator)
        rgr.fit(space.transform(Xi), yi)
        models.append(rgr)

        # `rgr` predicts constants for each leaf which means that the EI
        # has zero gradient over large distances. As a result we can not
        # use gradient based optimizers like BFGS, so using random sampling
        # for the moment.
        X = space.transform(space.rvs(n_samples=n_points,
                                      random_state=rng))
        values = -gaussian_ei(X, rgr, np.min(yi))
        next_x = X[np.argmin(values)]
        next_x = space.inverse_transform(next_x.reshape((1, -1)))[0]
        next_y = func(next_x)
        Xi = np.vstack((Xi, next_x))
        yi.append(next_y)

    res = OptimizeResult()
    best = np.argmin(yi)
    res.x = Xi[best]
    res.fun = yi[best]
    res.func_vals = np.array(yi)
    res.x_iters = Xi
    res.models = models
    res.space = space
    return res
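A hedged usage sketch, assuming the helpers used above (Space, gaussian_ei, check_random_state, clone) are importable as in the original skopt module; the objective and settings are made up:

from sklearn.tree import DecisionTreeRegressor

def objective(x):
    return (x[0] - 2.0) ** 2 + 0.5

res = _tree_minimize(objective, [(-5.0, 5.0)],
                     base_estimator=DecisionTreeRegressor(),
                     n_calls=30, n_points=1000, n_random_starts=10,
                     random_state=0)
print(res.x, res.fun)   # best point and best value after 30 evaluations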
Example 6: test_higher_f_rejected
# Required import: from scipy.optimize import OptimizeResult [as alias]
# Or: from scipy.optimize.OptimizeResult import x [as alias]
def test_higher_f_rejected(self):
    new_minres = OptimizeResult()
    new_minres.x = self.x0 + 1
    new_minres.fun = self.f0 + 1

    ret = self.storage.update(new_minres)
    minres = self.storage.get_lowest()
    assert_equal(self.x0, minres.x)
    assert_equal(self.f0, minres.fun)
    assert_(not ret)
Example 7: test_lower_f_accepted
# Required import: from scipy.optimize import OptimizeResult [as alias]
# Or: from scipy.optimize.OptimizeResult import x [as alias]
def test_lower_f_accepted(self):
    new_minres = OptimizeResult()
    new_minres.x = self.x0 + 1
    new_minres.fun = self.f0 - 1

    ret = self.storage.update(new_minres)
    minres = self.storage.get_lowest()
    assert_(self.x0 != minres.x)
    assert_(self.f0 != minres.fun)
    assert_(ret)
Example 8: scipy_graduate_walk
# Required import: from scipy.optimize import OptimizeResult [as alias]
# Or: from scipy.optimize.OptimizeResult import x [as alias]
def scipy_graduate_walk(*args, **kwargs):
    """Scipy-compatible graduate_walk function wrapper.
    parameters:
        args[0]: target, the function to be minimized
        args[1]: x0, starting point for the minimization
        dx=1e-8: step used when moving the point
        dx_start=0.1: starting value for the dx step. Must be larger than dx.
        dx_step=0.1: change of dx on each iteration. Should be less than 1.
        diagonal=False: defines directions for point movements. See
            generate_all_directions
            generate_nondiagonal_directions
            for more information.
        bounds=None: list of bounds for the movement
            [[min, max], [min, max], ...]
            if set to None, bounds are ignored
        ytol_rel=1e-8: relative tolerance for stopping the search. See
            graduate_walk for more info.
    returns:
        OptimizeResult() object with x, fun, and nfev set.
        success is always set to True, status to 1
    """
    target = args[0]
    x0 = args[1]
    dx = kwargs.get('dx', 1e-8)
    dx_start = kwargs.get('dx_start', 0.1)
    dx_step = kwargs.get('dx_step', 0.1)
    if kwargs.get('diagonal', False):
        directions = generate_all_directions(len(x0))
    else:
        directions = generate_nondiagonal_directions(len(x0))
    if kwargs.get('bounds') is not None:
        bounds = Bounds(kwargs['bounds'])
    else:
        bounds = None
    ytol_rel = kwargs.get('ytol_rel', 1e-8)
    res = graduate_walk(target, x0, dx, directions, dx_start, dx_step,
                        bounds=bounds, ytol_rel=ytol_rel)
    answ = OptimizeResult()
    answ.x = res['x0']
    answ.fun = res['fval']
    answ.success = True
    answ.status = 1
    answ.nfev = res['fnval']
    return answ
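graduate_walk, generate_all_directions, generate_nondiagonal_directions and Bounds are helpers defined elsewhere in that project, so the following call is only a hypothetical sketch of how the wrapper is meant to be used:

import numpy as np

def target(x):
    return float(np.sum((np.asarray(x) - 1.0) ** 2))

res = scipy_graduate_walk(target, [0.0, 0.0],
                          dx=1e-6, diagonal=True,
                          bounds=[[-2, 2], [-2, 2]])
print(res.x, res.fun, res.nfev)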
Example 9: create_result
# Required import: from scipy.optimize import OptimizeResult [as alias]
# Or: from scipy.optimize.OptimizeResult import x [as alias]
def create_result(Xi, yi, space=None, rng=None, specs=None, models=None):
    """
    Initialize an `OptimizeResult` object.

    Parameters
    ----------
    * `Xi` [list of lists, shape=(n_iters, n_features)]:
        Location of the minimum at every iteration.
    * `yi` [array-like, shape=(n_iters,)]:
        Minimum value obtained at every iteration.
    * `space` [Space instance, optional]:
        Search space.
    * `rng` [RandomState instance, optional]:
        State of the random state.
    * `specs` [dict, optional]:
        Call specifications.
    * `models` [list, optional]:
        List of fit surrogate models.

    Returns
    -------
    * `res` [`OptimizeResult`, scipy object]:
        OptimizeResult instance with the required information.
    """
    res = OptimizeResult()
    yi = np.asarray(yi)
    if np.ndim(yi) == 2:
        res.log_time = np.ravel(yi[:, 1])
        yi = np.ravel(yi[:, 0])
    best = np.argmin(yi)
    res.x = Xi[best]
    res.fun = yi[best]
    res.func_vals = yi
    res.x_iters = Xi
    res.models = models
    res.space = space
    res.random_state = rng
    res.specs = specs
    return res
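Because create_result only needs the evaluated points and their objective values, it is easy to exercise on its own. A minimal sketch with made-up data:

Xi = [[0.0], [1.5], [2.1], [3.0]]   # points that were evaluated
yi = [4.0, 0.25, 0.01, 1.0]         # objective value at each point

res = create_result(Xi, yi)
print(res.x, res.fun)               # -> [2.1] 0.01
print(res.func_vals)                # full history as an array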
Example 10: steepest_decent
# Required import: from scipy.optimize import OptimizeResult [as alias]
# Or: from scipy.optimize.OptimizeResult import x [as alias]
def steepest_decent(fun, x0, fprime, args, tol=1.0e-4, maxiter=1000,
                    callback=None):
    '''Steepest descent method.
    '''
    x = numpy.array(x0)
    for itr in range(maxiter):
        direction = -1 * fprime(x, *args)
        alpha, obj_current, obj_next = armijo_stepsize(fun, x, fprime,
                                                       direction, args=args)
        if numpy.linalg.norm(obj_current - obj_next) < tol:
            break
        x = x + alpha * direction
        if callback is not None:
            callback(x)
    result = OptimizeResult()
    result.x = x
    result.fun = fun(x, *args)
    result.nit = itr
    return result
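armijo_stepsize is not shown in this example; from the call site it apparently returns the accepted step size together with the objective value before and after the step. A minimal backtracking line-search sketch under that assumption:

import numpy

def armijo_stepsize(fun, x, fprime, direction, args=(),
                    alpha=1.0, beta=0.5, c1=1e-4):
    """Shrink alpha until the Armijo (sufficient decrease) condition holds."""
    obj_current = fun(x, *args)
    slope = numpy.dot(fprime(x, *args), direction)
    while fun(x + alpha * direction, *args) > obj_current + c1 * alpha * slope:
        alpha *= beta
    obj_next = fun(x + alpha * direction, *args)
    return alpha, obj_current, obj_next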
Example 11: newton_method
# Required import: from scipy.optimize import OptimizeResult [as alias]
# Or: from scipy.optimize.OptimizeResult import x [as alias]
def newton_method(fun, x0, fprime, args, tol=1.0e-4, maxiter=1000,
                  callback=None):
    '''Newton's method, with the Armijo condition for the step size.
    '''
    x = numpy.array(x0)
    A, b = args
    for itr in range(maxiter):
        direction = -1 * numpy.linalg.solve(A, fprime(x, *args))
        alpha, obj_current, obj_next = armijo_stepsize(fun, x, fprime,
                                                       direction, args=args)
        if numpy.linalg.norm(obj_current - obj_next) < tol:
            break
        x = x + alpha * direction
        if callback is not None:
            callback(x)
    result = OptimizeResult()
    result.x = x
    result.fun = fun(x, *args)
    result.nit = itr
    return result
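Both routines expect fun(x, A, b) and fprime(x, A, b). For the quadratic 0.5 x^T A x - b^T x the Hessian is exactly A, so the Newton direction is exact and the solver converges in essentially one step. A hedged usage sketch (armijo_stepsize as sketched after example 10):

import numpy

def fun(x, A, b):
    return 0.5 * x @ A @ x - b @ x

def fprime(x, A, b):
    return A @ x - b

A = numpy.array([[3.0, 1.0], [1.0, 2.0]])
b = numpy.array([1.0, 1.0])

res = newton_method(fun, numpy.zeros(2), fprime, args=(A, b))
print(res.x)              # close to numpy.linalg.solve(A, b)
print(res.fun, res.nit)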
Example 12: solve
# Required import: from scipy.optimize import OptimizeResult [as alias]
# Or: from scipy.optimize.OptimizeResult import x [as alias]
def solve(self):
    """
    Runs the DifferentialEvolutionSolver.

    Returns
    -------
    res : OptimizeResult
        The optimization result represented as a ``OptimizeResult`` object.
        Important attributes are: ``x`` the solution array, ``success`` a
        Boolean flag indicating if the optimizer exited successfully and
        ``message`` which describes the cause of the termination. See
        `OptimizeResult` for a description of other attributes. If `polish`
        was employed, and a lower minimum was obtained by the polishing,
        then OptimizeResult also contains the ``jac`` attribute.
    """
    nit, warning_flag = 0, False
    status_message = _status_message['success']

    # The population may have just been initialized (all entries are
    # np.inf). If it has you have to calculate the initial energies.
    # Although this is also done in the evolve generator it's possible
    # that someone can set maxiter=0, at which point we still want the
    # initial energies to be calculated (the following loop isn't run).
    if np.all(np.isinf(self.population_energies)):
        self.population_energies[:] = self._calculate_population_energies(
            self.population)
        self._promote_lowest_energy()

    # do the optimisation.
    for nit in range(1, self.maxiter + 1):
        # evolve the population by a generation
        try:
            next(self)
        except StopIteration:
            warning_flag = True
            if self._nfev > self.maxfun:
                status_message = _status_message['maxfev']
            elif self._nfev == self.maxfun:
                status_message = ('Maximum number of function evaluations'
                                  ' has been reached.')
            break

        if self.disp:
            print("differential_evolution step %d: f(x)= %g"
                  % (nit,
                     self.population_energies[0]))

        # should the solver terminate?
        convergence = self.convergence

        if (self.callback and
                self.callback(self._scale_parameters(self.population[0]),
                              convergence=self.tol / convergence) is True):
            warning_flag = True
            status_message = ('callback function requested stop early '
                              'by returning True')
            break

        if np.any(np.isinf(self.population_energies)):
            intol = False
        else:
            intol = (np.std(self.population_energies) <=
                     self.atol +
                     self.tol * np.abs(np.mean(self.population_energies)))
        if warning_flag or intol:
            break
    else:
        status_message = _status_message['maxiter']
        warning_flag = True

    DE_result = OptimizeResult(
        x=self.x,
        fun=self.population_energies[0],
        nfev=self._nfev,
        nit=nit,
        message=status_message,
        success=(warning_flag is not True))

    if self.polish:
        result = minimize(self.func,
                          np.copy(DE_result.x),
                          method='L-BFGS-B',
                          bounds=self.limits.T)

        self._nfev += result.nfev
        DE_result.nfev = self._nfev

        if result.fun < DE_result.fun:
            DE_result.fun = result.fun
            DE_result.x = result.x
            DE_result.jac = result.jac
            # to keep internal state consistent
            self.population_energies[0] = result.fun
            self.population[0] = self._unscale_parameters(result.x)

    return DE_result
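In practice this solve method is reached through the public scipy.optimize.differential_evolution function, which builds the solver and hands back the OptimizeResult constructed above. A short usage sketch:

from scipy.optimize import differential_evolution, rosen

bounds = [(-3, 3), (-3, 3)]
result = differential_evolution(rosen, bounds, seed=1, polish=True)
print(result.x, result.fun)        # solution array and best objective value
print(result.nit, result.nfev, result.success, result.message)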
Example 13: dummy_minimize
# Required import: from scipy.optimize import OptimizeResult [as alias]
# Or: from scipy.optimize.OptimizeResult import x [as alias]
def dummy_minimize(func, dimensions, n_calls=100, random_state=None):
    """Random search by uniform sampling within the given bounds.

    Parameters
    ----------
    * `func` [callable]:
        Function to minimize. Should take an array of parameters and
        return the function value.
    * `dimensions` [list, shape=(n_dims,)]:
        List of search space dimensions.
        Each search dimension can be defined either as
        - a `(upper_bound, lower_bound)` tuple (for `Real` or `Integer`
          dimensions),
        - a `(upper_bound, lower_bound, "prior")` tuple (for `Real`
          dimensions),
        - a list of categories (for `Categorical` dimensions), or
        - an instance of a `Dimension` object (`Real`, `Integer` or
          `Categorical`).
    * `n_calls` [int, default=100]:
        Number of calls to `func` to find the minimum.
    * `random_state` [int, RandomState instance, or None (default)]:
        Set random state to something other than None for reproducible
        results.

    Returns
    -------
    * `res` [`OptimizeResult`, scipy object]:
        The optimization result returned as an OptimizeResult object.
        Important attributes are:
        - `x` [float]: location of the minimum.
        - `fun` [float]: function value at the minimum.
        - `x_iters` [array]: location of function evaluation for each
          iteration.
        - `func_vals` [array]: function value for each iteration.
        - `space` [Space]: the optimisation space.
        For more details related to the OptimizeResult object, refer to
        http://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.OptimizeResult.html
    """
    rng = check_random_state(random_state)
    space = Space(dimensions)
    X = space.rvs(n_samples=n_calls, random_state=rng)

    init_y = func(X[0])
    if not np.isscalar(init_y):
        raise ValueError(
            "The function to be optimized should return a scalar")
    y = np.asarray([init_y] + [func(X[i]) for i in range(1, n_calls)])

    res = OptimizeResult()
    best = np.argmin(y)
    res.x = X[best]
    res.fun = y[best]
    res.func_vals = y
    res.x_iters = X
    res.space = space
    return res
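Since dummy_minimize is plain random search, it only needs an objective and the bounds. A hedged usage sketch, assuming Space and check_random_state are importable as in the original skopt module:

def objective(params):
    x, = params
    return (x - 0.5) ** 2

res = dummy_minimize(objective, [(-2.0, 2.0)], n_calls=50, random_state=0)
print(res.x, res.fun)     # best randomly sampled point and its value
print(len(res.x_iters))   # all 50 evaluated points are kept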
Example 14: _tree_minimize
# Required import: from scipy.optimize import OptimizeResult [as alias]
# Or: from scipy.optimize.OptimizeResult import x [as alias]
def _tree_minimize(func, dimensions, base_estimator, n_calls,
                   n_points, n_random_starts, x0=None, y0=None,
                   random_state=None, acq="EI", xi=0.01, kappa=1.96):
    rng = check_random_state(random_state)
    space = Space(dimensions)

    # Initialize with provided points (x0 and y0) and/or random points
    if n_calls <= 0:
        raise ValueError(
            "Expected `n_calls` > 0, got %d" % n_calls)

    if x0 is None:
        x0 = []
    elif not isinstance(x0[0], list):
        x0 = [x0]

    if not isinstance(x0, list):
        raise ValueError("`x0` should be a list, but got %s" % type(x0))

    n_init_func_calls = len(x0) if y0 is not None else 0
    n_total_init_calls = n_random_starts + n_init_func_calls

    if n_total_init_calls <= 0:
        # if x0 is not provided and n_random_starts is 0 then
        # it will ask for n_random_starts to be > 0.
        raise ValueError(
            "Expected `n_random_starts` > 0, got %d" % n_random_starts)

    if n_calls < n_total_init_calls:
        raise ValueError(
            "Expected `n_calls` >= %d, got %d" % (n_total_init_calls, n_calls))

    if y0 is None and x0:
        y0 = [func(x) for x in x0]
    elif x0:
        if isinstance(y0, Iterable):
            y0 = list(y0)
        elif isinstance(y0, numbers.Number):
            y0 = [y0]
        else:
            raise ValueError(
                "`y0` should be an iterable or a scalar, got %s" % type(y0))
        if len(x0) != len(y0):
            raise ValueError("`x0` and `y0` should have the same length")
        if not all(map(np.isscalar, y0)):
            raise ValueError("`y0` elements should be scalars")
    else:
        y0 = []

    Xi = x0 + space.rvs(n_samples=n_random_starts, random_state=rng)
    yi = y0 + [func(x) for x in Xi[len(x0):]]
    if np.ndim(yi) != 1:
        raise ValueError("`func` should return a scalar")

    # Tree-based optimization loop
    models = []

    n_model_iter = n_calls - n_total_init_calls
    for i in range(n_model_iter):
        rgr = clone(base_estimator)
        rgr.fit(space.transform(Xi), yi)
        models.append(rgr)

        # `rgr` predicts constants for each leaf which means that the EI
        # has zero gradient over large distances. As a result we can not
        # use gradient based optimizers like BFGS, so using random sampling
        # for the moment.
        X = space.transform(space.rvs(n_samples=n_points,
                                      random_state=rng))
        values = _gaussian_acquisition(
            X=X, model=rgr, y_opt=np.min(yi), method=acq,
            xi=xi, kappa=kappa)
        next_x = X[np.argmin(values)]
        next_x = space.inverse_transform(next_x.reshape((1, -1)))[0]
        next_y = func(next_x)
        Xi.append(next_x)
        yi.append(next_y)

    res = OptimizeResult()
    best = np.argmin(yi)
    res.x = Xi[best]
    res.fun = yi[best]
    res.func_vals = np.array(yi)
    res.x_iters = Xi
    res.models = models
    res.space = space
    res.random_state = rng
    return res
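This variant also accepts previously evaluated points via x0 and y0, which lets a run be warm-started. A hedged sketch under the same assumptions about the skopt helpers; the data are made up:

from sklearn.ensemble import ExtraTreesRegressor

def objective(x):
    return (x[0] - 2.0) ** 2

x0 = [[0.0], [3.0]]   # two points evaluated earlier
y0 = [4.0, 1.0]       # their objective values

res = _tree_minimize(objective, [(-5.0, 5.0)],
                     base_estimator=ExtraTreesRegressor(n_estimators=20),
                     n_calls=25, n_points=500, n_random_starts=5,
                     x0=x0, y0=y0, acq="EI", random_state=0)
print(res.x, res.fun)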
Example 15: solve
# Required import: from scipy.optimize import OptimizeResult [as alias]
# Or: from scipy.optimize.OptimizeResult import x [as alias]
def solve(self):
    """
    Runs the DifferentialEvolutionSolver.

    Returns
    -------
    res : OptimizeResult
        The optimization result represented as a ``OptimizeResult`` object.
        Important attributes are: ``x`` the solution array, ``success`` a
        Boolean flag indicating if the optimizer exited successfully and
        ``message`` which describes the cause of the termination. See
        `OptimizeResult` for a description of other attributes. If polish
        was employed, then OptimizeResult also contains the ``hess_inv`` and
        ``jac`` attributes.
    """
    nfev, nit, warning_flag = 0, 0, False
    status_message = _status_message['success']

    # calculate energies to start with
    for index, candidate in enumerate(self.population):
        parameters = self._scale_parameters(candidate)
        self.population_energies[index] = self.func(parameters,
                                                    *self.args)
        nfev += 1

        if nfev > self.maxfun:
            warning_flag = True
            status_message = _status_message['maxfev']
            break

    minval = np.argmin(self.population_energies)

    # put the lowest energy into the best solution position.
    lowest_energy = self.population_energies[minval]
    self.population_energies[minval] = self.population_energies[0]
    self.population_energies[0] = lowest_energy

    self.population[[0, minval], :] = self.population[[minval, 0], :]

    if warning_flag:
        return OptimizeResult(
            x=self.x,
            fun=self.population_energies[0],
            nfev=nfev,
            nit=nit,
            message=status_message,
            success=(warning_flag != True))

    # do the optimisation.
    for nit in range(1, self.maxiter + 1):
        if self.dither is not None:
            self.scale = self.random_number_generator.rand(
            ) * (self.dither[1] - self.dither[0]) + self.dither[0]

        for candidate in range(np.size(self.population, 0)):
            if nfev > self.maxfun:
                warning_flag = True
                status_message = _status_message['maxfev']
                break

            trial = self._mutate(candidate)
            self._ensure_constraint(trial)
            parameters = self._scale_parameters(trial)

            energy = self.func(parameters, *self.args)
            nfev += 1

            if energy < self.population_energies[candidate]:
                self.population[candidate] = trial
                self.population_energies[candidate] = energy

                if energy < self.population_energies[0]:
                    self.population_energies[0] = energy
                    self.population[0] = trial

        # stop when the fractional s.d. of the population is less than tol
        # of the mean energy
        convergence = (np.std(self.population_energies) /
                       np.abs(np.mean(self.population_energies) +
                              _MACHEPS))

        if self.disp:
            print("differential_evolution step %d: f(x)= %g"
                  % (nit,
                     self.population_energies[0]))

        if (self.callback and
                self.callback(self._scale_parameters(self.population[0]),
                              convergence=self.tol / convergence) is True):
            warning_flag = True
            status_message = ('callback function requested stop early '
                              'by returning True')
            break

        if convergence < self.tol or warning_flag:
            break

    else:
        status_message = _status_message['maxiter']
# ......... the rest of this example is omitted .........