This article collects typical usage examples of Python's nlopt.opt function: what nlopt.opt does, how to call it, and what idiomatic usage looks like in real code. The 15 code examples below are ordered by popularity by default.
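All fifteen examples follow the same basic pattern: construct an optimizer with nlopt.opt(algorithm, dimension), attach an objective with the (x, grad) signature, set bounds and stopping criteria, and call optimize. As a reference point, here is a minimal self-contained sketch of that pattern; the quadratic objective, bounds, and tolerances are placeholders, not taken from any example below:

import numpy as np
import nlopt

def objective(x, grad):
    # NLopt objectives take (x, grad); for derivative-free algorithms
    # such as LN_BOBYQA, grad has size 0 and can be ignored.
    return float(np.sum((x - 1.0) ** 2))

opt = nlopt.opt(nlopt.LN_BOBYQA, 2)        # algorithm, dimension
opt.set_min_objective(objective)
opt.set_lower_bounds([-5.0, -5.0])
opt.set_upper_bounds([5.0, 5.0])
opt.set_xtol_rel(1e-6)
x_opt = opt.optimize(np.array([3.0, -2.0]))
print(x_opt, opt.last_optimum_value())      # ~[1, 1], ~0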
Example 1: minimizeCustom

def minimizeCustom(self, p, q, **kwargs):
    S = numpy.matrix(numpy.identity(4))

    # TODO: try using functions from the nlopt module
    def objectiveFunc(params, grad):
        d = p
        m = q
        if grad.size > 0:  # only gradient-based algorithms request a gradient
            # arbitrary placeholder gradient; GN_CRS2_LM is derivative-free and never enters this branch
            grad[:] = numpy.array([pi / 100, pi / 100, pi / 100, 0.01, 0.01, 0.01])
        translate = numpyTransform.translation(params[3:6])
        rotx = numpyTransform.rotation(params[0], [1, 0, 0], N=4)
        roty = numpyTransform.rotation(params[1], [0, 1, 0], N=4)
        rotz = numpyTransform.rotation(params[2], [0, 0, 1], N=4)
        transform = translate * rotx * roty * rotz
        Dicp = numpyTransform.transformPoints(transform, d)
        # mean distance between the model points and the transformed data points
        # err = self.rms_error(m, Dicp)
        err = numpy.mean(numpy.sqrt(numpy.sum((m - Dicp) ** 2, axis=1)))
        # err = numpy.sqrt(numpy.sum((m - Dicp) ** 2, axis=1))
        return err

    x0 = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    if 'optAlg' in kwargs:
        opt = nlopt.opt(kwargs['optAlg'], 6)
    else:
        opt = nlopt.opt(nlopt.GN_CRS2_LM, 6)
    opt.set_min_objective(objectiveFunc)
    opt.set_lower_bounds([-pi, -pi, -pi, -3.0, -3.0, -3.0])
    opt.set_upper_bounds([pi, pi, pi, 3.0, 3.0, 3.0])
    opt.set_maxeval(1500)
    params = opt.optimize(x0)
    # Alternatives previously tried with scipy.optimize over the same variable bounds:
    # output = scipy.optimize.leastsq(objectiveFunc, x0, args=funcArgs)
    # params = scipy.optimize.fmin(objectiveFunc, x0, args=funcArgs)
    # params = scipy.optimize.fmin_slsqp(objectiveFunc, x0, eqcons=constraints, bounds=varBounds, args=funcArgs)
    # output = scipy.optimize.fmin_l_bfgs_b(objectiveFunc, x0, bounds=varBounds, args=funcArgs, approx_grad=True)
    # params = scipy.optimize.fmin_tnc(objectiveFunc, x0, bounds=varBounds, args=funcArgs, approx_grad=True)

    translate = numpyTransform.translation(params[3:6])
    rotx = numpyTransform.rotation(params[0], [1, 0, 0], N=4)
    roty = numpyTransform.rotation(params[1], [0, 1, 0], N=4)
    rotz = numpyTransform.rotation(params[2], [0, 0, 1], N=4)
    transform = translate * rotx * roty * rotz
    return transform, S  # full transform (rotation and translation) and scale
Example 2: optimise_hypers

def optimise_hypers(criterion, optParams):
    theta_low, _ = pack(optParams.sigma.lowerBound, optParams.noise.lowerBound)
    theta_0, unpackinfo = pack(optParams.sigma.initialVal, optParams.noise.initialVal)
    theta_high, _ = pack(optParams.sigma.upperBound, optParams.noise.upperBound)
    objective = lambda theta, grad: criterion(*unpack(theta, unpackinfo))
    nParams = theta_0.shape[0]

    # Choose the algorithm first, then configure it: either a global MLSL search
    # with a BOBYQA local stage, or plain BOBYQA for a purely local search.
    if optParams.global_opt is True:
        opt = nl.opt(nl.G_MLSL_LDS, nParams)
        local_opt = nl.opt(nl.LN_BOBYQA, nParams)
        local_opt.set_ftol_rel(1e-4)
        opt.set_local_optimizer(local_opt)
    else:
        opt = nl.opt(nl.LN_BOBYQA, nParams)
        opt.set_ftol_rel(1e-6)

    opt.set_lower_bounds(theta_low)
    opt.set_upper_bounds(theta_high)
    opt.set_min_objective(objective)
    opt.set_maxtime(optParams.walltime)

    assert (theta_low <= theta_0).all()
    assert (theta_high >= theta_0).all()

    theta_opt = opt.optimize(theta_0)
    sigma, noise_sigma = unpack(theta_opt, unpackinfo)
    opt_val = opt.last_optimum_value()
    return sigma, noise_sigma, opt_val
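For reference, here is a standalone sketch of the MLSL-plus-local-optimizer wiring used above, applied to a toy quadratic; the bounds, tolerances, and evaluation budget are illustrative only:

import numpy as np
import nlopt

n = 2
opt = nlopt.opt(nlopt.G_MLSL_LDS, n)        # global multi-level single-linkage
local_opt = nlopt.opt(nlopt.LN_BOBYQA, n)   # local refinement stage
local_opt.set_ftol_rel(1e-6)
opt.set_local_optimizer(local_opt)
opt.set_min_objective(lambda x, grad: float(np.sum(x ** 2)))
opt.set_lower_bounds(np.full(n, -4.0))      # MLSL needs finite bounds
opt.set_upper_bounds(np.full(n, 4.0))
opt.set_maxeval(2000)                       # MLSL also needs a stopping criterion
x_opt = opt.optimize(np.array([3.0, 3.0]))  # ~[0, 0]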
Example 3: _optimize_CRS2_LM

def _optimize_CRS2_LM(self, vector):
    """
    Controlled random search with local mutations
    """
    # Create a global optimizer
    opt = nlopt.opt(nlopt.GN_CRS2_LM, vector.size)
    opt.set_min_objective(self._objective)
    lower_bounds, upper_bounds = self._getBounds()
    opt.set_lower_bounds(lower_bounds)
    opt.set_upper_bounds(upper_bounds)
    neval = 10000 * opt.get_dimension()  # TODO: allow to tune this parameter
    opt.set_maxeval(neval)

    # Optimize parameters
    vector = opt.optimize(vector)  # TODO: check optimizer status
    self.loss = opt.last_optimum_value()
    assert self._objective(vector, None) == self.loss

    # Create a local optimizer to refine the global solution
    opt = nlopt.opt(nlopt.LN_BOBYQA, opt.get_dimension())
    opt.set_min_objective(self._objective)
    opt.set_lower_bounds(lower_bounds)
    opt.set_upper_bounds(upper_bounds)
    opt.set_xtol_rel(1e-3)
    opt.set_maxeval(neval)
    opt.set_initial_step(1e-3 * (upper_bounds - lower_bounds))

    # Optimize parameters
    vector = opt.optimize(vector)  # TODO: check optimizer status
    self.loss = opt.last_optimum_value()
    assert self._objective(vector, None) == self.loss
    return vector
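Both optimize() calls above carry a "TODO: check optimizer status" note. One way to address it, sketched here as a hypothetical helper, is to compare last_optimize_result() against NLopt's stop-reason constants:

import nlopt

RESULT_NAMES = {
    nlopt.SUCCESS: 'SUCCESS',
    nlopt.STOPVAL_REACHED: 'STOPVAL_REACHED',
    nlopt.FTOL_REACHED: 'FTOL_REACHED',
    nlopt.XTOL_REACHED: 'XTOL_REACHED',
    nlopt.MAXEVAL_REACHED: 'MAXEVAL_REACHED',
    nlopt.MAXTIME_REACHED: 'MAXTIME_REACHED',
}

def optimize_checked(opt, x0):
    # opt.optimize raises an exception on hard failures; on success we
    # report which stopping criterion actually ended the run.
    x = opt.optimize(x0)
    code = opt.last_optimize_result()
    print('optimizer stopped:', RESULT_NAMES.get(code, code))
    return x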
Example 4: nlopt_test

def nlopt_test():
    '''This is from the NLopt tutorial'''
    raise SkipTest

    def myfunc(x, grad):
        if grad.size > 0:
            grad[0] = 0.0
            grad[1] = 0.5 / math.sqrt(x[1])
        return math.sqrt(x[1])

    def myconstraint(x, grad, a, b):
        if grad.size > 0:
            grad[0] = 3 * a * (a * x[0] + b) ** 2
            grad[1] = -1.0
        return (a * x[0] + b) ** 3 - x[1]

    opt = nlopt.opt(nlopt.LD_MMA, 2)
    opt.set_lower_bounds([-float('inf'), 0])
    opt.set_min_objective(myfunc)
    opt.add_inequality_constraint(lambda x, grad: myconstraint(x, grad, 2, 0), 1e-8)
    opt.add_inequality_constraint(lambda x, grad: myconstraint(x, grad, -1, 1), 1e-8)
    opt.set_xtol_rel(1e-4)
    x = opt.optimize([1.234, 5.678])
    minf = opt.last_optimum_value()
    print("optimum at ", x[0], x[1])
    print("minimum value = ", minf)
    print("result code = ", opt.last_optimize_result())
Example 5: __init__

def __init__(self, function, parameters, ftol=1e-5, verbosity=1):
    super(BOBYQAMinimizer, self).__init__(function, parameters, ftol, verbosity)

    # Set up the BOBYQA minimizer (lists, not map objects, so NLopt can consume them)
    self.x0 = [p.value for p in self.parameters.values()]
    self.lowerBounds = [p.minValue for p in self.parameters.values()]
    self.upperBounds = [p.maxValue for p in self.parameters.values()]
    self.steps = [p.delta for p in self.parameters.values()]
    self.objectiveFunction = function

    def wrapper(x, grad):
        if grad.size > 0:
            print("This won't ever happen, since BOBYQA does not use derivatives")
        return self.objectiveFunction(x)

    self.wrapper = wrapper
    self.bob = nlopt.opt(nlopt.LN_BOBYQA, self.Npar)
    self.bob.set_min_objective(self.wrapper)
    self.bob.set_ftol_abs(ftol)
    # Stop if the value of every parameter changes by less than 0.1%
    self.bob.set_xtol_rel(0.001)
    self.bob.set_initial_step(self.steps)
    self.bob.set_lower_bounds(self.lowerBounds)
    self.bob.set_upper_bounds(self.upperBounds)
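The constructor only configures the optimizer; presumably a separate method runs it. A hypothetical driver for this class (the method name and return shape are assumptions, not taken from the source) might look like:

    def minimize(self):
        # Hypothetical driver for the optimizer configured in __init__
        best_x = self.bob.optimize(list(self.x0))
        best_f = self.bob.last_optimum_value()
        return best_x, best_f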
Example 6: start_training

def start_training(f):
    """Define the training parameters and optimize them with a global DIRECT-L search."""
    opt = nlopt.opt(nlopt.GN_DIRECT_L, f.get_len_output())

    # Build the boundaries: shift the current output levels down/up by one
    # inter-level spacing to obtain the lower and upper bounds.
    minout = [f.get_output(i) for i in range(f.get_len_output() - 1)]
    maxout = [f.get_output(i) for i in range(1, f.get_len_output())]
    startout = [f.get_output(i) for i in range(f.get_len_output())]
    minout.insert(0, minout[0] - (minout[1] - minout[0]))
    maxout.append(maxout[-1] + (maxout[-1] - maxout[-2]))
    print('minout:', minout)
    print('maxout:', maxout)
    print('start:', startout)

    opt.set_lower_bounds(np.array(minout))
    opt.set_upper_bounds(np.array(maxout))
    opt.set_initial_step((f.get_output(1) - f.get_output(0)) / 500.)
    opt.set_min_objective(f.myfunc)
    opt.set_ftol_rel((f.get_output(1) - f.get_output(0)) / 100000.)
    opt.set_maxtime(60)  # 60 s

    xopt = opt.optimize(np.array(startout))
    opt_val = opt.last_optimum_value()
    result = opt.last_optimize_result()
    print(' *************Result of Optimization*****************')
    print('max:', opt_val)
    print('parameter:', xopt)

    # Set the best values
    for i in range(f.get_len_output()):
        f.set_output(i, xopt[i])
Example 7: direct

def direct(self, alpha):
    import nlopt
    fn = lambda x, grad: self.objective_func(x, grad, alpha)

    # Use DIRECT as the global optimization scheme
    opt = nlopt.opt(nlopt.GN_DIRECT, self.dim)
    # Set the objective (note: maximization, not minimization)
    opt.set_max_objective(fn)
    # Set the maximum number of function evaluations
    opt.set_maxeval(self.maxeval)
    # Set lower and upper bounds
    opt.set_lower_bounds(self.lb)
    opt.set_upper_bounds(self.ub)
    # Optimize from the starting point
    x = opt.optimize(self.start_point)
    # minf = opt.last_optimum_value()
    # print("optimum at ", x[0])
    # print("maximum value = ", minf)
    # print("result code = ", opt.last_optimize_result())
    return x
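DIRECT-family algorithms subdivide a bounding box, so every variable needs finite lower and upper bounds (self.lb and self.ub here). A self-contained sketch of the same set_max_objective pattern on a toy function; the bounds and budget are placeholders:

import numpy as np
import nlopt

opt = nlopt.opt(nlopt.GN_DIRECT, 2)
opt.set_lower_bounds(np.array([-2.0, -2.0]))  # DIRECT requires finite bounds
opt.set_upper_bounds(np.array([2.0, 2.0]))
opt.set_max_objective(lambda x, grad: float(-(x[0] ** 2 + x[1] ** 2)))  # maximum at the origin
opt.set_maxeval(500)
x = opt.optimize(np.array([1.0, 1.0]))        # DIRECT ignores the start point, but the shape must match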
Example 8: run

def run(self):
    ff = FFEvaluate(self.molecule)

    results = []
    for iframe in range(self.molecule.numFrames):
        self.molecule.frame = iframe

        directory = os.path.join(self.directory, '%05d' % iframe)
        os.makedirs(directory, exist_ok=True)
        pickleFile = os.path.join(directory, 'data.pkl')
        if self._completed(directory):
            with open(pickleFile, 'rb') as fd:
                result = pickle.load(fd)
            logger.info('Loading QM data from %s' % pickleFile)
        else:
            result = QMResult()
            result.errored = False
            result.coords = self.molecule.coords[:, :, iframe:iframe + 1].copy()

            if self.optimize:
                opt = nlopt.opt(nlopt.LN_COBYLA, result.coords.size)
                opt.set_min_objective(lambda x, _: ff.run(x.reshape((-1, 3)))['total'])
                if self.restrained_dihedrals is not None:
                    for dihedral in self.restrained_dihedrals:
                        indices = dihedral.copy()
                        ref_angle = np.deg2rad(dihedralAngle(self.molecule.coords[indices, :, iframe]))

                        # Bind indices and ref_angle as default arguments so each
                        # constraint keeps its own dihedral (closures capture by reference)
                        def constraint(x, _, indices=indices, ref_angle=ref_angle):
                            coords = x.reshape((-1, 3))
                            angle = np.deg2rad(dihedralAngle(coords[indices]))
                            return np.sin(.5 * (angle - ref_angle))

                        opt.add_equality_constraint(constraint)
                opt.set_xtol_abs(1e-3)  # Similar to the Psi4 default
                opt.set_maxeval(1000 * opt.get_dimension())
                opt.set_initial_step(1e-3)
                result.coords = opt.optimize(result.coords.ravel()).reshape((-1, 3, 1))
                logger.info('Optimization status: %d' % opt.last_optimize_result())

            result.energy = ff.run(result.coords[:, :, 0])['total']
            result.dipole = self.molecule.getDipole()
            if self.optimize:
                assert opt.last_optimum_value() == result.energy  # A self-consistency test

            # Compute ESP values
            if self.esp_points is not None:
                assert self.molecule.numFrames == 1
                result.esp_points = self.esp_points
                distances = cdist(result.esp_points, result.coords[:, :, 0])  # Angstrom
                distances *= const.physical_constants['Bohr radius'][0] / const.angstrom  # Angstrom --> Bohr
                result.esp_values = np.dot(np.reciprocal(distances), self.molecule.charge)  # Hartree/Bohr

            with open(pickleFile, 'wb') as fd:
                pickle.dump(result, fd)

        results.append(result)

    return results
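The default arguments on constraint above pin each dihedral's indices and reference angle at definition time; without them, Python closures capture variables by reference, so every constraint created in the loop would evaluate the last dihedral. A short demonstration of the pitfall:

fns = [lambda: i for i in range(3)]
print([fn() for fn in fns])          # [2, 2, 2] -- all closures share the final i
fns = [lambda i=i: i for i in range(3)]
print([fn() for fn in fns])          # [0, 1, 2] -- defaults bind at definition time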
Example 9: run

def run(self):
    """
    Run ESP charge fitting

    Returns
    -------
    results : dict
        Dictionary with the fitted charges and the fitting loss value
    """
    # Get the charge bounds
    lower_bounds, upper_bounds = self._get_bounds()

    # Set up NLopt
    opt = nlopt.opt(nlopt.LN_COBYLA, self.ngroups)
    opt.set_min_objective(self._compute_objective)
    opt.set_lower_bounds(lower_bounds)
    opt.set_upper_bounds(upper_bounds)
    opt.add_equality_constraint(self._compute_constraint)
    opt.set_xtol_rel(1.e-6)
    opt.set_maxeval(1000 * self.ngroups)
    opt.set_initial_step(0.001)

    # Optimize the charges
    group_charges = opt.optimize(np.zeros(self.ngroups) + 0.001)  # TODO: a more elegant way to set initial charges
    # TODO: check optimizer status
    charges = self._map_groups_to_atoms(group_charges)
    loss = self._compute_objective(group_charges, None)

    return {'charges': charges, 'loss': loss}
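add_equality_constraint also accepts an optional feasibility tolerance as a second argument, and LN_COBYLA is among the NLopt algorithms that accept nonlinear equality constraints. A hypothetical net-charge constraint in the spirit of _compute_constraint above (the signature and target are assumptions, not from the source):

import numpy as np
import nlopt

def net_charge_constraint(charges, grad, target=0.0):
    # Zero when the group charges sum to the desired net charge
    if grad.size > 0:
        grad[:] = 1.0                        # d(sum q_i)/dq_i = 1
    return float(np.sum(charges) - target)

opt = nlopt.opt(nlopt.LN_COBYLA, 4)
opt.add_equality_constraint(net_charge_constraint, 1e-8)  # tolerance as second argument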
Example 10: test_make_nlopt_fun_neldermead

def test_make_nlopt_fun_neldermead(start_point):
    x0 = start_point
    opt = nlopt.opt(nlopt.LN_NELDERMEAD, len(x0))
    obj_fun = make_nlopt_fun(rosen, jac=False)
    opt.set_min_objective(obj_fun)
    assert np.allclose(opt.optimize(x0), np.array([1., 1., 1., 1., 1.]))
    assert np.isclose(opt.last_optimum_value(), 0)
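make_nlopt_fun, rosen, rosen_couple, and rosen_der in Examples 10-13 and 15 come from the project under test (rosen and rosen_der match the SciPy Rosenbrock helpers). A sketch of what such an adapter could look like, assuming jac may be False, a callable, or True meaning the function returns a (value, gradient) pair:

def make_nlopt_fun_sketch(fun, jac=False):
    # Adapt a SciPy-style objective to NLopt's (x, grad) calling convention.
    def obj(x, grad):
        if callable(jac):              # separate gradient function
            if grad.size > 0:
                grad[:] = jac(x)
            return fun(x)
        if jac is True:                # fun returns (value, gradient)
            value, gradient = fun(x)
            if grad.size > 0:
                grad[:] = gradient
            return value
        return fun(x)                  # derivative-free: value only
    return obj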
Example 11: test_make_nlopt_fun_grad1

def test_make_nlopt_fun_grad1(start_point):
    x0 = start_point
    opt = nlopt.opt(nlopt.LD_LBFGS, len(x0))
    obj_fun = make_nlopt_fun(rosen_couple, jac=rosen_der)
    opt.set_min_objective(obj_fun)
    assert np.allclose(opt.optimize(x0), np.array([1., 1., 1., 1., 1.]))
    assert np.isclose(opt.last_optimum_value(), 0)
Example 12: test_make_nlopt_fun_bobyqa

def test_make_nlopt_fun_bobyqa(start_point):
    x0 = start_point
    opt = nlopt.opt(nlopt.LN_BOBYQA, len(x0))
    obj_fun = make_nlopt_fun(rosen, jac=False)
    opt.set_min_objective(obj_fun)
    opt.set_ftol_abs(1e-11)
    assert np.allclose(opt.optimize(x0), np.array([1., 1., 1., 1., 1.]))
    assert np.isclose(opt.last_optimum_value(), 0)
Example 13: test_make_nlopt_fun_grad_free1

def test_make_nlopt_fun_grad_free1(start_point):
    # When using derivative-free optimization methods, gradient information
    # supplied in any form is disregarded without warning.
    x0 = start_point
    opt = nlopt.opt(nlopt.LN_NELDERMEAD, len(x0))
    obj_fun = make_nlopt_fun(rosen_couple, jac=True)
    opt.set_min_objective(obj_fun)
    assert np.allclose(opt.optimize(x0), np.array([1., 1., 1., 1., 1.]))
    assert np.isclose(opt.last_optimum_value(), 0)
Example 14: mle

def mle(self, params, maxiter=100):
    opt = nlopt.opt(nlopt.LN_COBYLA, params.size)
    opt.set_min_objective(self.likelihood)
    opt.set_maxeval(maxiter)
    opt.set_lower_bounds(np.zeros(params.size))
    opt.set_initial_step(np.linalg.norm(params))
    opt.set_ftol_rel(1e-3)
    params = opt.optimize(params)
    return params
Example 15: test_make_nlopt_fun_grad5

def test_make_nlopt_fun_grad5(start_point):
    # Of course, you can use gradient-based optimization without supplying
    # any gradient information, at your own discretion.
    # No warnings are raised, and the optimizer simply makes no progress.
    x0 = start_point
    opt = nlopt.opt(nlopt.LD_LBFGS, len(x0))
    obj_fun = make_nlopt_fun(rosen, jac=False)
    opt.set_min_objective(obj_fun)
    assert np.allclose(opt.optimize(x0), x0)