This page collects typical usage examples of the Python method openopt.NLP.solve. If you are wondering what NLP.solve does or how to use it, the curated method examples below may help; you can also explore further usage examples of its containing class, openopt.NLP.
The following presents 15 code examples of NLP.solve, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
Example 1: startOptimization
# Required module: from openopt import NLP [as alias]
# Or: from openopt.NLP import solve [as alias]
def startOptimization(self, root, varsRoot, AddVar, currValues, \
                      ValsColumnName, ObjEntry, ExperimentNumber, Next, NN, goal, objtol, C):
    AddVar.destroy()
    ValsColumnName.set('Experiment parameters')
    n = len(self.NameEntriesList)
    Names, Lb, Ub, Tol, x0 = [], [], [], [], []
    for i in range(n):
        N, L, U, T, valEntry = \
            self.NameEntriesList[i], self.LB_EntriesList[i], self.UB_EntriesList[i], self.TolEntriesList[i], self.ValueEntriesList[i]
        N.config(state=DISABLED)
        L.config(state=DISABLED)
        U.config(state=DISABLED)
        T.config(state=DISABLED)
        #valEntry.config(state=DISABLED)
        name, lb, ub, tol, val = N.get(), L.get(), U.get(), T.get(), valEntry.get()
        Names.append(name)
        x0.append(float(val))
        Lb.append(float(lb) if lb != '' else -inf)
        Ub.append(float(ub) if ub != '' else inf)
        # TODO: fix zero
        Tol.append(float(tol) if tol != '' else 0)
    x0, Tol, Lb, Ub = asfarray(x0), asfarray(Tol), asfarray(Lb), asfarray(Ub)
    # rescale variables so the solver's uniform xtol matches the per-variable tolerances
    x0 *= xtolScaleFactor / Tol
    #self.x0 = copy(x0)
    from openopt import NLP, oosolver
    p = NLP(objective, x0, lb=Lb * xtolScaleFactor / Tol, ub=Ub * xtolScaleFactor / Tol)
    self.prob = p
    #calculated_points = [(copy(x0), copy(float(ObjEntry.get())))
    p.args = (Tol, self, ObjEntry, p, root, ExperimentNumber, Next, NN, objtol, C)
    #p.graphics.rate = -inf
    #p.f_iter = 2
    solver = oosolver('bobyqa', useStopByException=False)
    p.solve(solver, iprint=1, goal=goal)  #, plot=1, xlabel='nf'
    self.solved = True
    if p.stopcase >= 0:
        self.ValsColumnName.set('Best parameters')
        NN.set('Best obtained objective value:')
        #Next.config(state=DISABLED)
        Next.destroy()
        #reverse = True if goal == 'min' else False
        calculated_items = list(self.calculated_points.items()) if isinstance(self.calculated_points, dict) else self.calculated_points
        vals = [calculated_items[i][1] for i in range(len(calculated_items))]
        ind = argsort(vals)
        j = ind[0] if goal == 'min' else ind[-1]
        key, val = calculated_items[j]
        text_coords = key.split(' ')
        for i in range(len(self.ValueEntriesList)):
            self.ValueEntriesList[i].delete(0, END)
            self.ValueEntriesList[i].insert(0, text_coords[i])
        ObjEntry.delete(0, END)
        obj_tol = self.ObjTolEntry.get()
        val = float(val) * 1e4 * objtol
        ObjEntry.insert(0, str(val))
        ObjEntry.config(state=DISABLED)
Example 2: _min
# Required module: from openopt import NLP [as alias]
# Or: from openopt.NLP import solve [as alias]
def _min(self, func, x0, *args, **kwargs):
    if _USE_OPENOPT:
        if 'solver' in kwargs:
            solver = kwargs['solver']
            del kwargs['solver']
        else:
            solver = 'ralg'
        if 'df' in kwargs:
            df = kwargs['df']
            del kwargs['df']
        else:
            df = self._diff
        p = NLP(func, x0,
                args=(self.mx, self.my, self.size),
                df=df, **kwargs)
        z = p.solve(solver)
        return z.xf, z.ff, z.istop > 0, z.msg
    else:
        a = minimize(func, x0,
                     args=(self.mx, self.my, self.size),
                     *args, **kwargs)
        return a.x, a.fun, a.success, a.message
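For context, a hypothetical call sketch (assuming an object fitter whose mx, my, and size attributes hold the fit data, and that OpenOpt is available so the 'ralg' branch is taken; the cost function below is made up for illustration):

# the callable must accept (x, mx, my, size), matching the args tuple above
cost = lambda c, mx, my, size: ((c[0] * mx + c[1] - my) ** 2).sum()
xf, ff, ok, msg = fitter._min(cost, [1.0, 0.0], solver='ralg')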
Example 3: wls_fit
# Required module: from openopt import NLP [as alias]
# Or: from openopt.NLP import solve [as alias]
def wls_fit(function, initial_guess, X, Y, weights=None, lb=None, ub=None):
    """[Inputs]
    function is of the form:
        def function(coeffs, xdata)
    """
    if weights is None:
        weights = [1] * len(X)

    def penalty(c):
        # weighted sum of squared residuals
        fit = function(c, X)
        error = (weights * (Y - fit) ** 2).sum()
        return error

    problem = NLP(penalty, initial_guess)
    if lb is not None:
        problem.lb = lb
    if ub is not None:
        problem.ub = ub
    solver = 'ipopt'
    result = problem.solve(solver)
    coeffs = result.xf
    return coeffs
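A minimal usage sketch for wls_fit; the exponential model and synthetic data are hypothetical, and the hardcoded 'ipopt' solver must be installed:

import numpy as np

def model(coeffs, xdata):
    # y = a * exp(b * x), coeffs = [a, b]
    return coeffs[0] * np.exp(coeffs[1] * xdata)

X = np.linspace(0., 1., 50)
Y = 2.0 * np.exp(-1.5 * X) + 0.01 * np.random.randn(50)
coeffs = wls_fit(model, [1.0, -1.0], X, Y, lb=[0., -10.], ub=[10., 10.])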
Example 4: garchfit
# Required module: from openopt import NLP [as alias]
# Or: from openopt.NLP import solve [as alias]
def garchfit(self, initvalue):
    """
    Estimate GARCH(1,1) parameters by the maximum likelihood method.
    The optimization runs under the following constraints:
        ARCH + GARCH < 1  (stationarity)
        all parameters >= 0  (non-negativity)
    -------------------------------------------------------------------------
    InitValue = [ARCH; GARCH; Constant]
    """
    try:
        from openopt import NLP
        lb = [0.0001, 0.0001, 0.]  # lower bounds
        A = [1, 1, 0]              # together with b: ARCH + GARCH <= 1
        b = 1.0
        p = NLP(self.f, initvalue, lb=lb, A=A, b=b)
        r = p.solve('ralg')
        return r.xf
    except ImportError:
        print("OpenOpt is not installed, will use scipy's fmin_cobyla instead;")
        print("the result may be less accurate, though.")
        params = fmin_cobyla(self.f, initvalue,
                             cons=[lambda x: 1 - (x[0] + x[1]),
                                   lambda x: x[0],
                                   lambda x: x[2],
                                   lambda x: x[1]])
        return params
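In OpenOpt's NLP, A and b define the linear inequality A x <= b, so A = [1, 1, 0] with b = 1.0 encodes the stationarity condition ARCH + GARCH <= 1. A standalone sketch of the same encoding, with a placeholder objective instead of the GARCH likelihood:

from openopt import NLP
f = lambda x: ((x - 0.3) ** 2).sum()      # placeholder objective, not a GARCH likelihood
p = NLP(f, [0.1, 0.8, 0.0], lb=[1e-4, 1e-4, 0.0],
        A=[1, 1, 0], b=1.0)               # enforces x[0] + x[1] <= 1
r = p.solve('ralg')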
Example 5: test_openopt
# Required module: from openopt import NLP [as alias]
# Or: from openopt.NLP import solve [as alias]
def test_openopt():
    p = NLP(residual, x0, lb=lb, ub=ub,
            iprint=1, plot=True)
    # uncomment the following (and set scale = 1 above) to use openopt's
    # scaling mechanism. This only seems to work with a few solvers, though.
    #p.scale = np.array([1, 1e6, 1e6, 1e6, 1e6, 1])
    r = p.solve('ralg')  # OpenOpt solver, seems to work well
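The snippet relies on module-level residual, x0, lb, and ub; one hypothetical set of definitions that makes it self-contained:

import numpy as np

def residual(x):
    # placeholder least-squares objective
    return ((x - np.array([1., 2., 3.])) ** 2).sum()

x0 = np.zeros(3)
lb = -10. * np.ones(3)
ub = 10. * np.ones(3)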
Example 6: gridmax
# Required module: from openopt import NLP [as alias]
# Or: from openopt.NLP import solve [as alias]
def gridmax(al, gam, w, D, th, tl, steps):
    # coarse grid search for a good starting point
    grid = linspace(0.000, 1.0, steps)
    pisep = 0
    for n in grid:
        gridh = linspace(n, 1, steps - n * steps)
        for nh in gridh:
            pinowsep = profitsseparating(n, nh, al, gam, w, D, th, tl)
            if pinowsep > pisep:
                pisep = pinowsep
                solutioneffortsep = [n, nh]
    x0 = [solutioneffortsep[0], solutioneffortsep[1]]
    lb = [0, 0]
    ub = [1, 1]
    A = [1, -1]
    b = [0]
    # note that the solvers below search for a minimum, hence the "-"
    f = lambda x: -profitsseparating(x[0], x[1], al, gam, w, D, th, tl)
    p = NLP(f, x0, lb=lb, ub=ub, A=A, b=b, contol=1e-8, gtol=1e-10, ftol=1e-12)
    solver = 'ralg'
    r = p.solve(solver)
    # the second program simply assumes ef=0 and maximizes over efh only; the result is
    # then compared to the program above. This is done because there is a local maximum
    # at ef=0 (see paper)
    f2 = lambda x: -profitsseparating(0, x[0], al, gam, w, D, th, tl)
    lb2 = [0]
    ulb2 = [1]
    p2 = NLP(f2, solutioneffortsep[1], lb=lb2, ub=ulb2, contol=1e-8, gtol=1e-10, ftol=1e-12)
    r2 = p2.solve(solver)
    if r.ff < r2.ff:
        print('solver result with gamma=', gam, ', alpha=', al, ', w=', w, ' and D=', D,
              ', th=', th, ', tl=', tl, ' the effort levels are:', r.xf)
        ref = r.xf[0]
        refh = r.xf[1]
        piff = r.ff
    else:
        print('solver result with gamma=', gam, ', alpha=', al, ', w=', w, ' and D=', D,
              ', th=', th, ', tl=', tl, ' the effort levels are: 0', r2.xf)
        ref = 0
        refh = r2.xf[0]
        piff = r2.ff
    print(ref, refh)
    print('ub1 is', ub1(ref, refh, al, gam, util(w), util(w - D), th, tl),
          '; ul1 is', ul1(ref, refh, al, gam, util(w), util(w - D), th, tl))
    print('ub2 is', ub2(ref, refh, al, gam, util(w), util(w - D), th, tl),
          '; ul2 is', ul2(ref, refh, al, gam, util(w), util(w - D), th, tl))
    euff = (al * (betah(ref, al, th, tl) * ul1(ref, refh, al, gam, util(w), util(w - D), th, tl)
                  + (1 - betah(ref, al, th, tl)) * ub1(ref, refh, al, gam, util(w), util(w - D), th, tl))
            + (1 - al) * (betal(ref, al, th, tl) * ul2(ref, refh, al, gam, util(w), util(w - D), th, tl)
                          + (1 - betal(ref, al, th, tl)) * ub2(ref, refh, al, gam, util(w), util(w - D), th, tl))
            - cost(ref, gam))
    print('expected utility under this contract is', euff)
    print('expected solver profits are', -piff)
    return [-piff, euff]  # this return is used for creating the graph
Example 7: balance
# Required module: from openopt import NLP [as alias]
# Or: from openopt.NLP import solve [as alias]
def balance(sam, debug=False):
    try:
        table = sam.array()
    except AttributeError:
        table = np.array(sam)
    assert table.shape[0] == table.shape[1]
    size = table.shape[0]
    x0 = np.array([v for v in table.flatten() if v != 0])

    def transform(ox):
        # scatter the flat vector of nonzero cells back into the table layout
        ret = np.zeros_like(table)
        i = 0
        for r in range(size):
            for c in range(size):
                if table[r, c] != 0:
                    ret[r, c] = ox[i]
                    i += 1
        return ret

    def objective(ox):
        # squared relative deviation from the original entries
        ox = np.square((ox - x0) / x0)
        return np.sum(ox)

    def constraints(ox):
        # row sums must equal column sums
        ox = transform(ox)
        ret = np.sum(ox, 0) - np.sum(ox, 1)
        return ret

    print(constraints(x0))
    if debug:
        print("--- balance ---")
    p = NLP(objective, x0, h=constraints, iprint=50 * int(debug),
            maxIter=100000, maxFunEvals=1e7, name='NLP_1')
    r = p.solve('ralg', plot=0)
    if debug:
        print('constraints')
        print(constraints(r.xf))
    assert r.isFeasible
    try:
        return sam.replace(transform(r.xf))
    except AttributeError:  # plain arrays have no .replace()
        return transform(r.xf)
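A usage sketch on a small hypothetical matrix whose row and column sums disagree slightly; after balancing, the equality constraints should hold:

import numpy as np

sam = np.array([[0.00, 2.00, 1.00],
                [1.90, 0.00, 1.10],
                [1.05, 1.00, 0.00]])
balanced = balance(sam, debug=True)
print(balanced.sum(axis=0) - balanced.sum(axis=1))  # ~ zeros if r.isFeasible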
Example 8: getDirectionOptimPoint
# Required module: from openopt import NLP [as alias]
# Or: from openopt.NLP import solve [as alias]
def getDirectionOptimPoint(p, func, x, direction, forwardMultiplier=2.0, maxiter=150, xtol=None, maxConstrLimit=None, \
                           alpha_lb=0, alpha_ub=inf, \
                           rightLocalization=0, leftLocalization=0, \
                           rightBorderForLocalization=0, leftBorderForLocalization=None):
    if all(direction == 0):
        p.err('nonzero direction is required')
    if maxConstrLimit is None:
        lsFunc = funcDirectionValue
        args = (func, x, direction)
    else:
        lsFunc = funcDirectionValueWithMaxConstraintLimit
        args = (func, x, direction, maxConstrLimit, p)
    prev_alpha, new_alpha = alpha_lb, min(alpha_lb + 0.5, alpha_ub)
    prev_val = lsFunc(prev_alpha, *args)
    for i in range(p.maxLineSearch):
        if lsFunc(new_alpha, *args) > prev_val or new_alpha == alpha_ub:
            break
        else:
            if i != 0:
                prev_alpha = min(alpha_lb, new_alpha)
            new_alpha *= forwardMultiplier
    if i == p.maxLineSearch - 1:
        p.debugmsg('getDirectionOptimPoint: maxLineSearch is exceeded')
    lb, ub = prev_alpha, new_alpha
    if xtol is None:
        xtol = p.xtol / 2.0
    # NB! the goldenSection solver ignores x0
    p_LS = NLP(lsFunc, x0=0, lb=lb, ub=ub, iprint=-1, \
               args=args, xtol=xtol, maxIter=maxiter, contol=p.contol)  # contol is used in funcDirectionValueWithMaxConstraintLimit
    r = p_LS.solve('goldenSection', useOOiterfcn=False, rightLocalization=rightLocalization,
                   leftLocalization=leftLocalization, rightBorderForLocalization=rightBorderForLocalization,
                   leftBorderForLocalization=leftBorderForLocalization)
    if r.istop == IS_MAX_ITER_REACHED:
        p.warn('getDirectionOptimPoint: max iter has been exceeded')
    alpha_opt = r.special.rightXOptBorder
    R = DirectionOptimPoint()
    R.leftAlphaOptBorder = r.special.leftXOptBorder
    R.leftXOptBorder = x + R.leftAlphaOptBorder * direction
    R.rightAlphaOptBorder = r.special.rightXOptBorder
    R.rightXOptBorder = x + R.rightAlphaOptBorder * direction
    R.x = x + alpha_opt * direction
    R.alpha = alpha_opt
    R.evalsF = r.evals['f'] + i
    return R
Example 9: phScenSolve
# Required module: from openopt import NLP [as alias]
# Or: from openopt.NLP import solve [as alias]
def phScenSolve(scen, rho, verbose=0):
    Wfilename = 'work/W%d.npy' % (scen)
    W = load(Wfilename)
    orho, nscen, nwells, nx, LB, UB = readref()
    # print('rho=%g, nx=%d' % (rho, nx))
    # refcard = load('work/refcard.npy')
    # orho, nscen, nwells, nx, LB, UB = readcard(refcard)
    qfn = 'work/qhat%d.npy' % (nx)
    qhat = load(qfn)
    H = GenHydroScen(nx, nscen, nwells, scen)
    e = lambda q, H, W, rho, qhat: gwrfull(q, H, W, rho, qhat)
    q0 = qhat  # we should read qsol!
    # q0 = array([0.0025, -0.0038, 0.0018, -0.0092])
    which_opt = 2  # 0: slsqp, 1: cobyla, 2: NLP
    if which_opt > 0:
        if which_opt == 1:
            up = lambda q, H, W, rho, qhat, scen: min(q - LB)
            dn = lambda q, H, W, rho, qhat, scen: max(UB - q)
            qopt = fmin_cobyla(e, q0, cons=[up, dn], iprint=0,
                               args=[H, W, rho, qhat], rhoend=0.0001)
            # qopt = fmin_brute(q0, H, W, rho, qhat, scen)
        else:
            eNLP = lambda q: gwrfull(q, H, W, rho, qhat)
            popt = NLP(eNLP, q0, lb=LB * ones((nwells, 1)),
                       ub=UB * ones((nwells, 1)), iprint=-1)
            ropt = popt.solve('ralg')
            qopt = ropt.xf
            qopt = qopt.reshape(1, -1)
    else:
        bounds = [(LB, UB) for i in range(size(qhat))]
        # print(bounds)
        qopt = fmin_slsqp(e, q0, bounds=bounds, iprint=0,
                          args=[H, W, rho, qhat, scen], acc=0.001)
    filename = 'work/qsol%d' % (scen)
    save(filename, squeeze(qopt))
    print('qsol[%d] =' % (scen), qopt)
    # qpert = zeros((1, nwells))
    # for i in range(nwells):
    #     qpert[:, i] = qopt[:, i] + 0.01 * random.randn()
    # print('qpert[%d] =' % (scen), qpert)
    # z1 = gwrfullCH(qopt, H, W, rho, qhat, scen)
    # z2 = gwrfullCH(qpert, H, W, rho, qhat, scen)
    # print('TicToc=%g' % (z1 - z2))
    if verbose:
        scenvecprint(scen, qopt)
    return
Example 10: run
# Required module: from openopt import NLP [as alias]
# Or: from openopt.NLP import solve [as alias]
def run(self, plot=True):
    """
    Solves the optimization problem.
    """
    # Initial guess
    p0 = self.get_p0()
    # Lower and upper bounds (HARDCODED FOR QUADTANK)
    lbound = N.array([0.0001] * len(p0))
    if self.gridsize == 1:
        ubound = [10.0] * (self.gridsize * self.nbr_us)
    else:
        ubound = [10.0] * (self.gridsize * self.nbr_us) + [0.20, 0.20, 0.20, 0.20, N.inf] * (self.gridsize - 1)
    # UPPER BOUND FOR VDP
    #ubound = [0.75]*(self.gridsize*self.nbr_us) + [N.inf]*((self.gridsize-1)*self.nbr_ys)
    if self.verbosity >= Multiple_Shooting.NORMAL:
        print('Initial parameter vector:')
        print(p0)
        print('Lower bound:', len(lbound))
        print('Upper bound:', len(ubound))
    # Get the OpenOpt problem handle
    p_solve = NLP(self.f, p0, lb=lbound, ub=ubound, maxFunEvals=self.maxFeval,
                  maxIter=self.maxIter, ftol=self.ftol, maxTime=self.maxTime)
    # Whether multiple shooting or single shooting is performed:
    # multiple shooting (gridsize > 1) adds the matching equality constraints
    if self.gridsize > 1:
        p_solve.h = self.h
    if plot:
        p_solve.plot = 1
    self.opt = p_solve.solve(self.optMethod)
    return self.opt
Example 11: optimise_openopt
# Required module: from openopt import NLP [as alias]
# Or: from openopt.NLP import solve [as alias]
def optimise_openopt(target_speed):
    from openopt import NLP

    def fitfun(gene):
        ontimes = np.require(gene[:12].copy(), requirements=['C', 'A', 'O', 'W'])
        offtimes = np.require(ontimes + gene[12:].copy(), requirements=['C', 'A', 'O', 'W'])
        currents = [243.] * 12
        flyer.prop.overwriteCoils(ontimes.ctypes.data_as(c_double_p), offtimes.ctypes.data_as(c_double_p))
        flyer.preparePropagation(currents)
        flyer.propagate(0)
        pos = flyer.finalPositions[0]
        vel = flyer.finalVelocities[0]
        # all particles that reach the end within 10% of the target speed
        ind = np.where((pos[:, 2] > 268.) & (vel[:, 2] < 1.1 * target_speed) & (vel[:, 2] > 0.9 * target_speed))[0]
        print('good particles:', ind.shape[0])
        return -1. * ind.shape[0]

    initval = np.append(flyer.ontimes, flyer.offtimes - flyer.ontimes)
    lb = np.array(24 * [0])
    ub = np.array(12 * [600] + 12 * [85])
    p = NLP(fitfun, initval, lb=lb, ub=ub)
    r = p.solve('bobyqa', plot=0)
    return r
Example 12: fit_node
# Required module: from openopt import NLP [as alias]
# Or: from openopt.NLP import solve [as alias]
def fit_node(self, index):
    qnode = self.qlist[index]
    print(qnode.q)
    th = qnode.th_condensed['a3']
    counts = qnode.th_condensed['counts']
    counts_err = qnode.th_condensed['counts_err']
    print(qnode.th_condensed['counts'].std())
    print(qnode.th_condensed['counts'].mean())
    maxval = qnode.th_condensed['counts'].max()
    minval = qnode.th_condensed['counts'].min()
    diff = qnode.th_condensed['counts'].max() - qnode.th_condensed['counts'].min() \
        - qnode.th_condensed['counts'].mean()
    sig = qnode.th_condensed['counts'].std()
    if diff - 2 * sig > 0:
        # the difference between the high point, the low point and the mean
        # is greater than 2 sigma, so we have a signal
        p0 = findpeak(th, counts, 1)
        print('p0', p0)
        # Area, center, width, background
        center = p0[0]
        width = p0[1]
        sigma = width / 2 / N.sqrt(2 * N.log(2))
        Imax = maxval - minval
        area = Imax * (N.sqrt(2 * pi) * sigma)
        print('Imax', Imax)
        pin = [area, center, width, 0]
    if 1:
        p = NLP(chisq, pin, maxIter=1e3, maxFunEvals=1e5)
        #p.lb = lowerm
        #p.ub = upperm
        p.args.f = (th, counts, counts_err)
        p.plot = 0
        p.iprint = 1
        p.contol = 1e-5  # required constraints tolerance, default for NLP is 1e-6
        # for the ALGENCAN solver, gradtol is the only stop criterion connected to openopt
        # (except maxfun, maxiter)
        # Note that in ALGENCAN gradtol means the norm of the projected gradient of the
        # Augmented Lagrangian, so it should be something like 1e-3...1e-5
        p.gradtol = 1e-5  # gradient stop criterion (default for NLP is 1e-6)
        #print('maxiter', p.maxiter)
        #print('maxfun', p.maxfun)
        p.maxIter = 50
        #p.maxfun = 100
        #p.df_iter = 50
        p.maxTime = 4000
        #r = p.solve('scipy_cobyla')
        #r = p.solve('scipy_lbfgsb')
        #r = p.solve('algencan')
        print('ralg')
        r = p.solve('ralg')
        print('done')
        pfit = r.xf
        print('pfit openopt', pfit)
        print('r dict', r.__dict__)
    if 0:
        print('curvefit')
        print(sys.executable)
        pfit, popt = curve_fit(gauss2, th, counts, p0=pfit, sigma=counts_err)
        print('p, popt', pfit, popt)
        perror = N.sqrt(N.diag(popt))
        print('perror', perror)
        chisqr = chisq(pfit, th, counts, counts_err)
        dof = len(th) - len(pfit)
        print('chisq', chisqr)
    if 0:
        oparam = scipy.odr.Model(gauss)
        mydatao = scipy.odr.RealData(th, counts, sx=None, sy=counts_err)
        myodr = scipy.odr.ODR(mydatao, oparam, beta0=pfit)
        myoutput = myodr.run()
        myoutput.pprint()
        pfit = myoutput.beta
    if 1:
        print('mpfit')
        p0 = pfit
        parbase = {'value': 0., 'fixed': 0, 'limited': [0, 0], 'limits': [0., 0.]}
        parinfo = []
        for i in range(len(p0)):
            parinfo.append(copy.deepcopy(parbase))
        for i in range(len(p0)):
            parinfo[i]['value'] = p0[i]
        fa = {'x': th, 'y': counts, 'err': counts_err}
        #parinfo[1]['fixed'] = 1
        #parinfo[2]['fixed'] = 1
        m = mpfit(myfunct_res, p0, parinfo=parinfo, functkw=fa)
        if m.status <= 0:
            print('error message = ', m.errmsg)
        params = m.params
        pfit = params
        perror = m.perror
        #chisqr = (myfunct_res(m.params, x=th, y=counts, err=counts_err)[1]**2).sum()
        chisqr = chisq(pfit, th, counts, counts_err)
#......... part of the code omitted here .........
Example 13: ModelSelector
# Required module: from openopt import NLP [as alias]
# Or: from openopt.NLP import solve [as alias]
class ModelSelector(object):
    """Model selection facility.

    Select a model among multiple models (i.e., a parametric model,
    parametrized by a set of hyperparameters).
    """

    def __init__(self, parametric_model, dataset):
        """TODO:
        """
        self.parametric_model = parametric_model
        self.dataset = dataset
        self.hyperparameters_best = None
        self.log_marginal_likelihood_best = None
        self.problem = None
        pass

    def max_log_marginal_likelihood(self, hyp_initial_guess, maxiter=1,
            optimization_algorithm="scipy_cg", ftol=1.0e-3, fixedHypers=None,
            use_gradient=False, logscale=False):
        """
        Set up the optimization problem in order to maximize
        the log_marginal_likelihood.

        Parameters
        ----------
        parametric_model : Classifier
            the actual parametric model to be optimized.
        hyp_initial_guess : numpy.ndarray
            set of hyperparameters' initial values where to start
            optimization.
        optimization_algorithm : string
            actual name of the optimization algorithm. See
            http://scipy.org/scipy/scikits/wiki/NLP
            for a comprehensive/updated list of available NLP solvers.
            (Defaults to 'scipy_cg')
        ftol : float
            threshold for the stopping criterion of the solver,
            which is mapped to OpenOpt's NLP.ftol
            (Defaults to 1.0e-3)
        fixedHypers : numpy.ndarray (boolean array)
            boolean vector of the same size as hyp_initial_guess;
            'False' means that the corresponding hyperparameter must
            be kept fixed (so not optimized).
            (Defaults to None, which means all are optimized)

        Notes
        -----
        The maximization of log_marginal_likelihood is a non-linear
        optimization problem (NLP). This fact is confirmed by Dmitrey,
        author of OpenOpt.
        """
        self.problem = None
        self.use_gradient = use_gradient
        self.logscale = logscale  # use a log-scale on hyperparameters to enhance numerical stability
        self.optimization_algorithm = optimization_algorithm
        self.hyp_initial_guess = np.array(hyp_initial_guess)
        self.hyp_initial_guess_log = np.log(self.hyp_initial_guess)
        if fixedHypers is None:
            fixedHypers = np.zeros(self.hyp_initial_guess.shape[0], dtype=bool)
            pass
        self.freeHypers = ~fixedHypers  # boolean mask of the hyperparameters to optimize
        if self.logscale:
            self.hyp_running_guess = self.hyp_initial_guess_log.copy()
        else:
            self.hyp_running_guess = self.hyp_initial_guess.copy()
            pass
        self.f_last_x = None

        def f(x):
            """
            Wrapper to the log_marginal_likelihood to be maximized.
            """
            # XXX EO: since some OpenOpt NLP solvers do not implement
            # lower bounds, the hyperparameter bounds are implemented
            # inside PyMVPA (see dmitrey's post on [SciPy-user] 20080628).
            #
            # XXX EO: OpenOpt does not implement optimization of a
            # subset of the hyperparameters, so it is implemented here.
            #
            # XXX EO: OpenOpt does not implement logarithmic scaling of
            # the hyperparameters (to enhance numerical stability), so
            # it is implemented here.
            self.f_last_x = x.copy()
            self.hyp_running_guess[self.freeHypers] = x
            # REMOVE print "guess:",self.hyp_running_guess,x
            try:
                if self.logscale:
                    self.parametric_model.set_hyperparameters(np.exp(self.hyp_running_guess))
                else:
                    self.parametric_model.set_hyperparameters(self.hyp_running_guess)
                pass
            except InvalidHyperparameterError:
                if __debug__:
                    debug("MOD_SEL", "WARNING: invalid hyperparameters!")
                return -np.inf
            try:
                self.parametric_model.train(self.dataset)
            except (np.linalg.linalg.LinAlgError, SL.basic.LinAlgError, ValueError):
#......... part of the code omitted here .........
Example 14: NLP
# Required module: from openopt import NLP [as alias]
# Or: from openopt.NLP import solve [as alias]
p = NLP(f, x0, c=c, h=h, gtol=gtol, contol=contol, iprint=1, maxIter=700, maxFunEvals=1e7, name='NLP_1')
#p = NLP(f, x0, gtol=gtol, contol=contol, iprint=50, maxIter=10000, maxFunEvals=1e7, name='NLP_1')

# optional: graphic output, requires pylab (matplotlib)
p.plot = True

solver = 'ralg'
#solver = 'scipy_cobyla'
#solver = 'algencan'
#solver = 'ipopt'
#solver = 'scipy_slsqp'

# solve the problem; the string argument is the solver name
r = p.solve(solver, plot=0)

# r.xf and r.ff are the optimum point and the optimal objective value
# r.ff should be something like 132.05
#print(r.xf, r.ff)
print(r.xf)
q, qd, qdd = get_ref_traj(r.xf, tf, rate, num_dof, N)
pl.figure()
pl.plot(q)
pl.show()

results = {}
Example 15: matrix
# Required module: from openopt import NLP [as alias]
# Or: from openopt.NLP import solve [as alias]
"""
this is an example of using d2f - Hesse matrix (2nd derivatives)
d2c, d2h, d2l are intended to be implemented soon
and to be connected to ALGENCAN and/or CVXOPT
and/or other NLP solvers
//Dmitrey
"""
from openopt import NLP
from numpy import cos, arange, ones, asarray, abs, zeros, diag
N = 300
M = 5
ff = lambda x: ((x-M)**4).sum()
p = NLP(ff, cos(arange(N)))
p.df = lambda x: 4*(x-M)**3
p.d2f = lambda x: diag(12*(x-M)**2)
# other valid assignment:
# p = NLP(lambda x: ((x-M)**4).sum(), cos(arange(N)), df = lambda x: 4*(x-M)**3, d2f = lambda x: diag(12*(x-M)**2))
# or
# p = NLP(x0 = cos(arange(N)), f = lambda x: ((x-M)**4).sum(), df = lambda x: 4*(x-M)**3, d2f = lambda x: diag(12*(x-M)**2))
r = p.solve('scipy_ncg')
print('objfunc val: %e' % r.ff) # it should be a small positive like 5.23656378549e-08
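To sanity-check a user-supplied gradient against finite differences before solving, OpenOpt problems expose a check helper; a short sketch (the exact checkdf API is assumed from OpenOpt's documentation):

p2 = NLP(ff, cos(arange(N)), df=lambda x: 4 * (x - M) ** 3)
p2.checkdf()  # reports the discrepancy between the analytic and numerical gradients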