本文整理汇总了Python中openopt.NLP.h方法的典型用法代码示例。如果您正苦于以下问题:Python NLP.h方法的具体用法?Python NLP.h怎么用?Python NLP.h使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类openopt.NLP
的用法示例。
在下文中一共展示了NLP.h方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: run
# Required import: from openopt import NLP
# Alternatively: from openopt.NLP import h
def run(self, plot=True):
    """
    Solves the optimization problem.

    Parameters::

        plot -- If True, enable OpenOpt's built-in progress plotting.
                Default: True

    Returns::

        The result object produced by NLP.solve (also stored in self.opt).
    """
    # Initial try
    p0 = self.get_p0()
    #Lower bounds and Upper bounds (HARDCODED FOR QUADTANK)
    lbound = N.array([0.0001]*len(p0))
    if self.gridsize == 1:
        # Single shooting: only the control parameters are optimized.
        ubound = [10.0]*(self.gridsize*self.nbr_us)
    else:
        # Multiple shooting: the extra decision variables (segment start
        # states, 4 states + one unbounded entry per interior grid point)
        # get their own upper bounds.
        ubound = [10.0]*(self.gridsize*self.nbr_us) + [0.20,0.20,0.20,0.20,N.inf]*((self.gridsize-1))
    #UPPER BOUND FOR VDP
    #ubound = [0.75]*(self.gridsize*self.nbr_us)+[N.inf]*((self.gridsize-1)*self.nbr_ys)
    if self.verbosity >= Multiple_Shooting.NORMAL:
        print 'Initial parameter vector: '
        print p0
        print 'Lower bound:', len(lbound)
        print 'Upper bound:', len(ubound)
    # Get OpenOPT handler
    p_solve = NLP(self.f,p0,lb = lbound, ub=ubound,maxFunEvals = self.maxFeval, maxIter = self.maxIter, ftol=self.ftol, maxTime=self.maxTime)
    # If multiple shooting is performed (rather than single shooting),
    # attach the nonlinear equality (continuity) constraints h(x) = 0.
    if self.gridsize > 1:
        p_solve.h = self.h
    if plot:
        p_solve.plot = 1
    self.opt = p_solve.solve(self.optMethod)
    return self.opt
示例2: dc
# 需要导入模块: from openopt import NLP [as 别名]
# 或者: from openopt.NLP import h [as 别名]
r[0] += 15 #incorrect derivative
r[8] += 80 #incorrect derivative
return r
p.df = df
# Nonlinear inequality constraints c(x) <= 0.
p.c = lambda x: [2* x[0] **4-32, x[1]**2+x[2]**2 - 8]
def dc(x):
    # Jacobian of c: one row per constraint, one column per variable.
    r = zeros((2, p.n))
    r[0,0] = 2 * 4 * x[0]**3
    r[1,1] = 2 * x[1]
    r[1,2] = 2 * x[2] + 15 #incorrect derivative (deliberate, to be flagged by checkdc below)
    return r
p.dc = dc
# Nonlinear equality constraints h(x) = 0.
p.h = lambda x: (1e1*(x[-1]-1)**4, (x[-2]-1.5)**4)
def dh(x):
    # Jacobian of h.
    r = zeros((2, p.n))
    r[0,-1] = 1e1*4*(x[-1]-1)**3
    r[1,-2] = 4*(x[-2]-1.5)**3 + 15 #incorrect derivative (deliberate, to be flagged by checkdh below)
    return r
p.dh = dh
# Compare the user-supplied derivatives df/dc/dh against finite
# differences at x0; the "+ 15" errors above should be reported.
p.checkdf()
p.checkdc()
p.checkdh()
"""
you can use p.checkdF(x) for other point than x0 (F is f, c or h)
p.checkdc(myX)
or
示例3: NLP
# Required import: from openopt import NLP
# Alternatively: from openopt.NLP import h
# Objective: squared distance from the point (M, M, ..., M);
# start point is cos(0..N-1). N and M are defined earlier in the file.
ff = lambda x: ((x-M)**2).sum()
p = NLP(ff, cos(arange(N)))
# Analytic gradient of the objective.
p.df = lambda x: 2*(x-M)
# Nonlinear inequality constraints c(x) <= 0.
p.c = lambda x: [2* x[0] **4-32, x[1]**2+x[2]**2 - 8]
def dc(x):
    # Jacobian of c: one row per constraint, one column per variable.
    r = zeros((2, p.n))
    r[0,0] = 2 * 4 * x[0]**3
    r[1,1] = 2 * x[1]
    r[1,2] = 2 * x[2]
    return r
p.dc = dc
# Nonlinear equality constraints h(x) = 0, supplied as a tuple of callables.
h1 = lambda x: 1e1*(x[-1]-1)**4
h2 = lambda x: (x[-2]-1.5)**4
p.h = lambda x: (h1(x), h2(x))
def dh(x):
    # Jacobian of h; each equality depends on a single variable.
    r = zeros((2, p.n))
    r[0,-1] = 1e1*4*(x[-1]-1)**3
    r[1,-2] = 4*(x[-2]-1.5)**3
    return r
p.dh = dh
# Box bounds, with two entries tightened individually.
p.lb = -6*ones(N)
p.ub = 6*ones(N)
p.lb[3] = 5.5
p.ub[4] = 4.5
#r = p.solve('ipopt', showLS=0, xtol=1e-7, maxIter = 1504)
#solver = 'ipopt'
示例4: criterium
# Required import: from openopt import NLP
# Alternatively: from openopt.NLP import h
# (except maxfun, maxiter)
# Note that in ALGENCAN gradtol means norm of projected gradient of the Augmented Lagrangian
# so it should be something like 1e-3...1e-5
p.gradtol = 1e-5#5 # gradient stop criterium (default for NLP is 1e-6)
#print 'maxiter', p.maxiter
#print 'maxfun', p.maxfun
p.maxIter=50
# p.maxfun=100
#p.df_iter = 50
p.maxTime = 4000
# Extra arguments forwarded by OpenOpt to the constraint callbacks;
# the same tuple is reused for both h and c below.
h_args=(h,k,l,fq,fqerr,x,z,cosmat_list,coslist,flist)
if 0:
    # Disabled variant: chisq as inequality constraint, chisq gradient as df.
    #p.h=[pos_sum,neg_sum]
    p.h=[pos_sum,neg_sum]
    p.c=[chisq]
    # p.h=[pos_sum,neg_sum]
    p.args.h=h_args
    p.args.c=h_args
    p.dh=[pos_sum_grad,neg_sum_grad]
    p.df=chisq_grad
if 1:
    # Active variant: pos_sum/neg_sum as equalities, chisq as inequality,
    # with chisq_grad attached as the inequality-constraint gradient.
    #p.h=[pos_sum,neg_sum,chisq]
    p.c=[chisq]
    p.h=[pos_sum,neg_sum]
    p.args.h=h_args
    p.args.c=h_args
    p.dh=[pos_sum_grad,neg_sum_grad]
    p.dc=chisq_grad
示例5: c1
# Required import: from openopt import NLP
# Alternatively: from openopt.NLP import h
# p.c = (lambda x: c1(x), lambda x : c2(x), lambda x : c3(x))
# p.c = lambda x: numpy.array(c1(x), c2(x), c3(x))
# def c(x):
#     return c1(x), c2(x), c3(x)
# p.c = c
# non-linear equality constraints h(x) = 0
# 1e6*(x[last]-1)**4 = 0
# (x[last-1]-1.5)**4 = 0
#h1 = lambda x: 1e4*(x[-1]-1)**4
#h2 = lambda x: (x[-2]-1.5)**4
#p.h = [h1, h2]
# Extra arguments forwarded to each equality-constraint callback.
h_args=(h,k,l,fq,fqerr,x,z,cosmat_list)
p.h=[pos_sum,neg_sum,chisq]
# p.h=[pos_sum,neg_sum]
p.args.h=h_args
# The objective gets the same data, minus the cosine-matrix list extras.
p.args.f=(h,k,l,fq,fqerr,x,z,cosmat_list)
#p.args.f=h_args
# dh(x)/dx: non-lin eq constraints gradients (optional):
#def DH(x):
#    r = zeros((2, p.n))
#    r[0, -1] = 1e4*4 * (x[-1]-1)**3
#    r[1, -2] = 4 * (x[-2]-1.5)**3
#    return r
#p.dh = DH
# p.dh=[chisq_grad,pos_sum_grad,]
p.contol = 1e-2#3 # required constraints tolerance, default for NLP is 1e-6
# for ALGENCAN solver gradtol is the only one stop criterium connected to openopt
示例6: well
# Required import: from openopt import NLP
# Alternatively: from openopt.NLP import h
they will be passed to derivative function as well (if you have supplied it)
"""
from openopt import NLP
from numpy import asfarray
# Objective takes an extra user argument `a`, supplied via p.args.f below.
f = lambda x, a: (x**2).sum() + a * x[0]**4
x0 = [8, 15, 80]
p = NLP(f, x0)
#using c(x)<=0 constraints
p.c = lambda x, b, c: (x[0]-4)**2 - 1 + b*x[1]**4 + c*x[2]**4
#using h(x)=0 constraints
p.h = lambda x, d: (x[2]-4)**2 + d*x[2]**4 - 15
p.args.f = 4 # i.e. here we use a=4
# so it's the same to "a = 4; p.args.f = a" or just "p.args.f = a = 4"
p.args.c = (1,2)
p.args.h = 15
# Note 1: using tuple p.args.h = (15,) is valid as well
# Note 2: if all your funcs use same args, you can just use
# p.args = (your args)
# Note 3: you could use f = lambda x, a: (...); c = lambda x, a, b: (...); h = lambda x, a: (...)
示例7: manage
# Required import: from openopt import NLP
# Alternatively: from openopt.NLP import h
"""
OpenOpt GUI:
function manage() usage example
"""
from openopt import NLP, manage
from numpy import cos, arange, ones, asarray, abs, zeros
N = 50
M = 5
# Objective: squared distance from (M, ..., M); start point cos(0..N-1).
p = NLP(lambda x: ((x-M)**2).sum(), cos(arange(N)))
# Box bounds, with two entries tightened individually.
p.lb, p.ub = -6*ones(N), 6*ones(N)
p.lb[3] = 5.5
p.ub[4] = 4.5
# Nonlinear inequality constraints c(x) <= 0.
p.c = lambda x: [2* x[0] **4-32, x[1]**2+x[2]**2 - 8]
# Nonlinear equality constraints h(x) = 0, given as a tuple of callables.
p.h = (lambda x: 1e1*(x[-1]-1)**4, lambda x: (x[-2]-1.5)**4)
"""
minTime is used here
for to provide enough time for user
to play with GUI
"""
minTime = 1.5 # sec
p.name = 'GUI_example'
p.minTime = minTime
"""
hence maxIter, maxFunEvals etc
will not trigger till minTime
only same iter point x_k-1=x_k