This article collects typical usage examples of the Python method openopt.NLP.df. If you are unsure what NLP.df does, how to call it, or what it looks like in real code, the curated examples below should help; you can also explore the enclosing class openopt.NLP for more context.
Seven code examples of NLP.df are shown below.
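Before the individual examples, here is a minimal sketch of the pattern they all share: the objective is passed to the NLP constructor together with a start point, and the df attribute is assigned a callable that returns the analytic gradient. The objective below is purely illustrative, and the sketch assumes OpenOpt's built-in 'ralg' solver is available.
from openopt import NLP
from numpy import zeros

f = lambda x: (x**2).sum()     # illustrative objective
df = lambda x: 2*x             # its analytic gradient
p = NLP(f, zeros(10))          # problem with start point x0 = zeros(10)
p.df = df                      # attach the user-supplied gradient
r = p.solve('ralg')            # solve with OpenOpt's built-in solver
print(r.xf, r.ff)              # minimizer and objective value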
Example 1: NLP
# Required import: from openopt import NLP [as alias]
# Or: from openopt.NLP import df [as alias]
from openopt import NLP
from numpy import cos, arange, ones, asarray, abs, zeros
N = 30
M = 5
ff = lambda x: ((x-M)**2).sum()
p = NLP(ff, cos(arange(N)))
p.df = lambda x: 2*(x-M)
p.c = lambda x: [2*x[0]**4 - 32, x[1]**2 + x[2]**2 - 8]
def dc(x):
    r = zeros((2, p.n))
    r[0, 0] = 2 * 4 * x[0]**3
    r[1, 1] = 2 * x[1]
    r[1, 2] = 2 * x[2]
    return r
p.dc = dc
h1 = lambda x: 1e1*(x[-1]-1)**4
h2 = lambda x: (x[-2]-1.5)**4
p.h = lambda x: (h1(x), h2(x))
def dh(x):
    r = zeros((2, p.n))
    r[0, -1] = 1e1 * 4 * (x[-1] - 1)**3
    r[1, -2] = 4 * (x[-2] - 1.5)**3
    return r
p.dh = dh
p.lb = -6*ones(N)
p.ub = 6*ones(N)
p.lb[3] = 5.5
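Example 1 stops after setting the bounds. A minimal, hedged continuation showing how such a problem is typically solved with OpenOpt (assuming the built-in 'ralg' solver, which handles the nonlinear c, h and the box bounds):
# Continuation sketch, not part of the original snippet: solve and report.
r = p.solve('ralg')
print(r.xf[:5], r.ff)    # first components of the minimizer and the objective value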
Example 2: NLP
# Required import: from openopt import NLP [as alias]
# Or: from openopt.NLP import df [as alias]
from openopt import NLP
from numpy import cos, arange, ones, asarray, abs, zeros
N = 30
M = 5
ff = lambda x: ((x-M)**2).sum()
p = NLP(ff, cos(arange(N)))
def df(x):
    r = 2*(x-M)
    r[0] += 15  # incorrect derivative
    r[8] += 80  # incorrect derivative
    return r
p.df = df
p.c = lambda x: [2*x[0]**4 - 32, x[1]**2 + x[2]**2 - 8]
def dc(x):
    r = zeros((2, p.n))
    r[0, 0] = 2 * 4 * x[0]**3
    r[1, 1] = 2 * x[1]
    r[1, 2] = 2 * x[2] + 15  # incorrect derivative
    return r
p.dc = dc
p.h = lambda x: (1e1*(x[-1]-1)**4, (x[-2]-1.5)**4)
def dh(x):
    r = zeros((2, p.n))
    r[0, -1] = 1e1 * 4 * (x[-1] - 1)**3
    r[1, -2] = 4 * (x[-2] - 1.5)**3 + 15  # incorrect derivative
    return r
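Example 2 intentionally corrupts three derivative entries (see the "incorrect derivative" comments), which suggests it is meant to exercise OpenOpt's derivative checkers; the snippet ends before that step, so the following continuation is only a sketch:
# Sketch of the usual follow-up for a derivative-checking example (not in the original snippet):
p.dh = dh       # attach the (intentionally wrong) dh defined above
p.checkdf()     # compares p.df against finite differences and reports mismatches
p.checkdc()     # same check for the nonlinear inequality constraints
p.checkdh()     # same check for the nonlinear equality constraints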
Example 3:
# Required import: from openopt import NLP [as alias]
# Or: from openopt.NLP import df [as alias]
p.maxIter = 50
# p.maxfun = 100
# p.df_iter = 50
p.maxTime = 4000
h_args = (h, k, l, fq, fqerr, x, z, cosmat_list, coslist, flist)
if 0:
    # p.h = [pos_sum, neg_sum]
    p.h = [pos_sum, neg_sum]
    p.c = [chisq]
    p.args.h = h_args
    p.args.c = h_args
    p.dh = [pos_sum_grad, neg_sum_grad]
    p.df = chisq_grad
if 1:
    # p.h = [pos_sum, neg_sum, chisq]
    p.c = [chisq]
    p.h = [pos_sum, neg_sum]
    p.args.h = h_args
    p.args.c = h_args
    p.dh = [pos_sum_grad, neg_sum_grad]
    p.dc = chisq_grad
    # p.dh = [pos_sum_grad, neg_sum_grad, neg_sum_grad]
    p.df = S_grad
if 0:
    print('checking')
    p.checkdf()
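The callables assigned above (chisq, pos_sum, S_grad and the rest) belong to the surrounding project and are not shown. A hedged sketch of the step that usually follows such a setup, assuming OpenOpt's 'ralg' solver:
# Sketch only; the solver actually used in the original project is not shown in the snippet.
r = p.solve('ralg')
print('objective value: %g' % r.ff)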
Example 4: NLP
# Required import: from openopt import NLP [as alias]
# Or: from openopt.NLP import df [as alias]
x0fn = 'x0_' + infn[:-4] + '.txt'
x0 = None
if os.path.isfile(x0fn):
    x0 = np.loadtxt(x0fn)
if x0 is None or x0.shape[0] != 2 * m + n:
    x0 = np.ones(2 * m + n)
    # x0[m + 1: 2 * m + 1] = I0 / 10
logger.debug(x0.shape)
lb = np.zeros(2 * m + n)
ub = np.ones(2 * m + n)
ub[m:] = np.inf
p = NLP(error_function2, x0, maxIter=1e5, maxFunEvals=1e7, lb=lb, ub=ub)
p.args.f = (I, m)
p.df = grad
p.checkdf()
r = p.solve('ralg', plot=1)
x = r.xf
logger.info(x)
xfn = 'x_' + infn[:-4] + '.txt'
np.savetxt(xfn, x)
# p = NLP(error_function2, x, maxIter=1e4, maxFunEvals=1e6, lb=lb, ub=ub)
# p.args.f = (I, m)
# r = p.solve('ralg', plot=1)
# x = r.xf
# logger.info(x)
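Example 4 depends on context the snippet omits: os, numpy as np, a configured logger, the input file name infn, the data I with sizes m and n, and the functions error_function2 and grad. Roughly, and purely as an assumption, the surrounding setup would look like this:
# Assumed surrounding context for Example 4 (names taken from the snippet; values are illustrative):
import os
import logging
import numpy as np
from openopt import NLP

logger = logging.getLogger(__name__)
infn = 'input.txt'   # hypothetical input file name; the real one is not shown
# I, m, n, error_function2 and grad are provided elsewhere in the original project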
Example 5: min
# Required import: from openopt import NLP [as alias]
# Or: from openopt.NLP import df [as alias]
from openopt import NLP
from numpy import cos, arange, ones, asarray, zeros, mat, array
N = 50
# objfunc:
# (x0-1)^4 + (x1-1)^4 + ... + (x49-1)^4 -> min  (N = nVars = 50)
f = lambda x : ((x-1)**4).sum()
x0 = cos(arange(N))
p = NLP(f, x0, maxIter = 1e3, maxFunEvals = 1e5)
# f(x) gradient (optional):
p.df = lambda x: 4*(x-1)**3
# lb<= x <= ub:
# x4 <= -2.5
# 3.5 <= x5 <= 4.5
# all other: lb = -5, ub = +15
p.lb = -5*ones(N)
p.ub = 15*ones(N)
p.ub[4] = -2.5
p.lb[5], p.ub[5] = 3.5, 4.5
# Ax <= b
# x0 + x1 + ... + x49 >= 1.1*N
# x9 + x19 <= 1.5
# x10+x11 >= 1.6
p.A = zeros((3, N))
p.A[0, 9] = 1
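The snippet breaks off while filling p.A. OpenOpt expects linear inequalities in the form A x <= b, so the two >= constraints from the comments must be negated; the completion below is a sketch consistent with those comments, and the row ordering beyond row 0 is an assumption:
# Hedged completion of the linear constraints described in the comments above:
p.A[0, 19] = 1                   # row 0:  x9 + x19 <= 1.5
p.A[1, 10] = p.A[1, 11] = -1     # row 1: -(x10 + x11) <= -1.6, i.e. x10 + x11 >= 1.6
p.A[2, :] = -1                   # row 2: -(x0 + ... + x49) <= -1.1*N
p.b = array([1.5, -1.6, -1.1*N])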
Example 6: matrix
# Required import: from openopt import NLP [as alias]
# Or: from openopt.NLP import df [as alias]
"""
this is an example of using d2f - Hesse matrix (2nd derivatives)
d2c, d2h, d2l are intended to be implemented soon
and to be connected to ALGENCAN and/or CVXOPT
and/or other NLP solvers
//Dmitrey
"""
from openopt import NLP
from numpy import cos, arange, ones, asarray, abs, zeros, diag
N = 300
M = 5
ff = lambda x: ((x-M)**4).sum()
p = NLP(ff, cos(arange(N)))
p.df = lambda x: 4*(x-M)**3
p.d2f = lambda x: diag(12*(x-M)**2)
# other valid assignment:
# p = NLP(lambda x: ((x-M)**4).sum(), cos(arange(N)), df = lambda x: 4*(x-M)**3, d2f = lambda x: diag(12*(x-M)**2))
# or
# p = NLP(x0 = cos(arange(N)), f = lambda x: ((x-M)**4).sum(), df = lambda x: 4*(x-M)**3, d2f = lambda x: diag(12*(x-M)**2))
r = p.solve('scipy_ncg')
print('objfunc val: %e' % r.ff)  # should be a small positive number like 5.23656378549e-08
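Because the d2f assignment is the point of Example 6, a quick sanity check of the analytic Hessian against a central-difference approximation can be useful. The check below is illustrative, uses only NumPy, and repeats the gradient/Hessian formulas from the example rather than relying on the problem object after the solve:
# Optional sanity check (not in the original example): compare the analytic Hessian
# with a central-difference approximation built from the analytic gradient.
from numpy import eye, allclose
grad = lambda x: 4*(x - M)**3            # same formula as p.df above
hess = lambda x: diag(12*(x - M)**2)     # same formula as p.d2f above
x, eps, E = cos(arange(N)), 1e-5, eye(N)
H_fd = asarray([(grad(x + eps*E[i]) - grad(x - eps*E[i])) / (2*eps) for i in range(N)])
assert allclose(H_fd, hess(x), atol=1e-4)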
Example 7: NLP
# Required import: from openopt import NLP [as alias]
# Or: from openopt.NLP import df [as alias]
from openopt import NLP
from numpy import cos, arange, ones, asarray, zeros, mat, array, sin, sign, abs, inf
N = 1500
K = 50
# 1st arg - objective function
# 2nd arg - x0
p = NLP(lambda x: (abs(x-5)).sum(), 8*cos(arange(N)), iprint = 50, maxIter = 1e3)
# f(x) gradient (optional):
p.df = lambda x: sign(x-5)
p.lb = 5*ones(N) + sin(arange(N)) - 0.1
p.ub = 5*ones(N) + sin(arange(N)) + 0.1
p.lb[:N//4] = -inf
p.ub[3*N//4:] = inf
#p.ub[4] = 4
#p.lb[5], p.ub[5] = 8, 15
#A = zeros((K, N))
#b = zeros(K)
#for i in xrange(K):
# A[i] = 1+cos(i+arange(N))
# b[i] = sin(i)
#p.A = A
#p.b = b
#p.Aeq = zeros(p.n)
#p.Aeq[100:102] = 1
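The last snippet also ends before the solve call; a hedged sketch of the usual final step for this box-bounded problem, again assuming the 'ralg' solver:
# Sketch only: solve the problem set up above and report the objective value.
r = p.solve('ralg')
print('objective value: %g' % r.ff)   # sum of |x_i - 5| over the feasible box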