当前位置: 首页>>代码示例>>Python>>正文


Python NLP.ub方法代码示例

本文整理汇总了Python中openopt.NLP.ub方法的典型用法代码示例。如果您正苦于以下问题:Python NLP.ub方法的具体用法?Python NLP.ub怎么用?Python NLP.ub使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在openopt.NLP的用法示例。


在下文中一共展示了NLP.ub方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: wls_fit

# 需要导入模块: from openopt import NLP [as 别名]
# 或者: from openopt.NLP import ub [as 别名]
def wls_fit(function, initial_guess, X, Y, weights=None, lb=None, ub=None):
    """Weighted least-squares fit of ``function`` to the data ``(X, Y)``.

    ``function`` must have the signature ``function(coeffs, xdata)`` and
    return the model prediction for ``xdata`` (numpy-array semantics are
    assumed for ``Y`` and the returned fit).  When ``weights`` is None every
    sample gets unit weight.  Optional ``lb``/``ub`` are attached to the NLP
    problem as box bounds on the coefficients.

    Returns the coefficient vector found by the 'ipopt' solver.
    """

    if weights is None:
        weights = [1] * len(X)

    def objective(coeffs):
        # Weighted sum of squared residuals (relies on elementwise
        # array arithmetic, e.g. numpy arrays for Y and the fit).
        residuals = Y - function(coeffs, X)
        return (weights * residuals ** 2).sum()

    problem = NLP(objective, initial_guess)

    # Only attach bounds that the caller actually supplied.
    if lb is not None:
        problem.lb = lb
    if ub is not None:
        problem.ub = ub

    result = problem.solve('ipopt')
    return result.xf
开发者ID:bowlofstew,项目名称:historical_data,代码行数:28,代码来源:nerdy.py

示例2: h2

# 需要导入模块: from openopt import NLP [as 别名]
# 或者: from openopt.NLP import ub [as 别名]
    # NOTE(review): these lines are the tail of a constraint-Jacobian
    # function whose header lies above this chunk — presumably dc(x)
    # filling a (n_constraints, p.n) matrix r; confirm against full file.
    r[1,1] = 2 * x[1]
    r[1,2] = 2 * x[2]
    return r
# Analytic Jacobian of the inequality constraints c(x).
p.dc = dc

# Nonlinear equality constraints h(x) = 0 and their Jacobian.
h1 = lambda x: 1e1*(x[-1]-1)**4
h2 = lambda x: (x[-2]-1.5)**4
p.h = lambda x: (h1(x), h2(x))

def dh(x):
    # 2 equality constraints over p.n variables; only the last two
    # variables appear, so only those columns are nonzero.
    r = zeros((2, p.n))
    r[0,-1] = 1e1*4*(x[-1]-1)**3
    r[1,-2] = 4*(x[-2]-1.5)**3
    return r
p.dh = dh

# Box bounds: -6 <= x <= 6, with tighter bounds on x[3] and x[4].
p.lb = -6*ones(N)
p.ub = 6*ones(N)
p.lb[3] = 5.5
p.ub[4] = 4.5

# Alternative solvers kept for reference; 'ralg' is the one in use.
#r = p.solve('ipopt', showLS=0, xtol=1e-7, maxIter = 1504)
#solver = 'ipopt'
solver = 'ralg'
#solver = 'scipy_slsqp'
#solver = 'algencan'
r = p.solve(solver, maxIter = 1504, plot=1)
#!! fmin_cobyla can't use user-supplied gradient
#r = p.solve('scipy_cobyla')

开发者ID:AlbertHolmes,项目名称:openopt,代码行数:31,代码来源:nlp_2.py

示例3: len

# 需要导入模块: from openopt import NLP [as 别名]
# 或者: from openopt.NLP import ub [as 别名]
        # Python 2 code (print statement).  NOTE(review): this chunk is the
        # interior of a larger function; N here appears to be a numpy alias
        # (N.ones), not an integer — confirm against the full file's imports.
        print len(p0)
        # Bounds on the parameter vector: small positive floor up to 1.
        lowerm=1e-4*N.ones(len(p0))
        #lowerm[0:3]=[-1,-1,-1]
        upperm=N.ones(len(p0))
    # Exactly one of the following objective choices is enabled (if 1/if 0
    # toggles): maximize Entropy, minimize chisq, or max_wrap.
    if 1:
        p = NLP(Entropy, p0, maxIter = 1e3, maxFunEvals = 1e5)

    if 0:
        p = NLP(chisq, p0, maxIter = 1e3, maxFunEvals = 1e5)


    if 0:
        p = NLP(max_wrap, p0, maxIter = 1e3, maxFunEvals = 1e5)
    # Disabled block: box bounds, extra objective args, and solver tuning.
    if 0:
        p.lb=lowerm
        p.ub=upperm
        p.args.f=(h,k,l,fq,fqerr,x,z,cosmat_list,coslist,flist)
        p.plot = 0
        p.iprint = 1
        p.contol = 1e-5#3 # required constraints tolerance, default for NLP is 1e-6

    # for ALGENCAN solver gradtol is the only one stop criterium connected to openopt
    # (except maxfun, maxiter)
    # Note that in ALGENCAN gradtol means norm of projected gradient of  the Augmented Lagrangian
    # so it should be something like 1e-3...1e-5
        p.gradtol = 1e-5#5 # gradient stop criterium (default for NLP is 1e-6)
        #print 'maxiter', p.maxiter
        #print 'maxfun', p.maxfun
        p.maxIter=50
    #    p.maxfun=100
开发者ID:reflectometry,项目名称:WRed,代码行数:32,代码来源:maxent_test_iter2.py

示例4: NLP

# 需要导入模块: from openopt import NLP [as 别名]
# 或者: from openopt.NLP import ub [as 别名]
    # Maximize Entropy starting from p0; chisq objective kept as an
    # alternative.  NOTE(review): interior of a larger function — Entropy,
    # p0 and the numpy alias N are defined above this chunk.
    p = NLP(Entropy, p0, maxIter = 1e3, maxFunEvals = 1e5)
    #p = NLP(chisq, p0, maxIter = 1e3, maxFunEvals = 1e5)
    # f(x) gradient (optional):
#    p.df = S_grad
#    p.d2f=S_hessian
#    p.userProvided.d2f=True
    
    
    # lb<= x <= ub:
    # x4 <= -2.5
    # 3.5 <= x5 <= 4.5
    # all other: lb = -5, ub = +15
    #p.lb =1e-7*N.ones(p.n)
    #p.ub = N.ones(p.n)
    # Box bounds matched to p0's shape: tiny positive floor up to 1.
    p.lb =1e-7*N.ones(p0.shape)
    p.ub = N.ones(p0.shape)
    #p.ub[4] = -2.5
    #p.lb[5], p.ub[5] = 3.5, 4.5

# non-linear inequality constraints c(x) <= 0
# 2*x0^4 <= 1/32
# x1^2+x2^2 <= 1/8
# x25^2 +x25*x35 + x35^2<= 2.5

#p.c = lambda x: [2* x[0] **4-1./32, x[1]**2+x[2]**2 - 1./8, x[25]**2 + x[35]**2 + x[25]*x[35] -2.5]
# other valid c:
# p.c = [lambda x: c1(x), lambda x : c2(x), lambda x : c3(x)]
# p.c = (lambda x: c1(x), lambda x : c2(x), lambda x : c3(x))
# p.c = lambda x: numpy.array(c1(x), c2(x), c3(x))
# def c(x):
#      return c1(x), c2(x), c3(x)
开发者ID:liuhuiwisdom,项目名称:WRed,代码行数:33,代码来源:maxent_test.py

示例5: min

# 需要导入模块: from openopt import NLP [as 别名]
# 或者: from openopt.NLP import ub [as 别名]
# objfunc:
# (x0-1)^4 + (x1-1)^4 + ... +(x49-1)^4 -> min (N=nVars=50)
f = lambda x : ((x-1)**4).sum()
# Starting point: cos(0), cos(1), ..., cos(N-1).
x0 = cos(arange(N))
p = NLP(f, x0, maxIter = 1e3, maxFunEvals = 1e5)

# f(x) gradient (optional):
p.df = lambda x: 4*(x-1)**3


# lb<= x <= ub:
# x4 <= -2.5
# 3.5 <= x5 <= 4.5
# all other: lb = -5, ub = +15
p.lb = -5*ones(N)
p.ub = 15*ones(N)
p.ub[4] = -2.5
p.lb[5], p.ub[5] = 3.5, 4.5



# Linear constraints encoded as Ax <= b (>= rows are negated):
# Ax <= b
# x0+...+xN>= 1.1*N
# x9 + x19 <= 1.5
# x10+x11 >= 1.6
p.A = zeros((3, N))
p.A[0, 9] = 1
p.A[0, 19] = 1
p.A[1, 10:12] = -1
p.A[2] = -ones(N)
p.b = [1.5, -1.6, -1.1*N]
开发者ID:AlbertHolmes,项目名称:openopt,代码行数:33,代码来源:nlp_ALGENCAN.py

示例6: NLP

# 需要导入模块: from openopt import NLP [as 别名]
# 或者: from openopt.NLP import ub [as 别名]
from openopt import NLP

from numpy import cos, arange, ones, asarray, zeros, mat, array, sin, sign, abs, inf

# Problem size and (unused here) number of linear constraints.
N = 1500
K = 50
# 1st arg - objective function: sum(|x_i - 5|) -> min
# 2nd arg - x0: starting point 8*cos(0..N-1)

p = NLP(lambda x: (abs(x-5)).sum(), 8*cos(arange(N)), iprint = 50, maxIter = 1e3)

# f(x) subgradient (optional):
p.df = lambda x: sign(x-5)

# Box bounds: a +/-0.1 band around 5 + sin(i) per coordinate...
p.lb = 5*ones(N) + sin(arange(N)) - 0.1
p.ub = 5*ones(N) + sin(arange(N)) + 0.1
# ...with the first quarter unbounded below and the last quarter unbounded
# above.  Use floor division: N/4 is a float on Python 3 and not a valid
# slice index (identical behavior on Python 2).
p.lb[:N//4] = -inf
p.ub[3*N//4:] = inf

#p.ub[4] = 4
#p.lb[5], p.ub[5] = 8, 15

#A = zeros((K, N))
#b = zeros(K)
#for i in xrange(K):
#    A[i] = 1+cos(i+arange(N))
#    b[i] = sin(i)
#p.A = A
#p.b = b

#p.Aeq = zeros(p.n)
#p.Aeq[100:102] = 1
开发者ID:AlbertHolmes,项目名称:openopt,代码行数:33,代码来源:nlp_4.py


注:本文中的openopt.NLP.ub方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。