

Python openopt.NLP Class Code Examples

This article collects typical usage examples of the openopt.NLP class in Python. If you are wondering what exactly the NLP class does, how to use it, or what real code that uses it looks like, the curated class examples below may help.


The following presents 15 code examples of the NLP class, sorted by popularity by default.

Example 1: _min

    def _min(self, func, x0, *args, **kwargs):
        if _USE_OPENOPT:
            if 'solver' in kwargs:
                solver = kwargs['solver']
                del kwargs['solver']
            else:
                solver = 'ralg'
            if 'df' in kwargs:
                df = kwargs['df']
                del kwargs['df']
            else:
                df = self._diff
            p = NLP(
                func,
                x0,
                args=(
                    self.mx,
                    self.my,
                    self.size),
                df=df,
                **kwargs)
            z = p.solve(solver)

            return z.xf, z.ff, z.istop > 0, z.msg
        else:
            a = minimize(
                func,
                x0,
                args=(
                    self.mx,
                    self.my,
                    self.size),
                *args,
                **kwargs)
            return a.x, a.fun, a.success, a.message
Developer: jfrelinger, Project: fcm, Lines: 35, Source: align_data.py
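
The method above relies on class attributes (self.mx, self.my, self.size) and a module-level _USE_OPENOPT flag, so it is not runnable on its own. The following standalone sketch shows the same OpenOpt-or-SciPy fallback pattern on a made-up objective; it assumes OpenOpt (with its 'ralg' solver) may or may not be installed and falls back to scipy.optimize.minimize accordingly.

    import numpy as np
    from scipy.optimize import minimize

    try:
        from openopt import NLP
        _USE_OPENOPT = True
    except ImportError:
        _USE_OPENOPT = False

    def objective(x):
        # illustrative 2-D Rosenbrock function
        return (1 - x[0])**2 + 100 * (x[1] - x[0]**2)**2

    x0 = np.array([-1.2, 1.0])
    if _USE_OPENOPT:
        p = NLP(objective, x0)
        r = p.solve('ralg')
        xf, ff, success, msg = r.xf, r.ff, r.istop > 0, r.msg
    else:
        res = minimize(objective, x0)
        xf, ff, success, msg = res.x, res.fun, res.success, res.message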

Example 2: wls_fit

def wls_fit(function, initial_guess, X, Y, weights=None, lb=None, ub=None):
    """[Inputs]
        function is of form:
            def function(coeffs, xdata)
    """

    if weights is None:
        weights = [1] * len(X)

    def penalty(c):
        fit = function(c, X)
        error = (weights * (Y - fit) ** 2).sum()
        return error

    problem = NLP(penalty, initial_guess)

    if lb is not None:
        problem.lb = lb
    if ub is not None:
        problem.ub = ub

    solver = 'ipopt'
    result = problem.solve(solver)

    coeffs = result.xf
    return coeffs
Developer: bowlofstew, Project: historical_data, Lines: 26, Source: nerdy.py
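
A hypothetical call to wls_fit might look like the sketch below; the line model, data, and bounds are made up for illustration, and the call itself is commented out because the function hard-codes the 'ipopt' solver, which needs a working IPOPT installation alongside OpenOpt.

    import numpy as np

    def line(coeffs, xdata):
        # model y = a*x + b, matching the expected signature function(coeffs, xdata)
        a, b = coeffs
        return a * np.asarray(xdata) + b

    X = np.linspace(0.0, 10.0, 20)
    Y = 3.0 * X + 1.0 + np.random.normal(scale=0.5, size=X.shape)

    # coeffs = wls_fit(line, [1.0, 0.0], X, Y,
    #                  weights=np.ones_like(X), lb=[0.0, -10.0], ub=[10.0, 10.0])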

Example 3: garchfit

    def garchfit(self,initvalue):
        """
        estimate GARCH(1,1) parameters by the maximum likelihood method.
        Optimization should be under the following constraints:
        ARCH + GARCH < 1 (Stationarity)
        All parameters >= 0 (Non-negative)
        -------------------------------------------------------------------------
        InitValue = [ARCH; GARCH; Constant]
        """

        try:
            from openopt import NLP
            lb = [0.0001, 0.0001, 0.] #lower bound
            A = [1, 1, 0]
            b = 1.0
            p = NLP(self.f,initvalue,lb=lb,A=A,b=b)
            r = p.solve('ralg')
        
            return r.xf
        except ImportError:
            print "Openopt is not installed, will use scipy.fmin_cobyla instead"
            print "the result may not accurate though"
            params = fmin_cobyla(self.f,initvalue,cons=[lambda x:1-(x[0]+x[1]),
                                                   lambda x:x[0],
                                                   lambda x:x[2],
                                                   lambda x:x[1]])
            return params
Developer: pyfun, Project: msf, Lines: 27, Source: garch.py
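
With the parameter vector ordered as [ARCH, GARCH, Constant], the linear constraint is passed in OpenOpt's A·x <= b form: A = [1, 1, 0] with b = 1.0 reads ARCH + GARCH <= 1 (stationarity), while lb keeps all parameters non-negative, with a small 0.0001 floor on ARCH and GARCH. The sketch below wires up the same constraints on a stand-in objective; the quadratic placeholder is purely hypothetical and only stands in for the class's negative log-likelihood self.f.

    from openopt import NLP

    def neg_loglik(x):
        # stand-in for the GARCH negative log-likelihood, for illustration only
        arch, garch, const = x
        return (arch - 0.1)**2 + (garch - 0.85)**2 + (const - 0.05)**2

    x0 = [0.1, 0.8, 0.05]        # [ARCH, GARCH, Constant]
    lb = [0.0001, 0.0001, 0.0]   # non-negativity (small floor on ARCH and GARCH)
    A = [1, 1, 0]                # 1*ARCH + 1*GARCH + 0*Constant <= 1
    b = 1.0

    p = NLP(neg_loglik, x0, lb=lb, A=A, b=b)
    r = p.solve('ralg')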

Example 4: test_openopt

def test_openopt():
    p = NLP(residual, x0, lb=lb, ub=ub,
            iprint=1, plot=True)
    # uncomment the following (and set scale = 1 above) to use openopt's
    # scaling mechanism.  This only seems to work with a few solvers, though.
    #p.scale = np.array([1, 1e6, 1e6, 1e6, 1e6, 1])

    r = p.solve('ralg')             # OpenOpt solver, seems to work well,
Developer: RebeccaWPerry, Project: holography-gpu, Lines: 8, Source: disable_openopt.py

Example 5: startOptimization

 def startOptimization(self, root, varsRoot, AddVar, currValues, \
                       ValsColumnName, ObjEntry, ExperimentNumber, Next, NN, goal, objtol, C):
     AddVar.destroy()
     ValsColumnName.set('Experiment parameters')
     n = len(self.NameEntriesList)
     Names, Lb, Ub, Tol, x0 = [], [], [], [], []
     for i in range(n):
         N, L, U, T, valEntry = \
         self.NameEntriesList[i], self.LB_EntriesList[i], self.UB_EntriesList[i], self.TolEntriesList[i], self.ValueEntriesList[i]
         N.config(state=DISABLED)
         L.config(state=DISABLED)
         U.config(state=DISABLED)
         T.config(state=DISABLED)
         #valEntry.config(state=DISABLED)
         name, lb, ub, tol, val = N.get(), L.get(), U.get(), T.get(), valEntry.get()
         Names.append(name)
         x0.append(float(val))
         Lb.append(float(lb) if lb != '' else -inf)
         Ub.append(float(ub) if ub != '' else inf)
         
         # TODO: fix zero
         Tol.append(float(tol) if tol != '' else 0) 
         
     x0, Tol, Lb, Ub = asfarray(x0), asfarray(Tol), asfarray(Lb), asfarray(Ub)
     x0 *= xtolScaleFactor / Tol
     #self.x0 = copy(x0)
     from openopt import NLP, oosolver
     p = NLP(objective, x0, lb = Lb * xtolScaleFactor / Tol, ub=Ub * xtolScaleFactor / Tol)
     self.prob = p
     #calculated_points = [(copy(x0), copy(float(ObjEntry.get())))
     p.args = (Tol, self, ObjEntry, p, root, ExperimentNumber, Next, NN, objtol, C)
     #p.graphics.rate = -inf
     #p.f_iter = 2
     solver = oosolver('bobyqa', useStopByException = False)
     p.solve(solver, iprint = 1, goal = goal)#, plot=1, xlabel='nf')
     self.solved = True
     if p.stopcase >= 0:
         self.ValsColumnName.set('Best parameters')
         NN.set('Best obtained objective value:')
     #Next.config(state=DISABLED)
     Next.destroy()
     #reverse = True if goal == 'min' else False
     
     calculated_items = self.calculated_points.items() if isinstance(self.calculated_points, dict) else self.calculated_points
     vals = [calculated_items[i][1] for i in range(len(calculated_items))]
     ind = argsort(vals)
     j = ind[0] if goal == 'min' else ind[-1]
     key, val = calculated_items[j]
     text_coords = key.split(' ')
     for i in range(len(self.ValueEntriesList)):
         self.ValueEntriesList[i].delete(0, END)
         self.ValueEntriesList[i].insert(0, text_coords[i])
     ObjEntry.delete(0, END)
     obj_tol = self.ObjTolEntry.get()
     val = float(val) * 1e4 * objtol
     ObjEntry.insert(0, str(val))
     ObjEntry.config(state=DISABLED)
Developer: AlbertHolmes, Project: openopt, Lines: 57, Source: mfa.py

Example 6: balance

def balance(sam, debug=False):
    try:
        table = sam.array()
    except AttributeError:
        table = np.array(sam)

    assert table.shape[0] == table.shape[1]
    size = table.shape[0]
    x0 = np.array([v for v in table.flatten() if v !=0])

    def transform(ox):
        ret = np.zeros_like(table)
        i = 0
        for r in range(size):
            for c in range(size):
                if table[r, c] != 0:
                    ret[r, c] = ox[i]
                    i += 1
        return ret
    
    def objective(ox):
        ox = np.square((ox - x0) / x0)
        return np.sum(ox)

    def constraints(ox):
        ox = transform(ox)
        ret = np.sum(ox, 0) - np.sum(ox, 1)
        return ret

    print constraints(x0)

    if debug:
        print("--- balance ---")
    p = NLP(objective, x0, h=constraints, iprint = 50 * int(debug), maxIter = 100000, maxFunEvals = 1e7, name = 'NLP_1') 
    r = p.solve('ralg', plot=0)
    if debug:        
        print 'constraints'
        print constraints(r.xf)
    assert r.isFeasible

    try:
        return sam.replace(transform(r.xf))
    except UnboundLocalError:
        return transform(r.xf)
Developer: DavoudTaghawiNejad, Project: python-cge, Lines: 44, Source: balance.py
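
A hypothetical call is sketched below, assuming OpenOpt and its 'ralg' solver are installed (note that the function body above also uses Python 2 print statements). The input is a small, slightly unbalanced matrix with made-up numbers; the returned table has matching row and column sums while staying close, in relative squared terms, to the original non-zero entries.

    import numpy as np

    sam = np.array([[0.0, 5.0, 3.0],
                    [4.0, 0.0, 4.5],
                    [3.9, 3.2, 0.0]])   # made-up, not quite balanced

    # balanced = balance(sam, debug=True)
    # np.allclose(balanced.sum(axis=0), balanced.sum(axis=1))  # -> True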

Example 7: getDirectionOptimPoint

def getDirectionOptimPoint(p, func, x, direction, forwardMultiplier = 2.0, maxiter = 150, xtol = None, maxConstrLimit = None,  \
                           alpha_lb = 0,  alpha_ub = inf,  \
                           rightLocalization = 0,  leftLocalization = 0, \
                           rightBorderForLocalization = 0, leftBorderForLocalization = None):
    if all(direction==0): p.err('nonzero direction is required')

    if maxConstrLimit is None:
        lsFunc = funcDirectionValue
        args = (func, x, direction)
    else:
        lsFunc = funcDirectionValueWithMaxConstraintLimit
        args = (func, x, direction, maxConstrLimit, p)

    prev_alpha, new_alpha = alpha_lb, min(alpha_lb+0.5, alpha_ub)
    prev_val = lsFunc(prev_alpha, *args)
    for i in range(p.maxLineSearch):
        if lsFunc(new_alpha, *args)>prev_val or new_alpha==alpha_ub: break
        else:
            if i != 0: prev_alpha = min(alpha_lb, new_alpha)
            new_alpha *= forwardMultiplier

    if i == p.maxLineSearch-1: p.debugmsg('getDirectionOptimPoint: maxLineSearch is exceeded')
    lb, ub = prev_alpha, new_alpha

    if xtol is None: xtol = p.xtol / 2.0
    # NB! goldenSection solver ignores x0
    p_LS = NLP(lsFunc, x0=0, lb = lb,  ub = ub, iprint = -1, \
               args=args, xtol = xtol, maxIter = maxiter, contol = p.contol)# contol is used in funcDirectionValueWithMaxConstraintLimit


    r = p_LS.solve('goldenSection', useOOiterfcn=False, rightLocalization=rightLocalization, leftLocalization=leftLocalization, rightBorderForLocalization=rightBorderForLocalization, leftBorderForLocalization=leftBorderForLocalization)
    if r.istop == IS_MAX_ITER_REACHED:
        p.warn('getDirectionOptimPoint: max iter has been exceeded')
    alpha_opt = r.special.rightXOptBorder

    R = DirectionOptimPoint()
    R.leftAlphaOptBorder = r.special.leftXOptBorder
    R.leftXOptBorder = x + R.leftAlphaOptBorder * direction
    R.rightAlphaOptBorder = r.special.rightXOptBorder
    R.rightXOptBorder = x + R.rightAlphaOptBorder * direction
    R.x = x + alpha_opt * direction
    R.alpha = alpha_opt
    R.evalsF = r.evals['f']+i
    return R
Developer: AlbertHolmes, Project: openopt, Lines: 44, Source: UkrOptMisc.py

Example 8: phScenSolve

def phScenSolve(scen,rho,verbose=0):
    Wfilename = 'work/W%d.npy' %(scen); W=load(Wfilename); 
    orho,nscen,nwells,nx,LB,UB = readref()
#    print 'rho=%g,nx=%d' %(rho,nx)
#    refcard = load('work/refcard.npy')
#    orho,nscen,nwells,nx,LB,UB = readcard(refcard)
    qfn = 'work/qhat%d.npy' %(nx); qhat = load(qfn)
    H = GenHydroScen(nx,nscen,nwells,scen)
    e = lambda q,H,W,rho,qhat: gwrfull(q,H,W,rho,qhat)
    q0 = qhat; # we should read qsol!
#    q0 = array([0.0025,-0.0038,0.0018,-0.0092])
    which_opt = 2 #0 slsqp,1 cobyla, 2 NLP
    if which_opt>0:
       if which_opt==1:
           up = lambda q,H,W,rho,qhat,scen: min(q-LB)
           dn = lambda q,H,W,rho,qhat,scen: max(UB-q)
           qopt = fmin_cobyla(e,q0,cons=[up,dn],iprint=0,
		    args=[H,W,rho,qhat],rhoend=0.0001)
#       qopt = fmin_brute(q0,H,W,rho,qhat,scen)
       else:
           eNLP = lambda q: gwrfull(q,H,W,rho,qhat)
           popt = NLP(eNLP,q0,lb=LB*ones((nwells,1)),
		ub=UB*ones((nwells,1)),iprint=-1)
           ropt = popt.solve('ralg')
           qopt = ropt.xf
           qopt = qopt.reshape(1,-1)
    else:
       bounds = [(LB,UB) for i in range(size(qhat))]
#       print bounds
       qopt = fmin_slsqp(e,q0,bounds=bounds,iprint=0,
		args=[H,W,rho,qhat,scen],acc=0.001)
   
    filename = 'work/qsol%d' %(scen);    save(filename,squeeze(qopt))
    print 'qsol[%d] =' %(scen),qopt
  #  qpert = zeros((1,nwells));
 #  for i in range(nwells):
##       qpert[:,i]= qopt[:,i]+0.01*random.randn()
#    print 'qpert[%d]=' %(scen),qpert
#    z1=gwrfullCH(qopt,H,W,rho,qhat,scen)
#    z2=gwrfullCH(qpert,H,W,rho,qhat,scen)
#    print 'TicToc=%g' %(z1-z2)
    if verbose: scenvecprint(scen,qopt)
    return
Developer: sergiolucero, Project: pygwr, Lines: 43, Source: gwrlib.py

Example 9: gridmax

def gridmax(al,gam,w,D,th,tl,steps):
	grid = linspace(0.000,1.0,steps)
	pisep = 0
	for n in grid:
		gridh = linspace(n,1,steps-n*steps)
		for nh in gridh:
			pinowsep = profitsseparating(n,nh,al,gam,w,D,th,tl) 
			if pinowsep >pisep:
				pisep = pinowsep
				solutioneffortsep = [n,nh]
	x0 = [solutioneffortsep[0],solutioneffortsep[1]]
	lb = [0,0]
	ub = [1,1]
	A= [1,-1]
	b=[0]
	f=lambda x: -profitsseparating(x[0],x[1],al,gam,w,D,th,tl)#note that the functions below search for a minimum, hence the "-"
	p=NLP(f,x0,lb=lb,ub=ub,A=A,b=b,contol = 1e-8,gtol = 1e-10,ftol = 1e-12)
	solver='ralg'
	r=p.solve(solver)
	#the "2 program" simply assumes ef=0 and maximizes over efh only; the result is then compared to the other program above. This is done because there is a local maximum at ef=0 (see paper)
	f2=lambda x: -profitsseparating(0,x[0],al,gam,w,D,th,tl)
	lb2=[0]
	ulb2=[1]
	p2=NLP(f2,solutioneffortsep[1],lb=lb2,ub=ulb2,contol = 1e-8,gtol = 1e-10,ftol = 1e-12)
	r2=p2.solve(solver)
	if r.ff<r2.ff:
		print 'solver result with gamma=',gam,', alpha=',al,', w=',w,' and D=',D,', th=',th,', tl=',tl,' the effort levels are: ', r.xf
		ref=r.xf[0]
		refh=r.xf[1]
		piff=r.ff
	else:
		print 'solver result with gamma=',gam,', alpha=',al,', w=',w,' and D=',D,', th=',th,', tl=',tl,' the effort levels are : 0',r2.xf
		ref=0
		refh=r2.xf[0]
		piff=r2.ff
	print ref,refh
	print 'ub1 is ', ub1(ref,refh, al,gam,util(w),util(w-D),th,tl), '; ul1 is ',ul1(ref,refh, al,gam,util(w),util(w-D),th,tl)
	print 'ub2 is ', ub2(ref,refh, al,gam,util(w),util(w-D),th,tl), '; ul2 is ',ul2(ref,refh, al,gam,util(w),util(w-D),th,tl)
	euff=al*(betah(ref,al,th,tl)*ul1(ref,refh, al,gam,util(w),util(w-D),th,tl)+(1-betah(ref,al,th,tl))*ub1(ref,refh, al,gam,util(w),util(w-D),th,tl))+(1-al)*(betal(ref,al,th,tl)*ul2(ref,refh, al,gam,util(w),util(w-D),th,tl)+(1-betal(ref,al,th,tl))*ub2(ref,refh, al,gam,util(w),util(w-D),th,tl))-cost(ref,gam)
	print 'expected utility under this contract is ', euff
	print 'expected solver profits are ',-piff
	return [-piff,euff]#this return is used for creating the graph
Developer: schottmueller, Project: schottmueller.github.io, Lines: 42, Source: current_example1113.py

Example 10: run

    def run(self, plot=True):
        """
        Solves the optimization problem.
        """        
        # Initial try
        p0 = self.get_p0()
        
        #Lower bounds and Upper bounds (HARDCODED FOR QUADTANK)
        lbound = N.array([0.0001]*len(p0))
        if self.gridsize == 1:
            ubound = [10.0]*(self.gridsize*self.nbr_us)
        else:
            ubound = [10.0]*(self.gridsize*self.nbr_us) + [0.20,0.20,0.20,0.20,N.inf]*((self.gridsize-1))

        
        #UPPER BOUND FOR VDP
        #ubound = [0.75]*(self.gridsize*self.nbr_us)+[N.inf]*((self.gridsize-1)*self.nbr_ys)
        
        if self.verbosity >= Multiple_Shooting.NORMAL:
            print 'Initial parameter vector: '
            print p0
            print 'Lower bound:', len(lbound)
            print 'Upper bound:', len(ubound)

        # Get OpenOPT handler
        p_solve = NLP(self.f,p0,lb = lbound, ub=ubound,maxFunEvals = self.maxFeval, maxIter = self.maxIter, ftol=self.ftol, maxTime=self.maxTime)
        
        #If multiple shooting is performed or single shooting
        if self.gridsize > 1:
            p_solve.h  = self.h
        
        if plot:
            p_solve.plot = 1

        self.opt = p_solve.solve(self.optMethod)        
        
        return self.opt
Developer: jnorthrup, Project: jmodelica, Lines: 37, Source: assimulo_shooting.py

Example 11: optimise_openopt

def optimise_openopt(target_speed):
	from openopt import NLP
	def fitfun(gene):
		ontimes = np.require(gene[:12].copy(), requirements=['C', 'A', 'O', 'W'])
		offtimes = np.require(ontimes + gene[12:].copy(), requirements=['C', 'A', 'O', 'W'])
		currents = [243.]*12
		flyer.prop.overwriteCoils(ontimes.ctypes.data_as(c_double_p), offtimes.ctypes.data_as(c_double_p))
		flyer.preparePropagation(currents)
		flyer.propagate(0)

		pos = flyer.finalPositions[0]
		vel = flyer.finalVelocities[0]
		
		ind = np.where((pos[:, 2] > 268.) & (vel[:, 2] < 1.1*target_speed) & (vel[:, 2] > 0.9*target_speed))[0] # all particles that reach the end
		print 'good particles:', ind.shape[0]
		return -1.*ind.shape[0]

	initval = np.append(flyer.ontimes, flyer.offtimes - flyer.ontimes)
	lb = np.array(24*[0])
	ub = np.array(12*[600] + 12*[85])

	p = NLP(fitfun, initval, lb=lb, ub=ub)
	r = p.solve('bobyqa', plot=0)
	return r
Developer: softleygroup, Project: zflyer, Lines: 24, Source: optimise_zeeman.py

Example 12: milpTransfer

def milpTransfer(originProb):
    newProb = NLP(originProb.f, originProb.x0)
    originProb.fill(newProb)
    newProb.discreteVars = originProb.discreteVars
    def err(s): # to prevent text output
        raise OpenOptException(s)
    newProb.err = err
    for fn in ['df', 'd2f', 'c', 'dc', 'h', 'dh']:
        if hasattr(originProb, fn) and getattr(originProb.userProvided, fn) or originProb.isFDmodel:
            setattr(newProb, fn, getattr(originProb, fn))
    
    newProb.plot = 0
    newProb.iprint = -1
    newProb.nlpSolver = originProb.nlpSolver 
    return newProb
Developer: PythonCharmers, Project: OOSuite, Lines: 15, Source: branb_oo.py

Example 13: f

'''
Example:
(x-1)^2 + (y-2)^2 + (z-3)^4 -> min
subjected to
y >= 5
4x - 5z <= -1
(x-10)^2 + (y+1)^2 <= 50
'''

from openopt import NLP
from numpy import *
 
x0 = [0,0,0] # start point estimation
 
# define objective function as a Python language function
# of course, you can use "def f(x):" for multi-line functions instead of "f = lambda x:"
f = lambda x: (x[0]-1)**2 + (x[1]-2)**2 + (x[2]-3)**4
 
# form box-bound constraints lb <= x <= ub
lb = [-inf, 5, -inf] # lower bound
 
# form general linear constraints Ax <= b
A = [4, 0, -5]
b = -1
 
# form general nonlinear constraints c(x) <= 0
c = lambda x: (x[0] - 10)**2 + (x[1]+1) ** 2 - 50
 
# optionally you can provide derivatives (user-supplied or from automatic differentiation)
# for objective function and/or nonlinear constraints, see further doc below
 
p = NLP(f, x0, lb=lb, A=A, b=b, c=c)
r = p.solve('ralg')
print r.xf # [ 6.25834211  4.99999931  5.20667372]
Developer: baothien, Project: tiensy, Lines: 31, Source: openopt_test.py
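
The comment in the example notes that derivatives can be supplied optionally. For this objective and nonlinear constraint they are straightforward to write by hand; a sketch of what could be set on p before calling p.solve (OpenOpt otherwise falls back to finite-difference approximations):

    # gradient of (x-1)^2 + (y-2)^2 + (z-3)^4
    p.df = lambda x: [2*(x[0] - 1), 2*(x[1] - 2), 4*(x[2] - 3)**3]
    # gradient of the nonlinear constraint (x-10)^2 + (y+1)^2 - 50
    p.dc = lambda x: [2*(x[0] - 10), 2*(x[1] + 1), 0]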

Example 14: min

from openopt import NLP
from numpy import cos, arange, ones, asarray, zeros, mat, array

N = 50
# objfunc:
# (x0-1)^4 + (x1-1)^4 + ... + (x49-1)^4 -> min (N=nVars=50)
f = lambda x : ((x-1)**4).sum()
x0 = cos(arange(N))
p = NLP(f, x0, maxIter = 1e3, maxFunEvals = 1e5)

# f(x) gradient (optional):
p.df = lambda x: 4*(x-1)**3


# lb<= x <= ub:
# x4 <= -2.5
# 3.5 <= x5 <= 4.5
# all other: lb = -5, ub = +15
p.lb = -5*ones(N)
p.ub = 15*ones(N)
p.ub[4] = -2.5
p.lb[5], p.ub[5] = 3.5, 4.5



# Ax <= b
# x0+...+xN>= 1.1*N
# x9 + x19 <= 1.5
# x10+x11 >= 1.6
p.A = zeros((3, N))
p.A[0, 9] = 1
Developer: AlbertHolmes, Project: openopt, Lines: 31, Source: nlp_ALGENCAN.py
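
The excerpt stops while the constraint matrix p.A is being filled in. One plausible way to finish encoding the three linear constraints listed in the comments in OpenOpt's A·x <= b form (the >= constraints are flipped by negating both sides) is sketched below; it is a continuation consistent with the comments, not necessarily identical to the original nlp_ALGENCAN.py.

    p.A[0, 19] = 1                   # row 0:  x9 + x19 <= 1.5
    p.A[1, 10] = p.A[1, 11] = -1     # row 1: -(x10 + x11) <= -1.6
    p.A[2, :] = -1                   # row 2: -(x0 + ... + x49) <= -1.1*N
    p.b = [1.5, -1.6, -1.1 * N]

    r = p.solve('ralg')              # the file name suggests ALGENCAN; any
                                     # installed NLP solver can be substituted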

Example 15: NLP

from openopt import NLP
from numpy import cos, arange, ones, asarray, abs, zeros
N = 30
M = 5
ff = lambda x: ((x-M)**2).sum()
p = NLP(ff, cos(arange(N)))
p.df =  lambda x: 2*(x-M)
p.c = lambda x: [2* x[0] **4-32, x[1]**2+x[2]**2 - 8]

def dc(x):
    r = zeros((2, p.n))
    r[0,0] = 2 * 4 * x[0]**3
    r[1,1] = 2 * x[1]
    r[1,2] = 2 * x[2]
    return r
p.dc = dc

h1 = lambda x: 1e1*(x[-1]-1)**4
h2 = lambda x: (x[-2]-1.5)**4
p.h = lambda x: (h1(x), h2(x))

def dh(x):
    r = zeros((2, p.n))
    r[0,-1] = 1e1*4*(x[-1]-1)**3
    r[1,-2] = 4*(x[-2]-1.5)**3
    return r
p.dh = dh

p.lb = -6*ones(N)
p.ub = 6*ones(N)
p.lb[3] = 5.5
Developer: AlbertHolmes, Project: openopt, Lines: 31, Source: nlp_2.py
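
This excerpt also breaks off during the bound setup, before the problem is solved. A hedged continuation, assuming the 'ralg' solver is available, might be:

    r = p.solve('ralg')
    print(r.xf)    # solution satisfying the box bounds, c(x) <= 0 and h(x) = 0
    print(r.ff)    # objective value at the solution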


Note: The openopt.NLP class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs; the snippets were selected from open-source projects contributed by various developers. Copyright of the source code belongs to the original authors; for distribution and use, please refer to the corresponding project's license. Do not republish without permission.