本文整理汇总了Python中openopt.NLP.iprint方法的典型用法代码示例。如果您正苦于以下问题:Python NLP.iprint方法的具体用法?Python NLP.iprint怎么用?Python NLP.iprint使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类openopt.NLP
的用法示例。
在下文中一共展示了NLP.iprint方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: milpTransfer
# 需要导入模块: from openopt import NLP [as 别名]
# 或者: from openopt.NLP import iprint [as 别名]
def milpTransfer(originProb):
    """Clone *originProb* as an NLP sub-problem for a MILP-style solver.

    Copies the objective, start point, discrete-variable list and any
    user-provided derivative/constraint callbacks onto a fresh NLP,
    silences its output (plot off, iprint = -1) and makes errors raise
    instead of printing.

    Returns the configured NLP instance.
    """
    subProb = NLP(originProb.f, originProb.x0)
    originProb.fill(subProb)
    subProb.discreteVars = originProb.discreteVars

    # Route solver error text into an exception so nothing is printed.
    def err(message):
        raise OpenOptException(message)

    subProb.err = err

    # Forward user-supplied derivatives/constraints when present (FD models
    # always expose them, so copy unconditionally in that case).
    for attrName in ['df', 'd2f', 'c', 'dc', 'h', 'dh']:
        userHasIt = hasattr(originProb, attrName) and getattr(originProb.userProvided, attrName)
        if userHasIt or originProb.isFDmodel:
            setattr(subProb, attrName, getattr(originProb, attrName))

    subProb.plot = 0
    subProb.iprint = -1  # fully quiet
    subProb.nlpSolver = originProb.nlpSolver
    return subProb
示例2: fit_node
# 需要导入模块: from openopt import NLP [as 别名]
# 或者: from openopt.NLP import iprint [as 别名]
def fit_node(self,index):
# Fit a Gaussian peak to the condensed theta scan of the q-node at *index*:
# detect a candidate peak, refine it with OpenOpt's ralg solver, then
# (optionally) polish with curve_fit / scipy.odr / mpfit.
# NOTE(review): Python 2 code; indentation was lost in extraction and the
# listing is truncated -- code kept byte-identical, comments only.
# `N` is presumably numpy and `pi` presumably numpy.pi -- TODO confirm.
qnode=self.qlist[index]
print qnode.q
# Condensed scan: angle axis 'a3' plus counts and counting errors.
th=qnode.th_condensed['a3']
counts=qnode.th_condensed['counts']
counts_err=qnode.th_condensed['counts_err']
print qnode.th_condensed['counts'].std()
print qnode.th_condensed['counts'].mean()
maxval=qnode.th_condensed['counts'].max()
minval=qnode.th_condensed['counts'].min()
# Signal heuristic: (max - min - mean) of the counts vs. their spread.
diff=qnode.th_condensed['counts'].max()-qnode.th_condensed['counts'].min()\
-qnode.th_condensed['counts'].mean()
sig=qnode.th_condensed['counts'].std()
if diff-2*sig>0:
#the difference between the high and low point and
#the mean is greater than 3 sigma so we have a signal
# NOTE(review): comment says "3 sigma" but the test actually uses 2*sig.
p0=findpeak(th,counts,1)
print 'p0',p0
#Area center width Bak
center=p0[0]
width=p0[1]
# Convert FWHM to the Gaussian sigma.
sigma=width/2/N.sqrt(2*N.log(2))
Imax=maxval-minval
area=Imax*(N.sqrt(2*pi)*sigma)
print 'Imax',Imax
pin=[area,center,width,0]
if 1:
# Minimize chisq(p; th, counts, counts_err) starting from the peak guess.
p = NLP(chisq, pin, maxIter = 1e3, maxFunEvals = 1e5)
#p.lb=lowerm
#p.ub=upperm
p.args.f=(th,counts,counts_err)
p.plot = 0
p.iprint = 1
p.contol = 1e-5#3 # required constraints tolerance, default for NLP is 1e-6
# for ALGENCAN solver gradtol is the only one stop criterium connected to openopt
# (except maxfun, maxiter)
# Note that in ALGENCAN gradtol means norm of projected gradient of the Augmented Lagrangian
# so it should be something like 1e-3...1e-5
p.gradtol = 1e-5#5 # gradient stop criterium (default for NLP is 1e-6)
#print 'maxiter', p.maxiter
#print 'maxfun', p.maxfun
p.maxIter=50
# p.maxfun=100
#p.df_iter = 50
p.maxTime = 4000
#r=p.solve('scipy_cobyla')
#r=p.solve('scipy_lbfgsb')
#r = p.solve('algencan')
print 'ralg'
r = p.solve('ralg')
print 'done'
pfit=r.xf
print 'pfit openopt',pfit
print 'r dict', r.__dict__
if 0:
# Disabled alternative: scipy curve_fit refinement seeded by the ralg result.
print 'curvefit'
print sys.executable
pfit,popt=curve_fit(gauss2, th, counts, p0=pfit, sigma=counts_err)
print 'p,popt', pfit,popt
perror=N.sqrt(N.diag(popt))
print 'perror',perror
chisqr=chisq(pfit,th,counts,counts_err)
dof=len(th)-len(pfit)
print 'chisq',chisqr
if 0:
# Disabled alternative: orthogonal distance regression via scipy.odr.
oparam=scipy.odr.Model(gauss)
mydatao=scipy.odr.RealData(th,counts,sx=None,sy=counts_err)
myodr = scipy.odr.ODR(mydatao, oparam, beta0=pfit)
myoutput=myodr.run()
myoutput.pprint()
pfit=myoutput.beta
if 1:
# Final polish with mpfit, seeded by the OpenOpt solution.
print 'mpfit'
p0=pfit
parbase={'value':0., 'fixed':0, 'limited':[0,0], 'limits':[0.,0.]}
parinfo=[]
for i in range(len(p0)):
parinfo.append(copy.deepcopy(parbase))
for i in range(len(p0)):
parinfo[i]['value']=p0[i]
fa = {'x':th, 'y':counts, 'err':counts_err}
#parinfo[1]['fixed']=1
#parinfo[2]['fixed']=1
m = mpfit(myfunct_res, p0, parinfo=parinfo,functkw=fa)
if (m.status <= 0):
print 'error message = ', m.errmsg
params=m.params
pfit=params
perror=m.perror
#chisqr=(myfunct_res(m.params, x=th, y=counts, err=counts_err)[1]**2).sum()
chisqr=chisq(pfit,th,counts,counts_err)
# ......... remainder of this example omitted by the source listing .........
示例3: NLP
# 需要导入模块: from openopt import NLP [as 别名]
# 或者: from openopt.NLP import iprint [as 别名]
# Fragment (example 3): configure an NLP for an entropy / chisq fit.
# NOTE(review): this listing is truncated at both ends -- p0, lowerm,
# Entropy, chisq, max_wrap and the (h, k, l, fq, ...) data are all defined
# outside this excerpt; `N` is presumably numpy -- TODO confirm.
upperm=N.ones(len(p0))
if 1:
# Active objective: maximize/minimize Entropy (the other two are disabled).
p = NLP(Entropy, p0, maxIter = 1e3, maxFunEvals = 1e5)
if 0:
p = NLP(chisq, p0, maxIter = 1e3, maxFunEvals = 1e5)
if 0:
p = NLP(max_wrap, p0, maxIter = 1e3, maxFunEvals = 1e5)
if 0:
# Disabled box bounds.
p.lb=lowerm
p.ub=upperm
# Extra positional arguments forwarded to the objective.
p.args.f=(h,k,l,fq,fqerr,x,z,cosmat_list,coslist,flist)
p.plot = 0
p.iprint = 1
p.contol = 1e-5#3 # required constraints tolerance, default for NLP is 1e-6
# for ALGENCAN solver gradtol is the only one stop criterium connected to openopt
# (except maxfun, maxiter)
# Note that in ALGENCAN gradtol means norm of projected gradient of the Augmented Lagrangian
# so it should be something like 1e-3...1e-5
p.gradtol = 1e-5#5 # gradient stop criterium (default for NLP is 1e-6)
#print 'maxiter', p.maxiter
#print 'maxfun', p.maxfun
p.maxIter=50
# p.maxfun=100
#p.df_iter = 50
p.maxTime = 4000
h_args=(h,k,l,fq,fqerr,x,z,cosmat_list,coslist,flist)
示例4: help
# 需要导入模块: from openopt import NLP [as 别名]
# 或者: from openopt.NLP import iprint [as 别名]
# see also: help(NLP) -> maxTime, maxCPUTime, ftol and xtol
# that are connected to / used in lincher and some other solvers
# optional: check of user-supplied derivatives
# Fragment (example 4): `p` is an NLP built outside this excerpt.
# Compare user-supplied df/dc/dh against finite differences before solving.
p.checkdf()
p.checkdc()
p.checkdh()
# last but not least:
# please don't forget,
# Python indexing starts from ZERO!!
p.plot = 0
p.iprint = 0
p.df_iter = 4
p.maxTime = 4000
p.debug=1
#r = p.solve('algencan')
r = p.solve('ralg')
#r = p.solve('lincher')
# NOTE(review): the triple-quoted string below is opened but never closed
# within this truncated excerpt.
"""
typical output:
OpenOpt checks user-supplied gradient df (size: (50,))
according to:
prob.diffInt = 1e-07
prob.check.maxViolation = 1e-05
max(abs(df_user - df_numerical)) = 2.50111104094e-06
示例5: test
# 需要导入模块: from openopt import NLP [as 别名]
# 或者: from openopt.NLP import iprint [as 别名]
def test(complexity=0, **kwargs):
    """Run a constrained NLP benchmark and report whether it converged.

    Minimizes sum((x-15)**2) over n = 15*(complexity+1) variables subject to
    box bounds, linear (A, b / Aeq, beq) and nonlinear (c, h) constraints,
    with analytic gradients supplied for everything.

    Parameters:
        complexity -- scales the problem size (n = 15*(complexity+1)).
        **kwargs   -- forwarded to the NLP constructor (e.g. iprint).

    Returns (converged, result, problem) for the last solver attempted.
    """
    n = 15 * (complexity + 1)
    x0 = 15 * cos(arange(n)) + 8

    f = lambda x: ((x - 15) ** 2).sum()
    df = lambda x: 2 * (x - 15)

    # Nonlinear inequality constraints c(x) <= 0.
    c = lambda x: [2 * x[0] ** 4 - 32, x[1] ** 2 + x[2] ** 2 - 8]

    # dc(x)/dx: non-lin ineq constraints gradients (optional).
    def dc(x):
        r = zeros((len(c(x0)), n))
        r[0, 0] = 2 * 4 * x[0] ** 3
        r[1, 1] = 2 * x[1]
        r[1, 2] = 2 * x[2]
        return r

    # Nonlinear equality constraints h(x) = 0 (even power hp keeps them >= 0).
    hp = 2
    h1 = lambda x: (x[-1] - 13) ** hp
    h2 = lambda x: (x[-2] - 17) ** hp
    h = lambda x: [h1(x), h2(x)]

    # dh(x)/dx: non-lin eq constraints gradients (optional).
    def dh(x):
        r = zeros((2, n))
        r[0, -1] = hp * (x[-1] - 13) ** (hp - 1)
        r[1, -2] = hp * (x[-2] - 17) ** (hp - 1)
        return r

    lb = -8 * ones(n)
    ub = 15 * ones(n) + 8 * cos(arange(n))

    # Linear constraints: two inequality rows and one equality row, each
    # summing a small slice of consecutive variables starting at `ind`.
    ind = 3
    A = zeros((2, n))
    A[0, ind:ind + 2] = 1
    A[1, ind + 2:ind + 4] = 1
    b = [15, 8]
    Aeq = zeros(n)
    Aeq[ind + 4:ind + 8] = 1
    beq = 45

    ########################################################
    colors = ['b', 'k', 'y', 'g', 'r']
    # Other solvers tried historically: 'ipopt', 'scipy_cobyla', 'r2'.
    solvers = ['ralg', 'scipy_slsqp']
    ########################################################
    for i, solver in enumerate(solvers):
        p = NLP(f, x0, df=df, c=c, h=h, dc=dc, dh=dh, lb=lb, ub=ub, A=A, b=b, Aeq=Aeq, beq=beq, maxIter=1e4, \
                show=solver == solvers[-1], color=colors[i], **kwargs)
        # dict.has_key() was removed in Python 3; `in` works on 2 and 3.
        if 'iprint' not in kwargs:
            p.iprint = -1
        # p.checkdf()
        # p.checkdc()
        # p.checkdh()
        r = p.solve(solver)
        # NOTE(review): both returns are inside the loop, so only the first
        # solver ever runs -- preserved as-is from the original.
        if r.istop > 0:
            return True, r, p
        else:
            return False, r, p
示例6: fitpeak
# 需要导入模块: from openopt import NLP [as 别名]
# 或者: from openopt.NLP import iprint [as 别名]
def fitpeak(x,y,yerr):
# Fit up to two Gaussian peaks to (x, y +/- yerr): detect candidate peaks,
# refine with OpenOpt's ralg solver, then polish with mpfit.
# NOTE(review): Python 2 code; indentation was lost in extraction and the
# listing is truncated -- code kept byte-identical, comments only.
# `N` is presumably numpy and `pi` presumably numpy.pi -- TODO confirm.
maxval=x.max()
minval=x.min()
# NOTE(review): maxval/minval are taken from x (the abscissa), not from y;
# the sibling fit_node example uses the counts extrema -- verify intent.
# Signal heuristic: (max - min - mean) of y vs. its spread.
diff=y.max()-y.min()-y.mean()
sig=y.std()
print 'diff',diff,'std',sig
if diff-1*sig>0:
#the difference between the high and low point and
#the mean is greater than 3 sigma so we have a signal
# NOTE(review): comment says "3 sigma" but the test actually uses 1*sig.
p0=findpeak(x,y,2)
print 'p0',p0
#Area center width Bak area2 center2 width2
center1=p0[0]
width1=p0[1]
center2=p0[2]
width2=p0[3]
# NOTE(review): `width` is undefined here -- only width1/width2 are set;
# this line raises NameError as written (probably meant width1).
sigma=width/2/N.sqrt(2*N.log(2))
ymax=maxval-minval
area=ymax*(N.sqrt(2*pi)*sigma)
print 'ymax',ymax
pin=[area,center1,width1,0,area,center2,width2]
if 1:
# Minimize chisq(p; x, y, yerr) starting from the two-peak guess.
p = NLP(chisq, pin, maxIter = 1e3, maxFunEvals = 1e5)
#p.lb=lowerm
#p.ub=upperm
p.args.f=(x,y,yerr)
p.plot = 0
p.iprint = 1
p.contol = 1e-5#3 # required constraints tolerance, default for NLP is 1e-6
# for ALGENCAN solver gradtol is the only one stop criterium connected to openopt
# (except maxfun, maxiter)
# Note that in ALGENCAN gradtol means norm of projected gradient of the Augmented Lagrangian
# so it should be something like 1e-3...1e-5
p.gradtol = 1e-5#5 # gradient stop criterium (default for NLP is 1e-6)
#print 'maxiter', p.maxiter
#print 'maxfun', p.maxfun
p.maxIter=50
# p.maxfun=100
#p.df_iter = 50
p.maxTime = 4000
#r=p.solve('scipy_cobyla')
#r=p.solve('scipy_lbfgsb')
#r = p.solve('algencan')
print 'ralg'
r = p.solve('ralg')
print 'done'
pfit=r.xf
print 'pfit openopt',pfit
print 'r dict', r.__dict__
if 1:
# Polish the OpenOpt solution with mpfit.
print 'mpfit'
p0=pfit
parbase={'value':0., 'fixed':0, 'limited':[0,0], 'limits':[0.,0.]}
parinfo=[]
for i in range(len(p0)):
parinfo.append(copy.deepcopy(parbase))
for i in range(len(p0)):
parinfo[i]['value']=p0[i]
fa = {'x':x, 'y':y, 'err':yerr}
#parinfo[1]['fixed']=1
#parinfo[2]['fixed']=1
m = mpfit(myfunct_res, p0, parinfo=parinfo,functkw=fa)
if (m.status <= 0):
print 'error message = ', m.errmsg
params=m.params
pfit=params
perror=m.perror
#chisqr=(myfunct_res(m.params, x=th, y=counts, err=counts_err)[1]**2).sum()
chisqr=chisq(pfit,x,y,yerr)
dof=m.dof
#Icalc=gauss(pfit,th)
#print 'mpfit chisqr', chisqr
ycalc=gauss(pfit,x)
if 1:
# Diagnostic overlays: width bar and center line (p0 is now pfit here).
width_x=N.linspace(p0[0]-p0[1],p0[0]+p0[1],100)
width_y=N.ones(width_x.shape)*(maxval-minval)/2
pos_y=N.linspace(minval,maxval,100)
pos_x=N.ones(pos_y.shape)*p0[0]
if 0:
# Disabled plotting; note th/counts here come from enclosing scope, not
# this function's arguments -- likely stale copy-paste from fit_node.
pylab.errorbar(th,counts,counts_err,marker='s',linestyle='None',mfc='black',mec='black',ecolor='black')
pylab.plot(width_x,width_y)
pylab.plot(pos_x,pos_y)
pylab.plot(x,ycalc)
pylab.show()
else:
#fix center
#fix width
print 'no peak'
#Area center width Bak
area=0
# ......... remainder of this example omitted by the source listing .........
示例7: solve
# 需要导入模块: from openopt import NLP [as 别名]
# 或者: from openopt.NLP import iprint [as 别名]
def solve(self):
    """Solve the stored transportation problem with OpenOpt's ralg solver.

    Builds an NLP from the instance's cost function, initial solution and
    linear equality/inequality constraints plus box bounds, runs it quietly,
    and returns [optimal_point, optimal_value].
    """
    problem = NLP(
        self.cost_function,
        self.initial_solution,
        Aeq=self.Aeq,
        beq=self.beq,
        A=self.A,
        b=self.b,
        lb=self.lb,
        ub=self.ub,
    )
    problem.iprint = -1  # suppress solver text output
    result = problem.solve('ralg')
    return [result.xf, result.ff]
开发者ID:drvinceknight,项目名称:Partial_Sum_Transportation_Polytope,代码行数:7,代码来源:Partial_Sum_Transportation_Problem.py