

Python NLP.maxIter method code examples

This article collects typical usage examples of the openopt.NLP.maxIter method in Python. If you are wondering what NLP.maxIter does, how to use it, or want to see it in real code, the hand-picked examples below may help. You can also browse further usage examples of the class it belongs to, openopt.NLP.


A total of 4 code examples of the NLP.maxIter method are shown below, sorted by popularity by default.
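Before the examples, here is a minimal sketch (not taken from any of the projects below; the linear model and data are made up purely for illustration) of how maxIter is typically passed to an openopt NLP problem, both in the constructor and later as an attribute, mirroring the usage seen on this page:

import numpy as np
from openopt import NLP

def objective(p, x, y, yerr):
    # weighted least-squares objective; the model here is just a straight line
    model = p[0] + p[1]*x
    return np.sum(((y - model)/yerr)**2)

x = np.linspace(0.0, 1.0, 20)
y = 2.0 + 3.0*x
yerr = np.ones_like(x)

p = NLP(objective, [1.0, 1.0], maxIter=1e3, maxFunEvals=1e5)  # iteration cap set at construction
p.args.f = (x, y, yerr)   # extra arguments forwarded to the objective
p.iprint = 1              # print progress while solving
p.maxIter = 50            # maxIter can also be tightened afterwards as an attribute
r = p.solve('ralg')       # 'ralg' solver, as used in the examples below
print('fitted parameters: %s' % r.xf)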

Example 1: fit_node

# Required import: from openopt import NLP
# Alternatively: from openopt.NLP import maxIter
    def fit_node(self,index):
        qnode=self.qlist[index]
        print qnode.q
        th=qnode.th_condensed['a3']
        counts=qnode.th_condensed['counts']
        counts_err=qnode.th_condensed['counts_err']
        print qnode.th_condensed['counts'].std()
        print qnode.th_condensed['counts'].mean()
        maxval=qnode.th_condensed['counts'].max()
        minval=qnode.th_condensed['counts'].min()
        diff=qnode.th_condensed['counts'].max()-qnode.th_condensed['counts'].min()\
            -qnode.th_condensed['counts'].mean()
        sig=qnode.th_condensed['counts'].std()

        if diff-2*sig>0:
            #the difference between the high and low points and the mean
            #is greater than 2 sigma, so we assume there is a signal
            p0=findpeak(th,counts,1)
            print 'p0',p0
            #Area center width Bak
            center=p0[0]
            width=p0[1]
            sigma=width/2/N.sqrt(2*N.log(2))
            Imax=maxval-minval
            area=Imax*(N.sqrt(2*pi)*sigma)
            print 'Imax',Imax
            pin=[area,center,width,0]





            if 1:
                p = NLP(chisq, pin, maxIter = 1e3, maxFunEvals = 1e5)
                #p.lb=lowerm
                #p.ub=upperm
                p.args.f=(th,counts,counts_err)
                p.plot = 0
                p.iprint = 1
                p.contol = 1e-5  # required constraints tolerance, default for NLP is 1e-6

                # for the ALGENCAN solver, gradtol is the only stop criterion connected to openopt
                # (except maxfun, maxiter)
                # Note that in ALGENCAN gradtol means the norm of the projected gradient of the Augmented Lagrangian,
                # so it should be something like 1e-3...1e-5
                p.gradtol = 1e-5  # gradient stop criterion (default for NLP is 1e-6)
                #print 'maxiter', p.maxiter
                #print 'maxfun', p.maxfun
                p.maxIter=50
                #p.maxfun=100

                #p.df_iter = 50
                p.maxTime = 4000
                #r=p.solve('scipy_cobyla')
                #r=p.solve('scipy_lbfgsb')
                #r = p.solve('algencan')
                print 'ralg'
                r = p.solve('ralg')
                print 'done'
                pfit=r.xf
                print 'pfit openopt',pfit
                print 'r dict', r.__dict__

            if 0:
                print 'curvefit'
                print sys.executable
                pfit,popt=curve_fit(gauss2, th, counts, p0=pfit, sigma=counts_err)
                print 'p,popt', pfit,popt
                perror=N.sqrt(N.diag(popt))
                print 'perror',perror
                chisqr=chisq(pfit,th,counts,counts_err)
                dof=len(th)-len(pfit)
                print 'chisq',chisqr
            if 0:
                oparam=scipy.odr.Model(gauss)
                mydatao=scipy.odr.RealData(th,counts,sx=None,sy=counts_err)
                myodr = scipy.odr.ODR(mydatao, oparam, beta0=pfit)
                myoutput=myodr.run()
                myoutput.pprint()
                pfit=myoutput.beta
            if 1:
                print 'mpfit'
                p0=pfit
                parbase={'value':0., 'fixed':0, 'limited':[0,0], 'limits':[0.,0.]}
                parinfo=[]
                for i in range(len(p0)):
                    parinfo.append(copy.deepcopy(parbase))
                for i in range(len(p0)):
                    parinfo[i]['value']=p0[i]
                fa = {'x':th, 'y':counts, 'err':counts_err}
                #parinfo[1]['fixed']=1
                #parinfo[2]['fixed']=1
                m = mpfit(myfunct_res, p0, parinfo=parinfo,functkw=fa)
                if (m.status <= 0):
                    print 'error message = ', m.errmsg
                params=m.params
                pfit=params
                perror=m.perror
                #chisqr=(myfunct_res(m.params, x=th, y=counts, err=counts_err)[1]**2).sum()
                chisqr=chisq(pfit,th,counts,counts_err)
#......... part of the code omitted here .........
Developer: reflectometry, Project: WRed, Lines of code: 103, Source file: classify_files_structure.py
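Both this example and the next one call a chisq objective and a gauss model that are defined elsewhere in the WRed project and do not appear in the truncated listings. As a purely hypothetical sketch (the names, the parameter order [area, center, FWHM width, background] and the normalization are assumptions inferred from how pin and area are built above, not the project's actual code), such helpers might look like:

import numpy as N

def gauss(p, x):
    # hypothetical Gaussian-plus-flat-background model, p = [area, center, width (FWHM), background]
    area, center, width, bak = p[0], p[1], p[2], p[3]
    sigma = width/2/N.sqrt(2*N.log(2))
    return bak + area/(N.sqrt(2*N.pi)*sigma)*N.exp(-(x - center)**2/(2*sigma**2))

def chisq(p, x, y, err):
    # weighted sum of squared residuals, used as the NLP objective via p.args.f
    return (((y - gauss(p, x))/err)**2).sum()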

Example 2: fitpeak

# Required import: from openopt import NLP
# Alternatively: from openopt.NLP import maxIter
def fitpeak(x,y,yerr):
    maxval=x.max()
    minval=x.min()
    diff=y.max()-y.min()-y.mean()
    sig=y.std()
    print 'diff',diff,'std',sig
    if diff-1*sig>0:
        #the difference between the high and low points and the mean
        #is greater than 1 sigma, so we assume there is a signal
        p0=findpeak(x,y,2)
        print 'p0',p0
        #Area center width Bak area2 center2 width2
        center1=p0[0]
        width1=p0[1]
        center2=p0[2]
        width2=p0[3]
        sigma=width1/2/N.sqrt(2*N.log(2))
        ymax=maxval-minval
        area=ymax*(N.sqrt(2*pi)*sigma)
        print 'ymax',ymax
        pin=[area,center1,width1,0,area,center2,width2]





        if 1:
            p = NLP(chisq, pin, maxIter = 1e3, maxFunEvals = 1e5)
            #p.lb=lowerm
            #p.ub=upperm
            p.args.f=(x,y,yerr)
            p.plot = 0
            p.iprint = 1
            p.contol = 1e-5  # required constraints tolerance, default for NLP is 1e-6

            # for the ALGENCAN solver, gradtol is the only stop criterion connected to openopt
            # (except maxfun, maxiter)
            # Note that in ALGENCAN gradtol means the norm of the projected gradient of the Augmented Lagrangian,
            # so it should be something like 1e-3...1e-5
            p.gradtol = 1e-5  # gradient stop criterion (default for NLP is 1e-6)
            #print 'maxiter', p.maxiter
            #print 'maxfun', p.maxfun
            p.maxIter=50
            #p.maxfun=100

            #p.df_iter = 50
            p.maxTime = 4000
            #r=p.solve('scipy_cobyla')
            #r=p.solve('scipy_lbfgsb')
            #r = p.solve('algencan')
            print 'ralg'
            r = p.solve('ralg')
            print 'done'
            pfit=r.xf
            print 'pfit openopt',pfit
            print 'r dict', r.__dict__
        if 1: 
            print 'mpfit'
            p0=pfit
            parbase={'value':0., 'fixed':0, 'limited':[0,0], 'limits':[0.,0.]}
            parinfo=[]
            for i in range(len(p0)):
                parinfo.append(copy.deepcopy(parbase))
            for i in range(len(p0)): 
                parinfo[i]['value']=p0[i]
            fa = {'x':x, 'y':y, 'err':yerr}
            #parinfo[1]['fixed']=1
            #parinfo[2]['fixed']=1
            m = mpfit(myfunct_res, p0, parinfo=parinfo,functkw=fa)
            if (m.status <= 0): 
                print 'error message = ', m.errmsg
            params=m.params
            pfit=params
            perror=m.perror
            #chisqr=(myfunct_res(m.params, x=th, y=counts, err=counts_err)[1]**2).sum()
            chisqr=chisq(pfit,x,y,yerr)
            dof=m.dof
            #Icalc=gauss(pfit,th)
            #print 'mpfit chisqr', chisqr
        ycalc=gauss(pfit,x)

        if 1:
            width_x=N.linspace(p0[0]-p0[1],p0[0]+p0[1],100)
            width_y=N.ones(width_x.shape)*(maxval-minval)/2
            pos_y=N.linspace(minval,maxval,100)
            pos_x=N.ones(pos_y.shape)*p0[0]
            if 0:
                
                pylab.errorbar(x,y,yerr,marker='s',linestyle='None',mfc='black',mec='black',ecolor='black')
                pylab.plot(width_x,width_y)
                pylab.plot(pos_x,pos_y)
                pylab.plot(x,ycalc)
                pylab.show()

    else:
        #fix center
        #fix width
        print 'no peak'
        #Area center width Bak
        area=0
#......... part of the code omitted here .........
Developer: liuhuiwisdom, Project: WRed, Lines of code: 103, Source file: srfe2as2_caxis.py
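The mpfit branch in both examples also relies on a residual function, myfunct_res, that is defined outside the excerpts. Following mpfit's documented calling convention (keyword arguments supplied through functkw, and a return value of [status, weighted residuals]), a hypothetical version reusing the gauss sketch above could look like:

def myfunct_res(p, fjac=None, x=None, y=None, err=None):
    # hypothetical mpfit user function; fjac=None means mpfit computes derivatives numerically
    model = gauss(p, x)
    status = 0                      # non-negative status tells mpfit no error occurred
    return [status, (y - model)/err]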

Example 3: criterium

# Required import: from openopt import NLP
# Alternatively: from openopt.NLP import maxIter
    if 0:
        p.lb=lowerm
        p.ub=upperm
        p.args.f=(h,k,l,fq,fqerr,x,z,cosmat_list,coslist,flist)
        p.plot = 0
        p.iprint = 1
        p.contol = 1e-5  # required constraints tolerance, default for NLP is 1e-6

        # for the ALGENCAN solver, gradtol is the only stop criterion connected to openopt
        # (except maxfun, maxiter)
        # Note that in ALGENCAN gradtol means the norm of the projected gradient of the Augmented Lagrangian,
        # so it should be something like 1e-3...1e-5
        p.gradtol = 1e-5  # gradient stop criterion (default for NLP is 1e-6)
        #print 'maxiter', p.maxiter
        #print 'maxfun', p.maxfun
        p.maxIter=50
        #p.maxfun=100

        #p.df_iter = 50
        p.maxTime = 4000
        h_args=(h,k,l,fq,fqerr,x,z,cosmat_list,coslist,flist)

        if 0:
            #p.h=[pos_sum,neg_sum]
            p.h=[pos_sum,neg_sum]
            p.c=[chisq]
    #    p.h=[pos_sum,neg_sum]
            p.args.h=h_args
            p.args.c=h_args
            p.dh=[pos_sum_grad,neg_sum_grad]
            p.df=chisq_grad
Developer: reflectometry, Project: WRed, Lines of code: 33, Source file: maxent_test_iter2.py
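This excerpt attaches equality and inequality constraints (p.h, p.c), their extra arguments, the constraint gradients (p.dh) and an objective gradient (p.df) to the problem. A minimal self-contained sketch of the same pattern (toy objective and constraints invented for illustration, using the c(x) <= 0 and h(x) = 0 conventions that openopt's NLP expects and the constructor keywords shown in Example 4 below):

import numpy as np
from openopt import NLP

def f(x):   return (x[0] - 1.0)**2 + (x[1] - 2.0)**2            # objective
def df(x):  return np.array([2*(x[0] - 1.0), 2*(x[1] - 2.0)])   # its gradient
def h(x):   return x[0] + x[1] - 1.0                            # equality constraint, h(x) = 0
def dh(x):  return np.array([1.0, 1.0])                         # gradient of h
def c(x):   return x[0] - 2.0                                   # inequality constraint, c(x) <= 0
def dc(x):  return np.array([1.0, 0.0])                         # gradient of c

p = NLP(f, [0.0, 0.0], df=df, c=c, dc=dc, h=h, dh=dh)
p.contol = 1e-5
p.maxIter = 50
r = p.solve('ralg')
print('x = %s, f(x) = %s' % (r.xf, r.ff))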

Example 4: range

# Required import: from openopt import NLP
# Alternatively: from openopt.NLP import maxIter
colors = colors[:len(solvers)]
lines, results = [], {}
for j in range(len(solvers)):
    solver = solvers[j]
    color = colors[j]
    p = NLP(f, x0, name = 'bench2', df = df, c=c, dc = dc, h=h, dh = dh, lb = lb, ub = ub, gtol=gtol, ftol = ftol, maxFunEvals = 1e7, maxIter = maxIter, maxTime = maxTime,  plot = 1, color = color, iprint = 10, legend = [solvers[j]], show=False,  contol = contol)
#    p = NLP(f, x0, name = 'bench2', df = df, c=c, dc = dc, lb = lb, ub = ub, gtol=gtol, ftol = ftol, maxFunEvals = 1e7, maxIter = 1e4, maxTime = maxTime,  plot = 1, color = color, iprint = 0, legend = [solvers[j]], show=False,  contol = contol)
    if solver[:4] == 'ralg':
        pass
#        p.gtol = 1e-8
#        p.ftol = 1e-7
#        p.xtol = 1e-7
    elif solver == 'lincher':
        #p.iprint = 1
        p.maxTime = 1e15
        p.maxIter = 100

##    p.check.df = 1
##    p.check.dc = 1
##    p.check.dh = 1
    r = p.solve(solver)
    for fn in ('h','c'):
        if fn not in r.evals: r.evals[fn] = 0 # if no c or h are used in the problem
    results[solver] = (r.ff, p.getMaxResidual(r.xf), r.elapsed['solver_time'], r.elapsed['solver_cputime'], r.evals['f'], r.evals['c'], r.evals['h'])
    subplot(2,1,1)
    F0 = asscalar(p.f(p.x0))
    lines.append(plot([0, 1e-15], [F0, F0], color= colors[j]))


for i in range(2):
    subplot(2,1,i+1)
Developer: AlbertHolmes, Project: openopt, Lines of code: 33, Source file: nlp_bench_2.py


Note: The openopt.NLP.maxIter method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and the copyright of the source code remains with the original authors. For distribution and use, please refer to the license of the corresponding project; do not reproduce this article without permission.