本文整理汇总了C#中minlbfgsstate类的典型用法代码示例。如果您正苦于以下问题:C# minlbfgsstate类的具体用法?C# minlbfgsstate怎么用?C# minlbfgsstate使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
minlbfgsstate类属于命名空间,在下文中一共展示了minlbfgsstate类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: minlbfgsiteration
/*************************************************************************
Reverse-communication driver for one step of the L-BFGS optimizer.

The caller repeatedly invokes this function: while it returns True, the
caller must service the request encoded in State (e.g. evaluate the
target function/gradient at state.x) and call again; False means the
optimization session has finished.

NOTE(review): the function body is truncated in this excerpt — only the
reverse-communication prologue (local restore, stage dispatch and the
loading of the stopping criteria) is visible here.

-- ALGLIB --
Copyright 20.03.2009 by Bochkanov Sergey
*************************************************************************/
public static bool minlbfgsiteration(minlbfgsstate state)
{
// Locals that must survive across reverse-communication suspensions;
// they are saved into / restored from state.rstate around every exit.
bool result = new bool();
int n = 0;
int m = 0;
int maxits = 0;
double epsf = 0;
double epsg = 0;
double epsx = 0;
int i = 0;
int j = 0;
int ic = 0;
int mcinfo = 0;
double v = 0;
double vv = 0;
int i_ = 0;
//
// Reverse communication preparations
// I know it looks ugly, but it works the same way
// anywhere from C++ to Python.
//
// This code initializes locals by:
// * random values determined during code
// generation - on first subroutine call
// * values from previous call - on subsequent calls
//
if( state.rstate.stage>=0 )
{
// Subsequent call: restore the locals (ia[0..6], ra[0..4]) that were
// saved before the previous return to the caller.
n = state.rstate.ia[0];
m = state.rstate.ia[1];
maxits = state.rstate.ia[2];
i = state.rstate.ia[3];
j = state.rstate.ia[4];
ic = state.rstate.ia[5];
mcinfo = state.rstate.ia[6];
epsf = state.rstate.ra[0];
epsg = state.rstate.ra[1];
epsx = state.rstate.ra[2];
v = state.rstate.ra[3];
vv = state.rstate.ra[4];
}
else
{
// First call: fill the locals with arbitrary sentinels chosen at code
// generation time (they are overwritten before any real use).
n = -983;
m = -989;
maxits = -834;
i = 900;
j = -287;
ic = 364;
mcinfo = 214;
epsf = -338;
epsg = -686;
epsx = 912;
v = 585;
vv = 497;
}
// Jump back to wherever the previous call suspended.
if( state.rstate.stage==0 )
{
goto lbl_0;
}
if( state.rstate.stage==1 )
{
goto lbl_1;
}
if( state.rstate.stage==2 )
{
goto lbl_2;
}
if( state.rstate.stage==3 )
{
goto lbl_3;
}
//
// Routine body
//
//
// Unload frequently used variables from State structure
// (just for typing convenience)
//
n = state.n;
m = state.m;
epsg = state.epsg;
epsf = state.epsf;
epsx = state.epsx;
maxits = state.maxits;
state.repterminationtype = 0;
state.repiterationscount = 0;
state.repnfev = 0;
//
// Calculate F/G at the initial point
//......... part of the code is omitted here .........
示例2: minlbfgsrestartfrom
/*************************************************************************
This subroutine restarts LBFGS algorithm from new point. All optimization
parameters are left unchanged.

This function allows to solve multiple optimization problems (which
must have same number of dimensions) without object reallocation penalty.

INPUT PARAMETERS:
    State   -   structure used to store algorithm state
    X       -   new starting point.

-- ALGLIB --
   Copyright 30.07.2010 by Bochkanov Sergey
*************************************************************************/
public static void minlbfgsrestartfrom(minlbfgsstate state,
    double[] x)
{
    int i_ = 0;

    // Validate the new starting point before touching the state object.
    alglib.ap.assert(alglib.ap.len(x)>=state.n, "MinLBFGSRestartFrom: Length(X)<N!");
    alglib.ap.assert(apserv.isfinitevector(x, state.n), "MinLBFGSRestartFrom: X contains infinite or NaN values!");

    // Copy the first N components of X into the current iterate.
    for(i_=0; i_<=state.n-1;i_++)
    {
        state.x[i_] = x[i_];
    }

    // Reset the reverse-communication state machine.
    //
    // BUGFIX: minlbfgsiteration() persists seven int locals (ia[0..6])
    // and five double locals (ra[0..4]); the previous allocations of
    // 5+1 ints and 1+1 doubles were too small and would throw an
    // IndexOutOfRangeException on the first suspension after a restart.
    state.rstate.ia = new int[6+1];
    state.rstate.ra = new double[4+1];
    state.rstate.stage = -1;
    clearrequestfields(state);
}
示例3: minlbfgssetgradientcheck
/*************************************************************************
Enables verification of the user-supplied analytic gradient.

When activated (TestStep>0), MinLBFGSOptimize() probes each component of
the initial point with trial steps of size TestStep*S[i] (bounded by the
constraints if needed), fits a cubic model through the sampled values and
derivatives, and compares its prediction against one extra midpoint
evaluation.  A mismatch above the internal threshold stops optimization
with completion code -7 and records the offending variable index in
Rep.VarIdx; otherwise the actual optimization proceeds.

NOTE 1: the check costs N extra gradient evaluations — use it only for
        low-dimensional debugging, not in production code.
NOTE 2: choose TestStep carefully; a step so large that the function is
        significantly non-cubic causes false alarms.  Per-variable step
        sizes can be emulated via MinLBFGSSetScale().
NOTE 3: false positives are possible for sharply changing functions —
        retry with a smaller step before trusting a failure report.

INPUT PARAMETERS:
    State    -  structure used to store algorithm state
    TestStep -  verification step:
                * TestStep=0 turns verification off
                * TestStep>0 activates verification

-- ALGLIB --
   Copyright 24.05.2012 by Bochkanov Sergey
*************************************************************************/
public static void minlbfgssetgradientcheck(minlbfgsstate state,
    double teststep)
{
    // Reject NaN/Inf and negative steps up front; a zero step simply
    // switches the check off.
    alglib.ap.assert(math.isfinite(teststep), "MinLBFGSSetGradientCheck: TestStep contains NaN or Infinite");
    alglib.ap.assert((double)(teststep)>=(double)(0), "MinLBFGSSetGradientCheck: invalid argument TestStep(TestStep<0)");
    state.teststep = teststep;
}
示例4: minlbfgssetpreclowrankexact
/*************************************************************************
Installs an exact low-rank preconditioner for the Hessian approximation
H = D + W'*C*W, where:
* D is an NxN diagonal positive definite matrix
* W is a KxN low-rank correction
* C is a KxK semidefinite diagonal factor of the correction

Exact but comparatively slow: O(N*K^2) to build (the inverse is formed
via the Woodbury matrix identity) and O(N*K) to apply.

-- ALGLIB --
   Copyright 13.10.2010 by Bochkanov Sergey
*************************************************************************/
public static void minlbfgssetpreclowrankexact(minlbfgsstate state,
    double[] d,
    double[] c,
    double[,] w,
    int cnt)
{
    // Mode 5 = exact low-rank preconditioning; the inverse is assembled
    // once into State.LowRankBuf and reused on every application.
    state.prectype = 5;
    optserv.preparelowrankpreconditioner(d, c, w, state.n, cnt, state.lowrankbuf);
}
示例5: minlbfgsresults
/*************************************************************************
L-BFGS algorithm results.

INPUT PARAMETERS:
    State   -   algorithm state

OUTPUT PARAMETERS:
    X       -   array[0..N-1], solution
    Rep     -   optimization report.  Rep.TerminationType carries the
                completion code: -8 (infinite/NaN detected), -7 (gradient
                verification failed, see MinLBFGSSetGradientCheck()),
                -2 (rounding errors stop progress; X holds best point),
                -1 (incorrect parameters), 1 (EpsF satisfied), 2 (EpsX
                satisfied), 4 (EpsG satisfied), 5 (MaxIts reached),
                7 (conditions too stringent), 8 (user called
                minlbfgsrequesttermination(); X is the last accepted
                point).  Rep also carries the iteration counter and NFEV,
                the number of function evaluations.

-- ALGLIB --
   Copyright 02.04.2010 by Bochkanov Sergey
*************************************************************************/
public static void minlbfgsresults(minlbfgsstate state,
    ref double[] x,
    minlbfgsreport rep)
{
    // Delegate to the buffered variant with a throw-away zero-length
    // buffer; it reallocates X to the proper size and fills Rep.
    x = new double[0];
    minlbfgsresultsbuf(state, ref x, rep);
}
示例6: minlbfgssetpreccholesky
/*************************************************************************
Preconditioner modification: use a Cholesky factorization of the
approximate Hessian.

INPUT PARAMETERS:
    State   -   structure which stores algorithm state
    P       -   triangular preconditioner, Cholesky factorization of the
                approximate Hessian, array[0..N-1,0..N-1] (only the
                leading N elements are used if larger)
    IsUpper -   whether the upper or lower triangle of P is given (the
                other triangle is not referenced)

P is copied into an internal buffer, so the caller's matrix may be reused
afterwards.  The preconditioner may be swapped "on the fly" between
iterations.

NOTE: P must be nonsingular; an exception is thrown otherwise.

-- ALGLIB --
   Copyright 13.10.2010 by Bochkanov Sergey
*************************************************************************/
public static void minlbfgssetpreccholesky(minlbfgsstate state,
    double[,] p,
    bool isupper)
{
    int k = 0;
    double maxdiag = 0;

    alglib.ap.assert(apserv.isfinitertrmatrix(p, state.n, isupper), "MinLBFGSSetPrecCholesky: P contains infinite or NAN values!");

    // Cheap singularity screen: reject the degenerate case where the
    // whole diagonal of the factor is zero.
    maxdiag = 0;
    for(k=0; k<=state.n-1; k++)
    {
        maxdiag = Math.Max(maxdiag, Math.Abs(p[k,k]));
    }
    alglib.ap.assert((double)(maxdiag)>(double)(0), "MinLBFGSSetPrecCholesky: P is strictly singular!");

    // Grow the internal buffer only when it is too small for N x N.
    if( alglib.ap.rows(state.denseh)<state.n || alglib.ap.cols(state.denseh)<state.n )
    {
        state.denseh = new double[state.n, state.n];
    }
    state.prectype = 1;
    if( isupper )
    {
        ablas.rmatrixcopy(state.n, state.n, p, 0, 0, ref state.denseh, 0, 0);
    }
    else
    {
        // Caller supplied the lower triangle: store its transpose so the
        // internal buffer always holds the upper-triangular form.
        ablas.rmatrixtranspose(state.n, state.n, p, 0, 0, state.denseh, 0, 0);
    }
}
示例7: minlbfgssetprecscale
/*************************************************************************
Preconditioner modification: scale-based diagonal preconditioning with
H[i] = 1/(s[i]^2).

Useful when no approximate Hessian diagonal is available but the
variables are known to be badly scaled (e.g. one variable in [1,10] and
another in [1000,100000]) and most of the ill-conditioning comes from the
differing scales.

IMPORTANT: set the variable scales with MinLBFGSSetScale() (before or
after this call); without that information the scale-based preconditioner
degenerates to the unit matrix.

INPUT PARAMETERS:
    State   -   structure which stores algorithm state

-- ALGLIB --
   Copyright 13.10.2010 by Bochkanov Sergey
*************************************************************************/
public static void minlbfgssetprecscale(minlbfgsstate state)
{
    // Mode 3 = diagonal preconditioner derived from the variable scales.
    state.prectype = 3;
}
示例8: minlbfgsresultsbuf
/*************************************************************************
L-BFGS algorithm results, buffered variant.

Identical to MinLBFGSResults() except that X[] is written into the
caller-supplied buffer, which is resized only when too small — intended
for performance-critical inner loops where the array reallocation penalty
of the plain variant is too large to ignore.

-- ALGLIB --
   Copyright 20.08.2010 by Bochkanov Sergey
*************************************************************************/
public static void minlbfgsresultsbuf(minlbfgsstate state, ref double[] x, minlbfgsreport rep)
{
    // Thin forwarding shim from the public wrapper to the computational
    // core; X and Rep are filled in place.
    minlbfgs.minlbfgsresultsbuf(state.innerobj, ref x, rep.innerobj);
}
示例9: minlbfgsrestartfrom
/*************************************************************************
Restarts the LBFGS algorithm from a new point; all optimization
parameters are left unchanged.

Lets several optimization problems of the same dimensionality be solved
with one state object, avoiding the object reallocation penalty.

INPUT PARAMETERS:
    State   -   structure used to store algorithm state
    X       -   new starting point.

-- ALGLIB --
   Copyright 30.07.2010 by Bochkanov Sergey
*************************************************************************/
public static void minlbfgsrestartfrom(minlbfgsstate state, double[] x)
{
    // Thin forwarding shim to the computational core.
    minlbfgs.minlbfgsrestartfrom(state.innerobj, x);
}
示例10: minlbfgsoptimize
/*************************************************************************
Launches the iterations of the nonlinear optimizer.

PARAMETERS:
    grad    -   callback which calculates the (merit) function value
                func and gradient grad at a given point x (required)
    rep     -   optional callback invoked after each iteration;
                may be null
    obj     -   optional object passed through to the callbacks;
                may be null

-- ALGLIB --
   Copyright 20.03.2009 by Bochkanov Sergey
*************************************************************************/
public static void minlbfgsoptimize(minlbfgsstate state, ndimensional_grad grad, ndimensional_rep rep, object obj)
{
    if( grad==null )
    {
        throw new alglibexception("ALGLIB: error in 'minlbfgsoptimize()' (grad is null)");
    }
    // Keep stepping the reverse-communication core, servicing whichever
    // request it raises on each suspension.
    while( alglib.minlbfgsiteration(state) )
    {
        if( state.needfg )
        {
            // Function/gradient request: write F and G into the state.
            grad(state.x, ref state.innerobj.f, state.innerobj.g, obj);
        }
        else if( state.innerobj.xupdated )
        {
            // Iteration report for the newly accepted point, if the
            // caller subscribed.
            if( rep!=null )
            {
                rep(state.innerobj.x, state.innerobj.f, obj);
            }
        }
        else
        {
            throw new alglibexception("ALGLIB: error in 'minlbfgsoptimize' (some derivatives were not provided?)");
        }
    }
}
示例11: minlbfgsresults
/*************************************************************************
L-BFGS algorithm results.

INPUT PARAMETERS:
    State   -   algorithm state

OUTPUT PARAMETERS:
    X       -   array[0..N-1], solution
    Rep     -   optimization report.  Rep.TerminationType holds the
                completion code: -2 (rounding errors stop progress; X is
                the best point found), -1 (incorrect parameters),
                1 (EpsF satisfied), 2 (EpsX satisfied), 4 (EpsG
                satisfied), 5 (MaxIts reached), 7 (stopping conditions
                too stringent).  Rep also carries the iteration counter
                and NFEV, the number of function evaluations.

-- ALGLIB --
   Copyright 02.04.2010 by Bochkanov Sergey
*************************************************************************/
public static void minlbfgsresults(minlbfgsstate state, out double[] x, out minlbfgsreport rep)
{
    // Fresh output holders; the core resizes X and populates Rep.
    x = new double[0];
    rep = new minlbfgsreport();
    minlbfgs.minlbfgsresults(state.innerobj, ref x, rep.innerobj);
}
示例12: minlbfgssetstpmax
/*************************************************************************
Sets the maximum step length.

INPUT PARAMETERS:
    State   -   structure which stores algorithm state
    StpMax  -   maximum step length, >=0; set to 0.0 (the default) to
                leave the step length unlimited.

Use this when the target function contains exp() or other fast-growing
terms and overly long trial steps would overflow: steps that are too
large are rejected without ever evaluating the function at x+stp*d.

-- ALGLIB --
   Copyright 02.04.2010 by Bochkanov Sergey
*************************************************************************/
public static void minlbfgssetstpmax(minlbfgsstate state, double stpmax)
{
    // Thin forwarding shim to the computational core.
    minlbfgs.minlbfgssetstpmax(state.innerobj, stpmax);
}
示例13: minlbfgssetxrep
/*************************************************************************
Turns iteration reporting on or off.

INPUT PARAMETERS:
    State   -   structure which stores algorithm state
    NeedXRep-   whether iteration reports are needed or not

When NeedXRep is True the algorithm invokes the rep() callback supplied
to MinLBFGSOptimize() after each iteration.

-- ALGLIB --
   Copyright 02.04.2010 by Bochkanov Sergey
*************************************************************************/
public static void minlbfgssetxrep(minlbfgsstate state, bool needxrep)
{
    // Thin forwarding shim to the computational core.
    minlbfgs.minlbfgssetxrep(state.innerobj, needxrep);
}
示例14: minlbfgscreatex
/*************************************************************************
Extended subroutine for internal use only.

Accepts additional parameters:

    Flags    -  additional settings:
                * Flags = 0     no additional settings
                * Flags = 1     "do not allocate memory"; used when
                                solving many subsequent tasks with the
                                same N/M values.  The FIRST call MUST be
                                made without this bit set; subsequent
                                calls with the same MinLBFGSState may
                                set Flags to 1.
    DiffStep -  numerical differentiation step

-- ALGLIB --
   Copyright 02.04.2010 by Bochkanov Sergey
*************************************************************************/
public static void minlbfgscreatex(int n,
    int m,
    double[] x,
    int flags,
    double diffstep,
    minlbfgsstate state)
{
    int i = 0;

    alglib.ap.assert(n>=1, "MinLBFGS: N too small!");
    alglib.ap.assert(m>=1, "MinLBFGS: M too small!");
    alglib.ap.assert(m<=n, "MinLBFGS: M too large!");

    //
    // Initialize
    //
    state.teststep = 0;
    state.diffstep = diffstep;
    state.n = n;
    state.m = m;

    // Bit 0 of Flags selects buffer reuse: an even value means "allocate
    // fresh internal arrays", an odd value means "keep the buffers from
    // a previous call with the same N/M".  (The original code also
    // halved Flags afterwards; that store was dead and has been removed,
    // and the wasteful `new bool()` initialization replaced by a direct
    // assignment.)
    bool allocatemem = flags%2==0;
    if( allocatemem )
    {
        state.rho = new double[m];
        state.theta = new double[m];
        state.yk = new double[m, n];
        state.sk = new double[m, n];
        state.d = new double[n];
        state.xp = new double[n];
        state.x = new double[n];
        state.s = new double[n];
        state.g = new double[n];
        state.work = new double[n];
    }

    // Defaults: no stopping-criteria overrides, no X-reports, unlimited
    // step length; then seed the iterate from X and use unit scales.
    minlbfgssetcond(state, 0, 0, 0, 0);
    minlbfgssetxrep(state, false);
    minlbfgssetstpmax(state, 0);
    minlbfgsrestartfrom(state, x);
    for(i=0; i<=n-1; i++)
    {
        state.s[i] = 1.0;
    }
    state.prectype = 0;
}
示例15: make_copy
public override alglib.apobject make_copy()
{
    // Produce an independent deep copy of this optimizer state: scalar
    // fields are assigned directly, arrays are cloned, and nested ALGLIB
    // helper objects are duplicated through their own make_copy().
    minlbfgsstate copy = new minlbfgsstate();

    // Problem size and stopping criteria.
    copy.n = n;
    copy.m = m;
    copy.epsg = epsg;
    copy.epsf = epsf;
    copy.epsx = epsx;
    copy.maxits = maxits;
    copy.xrep = xrep;
    copy.stpmax = stpmax;
    copy.s = (double[])s.Clone();
    copy.diffstep = diffstep;

    // Iteration counters and L-BFGS working set.
    copy.nfev = nfev;
    copy.mcstage = mcstage;
    copy.k = k;
    copy.q = q;
    copy.p = p;
    copy.rho = (double[])rho.Clone();
    copy.yk = (double[,])yk.Clone();
    copy.sk = (double[,])sk.Clone();
    copy.xp = (double[])xp.Clone();
    copy.theta = (double[])theta.Clone();
    copy.d = (double[])d.Clone();
    copy.stp = stp;
    copy.work = (double[])work.Clone();
    copy.fold = fold;
    copy.trimthreshold = trimthreshold;

    // Preconditioner configuration and buffers.
    copy.prectype = prectype;
    copy.gammak = gammak;
    copy.denseh = (double[,])denseh.Clone();
    copy.diagh = (double[])diagh.Clone();
    copy.precc = (double[])precc.Clone();
    copy.precd = (double[])precd.Clone();
    copy.precw = (double[,])precw.Clone();
    copy.preck = preck;
    copy.precbuf = (optserv.precbuflbfgs)precbuf.make_copy();
    copy.lowrankbuf = (optserv.precbuflowrank)lowrankbuf.make_copy();

    // Gradient-check scratch values.
    copy.fbase = fbase;
    copy.fm2 = fm2;
    copy.fm1 = fm1;
    copy.fp1 = fp1;
    copy.fp2 = fp2;
    copy.autobuf = (double[])autobuf.Clone();

    // Reverse-communication interface fields.
    copy.x = (double[])x.Clone();
    copy.f = f;
    copy.g = (double[])g.Clone();
    copy.needf = needf;
    copy.needfg = needfg;
    copy.xupdated = xupdated;
    copy.userterminationneeded = userterminationneeded;
    copy.teststep = teststep;
    copy.rstate = (rcommstate)rstate.make_copy();

    // Report fields and line-search state.
    copy.repiterationscount = repiterationscount;
    copy.repnfev = repnfev;
    copy.repvaridx = repvaridx;
    copy.repterminationtype = repterminationtype;
    copy.lstate = (linmin.linminstate)lstate.make_copy();

    return copy;
}