This article compiles typical usage examples of the mlpbase class in C#. If you are wondering what the mlpbase class is for, how to use it, or want to see it in action, the selected code samples below may help.
A total of 15 code examples of the mlpbase class are shown, ordered by popularity by default.
Example 1: dataset
/*************************************************************************
This function trains the neural network passed to it, using the current
dataset (the one passed to MLPSetDataset() or MLPSetSparseDataset()) and the
current training settings. Training is performed from NRestarts random
starting positions, and the best network is chosen.
Training uses the current training algorithm.
INPUT PARAMETERS:
S - trainer object;
Network - neural network. It must have the same number of inputs and
outputs/classes as was specified during creation of the
trainer object;
TNetwork - the neural network actually being trained.
The user may inspect the weights in parameter Network
while training continues.
It must have the same architecture as Network; copy Network or
create a new network with an identical architecture.
State - created LBFGS optimizer;
NRestarts - number of restarts, >=0:
* NRestarts>0 means that specified number of random
restarts are performed, best network is chosen after
training
* NRestarts=0 means that current state of the network
is used for training.
TrnSubset - subset of the training set (stores row indices),
used as the training set;
TrnSubsetSize- size of the subset (if TrnSubsetSize<0, the full dataset is used);
when TrnSubsetSize=0, the network is filled with zero values
and the ValSubset parameter is IGNORED;
ValSubset - subset of the training set (stores row indices),
used as the validation set;
ValSubsetSize- size of the subset (if ValSubsetSize<0, the full dataset is used);
when ValSubsetSize<>0, the early stopping training
algorithm is used;
BufWBest - buffer for storing interim results (BufWBest[0:WCount-1],
must be allocated by the user);
BufWFinal - buffer for storing interim results (BufWFinal[0:WCount-1],
must be allocated by the user).
OUTPUT PARAMETERS:
Network - trained network;
Rep - training report.
NOTE: when no dataset was specified with MLPSetDataset/SetSparseDataset(),
the network is filled with zero values. The same behavior applies to
MLPStartTraining and MLPContinueTraining.
NOTE: this method uses sum-of-squares error function for training.
-- ALGLIB --
Copyright 13.08.2012 by Bochkanov Sergey
*************************************************************************/
private static void mlptrainnetworkx(mlptrainer s,
mlpbase.multilayerperceptron network,
mlpbase.multilayerperceptron tnetwork,
minlbfgs.minlbfgsstate state,
int nrestarts,
int[] trnsubset,
int trnsubsetsize,
int[] valsubset,
int valsubsetsize,
double[] bufwbest,
double[] bufwfinal,
mlpreport rep)
{
mlpbase.modelerrors modrep = new mlpbase.modelerrors();
double eval = 0;
double v = 0;
double ebestcur = 0;
double efinal = 0;
int ngradbatch = 0;
int nin = 0;
int nout = 0;
int wcount = 0;
int twcount = 0;
int itbest = 0;
int itcnt = 0;
int ntype = 0;
int ttype = 0;
bool rndstart = new bool();
int pass = 0;
int i = 0;
int i_ = 0;
alglib.ap.assert(s.npoints>=0, "MLPTrainNetworkX: internal error - parameter S is not initialized or is spoiled(S.NPoints<0)");
if( s.rcpar )
{
ttype = 0;
}
else
{
ttype = 1;
}
if( !mlpbase.mlpissoftmax(network) )
{
ntype = 0;
}
else
{
//......... remainder of this code omitted .........
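The internal routine above is normally driven through the public trainer interface. The following is a minimal, illustrative sketch of that workflow, assuming the public alglib C# wrapper calls (mlpcreatec1, mlpcreatetrainercls, mlpsetdataset, mlptrainnetwork); exact signatures may differ between ALGLIB versions.
// Hypothetical usage sketch (not part of the listing): train a classifier
// from several random restarts through the trainer object.
double[,] xy = { {0.1, 0.2, 0}, {0.9, 0.8, 1} };  // last column holds the class index
alglib.multilayerperceptron net;
alglib.mlptrainer trn;
alglib.mlpreport rep;
alglib.mlpcreatec1(2, 5, 2, out net);             // 2 inputs, 5 hidden neurons, 2 classes
alglib.mlpcreatetrainercls(2, 2, out trn);        // trainer for a classification problem
alglib.mlpsetdataset(trn, xy, 2);                 // attach the dataset (2 rows)
alglib.mlptrainnetwork(trn, net, 5, out rep);     // 5 random restarts, best network kept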
Example 2: mlpkfoldcvlbfgs
/*************************************************************************
Cross-validation estimate of generalization error.
Base algorithm - L-BFGS.
INPUT PARAMETERS:
Network - neural network with initialized geometry. Network is
not changed during cross-validation - it is used only
as a representative of its architecture.
XY - training set.
SSize - training set size
Decay - weight decay, same as in MLPTrainLBFGS
Restarts - number of restarts, >0.
restarts are counted for each partition separately, so
total number of restarts will be Restarts*FoldsCount.
WStep - stopping criterion, same as in MLPTrainLBFGS
MaxIts - stopping criterion, same as in MLPTrainLBFGS
FoldsCount - number of folds in k-fold cross-validation,
2<=FoldsCount<=SSize.
recommended value: 10.
OUTPUT PARAMETERS:
Info - return code, same as in MLPTrainLBFGS
Rep - report, same as in MLPTrainLM/MLPTrainLBFGS
CVRep - generalization error estimates
-- ALGLIB --
Copyright 09.12.2007 by Bochkanov Sergey
*************************************************************************/
public static void mlpkfoldcvlbfgs(mlpbase.multilayerperceptron network,
double[,] xy,
int npoints,
double decay,
int restarts,
double wstep,
int maxits,
int foldscount,
ref int info,
mlpreport rep,
mlpcvreport cvrep)
{
info = 0;
mlpkfoldcvgeneral(network, xy, npoints, decay, restarts, foldscount, false, wstep, maxits, ref info, rep, cvrep);
}
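For reference, a hedged usage sketch of the public wrapper for the routine above; the alglib.mlpkfoldcvlbfgs call and its parameter order are assumptions based on the comment block, so verify them against your ALGLIB version.
// Hypothetical sketch: 10-fold cross-validation of an L-BFGS-trained regression net.
double[,] xy = new double[100, 3];                // fill with data: 2 inputs + 1 target per row
alglib.multilayerperceptron net;
alglib.mlpreport rep;
alglib.mlpcvreport cvrep;
int info;
alglib.mlpcreate1(2, 5, 1, out net);              // 2-5-1 network with linear output
alglib.mlpkfoldcvlbfgs(net, xy, 100, 0.001, 2, 0.01, 0, 10, out info, out rep, out cvrep);
// cvrep.rmserror / cvrep.avgerror now hold cross-validation estimates of generalization error.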
Example 3: mlpkfoldcvgeneral
/*************************************************************************
Internal cross-validation subroutine
*************************************************************************/
private static void mlpkfoldcvgeneral(mlpbase.multilayerperceptron n,
double[,] xy,
int npoints,
double decay,
int restarts,
int foldscount,
bool lmalgorithm,
double wstep,
int maxits,
ref int info,
mlpreport rep,
mlpcvreport cvrep)
{
int i = 0;
int fold = 0;
int j = 0;
int k = 0;
mlpbase.multilayerperceptron network = new mlpbase.multilayerperceptron();
int nin = 0;
int nout = 0;
int rowlen = 0;
int wcount = 0;
int nclasses = 0;
int tssize = 0;
int cvssize = 0;
double[,] cvset = new double[0,0];
double[,] testset = new double[0,0];
int[] folds = new int[0];
int relcnt = 0;
mlpreport internalrep = new mlpreport();
double[] x = new double[0];
double[] y = new double[0];
int i_ = 0;
info = 0;
//
// Read network geometry, test parameters
//
mlpbase.mlpproperties(n, ref nin, ref nout, ref wcount);
if( mlpbase.mlpissoftmax(n) )
{
nclasses = nout;
rowlen = nin+1;
}
else
{
nclasses = -nout;
rowlen = nin+nout;
}
if( (npoints<=0 || foldscount<2) || foldscount>npoints )
{
info = -1;
return;
}
mlpbase.mlpcopy(n, network);
//
// K-fold out cross-validation.
// First, estimate generalization error
//
testset = new double[npoints-1+1, rowlen-1+1];
cvset = new double[npoints-1+1, rowlen-1+1];
x = new double[nin-1+1];
y = new double[nout-1+1];
mlpkfoldsplit(xy, npoints, nclasses, foldscount, false, ref folds);
cvrep.relclserror = 0;
cvrep.avgce = 0;
cvrep.rmserror = 0;
cvrep.avgerror = 0;
cvrep.avgrelerror = 0;
rep.ngrad = 0;
rep.nhess = 0;
rep.ncholesky = 0;
relcnt = 0;
for(fold=0; fold<=foldscount-1; fold++)
{
//
// Separate set
//
tssize = 0;
cvssize = 0;
for(i=0; i<=npoints-1; i++)
{
if( folds[i]==fold )
{
for(i_=0; i_<=rowlen-1;i_++)
{
testset[tssize,i_] = xy[i,i_];
}
tssize = tssize+1;
}
else
{
for(i_=0; i_<=rowlen-1;i_++)
//......... remainder of this code omitted .........
Example 4: multilayerperceptron
public multilayerperceptron(mlpbase.multilayerperceptron obj)
{
_innerobj = obj;
}
Example 5: dimensionality
/*************************************************************************
Neural network training using L-BFGS algorithm with regularization.
Subroutine trains neural network with restarts from random positions.
Algorithm is well suited for problems of any dimensionality (memory
requirements and step complexity are linear by weights number).
INPUT PARAMETERS:
Network - neural network with initialized geometry
XY - training set
NPoints - training set size
Decay - weight decay constant, >=0.001
Decay term 'Decay*||Weights||^2' is added to error
function.
If you don't know what Decay to choose, use 0.001.
Restarts - number of restarts from random position, >0.
If you don't know what Restarts to choose, use 2.
WStep - stopping criterion. Algorithm stops if step size is
less than WStep. Recommended value - 0.01. Zero step
size means stopping after MaxIts iterations.
MaxIts - stopping criterion. Algorithm stops after MaxIts
iterations (NOT gradient calculations). Zero MaxIts
means stopping when step is sufficiently small.
OUTPUT PARAMETERS:
Network - trained neural network.
Info - return code:
* -8, if both WStep=0 and MaxIts=0
* -2, if there is a point with class number
outside of [0..NOut-1].
* -1, if wrong parameters specified
(NPoints<0, Restarts<1).
* 2, if task has been solved.
Rep - training report
-- ALGLIB --
Copyright 09.12.2007 by Bochkanov Sergey
*************************************************************************/
public static void mlptrainlbfgs(mlpbase.multilayerperceptron network,
double[,] xy,
int npoints,
double decay,
int restarts,
double wstep,
int maxits,
ref int info,
mlpreport rep)
{
int i = 0;
int pass = 0;
int nin = 0;
int nout = 0;
int wcount = 0;
double[] w = new double[0];
double[] wbest = new double[0];
double e = 0;
double v = 0;
double ebest = 0;
minlbfgs.minlbfgsreport internalrep = new minlbfgs.minlbfgsreport();
minlbfgs.minlbfgsstate state = new minlbfgs.minlbfgsstate();
int i_ = 0;
info = 0;
//
// Test inputs, parse flags, read network geometry
//
if( (double)(wstep)==(double)(0) && maxits==0 )
{
info = -8;
return;
}
if( ((npoints<=0 || restarts<1) || (double)(wstep)<(double)(0)) || maxits<0 )
{
info = -1;
return;
}
mlpbase.mlpproperties(network, ref nin, ref nout, ref wcount);
if( mlpbase.mlpissoftmax(network) )
{
for(i=0; i<=npoints-1; i++)
{
if( (int)Math.Round(xy[i,nin])<0 || (int)Math.Round(xy[i,nin])>=nout )
{
info = -2;
return;
}
}
}
decay = Math.Max(decay, mindecay);
info = 2;
//
// Prepare
//
mlpbase.mlpinitpreprocessor(network, xy, npoints);
w = new double[wcount-1+1];
wbest = new double[wcount-1+1];
ebest = math.maxrealnumber;
//......... remainder of this code omitted .........
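A minimal usage sketch for the routine above, assuming the public alglib.mlptrainlbfgs and alglib.mlpprocess wrappers (signatures may vary by version):
// Hypothetical sketch: direct L-BFGS training with weight decay, then one forward pass.
double[,] xy = new double[50, 3];                 // 2 inputs + 1 target per row
alglib.multilayerperceptron net;
alglib.mlpreport rep;
int info;
alglib.mlpcreate1(2, 5, 1, out net);
alglib.mlptrainlbfgs(net, xy, 50, 0.001, 2, 0.01, 0, out info, out rep);
if( info==2 )                                     // 2 means the task has been solved
{
    double[] y = new double[1];
    alglib.mlpprocess(net, new double[]{0.5, 0.5}, ref y);
}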
Example 6: initmlpetrnsessions
/*************************************************************************
This function initializes temporaries needed for training session.
*************************************************************************/
private static void initmlpetrnsessions(mlpbase.multilayerperceptron individualnetwork,
mlptrainer trainer,
alglib.smp.shared_pool sessions)
{
mlpetrnsession t = new mlpetrnsession();
if( !alglib.smp.ae_shared_pool_is_initialized(sessions) )
{
initmlpetrnsession(individualnetwork, trainer, t);
alglib.smp.ae_shared_pool_set_seed(sessions, t);
}
}
Example 7: PassThroughSerializer
/*************************************************************************
Network creation
This function creates a network with the desired structure. The network is
created using one of three methods:
a) straightforward creation using MLPCreate???()
b) MLPCreate???() for a proxy object, which is copied with PassThroughSerializer()
c) MLPCreate???() for a proxy object, which is copied with MLPCopy()
One of these methods is chosen with probability 1/3.
*************************************************************************/
private static void createnetwork(mlpbase.multilayerperceptron network,
int nkind,
double a1,
double a2,
int nin,
int nhid1,
int nhid2,
int nout)
{
int mkind = 0;
mlpbase.multilayerperceptron tmp = new mlpbase.multilayerperceptron();
ap.assert(((nin>0 & nhid1>=0) & nhid2>=0) & nout>0, "CreateNetwork error");
ap.assert(nhid1!=0 | nhid2==0, "CreateNetwork error");
ap.assert(nkind!=1 | nout>=2, "CreateNetwork error");
mkind = math.randominteger(3);
if( nhid1==0 )
{
//
// No hidden layers
//
if( nkind==0 )
{
if( mkind==0 )
{
mlpbase.mlpcreate0(nin, nout, network);
}
if( mkind==1 )
{
mlpbase.mlpcreate0(nin, nout, tmp);
{
//
// This code passes data structure through serializers
// (serializes it to string and loads back)
//
serializer _local_serializer;
string _local_str;
_local_serializer = new serializer();
_local_serializer.alloc_start();
mlpbase.mlpalloc(_local_serializer, tmp);
_local_serializer.sstart_str();
mlpbase.mlpserialize(_local_serializer, tmp);
_local_serializer.stop();
_local_str = _local_serializer.get_string();
_local_serializer = new serializer();
_local_serializer.ustart_str(_local_str);
mlpbase.mlpunserialize(_local_serializer, network);
_local_serializer.stop();
}
}
if( mkind==2 )
{
mlpbase.mlpcreate0(nin, nout, tmp);
mlpbase.mlpcopy(tmp, network);
}
}
else
{
if( nkind==1 )
{
if( mkind==0 )
{
mlpbase.mlpcreatec0(nin, nout, network);
}
if( mkind==1 )
{
mlpbase.mlpcreatec0(nin, nout, tmp);
{
//
// This code passes data structure through serializers
// (serializes it to string and loads back)
//
serializer _local_serializer;
string _local_str;
_local_serializer = new serializer();
_local_serializer.alloc_start();
mlpbase.mlpalloc(_local_serializer, tmp);
_local_serializer.sstart_str();
mlpbase.mlpserialize(_local_serializer, tmp);
_local_serializer.stop();
_local_str = _local_serializer.get_string();
_local_serializer = new serializer();
_local_serializer.ustart_str(_local_str);
mlpbase.mlpunserialize(_local_serializer, network);
_local_serializer.stop();
//......... remainder of this code omitted .........
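At user level the serializer pass-through above collapses to a single serialize/unserialize pair. A sketch assuming the public string-based wrappers alglib.mlpserialize and alglib.mlpunserialize (treat the exact signatures as an assumption):
// Hypothetical sketch: round-trip a network through its string serialization.
alglib.multilayerperceptron net, restored;
string s;
alglib.mlpcreate0(2, 1, out net);                 // trivial 2-input, 1-output network
alglib.mlpserialize(net, out s);                  // network -> string
alglib.mlpunserialize(s, out restored);           // string -> independent copy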
Example 8: support
/*************************************************************************
This function estimates generalization error using cross-validation on the
current dataset with current training settings.
FOR USERS OF COMMERCIAL EDITION:
! Commercial version of ALGLIB includes two important improvements of
! this function:
! * multicore support (C++ and C# computational cores)
! * SSE support (C++ computational core)
!
! Second improvement gives constant speedup (2-3X). First improvement
! gives close-to-linear speedup on multicore systems. Following
! operations can be executed in parallel:
! * FoldsCount cross-validation rounds (always)
! * NRestarts training sessions performed within each of
! cross-validation rounds (if NRestarts>1)
! * gradient calculation over large dataset (if dataset is large enough)
!
! In order to use multicore features you have to:
! * use commercial version of ALGLIB
! * call this function with "smp_" prefix, which indicates that
! multicore code will be used (for multicore support)
!
! In order to use SSE features you have to:
! * use commercial version of ALGLIB on Intel processors
! * use C++ computational core
!
! This note is given for users of commercial edition; if you use GPL
! edition, you still will be able to call smp-version of this function,
! but all computations will be done serially.
!
! We recommend you to carefully read ALGLIB Reference Manual, section
! called 'SMP support', before using parallel version of this function.
INPUT PARAMETERS:
S - trainer object
Network - neural network. It must have same number of inputs and
output/classes as was specified during creation of the
trainer object. Network is not changed during cross-
validation and is not trained - it is used only as
representative of its architecture. I.e., we estimate
generalization properties of ARCHITECTURE, not some
specific network.
NRestarts - number of restarts, >=0:
* NRestarts>0 means that for each cross-validation
round specified number of random restarts is
performed, with best network being chosen after
training.
* NRestarts=0 is same as NRestarts=1
FoldsCount - number of folds in k-fold cross-validation:
* 2<=FoldsCount<=size of dataset
* recommended value: 10.
* values larger than dataset size will be silently
truncated down to dataset size
OUTPUT PARAMETERS:
Rep - structure which contains cross-validation estimates:
* Rep.RelCLSError - fraction of misclassified cases.
* Rep.AvgCE - average cross-entropy
* Rep.RMSError - root-mean-square error
* Rep.AvgError - average error
* Rep.AvgRelError - average relative error
NOTE: when no dataset was specified with MLPSetDataset/SetSparseDataset(),
or subset with only one point was given, zeros are returned as
estimates.
NOTE: this method performs FoldsCount cross-validation rounds, each one
with NRestarts random starts. Thus, FoldsCount*NRestarts networks
are trained in total.
NOTE: Rep.RelCLSError/Rep.AvgCE are zero on regression problems.
NOTE: on classification problems Rep.RMSError/Rep.AvgError/Rep.AvgRelError
contain errors in prediction of posterior probabilities.
-- ALGLIB --
Copyright 23.07.2012 by Bochkanov Sergey
*************************************************************************/
public static void mlpkfoldcv(mlptrainer s,
mlpbase.multilayerperceptron network,
int nrestarts,
int foldscount,
mlpreport rep)
{
alglib.smp.shared_pool pooldatacv = new alglib.smp.shared_pool();
mlpparallelizationcv datacv = new mlpparallelizationcv();
mlpparallelizationcv sdatacv = null;
double[,] cvy = new double[0,0];
int[] folds = new int[0];
double[] buf = new double[0];
double[] dy = new double[0];
int nin = 0;
int nout = 0;
int wcount = 0;
int rowsize = 0;
int ntype = 0;
int ttype = 0;
int i = 0;
//......... remainder of this code omitted .........
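A minimal usage sketch of the cross-validation call documented above, assuming the public trainer wrappers (names and parameter order as in the comment block; verify against your ALGLIB version):
// Hypothetical sketch: k-fold CV estimate of a classifier architecture.
double[,] xy = new double[100, 3];                // 2 inputs + class index in the last column
alglib.multilayerperceptron net;
alglib.mlptrainer trn;
alglib.mlpreport rep;
alglib.mlpcreatec1(2, 5, 2, out net);
alglib.mlpcreatetrainercls(2, 2, out trn);
alglib.mlpsetdataset(trn, xy, 100);
alglib.mlpkfoldcv(trn, net, 5, 10, out rep);      // 5 restarts per round, 10 folds
// rep.relclserror, rep.avgce, rep.rmserror now hold cross-validation estimates.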
Example 9: _pexec_mlpkfoldcv
/*************************************************************************
Single-threaded stub. HPC ALGLIB replaces it by multithreaded code.
*************************************************************************/
public static void _pexec_mlpkfoldcv(mlptrainer s,
mlpbase.multilayerperceptron network,
int nrestarts,
int foldscount,
mlpreport rep)
{
mlpkfoldcv(s,network,nrestarts,foldscount,rep);
}
Example 10: modelerrors
public modelerrors(mlpbase.modelerrors obj)
{
_innerobj = obj;
}
Example 11: mlpeallerrorsx
/*************************************************************************
Calculation of all types of errors
-- ALGLIB --
Copyright 17.02.2009 by Bochkanov Sergey
*************************************************************************/
public static void mlpeallerrorsx(mlpensemble ensemble,
double[,] densexy,
sparse.sparsematrix sparsexy,
int datasetsize,
int datasettype,
int[] idx,
int subset0,
int subset1,
int subsettype,
alglib.smp.shared_pool buf,
mlpbase.modelerrors rep)
{
int i = 0;
int j = 0;
int nin = 0;
int nout = 0;
bool iscls = new bool();
int srcidx = 0;
hpccores.mlpbuffers pbuf = null;
mlpbase.modelerrors rep0 = new mlpbase.modelerrors();
mlpbase.modelerrors rep1 = new mlpbase.modelerrors();
int i_ = 0;
int i1_ = 0;
//
// Get network information
//
nin = mlpbase.mlpgetinputscount(ensemble.network);
nout = mlpbase.mlpgetoutputscount(ensemble.network);
iscls = mlpbase.mlpissoftmax(ensemble.network);
//
// Retrieve buffer, prepare, process data, recycle buffer
//
alglib.smp.ae_shared_pool_retrieve(buf, ref pbuf);
if( iscls )
{
bdss.dserrallocate(nout, ref pbuf.tmp0);
}
else
{
bdss.dserrallocate(-nout, ref pbuf.tmp0);
}
apserv.rvectorsetlengthatleast(ref pbuf.x, nin);
apserv.rvectorsetlengthatleast(ref pbuf.y, nout);
apserv.rvectorsetlengthatleast(ref pbuf.desiredy, nout);
for(i=subset0; i<=subset1-1; i++)
{
srcidx = -1;
if( subsettype==0 )
{
srcidx = i;
}
if( subsettype==1 )
{
srcidx = idx[i];
}
alglib.ap.assert(srcidx>=0, "MLPEAllErrorsX: internal error");
if( datasettype==0 )
{
for(i_=0; i_<=nin-1;i_++)
{
pbuf.x[i_] = densexy[srcidx,i_];
}
}
if( datasettype==1 )
{
sparse.sparsegetrow(sparsexy, srcidx, ref pbuf.x);
}
mlpeprocess(ensemble, pbuf.x, ref pbuf.y);
if( mlpbase.mlpissoftmax(ensemble.network) )
{
if( datasettype==0 )
{
pbuf.desiredy[0] = densexy[srcidx,nin];
}
if( datasettype==1 )
{
pbuf.desiredy[0] = sparse.sparseget(sparsexy, srcidx, nin);
}
}
else
{
if( datasettype==0 )
{
i1_ = (nin) - (0);
for(i_=0; i_<=nout-1;i_++)
{
pbuf.desiredy[i_] = densexy[srcidx,i_+i1_];
}
}
if( datasettype==1 )
{
//......... remainder of this code omitted .........
Example 12: createnetwork
/*************************************************************************
Network creation
*************************************************************************/
private static void createnetwork(mlpbase.multilayerperceptron network,
int nkind,
double a1,
double a2,
int nin,
int nhid1,
int nhid2,
int nout)
{
ap.assert(((nin>0 & nhid1>=0) & nhid2>=0) & nout>0, "CreateNetwork error");
ap.assert(nhid1!=0 | nhid2==0, "CreateNetwork error");
ap.assert(nkind!=1 | nout>=2, "CreateNetwork error");
if( nhid1==0 )
{
//
// No hidden layers
//
if( nkind==0 )
{
mlpbase.mlpcreate0(nin, nout, network);
}
else
{
if( nkind==1 )
{
mlpbase.mlpcreatec0(nin, nout, network);
}
else
{
if( nkind==2 )
{
mlpbase.mlpcreateb0(nin, nout, a1, a2, network);
}
else
{
if( nkind==3 )
{
mlpbase.mlpcreater0(nin, nout, a1, a2, network);
}
}
}
}
return;
}
if( nhid2==0 )
{
//
// One hidden layer
//
if( nkind==0 )
{
mlpbase.mlpcreate1(nin, nhid1, nout, network);
}
else
{
if( nkind==1 )
{
mlpbase.mlpcreatec1(nin, nhid1, nout, network);
}
else
{
if( nkind==2 )
{
mlpbase.mlpcreateb1(nin, nhid1, nout, a1, a2, network);
}
else
{
if( nkind==3 )
{
mlpbase.mlpcreater1(nin, nhid1, nout, a1, a2, network);
}
}
}
}
return;
}
//
// Two hidden layers
//
if( nkind==0 )
{
mlpbase.mlpcreate2(nin, nhid1, nhid2, nout, network);
}
else
{
if( nkind==1 )
{
mlpbase.mlpcreatec2(nin, nhid1, nhid2, nout, network);
}
else
{
if( nkind==2 )
{
mlpbase.mlpcreateb2(nin, nhid1, nhid2, nout, a1, a2, network);
//......... remainder of this code omitted .........
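The nkind dispatch above maps one-to-one onto the mlpbase creation routines that appear in the listing. A compact illustrative sketch for the one-hidden-layer case (internal-style calls, as used above; the comments on a1/a2 semantics are simplified):
// Illustrative sketch of the nkind -> creation routine mapping (one hidden layer).
mlpbase.multilayerperceptron net = new mlpbase.multilayerperceptron();
int nkind = 1;
if( nkind==0 ) { mlpbase.mlpcreate1(2, 5, 1, net); }              // linear (unconstrained) outputs
if( nkind==1 ) { mlpbase.mlpcreatec1(2, 5, 2, net); }             // softmax classifier, NOut>=2
if( nkind==2 ) { mlpbase.mlpcreateb1(2, 5, 1, 0.0, 1.0, net); }   // bounded outputs (a1, a2 set the bound)
if( nkind==3 ) { mlpbase.mlpcreater1(2, 5, 1, 0.0, 1.0, net); }   // outputs constrained to [a1, a2]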
Example 13: True
/*************************************************************************
This function performs step-by-step training of the neural network. Here
"step-by-step" means that training starts with MLPStartTrainingX call,
and then user subsequently calls MLPContinueTrainingX to perform one more
iteration of the training.
This function performs one more iteration of the training and returns
either True (training continues) or False (training stopped). When True
is returned, the Network weights are updated according to the current
state of the optimization progress. When False is returned, no additional
update is performed (the previous update of the network weights moved us
to the final point, and no further updates are needed).
EXAMPLE:
>
> [initialize network and trainer object]
>
> MLPStartTraining(Trainer, Network, True)
> while MLPContinueTraining(Trainer, Network) do
> [visualize training progress]
>
INPUT PARAMETERS:
S - trainer object
Network - neural network which receives A COPY of the actual
network trained by the algorithm. After each training
round, the state of the network being trained is
copied to this variable.
It must have the same number of inputs and outputs/classes
as was specified during creation of the trainer object,
and it must have exactly the same architecture as the
second network (TNetwork).
TNetwork - neural network being trained.
State - LBFGS optimizer, already initialized, number of
dimensions must be equal to number of weights in the
networks.
Subset - subset of the training set (stores row indices);
SubsetSize - size of the subset (if SubsetSize<0, the full dataset is used).
NGradBatch - number of calls to the MLPGradBatch function; initial
value is zero;
OUTPUT PARAMETERS:
Network - weights of the neural network are overwritten with the
current approximation;
NGradBatch - number of calls to the MLPGradBatch function after training.
NOTE: this method uses sum-of-squares error function for training.
NOTE: it is expected that trainer object settings are NOT changed during
step-by-step training, i.e. no one changes stopping criteria or
training set during training. It is possible and there is no defense
against such actions, but algorithm behavior in such cases is
undefined and can be unpredictable.
NOTE: It is expected that Network is the same one which was passed to
MLPStartTraining() function. However, THIS function checks only
following:
* that number of network inputs is consistent with trainer object
settings
* that number of network outputs/classes is consistent with trainer
object settings
* that number of network weights is the same as number of weights in
the network passed to MLPStartTraining() function
Exception is thrown when these conditions are violated.
It is also expected that you do not change the state of the network on
your own - the only party with the right to change the network during
training is the trainer object. Any attempt to interfere with the
trainer may lead to unpredictable results.
-- ALGLIB --
Copyright 13.08.2012 by Bochkanov Sergey
*************************************************************************/
private static bool mlpcontinuetrainingx(mlptrainer s,
mlpbase.multilayerperceptron network,
mlpbase.multilayerperceptron tnetwork,
minlbfgs.minlbfgsstate state,
int[] subset,
int subsetsize,
ref int ngradbatch)
{
bool result = new bool();
int nin = 0;
int nout = 0;
int wcount = 0;
int twcount = 0;
int ntype = 0;
int ttype = 0;
double decay = 0;
double v = 0;
int i = 0;
int i_ = 0;
alglib.ap.assert(s.npoints>=0, "MLPContinueTrainingX: internal error - parameter S is not initialized or is spoiled(S.NPoints<0).");
if( s.rcpar )
{
ttype = 0;
}
else
//......... remainder of this code omitted .........
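The pseudocode EXAMPLE in the comment block above corresponds, at user level, to a short loop over the public wrappers. A hedged C# sketch (the alglib.mlpstarttraining/alglib.mlpcontinuetraining signatures are assumed; verify against your ALGLIB version):
// Hypothetical sketch of step-by-step training with progress visualization.
double[,] xy = new double[100, 3];
alglib.multilayerperceptron net;
alglib.mlptrainer trn;
alglib.mlpcreatec1(2, 5, 2, out net);
alglib.mlpcreatetrainercls(2, 2, out trn);
alglib.mlpsetdataset(trn, xy, 100);
alglib.mlpstarttraining(trn, net, true);          // randomize weights and prepare the trainer
while( alglib.mlpcontinuetraining(trn, net) )
{
    // net now holds a copy of the current optimization state;
    // inspect or log progress here, but do not modify the network.
}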
Example 14: MLPContinueTraining
/*************************************************************************
This function performs step-by-step training of the neural network. Here
"step-by-step" means that training starts with MLPStartTrainingX call,
and then user subsequently calls MLPContinueTrainingX to perform one more
iteration of the training.
After a call to this function the trainer object remembers the network and
is ready to train it. However, no training is performed until the first call
to the MLPContinueTraining() function. Subsequent calls to MLPContinueTraining()
advance the training progress one iteration further.
EXAMPLE:
>
> ...initialize network and trainer object....
>
> MLPStartTraining(Trainer, Network, True)
> while MLPContinueTraining(Trainer, Network) do
> ...visualize training progress...
>
INPUT PARAMETERS:
S - trainer object;
Network - neural network which receives A COPY of the actual
network trained by the algorithm. After each training
round, the state of the network being trained is
copied to this variable.
It must have the same number of inputs and outputs/classes
as was specified during creation of the trainer object,
and it must have exactly the same architecture as the
second network (TNetwork).
TNetwork - neural network being trained.
State - LBFGS optimizer, already initialized, number of
dimensions must be equal to number of weights in the
networks.
RandomStart - randomize network before training or not:
* True means that network is randomized and its
initial state (one which was passed to the trainer
object) is lost;
* False means that training is started from the
current state of the network.
Subset - subset of the training set (stores row indices);
SubsetSize - size of the subset (if SubsetSize<0, the full dataset is used).
OUTPUT PARAMETERS:
Network - neural network which is ready for training (weights are
initialized, preprocessor is initialized using current
training set)
NOTE: this method uses sum-of-squares error function for training.
NOTE: it is expected that trainer object settings are NOT changed during
step-by-step training, i.e. no one changes stopping criteria or
training set during training. It is possible and there is no defense
against such actions, but algorithm behavior in such cases is
undefined and can be unpredictable.
-- ALGLIB --
Copyright 13.08.2012 by Bochkanov Sergey
*************************************************************************/
private static void mlpstarttrainingx(mlptrainer s,
mlpbase.multilayerperceptron network,
mlpbase.multilayerperceptron tnetwork,
minlbfgs.minlbfgsstate state,
bool randomstart,
int[] subset,
int subsetsize)
{
int nin = 0;
int nout = 0;
int wcount = 0;
int twcount = 0;
int ntype = 0;
int ttype = 0;
int i = 0;
int i_ = 0;
alglib.ap.assert(s.npoints>=0, "MLPStartTrainingX: internal error - parameter S is not initialized or is spoiled(S.NPoints<0)");
if( s.rcpar )
{
ttype = 0;
}
else
{
ttype = 1;
}
if( !mlpbase.mlpissoftmax(network) )
{
ntype = 0;
}
else
{
ntype = 1;
}
alglib.ap.assert(ntype==ttype, "MLPStartTrainingX: internal error - type of the resulting network is not similar to network type in trainer object");
if( !mlpbase.mlpissoftmax(tnetwork) )
{
ntype = 0;
}
else
{
//......... remainder of this code omitted .........
Example 15: initmlptrnsessions
/*************************************************************************
This function initializes temporaries needed for training session.
*************************************************************************/
private static void initmlptrnsessions(mlpbase.multilayerperceptron networktrained,
bool randomizenetwork,
mlptrainer trainer,
alglib.smp.shared_pool sessions)
{
int[] dummysubset = new int[0];
smlptrnsession t = new smlptrnsession();
smlptrnsession p = null;
if( alglib.smp.ae_shared_pool_is_initialized(sessions) )
{
//
// Pool was already initialized.
// Clear sessions stored in the pool.
//
alglib.smp.ae_shared_pool_first_recycled(sessions, ref p);
while( p!=null )
{
alglib.ap.assert(mlpbase.mlpsamearchitecture(p.network, networktrained), "InitMLPTrnSessions: internal consistency error");
p.bestrmserror = math.maxrealnumber;
alglib.smp.ae_shared_pool_next_recycled(sessions, ref p);
}
}
else
{
//
// Prepare session and seed pool
//
initmlptrnsession(networktrained, randomizenetwork, trainer, t);
alglib.smp.ae_shared_pool_set_seed(sessions, t);
}
}
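Examples 6 and 15 both seed an alglib.smp.shared_pool with a session object, and example 11 shows the consuming side with ae_shared_pool_retrieve. A schematic sketch of that retrieve/recycle pattern follows (internal-style calls as used in the listing; the recycle call and the session type are assumptions):
// Schematic sketch: consume a session from a pool seeded by initmlptrnsessions.
smlptrnsession session = null;
alglib.smp.ae_shared_pool_retrieve(sessions, ref session);   // obtain a copy of the seeded session
// ... run one training round using the session's temporaries ...
alglib.smp.ae_shared_pool_recycle(sessions, ref session);    // return the session for reuse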