本文整理汇总了C++中ClassList::add方法的典型用法代码示例。如果您正苦于以下问题:C++ ClassList::add方法的具体用法?C++ ClassList::add怎么用?C++ ClassList::add使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类ClassList
的用法示例。
在下文中一共展示了ClassList::add方法的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: pdSamples
// Create a perceptron that learns the partial derivatives of targetFunction
// with respect to sensors [sensorFrom..sensorTo] of perceptron p.
// The derivative perceptron shares p's sensor variables, has one target
// variable per derivation sensor, and is trained on derivative values
// computed at the same sample points as p's training set.
// Returns the trained derivative perceptron.
NN *NNRegressionFactory::createDerivationPerceptron( NNVariables *vars , NN *p , NNSamples *pSamples , int sensorFrom , int sensorTo , float ( *targetFunction )( NN *p ) )
{
	int nDerivationSensors = sensorTo - sensorFrom + 1;
	ASSERT( nDerivationSensors > 0 );
	// create perceptron - sensors are the same, targets are required derivatives of target function by given sensor
	int nSrcSensors = p -> getNSensors();
	int nDstTargets = nDerivationSensors;
	int hiddenLayerSize = ( nDstTargets + nSrcSensors ) / 2;
	// variables
	// sensors are the same
	ClassList<NNVariable> sensors;
	for( int k = 0; k < nSrcSensors; k++ )
		sensors.add( p -> getSensorVariable( k ) );
	// targets are derivatives - owned by NN variables
	// BUG FIX: the original nested loop created nSrcSensors variables per
	// derivation sensor, while nDstTargets and the derivative samples carry
	// exactly one value per derivation sensor - create one target each
	ClassList<NNVariable> targets;
	for( int k1 = sensorFrom; k1 <= sensorTo; k1++ )
		targets.add( vars -> addCommonNumberDerivative() );
	NN *pd = createDefaultPerceptron( String( p -> getId() ) + "#D" , sensors , targets , hiddenLayerSize );
	// create main strategy - derive instance name from source perceptron when available
	String instance;
	const char *pInstance = p -> getInstance();
	if( pInstance != NULL && *pInstance != 0 )
		instance = String( pInstance ) + "::derivative";
	NNStrategyBackPropagation *psBPd = createDefaultStrategy( instance , pd );
	// populate derivation samples - in the same points as training set
	NNSamples pdSamples( nSrcSensors , nDstTargets );
	for( int m = 0; m < pSamples -> count(); m++ )
	{
		NNSample *sample = pSamples -> getByPos( m );
		NNSample *sampleD = pdSamples.add();
		sampleD -> setSensors( sample -> getSensors() );
		getDerivatives( p , sensorFrom , sensorTo , sample , sampleD -> getTargets() , targetFunction );
	}
	// learn derivative perceptron
	bool res = psBPd -> learn( &pdSamples , NULL , NULL );
	ASSERT( res );
	return( pd );
}
示例2: scaleBySamplesStddev
// Build sensor and target scaling variables from the per-component
// mean/stddev statistics of the sample set: each created variable is
// bounded to [mean - stddev, mean + stddev] and appended in order.
void NNRegressionFactory::scaleBySamplesStddev( NNSamples *samples , NNVariables *vars , ClassList<NNVariable>& sensors , ClassList<NNVariable>& targets )
{
	// inputs: one bounded variable per sensor component
	int nIn = samples -> sizeIn();
	for( int i = 0; i < nIn; i++ )
	{
		float mean;
		float stddev;
		samples -> getSensorStat( i , &mean , &stddev );
		sensors.add( vars -> addCommonNumber( true , mean - stddev , true , mean + stddev ) );
	}
	// outputs: one bounded variable per target component
	int nOut = samples -> sizeOut();
	for( int i = 0; i < nOut; i++ )
	{
		float mean;
		float stddev;
		samples -> getTargetStat( i , &mean , &stddev );
		targets.add( vars -> addCommonNumber( true , mean - stddev , true , mean + stddev ) );
	}
}
示例3: scaleBySamplesSimple
// Build sensor and target scaling variables from the per-component
// min/max ranges of the sample set: each created variable is bounded
// to [min, max]; a degenerate (empty) range is a programming error.
void NNRegressionFactory::scaleBySamplesSimple( NNSamples *samples , NNVariables *vars , ClassList<NNVariable>& sensors , ClassList<NNVariable>& targets )
{
	// inputs: one bounded variable per sensor component
	int nIn = samples -> sizeIn();
	for( int i = 0; i < nIn; i++ )
	{
		float low;
		float high;
		samples -> getSensorRange( i , &low , &high );
		ASSERT( low < high );
		sensors.add( vars -> addCommonNumber( true , low , true , high ) );
	}
	// outputs: one bounded variable per target component
	int nOut = samples -> sizeOut();
	for( int i = 0; i < nOut; i++ )
	{
		float low;
		float high;
		samples -> getTargetRange( i , &low , &high );
		ASSERT( low < high );
		targets.add( vars -> addCommonNumber( true , low , true , high ) );
	}
}
示例4: create
// Split the given tasks evenly across the pool threads and create one
// suspended thread per slice; does nothing when the pool is disabled.
// Thread count is capped by the number of tasks.
void ThreadPool::create( ClassList<ThreadPoolTask>& tasks ) {
	if( !runEnabled ) {
		logger.logInfo( "ignore threadPool=" + name + ", runEnabled=false" );
		return;
	}
	int nTasks = tasks.count();
	// never create more threads than there are tasks
	if( nThreads > nTasks )
		nThreads = nTasks;
	ASSERTMSG( nThreads >= 1 , "nThreads is invalid" );
	// every thread gets nWhole tasks; the first nPart threads get one extra
	int nWhole = nTasks / nThreads;
	int nPart = nTasks % nThreads;
	int nFrom = 0;
	for( int threadIndex = 0; threadIndex < nThreads; threadIndex++ ) {
		// size of this thread's slice
		int sliceSize = nWhole;
		if( nPart > 0 ) {
			sliceSize++;
			nPart--;
		}
		// collect the task slice and bind each task to this pool
		ClassList<ThreadPoolTask> threadTasks;
		threadTasks.allocate( sliceSize );
		for( int j = 0; j < sliceSize; j++ ) {
			ThreadPoolTask *task = tasks.get( nFrom + j );
			task -> pool = this;
			threadTasks.add( task );
		}
		// create the thread (suspended) and register it in the pool
		String threadName = name + "#" + threadIndex;
		ThreadPoolItem *thread = new ThreadPoolFixedTaskListItem( threadName , threadIndex , threadTasks );
		threads.add( thread );
		// advance to the next slice
		nFrom += sliceSize;
	}
	logger.logInfo( String( "threadpool created: name=" ) + name + ", nThreads=" + nThreads + ", nTasks=" + nTasks );
}
示例5: testWorkflow
// Thread pool lifecycle test: build a set of test tasks, create a pool
// from the XML configuration attached to the call, then drive it through
// start / suspend / resume / stop with the configured delays, and finally
// destroy the tasks (they are owned by this scenario).
void testWorkflow( XmlCall& call ) {
	// read scenario parameters from the call
	String threadPoolName = call.getParam( "threadPoolName" );
	int nTasks = call.getIntParam( "nTasks" );
	int taskTimeMs = call.getIntParam( "taskTimeMs" );
	int runTimeSec = call.getIntParam( "runTimeSec" );
	int suspendTimeSec = call.getIntParam( "suspendTimeSec" );
	int resumeTimeSec = call.getIntParam( "resumeTimeSec" );
	// build the task list: T0, T1, ... each running taskTimeMs
	ClassList<ThreadPoolTask> tasks;
	for( int taskIndex = 0; taskIndex < nTasks; taskIndex++ )
		tasks.add( new ThreadPoolTest_Task( String( "T" ) + taskIndex , taskTimeMs ) );
	// create and configure thread pool
	logger.logInfo( "Create thread pool..." );
	ThreadService *threadService = ThreadService::getService();
	threadService -> createThreadPool( threadPoolName , call.getXml().getChildNode( "threadpoolconfiguration" ) , tasks );
	// drive the pool through its lifecycle
	logger.logInfo( "Start thread pool..." );
	threadService -> startThreadPool( threadPoolName );
	threadService -> threadSleepMs( runTimeSec * 1000 );
	logger.logInfo( "Suspend thread pool..." );
	threadService -> suspendThreadPool( threadPoolName );
	threadService -> threadSleepMs( suspendTimeSec * 1000 );
	logger.logInfo( "Resume thread pool..." );
	threadService -> resumeThreadPool( threadPoolName );
	threadService -> threadSleepMs( resumeTimeSec * 1000 );
	logger.logInfo( "Stop thread pool..." );
	threadService -> stopThreadPool( threadPoolName );
	// tasks are owned here - release them
	tasks.destroy();
	logger.logInfo( "Finished." );
}
示例6: execute_line
// Parse one text line describing a nerve entry and append the resulting
// Nerve to nn. Expected shape: leading '*' run encoding the nesting level,
// then *name*, optional " (synonyms)", and an optional "; "-prefixed section
// with ORIGIN / BRANCHES / DISTRIBUTION / MODALITY / FIBERS items in that
// exact order. Consumes s destructively. Returns 0 on success, or a
// distinct non-zero error code identifying which parsing step failed.
int NerveTool::execute_line( String& s , FILE *sout , ClassList<Nerve>& nn ) {
// image reference line at top level - delegate with level 0
if( s.startsFrom( "<img" ) )
return( execute_img( s , sout , 0 , nn ) );
// parse string
// nesting level is encoded by the position of the first '*'
int level = s.find( "*" );
if( level < 0 )
return( 1 );
if( level < 2 )
return( 13 );
s.remove( 0 , level + 1 );
// marker column 2 corresponds to level 0
level -= 2;
if( nn.count() > 0 ) {
Nerve *np = nn.last();
// a deeper entry may only be one level below the previous one
if( np -> level < level )
if( level != np -> level + 1 )
return( 14 );
}
s.trim();
// image reference nested under the current level
if( s.startsFrom( "<img" ) )
return( execute_img( s , sout , level , nn ) );
if( !s.startsFrom( "*" ) )
return( 2 );
s.remove( 0 , 1 );
// name
// name is delimited by the next '*'
int idx = s.find( "*" );
if( idx < 0 )
return( 3 );
String name = s.getMid( 0 , idx );
s.remove( 0 , idx + 1 );
// synonyms
// optional synonyms in parentheses: " (...)"
String synonyms;
if( s.startsFrom( " (" ) ) {
s.remove( 0 , 2 );
idx = s.find( ")" );
if( idx < 0 )
return( 4 );
synonyms = s.getMid( 0 , idx );
s.remove( 0 , idx + 1 );
}
String origin;
String branches;
String distribution;
String modality;
String fibers;
// optional attribute section; extract_item consumes each labelled item
// and must find them in this fixed order
if( s.startsFrom( "; " ) ) {
s.remove( 0 , 2 );
if( !extract_item( sout , origin , s , "ORIGIN" ) )
return( 6 );
if( !extract_item( sout , branches , s , "BRANCHES" ) )
return( 8 );
if( !extract_item( sout , distribution , s , "DISTRIBUTION" ) )
return( 7 );
if( !extract_item( sout , modality , s , "MODALITY" ) )
return( 9 );
if( !extract_item( sout , fibers , s , "FIBERS" ) )
return( 10 );
}
// any unconsumed text means the line is malformed
if( !s.isEmpty() )
return( 11 );
Nerve *n = new Nerve;
n -> fibers = fibers;
// NOTE(review): local fibersinfo is never used - extract_fibers below
// fills n -> fibersinfo directly; candidate for removal
String fibersinfo;
if( !fibers.isEmpty() )
if( !extract_fibers( sout , n -> fibersinfo , fibers ) ) {
fprintf( sout , "wrong fibers=%s\n" , ( const char * )fibers );
delete n;
return( 12 );
}
n -> name = name;
n -> synonyms = synonyms;
n -> level = level;
n -> origin = origin;
n -> branches = branches;
n -> distribution = distribution;
n -> modality = modality;
// nn takes ownership of the new Nerve
nn.add( n );
return( 0 );
}
示例7: extract_fiberitems
// Parse a fiber chain description of the form "x,y -> x,y -> ..." and emit
// fiber records of the given type into fibersinfo. Many-to-many links are
// rejected; many-to-one and one-to-many segments are split out of longer
// single chains. The heap-allocated per-part StringLists are always
// released (ClassList does not own its items - see testWorkflow's explicit
// tasks.destroy()). Returns false on any format error.
bool NerveTool::extract_fiberitems( FILE *sout , StringList& fibersinfo , String type , String& value ) {
	// parse value: x,y -> x,y -> x,y ...
	ClassList<StringList> chain;
	value.trim();
	while( !value.isEmpty() ) {
		String part;
		int idx = value.find( "->" );
		if( idx < 0 ) {
			// tail element; a chain needs at least one "->"
			if( chain.count() == 0 )
				return( false );
			part = value;
			value.clear();
		}
		else {
			part = value.getMid( 0 , idx );
			value.remove( 0 , idx + 2 );
			value.trim();
			if( value.isEmpty() ) {
				// dangling "->" with nothing after it
				// BUG FIX: release already-parsed parts (was a memory leak)
				chain.destroy();
				return( false );
			}
		}
		// parse part
		StringList *z = new StringList;
		chain.add( z );
		if( !extract_codes( part , z ) ) {
			fprintf( sout , "wrong part=%s\n" , ( const char * )part );
			// BUG FIX: release parsed parts before the error return
			chain.destroy();
			return( false );
		}
		// prohibit many-to-many
		if( z -> count() > 1 && chain.count() > 1 ) {
			StringList& zp = chain.getRef( chain.count() - 2 );
			if( zp.count() > 1 ) {
				// BUG FIX: release parsed parts before the error return
				chain.destroy();
				return( false );
			}
		}
	}
	// chain of more than one
	if( chain.count() < 2 ) {
		chain.destroy();
		return( false );
	}
	// split chain
	int startChain = 0;
	int startChainCount = 0;
	for( int k = 0; k < chain.count(); k++ ) {
		StringList& z = chain.getRef( k );
		int zn = z.count();
		// starter
		if( k == 0 ) {
			startChainCount = zn;
			continue;
		}
		// many to one - split
		if( startChainCount > 1 ) {
			if( zn != 1 ) {
				// BUG FIX: release parsed parts before the error return
				chain.destroy();
				return( false );
			}
			// BUG FIX: original passed startChainCount (a group size) as the
			// index; the "many" side of this link is the group at startChain
			addManyToOne( fibersinfo , type , chain.getRef( startChain ) , z.get( 0 ) );
			startChain = k;
			startChainCount = zn;
			continue;
		}
		// allow x -> y -> z as is
		if( zn == 1 ) {
			if( k == chain.count() - 1 ) {
				addSingleChain( fibersinfo , type , chain , startChain , k );
				break;
			}
			continue;
		}
		// x -> y -> x,y - split to x -> y and y -> x,y
		if( ( k - 1 ) > startChain ) {
			addSingleChain( fibersinfo , type , chain , startChain , k - 1 );
			startChain = k - 1;
			startChainCount = 1;
		}
		addOneToMany( fibersinfo , type , chain.getRef( startChain ).get( 0 ) , z );
		startChain = k;
		startChainCount = zn;
	}
	chain.destroy();
	return( true );
}
示例8: joinLattice
// Merge this lattice's flat class list into lattTo's list: nodes from this
// lattice are moved into lattTo, base classes that already exist in lattTo
// are unified (the duplicate is deleted and all derivation pointers are
// redirected), and the moved nodes' _level values are shifted by a common
// offset so base/derived level relations hold in the target lattice.
// Caller contract: lattTo must be a different, unrelated lattice.
void ClassLattice::joinLattice( ClassLattice * lattTo )
//-----------------------------------------------------
{
ClassList* list = _flatClasses;
// nodes moved into lattTo whose _level must be adjusted at the end
ClassList adjust;
int levelDiff = 0;
bool levelSet = FALSE;
int i;
REQUIRE( lattTo != this, "classlattice::joinlattice -- join to myself" );
for( i = 0; i < list->count(); i += 1 ) {
ClassLattice * node = (*list)[ i ];
// entries already replaced by NULL (unified duplicates) are skipped
if( node != NULL ) {
adjust.add( node );
// walk bases backwards so earlier unifications don't disturb iteration
for( int j = node->_bases.count(); j > 0; j -= 1 ) {
DerivationPtr * basePtr = node->_bases[ j - 1 ];
REQUIRE( node->_flatClasses != lattTo->_flatClasses,
"ClassLattice::JoinLattice tried to join related" );
// does this base already exist in the target lattice?
int index = findClass( *lattTo->_flatClasses, basePtr->_class );
if( index >= 0 ) {
//NYI rely on not having loaded deriveds (otherwise, might kill me!)
REQUIRE( !basePtr->_class->_derivedsLoaded, "joinLattice ack" );
// mark the duplicate slot NULL so the outer loop skips it
list->replaceAt( findClass( *list, basePtr->_class ), NULL );
if( basePtr->_class != (*lattTo->_flatClasses)[ index ] ) {
delete basePtr->_class;
// this should probably be a separate function
// what it is doing is changing all of the pointers
// from basePtr->_class to the same class in lattTo
for( int k = list->count(); k > 0; k -= 1 ) {
ClassLattice * work = (*list)[ k - 1 ];
if( work != NULL ) {
for( int l = work->_bases.count(); l > 0; l -= 1 ) {
if( work->_bases[ l - 1 ]->_class == basePtr->_class ) {
work->_bases[ l - 1 ]->adjustTo( (*lattTo->_flatClasses)[ index ] );
// level offset needed so this derived sits one level below its base;
// keep the largest offset seen across all rewired pointers
int tryDiff = work->_bases[ l - 1 ]->_class->_level + 1 - work->_level;
if( !levelSet ) {
levelDiff = tryDiff;
levelSet = TRUE;
} else {
if( tryDiff > levelDiff ) {
levelDiff = tryDiff;
}
}
}
}
}
}
}
}
}
// move the node itself into the target lattice
node->_flatClasses = lattTo->_flatClasses;
lattTo->_flatClasses->add( node );
}
}
// shift all moved nodes by the computed common offset
for( i = adjust.count(); i > 0; i -= 1 ) {
adjust[ i - 1 ]->_level += levelDiff;
}
// the old (now emptied/NULLed) flat list is no longer needed
delete list;
}