本文整理汇总了C++中CStopWatch::getElapsedTime方法的典型用法代码示例。如果您正苦于以下问题:C++ CStopWatch::getElapsedTime方法的具体用法?C++ CStopWatch::getElapsedTime怎么用?C++ CStopWatch::getElapsedTime使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类CStopWatch
的用法示例。
在下文中一共展示了CStopWatch::getElapsedTime方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: log
void ToneMappingDrago03::toneMapping_Drago03(Image<float> *img, float *avLum, float *maxLum, unsigned int *pic, float bias)
{
    // Cache the caller's buffers on the instance for use by the kernel code.
    image = img;
    picture = pic;
    avLuminance = avLum;
    maxLuminance = maxLum;

    // Maximum luminance normalised by the average luminance.
    normMaxLum = *maxLum / *avLum;

    // Drago'03 operator parameters derived from the luminance statistics.
    const float kLogHalf = -0.693147f; // precomputed log(0.5)
    divider = log10(normMaxLum + 1.0f);
    biasP = log(bias) / kLogHalf;
    logFile("divider = %f biasP = %f \n", divider, biasP);

    // Work-group geometry: image dimensions rounded up to a BLOCK_SIZE multiple.
    localWorkSize[0] = BLOCK_SIZE;
    localWorkSize[1] = BLOCK_SIZE;
    logFile("%d %d \n", image->getHeight(), image->getWidth());
    globalWorkSize[0] = roundUp(BLOCK_SIZE, image->getHeight());
    globalWorkSize[1] = roundUp(BLOCK_SIZE, image->getWidth());

    // Run the CPU implementation and log how long the calculation took.
    CStopWatch stopwatch;
    stopwatch.startTimer();
    calctoneMapping_Drago03CPU();
    stopwatch.stopTimer();
    logFile("ToneMappingCPU,calc_time, , ,%f, \n", stopwatch.getElapsedTime());
}
示例2: sizeof
void ConversionRGB2RGBE::getDataFromOpenCLMemory()
{
clFinish(core->getCqCommandQueue());
CStopWatch timer;
timer.startTimer();
unsigned int size = image->getHeight() * image->getWidth() * sizeof(cl_uint);
cl_int ciErr1; // Error code var
// Synchronous/blocking read of results, and check accumulated errors
ciErr1 = clEnqueueReadBuffer(core->getCqCommandQueue(), cl_intChannelR, CL_TRUE, 0,
size, channelR, 0, NULL, NULL);
ciErr1 |= clEnqueueReadBuffer(core->getCqCommandQueue(), cl_intChannelG, CL_TRUE, 0,
size, channelG, 0, NULL, NULL);
ciErr1 |= clEnqueueReadBuffer(core->getCqCommandQueue(), cl_intChannelB, CL_TRUE, 0,
size, channelB, 0, NULL, NULL);
ciErr1 |= clEnqueueReadBuffer(core->getCqCommandQueue(), cl_intChannelE, CL_TRUE, 0,
size, channelE, 0, NULL, NULL);
clFinish(core->getCqCommandQueue());
timer.stopTimer();
logFile("gpuRGB2RGBE,data_out,%d,%d,%f, \n", image->getHeight(), image->getWidth(), timer.getElapsedTime());
logFile("clEnqueueReadBuffer ...\n\n");
if (ciErr1 != CL_SUCCESS)
{
logFile("%d :Error in clEnqueueReadBuffer, Line %u in file %s !!!\n\n", ciErr1, __LINE__, __FILE__);
}
}
示例3: unzip
int HuffmanCode::unzip(char* inputFile, char* outputFile) {
    // Decompress inputFile (HCZ format) into outputFile by rebuilding the
    // Huffman tree from the code table stored in the file header, then
    // decoding the body bit stream. Returns UN_IMPLEMENT (preserved from
    // the original interface).
    CStopWatch time;
    time.startTimer();
    cout << "unzipping..."<<endl;
    // Declare variable
    Writer *writer = new Writer(outputFile);   // sink for decoded bytes
    Reader *reader = new Reader(inputFile);    // source of compressed bits
    HCZHeader *header = new HCZHeader;
    Node* huffmantree = new Node;              // root of the decoding tree
    int totalChar;
    int totalBit;
    // Check if file is right format (-2 signals an unreadable/bad header)
    if( header->read(reader) != -2){
        // BUG FIX: totals were previously queried BEFORE header->read(), so
        // totalChar was passed to convertMaptoHuffman uninitialized.
        header->getTotal(totalChar, totalBit);
        // Initialize the table of character codes: one 16-char, NUL-filled
        // code string per possible byte value.
        char **codemap = new char*[256];
        for(int i = 0; i < 256; i++){
            codemap[i] = new char[16];
            memset(codemap[i],'\0',16);
        }
        for(int i = 0, nbit; i < 256; i++){
            header->get((char)i,nbit,codemap[char2Int(i)]);
        }
        // Rebuild the Huffman tree from every character that has a code.
        for(int i = 0; i< 256; i++){
            if(codemap[i][0] != '\0'){
                convertMaptoHuffman(huffmantree, codemap, (char)i, totalChar);
            }
        }
        int bodysize = header->getBodySize();
        while(bodysize){
            deCode(reader, writer, huffmantree, bodysize);
        }
        // BUG FIX: the code table was previously leaked.
        for(int i = 0; i < 256; i++) delete[] codemap[i];
        delete[] codemap;
    }
    // BUG FIX: the tree root was previously leaked.
    // NOTE(review): assumes Node's destructor releases its children -- confirm.
    delete huffmantree;
    delete reader;
    delete writer;
    delete header;
    cout<<"done!"<<endl;
    time.stopTimer();
    cout<<"Excution time: "<<time.getElapsedTime()<<"s"<<endl;
    return UN_IMPLEMENT;
}
示例4: displayGame
//--------------------------------------------------------------
// Purpose : This main loop calls functions to get input,
// update and render the game at a specific frame rate
// You should not be modifying this unless you know what you are doing.
// Input : void
// Output : void
//--------------------------------------------------------------
//--------------------------------------------------------------
// Purpose : Main loop — polls input, advances the game by the
//           elapsed frame time, renders, then sleeps out the
//           remainder of the frame budget until the user quits.
// Input   : void
// Output  : void
//--------------------------------------------------------------
void displayGame( void )
{
    // Start the timer once so the very first update() sees a sensible delta.
    g_Timer.startTimer();

    for (; !g_bQuitGame; )
    {
        getInput();                         // keyboard input
        update(g_Timer.getElapsedTime());   // advance simulation by elapsed time
        render();                           // draw to screen
        g_Timer.waitUntil(gc_uFrameTime);   // cap the frame rate (ms per frame)
    }
}
示例5: clSetKernelArg
void ToneMappingDrago03::setInputDataToOpenCLMemory()
{
int height = image->getHeight();
int width = image->getWidth();
cl_int ciErr1;
// Set the Argument values
ciErr1 = clSetKernelArg(core->getOpenCLKernel(), 0,
sizeof(cl_int), (void*)&width);
ciErr1 |= clSetKernelArg(core->getOpenCLKernel(), 1,
sizeof(cl_int), (void*)&height);
ciErr1 |= clSetKernelArg(core->getOpenCLKernel(), 2,
sizeof(cl_mem), (void*)&cl_floatImage);
ciErr1 |= clSetKernelArg(core->getOpenCLKernel(), 3,
sizeof(cl_mem), (void*)&cl_picture);
ciErr1 |= clSetKernelArg(core->getOpenCLKernel(), 4,
sizeof(cl_float), (void*)avLuminance);
ciErr1 |= clSetKernelArg(core->getOpenCLKernel(), 5,
sizeof(cl_float), (void*)&normMaxLum);
ciErr1 |= clSetKernelArg(core->getOpenCLKernel(), 6,
sizeof(cl_float), (void*)&biasP);
ciErr1 |= clSetKernelArg(core->getOpenCLKernel(), 7,
sizeof(cl_float), (void*)÷r);
logFile("clSetKernelArg 0 - 7...\n\n");
if (ciErr1 != CL_SUCCESS)
{
logFile("%d :Error in clSetKernelArg, Line %u in file %s !!!\n\n", ciErr1, __LINE__, __FILE__);
}
// --------------------------------------------------------
// Start Core sequence... copy input data to GPU, compute, copy results back
clFinish(core->getCqCommandQueue());
CStopWatch timer;
timer.startTimer();
// Asynchronous write of data to GPU device
unsigned int size = sizeof(cl_float) * image->getHeight() * image->getWidth() * RGB_NUM_OF_CHANNELS;
ciErr1 = clEnqueueWriteBuffer(core->getCqCommandQueue(), cl_floatImage, CL_TRUE, 0,
size, image->getImage(), 0, NULL, NULL);
clFinish(core->getCqCommandQueue());
timer.stopTimer();
logFile("gpuDrago,data_in,%d,%d,%f, \n", height, width, timer.getElapsedTime());
logFile("clEnqueueWriteBuffer ...\n");
if (ciErr1 != CL_SUCCESS)
{
logFile("%d :Error in clEnqueueWriteBuffer, Line %u in file %s !!!\n\n", ciErr1, __LINE__, __FILE__);
}
}
示例6: Compute
void Compute()
{
DivisorSummatoryFunctionOdd algorithm;
CStopWatch timer;
for (int i = 1; i <= 24; i++)
{
Integer n = Power(Integer(10), i);
Integer x2 = sqrt(n);
timer.startTimer();
Integer s = algorithm.Evaluate(n, 1, x2);
timer.stopTimer();
std::string sRep = s.get_str();
printf("i = %d, s = %s, elapsed = %.3f\n", i, sRep.c_str(), timer.getElapsedTime() * 1000);
}
}
示例7: main
//#define Multi 1
//#define Multi 1
int main()
{
    // Time the whole run: read poker hands from hands.txt, evaluate each,
    // and write the verdicts to results.txt.
    CStopWatch sw;
    sw.startTimer();
    std::ofstream fileout("results.txt");
    std::ifstream filein("hands.txt");
    std::string str;
    auto rowCount = 0;
#if Multi
    // Evaluate hands in batches of MaxThreads-1 asynchronous tasks.
    std::array<std::future<std::string>,MaxThreads-1> futures;
    auto count = 0;
    while (count <MaxThreads-1) {
        // BUG FIX: test the stream state of getline itself instead of
        // peeking at eof() beforehand, so a failed read is never evaluated.
        if (!std::getline(filein, str)) break;
        rowCount++;
        futures[count++] = std::async([str]{
            PokerHand pokerhand(str);
            auto result = EvaluateHand(pokerhand);
            return pokerhand.GetResult(result);
        });
        if (count == MaxThreads-1) {
            for (auto & e : futures) {
                fileout << e.get() << std::endl;
            }
            count = 0;
        }
    }
    // BUG FIX: drain any partially filled batch left over when the input
    // ran out mid-batch; those results were previously discarded.
    for (int i = 0; i < count; ++i) {
        fileout << futures[i].get() << std::endl;
    }
#else
    while (std::getline(filein, str))
    {
        PokerHand pokerhand(str);
        auto result = EvaluateHand(pokerhand);
        pokerhand.WriteResult(fileout, result);
        rowCount++;
    }
#endif
    // BUG FIX: fileout.close() previously sat on the same line as #endif,
    // where the preprocessor discards it (ill-formed extra tokens), so the
    // output file was never explicitly closed before timing ended.
    fileout.close();
    filein.close();
    sw.stopTimer();
    std::cout << "Time to evaluate " << rowCount << " poker hands: " << sw.getElapsedTime() << std::endl;
    return 0;
}
示例8: sizeof
void ToneMappingDrago03::getDataFromOpenCLMemory()
{
clFinish(core->getCqCommandQueue());
CStopWatch timer;
timer.startTimer();
unsigned int size = image->getHeight() * image->getWidth() * sizeof(cl_uint) * RGB_NUM_OF_CHANNELS;
cl_int ciErr1; // Error code var
// Synchronous/blocking read of results, and check accumulated errors
ciErr1 = clEnqueueReadBuffer(core->getCqCommandQueue(), cl_picture, CL_TRUE, 0,
size, picture, 0, NULL, NULL);
clFinish(core->getCqCommandQueue());
timer.stopTimer();
logFile("gpuDrago,data_out,%d,%d,%f, \n", image->getHeight(), image->getWidth(), timer.getElapsedTime());
logFile("clEnqueueReadBuffer ...\n\n");
if (ciErr1 != CL_SUCCESS)
{
logFile("%d :Error in clEnqueueReadBuffer, Line %u in file %s !!!\n\n", ciErr1, __LINE__, __FILE__);
}
}
示例9:
CDescriptorSet * CFeatureExtractor::getDescriptors(const IplImage * pImage)
{
    // Time the full extract-and-describe pass over one frame.
    CStopWatch stopwatch;
    stopwatch.startTimer();

    const int channelCount = pImage->nChannels;
    if (channelCount != 1)
    {
        // Colour input: grey-scale it into our pre-allocated buffer.
        CHECK(!pGreyImg, "Haven't created a grey image for corner detection");
        greyScaler.greyScale(pImage, pGreyImg);
    }
    else
    {
        // Already single-channel: reuse the caller's image directly.
        pGreyImg = const_cast<IplImage *>(pImage);
    }

    CDescriptorSet * pDescriptors = getDescriptors_int(pImage);

    stopwatch.stopTimer();
    REPEAT(20, cout << "Extract corners and describe features took " << stopwatch.getElapsedTime() << " seconds\n");
    return pDescriptors;
}
示例10: main
//! Load settings from config and use them to set up mosaicer components, and set them going.
//! Load settings from config and use them to set up mosaicer components, and set them going.
//! Wires together: image source -> feature extractor -> feature matcher (BoW)
//! -> transform estimator -> transform engine -> renderer, then runs capture.
int main(int argc, char* argv[])
{
// Raw owning pointers, deleted at the bottom of main after the try/catch.
//grc::FeatureDetector * featureDetector = 0; //Delete these after (potentially) handling exceptions
grc::Renderer * mosaicRenderer = 0;
grc::EvaluationFunction * evaluationFunction = 0;
try
{
// Load parameters from the config file named on the command line (default.cfg otherwise).
CMosaicingParams PARAMS(0, 0);
config config(argc > 1 ? argv[1] : "default.cfg");
PARAMS.init(&config);
PARAMS.BOW.DescriptorBinning.RADIUS = PARAMS.PatchDescriptor.radius();
// RAII-owned image loader and feature extractor built from the config.
boost::scoped_ptr<CImageSource> pImageLoader( CImageSource::makeImageSource( PARAMS.Im));
boost::scoped_ptr<CFeatureExtractor> pFeatureExtractor ( CFeatureExtractor::makeFeatureExtractor(PARAMS.Im, PARAMS.Corner, PARAMS.PatchDescriptor, PARAMS.DescriptorSetClustering));
// Push the configured enums into the library's static settings.
grc::Transform::eTransType_s = (grc::Enums::eTransformType)(int)PARAMS.TransformType;
// NOTE(review): the == comparison binds tighter than ?:, so this selects
// CV_INTER_LINEAR vs CV_INTER_NN from the configured warp method.
grc::Transform::warpMethod_s = (grc::Enums::eWarpMethod)(int)PARAMS.WarpMethod == grc::Enums::eWarpBilinear ? CV_INTER_LINEAR : CV_INTER_NN;
grc::cDescriptor::eDescriptorType_s = (grc::Enums::eDescriptorInvarianceType)(int)PARAMS.PatchDescriptorType;
grc::cDescriptor::eDescriptorSize_s = (grc::Enums::eDescriptorSize)(int)PARAMS.PatchDescriptorSize;
grc::ImageSource imSource((grc::Enums::eVideoSource)(int)PARAMS.VideoSource, PARAMS.Im.ImageDir.IMAGE_DIR.asSz(), PARAMS.Im.VideoFile.FILENAME.asSz(), (grc::Enums::eDisplayMode)(int)PARAMS.MosaicDestination, PARAMS.MosaicSaveDir.asSz()); //Pure-Translation-Extended "H:/Projects/External/DTA003 Image mosaicing/Test Data/Rangiora"
// NOTE(review): pIm is loaded from a hard-coded path but never used or
// released below — looks like leftover debug code; confirm and remove.
IplImage * pIm = cvLoadImage("/home/data/data/data/mosaicing/FixedCamera800/IMG_0004.JPG");
// NOTE(review): featureMatcher is never deleted in this function — possible
// leak; confirm whether another component takes ownership.
grc::FeatureMatcher2 * featureMatcher = 0;
//featureMatcher = new grc::BFFeatureMatcher(imSource, *pFeatureExtractor, PARAMS.BOWMatching);
CBoW bow(PARAMS.BOW);
featureMatcher = new grc::BoWFeatureMatcher(imSource, *pFeatureExtractor, bow, PARAMS.BOWMatching);
// BaySAC-based homography estimator; optionally draws correspondences.
grc::BaySACTransformEstimator transformEstimator(*featureMatcher, PARAMS.RANSACHomography, PARAMS.Im.getCamCalibrationMat(),
((bool)PARAMS.MarkCorrespondences) ? pImageLoader.get() : 0);
// Choose the mosaic-quality evaluation function (may legitimately stay null).
switch(PARAMS.EvaluationFunction)
{
case grc::Enums::eSSDEvaluation:
evaluationFunction = new grc::SumSquaredErrorEvaluationFunction;
break;
}
if(evaluationFunction == 0 && PARAMS.EvaluationFunction != grc::Enums::eNoEvaluation)
throw new grc::GRCException("main: No evaluation function initialised");
CvSize mosaicSize = cvSize(PARAMS.MosaicX,PARAMS.MosaicY);
// Transform engine: decides which frames to align and how, within mosaicSize.
grc::TransformEngine engine(transformEstimator, PARAMS.MaxFrames, PARAMS.IncrementalRendering ? 1 : 0, PARAMS.LM * PARAMS.LMIterations,
mosaicSize, PARAMS.SkipFrames ? grc::Enums::eChooseSequentialSkip : grc::Enums::eChooseSequential, PARAMS.FullFrameUpdateFreq, PARAMS.MaxSearchForTransform);
// Choose the renderer implementation from config.
switch(PARAMS.RendererType)
{
case grc::Enums::eBasicRenderer:
mosaicRenderer = new grc::Renderer(imSource, engine, mosaicSize, evaluationFunction);
break;
case grc::Enums::eFeatheredRenderer:
mosaicRenderer = new grc::FeatheredRenderer(imSource, engine, mosaicSize, PARAMS.FeatherRadius, evaluationFunction);
break;
case grc::Enums::eMultiScaleRenderer:
mosaicRenderer = new grc::MultiScaleFeatheredRenderer(imSource, engine, mosaicSize, PARAMS.FeatherRadius, evaluationFunction);
break;
case grc::Enums::eDijkstraRenderer:
mosaicRenderer = new grc::DijkstraCutRenderer(imSource, engine, mosaicSize, PARAMS.DijkstraScale, evaluationFunction);
break;
}
if(mosaicRenderer == 0)
throw new grc::GRCException("main: No renderer initialised");
//This ensures images are captured by this thread:
// Time the whole capture-and-mosaic run.
CStopWatch s;
s.startTimer();
imSource.doCaptureImages(&engine, mosaicRenderer);
s.stopTimer();
std::cout << s.getElapsedTime() << " seconds total" << endl;
//PARAMS.printUseSummary();
//PARAMS.printCfgFile();
}
catch(grc::GRCException * pEx)
{
// Report unhandled mosaicer exceptions, then fall through to the cleanup below.
std::cout << "ERROR: Unhandled exception: " << *(pEx->GetErrorMessage()) << std::endl << "Exiting...";
cvDestroyAllWindows();
#ifndef __GNUC__
Sleep(5000);
#endif
delete pEx;
}
//delete featureDetector;
delete mosaicRenderer;
delete evaluationFunction;
}
示例11: comm_request_cs
// Request entry to the critical section for `group`, blocking until entry is
// granted, and record stats (sequence number, wait time, message count) in
// comm_cs_log. Returns 0 on success, -1 if the group is unknown.
// NOTE(review): the sequence-number handling (new number = max seen + 1) and
// request/reply pattern look like a Ricart–Agrawala-style mutual-exclusion
// protocol with cached tokens — confirm against the rest of the module.
int comm_request_cs( unsigned int group ) {
int error = 0;
// Check to see if the reply is for a group that exists if not skip
group_map_t::iterator group_it = comm_groups.find( group );
if ( group_it == comm_groups.end() ) {
cout << "comm_request_cs Cannot request critical section for group " << group << endl;
error = -1;
return error;
}
// Request critical section for specified group. Record stats for waiting time
cs_entry_t req;
req.seq_num = 0;
req.wait_time = 0;
req.msg_count = 0;
CStopWatch timer;
// Template for the outgoing CS-request message; dest_ID is filled per peer.
msg_t outgoing;
outgoing.send_ID = NODE_INFO.id;
outgoing.dest_ID = 0;
outgoing.ts = comm_seq_num.num;
outgoing.group_ID = group;
outgoing.msg_type = MSG_T_REQUEST_CS;
timer.startTimer();
// Lock cs entries so they don't change while reading
// comm_seq_num.mutex.lock();
// group_it->second.cs.mutex.lock();
SHARED_VARS.lock();
group_it->second.cs.requesting = true;
// Update and record sequence numbers: new number is one past the highest seen.
comm_seq_num.num = comm_seq_num.max_num + 1;
outgoing.ts = comm_seq_num.num;
req.seq_num = comm_seq_num.num;
// Send a request to every peer in the group whose token we do not already hold.
for ( map<unsigned int, critical_section_t>::iterator it = group_it->second.cs.cs_map.begin();
it != group_it->second.cs.cs_map.end();
it++ ) {
if ( !(it->second.mutex_token) ) {
req.msg_count++;
outgoing.dest_ID = it->first;
comm_send( outgoing );
}
}
// Total messages exchanged is always 1 sent + 1 recv = 2*sent
req.msg_count *= 2;
// group_it->second.cs.mutex.unlock();
// comm_seq_num.mutex.unlock();
SHARED_VARS.unlock();
// If no requests were made, do not wait on any replies
// (otherwise block on the condition variable until entry is signalled OK).
if ( req.msg_count > 0 ) {
group_it->second.cs.entry_ok.wait( group_it->second.cs.entry_ok_mutex );
}
// Record wait time
timer.stopTimer();
req.wait_time = timer.getElapsedTime();
cout << "cs log entry: " << "seq_num " << req.seq_num
<< " wait_time = " << req.wait_time
<< " msg_count = " << req.msg_count << endl;
comm_cs_log.push( req );
return 0;
}
示例12: main
//.........这里部分代码省略.........
cout << "Receiving data ...\n";
//to stop the application after a specified time, get start time
long startTime = clock();
long endTime = startTime + NUM_SECONDS_RUNNING * CLOCKS_PER_SEC;
//this is the data processing thread; data received from the devices will be written out to a file here
//while (clock() < endTime)
timer.startTimer();
while (!_kbhit())
{
//to release CPU resources wait until the acquisition thread tells that new data has been received and is available in the buffer.
WaitForSingleObject(_newDataAvailable.m_hObject, 1000);
while (_buffer.GetSize() >= numScans * numChannels)
{
//read data from the application buffer and stop application if buffer overrun
if (!ReadData(receivedData, numScans, &errorCode, &errorMessage))
{
if (errorCode == 2)
break;
else
throw errorMessage;
}
timer.stopTimer();
//cout<<"t: "<<setprecision(12)<<timer.getElapsedTime()<<endl;
//stringstream ss;
//ss<<setprecision(9)<<timer.getElapsedTime()<<" ";
//for (size_t i=0; i<numScans * numChannels; i++) {
// ss<<receivedData[i]<<" ";
//}
//zmq::message_t message(ss.str().size()+1);
zmq::message_t message(sizeof(double) + sizeof(float)* numScans * numChannels);
//memcpy(message.data(), ss.str().c_str(), ss.str().size()+1);
double ct = timer.getElapsedTime();
memcpy(message.data(), &ct, sizeof(double));
memcpy((char *) (message.data())+sizeof(double), receivedData, sizeof(float)* numScans * numChannels);
publisher.send(message);
//timer.startTimer();
//cout<<"m:"<<ss.str().c_str()<<endl;
//write data to file
//outputFile.Write(receivedData, numScans * numChannels * sizeof(float));
//cout<<"d1: "<<receivedData[64]<<endl;
//cout<<sizeof(float)<<endl;
}
}
}
catch (string& exception)
{
//an exception occured during data acquisition
cout << "\t" << exception << "\n";
//continue execution in every case to clean up allocated resources (since no finally-block exists)
}
//
//in every case, stop data acquisition and clean up allocated resources
//since no finally statement exists in c++ and we can't mix it with the C __finally statement, the clean-up code follows the try-catch block.
//
//stop data acquisition
StopAcquisition();
//close output file
//outputFile.Close();
}
catch (string& exception)
{
//an exception occured
cout << "\t" << exception << "\n";
//continue execution in every case to clean up allocated resources (since no finally-block exists)
}
cout << "Closing device...\n";
//close device
if (!GT_CloseDevice(&_hDevice))
cout << "Error on GT_CloseDevice: couldn't close device" << GetDeviceErrorMessage() << "\n";
//free allocated resources
delete [] receivedData;
cout << "Clean up complete. Bye bye!" << "\n\n";
}
catch (string& exception)
{
//in case an error occured during opening and initialization, report it and stop execution
cout << "\t" << exception << "\n\n";
}
publisher.close();
cout << "Press ENTER to exit...";
getchar();
}
示例13: testLimitedClasses_SeparateTrainingAndTestDataset
void testLimitedClasses_SeparateTrainingAndTestDataset(bool useLiveSpiking) {
CStopWatch timer;
createDirIfNotExists(OUTPUT_DIR);
if (promptYN("Clear down output directory (" + toString(OUTPUT_DIR) + ") ?")) clearDirectory(OUTPUT_DIR);
vector<int> requiredClasses;
addAllToVector(requiredClasses,"0 1 2 3 4 5 6 7 8 9");
//vector<UINT> observationsPerClass;
//addAllToVector(observationsPerClass,"500");
vector<UINT> observationsExposureMs;
addAllToVector(observationsExposureMs,"120");
vector<UINT> vrCount;
addAllToVector(vrCount,"100 200 300 400 500 600");
vector<UINT> clusterSizes;
addAllToVector(clusterSizes,"20");
for (UINT cl = 0; cl < clusterSizes.size(); ++cl) {
g_clusterSize = clusterSizes[cl];
for (UINT j = 0; j < observationsExposureMs.size(); ++j) {
g_ObservationsExposureMs = observationsExposureMs[j];
for (UINT i = 0; i < vrCount.size(); ++i) {
g_NumVR = vrCount[i];
clearDirectory(DATA_CACHE_DIR);
//ensure we work out the sample distances for the set in question
fileDelete(getMaxMinDistanceFilepath(OBSERVATIONS_DATA_DIR,g_ObservationsUsedPerClass * requiredClasses.size()));
string uniqueRunId = generateUniqueRunId();
stringstream outputDir;
outputDir << OUTPUT_DIR << SLASH << uniqueRunId << "[clust" << toString(g_clusterSize) << "][exp" <<g_ObservationsExposureMs << "ms][" << g_NumVR << "VRs]";
createDirIfNotExists(outputDir.str());
stringstream summaryText;
printSeparator();
summaryText << "VRs: " << g_NumVR << endl;
summaryText << "Observations per class: " << g_ObservationsUsedPerClass << endl;
summaryText << "Observation exposure (ms): " << g_ObservationsExposureMs << endl;
summaryText << "Cluster size: " << g_clusterSize << endl;
printSeparator();
string summaryPath = outputDir.str() + SLASH + uniqueRunId + ".txt";
ofstream summary(summaryPath.c_str());
summary << summaryText.str() << endl;
//summary.close();
cout << summaryText.str() << endl;
cout << (useLiveSpiking?"Using LIVE spiking":"Using spike source files""Using spike source files") << " for input." << endl;
//load all the training observation data and class labelling
string dataPath = OBSERVATIONS_DATA_DIR + SLASH + FILENAME_ALL_SAMPLES_TRAINING;
float * fullObservationsData = loadMNISTdata(dataPath,TRAINING_DATASET_SIZE); //NB: this is going on the heap
vector<int> classLabels;
string labelspath = OBSERVATIONS_DATA_DIR + SLASH + FILENAME_CLASS_LABELS_TRAINING;
loadMNISTclassLabels(labelspath,TRAINING_DATASET_SIZE,classLabels);
vector<int> observationsTrainingSet;
//getFirstNInstancesPerClass(g_ObservationsUsedPerClass,classLabels,requiredClasses,observationsTrainingSet);
getFirstNPerClassInOrder(g_ObservationsUsedPerClass,classLabels,requiredClasses,observationsTrainingSet);
string vrFilename = toString(requiredClasses,"-") + "_" + toString(g_NumVR) + "VRs_" + toString(g_ObservationsUsedPerClass) + "ObsPerClass.dat";
string vrPath = OBSERVATIONS_DATA_DIR + SLASH + vrFilename;
if (!fileExists(vrPath) || !USE_VR_CACHE) {
cout << endl << "Creating custom VR file " << vrFilename << endl;
//create and save a VR set from the current training set only, using SOM/neural gas
string trainingDataFilename = "tmpTrainingDataset.csv";
createObservationsDataFile(observationsTrainingSet, fullObservationsData,OBSERVATIONS_DATA_DIR, trainingDataFilename);
runStdNeuralGas(OBSERVATIONS_DATA_DIR, trainingDataFilename, vrFilename, COMMA, g_NumVR, NUM_EPOCHS);
} else {
cout << endl << "Located compatible VR file " << vrPath << endl;
}
//run training / learning
timer.startTimer();
presentObservationsToModel(useLiveSpiking, true,observationsTrainingSet,classLabels,vrPath, fullObservationsData,outputDir.str());
timer.stopTimer();
float realTimeElapsed = timer.getElapsedTime();
summary << "Training time (sec): " << realTimeElapsed << endl;
delete[] fullObservationsData;
classLabels.clear();
clearDirectory(DATA_CACHE_DIR);//don't want to reuse VR responses as id's will overlap with training set
//run testing
//load all the testing observation data and class labelling
dataPath = OBSERVATIONS_DATA_DIR + SLASH + FILENAME_ALL_SAMPLES_TESTING;
fullObservationsData = loadMNISTdata(dataPath,TESTING_DATASET_SIZE);
labelspath = OBSERVATIONS_DATA_DIR + SLASH + FILENAME_CLASS_LABELS_TESTING;
loadMNISTclassLabels(labelspath,TESTING_DATASET_SIZE,classLabels);
//.........这里部分代码省略.........
示例14: buildRunSpyNNakerLiveSpikeInjectionModel
bool buildRunSpyNNakerLiveSpikeInjectionModel(
bool learning, int numVR, int numClasses,
float * vrRateCodes, float * classActivationRateCodes,
int numObservations, int observationExposureTimeMs,
string outputDir,int clusterSize,
vector<int> & classifierDecision) {
//checkContents("vrRateCodes",vrRateCodes,numObservations * numVR,numVR,data_type_float,2);
bool boardPresent = ping(SPINNAKER_BOARD_IP_ADDR);
if (!boardPresent) {
cerr << "Nothing appears to be connected on specified IP address of " << SPINNAKER_BOARD_IP_ADDR << endl;
exit(-1);
}
//Build and kick off PyNN model running on spinnaker board
vector<string> args;
args.push_back((learning?"True":"False"));
args.push_back(toString(numVR));
args.push_back(toString(numClasses));
args.push_back(toString(RN_FIRST_SPIKE_INJECTION_PORT)); //the (first) port where the spinnkaer RN poluation will listen for UDP spikes
args.push_back(toString(RN_SPIKE_INJECTION_POP_LABEL)); //the name to be used for this pop. Needs to be matched as it is used to extract neuron id's from database
args.push_back(toString(CLASS_ACTIVATION_SPIKE_INJECTION_PORT)); //the port where the spinnaker class activcation poluation will listen for UDP spikes during learning
args.push_back(toString(CLASS_ACTIVATION_SPIKE_INJECTION_POP_LABEL));
args.push_back(toString(numObservations));
args.push_back(toString(observationExposureTimeMs));
args.push_back(outputDir);
args.push_back(toString(clusterSize));
args.push_back(toString(PYNN_MODEL_EXTRA_STARTUP_WAIT_SECS));//start up wait should be added on to the run time to avoid finishing too early while spikes are still being sent
//wait 1 second before running asychronously to give spike sender chance to start up and invoke handshake
launchPythonScript(PYTHON_DIR,PYNN_MODEL_SCRIPT_LIVE_SPIKING, args, 1, 0, true, PYTHON_USE_PROFILER);
//setup up holder for population injections
vector<spinnio::SpikeInjectionPopulation*> sendPopulations;
vector<spinnio::SpikeReceiverPopulation*> receivePopulations;
//we may have to set up more than one sender popn as spinnaker seems to have a size limit
vector<int>senderPopnSizes;
separateAcross(numVR,MAX_SIZE_SPIKE_INJECTION_POP,senderPopnSizes);
cout << senderPopnSizes.size() << " spike injection populations will be set up of sizes " << toString(senderPopnSizes,SPACE) << endl;
for (int sendPop = 0; sendPop < senderPopnSizes.size(); ++sendPop) {
string label = RN_SPIKE_INJECTION_POP_LABEL + toString(sendPop);
int port = RN_FIRST_SPIKE_INJECTION_PORT + sendPop;
int size = senderPopnSizes[sendPop];
spinnio::SpikeInjectionPopulation * spikeInjectionPopRN = new spinnio::SpikeInjectionPopulation(port,label, size);
sendPopulations.push_back(spikeInjectionPopRN);
}
spinnio::SpikeInjectionPopulation * spikeInjectionPopClassActivation = NULL;
if (learning) { //send activation also
spikeInjectionPopClassActivation = new spinnio::SpikeInjectionPopulation(CLASS_ACTIVATION_SPIKE_INJECTION_PORT,CLASS_ACTIVATION_SPIKE_INJECTION_POP_LABEL, numClasses);
sendPopulations.push_back(spikeInjectionPopClassActivation);
} else { //testing
//set up a receiver on one shared port for all the AN populations (i.e. one per class)
string spinnReceivePopLabelTemplate = "popClusterAN_";
int hostReceivePort = HOST_RECEIVE_PORT; //port on host where spikes for this pop will get sent
for (unsigned int cls = 0; cls < numClasses; ++cls) {
string label = spinnReceivePopLabelTemplate + toString(cls);
spinnio::SpikeReceiverPopulation * spikeReceivePop = new spinnio::SpikeReceiverPopulation(hostReceivePort,label);
receivePopulations.push_back(spikeReceivePop);
}
}
string dbPath = PYTHON_DIR + toString("/application_generated_data_files/latest/input_output_database.db");
//create controller that will synchronise spike train inputs for both populations at once
spinnio::SpikeInjectionMultiPopulation * spikeInjectionMultiPop = new spinnio::SpikeInjectionMultiPopulation(SPINNAKER_DATABASE_PORT,SPINNAKER_BOARD_IP_ADDR,dbPath,DT,sendPopulations,receivePopulations);
spikeInjectionMultiPop->waitUntilModelReady();//SpiNNaker will send another handshake to the database port whn model starts to run
spikeInjectionMultiPop->endDatabaseCommunication();
sleep(PYNN_MODEL_EXTRA_STARTUP_WAIT_SECS);
vector<int> presentationTimesMs;
CStopWatch timer;
timer.startTimer();
cout << "Sending observations: " << endl;
for (int ob = 0; ob < numObservations; ++ob) {
int offset = 0;
for (int sendPop = 0; sendPop < senderPopnSizes.size(); ++sendPop) {
spinnio::SpikeInjectionPopulation * spikeInjectionPopRN = sendPopulations[sendPop];
int rateCodeIndex = (ob * numVR) + offset;
spikeInjectionPopRN->setSpikeRates(& vrRateCodes[rateCodeIndex]);//point at the block of rate codes for this observation
offset += spikeInjectionPopRN->getTotalNeurons();
}
if (learning) spikeInjectionPopClassActivation->setSpikeRates(& classActivationRateCodes[ob * numClasses]);
//note the elapsed time (relative to the first observation)
timer.stopTimer();
double elapsedTimeMs = 1000.0 * timer.getElapsedTime();
//these will be used as the time boundaries for evaluating the classifier output
//presentationTimesMs.push_back(elapsedTimeMs);
//tweak the time for next observation to keep on track with overall time. This helps with knowing how long the whole run will take. For ten thousand samples, a 1ms error puts it 10 sec away from the expected finish
int expectedElapsedTimeMs = ob * observationExposureTimeMs; //this is where we should be
int tweakMs = elapsedTimeMs - expectedElapsedTimeMs; //generally seems to jitter / fall behind around 1ms for every 200ms
tweakMs = clip(tweakMs,-5,5);//Don't adjust by too much on any one observation.
//.........这里部分代码省略.........
示例15: main
//.........这里部分代码省略.........
char dtstring[40];
int c;
int names_count=0;
int classno=0;
double sample_percent=.1;
double rm=double(RAND_MAX);
int use_number;
int* use_index;
use_index=(int*)malloc(m*sizeof(int));
int tcount=0;
int turbo=0;
float ff_l1_norm=0;
while( 1 ) {
//if (tcount++>4) break;
frame = cvQueryFrame(capture);
if( !frame ) break;
cvCvtColor(frame,outbw,CV_BGR2GRAY);
cvCvtScale(outbw,outg,.0039,0);//scale to 1/255
cvResize(outg,outgs);
x=(float*)outgs->imageData;
#ifdef PROFILE_MODE
//start_time = clock() - start_time;
watch.stopTimer();
sprintf(dtstring,"FPS = %.4f", 1.0 / watch.getElapsedTime());
cvPutText(outgs,dtstring , cvPoint(10, 60), &font, cvScalar(0, 0, 0, 0));
watch.startTimer();
//start_time = clock();
#endif
sprintf(dtstring,"dt = %.8f",dt);
cvPutText(outgs,dtstring , cvPoint(10, 40), &font, cvScalar(0, 0, 0, 0));
cvShowImage("capture", outgs);
rm=sample_percent*((double)RAND_MAX);
use_number=0;
for (ii=0;ii<m;ii++){
if (rand()<rm){
use_index[use_number]=ii;
use_number++;
}
}
//fprintf(stderr,"use_number=%d\n",use_number);
if (turbo<5) {
grasta_step (B,x,w,m,n,dt,rho,20);
}else{
grasta_step_subsample (B,x,w,m,n,dt,rho,40,use_index,use_number);
}
sgemv("N",&m,&n,&one,B,&m,w,&oneinc,&zero,bb,&oneinc);
// TODO examine what this loop is actually checking for -Steve
// Update: looks like checking for changes in the L1 norm
ff_l1_norm=0;
for (ii=0;ii<m;ii++){
ff[ii]=x[ii]-bb[ii];