This article collects typical usage examples of the C++ CascadeClassifier class. If you have been wondering what exactly the C++ CascadeClassifier class does, how to use it, or where to find examples of it in use, the curated class code samples below may help.
Shown below are 15 code examples of the CascadeClassifier class, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code samples.
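Before the examples, here is a minimal sketch of the canonical workflow most of them share: load a trained XML cascade, preprocess to grayscale with histogram equalization, call detectMultiScale, and draw the hits. The cascade and image file names are placeholders, not part of any example below:
#include <opencv2/opencv.hpp>
#include <vector>
using namespace cv;
int main()
{
// Load a trained cascade model (file name is a placeholder)
CascadeClassifier detector;
if( !detector.load("haarcascade_frontalface_alt.xml") )
return -1;
// Read an image and normalize lighting, as most examples below do
Mat img = imread("input.jpg"), gray;
if( img.empty() )
return -1;
cvtColor(img, gray, COLOR_BGR2GRAY);
equalizeHist(gray, gray);
// Detect: scaleFactor 1.1, minNeighbors 3, no flags, minimum size 30x30
std::vector<Rect> hits;
detector.detectMultiScale(gray, hits, 1.1, 3, 0, Size(30, 30));
// Draw each detection and show the result
for( size_t k = 0; k < hits.size(); k++ )
rectangle(img, hits[k], Scalar(0, 0, 255), 2);
imshow("detections", img);
waitKey(0);
return 0;
}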
Example 1: main
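A note before the code: this snippet uses `cascade` and `window_name` without declaring them, so they are presumably file-scope globals along these lines (the window title is a guess):
CascadeClassifier cascade;
string window_name = "Detection results";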
int main( int argc, const char** argv )
{
// Check that the required arguments are given
if( argc < 4 ) {
cout << "Usage of model detection software: " << endl;
cout << "detect_objects.exe <object_model.xml> <test_images.txt> <detection_result.txt>" << endl;
return 0;
}
// Load the cascade model into the model container
string model_name = argv[1];
if( !cascade.load( model_name ) ) {
cout << "Error loading the trained model from the provided model file! " << endl;
return -1;
};
// Retrieve the filenames of all the test images
string test_images = argv[2];
ifstream input (test_images.c_str());
string current_line;
vector<string> filenames;
while ( getline(input, current_line) ) {
vector<string> line_elements;
stringstream temp (current_line);
string first_element;
getline(temp, first_element, ' ');
filenames.push_back(first_element);
}
int number_input_samples = filenames.size();
input.close();
// Create an output file for storing detections
string location_output = argv[3];
ofstream output_file (location_output.c_str());
// Loop over each image in the test image sequence and perform detection
for(size_t i = 0; i < filenames.size(); i++) {
// Read in the current image
Mat current_frame = imread(filenames[i]);
// ------------------------------------------------------------------------
// PERFORM THE ACTUAL DETECTION
// ------------------------------------------------------------------------
// Specific variables for the actual detection phase
vector<Rect> objects;
Mat frame_gray;
// Convert the input frame to grayscale and apply lighting normalization using histogram equalization
cvtColor( current_frame, frame_gray, CV_BGR2GRAY );
equalizeHist( frame_gray, frame_gray );
// Detect object in a given image
// Parameters should be checked at : http://docs.opencv.org/modules/objdetect/doc/cascade_classification.html?highlight=detectmultiscale#void%20CascadeClassifier::detectMultiScale%28const%20Mat&%20image,%20vector%3CRect%3E&%20objects,%20double%20scaleFactor,%20int%20minNeighbors,%20int%20flags,%20Size%20minSize,%20Size%20maxSize%29
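// Quick recap of the detectMultiScale parameters used below:
//   scaleFactor  (1.05 here) - image shrink step between pyramid levels; closer to 1.0 means a slower but finer search
//   minNeighbors (5 here)    - overlapping candidate windows required to keep a detection; 0 returns every raw hit
//   flags        (0 here)    - legacy flags such as CASCADE_SCALE_IMAGE, mostly relevant to old-style cascades
//   minSize / maxSize        - optional lower and upper bounds on the detected object size in pixels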
// The call below would keep every raw detection (no overlap grouping):
//cascade.detectMultiScale( frame_gray, objects, 1.05, 0, 0);
// The call below keeps only detections with 5 or more matching overlaps:
cascade.detectMultiScale( frame_gray, objects, 1.05, 5, 0);
// The call below would additionally constrain the minimum and maximum object size:
//cascade.detectMultiScale( frame_gray, objects, 1.05, 5, 0, Size(10, 25), Size(100, 250));
// ------------------------------------------------------------------------
// VISUALIZE THE ACTUAL DETECTION
// ------------------------------------------------------------------------
// Visualize detections on the input frame and show in the given window
for( size_t j = 0; j < objects.size(); j++ )
{
// Draw a rectangle around each detection on the frame
// Border 1 pixel wide in red (BGR format)
rectangle(current_frame, Point(objects[j].x, objects[j].y), Point(objects[j].x + objects[j].width, objects[j].y + objects[j].height), Scalar(0, 0, 255), 1);
}
// Show the result
imshow( window_name, current_frame );
waitKey(0);
// ------------------------------------------------------------------------
// SAVE THE DETECTION RESULTS
// Universal format
// filename #detections x1 y1 w1 h1 x2 y2 w2 h2 ... xN yN wN hN
// ------------------------------------------------------------------------
output_file << filenames[i];
output_file << " " << objects.size();
for(size_t j = 0; j < objects.size(); j++) {
output_file << " " << objects[j].x << " " << objects[j].y << " " << objects[j].width << " " << objects[j].height;
}
output_file << endl;
}
output_file.close();
return 0;
}
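As a side note, reading the universal detection format back in is symmetric to the writing loop above. A minimal parsing sketch (the helper name is illustrative, not part of the example):
#include <sstream>
#include <string>
#include <vector>
#include <opencv2/core/core.hpp>
// Parse one line of "filename #detections x1 y1 w1 h1 ..." (hypothetical helper)
bool parse_detection_line(const std::string& line, std::string& filename, std::vector<cv::Rect>& boxes)
{
std::stringstream ss(line);
int count = 0;
if( !(ss >> filename >> count) ) return false;
for( int k = 0; k < count; k++ ) {
cv::Rect r;
if( !(ss >> r.x >> r.y >> r.width >> r.height) ) return false;
boxes.push_back(r);
}
return true;
}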
Example 2: detectAndDraw
void detectAndDraw( Mat& img, CascadeClassifier& cascade,
CascadeClassifier& nestedCascade,
double scale, bool tryflip )
{
int i = 0;
double t = 0;
vector<Rect> faces, faces2;
const static Scalar colors[] = { CV_RGB(0,0,255),
CV_RGB(0,128,255),
CV_RGB(0,255,255),
CV_RGB(0,255,0),
CV_RGB(255,128,0),
CV_RGB(255,255,0),
CV_RGB(255,0,0),
CV_RGB(255,0,255)} ;
Mat gray, smallImg( cvRound (img.rows/scale), cvRound(img.cols/scale), CV_8UC1 );
cvtColor( img, gray, COLOR_BGR2GRAY );
resize( gray, smallImg, smallImg.size(), 0, 0, INTER_LINEAR );
equalizeHist( smallImg, smallImg );
t = (double)cvGetTickCount();
cascade.detectMultiScale( smallImg, faces,
1.1, 2, 0
// |CASCADE_FIND_BIGGEST_OBJECT
// |CASCADE_DO_ROUGH_SEARCH
|CASCADE_SCALE_IMAGE
,
Size(30, 30) );
if( tryflip )
{
flip(smallImg, smallImg, 1);
cascade.detectMultiScale( smallImg, faces2,
1.1, 2, 0
// |CASCADE_FIND_BIGGEST_OBJECT
//|CASCADE_DO_ROUGH_SEARCH
|CASCADE_SCALE_IMAGE
,
Size(30, 30) );
for( vector<Rect>::const_iterator r = faces2.begin(); r != faces2.end(); r++ )
{
faces.push_back(Rect(smallImg.cols - r->x - r->width, r->y, r->width, r->height));
}
}
t = (double)cvGetTickCount() - t;
printf( "detection time = %g ms\n", t/((double)cvGetTickFrequency()*1000.) );
for( vector<Rect>::const_iterator r = faces.begin(); r != faces.end(); r++, i++ )
{
Mat smallImgROI;
vector<Rect> nestedObjects;
Point center;
Scalar color = colors[i%8];
int radius;
double aspect_ratio = (double)r->width/r->height;
if( 0.75 < aspect_ratio && aspect_ratio < 1.3 )
{
Mat newImage;
img.copyTo(newImage);
center.x = cvRound((r->x + r->width*0.5)*scale);
center.y = cvRound((r->y + r->height*0.5)*scale);
radius = cvRound((r->width + r->height)*0.25*scale);
//circle( img, center, radius, color, 3, 8, 0 );
radius=radius*0.8;
int hf=0.2*radius;
printf("size=%d\n",radius*2);
Rect roi(center.x-radius,center.y-radius,2*radius,2*radius+hf);
int se=20;
Rect roi_out(center.x-radius-se,center.y-radius-se,2*radius+2*se,2*radius+2*se+hf);
Rect roi_in(center.x-radius+se,center.y-radius+se,2*radius-2*se,2*radius-2*se);
Mat mask_smooth=Mat::zeros(img.rows,img.cols,CV_8U);
mask_smooth(roi_out)=255;
mask_smooth(roi_in)=0;
Mat image_roi=img(roi);
Mat mask=Mat::zeros(img.rows,img.cols,CV_8U);
mask(roi)=1;
Scalar avg_pic_intensity=mean(image_roi);
newImage(Rect(0,0,img.cols,img.rows))=avg_pic_intensity;
img.copyTo(newImage,mask);
Mat newImageBlurred;
blur(newImage,newImageBlurred,Size(20,20));
blur(mask_smooth,mask_smooth,Size(20,20));
newImageBlurred.copyTo(newImage,mask_smooth);
//newImage(roi)=image_roi;
imshow("blurred",newImageBlurred);
imwrite("OutputImage.jpg",newImage);
imshow("new",newImage);
imshow("mask",mask_smooth);
waitKey(0);
std::cout<<avg_pic_intensity;
//......... some code omitted here .........
Example 3: main
int main (int argc, char** argv) {
Aria::init();
ArRobot robot;
ArSonarDevice sonar;
ArArgumentParser parser(&argc, argv);
parser.loadDefaultArguments();
ArRobotConnector robotConnector(&parser, &robot);
if (!robotConnector.connectRobot()) {
ArLog::log(ArLog::Terse, "Could not connect to the robot.");
if(parser.checkHelpAndWarnUnparsed())
{
Aria::logOptions();
Aria::exit(1);
return 1;
}
}
ArSonarDevice sonarDev;
ArPose* poseList = readPostitions("positions.txt");
robot.runAsync(true);
robot.enableMotors();
robot.moveTo(ArPose(0,0,0));
robot.comInt(ArCommands::ENABLE, 1);
robot.addRangeDevice(&sonarDev);
ArActionGoto gotoPoseAction("goto", ArPose(0, 0, 0), 200);
ArActionAvoidFront avoidFront("avoid front");
ArActionStallRecover stallRecover("stall recover");
robot.addAction(&gotoPoseAction, 50);
robot.addAction(&avoidFront, 60);
robot.moveTo(ArPose(0,0,0));
// Caution: poseList is a pointer, so ARRAY_SIZE() evaluates sizeof(ArPose*)/sizeof(ArPose), not the number of poses read
int length = ARRAY_SIZE(poseList);
cout << "length: " << length;
ArServerBase server;
ArServerSimpleOpener simpleOpener(&parser);
char fileDir[1024];
ArUtil::addDirectories(fileDir, sizeof(fileDir), Aria::getDirectory(),
"ArNetworking/examples");
// first open the server up
if (!simpleOpener.open(&server, fileDir, 240))
{
if (simpleOpener.wasUserFileBad())
printf("Bad user/password/permissions file\n");
else
printf("Could not open server port\n");
exit(1);
}
ArServerInfoRobot serverInfo(&server, &robot);
GotoGoal gotoGoal(&robot, &sonar, &server, &serverInfo);
gotoGoal.init(argc, argv);
float angle = 0;
VideoCapture cap;
cap.open(0);
Rect trackWindow;
// flag used to check whether the ball has been found
bool checkObject = false;
int hsize = 16;
namedWindow( "threshold", 0 );
namedWindow( "trackbar", 0 );
namedWindow( "Histogram", 0 );
namedWindow( "main", 0 );
createTrackbar( "Vmin", "trackbar", &vmin, 256, 0 );
createTrackbar( "Vmax", "trackbar", &vmax, 256, 0 );
createTrackbar( "Smin", "trackbar", &smin, 256, 0 );
CascadeClassifier c;
c.load("cascade.xml");
Mat frame, hsv, hue, mask, hist, histimg = Mat::zeros(200, 320, CV_8UC3), backproj;
float vel = 0;
int i = 0;
while(1)
{
cap >> frame;
if( frame.empty() ){
cout<<"error camera"<<endl;
break;
}
frame.copyTo(image);
cvtColor(image, hsv, COLOR_BGR2HSV);
int _vmin = vmin, _vmax = vmax;
inRange(hsv, Scalar(0, smin, MIN(_vmin,_vmax)), Scalar(180, 256, MAX(_vmin, _vmax)), mask);
gotoPoseAction.setGoal(poseList[i]);
while (!gotoPoseAction.haveAchievedGoal())
{
ArLog::log(ArLog::Normal, "goal(%.2f, %0.2f) x = %.2f, y = %.2f", poseList[i].getX(), poseList[i].getY(), robot.getX(), robot.getY());
// if (!checkObject)
checkObject = detect(frame, c);
if (checkObject)
cout <<"Object detected"<<endl;
else
cout <<"No object detected"<<endl;
if (checkObject) {
if(trackObject(hsv, mask)) {
float d = distance();
if (d < 250) {
gotoGoal.move(-200);
} else if ( d >= 250 && d <= 300) {
gotoGoal.stop();
}
//......... some code omitted here .........
Example 4: detectAndDraw
Mat detectAndDraw( Mat& img, CascadeClassifier& cascade,
CascadeClassifier& nestedCascade,
double scale, bool tryflip )
{
Mat img2;
Mat newROI;
//Conversion to rg chromaticity space
img.copyTo(img2);
Mat M;
img.copyTo(M);
//for( int i = 0; i < img.rows; i++ ) {
// const int* ptr = img2.ptr<int>(i);
// float* dptr = M.ptr<float>(i);
// for( int j = 0; j < img.cols; j++ ) {
// dptr[3*j] = ptr[3*j+2]*1.0 /(ptr[3*j+0] + ptr[3*j+1] + ptr[3*j+2]);
// dptr[3*j+1] = ptr[3*j+1]*1.0 /(ptr[3*j+0] + ptr[3*j+1] + ptr[3*j+2]);
// dptr[3*j+2] = (ptr[3*j+0] + ptr[3*j+1] + ptr[3*j+2]);
// }
//}
//cvtColor(img,img2,COLOR_BGR2GRAY);
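// Note on the commented-out conversion above: rg chromaticity maps each pixel to
//   r = R/(R+G+B), g = G/(R+G+B), making the colour independent of intensity.
// As written it would also misread memory: img2 holds 8-bit CV_8UC3 data, so it
// must be accessed through uchar pointers (img2.ptr<uchar>(i)), and M would have
// to be allocated as CV_32FC3 before float results could be stored into it.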
int i = 0;
double t = 0;
vector<Rect> faces, faces2;
const static Scalar colors[] = { CV_RGB(0,0,255),
CV_RGB(0,128,255),
CV_RGB(0,255,255),
CV_RGB(0,255,0),
CV_RGB(255,128,0),
CV_RGB(255,255,0),
CV_RGB(255,0,0),
CV_RGB(255,0,255)} ;
Mat gray, smallImg( cvRound (img.rows/scale), cvRound(img.cols/scale), CV_8UC1 );
cvtColor( img, gray, COLOR_BGR2GRAY );
resize( gray, smallImg, smallImg.size(), 0, 0, INTER_LINEAR );
equalizeHist( smallImg, smallImg );
t = (double)cvGetTickCount();
cascade.detectMultiScale( smallImg, faces,
1.1, 2, 0
|CASCADE_FIND_BIGGEST_OBJECT
//|CASCADE_DO_ROUGH_SEARCH
|CASCADE_SCALE_IMAGE
,
Size(30, 30) );
if( tryflip )
{
flip(smallImg, smallImg, 1);
cascade.detectMultiScale( smallImg, faces2,
1.1, 2, 0
|CASCADE_FIND_BIGGEST_OBJECT
//|CASCADE_DO_ROUGH_SEARCH
|CASCADE_SCALE_IMAGE
,
Size(30, 30) );
for( vector<Rect>::const_iterator r = faces2.begin(); r != faces2.end(); r++ )
{
faces.push_back(Rect(smallImg.cols - r->x - r->width, r->y, r->width, r->height));
}
}
t = (double)cvGetTickCount() - t;
printf( "detection time = %g ms\n", t/((double)cvGetTickFrequency()*1000.) );
for( vector<Rect>::const_iterator r = faces.begin(); r != faces.end(); r++, i++ )
{
Mat smallImgROI;
vector<Rect> nestedObjects;
Point center;
Point faceCenter;
Scalar color = colors[i%8];
int radius;
double aspect_ratio = (double)r->width/r->height;
if( 0.75 < aspect_ratio && aspect_ratio < 1.3 )
{
center.x = cvRound((r->x + r->width*0.5)*scale);
center.y = cvRound((r->y + r->height*0.5)*scale);
faceCenter = center;
radius = cvRound((r->width + r->height)*0.25*scale);
circle( img, center, radius, color, 3, 8, 0 );
}
else
rectangle( img, cvPoint(cvRound(r->x*scale), cvRound(r->y*scale)),
cvPoint(cvRound((r->x + r->width-1)*scale), cvRound((r->y + r->height-1)*scale)),
color, 3, 8, 0);
if( nestedCascade.empty() )
continue;
smallImgROI = smallImg(*r);
nestedCascade.detectMultiScale( smallImgROI, nestedObjects,
1.1, 2, 0
//|CASCADE_FIND_BIGGEST_OBJECT
//|CASCADE_DO_ROUGH_SEARCH
//|CASCADE_DO_CANNY_PRUNING
|CASCADE_SCALE_IMAGE,
Size(30, 30) );
int nestedFlag=0;
Scalar s1,s2;
//......... some code omitted here .........
Example 5: detect
void detect(Mat& img, CascadeClassifier& cascade,
CascadeClassifier& nestedCascade, double scale) {
int i = 0;
double t = 0;
vector<Rect> faces;
Mat gray, smallImg;
cvtColor(img, gray, CV_BGR2GRAY);
resize(gray, smallImg, Size(), 1 / scale, 1 / scale, INTER_LINEAR);
equalizeHist(smallImg, smallImg);
t = (double) cvGetTickCount();
cascade.detectMultiScale(smallImg, faces, 1.1, 2, 0
//|CV_HAAR_FIND_BIGGEST_OBJECT
//|CV_HAAR_DO_ROUGH_SEARCH
| CV_HAAR_SCALE_IMAGE, Size(30, 30));
t = (double) cvGetTickCount() - t;
// printf("detection time = %g ms\n",
// t / ((double) cvGetTickFrequency() * 1000.));
// Bounding boxes of all eyes found this frame, in full-size camera space
vector<Rect> allEyes;
for (vector<Rect>::const_iterator r = faces.begin(); r != faces.end();
r++, i++) {
Mat smallImgROI;
vector<Rect> nestedObjects;
if (nestedCascade.empty())
continue;
smallImgROI = smallImg(*r);
nestedCascade.detectMultiScale(smallImgROI, nestedObjects, 1.1, 2, 0
//|CV_HAAR_FIND_BIGGEST_OBJECT
//|CV_HAAR_DO_ROUGH_SEARCH
//|CV_HAAR_DO_CANNY_PRUNING
| CV_HAAR_SCALE_IMAGE, Size(30, 30));
for (vector<Rect>::const_iterator e = nestedObjects.begin();
e != nestedObjects.end(); ++e) {
allEyes.push_back((*e + r->tl()) * scale);
}
}
// Find which tracked eyes are closest to the observed eyes this frame
vector<pair<int, int> > matching = assignTracksToEyes(eyeTracks, allEyes);
vector<bool> foundTracks(eyeTracks.size());
vector<bool> foundEyes(allEyes.size());
for (vector<pair<int, int> >::const_iterator trackToEye = matching.begin();
trackToEye != matching.end(); ++trackToEye) {
foundTracks[trackToEye->first] = true;
foundEyes[trackToEye->second] = true;
const Rect& eyeSrc = allEyes[trackToEye->second];
EyeTrack& track = eyeTracks[trackToEye->first];
record(track, img, eyeSrc);
}
// Forget any tracks that haven't been picked up for a while
for (int i = 0; i < eyeTracks.size(); ++i) {
if (!eyeTracks[i].lastSeen.empty() && !foundTracks[i]) {
int framesSinceLastSeen = ++(eyeTracks[i].numFramesSinceLastSeen);
if (framesSinceLastSeen > 5)
eyeTracks[i] = EyeTrack();
}
}
// Put any new eyes into a free slot, if possible
int j = 0;
for (int i = 0; i < allEyes.size(); ++i) {
if (!foundEyes[i]) {
while (j < eyeTracks.size() && !eyeTracks[j].lastSeen.empty())
j++;
if (j == eyeTracks.size())
break;
record(eyeTracks[j], img, allEyes[i]);
}
}
for (vector<EyeTrack>::iterator t = eyeTracks.begin(); t != eyeTracks.end();
++t) {
if (t->frames.size() >= 10) {
t->frames.erase(t->frames.begin(), t->frames.end() - 10);
t->lastSeen.erase(t->lastSeen.begin(), t->lastSeen.end() - 10);
}
}
i = 0;
for (vector<Rect>::const_iterator r = faces.begin(); r != faces.end();
++r, ++i) {
Scalar color = colors[i % 8];
rectangle(img, *r * scale, color, 3);
}
i = 0;
// for(vector<Rect>::const_iterator e = allEyes.begin(); e != allEyes.end(); ++e, ++i) {
// const Scalar& color = colors[i % maxEyes];
// rectangle(img, *e, color, 3);
// }
}
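For reference, the EyeTrack type and the `record`/`assignTracksToEyes` helpers are not shown in this snippet. Judging only from how the members are used above (both history vectors trimmed to the last 10 entries, a staleness counter, assignment of a fresh EyeTrack() to free a slot), the record presumably looks roughly like this hypothetical reconstruction:
// Hypothetical reconstruction of EyeTrack, inferred from the usage above
struct EyeTrack {
std::vector<cv::Mat> frames;    // recent eye crops, trimmed to the last 10
std::vector<cv::Rect> lastSeen; // recent bounding boxes; empty() marks a free slot
int numFramesSinceLastSeen;     // incremented while unmatched; track dropped after 5
EyeTrack() : numFramesSinceLastSeen(0) {}
};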
Example 6: ncvAssertPrintReturn
ncvAssertPrintReturn(capture.open(camid) != 0, "Can't open source", -1);
}
capture >> frame;
ncvAssertPrintReturn(!frame.empty(), "Empty video source", -1);
frameSize.width = frame.cols;
frameSize.height = frame.rows;
}
NcvBool bUseGPU = true;
NcvBool bLargestObject = false;
NcvBool bFilterRects = true;
NcvBool bHelpScreen = false;
CascadeClassifier classifierOpenCV;
ncvAssertPrintReturn(classifierOpenCV.load(cascadeName) != 0, "Error (in OpenCV) opening classifier", -1);
int devId;
ncvAssertCUDAReturn(cudaGetDevice(&devId), -1);
cudaDeviceProp devProp;
ncvAssertCUDAReturn(cudaGetDeviceProperties(&devProp, devId), -1);
cout << "Using GPU: " << devId << "(" << devProp.name <<
"), arch=" << devProp.major << "." << devProp.minor << endl;
//==============================================================================
//
// Load the classifier from file (assuming its size is about 1 mb)
// using a simple allocator
//
//==============================================================================
Example 7: video_buffer_callback
/**
* buffer header callback function for video
*
* @param port Pointer to port from which callback originated
* @param buffer mmal buffer header pointer
*/
static void video_buffer_callback(MMAL_PORT_T *port, MMAL_BUFFER_HEADER_T *buffer)
{
MMAL_BUFFER_HEADER_T *new_buffer;
PORT_USERDATA *pData = (PORT_USERDATA *)port->userdata;
if (pData)
{
if (buffer->length)
{
mmal_buffer_header_mem_lock(buffer);
//
// *** PR : OPEN CV Stuff here !
//
int w=pData->pstate->width; // get image size
int h=pData->pstate->height;
int h4=h/4;
memcpy(py->imageData,buffer->data,w*h); // read Y
if (pData->pstate->graymode==0)
{
memcpy(pu->imageData,buffer->data+w*h,w*h4); // read U
memcpy(pv->imageData,buffer->data+w*h+w*h4,w*h4); // read v
cvResize(pu, pu_big, CV_INTER_NN);
cvResize(pv, pv_big, CV_INTER_NN); //CV_INTER_LINEAR looks better but it's slower
cvMerge(py, pu_big, pv_big, NULL, image);
cvCvtColor(image,dstImage,CV_YCrCb2RGB); // convert in RGB color space (slow)
gray=cvarrToMat(dstImage);
//cvShowImage("camcvWin", dstImage );
}
else
{
// for face reco, we just keep gray channel, py
gray=cvarrToMat(py);
//cvShowImage("camcvWin", py); // display only gray channel
}
////////////////////////////////
// FACE RECOGNITION START HERE
////////////////////////////////
// detect faces
face_cascade.detectMultiScale(gray, faces, 1.1, 3, CV_HAAR_SCALE_IMAGE, Size(80,80));
// for each faces founded
for(int i = 0; i < faces.size(); i++)
{
// crop the face (pretty easy with OpenCV, don't you think?)
Rect face_i = faces[i];
//face = gray(face_i);
// resized face and display it
//cv::resize(face, face_resized, Size(im_width, im_height), 1.0, 1.0, CV_INTER_NN); //INTER_CUBIC);
// create a rectangle around the face
rectangle(gray, face_i, CV_RGB(255, 255 ,255), 1);
} // end for
/////////////////////////
// END OF FACE RECO
/////////////////////////
// Show the result:
imshow("camcvWin", gray);
key = (char) waitKey(1);
nCount++; // count frames displayed
mmal_buffer_header_mem_unlock(buffer);
}
else vcos_log_error("buffer null");
}
else
{
vcos_log_error("Received a encoder buffer callback with no state");
}
// release buffer back to the pool
mmal_buffer_header_release(buffer);
// and send one back to the port (if still open)
if (port->is_enabled)
{
MMAL_STATUS_T status;
new_buffer = mmal_queue_get(pData->pstate->video_pool->queue);
//......... some code omitted here .........
Example 8: faceDetect
Mat faceDetect(Mat img) {
std::vector<Rect> faces;
std::vector<Rect> eyes;
bool two_eyes = false;
bool any_eye_detected = false;
//detecting faces
face_cascade.detectMultiScale(img, faces, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE,
Size(30, 30));
if (faces.size() == 0) {
cout << "Try again.. I did not dectected any faces..." << endl;
exit(1);
}
Point p1 = Point(0, 0);
for (size_t i = 0; i < faces.size(); i++) {
// we cannot draw in the image !!! otherwise we will mess the prediction
// rectangle( img, faces[i], Scalar( 255, 100, 0 ), 4, 8, 0 );
Mat frame_gray;
cvtColor(img, frame_gray, CV_BGR2GRAY);
//imwrite("frame_gary.jpg", frame_gray);
// cropping only the face in the region defined by faces[i]
// (eye detections go into the outer `eyes` vector declared above, so the debug output below can report them)
Mat faceROI;
faceROI = frame_gray(faces[i]);
//imwrite("faceROI.jpg", faceROI);
//In each face, detect eyes
eyes_cascade.detectMultiScale(faceROI, eyes, 1.1, 3,
0 | CV_HAAR_SCALE_IMAGE, Size(30, 30));
for (size_t j = 0; j < eyes.size(); j++) {
Point center(faces[i].x + eyes[j].x + eyes[j].width * 0.5,
faces[i].y + eyes[j].y + eyes[j].height * 0.5);
// we cannot draw in the image !!! otherwise we will mess the prediction
//int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
//circle( img, center, radius, Scalar( 255, 0, 0 ), 4, 8, 0 );
if (j == 1) {
p1 = center;
two_eyes = true;
} else {
any_eye_detected = true;
}
}
}
cout << "SOME DEBUG" << endl;
cout << "-------------------------" << endl;
cout << "faces detected:" << faces.size() << endl;
for (size_t j = 0; j < eyes.size(); j++) {
cout << j << endl;
cout << "ex: " << eyes[j].x << endl;
cout << "ey: " << eyes[j].y << endl;
cout << "ew: " << eyes[j].width << endl;
cout << "eh: " << eyes[j].height << endl << endl;
}
cout << "x: " << faces[0].x << endl;
cout << "y: " << faces[0].y << endl;
cout << "w: " << faces[0].width << endl;
cout << "h: " << faces[0].height << endl << endl;
Mat imageInRectangle;
imageInRectangle = img(faces[0]);
Size recFaceSize = imageInRectangle.size();
cout << recFaceSize << endl;
// for debug
//imwrite("imageInRectangle2.jpg", imageInRectangle);
int rec_w = 0;
int rec_h = faces[0].height * 0.64;
// computing the (x,y) for the cropped rectangle,
// based on human anatomy
int px = 0;
int py = 2 * 0.125 * faces[0].height;
Mat cropImage;
cout << "faces[0].x:" << faces[0].x << endl;
p1.x = p1.x - faces[0].x;
cout << "p1.x:" << p1.x << endl;
if (any_eye_detected) {
if (two_eyes) {
cout << "two eyes detected" << endl;
// we have detected two eyes
// we have p1 and p2
// left eye
px = p1.x / 1.35;
} else {
// only one eye was found... need to check whether it is
// the left or the right eye
// we have only p1
//......... some code omitted here .........
Example 9: detectAndDisplay
/** @function detectAndDisplay */
void detectAndDisplay( Mat frame, int argc, char** argv )
{
std::vector<Rect> faces;
Mat frame_gray;
//ROS initiated
ros::init(argc, argv, "detect_faces");
ros::NodeHandle n;
ros::Publisher face_pub = n.advertise<std_msgs::Int32>("num_faces",1000);
std_msgs::Int32 msg;
cvtColor( frame, frame_gray, CV_BGR2GRAY );
equalizeHist( frame_gray, frame_gray );
//-- Detect faces
face_cascade.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );
if(counter%3 == 0)
{
// reuse the node handle and publisher created above
//store data to msg and publish it
msg.data = faces.size();
face_pub.publish(msg);
}
if(counter == 0)
{
system("espeak \"Hi Matteo! This is our project.\"");
}
if(counter%20 == 0 && counter !=0)
{
if(faces.size()==0)
{
system("espeak \"No one is here. I'm lonely\"");
}
else if(faces.size()==1)
{
system("espeak \"1 person is here. How can I help?\"");
}
else if(faces.size()==2)
{
system("espeak \"2 people are here. Does anyone need anything\"");
}
else if (faces.size() ==3)
{
system("espeak \"3 people are here. Who needs help first\"");
}
else
{
system("espeak \"So many people! You must really like me\"");
}
}
counter++;
for( size_t i = 0; i < faces.size(); i++ )
{
Point center( faces[i].x + faces[i].width*0.5, faces[i].y + faces[i].height*0.5 );
ellipse( frame, center, Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, Scalar( 255, 0, 255 ), 4, 8, 0 );
Mat faceROI = frame_gray( faces[i] );
std::vector<Rect> eyes;
//-- In each face, detect eyes
eyes_cascade.detectMultiScale( faceROI, eyes, 1.1, 2, 0 |CV_HAAR_SCALE_IMAGE, Size(30, 30) );
for( size_t j = 0; j < eyes.size(); j++ )
{
Point center( faces[i].x + eyes[j].x + eyes[j].width*0.5, faces[i].y + eyes[j].y + eyes[j].height*0.5 );
int radius = cvRound( (eyes[j].width + eyes[j].height)*0.25 );
circle( frame, center, radius, Scalar( 255, 0, 0 ), 4, 8, 0 );
}
}
//-- Show what you got
imshow( window_name, frame );
}
Example 10: video_buffer_callback
/**
* buffer header callback function for video
*
* @param port Pointer to port from which callback originated
* @param buffer mmal buffer header pointer
*/
static void video_buffer_callback(MMAL_PORT_T *port, MMAL_BUFFER_HEADER_T *buffer)
{
MMAL_BUFFER_HEADER_T *new_buffer;
PORT_USERDATA *pData = (PORT_USERDATA *)port->userdata;
if (pData)
{
if (buffer->length)
{
mmal_buffer_header_mem_lock(buffer);
//
// *** PR : OPEN CV Stuff here !
//
int w=pData->pstate->width; // get image size
int h=pData->pstate->height;
int h4=h/4;
memcpy(py->imageData,buffer->data,w*h); // read Y
if (pData->pstate->graymode==0)
{
memcpy(pu->imageData,buffer->data+w*h,w*h4); // read U
memcpy(pv->imageData,buffer->data+w*h+w*h4,w*h4); // read v
cvResize(pu, pu_big, CV_INTER_NN);
cvResize(pv, pv_big, CV_INTER_NN); //CV_INTER_LINEAR looks better but it's slower
cvMerge(py, pu_big, pv_big, NULL, image);
cvCvtColor(image,dstImage,CV_YCrCb2RGB); // convert in RGB color space (slow)
gray=cvarrToMat(dstImage);
//cvShowImage("camcvWin", dstImage );
}
else
{
// for face reco, we just keep gray channel, py
gray=cvarrToMat(py);
//cvShowImage("camcvWin", py); // display only gray channel
}
////////////////////////////////
// FACE RECOGNITION START HERE
////////////////////////////////
// dynamixel ids
int id_x = 9;
int id_y = 11;
// center coordinates of whole picture
int center_x = ( w / 2 );
int center_y = ( h / 2 );
// dead zone - the face can move inside it without triggering servo movement
int x_dead_min = ( center_x - ( w / 10 ) );
int x_dead_max = ( center_x + ( w / 10 ) );
int y_dead_min = ( center_y - ( h / 5 ) );
int y_dead_max = ( center_y + ( h / 20 ) );
/*cv::line(gray, Point(x_dead_min, 0), Point(x_dead_min, h), CV_RGB(255, 255 ,255) );
cv::line(gray, Point(x_dead_max, 0), Point(x_dead_max, h), CV_RGB(255, 255 ,255) );
cv::line(gray, Point(0, y_dead_min), Point(w, y_dead_min), CV_RGB(255, 255 ,255) );
cv::line(gray, Point(0, y_dead_max), Point(w, y_dead_max), CV_RGB(255, 255 ,255) );*/
// detect faces
face_cascade.detectMultiScale(gray, faces, 1.1, 3, CV_HAAR_SCALE_IMAGE, Size(35,35));
// for each faces founded
for(int i = 0; i < faces.size(); i++)
{
// crop the face (pretty easy with OpenCV, don't you think?)
Rect face_i = faces[i];
face = gray(face_i);
// resize the face and display it
cv::resize(face, face_resized, Size(im_width, im_height), 1.0, 1.0, CV_INTER_NN); //INTER_CUBIC);
// now, try to predict who it is
char sTmp[256];
double predicted_confidence = 0.0;
int prediction = -1;
model.predict(face_resized,prediction,predicted_confidence);
// create a rectangle around the face
// rectangle(gray, face_i, CV_RGB(255, 255 ,255), 1);
// if good prediction : > threshold
if (predicted_confidence>PREDICTION_TRESHOLD)
{
// trace
// sprintf(sTmp,"+ prediction ok = %s (%d) confiance = (%d)",people[prediction].c_str(),prediction,(int)predicted_confidence);
//trace((string)(sTmp));
//......... some code omitted here .........
Example 11: main
/**
* main
*/
int main(int argc, const char **argv)
{
/////////////////////////////////
// BEGIN OF FACE RECO INIT
/////////////////////////////////
//
// see thinkrpi.wordpress.com, articles on Magic Mirror to understand this command line and parameters
//
cout<<"start\n";
if ((argc != 4)&&(argc!=3)) {
cout << "usage: " << argv[0] << " files.ext histo(0/1) threshold(opt) \n e.g. " << argv[0] << " files.ext 1 5000 \n" << endl;
exit(1);
}
// set the default value for the prediction threshold = minimum value required to recognize someone
if (argc==3) { trace("(init) prediction threshold = 4500.0 by default");PREDICTION_SEUIL = 4500.0;}
if (argc==4) PREDICTION_SEUIL = atoi(argv[3]);
// do we do a color histogram equalization ?
bHisto=atoi(argv[2]);
// init people; this should be done in a config file,
// but I don't have time, I need to go to the swimming pool
// with my daughters
// and they prefer to swim rather than watch their father write a config file
// life is hard.
people[P_ANON] = "Unknown";
people[P_PHIL] = "Phil";
// init...
// reset counter
for (int i=0;i<MAX_PEOPLE;i++)
{
nPictureById[i]=0;
}
int bFirstDisplay =1;
trace("(init) People initialized");
// Get the path to your CSV
fn_csv = string(argv[1]);
// Note : /!\ change with your opencv path
//fn_haar = "/usr/share/opencv/haarcascades/haarcascade_frontalface_alt.xml";
// change fn_haar to the quicker LBP cascade (see article)
fn_haar = "/usr/share/opencv/lbpcascades/lbpcascade_frontalface.xml";
DEBUG cout<<"(OK) csv="<<fn_csv<<"\n";
// Read in the data (fails if no valid input filename is given, but you'll get an error message):
try {
read_csv(fn_csv, images, labels);
DEBUG cout<<"(OK) read CSV ok\n";
}
catch (cv::Exception& e)
{
cerr << "Error opening file \"" << fn_csv << "\". Reason: " << e.msg << endl;
exit(1);
}
// get height, width of the 1st image --> all images must be the same size
im_width = images[0].cols;
im_height = images[0].rows;
trace("(init) taille images ok");
//
// Create a FaceRecognizer and train it on the given images:
//
// this is an Eigen model, but you could replace it with a Fisher model (in that case
// the threshold value should be lower) (try it)
// Fisherfaces model;
// train the model with your nice collection of pictures
trace("(init) start train images");
model.train(images, labels);
trace("(init) train images : ok");
// load face model
if (!face_cascade.load(fn_haar))
{
cout <<"(E) face cascade model not loaded :"+fn_haar+"\n";
return -1;
}
trace("(init) Load modele : ok");
/////////////////////////////////
// END OF FACE RECO INIT
/////////////////////////////////
// Our main data storage vessel..
RASPIVID_STATE state;
//......... some code omitted here .........
Example 12: detectAndDisplay
void detectAndDisplay(Mat frame)
{
vector<Rect> faces;
vector<Rect> eyes;
vector<vector<Point> > contours;
vector<Vec4i> hierarchy;
Mat frame_gray;
int thresh = 100;
cvtColor(frame, frame_gray, CV_BGR2GRAY);
equalizeHist(frame_gray, frame_gray);
face_cascade.detectMultiScale(frame_gray, faces, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(150,150));
for (size_t i = 0; i < faces.size(); i++)
{
Point center(faces[i].x + faces[i].width*0.5, faces[i].y + faces[i].height*0.5); // centre of the face
circle(frame, center, 1, Scalar(0, 255, 0), 1, 8, 0);
Point x1(faces[i].x , faces[i].y);
circle(frame, x1, 1, Scalar(0, 0, 255), 6, 8, 0);
Point x2(faces[i].x + faces[i].width , faces[i].y + faces[i].height);
// faces[i].x = x1.x;
// faces[i].y = x1.y;
// faces[i].width *= 0.75;
// faces[i].height = faces[i].width*0.5;
circle(frame, x2, 1, Scalar(0, 0, 255), 6, 8, 0);
rectangle(frame, x1, x2, Scalar(255, 0, 255), 2, 8, 0);
Mat faceROI = (frame_gray)(faces[i]);
eyes_cascade.detectMultiScale(faceROI, eyes, 1.1, 2, 0 | CV_HAAR_SCALE_IMAGE, Size(10, 10));
for (size_t j = 0; j < eyes.size(); j++)
{
Point center(faces[i].x + eyes[j].x + eyes[j].width*0.5, faces[i].y + eyes[j].y + eyes[j].height*0.5);
int radius = cvRound((eyes[j].width + eyes[j].height)*0.25);
circle(frame, center, radius, Scalar(255, 0, 0), 1, 8, 0);
circle(frame, center, 1, Scalar(0, 255, 0), 1, 8, 0);
Mat eyesROI = (faceROI)(eyes[j]);
Mat canny_output;
Canny(eyesROI, canny_output, thresh, thresh * 2, 3);
findContours(canny_output, contours, hierarchy, CV_RETR_TREE, CV_CHAIN_APPROX_SIMPLE, Point(0, 0));
// draw every contour (erasing from `contours` while iterating would skip entries and desynchronize it from `hierarchy`)
for (unsigned int k = 0; k < contours.size(); k++)
{
drawContours(canny_output, contours, k, Scalar(255, 0, 0), 1, 8, hierarchy, 0, Point());
}
imshow("countours", canny_output);
}
}
string window_name = "Face and Eyes detection";
flip(frame, frame, 1);
imshow(window_name, frame);
}
Example 13: img
static void
kms_nose_detect_process_frame(KmsNoseDetect *nose_detect,int width,int height,double scale_f2n,
double scale_n2o, double scale_o2f,GstClockTime pts)
{
Mat img (nose_detect->priv->img_orig);
vector<Rect> *faces=nose_detect->priv->faces;
vector<Rect> *noses=nose_detect->priv->noses;
vector<Rect> nose;
Scalar color;
Mat gray, nose_frame (cvRound(img.rows/scale_n2o), cvRound(img.cols/scale_n2o), CV_8UC1);
Mat smallImg( cvRound (img.rows/scale_o2f), cvRound(img.cols/scale_o2f), CV_8UC1 );
Mat noseROI;
Rect r_aux;
int i=0,j=0;
const static Scalar colors[] = { CV_RGB(255,0,255),
CV_RGB(255,0,0),
CV_RGB(255,255,0),
CV_RGB(255,128,0),
CV_RGB(0,255,0),
CV_RGB(0,255,255),
CV_RGB(0,128,255),
CV_RGB(0,0,255)} ;
if ( ! __process_alg(nose_detect,pts) && nose_detect->priv->num_frames_to_process <=0)
return;
nose_detect->priv->num_frame++;
if ( (2 == nose_detect->priv->process_x_every_4_frames && // one every 2 images
(1 == nose_detect->priv->num_frame % 2)) ||
( (2 != nose_detect->priv->process_x_every_4_frames) &&
(nose_detect->priv->num_frame <= nose_detect->priv->process_x_every_4_frames)))
{
nose_detect->priv->num_frames_to_process --;
cvtColor( img, gray, CV_BGR2GRAY );
//if detect_event != 0 we have received faces as meta-data
if ( 0 == nose_detect->priv->detect_event)
{
//setting up the image where the face detector will be executed
resize( gray, smallImg, smallImg.size(), 0, 0, INTER_LINEAR );
equalizeHist( smallImg, smallImg );
faces->clear();
fcascade.detectMultiScale(smallImg,*faces,
MULTI_SCALE_FACTOR(nose_detect->priv->scale_factor),2,
0 |CV_HAAR_SCALE_IMAGE,
Size(3,3));
}
//setting up the image where the nose detector will be executed
resize(gray,nose_frame,nose_frame.size(), 0,0,INTER_LINEAR);
equalizeHist( nose_frame, nose_frame);
noses->clear();
for( vector<Rect>::iterator r = faces->begin(); r != faces->end(); r++,i++ )
{
color = colors[i%8];
const int top_height=cvRound((float)r->height*TOP_PERCENTAGE/100);
const int down_height=cvRound((float)r->height*DOWN_PERCENTAGE/100);
const int side_width=cvRound((float)r->width*SIDE_PERCENTAGE/100);
//Transforming the rectangle detected in the face image to nose-frame coordinates;
//we crop away the face margins to avoid excessive processing
r_aux.y=(r->y + top_height)*scale_f2n;
r_aux.x=(r->x+side_width)*scale_f2n;
r_aux.height = (r->height-down_height-top_height)*scale_f2n;
r_aux.width = (r->width-side_width)*scale_f2n;
noseROI = nose_frame(r_aux);
nose.clear();
ncascade.detectMultiScale( noseROI, nose,
NOSE_SCALE_FACTOR, 3,
0|CV_HAAR_FIND_BIGGEST_OBJECT,
Size(1, 1));
for ( vector<Rect>::iterator m = nose.begin(); m != nose.end();m++,j++)
{
Rect m_aux;
m_aux.x=(r_aux.x + m->x)*scale_n2o;
m_aux.y=(r_aux.y + m->y)*scale_n2o;
m_aux.width=(m->width-1)*scale_n2o;
m_aux.height=(m->height-1)*scale_n2o;
noses->push_back(m_aux);
}
}
}
if (GOP == nose_detect->priv->num_frame )
nose_detect->priv->num_frame=0;
//Printing on image
j=0;
if (1 == nose_detect->priv->view_noses )
for ( vector<Rect>::iterator m = noses->begin(); m != noses->end();m++,j++)
{
color = colors[j%8];
cvRectangle( nose_detect->priv->img_orig, cvPoint(m->x,m->y),
//......... some code omitted here .........
Example 14: main
/*
* To work with a Kinect or XtionPRO the user must install the OpenNI library and the PrimeSensor Module for OpenNI, and
* configure OpenCV with the WITH_OPENNI flag ON (using CMake).
*/
int main( int argc, char* argv[] )
{
time_t start = time(0);
bool isColorizeDisp, isFixedMaxDisp;
int imageMode;
bool retrievedImageFlags[5];
string filename;
bool isVideoReading;
//parseCommandLine( argc, argv, isColorizeDisp, isFixedMaxDisp, imageMode, retrievedImageFlags, filename, isVideoReading );
if (pcl::io::loadPCDFile<pcl::PointXYZ> ("test_pcd.pcd", *cloud_golden) == -1) //* load the file
{
PCL_ERROR ("Couldn't read file test_pcd.pcd \n");
return (-1);
}
std::cout << "Loaded "
<< cloud_golden->width * cloud_golden->height
<< " data points from test_pcd.pcd with the following fields: "
<< std::endl;
//
pcl::copyPointCloud (*cloud_golden, *cloud_transformed);
cout << "Device opening ..." << endl;
cout << CV_CAP_OPENNI <<endl;
VideoCapture capture;
if( isVideoReading )
capture.open( filename );
else
capture.open(CV_CAP_OPENNI);
cout << "done." << endl;
if( !capture.isOpened() )
{
cout << "Can not open a capture object." << endl;
return -1;
}
if( !isVideoReading )
{
bool modeRes=false;
switch ( imageMode )
{
case 0:
modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_VGA_30HZ );
break;
case 1:
modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_SXGA_15HZ );
break;
case 2:
modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_SXGA_30HZ );
break;
//The following modes are only supported by the Xtion Pro Live
case 3:
modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_QVGA_30HZ );
break;
case 4:
modeRes = capture.set( CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_QVGA_60HZ );
break;
default:
CV_Error( CV_StsBadArg, "Unsupported image mode property.\n");
}
if (!modeRes)
cout << "\nThis image mode is not supported by the device, the default value (CV_CAP_OPENNI_SXGA_15HZ) will be used.\n" << endl;
}
if(capture.get( CV_CAP_PROP_OPENNI_REGISTRATION ) == 0) capture.set(CV_CAP_PROP_OPENNI_REGISTRATION,1);
// Print some available device settings.
cout << "\nDepth generator output mode:" << endl <<
"FRAME_WIDTH " << capture.get( CV_CAP_PROP_FRAME_WIDTH ) << endl <<
"FRAME_HEIGHT " << capture.get( CV_CAP_PROP_FRAME_HEIGHT ) << endl <<
"FRAME_MAX_DEPTH " << capture.get( CV_CAP_PROP_OPENNI_FRAME_MAX_DEPTH ) << " mm" << endl <<
"FPS " << capture.get( CV_CAP_PROP_FPS ) << endl <<
"REGISTRATION " << capture.get( CV_CAP_PROP_OPENNI_REGISTRATION ) << endl;
if( capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR_PRESENT ) )
{
cout <<
"\nImage generator output mode:" << endl <<
"FRAME_WIDTH " << capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR+CV_CAP_PROP_FRAME_WIDTH ) << endl <<
"FRAME_HEIGHT " << capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR+CV_CAP_PROP_FRAME_HEIGHT ) << endl <<
"FPS " << capture.get( CV_CAP_OPENNI_IMAGE_GENERATOR+CV_CAP_PROP_FPS ) << endl;
}
else
{
cout << "\nDevice doesn't contain image generator." << endl;
if (!retrievedImageFlags[0] && !retrievedImageFlags[1] && !retrievedImageFlags[2])
return 0;
}
if( !face_cascade.load( cascade_name[0] ) )
{
printf("--(!)Error loading\n"); return -1;
};
if( !eyes_cascade.load( cascade_name[1] ) )
{
printf("--(!)Error loading\n"); return -1;
};
//printf("Entering for\n");
//......... some code omitted here .........
Example 15: detectAndDisplay
// Function detectAndDisplay
int detectAndDisplay(Mat frame)
{
std::vector<Rect> faces;
Mat frame_gray;
Mat crop;
Mat res;
Mat gray;
string text;
stringstream sstm;
cvtColor(frame, frame_gray, COLOR_BGR2GRAY);
equalizeHist(frame_gray, frame_gray);
// Detect faces
face_cascade.detectMultiScale(frame_gray, faces, 1.1, 2, 0 | CASCADE_SCALE_IMAGE, Size(30, 30));
// Set Region of Interest
cv::Rect roi_b;
cv::Rect roi_c;
size_t ic = 0; // ic is index of current element
int ac = 0; // ac is area of current element
size_t ib = 0; // ib is index of biggest element
int ab = 0; // ab is area of biggest element
for (ic = 0; ic < faces.size(); ic++) // Iterate through all current elements (detected faces)
{
roi_c.x = faces[ic].x;
roi_c.y = faces[ic].y;
roi_c.width = (faces[ic].width);
roi_c.height = (faces[ic].height);
ac = roi_c.width * roi_c.height; // Get the area of current element (detected face)
roi_b.x = faces[ib].x;
roi_b.y = faces[ib].y;
roi_b.width = (faces[ib].width);
roi_b.height = (faces[ib].height);
ab = roi_b.width * roi_b.height; // Get the area of biggest element, at beginning it is same as "current" element
if (ac > ab)
{
ib = ic;
roi_b.x = faces[ib].x;
roi_b.y = faces[ib].y;
roi_b.width = (faces[ib].width);
roi_b.height = (faces[ib].height);
}
crop = frame(roi_b);
resize(crop, res, Size(128, 128), 0, 0, INTER_LINEAR); // This will be needed later while saving images
cvtColor(crop, gray, CV_BGR2GRAY); // Convert cropped image to Grayscale
Point pt1(faces[ic].x, faces[ic].y); // Display detected faces on main window - live stream from camera
Point pt2((faces[ic].x + faces[ic].width), (faces[ic].y + faces[ic].height));
rectangle(frame, pt1, pt2, Scalar(0, 255, 0), 2, 8, 0);
}
if(!crop.empty())
return 1;
else
return 0;
}