本文整理汇总了C++中cv::CascadeClassifier::load方法的典型用法代码示例。如果您正苦于以下问题:C++ CascadeClassifier::load方法的具体用法?C++ CascadeClassifier::load怎么用?C++ CascadeClassifier::load使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类cv::CascadeClassifier
的用法示例。
在下文中一共展示了CascadeClassifier::load方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: onInit
// Nodelet entry point: reads parameters, wires lazy publish/subscribe
// callbacks, loads the face/eye Haar cascades, and starts the
// dynamic_reconfigure server.
virtual void onInit()
{
nh_ = getNodeHandle();
it_ = boost::shared_ptr<image_transport::ImageTransport>(new image_transport::ImageTransport(nh_));
local_nh_ = ros::NodeHandle("~");
local_nh_.param("debug_view", debug_view_, false);
subscriber_count_ = 0;
prev_stamp_ = ros::Time(0, 0);
// Connect/disconnect callbacks let the nodelet subscribe to the input
// image only while someone is listening to its outputs.
image_transport::SubscriberStatusCallback img_connect_cb = boost::bind(&FaceDetectionNodelet::img_connectCb, this, _1);
image_transport::SubscriberStatusCallback img_disconnect_cb = boost::bind(&FaceDetectionNodelet::img_disconnectCb, this, _1);
ros::SubscriberStatusCallback msg_connect_cb = boost::bind(&FaceDetectionNodelet::msg_connectCb, this, _1);
ros::SubscriberStatusCallback msg_disconnect_cb = boost::bind(&FaceDetectionNodelet::msg_disconnectCb, this, _1);
img_pub_ = image_transport::ImageTransport(local_nh_).advertise("image", 1, img_connect_cb, img_disconnect_cb);
msg_pub_ = local_nh_.advertise<opencv_apps::FaceArrayStamped>("faces", 1, msg_connect_cb, msg_disconnect_cb);
// In debug mode keep at least one "subscriber" alive so the input
// stream stays connected even with no external listeners.
if( debug_view_ ) {
subscriber_count_++;
}
std::string face_cascade_name, eyes_cascade_name;
// Default cascade locations match a system-wide OpenCV install.
local_nh_.param("face_cascade_name", face_cascade_name, std::string("/usr/share/opencv/haarcascades/haarcascade_frontalface_alt.xml"));
local_nh_.param("eyes_cascade_name", eyes_cascade_name, std::string("/usr/share/opencv/haarcascades/haarcascade_eye_tree_eyeglasses.xml"));
// A failed load is reported but not fatal; detection will simply find nothing.
if( !face_cascade_.load( face_cascade_name ) ){ NODELET_ERROR("--Error loading %s", face_cascade_name.c_str()); };
if( !eyes_cascade_.load( eyes_cascade_name ) ){ NODELET_ERROR("--Error loading %s", eyes_cascade_name.c_str()); };
dynamic_reconfigure::Server<face_detection::FaceDetectionConfig>::CallbackType f =
boost::bind(&FaceDetectionNodelet::reconfigureCallback, this, _1, _2);
srv.setCallback(f);
}
示例2: setup
void ocvFaceDetectApp::setup()
{
	// Resolve the bundled Haar cascade assets, load them, then start
	// capturing camera frames at 640x480.
	const std::string facePath = getAssetPath( "haarcascade_frontalface_alt.xml" ).string();
	const std::string eyePath = getAssetPath( "haarcascade_eye.xml" ).string();
	mFaceCascade.load( facePath );
	mEyeCascade.load( eyePath );
	mCapture = Capture( 640, 480 );
	mCapture.start();
}
示例3: main
int main(int argc, char** argv)
{
// Load the cascade classifiers
// Make sure you point the XML files to the right path, or
// just copy the files from [OPENCV_DIR]/data/haarcascades directory
face_cascade.load("haarcascade_frontalface_alt2.xml");
eye_cascade.load("haarcascade_eye.xml");
// Open webcam
cv::VideoCapture cap(0);
// Check if everything is ok
if (face_cascade.empty() || eye_cascade.empty() || !cap.isOpened())
return 1;
// Set video to 320x240
cap.set(CV_CAP_PROP_FRAME_WIDTH, 320);
cap.set(CV_CAP_PROP_FRAME_HEIGHT, 240);
cv::Mat frame, eye_tpl;
cv::Rect eye_bb;
while (cv::waitKey(15) != 'q')
{
cap >> frame;
if (frame.empty())
break;
// Flip the frame horizontally, Windows users might need this
cv::flip(frame, frame, 1);
// Convert to grayscale and
// adjust the image contrast using histogram equalization
cv::Mat gray;
cv::cvtColor(frame, gray, CV_BGR2GRAY);
if (eye_bb.width == 0 && eye_bb.height == 0)
{
// Detection stage
// Try to detect the face and the eye of the user
detectEye(gray, eye_tpl, eye_bb);
}
else
{
// Tracking stage with template matching
trackEye(gray, eye_tpl, eye_bb);
// Draw bounding rectangle for the eye
cv::rectangle(frame, eye_bb, CV_RGB(0,255,0));
}
// Display video
cv::imshow("video", frame);
}
return 0;
}
示例4: FaceDetector
FaceDetector(){
	// Report any cascade that fails to load on stderr; construction
	// continues regardless (detection will just find nothing).
	const auto report = [](const auto& name){
		std::cerr << "failed to load : " << name;
	};
	if(!face_cascade_.load(face_cascade_name)) report(face_cascade_name);
	if(!smile_cascade_.load(smile_cascade_name)) report(smile_cascade_name);
	if(!eyes_cascade_.load(eyes_cascade_name)) report(eyes_cascade_name);
}
示例5: setup
// App startup: load the face/eye Haar cascades and start the camera.
void ocvFaceDetectApp::setup()
{
// On macOS the cascade XMLs are bundled app resources; on other
// platforms they are resolved relative to the application binary.
// NOTE(review): the load() results are not checked — a missing file
// fails silently and only shows up as detection finding nothing.
#if defined( CINDER_MAC )
mFaceCascade.load( getResourcePath( "haarcascade_frontalface_alt.xml" ) );
mEyeCascade.load( getResourcePath( "haarcascade_eye.xml" ) );
#else
mFaceCascade.load( getAppPath() + "../../resources/haarcascade_frontalface_alt.xml" );
mEyeCascade.load( getAppPath() + "../../resources/haarcascade_eye.xml" );
#endif
// Capture camera frames at 640x480.
mCapture = Capture( 640, 480 );
mCapture.start();
}
示例6: main
// Face-detection ROS service node: loads the frontal-face cascade,
// subscribes to camera images and serves "detect_faces" requests.
int main( int argc, char** argv )
{
	ros::init(argc, argv, "face_det_service");
	// A missing cascade makes the whole service useless — fail fast.
	if( !face_cascade.load( "/opt/ros/hydro/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml" ) )
	{
		std::cerr << "Error: loading classifier failed" << std::endl;
		return -1;
	}
	ros::NodeHandle n;
	// Subscribe input camera images
	image_transport::ImageTransport it(n);
	image_transport::Subscriber sub = it.subscribe("image_in", 10, imageCallback);
	// Create a new service server and register the callback
	ros::ServiceServer service = n.advertiseService("detect_faces", detect);
	ROS_INFO_STREAM("Face detection service initialized and listening...");
	ros::spin();
	// ros::spin() returns on a normal shutdown (e.g. Ctrl-C), so exit
	// with success; the previous `return 1` wrongly reported failure.
	return 0;
}
示例7: main
int main()
{
if (!faceDetector.load("resources/haarcascade_frontalface_alt_tree.xml")) {
std::cout << "Failed to load classifier" << std::endl;
getchar();
return -1;
}
cv::VideoCapture capture(CV_CAP_ANY); //Capture using any camera connected to your system
if (!capture.isOpened())
return -1;
char key;
cv::namedWindow("Camera_Output", 1); //Create window
while (true){ //Create infinte loop for live streaming
cv::Mat frame;
capture >> frame; //Create image frames from capture
cv::Mat faceDetectedFrame = detectFace(frame);
cv::imshow("Camera_Output", faceDetectedFrame); //Show image frames on created window
key = cv::waitKey(1); //Capture Keyboard stroke
if (char(key) == 27){
break; //If you hit ESC key loop will break.
}
}
cv::destroyAllWindows();
return 0;
}
示例8: main
// Training-data loader: load the face cascade, then read the labelled
// face images listed in faces.csv.
int main(int argc, char **argv){
	std::vector <cv::Mat> images;
	std::vector <int> labels;
	// Previously the load() result was silently ignored; a missing
	// cascade now aborts with a diagnostic. (Also fixed the typo'd
	// parameter name `arc` -> `argc`.)
	if (!face_cascade.load("config/haarcascade_frontalface_alt.xml")) {
		std::cerr << "Error: could not load config/haarcascade_frontalface_alt.xml" << std::endl;
		return -1;
	}
	read_csv("faces.csv", images, labels);
	return 0;
}
示例9: main
// Face-detection ROS node: loads the frontal-face cascade, subscribes
// to camera images and publishes detections on face_detections_out.
int main( int argc, char** argv )
{
	ros::init(argc, argv, "face_det_node");
	// A missing cascade makes the node useless — fail fast.
	if( !face_cascade.load( "/opt/ros/hydro/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml" ) )
	{
		std::cerr << "Error: loading classifier failed" << std::endl;
		return -1;
	}
	ros::NodeHandle n;
	// Subscribe input camera images
	image_transport::ImageTransport it(n);
	image_transport::Subscriber sub = it.subscribe("image_in", 10, imageCallback);
	// Advertise detected faces
	pub = n.advertise<workshop_msgs::DetectionsStamped>("face_detections_out", 1000);
	ROS_INFO_STREAM("Face detector initialized and listening...");
	ros::spin();
	// ros::spin() returns on a normal shutdown (e.g. Ctrl-C), so exit
	// with success; the previous `return 1` wrongly reported failure.
	return 0;
}
示例10: Init
// Initialize face detection and the FFmpeg decoding pipeline for the
// stream URL in m_str_url. Returns true on success, false on any
// failure (each failure path logs a diagnostic via qDebug).
bool MainWindow::Init()
{
// Load the face-detection cascade; abort initialization if missing.
if( !face_cascade.load( face_cascade_name) ){
qDebug()<<"级联分类器错误,可能未找到文件,拷贝该文件到工程目录下!";
return false;
}
if(m_str_url.isEmpty())
return false;
// Open the input stream.
int result=avformat_open_input(&pAVFormatContext, m_str_url.toStdString().c_str(),NULL,NULL);
if (result<0){
qDebug()<<"打开视频流失败";
return false;
}
// Probe the container for stream information.
result=avformat_find_stream_info(pAVFormatContext,NULL);
if (result<0){
qDebug()<<"获取视频流信息失败";
return false;
}
// Find the index of the first video stream.
videoStreamIndex = -1;
for (uint i = 0; i < pAVFormatContext->nb_streams; i++) {
if (pAVFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
videoStreamIndex = i;
break;
}
}
if (videoStreamIndex==-1){
qDebug()<<"获取视频流索引失败";
return false;
}
// Read the stream resolution and allocate the RGB24 output picture.
pAVCodecContext = pAVFormatContext->streams[videoStreamIndex]->codec;
videoWidth=pAVCodecContext->width;
videoHeight=pAVCodecContext->height;
avpicture_alloc(&pAVPicture,AV_PIX_FMT_RGB24,videoWidth,videoHeight);
AVCodec *pAVCodec;
// Look up the decoder for this stream's codec.
// NOTE(review): avcodec_find_decoder() may return NULL for an
// unsupported codec and that result is passed to avcodec_open2()
// unchecked — confirm avcodec_open2 fails cleanly in that case.
pAVCodec = avcodec_find_decoder(pAVCodecContext->codec_id);
// Scaler: YUV420P -> RGB24 at the stream's native resolution.
// NOTE(review): the source pixel format is hard-coded to
// AV_PIX_FMT_YUV420P rather than pAVCodecContext->pix_fmt — streams
// in any other format would be converted incorrectly.
pSwsContext = sws_getContext(videoWidth,videoHeight,AV_PIX_FMT_YUV420P,videoWidth,videoHeight,AV_PIX_FMT_RGB24,SWS_BICUBIC,0,0,0);
// Open the decoder.
result=avcodec_open2(pAVCodecContext,pAVCodec,NULL);
if (result<0){
qDebug()<<"打开解码器失败";
return false;
}
qDebug()<<"初始化视频流成功";
return true;
}
示例11: main
/**
* @function main
*/
int main( int argc, const char** argv ) {
CvCapture* capture;
cv::Mat frame;
// Load the cascades
if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading face cascade, please change face_cascade_name in source code.\n"); return -1; };
cv::namedWindow(main_window_name,CV_WINDOW_NORMAL);
cv::moveWindow(main_window_name, 400, 100);
cv::namedWindow(face_window_name,CV_WINDOW_NORMAL);
cv::moveWindow(face_window_name, 10, 100);
cv::namedWindow("Right Eye",CV_WINDOW_NORMAL);
cv::moveWindow("Right Eye", 10, 600);
cv::namedWindow("Left Eye",CV_WINDOW_NORMAL);
cv::moveWindow("Left Eye", 10, 800);
cv::namedWindow("aa",CV_WINDOW_NORMAL);
cv::moveWindow("aa", 10, 800);
cv::namedWindow("aaa",CV_WINDOW_NORMAL);
cv::moveWindow("aaa", 10, 800);
createCornerKernels();
ellipse(skinCrCbHist, cv::Point(113, 155.6), cv::Size(23.4, 15.2),
43.0, 0.0, 360.0, cv::Scalar(255, 255, 255), -1);
// Read the video stream
capture = cvCaptureFromCAM( -1 );
if( capture ) {
while( true ) {
frame = cvQueryFrame( capture );
// mirror it
cv::flip(frame, frame, 1);
frame.copyTo(debugImage);
// Apply the classifier to the frame
if( !frame.empty() ) {
detectAndDisplay( frame );
}
else {
printf(" --(!) No captured frame -- Break!");
break;
}
imshow(main_window_name,debugImage);
int c = cv::waitKey(10);
if( (char)c == 'c' ) { break; }
if( (char)c == 'f' ) {
imwrite("frame.png",frame);
}
}
}
releaseCornerKernels();
return 0;
}
示例12: update
// Per-frame filter entry point: lazily loads the cascade on first use,
// runs face detection only every `recheck` interval (redrawing cached
// results in between), and writes the annotated image to `out`.
// `in`/`out` are width*height RGBA32 buffers of `size` pixels.
void update(double time,
uint32_t* out,
const uint32_t* in)
{
// Lazy one-time cascade load; with no classifier configured the
// filter is a pass-through.
if (cascade.empty()) {
cv::setNumThreads(cvRound(threads * 100));
if (classifier.length() > 0) {
if (!cascade.load(classifier.c_str()))
fprintf(stderr, "ERROR: Could not load classifier cascade %s\n", classifier.c_str());
}
else {
memcpy(out, in, size * 4);
return;
}
}
// sanitize parameters
search_scale = CLAMP(search_scale, 0.11, 1.0);
neighbors = CLAMP(neighbors, 0.01, 1.0);
// copy input image to OpenCV
image = cv::Mat(height, width, CV_8UC4, (void*)in);
// only re-detect periodically to control performance and reduce shape jitter
int recheckInt = abs(cvRound(recheck * 1000));
if ( recheckInt > 0 && count % recheckInt )
{
// skip detect
count++;
// fprintf(stderr, "draw-only counter %u\n", count);
}
else
{
count = 1; // reset the recheck counter
if (objects.size() > 0) // reset the list of objects
objects.clear();
double elapsed = (double) cvGetTickCount();
objects = detect();
// use detection time to throttle frequency of re-detect vs. redraw (automatic recheck)
elapsed = cvGetTickCount() - elapsed;
elapsed = elapsed / ((double) cvGetTickFrequency() * 1000.0);
// Automatic recheck uses an undocumented negative parameter value,
// which is not compliant, but technically feasible.
if (recheck < 0 && cvRound( elapsed / (1000.0 / (recheckInt + 1)) ) <= recheckInt)
count += recheckInt - cvRound( elapsed / (1000.0 / (recheckInt + 1)));
// fprintf(stderr, "detection time = %gms counter %u\n", elapsed, count);
}
// Draw the (possibly cached) detections onto `image`.
draw();
// copy filtered OpenCV image to output
memcpy(out, image.data, size * 4);
}
示例13: getAssetPath
// App startup: load the expressions cascade, start the camera, then
// read the face database and train the Fisher face recognizer.
void TellThatToMyCamera_v1_0App::setup()
{
// NOTE(review): the load() result is unchecked — a missing cascade
// fails silently here.
mExpressionsCascade.load(getAssetPath("haarcascade_frontalface_alt.xml").string());
mPath= getAssetPath("ppdtest.csv").string();
mCapture = Capture( 640, 480 ); // Camera settings
mCapture.start();
read_csv(mPath, mDBimgFaces, mDBLabels); // Read DB of faces for FaceRec algorithm
mFisherFaceRec->train(mDBimgFaces, mDBLabels); // Train the Fisher Face Recognizer algorithm
}
示例14: init
// Load the configured Haar cascade (exit on failure) and wipe the
// output-pictures directory so each run starts clean.
void FaceFrameCutter::init()
{
if (!faceCascade_.load(haar_))
{
std::cerr<<"--(!)Error loading"<<std::endl;
exit(-1);
};
{
// NOTE(review): builds "rm -rf <outputPitures_>" and runs it via
// system() — the return value is ignored, and if outputPitures_
// can contain shell metacharacters or come from untrusted input
// this is a shell-injection hazard; verify where it is set.
string cmd("rm -rf ");
cmd += outputPitures_;
system( cmd.c_str());
}
}
示例15: main
/**
 * @function main
 * Single-image eye detection: reads the image named by argv[1], runs
 * detectAndDisplay and writes the result to "<input>_eyes.jpg".
 */
int main( int argc, const char** argv ) {
	// BUGFIX: argv[1] was previously dereferenced without checking
	// argc — running with no arguments was undefined behavior.
	if( argc < 2 ) {
		printf(" usage: %s <image>\n", argv[0]);
		return -1;
	}
	cv::Mat frame;
	// Load the cascades
	if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading face cascade, please change face_cascade_name in source code.\n"); return -1; };
	cv::namedWindow(main_window_name,CV_WINDOW_NORMAL);
	cv::moveWindow(main_window_name, 400, 100);
	cv::namedWindow(face_window_name,CV_WINDOW_NORMAL);
	cv::moveWindow(face_window_name, 10, 100);
	cv::namedWindow("Right Eye",CV_WINDOW_NORMAL);
	cv::moveWindow("Right Eye", 10, 600);
	cv::namedWindow("Left Eye",CV_WINDOW_NORMAL);
	cv::moveWindow("Left Eye", 10, 800);
	cv::namedWindow("aa",CV_WINDOW_NORMAL);
	cv::moveWindow("aa", 10, 800);
	cv::namedWindow("aaa",CV_WINDOW_NORMAL);
	cv::moveWindow("aaa", 10, 800);
	createCornerKernels();
	// White ellipse approximating the skin region in CrCb space.
	ellipse(skinCrCbHist, cv::Point(113, 155.6), cv::Size(23.4, 15.2),
	        43.0, 0.0, 360.0, cv::Scalar(255, 255, 255), -1);
	frame = cv::imread(argv[1]);
	frame.copyTo(debugImage);
	cv::Mat result;
	// Apply the classifier to the frame
	if( !frame.empty() ) {
		result = detectAndDisplay( frame );
	}
	else {
		printf(" cannot read image. terminating");
		return -1;
	}
	imshow(main_window_name,debugImage);
	// Derive the output filename from the input path.
	std::stringstream result_filename;
	result_filename << argv[1];
	result_filename << "_eyes.jpg";
	imwrite(result_filename.str().c_str(), result);
	std::cout << "written file: " << result_filename.str() << std::endl;
	releaseCornerKernels();
	return 0;
}