C++ Ptr::empty Method Code Examples

This article collects typical usage examples of the C++ cv::Ptr::empty method. If you have been wondering what exactly Ptr::empty does, how to call it, or what real-world uses look like, the hand-picked examples below should help. You can also explore further usage examples of the enclosing class, cv::Ptr.


Fifteen code examples of the Ptr::empty method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
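
Before the examples, a quick note on semantics: in the OpenCV 2.4-era API that most of these snippets use, cv::Ptr is OpenCV's reference-counting smart pointer, and ptr.empty() returns true while the pointer owns no object. This is distinct from ptr->empty(), which calls empty() on the pointed-to object. A minimal sketch (illustrative, not taken from any project below):

#include <opencv2/core/core.hpp>

int main()
{
    cv::Ptr<cv::Mat> m;         // default-constructed: owns nothing
    CV_Assert( m.empty() );     // true: the smart pointer itself is null

    m = new cv::Mat();          // the Ptr takes ownership of the raw pointer
    CV_Assert( !m.empty() );    // false: an object is now owned...
    CV_Assert( m->empty() );    // ...although the owned Mat holds no data yet

    m.release();                // drop the reference
    CV_Assert( m.empty() );     // true again
    return 0;
}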

Example 1: setCommonProperty

bool CvCapture_OpenNI::setCommonProperty( int propIdx, double propValue )
{
    bool isSet = false;

    switch( propIdx )
    {
    // There is a set of properties that correspond to the depth generator by default
    // (i.e. they apply when passed without a particular generator flag).
    case CV_CAP_PROP_OPENNI_REGISTRATION:
        isSet = setDepthGeneratorProperty( propIdx, propValue );
        break;
    case CV_CAP_PROP_OPENNI_APPROX_FRAME_SYNC :
        if( propValue && depthGenerator.IsValid() && imageGenerator.IsValid() )
        {
            // start synchronization
            if( approxSyncGrabber.empty() )
            {
                approxSyncGrabber = new ApproximateSyncGrabber( context, depthGenerator, imageGenerator, maxBufferSize, isCircleBuffer, maxTimeDuration );
            }
            else
            {
                approxSyncGrabber->finish();

                // update params
                approxSyncGrabber->setMaxBufferSize(maxBufferSize);
                approxSyncGrabber->setIsCircleBuffer(isCircleBuffer);
                approxSyncGrabber->setMaxTimeDuration(maxTimeDuration);
            }
            approxSyncGrabber->start();
        }
        else if( !propValue && !approxSyncGrabber.empty() )
        {
            // finish synchronization
            approxSyncGrabber->finish();
        }
        break;
    case CV_CAP_PROP_OPENNI_MAX_BUFFER_SIZE :
        maxBufferSize = cvRound(propValue);
        if( !approxSyncGrabber.empty() )
            approxSyncGrabber->setMaxBufferSize(maxBufferSize);
        break;
    case CV_CAP_PROP_OPENNI_CIRCLE_BUFFER :
        isCircleBuffer = cvRound(propValue) != 0;  // update the flag from propValue before propagating it
        if( !approxSyncGrabber.empty() )
            approxSyncGrabber->setIsCircleBuffer(isCircleBuffer);
        break;
    case CV_CAP_PROP_OPENNI_MAX_TIME_DURATION :
        maxTimeDuration = cvRound(propValue);
        if( !approxSyncGrabber.empty() )
            approxSyncGrabber->setMaxTimeDuration(maxTimeDuration);
        break;
    default:
    {
        std::stringstream ss;
        ss << "Such parameter (propIdx=" << propIdx << ") isn't supported for setting.\n";
        CV_Error( CV_StsBadArg, ss.str().c_str() );
    }
    }

    return isSet;
}
Author: AndreSteenveld, Project: opencv, Lines: 60, Source: cap_openni.cpp
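
In practice these properties are reached through cv::VideoCapture::set, which routes to the setCommonProperty shown above. A minimal sketch, assuming an OpenNI-enabled OpenCV 2.4 build and a connected depth sensor:

cv::VideoCapture capture( CV_CAP_OPENNI );
if( capture.isOpened() )
{
    // Lazily creates and starts the ApproximateSyncGrabber via the code above.
    capture.set( CV_CAP_PROP_OPENNI_APPROX_FRAME_SYNC, 1 );
    capture.set( CV_CAP_PROP_OPENNI_MAX_BUFFER_SIZE, 4 );
}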

Example 2: quality_test

template <typename TMat>
inline void quality_test(cv::Ptr<quality::QualityBase> ptr, const TMat& cmp, const Scalar& expected, const std::size_t quality_maps_expected = 1, const bool empty_expected = false )
{
    std::vector<cv::Mat> qMats = {};
    ptr->getQualityMaps(qMats);
    EXPECT_TRUE( qMats.empty());

    quality_expect_near( expected, ptr->compute(cmp));

    if (empty_expected)
        EXPECT_TRUE(ptr->empty());
    else
        EXPECT_FALSE(ptr->empty());

    ptr->getQualityMaps(qMats);

    EXPECT_EQ( qMats.size(), quality_maps_expected);
    for (auto& qm : qMats)
    {
        EXPECT_GT(qm.rows, 0);
        EXPECT_GT(qm.cols, 0);
    }

    ptr->clear();
    EXPECT_TRUE(ptr->empty());
}
Author: Bleach665, Project: opencv_contrib, Lines: 25, Source: test_precomp.hpp
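
This helper lives in the quality module's test support header. A hedged sketch of a call site, assuming the QualityMSE algorithm; the file name is a placeholder:

cv::Mat ref = cv::imread( "ref.png" );
cv::Ptr<cv::quality::QualityBase> ptr = cv::quality::QualityMSE::create( ref );
// Comparing an image against itself should yield a per-channel MSE of zero.
quality_test( ptr, ref, cv::Scalar(0., 0., 0.) );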

Example 3: DetectorAgregator

    DetectorAgregator(cv::Ptr<CascadeDetectorAdapter>& _mainDetector, cv::Ptr<CascadeDetectorAdapter>& _trackingDetector):
            mainDetector(_mainDetector),
            trackingDetector(_trackingDetector)
    {
        CV_Assert(!_mainDetector.empty());
        CV_Assert(!_trackingDetector.empty());

        DetectionBasedTracker::Parameters DetectorParams;
        tracker = new DetectionBasedTracker(mainDetector.ptr<DetectionBasedTracker::IDetector>(), trackingDetector.ptr<DetectionBasedTracker::IDetector>(), DetectorParams);
    }
Author: 09beezahmad, Project: opencv, Lines: 10, Source: DetectionBasedTracker_jni.cpp

Example 4: CascadeDetectorAdapter

 CascadeDetectorAdapter(cv::Ptr<cv::CascadeClassifier> detector):
         IDetector(),
         Detector(detector)
 {
     LOGD("CascadeDetectorAdapter::Detect::Detect");
     CV_Assert(!detector.empty());
 }
Author: 09beezahmad, Project: opencv, Lines: 7, Source: DetectionBasedTracker_jni.cpp
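
Note that the CV_Assert above only guards against a null Ptr; it does not verify that the classifier actually loaded a cascade. A small sketch of the distinction (the file name is illustrative):

cv::Ptr<cv::CascadeClassifier> detector = new cv::CascadeClassifier( "lbpcascade_frontalface.xml" );
// detector.empty()  -> false: the Ptr owns an object, so the adapter's assert passes
// detector->empty() -> true if the XML file was missing or failed to parse
CascadeDetectorAdapter adapter( detector );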

Example 5: cascadeInThread

cv::DetectionBasedTracker::SeparateDetectionWork::SeparateDetectionWork(DetectionBasedTracker& _detectionBasedTracker, cv::Ptr<DetectionBasedTracker::IDetector> _detector)
    :detectionBasedTracker(_detectionBasedTracker),
    cascadeInThread(),
    isObjectDetectingReady(false),
    shouldObjectDetectingResultsBeForgot(false),
    stateThread(STATE_THREAD_STOPPED),
    timeWhenDetectingThreadStartedWork(-1)
{
    CV_Assert(!_detector.empty());

    cascadeInThread = _detector;

    int res=0;
    res=pthread_mutex_init(&mutex, NULL);//TODO: should be attributes?
    if (res) {
        LOGE("ERROR in DetectionBasedTracker::SeparateDetectionWork::SeparateDetectionWork in pthread_mutex_init(&mutex, NULL) is %d", res);
        throw(std::exception());
    }
    res=pthread_cond_init (&objectDetectorRun, NULL);
    if (res) {
        LOGE("ERROR in DetectionBasedTracker::SeparateDetectionWork::SeparateDetectionWork in pthread_cond_init(&objectDetectorRun,, NULL) is %d", res);
        pthread_mutex_destroy(&mutex);
        throw(std::exception());
    }
    res=pthread_cond_init (&objectDetectorThreadStartStop, NULL);
    if (res) {
        LOGE("ERROR in DetectionBasedTracker::SeparateDetectionWork::SeparateDetectionWork in pthread_cond_init(&objectDetectorThreadStartStop,, NULL) is %d", res);
        pthread_cond_destroy(&objectDetectorRun);
        pthread_mutex_destroy(&mutex);
        throw(std::exception());
    }
}
Author: aslakhellesoy, Project: opencv, Lines: 32, Source: detection_based_tracker.cpp

Example 6: grabFrame

bool CvCapture_OpenNI::grabFrame()
{
    if( !isOpened() )
        return false;

    bool isGrabbed = false;
    if( !approxSyncGrabber.empty() && approxSyncGrabber->isRun() )
    {
        isGrabbed = approxSyncGrabber->grab( depthMetaData, imageMetaData );
    }
    else
    {
        XnStatus status = context.WaitAndUpdateAll();
        if( status != XN_STATUS_OK )
            return false;

        if( depthGenerator.IsValid() )
            depthGenerator.GetMetaData( depthMetaData );
        if( imageGenerator.IsValid() )
            imageGenerator.GetMetaData( imageMetaData );
        isGrabbed = true;
    }

    return isGrabbed;
}
Author: AndreSteenveld, Project: opencv, Lines: 25, Source: cap_openni.cpp

Example 7: calculate

void MapperGradShift::calculate(
    const cv::Mat& img1, const cv::Mat& image2, cv::Ptr<Map>& res) const
{
    Mat gradx, grady, imgDiff;
    Mat img2;

    CV_DbgAssert(img1.size() == image2.size());

    if(!res.empty()) {
        // We have initial values for the registration: we move img2 to that initial reference
        res->inverseWarp(image2, img2);
    } else {
        img2 = image2;
    }

    // Get gradient in all channels
    gradient(img1, img2, gradx, grady, imgDiff);

    // Calculate parameters using least squares
    Matx<double, 2, 2> A;
    Vec<double, 2> b;
    // For each value in A, all the matrix elements are added and then the channels are also added,
    // so we have two calls to "sum". The result can be found in the first element of the final
    // Scalar object.

    A(0, 0) = sum(sum(gradx.mul(gradx)))[0];
    A(0, 1) = sum(sum(gradx.mul(grady)))[0];
    A(1, 1) = sum(sum(grady.mul(grady)))[0];
    A(1, 0) = A(0, 1);

    b(0) = -sum(sum(imgDiff.mul(gradx)))[0];
    b(1) = -sum(sum(imgDiff.mul(grady)))[0];

    // Calculate shift. We use Cholesky decomposition, as A is symmetric.
    Vec<double, 2> shift = A.inv(DECOMP_CHOLESKY)*b;

    if(res.empty()) {
        res = new MapShift(shift);
    } else {
        MapShift newTr(shift);
        res->compose(newTr);
    }
}
Author: alfonsosanchezbeato, Project: OpenCV_reg, Lines: 43, Source: mappergradshift.cpp
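
The res argument doubles as input and output: arriving empty, it makes the mapper allocate a fresh MapShift; arriving non-empty, it serves as the starting point and the newly estimated shift is composed onto it. A minimal caller sketch, assuming the opencv_contrib reg module (namespace cv::reg) and two same-size images img1/img2 (placeholders):

cv::reg::MapperGradShift mapper;
cv::Ptr<cv::reg::Map> map;              // empty: the first call allocates a MapShift
mapper.calculate( img1, img2, map );    // initial shift estimate
mapper.calculate( img1, img2, map );    // non-empty: refines by composing a new shift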

Example 8: tracking_flow

    std::vector<bbox_t> tracking_flow(cv::Mat new_dst_mat, bool check_error = true)
    {
        if (sync_PyrLKOpticalFlow.empty()) {
            std::cout << "sync_PyrLKOpticalFlow isn't initialized \n";
            return cur_bbox_vec;
        }

        cv::cvtColor(new_dst_mat, dst_grey, CV_BGR2GRAY, 1);

        if (src_grey.rows != dst_grey.rows || src_grey.cols != dst_grey.cols) {
            src_grey = dst_grey.clone();
            return cur_bbox_vec;
        }

        if (prev_pts_flow.cols < 1) {
            return cur_bbox_vec;
        }

        ////sync_PyrLKOpticalFlow_gpu.sparse(src_grey_gpu, dst_grey_gpu, prev_pts_flow_gpu, cur_pts_flow_gpu, status_gpu, &err_gpu);    // OpenCV 2.4.x
        sync_PyrLKOpticalFlow->calc(src_grey, dst_grey, prev_pts_flow, cur_pts_flow, status, err);    // OpenCV 3.x

        dst_grey.copyTo(src_grey);

        std::vector<bbox_t> result_bbox_vec;

        if (err.rows == cur_bbox_vec.size() && status.rows == cur_bbox_vec.size())
        {
            for (size_t i = 0; i < cur_bbox_vec.size(); ++i)
            {
                cv::Point2f cur_key_pt = cur_pts_flow.at<cv::Point2f>(0, i);
                cv::Point2f prev_key_pt = prev_pts_flow.at<cv::Point2f>(0, i);

                float moved_x = cur_key_pt.x - prev_key_pt.x;
                float moved_y = cur_key_pt.y - prev_key_pt.y;

                // Merged the nested ifs (both else branches were identical) and use
                // std::fabs, since plain abs would truncate the float displacement.
                if (std::fabs(moved_x) < 100 && std::fabs(moved_y) < 100 && good_bbox_vec_flags[i] &&
                    err.at<float>(0, i) < flow_error && status.at<unsigned char>(0, i) != 0 &&
                    ((float)cur_bbox_vec[i].x + moved_x) > 0 && ((float)cur_bbox_vec[i].y + moved_y) > 0)
                {
                    cur_bbox_vec[i].x += moved_x + 0.5;
                    cur_bbox_vec[i].y += moved_y + 0.5;
                    result_bbox_vec.push_back(cur_bbox_vec[i]);
                }
                else good_bbox_vec_flags[i] = false;

                //if(!check_error && !good_bbox_vec_flags[i]) result_bbox_vec.push_back(cur_bbox_vec[i]);
            }
        }

        prev_pts_flow = cur_pts_flow.clone();

        return result_bbox_vec;
    }
Author: Nuzhny007, Project: Multitarget-tracker, Lines: 54, Source: yolo_v2_class.hpp
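
For the OpenCV 3.x path used above, the optical-flow object would be created once during tracker construction. A hedged initialization sketch, assuming the member is declared as cv::Ptr<cv::SparsePyrLKOpticalFlow>; the window size and pyramid depth are typical values, not taken from the project:

cv::Ptr<cv::SparsePyrLKOpticalFlow> sync_PyrLKOpticalFlow =
    cv::SparsePyrLKOpticalFlow::create( cv::Size(21, 21), 3 );   // window size, pyramid levels
if (sync_PyrLKOpticalFlow.empty())
    std::cout << "sync_PyrLKOpticalFlow isn't initialized \n";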

Example 9: assert

Node::Node(ros::NodeHandle* nh,
        const cv::Mat& visual, const cv::Mat& depth,
        image_geometry::PinholeCameraModel cam_model,
        cv::Ptr<cv::FeatureDetector> detector,
        cv::Ptr<cv::DescriptorExtractor> extractor,
        cv::Ptr<cv::DescriptorMatcher> matcher,
        const sensor_msgs::PointCloud2ConstPtr& point_cloud,
        unsigned int msg_id,
        unsigned int id,
        const cv::Mat& detection_mask):
        nh_(nh),
        msg_id_(msg_id),
        id_(id),
        cloudMessage_(*point_cloud),
        cam_model_(cam_model),
        matcher_(matcher)
{
    std::clock_t starttime=std::clock();

    ROS_FATAL_COND(detector.empty(), "No valid detector!");
    detector->detect( visual, feature_locations_2d_, detection_mask);// fill 2d locations
    ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime) / (double)CLOCKS_PER_SEC) > 0.01, "timings", "Feature detection runtime: " << ( std::clock() - starttime ) / (double)CLOCKS_PER_SEC );
    ROS_INFO("Found %d Keypoints", (int)feature_locations_2d_.size());

    cloud_pub = nh_->advertise<sensor_msgs::PointCloud2>("/rgbdslam/batch_clouds",20);
    cloud_pub2 = nh_->advertise<sensor_msgs::PointCloud2>("/rgbdslam/my_clouds",20);

    // get the pcl::PointCloud to extract depth values at pixel positions
    std::clock_t starttime5=std::clock();
    // TODO: This takes 0.1 seconds and is not strictly necessary
    //pcl::fromROSMsg(*point_cloud,pc);
    pcl::fromROSMsg(*point_cloud,pc_col);
    ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime5) / (double)CLOCKS_PER_SEC) > 0.01, "timings", "projection runtime: " << ( std::clock() - starttime5 ) / (double)CLOCKS_PER_SEC );

    // project pixels to 3dPositions and create search structures for the gicp
    projectTo3D(depth, feature_locations_2d_, feature_locations_3d_,pc_col); //takes less than 0.01 sec

    std::clock_t starttime4=std::clock();
    // projectTo3D needs a dense cloud to use the points.at(px.x, px.y) call
    ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime4) / (double)CLOCKS_PER_SEC) > 0.01, "timings", "projection runtime: " << ( std::clock() - starttime4 ) / (double)CLOCKS_PER_SEC );

    std::clock_t starttime2=std::clock();
    extractor->compute(visual, feature_locations_2d_, feature_descriptors_); //fill feature_descriptors_ with information 
    assert(feature_locations_2d_.size() == feature_locations_3d_.size());
    ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime2) / (double)CLOCKS_PER_SEC) > 0.01, "timings", "Feature extraction runtime: " << ( std::clock() - starttime2 ) / (double)CLOCKS_PER_SEC );
    flannIndex = NULL;

    ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime) / (double)CLOCKS_PER_SEC) > 0.01, "timings", "constructor runtime: " << ( std::clock() - starttime ) / (double)CLOCKS_PER_SEC << "sec");
}
Author: aa755, Project: scene_labelling, Lines: 51, Source: node.cpp

Example 10: RunTest

void SuperResolution::RunTest(cv::Ptr<cv::superres::SuperResolution> superRes)
{
    const std::string inputVideoName = cvtest::TS::ptr()->get_data_path() + "car.avi";
    const int scale = 2;
    const int iterations = 100;
    const int temporalAreaRadius = 2;

    ASSERT_FALSE( superRes.empty() );

    const int btvKernelSize = superRes->getInt("btvKernelSize");

    superRes->set("scale", scale);
    superRes->set("iterations", iterations);
    superRes->set("temporalAreaRadius", temporalAreaRadius);

    cv::Ptr<cv::superres::FrameSource> goldSource(new AllignedFrameSource(cv::superres::createFrameSource_Video(inputVideoName), scale));
    cv::Ptr<cv::superres::FrameSource> lowResSource(new DegradeFrameSource(new AllignedFrameSource(cv::superres::createFrameSource_Video(inputVideoName), scale), scale));

    // skip first frame
    cv::Mat frame;

    lowResSource->nextFrame(frame);
    goldSource->nextFrame(frame);

    cv::Rect inner(btvKernelSize, btvKernelSize, frame.cols - 2 * btvKernelSize, frame.rows - 2 * btvKernelSize);

    superRes->setInput(lowResSource);

    double srAvgMSSIM = 0.0;
    const int count = 10;

    cv::Mat goldFrame, superResFrame;
    for (int i = 0; i < count; ++i)
    {
        goldSource->nextFrame(goldFrame);
        ASSERT_FALSE( goldFrame.empty() );

        superRes->nextFrame(superResFrame);
        ASSERT_FALSE( superResFrame.empty() );

        const double srMSSIM = MSSIM(goldFrame(inner), superResFrame);

        srAvgMSSIM += srMSSIM;
    }

    srAvgMSSIM /= count;

    EXPECT_GE( srAvgMSSIM, 0.5 );
}
Author: bouffa, Project: opencv, Lines: 49, Source: test_superres.cpp
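
The fixture is driven by a concrete implementation; a sketch of the CPU variant, assuming the BTV-L1 factory from the superres module (OpenCV 2.4 also offers createSuperResolution_BTVL1_GPU):

TEST_F(SuperResolution, BTVL1)
{
    // ASSERT_FALSE( superRes.empty() ) in RunTest fails fast on a null Ptr.
    RunTest( cv::superres::createSuperResolution_BTVL1() );
}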

Example 11: getCommonProperty

double CvCapture_OpenNI::getCommonProperty( int propIdx )
{
    double propValue = 0;

    switch( propIdx )
    {
    // There is a set of properties that correspond to the depth generator by default
    // (i.e. they apply when passed without a particular generator flag). Two reasons for this:
    // 1) We can assume that the depth generator is the main one for a depth sensor.
    // 2) In the initial versions of the OpenNI integration into OpenCV, the value of the
    //    flag CV_CAP_OPENNI_DEPTH_GENERATOR was 0 (it isn't zero now).
    case CV_CAP_PROP_OPENNI_GENERATOR_PRESENT :
    case CV_CAP_PROP_FRAME_WIDTH :
    case CV_CAP_PROP_FRAME_HEIGHT :
    case CV_CAP_PROP_FPS :
    case CV_CAP_PROP_OPENNI_FRAME_MAX_DEPTH :
    case CV_CAP_PROP_OPENNI_BASELINE :
    case CV_CAP_PROP_OPENNI_FOCAL_LENGTH :
    case CV_CAP_PROP_OPENNI_REGISTRATION :
        propValue = getDepthGeneratorProperty( propIdx );
        break;
    case CV_CAP_PROP_OPENNI_APPROX_FRAME_SYNC :
        propValue = !approxSyncGrabber.empty() && approxSyncGrabber->isRun() ? 1. : 0.;
        break;
    case CV_CAP_PROP_OPENNI_MAX_BUFFER_SIZE :
        propValue = maxBufferSize;
        break;
    case CV_CAP_PROP_OPENNI_CIRCLE_BUFFER :
        propValue = isCircleBuffer ? 1. : 0.;
        break;
    case CV_CAP_PROP_OPENNI_MAX_TIME_DURATION :
        propValue = maxTimeDuration;
        break;
    default :
    {
        std::stringstream ss;
        ss << "Such parameter (propIdx=" << propIdx << ") isn't supported for getting.\n";
        CV_Error( CV_StsBadArg, ss.str().c_str() );
    }
    }

    return propValue;
}
Author: AndreSteenveld, Project: opencv, Lines: 43, Source: cap_openni.cpp
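
This is the read-side counterpart of Example 1: queried through cv::VideoCapture::get, the APPROX_FRAME_SYNC property reports whether the synchronization grabber is running, which is exactly the !approxSyncGrabber.empty() && isRun() check above. A one-line sketch, assuming the capture object from the Example 1 note:

double syncOn = capture.get( CV_CAP_PROP_OPENNI_APPROX_FRAME_SYNC );   // 1.0 while the grabber runs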

Example 12: WrapPhase

cv::Mat NFringeStructuredLight::WrapPhase( vector<cv::Mat> fringeImages, cv::Ptr<cv::FilterEngine> filter )
{
  Utils::AssertOrThrowIfFalse(fringeImages.size() == m_numberOfFringes, 
	"Invalid number of fringes passed into phase wrapper");

  // Should be the same size as our fringe images 
  // and floating point precision for decimal phase values
  cv::Mat sine(fringeImages[0].size(), CV_32F, 0.0f);
  cv::Mat cosine(fringeImages[0].size(), CV_32F, 0.0f);
  cv::Mat phase(fringeImages[0].size(), CV_32F, 0.0f);

  for(int row = 0; row < phase.rows; ++row)
  {
	for(int col = 0; col < phase.cols; ++col)
	{
	  for(int fringe = 0; fringe < m_numberOfFringes; ++fringe)
	  {
		sine.at<float>(row, col) += ( float( fringeImages[fringe].at<uchar>(row, col) ) / 255.0 ) * sin(2.0 * M_PI * float(fringe) / float(m_numberOfFringes));
		cosine.at<float>(row, col) += ( float( fringeImages[fringe].at<uchar>(row, col) ) / 255.0 ) * cos(2.0 * M_PI * float(fringe) / float(m_numberOfFringes));
	  }
	}
  }

  // Filter out noise in the sine and cosine components
  if( !filter.empty( ) )
  {
	filter->apply( sine, sine );
	filter->apply( cosine, cosine );
  }

  // Now perform phase wrapping
  for(int row = 0; row < phase.rows; ++row)
  {
	for(int col = 0; col < phase.cols; ++col)
	{
	  // The sign is negated so that the phase gradient increases from 0 -> rows or 0 -> cols
	  phase.at<float>(row, col) = -atan2( sine.at<float>( row, col ), cosine.at<float>( row, col ) );
	}
  }
  return phase;
}
Author: wflohry, Project: PortalCalibration, Lines: 41, Source: NFringeStructuredLight.cpp
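
The optional filter argument is an OpenCV 2.4-era cv::FilterEngine, which was removed from the public API in OpenCV 3. A hedged sketch of supplying one; the NFringeStructuredLight constructor arguments and the fringeImages vector are hypothetical placeholders:

// Gaussian smoothing engine for the CV_32F sine/cosine planes (OpenCV 2.4 API).
cv::Ptr<cv::FilterEngine> filter = cv::createGaussianFilter( CV_32F, cv::Size(5, 5), 1.0 );
NFringeStructuredLight fringer( 4 );   // hypothetical: four phase-shifted fringe images
cv::Mat wrappedPhase = fringer.WrapPhase( fringeImages, filter );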

Example 13: main

int main( int argc, char **argv )
{
	if(argc<4) {
		usage(argc,argv);
		return 1;
	}
	is = helper::createImageSource(argv[1]);
	if(is.empty() || is->done()) {
		loglne("[main] createImageSource failed or no valid imagesource!");
		return -1;
	}
	is->pause(false);
	is->reportInfo();
	is->get(frame);
	imgW = frame.cols; imgH = frame.rows;
	videoFromWebcam = false;
	if( is->classname() == "ImageSource_Camera" ) {
		videoFromWebcam = true;
	}

	loglni("[main] loading K matrix from: "<<argv[2]);
	double K[9];
	std::ifstream kfile(argv[2]);
	for(int i=0; i<9; ++i) kfile >> K[i];
	tracker.loadK(K);
	loglni("[main] K matrix loaded:");
	loglni(helper::PrintMat<>(3,3,K));

	loglni("[main] load template image from: "<<argv[3]);
	tracker.loadTemplate(argv[3]);

	//////////////// TagDetector /////////////////////////////////////////
	int tagid = 0; //default tag16h5
	if(argc>5) tagid = atoi(argv[5]);
	tagFamily = TagFamilyFactory::create(tagid);
	if(tagFamily.empty()) {
		loglne("[main] create TagFamily fail!");
		return -1;
	}
	detector = new TagDetector(tagFamily);
	if(detector.empty()) {
		loglne("[main] create TagDetector fail!");
		return -1;
	}
	Mat temp = imread(argv[3]);
	if( findAprilTag(temp, 0, HI, true) ) {
		namedWindow("template");
		imshow("template", temp);
		iHI = HI.inv();
	} else {
		loglne("[main error] detector did not find any apriltag on template image!");
		return -1;
	}

	//////////////// OSG ////////////////////////////////////////////////
	osg::ref_ptr<osg::Group> root = new osg::Group;

	string scenefilename = (argc>4?argv[4]:("cow.osg"));
	osg::ref_ptr<osg::Node> cow = osgDB::readNodeFile(scenefilename);
	arscene = new helper::ARSceneRoot;
	helper::FixMat<3,double>::Type matK = helper::FixMat<3,double>::ConvertType(K);
	CV2CG::cv2cg(matK,0.01,500,imgW,imgH,*arscene);
	manipMat = new osg::MatrixTransform(osg::Matrix::identity());
	manipMat->addChild(cow);
	manipMat->getOrCreateStateSet()->setMode(GL_NORMALIZE, osg::StateAttribute::ON);
	arscene->addChild(manipMat);

	osg::ref_ptr<osg::Image> backgroundImage = new osg::Image;
	helper::cvmat2osgimage(frame,backgroundImage);
	arvideo = new helper::ARVideoBackground(backgroundImage);
	root->setUpdateCallback(new ARUpdateCallback);

	root->addChild(arvideo);
	root->addChild(arscene);

	viewer.setSceneData(root);
	viewer.addEventHandler(new osgViewer::StatsHandler);
	viewer.addEventHandler(new osgViewer::WindowSizeHandler);
	viewer.addEventHandler(new QuitHandler);

	//start tracking thread
	OpenThreads::Thread::Init();
	TrackThread* thr = new TrackThread;
	thr->start();

	viewer.run();

	delete thr;
	loglni("[main] DONE...exit!");
	return 0;
}
Author: andrewjchen, Project: ethzasl_apriltag, Lines: 91, Source: main.cpp

Example 14: calculate

void MapperGradAffine::calculate(
    const cv::Mat& img1, const cv::Mat& image2, cv::Ptr<Map>& res) const
{
    Mat gradx, grady, imgDiff;
    Mat img2;

    CV_DbgAssert(img1.size() == image2.size());
    CV_DbgAssert(img1.channels() == image2.channels());
    CV_DbgAssert(img1.channels() == 1 || img1.channels() == 3);

    if(!res.empty()) {
        // We have initial values for the registration: we move img2 to that initial reference
        res->inverseWarp(image2, img2);
    } else {
        img2 = image2;
    }

    // Get gradient in all channels
    gradient(img1, img2, gradx, grady, imgDiff);

    // Matrices with reference frame coordinates
    Mat grid_r, grid_c;
    grid(img1, grid_r, grid_c);

    // Calculate parameters using least squares
    Matx<double, 6, 6> A;
    Vec<double, 6> b;
    // For each value in A, all the matrix elements are added and then the channels are also added,
    // so we have two calls to "sum". The result can be found in the first element of the final
    // Scalar object.
    Mat xIx = grid_c.mul(gradx);
    Mat xIy = grid_c.mul(grady);
    Mat yIx = grid_r.mul(gradx);
    Mat yIy = grid_r.mul(grady);
    Mat Ix2 = gradx.mul(gradx);
    Mat Iy2 = grady.mul(grady);
    Mat xy = grid_c.mul(grid_r);
    Mat IxIy = gradx.mul(grady);
    A(0, 0) = sum(sum(sqr(xIx)))[0];
    A(0, 1) = sum(sum(xy.mul(Ix2)))[0];
    A(0, 2) = sum(sum(grid_c.mul(Ix2)))[0];
    A(0, 3) = sum(sum(sqr(grid_c).mul(IxIy)))[0];
    A(0, 4) = sum(sum(xy.mul(IxIy)))[0];
    A(0, 5) = sum(sum(grid_c.mul(IxIy)))[0];
    A(1, 1) = sum(sum(sqr(yIx)))[0];
    A(1, 2) = sum(sum(grid_r.mul(Ix2)))[0];
    A(1, 3) = A(0, 4);
    A(1, 4) = sum(sum(sqr(grid_r).mul(IxIy)))[0];
    A(1, 5) = sum(sum(grid_r.mul(IxIy)))[0];
    A(2, 2) = sum(sum(Ix2))[0];
    A(2, 3) = A(0, 5);
    A(2, 4) = A(1, 5);
    A(2, 5) = sum(sum(IxIy))[0];
    A(3, 3) = sum(sum(sqr(xIy)))[0];
    A(3, 4) = sum(sum(xy.mul(Iy2)))[0];
    A(3, 5) = sum(sum(grid_c.mul(Iy2)))[0];
    A(4, 4) = sum(sum(sqr(yIy)))[0];
    A(4, 5) = sum(sum(grid_r.mul(Iy2)))[0];
    A(5, 5) = sum(sum(Iy2))[0];
    // Lower half values (A is symmetric)
    A(1, 0) = A(0, 1);
    A(2, 0) = A(0, 2);
    A(2, 1) = A(1, 2);
    A(3, 0) = A(0, 3);
    A(3, 1) = A(1, 3);
    A(3, 2) = A(2, 3);
    A(4, 0) = A(0, 4);
    A(4, 1) = A(1, 4);
    A(4, 2) = A(2, 4);
    A(4, 3) = A(3, 4);
    A(5, 0) = A(0, 5);
    A(5, 1) = A(1, 5);
    A(5, 2) = A(2, 5);
    A(5, 3) = A(3, 5);
    A(5, 4) = A(4, 5);

    // Calculation of b
    b(0) = -sum(sum(imgDiff.mul(xIx)))[0];
    b(1) = -sum(sum(imgDiff.mul(yIx)))[0];
    b(2) = -sum(sum(imgDiff.mul(gradx)))[0];
    b(3) = -sum(sum(imgDiff.mul(xIy)))[0];
    b(4) = -sum(sum(imgDiff.mul(yIy)))[0];
    b(5) = -sum(sum(imgDiff.mul(grady)))[0];

    // Calculate affine transformation. We use Cholesky decomposition, as A is symmetric.
    Vec<double, 6> k = A.inv(DECOMP_CHOLESKY)*b;

    Matx<double, 2, 2> linTr(k(0) + 1., k(1), k(3), k(4) + 1.);
    Vec<double, 2> shift(k(2), k(5));
    if(res.empty()) {
        res = Ptr<Map>(new MapAffine(linTr, shift));
    } else {
        MapAffine newTr(linTr, shift);
        res->compose(newTr);
    }
}
Author: 23pointsNorth, Project: opencv_contrib, Lines: 96, Source: mappergradaffine.cpp

Example 15: CascadeDetectorAdapter

 CascadeDetectorAdapter(cv::Ptr<cv::CascadeClassifier> detector):
     Detector(detector)
 {
     CV_Assert(!detector.empty());
 }
Author: Linyes, Project: opencv, Lines: 5, Source: detection_based_tracker_sample.cpp


Note: The cv::Ptr::empty examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors, and distribution and use must follow the corresponding project's license. Do not republish without permission.