This article collects typical usage examples of the cv::Ptr class in C++. If you are wondering what cv::Ptr is for, how to use it, or what real code that uses it looks like, the curated class examples below should help.
The following sections present 15 code examples of the Ptr class, sorted by popularity by default.
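Before the examples, here is a minimal, self-contained sketch of the cv::Ptr basics that the examples below rely on. It assumes OpenCV 3.x, and the ORB detector is only an illustration, not taken from any of the examples.
#include <opencv2/core.hpp>
#include <opencv2/features2d.hpp>

int main()
{
    // Factory functions such as ORB::create() already return a reference-counted cv::Ptr.
    cv::Ptr<cv::ORB> detector = cv::ORB::create();

    // cv::Ptr forwards operator-> to the owned object and can be tested with empty().
    if (!detector.empty())
        detector->setMaxFeatures(500);

    // Copies share ownership (including to a base-class Ptr); the object is released
    // automatically when the last Ptr referring to it is destroyed.
    cv::Ptr<cv::Feature2D> base = detector;
    return 0;
}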
Example 1: callback
void callback(const sensor_msgs::ImageConstPtr& msg)
{
if (image_0_ == NULL)
{
// Take first image:
try
{
image_0_ = cv_bridge::toCvCopy(msg,
sensor_msgs::image_encodings::isColor(msg->encoding) ?
sensor_msgs::image_encodings::BGR8 :
sensor_msgs::image_encodings::MONO8);
}
catch (cv_bridge::Exception& e)
{
ROS_ERROR_STREAM("Failed to take first image: " << e.what());
return;
}
ROS_INFO("First image taken");
// Detect keypoints:
detector_->detect(image_0_->image, keypoints_0_);
ROS_INFO_STREAM(keypoints_0_.size() << " points found.");
// Extract keypoints' descriptors:
extractor_->compute(image_0_->image, keypoints_0_, descriptors_0_);
}
else
{
// Take second image:
try
{
image_1_ = cv_bridge::toCvShare(msg,
sensor_msgs::image_encodings::isColor(msg->encoding) ?
sensor_msgs::image_encodings::BGR8 :
sensor_msgs::image_encodings::MONO8);
}
catch (cv_bridge::Exception& e)
{
ROS_ERROR_STREAM("Failed to take image: " << e.what());
return;
}
// Detect keypoints:
std::vector<cv::KeyPoint> keypoints_1;
detector_->detect(image_1_->image, keypoints_1);
ROS_INFO_STREAM(keypoints_1.size() << " points found on the new image.");
// Extract keypoints' descriptors:
cv::Mat descriptors_1;
extractor_->compute(image_1_->image, keypoints_1, descriptors_1);
// Compute matches:
std::vector<cv::DMatch> matches;
match(descriptors_0_, descriptors_1, matches);
// Compute homography:
cv::Mat H;
homography(keypoints_0_, keypoints_1, matches, H);
// Draw matches:
const int s = std::max(image_0_->image.rows, image_0_->image.cols);
cv::Size size(s, s);
cv::Mat draw_image;
warped_image_ = boost::make_shared<cv_bridge::CvImage>(
image_0_->header, image_0_->encoding,
cv::Mat(size, image_0_->image.type()));
if (!H.empty()) // filter outliers
{
std::vector<char> matchesMask(matches.size(), 0);
const size_t N = matches.size();
std::vector<int> queryIdxs(N), trainIdxs(N);
for (size_t i = 0; i < N; ++i)
{
queryIdxs[i] = matches[i].queryIdx;
trainIdxs[i] = matches[i].trainIdx;
}
std::vector<cv::Point2f> points1, points2;
cv::KeyPoint::convert(keypoints_0_, points1, queryIdxs);
cv::KeyPoint::convert(keypoints_1, points2, trainIdxs);
cv::Mat points1t;
cv::perspectiveTransform(cv::Mat(points1), points1t, H);
double maxInlierDist = threshold_ < 0 ? 3 : threshold_;
for (size_t i1 = 0; i1 < points1.size(); ++i1)
{
if (cv::norm(points2[i1] - points1t.at<cv::Point2f>((int)i1,0)) <= maxInlierDist ) // inlier
matchesMask[i1] = 1;
}
// draw inliers
cv::drawMatches(
image_0_->image, keypoints_0_,
image_1_->image, keypoints_1, matches,
draw_image, cv::Scalar(0, 255, 0), cv::Scalar(0, 0, 255),
matchesMask,
cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
//......... part of the code is omitted here .........
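The callback above relies on two helpers, match() and homography(), whose bodies are not shown. Purely as an illustration, here is one plausible way to implement them with cv::BFMatcher and cv::findHomography; the signatures are inferred from the call sites, and every concrete choice (matcher type, norm, RANSAC threshold) is an assumption, not the original code.
#include <opencv2/features2d.hpp>
#include <opencv2/calib3d.hpp>

// Hypothetical helpers inferred from the call sites above (not the original code).
void match(const cv::Mat& descriptors_0, const cv::Mat& descriptors_1,
           std::vector<cv::DMatch>& matches)
{
    // Hamming norm suits binary descriptors such as ORB/BRIEF; use cv::NORM_L2 for SIFT/SURF.
    cv::BFMatcher matcher(cv::NORM_HAMMING, true);
    matcher.match(descriptors_0, descriptors_1, matches);
}

void homography(const std::vector<cv::KeyPoint>& keypoints_0,
                const std::vector<cv::KeyPoint>& keypoints_1,
                const std::vector<cv::DMatch>& matches, cv::Mat& H)
{
    std::vector<cv::Point2f> points_0, points_1;
    for (size_t i = 0; i < matches.size(); ++i)
    {
        points_0.push_back(keypoints_0[matches[i].queryIdx].pt);
        points_1.push_back(keypoints_1[matches[i].trainIdx].pt);
    }
    if (points_0.size() >= 4)
        H = cv::findHomography(points_0, points_1, cv::RANSAC, 3.0);
}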
Example 2: CascadeDetectorAdapter
CascadeDetectorAdapter(cv::Ptr<cv::CascadeClassifier> detector):
Detector(detector)
{
CV_Assert(!detector.empty());
}
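This adapter constructor mirrors the pattern used in OpenCV's DetectionBasedTracker samples. A hedged sketch of how such an adapter is typically wired up, assuming CascadeDetectorAdapter derives from cv::DetectionBasedTracker::IDetector as in those samples; the cascade file name and parameters are placeholders.
#include <opencv2/objdetect/detection_based_tracker.hpp>

cv::Ptr<cv::CascadeClassifier> mainCascade =
    cv::makePtr<cv::CascadeClassifier>("lbpcascade_frontalface.xml");
cv::Ptr<cv::DetectionBasedTracker::IDetector> mainDetector =
    cv::makePtr<CascadeDetectorAdapter>(mainCascade);

cv::Ptr<cv::CascadeClassifier> trackingCascade =
    cv::makePtr<cv::CascadeClassifier>("lbpcascade_frontalface.xml");
cv::Ptr<cv::DetectionBasedTracker::IDetector> trackingDetector =
    cv::makePtr<CascadeDetectorAdapter>(trackingCascade);

cv::DetectionBasedTracker::Parameters params;
cv::DetectionBasedTracker tracker(mainDetector, trackingDetector, params);
tracker.run();   // starts the detection thread; feed frames with tracker.process(grayFrame)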
Example 3: calculate
void MapperGradEuclid::calculate(
const cv::Mat& img1, const cv::Mat& image2, cv::Ptr<Map>& res) const
{
Mat gradx, grady, imgDiff;
Mat img2;
CV_DbgAssert(img1.size() == image2.size());
CV_DbgAssert(img1.channels() == image2.channels());
CV_DbgAssert(img1.channels() == 1 || img1.channels() == 3);
if(!res.empty()) {
// We have initial values for the registration: we move img2 to that initial reference
res->inverseWarp(image2, img2);
} else {
img2 = image2;
}
// Matrices with reference frame coordinates
Mat grid_r, grid_c;
grid(img1, grid_r, grid_c);
// Get gradient in all channels
gradient(img1, img2, gradx, grady, imgDiff);
// Calculate parameters using least squares
Matx<double, 3, 3> A;
Vec<double, 3> b;
// For each value in A, all the matrix elements are added and then the channels are also added,
// so we have two calls to "sum". The result can be found in the first element of the final
// Scalar object.
Mat xIy_yIx = grid_c.mul(grady);
xIy_yIx -= grid_r.mul(gradx);
A(0, 0) = sum(sum(gradx.mul(gradx)))[0];
A(0, 1) = sum(sum(gradx.mul(grady)))[0];
A(0, 2) = sum(sum(gradx.mul(xIy_yIx)))[0];
A(1, 1) = sum(sum(grady.mul(grady)))[0];
A(1, 2) = sum(sum(grady.mul(xIy_yIx)))[0];
A(2, 2) = sum(sum(xIy_yIx.mul(xIy_yIx)))[0];
A(1, 0) = A(0, 1);
A(2, 0) = A(0, 2);
A(2, 1) = A(1, 2);
b(0) = -sum(sum(imgDiff.mul(gradx)))[0];
b(1) = -sum(sum(imgDiff.mul(grady)))[0];
b(2) = -sum(sum(imgDiff.mul(xIy_yIx)))[0];
// Calculate parameters. We use Cholesky decomposition, as A is symmetric.
Vec<double, 3> k = A.inv(DECOMP_CHOLESKY)*b;
double cosT = cos(k(2));
double sinT = sin(k(2));
Matx<double, 2, 2> linTr(cosT, -sinT, sinT, cosT);
Vec<double, 2> shift(k(0), k(1));
if(res.empty()) {
res = Ptr<Map>(new MapAffine(linTr, shift));
} else {
MapAffine newTr(linTr, shift);
res->compose(newTr);
}
}
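MapperGradEuclid is part of the opencv_contrib reg module and is normally driven through MapperPyramid rather than called directly. A rough usage sketch, assuming the OpenCV 3.x reg API and floating-point input images:
#include <opencv2/reg/mappergradeuclid.hpp>
#include <opencv2/reg/mapperpyramid.hpp>
#include <opencv2/reg/mapaffine.hpp>

// img1 and img2 are assumed to be CV_64FC1 (or CV_64FC3) images of identical size.
cv::reg::MapperGradEuclid mapper;
cv::reg::MapperPyramid mappPyr(mapper);     // coarse-to-fine wrapper around the base mapper
cv::Ptr<cv::reg::Map> mapPtr;
mappPyr.calculate(img1, img2, mapPtr);      // fills mapPtr with the estimated Euclidean motion

// The result is a MapAffine restricted to a rotation plus a translation.
cv::reg::MapAffine* mapAff = dynamic_cast<cv::reg::MapAffine*>(mapPtr.get());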
Example 4: main
int main(int argc, char* argv[]) {
// welcome message
std::cout<<"*********************************************************************************"<<std::endl;
std::cout<<"* Retina demonstration for High Dynamic Range compression (tone-mapping) : demonstrates the use of a wrapper class of the Gipsa/Listic Labs retina model."<<std::endl;
std::cout<<"* This retina model allows spatio-temporal image processing (applied on still images, video sequences)."<<std::endl;
std::cout<<"* This demo focuses demonstration of the dynamic compression capabilities of the model"<<std::endl;
std::cout<<"* => the main application is tone mapping of HDR images (i.e. see on a 8bit display a more than 8bits coded (up to 16bits) image with details in high and low luminance ranges"<<std::endl;
std::cout<<"* The retina model still have the following properties:"<<std::endl;
std::cout<<"* => It applies a spectral whithening (mid-frequency details enhancement)"<<std::endl;
std::cout<<"* => high frequency spatio-temporal noise reduction"<<std::endl;
std::cout<<"* => low frequency luminance to be reduced (luminance range compression)"<<std::endl;
std::cout<<"* => local logarithmic luminance compression allows details to be enhanced in low light conditions\n"<<std::endl;
std::cout<<"* for more information, reer to the following papers :"<<std::endl;
std::cout<<"* Benoit A., Caplier A., Durette B., Herault, J., \"USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING\", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011"<<std::endl;
std::cout<<"* Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891."<<std::endl;
std::cout<<"* => reports comments/remarks at [email protected]"<<std::endl;
std::cout<<"* => more informations and papers at : http://sites.google.com/site/benoitalexandrevision/"<<std::endl;
std::cout<<"*********************************************************************************"<<std::endl;
std::cout<<"** WARNING : this sample requires OpenCV to be configured with OpenEXR support **"<<std::endl;
std::cout<<"*********************************************************************************"<<std::endl;
std::cout<<"*** You can use free tools to generate OpenEXR images from images sets : ***"<<std::endl;
std::cout<<"*** => 1. take a set of photos from the same viewpoint using bracketing ***"<<std::endl;
std::cout<<"*** => 2. generate an OpenEXR image with tools like qtpfsgui.sourceforge.net ***"<<std::endl;
std::cout<<"*** => 3. apply tone mapping with this program ***"<<std::endl;
std::cout<<"*********************************************************************************"<<std::endl;
// basic input arguments checking
if (argc<2)
{
help("bad number of parameter");
return -1;
}
bool useLogSampling = !strcmp(argv[argc-1], "log"); // check if user wants retina log sampling processing
int chosenMethod=0;
if (!strcmp(argv[argc-1], "fast"))
{
chosenMethod=1;
std::cout<<"Using fast method (no spectral whithning), adaptation of Meylan&al 2008 method"<<std::endl;
}
std::string inputImageName=argv[1];
//////////////////////////////////////////////////////////////////////////////
// checking input media type (still image, video file, live video acquisition)
std::cout<<"RetinaDemo: processing image "<<inputImageName<<std::endl;
// image processing case
// declare the retina input buffer... that will be fed differently in regard of the input media
inputImage = cv::imread(inputImageName, -1); // load the image unchanged (flag -1 keeps original depth and channels, needed for OpenEXR data)
std::cout<<"=> image size (h,w) = "<<inputImage.size().height<<", "<<inputImage.size().width<<std::endl;
if (!inputImage.total())
{
help("could not load image, program end");
return -1;
}
// rescale between 0 and 1
normalize(inputImage, inputImage, 0.0, 1.0, cv::NORM_MINMAX);
cv::Mat gammaTransformedImage;
cv::pow(inputImage, 1./5, gammaTransformedImage); // apply gamma curve: img = img ** (1./5)
imshow("EXR image original image, 16bits=>8bits linear rescaling ", inputImage);
imshow("EXR image with basic processing : 16bits=>8bits with gamma correction", gammaTransformedImage);
if (inputImage.empty())
{
help("Input image could not be loaded, aborting");
return -1;
}
//////////////////////////////////////////////////////////////////////////////
// Program start in a try/catch safety context (Retina may throw errors)
try
{
/* create a retina instance with default parameters setup, uncomment the initialisation you want to test
* -> if the last parameter is 'log', then activate log sampling (favour foveal vision and subsamples peripheral vision)
*/
if (useLogSampling)
{
retina = cv::bioinspired::createRetina(inputImage.size(),true, cv::bioinspired::RETINA_COLOR_BAYER, true, 2.0, 10.0);
}
else// -> else allocate "classical" retina :
retina = cv::bioinspired::createRetina(inputImage.size());
// create a fast retina tone mapper (Meylan et al. algorithm)
std::cout<<"Allocating fast tone mapper..."<<std::endl;
//cv::Ptr<cv::RetinaFastToneMapping> fastToneMapper=createRetinaFastToneMapping(inputImage.size());
std::cout<<"Fast tone mapper allocated"<<std::endl;
// save default retina parameters file in order to let you see this and maybe modify it and reload using method "setup"
retina->write("RetinaDefaultParameters.xml");
// deactivate magnocellular pathway processing (motion information extraction) since it is not useful here
retina->activateMovingContoursProcessing(false);
// declare retina output buffers
cv::Mat retinaOutput_parvo;
/////////////////////////////////////////////
// prepare displays and interactions
histogramClippingValue=0; // default value... updated with interface slider
//inputRescaleMat = inputImage;
//outputRescaleMat = imageInputRescaled;
//......... part of the code is omitted here .........
Example 5: findCirclesGridAB
bool findCirclesGridAB( cv::InputArray _image, cv::Size patternSize,
cv::OutputArray _centers, int flags, const cv::Ptr<cv::FeatureDetector> &blobDetector )
{
bool isAsymmetricGrid = (flags & cv::CALIB_CB_ASYMMETRIC_GRID) ? true : false;
bool isSymmetricGrid = (flags & cv::CALIB_CB_SYMMETRIC_GRID ) ? true : false;
CV_Assert(isAsymmetricGrid ^ isSymmetricGrid);
cv::Mat image = _image.getMat();
std::vector<cv::Point2f> centers;
std::vector<cv::KeyPoint> keypoints;
blobDetector->detect(image, keypoints);
std::vector<cv::Point2f> points;
for (size_t i = 0; i < keypoints.size(); i++)
{
points.push_back (keypoints[i].pt);
}
if(flags & cv::CALIB_CB_CLUSTERING)
{
CirclesGridClusterFinder circlesGridClusterFinder(isAsymmetricGrid);
circlesGridClusterFinder.findGrid(points, patternSize, centers);
cv::Mat(centers).copyTo(_centers);
return !centers.empty();
}
CirclesGridFinderParameters parameters;
parameters.vertexPenalty = -0.6f;
parameters.vertexGain = 1;
parameters.existingVertexGain = 10000;
parameters.edgeGain = 1;
parameters.edgePenalty = -0.6f;
if(flags & cv::CALIB_CB_ASYMMETRIC_GRID)
parameters.gridType = CirclesGridFinderParameters::ASYMMETRIC_GRID;
if(flags & cv::CALIB_CB_SYMMETRIC_GRID)
parameters.gridType = CirclesGridFinderParameters::SYMMETRIC_GRID;
const int attempts = 2;
const size_t minHomographyPoints = 4;
cv::Mat H;
for (int i = 0; i < attempts; i++)
{
centers.clear();
CirclesGridFinder boxFinder(patternSize, points, parameters);
bool isFound = false;
//#define BE_QUIET 1
#if BE_QUIET
void* oldCbkData;
//cv::ErrorCallback oldCbk = redirectError(quiet_error, 0, &oldCbkData);
#endif
try
{
isFound = boxFinder.findHoles();
}
catch (const cv::Exception&)
{
}
#if BE_QUIET
redirectError(oldCbk, oldCbkData);
#endif
if (isFound)
{
switch(parameters.gridType)
{
case CirclesGridFinderParameters::SYMMETRIC_GRID:
boxFinder.getHoles(centers);
break;
case CirclesGridFinderParameters::ASYMMETRIC_GRID:
boxFinder.getAsymmetricHoles(centers);
break;
default:
CV_Error(CV_StsBadArg, "Unknown pattern type");
}
if (i != 0)
{
cv::Mat orgPointsMat;
cv::transform(centers, orgPointsMat, H.inv());
cv::convertPointsFromHomogeneous(orgPointsMat, centers);
}
cv::Mat(centers).copyTo(_centers);
return true;
}
boxFinder.getHoles(centers);
if (i != attempts - 1)
{
if (centers.size() < minHomographyPoints)
break;
H = CirclesGridFinder::rectifyGrid(boxFinder.getDetectedGridSize(), centers, points, points);
}
}
cv::Mat(centers).copyTo(_centers);
return false;
}
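A hedged usage sketch for the function above; the image file, the 4x11 pattern size and the blob-detector settings are placeholders (OpenCV 3.x SimpleBlobDetector API assumed).
cv::Mat board = cv::imread("asymmetric_circles.png");
cv::SimpleBlobDetector::Params blobParams;          // tune minArea/maxArea to the printed circle size
cv::Ptr<cv::FeatureDetector> blobDetector = cv::SimpleBlobDetector::create(blobParams);

std::vector<cv::Point2f> centers;
bool found = findCirclesGridAB(board, cv::Size(4, 11), centers,
                               cv::CALIB_CB_ASYMMETRIC_GRID, blobDetector);
if (found)
    cv::drawChessboardCorners(board, cv::Size(4, 11), cv::Mat(centers), found);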
Example 6: calculate
void MapperGradProj::calculate(
const cv::Mat& img1, const cv::Mat& image2, cv::Ptr<Map>& res) const
{
Mat gradx, grady, imgDiff;
Mat img2;
CV_DbgAssert(img1.size() == image2.size());
CV_DbgAssert(img1.channels() == image2.channels());
CV_DbgAssert(img1.channels() == 1 || img1.channels() == 3);
if(!res.empty()) {
// We have initial values for the registration: we move img2 to that initial reference
res->inverseWarp(image2, img2);
} else {
img2 = image2;
}
// Get gradient in all channels
gradient(img1, img2, gradx, grady, imgDiff);
// Matrices with reference frame coordinates
Mat grid_r, grid_c;
grid(img1, grid_r, grid_c);
// Calculate parameters using least squares
Matx<double, 8, 8> A;
Vec<double, 8> b;
// For each value in A, all the matrix elements are added and then the channels are also added,
// so we have two calls to "sum". The result can be found in the first element of the final
// Scalar object.
Mat xIx = grid_c.mul(gradx);
Mat xIy = grid_c.mul(grady);
Mat yIx = grid_r.mul(gradx);
Mat yIy = grid_r.mul(grady);
Mat Ix2 = gradx.mul(gradx);
Mat Iy2 = grady.mul(grady);
Mat xy = grid_c.mul(grid_r);
Mat IxIy = gradx.mul(grady);
Mat x2 = grid_c.mul(grid_c);
Mat y2 = grid_r.mul(grid_r);
Mat G = xIx + yIy;
Mat G2 = sqr(G);
Mat IxG = gradx.mul(G);
Mat IyG = grady.mul(G);
A(0, 0) = sum(sum(x2.mul(Ix2)))[0];
A(1, 0) = sum(sum(xy.mul(Ix2)))[0];
A(2, 0) = sum(sum(grid_c.mul(Ix2)))[0];
A(3, 0) = sum(sum(x2.mul(IxIy)))[0];
A(4, 0) = sum(sum(xy.mul(IxIy)))[0];
A(5, 0) = sum(sum(grid_c.mul(IxIy)))[0];
A(6, 0) = -sum(sum(x2.mul(IxG)))[0];
A(7, 0) = -sum(sum(xy.mul(IxG)))[0];
A(1, 1) = sum(sum(y2.mul(Ix2)))[0];
A(2, 1) = sum(sum(grid_r.mul(Ix2)))[0];
A(3, 1) = A(4, 0);
A(4, 1) = sum(sum(y2.mul(IxIy)))[0];
A(5, 1) = sum(sum(grid_r.mul(IxIy)))[0];
A(6, 1) = A(7, 0);
A(7, 1) = -sum(sum(y2.mul(IxG)))[0];
A(2, 2) = sum(sum(Ix2))[0];
A(3, 2) = A(5, 0);
A(4, 2) = A(5, 1);
A(5, 2) = sum(sum(IxIy))[0];
A(6, 2) = -sum(sum(grid_c.mul(IxG)))[0];
A(7, 2) = -sum(sum(grid_r.mul(IxG)))[0];
A(3, 3) = sum(sum(x2.mul(Iy2)))[0];
A(4, 3) = sum(sum(xy.mul(Iy2)))[0];
A(5, 3) = sum(sum(grid_c.mul(Iy2)))[0];
A(6, 3) = -sum(sum(x2.mul(IyG)))[0];
A(7, 3) = -sum(sum(xy.mul(IyG)))[0];
A(4, 4) = sum(sum(y2.mul(Iy2)))[0];
A(5, 4) = sum(sum(grid_r.mul(Iy2)))[0];
A(6, 4) = A(7, 3);
A(7, 4) = -sum(sum(y2.mul(IyG)))[0];
A(5, 5) = sum(sum(Iy2))[0];
A(6, 5) = -sum(sum(grid_c.mul(IyG)))[0];
A(7, 5) = -sum(sum(grid_r.mul(IyG)))[0];
A(6, 6) = sum(sum(x2.mul(G2)))[0];
A(7, 6) = sum(sum(xy.mul(G2)))[0];
A(7, 7) = sum(sum(y2.mul(G2)))[0];
// Upper half values (A is symmetric)
A(0, 1) = A(1, 0);
A(0, 2) = A(2, 0);
A(0, 3) = A(3, 0);
A(0, 4) = A(4, 0);
A(0, 5) = A(5, 0);
A(0, 6) = A(6, 0);
A(0, 7) = A(7, 0);
A(1, 2) = A(2, 1);
A(1, 3) = A(3, 1);
//......... part of the code is omitted here .........
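MapperGradProj is used exactly like MapperGradEuclid from Example 3, only the recovered motion model is an 8-parameter projective transform. A short hedged sketch (opencv_contrib reg module, OpenCV 3.x API assumed):
#include <opencv2/reg/mappergradproj.hpp>
#include <opencv2/reg/mapperpyramid.hpp>

cv::reg::MapperGradProj mapper;
cv::reg::MapperPyramid mappPyr(mapper);
cv::Ptr<cv::reg::Map> mapPtr;
mappPyr.calculate(img1, img2, mapPtr);      // img1, img2: floating-point images of the same size

cv::Mat warped;
mapPtr->inverseWarp(img2, warped);          // bring img2 back into the reference frame of img1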
Example 7: stereoSelectorCallback
void stereoSelectorCallback(const sensor_msgs::Image::ConstPtr& image_ptr)
{
if(!capture_image)
return;
cv_bridge::CvImagePtr cv_ptr;
try
{
cv_ptr = cv_bridge::toCvCopy(image_ptr, sensor_msgs::image_encodings::RGB8);
}
catch (cv_bridge::Exception& e)
{
ROS_ERROR("cv_bridge exception: %s", e.what());
return;
}
cv::cvtColor(cv_ptr->image, input_image, CV_BGR2RGB);
string object_name;
//input_image = crop_hand(input_image);
float certainty = orbit->recognizeObject(input_image, object_name, Orbit::BAG_OF_WORDS_SVM);
/*
* Clean stabilizer if gesture has not been seen in a while
*/
ros::Time now = ros::Time::now();
ros::Duration diff_last_hand_received = now - last_hand_received;
last_hand_received = now;
if(diff_last_hand_received.toSec()>1)
{
for(unsigned int i = 0; i<stabilizer.size(); i++)
{
stabilizer[i] = 0;
}
}
/*
* Update stabilizer when the gesture is not recognized
*/
if(certainty<(float)certainty_threshold)
{
for(unsigned int i = 0; i<stabilizer.size()-1; i++)
{
if(stabilizer[i]>0)
stabilizer[i]--;
}
if(stabilizer[stabilizer.size()-1] < max_stabilizer)
stabilizer[stabilizer.size()-1]++;
return;
}
else
{
if(stabilizer[stabilizer.size()-1] >= 2)
stabilizer[stabilizer.size()-1]-=2;
else if(stabilizer[stabilizer.size()-1] == 1)
stabilizer[stabilizer.size()-1]--;
}
/*
* Update stabilizer when gesture is known
*/
for(unsigned int i = 0; i<stabilizer.size()-1; i++)
{
if(object_name == hands[i])
{
if(stabilizer[i] < max_stabilizer)
stabilizer[i]++;
}
else
{ if(stabilizer[i]>0)
stabilizer[i]--;
}
}
/*
* Print Stabilizer values
*/
for(unsigned int i = 0; i<stabilizer.size(); i++)
{
if(i<stabilizer.size()-1)
printf("%s: %d, ",hands[i].c_str(), stabilizer[i]);
else
//......... part of the code is omitted here .........
Example 8: backgroundSubstractionDetection
void backgroundSubstractionDetection(cv::Mat sequence, std::vector<cv::Rect> &detectedPedestrianFiltered, cv::Ptr<cv::BackgroundSubtractor> &pMOG2, trackingOption &tracking)
{
int threshold = 150;
cv::Mat mask;
cv::Mat sequenceGrayDiff;
std::vector<std::vector<cv::Point> > contours;
std::vector<cv::Vec4i> hierarchy;
std::vector<std::vector<cv::Point> > contours_poly;
std::vector<cv::Rect> detectedPedestrian;
pMOG2->apply(sequence,sequenceGrayDiff);
cv::threshold(sequenceGrayDiff, mask, threshold, 255, cv::THRESH_BINARY);
cv::erode(mask, mask, cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(6,6)));
cv::dilate(mask, mask, cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(25,55)));
cv::erode(mask, mask, cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(3,6)));
/*
cv::Mat dist;
cv::distanceTransform(mask, dist, CV_DIST_L2, 3);
cv::normalize(dist, dist, 0, 1., cv::NORM_MINMAX);
cv::threshold(dist, dist, .4, 1., CV_THRESH_BINARY);
cv::imshow("temp", dist);
*/
cv::findContours(mask, contours, hierarchy, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE, cv::Point(0,0));
contours_poly.resize(contours.size());
detectedPedestrian.resize(contours.size());
for(size_t j=0;j<contours.size();j++)
{
cv::approxPolyDP(cv::Mat(contours[j]), contours_poly[j], 3, true);
detectedPedestrian[j] = cv::boundingRect(cv::Mat(contours_poly[j]));
//test
/*
double pix = 30;
if(detectedPedestrian[j].x >= pix)
detectedPedestrian[j].x -= pix;
else
detectedPedestrian[j].x = 0;
if((detectedPedestrian[j].x+detectedPedestrian[j].width) <= (sequence.cols-pix))
detectedPedestrian[j].width += pix;
else
detectedPedestrian[j].width = sequence.cols - detectedPedestrian[j].x;
if(detectedPedestrian[j].y >= pix)
detectedPedestrian[j].y -= pix;
else
detectedPedestrian[j].y = 0;
if((detectedPedestrian[j].y+detectedPedestrian[j].height) <= (sequence.rows-pix))
detectedPedestrian[j].height += pix;
else
detectedPedestrian[j].height = sequence.rows - detectedPedestrian[j].y;
*/
}
if(detectedPedestrian.size() != 0)
{
tracking = GOOD_FEATURES_TO_TRACK;
detectedPedestrianFiltered.clear();
detectedPedestrianFiltered.resize(detectedPedestrian.size());
detectedPedestrianFiltered = detectedPedestrian;
}
else
tracking = NOTHING_TO_TRACK;
}
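A hedged sketch of how this routine could be driven: the MOG2 parameters, video file name and drawing code are illustrative, and trackingOption/NOTHING_TO_TRACK are assumed to be the project enum already used inside the function above.
cv::Ptr<cv::BackgroundSubtractor> pMOG2 = cv::createBackgroundSubtractorMOG2(500, 16.0, false);
cv::VideoCapture cap("pedestrians.avi");
cv::Mat frame;
std::vector<cv::Rect> detections;
trackingOption tracking = NOTHING_TO_TRACK;
while (cap.read(frame))
{
    backgroundSubstractionDetection(frame, detections, pMOG2, tracking);
    for (size_t i = 0; i < detections.size(); ++i)
        cv::rectangle(frame, detections[i], cv::Scalar(0, 255, 0), 2);
    cv::imshow("detections", frame);
    if (cv::waitKey(30) == 27)
        break;
}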
Example 9: main
int main(int argc, char* argv[]) {
// welcome message
std::cout<<"*********************************************************************************"<<std::endl;
std::cout<<"* Retina demonstration for High Dynamic Range compression (tone-mapping) : demonstrates the use of a wrapper class of the Gipsa/Listic Labs retina model."<<std::endl;
std::cout<<"* This retina model allows spatio-temporal image processing (applied on still images, video sequences)."<<std::endl;
std::cout<<"* This demo focuses demonstration of the dynamic compression capabilities of the model"<<std::endl;
std::cout<<"* => the main application is tone mapping of HDR images (i.e. see on a 8bit display a more than 8bits coded (up to 16bits) image with details in high and low luminance ranges"<<std::endl;
std::cout<<"* The retina model still have the following properties:"<<std::endl;
std::cout<<"* => It applies a spectral whithening (mid-frequency details enhancement)"<<std::endl;
std::cout<<"* => high frequency spatio-temporal noise reduction"<<std::endl;
std::cout<<"* => low frequency luminance to be reduced (luminance range compression)"<<std::endl;
std::cout<<"* => local logarithmic luminance compression allows details to be enhanced in low light conditions\n"<<std::endl;
std::cout<<"* for more information, reer to the following papers :"<<std::endl;
std::cout<<"* Benoit A., Caplier A., Durette B., Herault, J., \"USING HUMAN VISUAL SYSTEM MODELING FOR BIO-INSPIRED LOW LEVEL IMAGE PROCESSING\", Elsevier, Computer Vision and Image Understanding 114 (2010), pp. 758-773, DOI: http://dx.doi.org/10.1016/j.cviu.2010.01.011"<<std::endl;
std::cout<<"* Vision: Images, Signals and Neural Networks: Models of Neural Processing in Visual Perception (Progress in Neural Processing),By: Jeanny Herault, ISBN: 9814273686. WAPI (Tower ID): 113266891."<<std::endl;
std::cout<<"* => reports comments/remarks at [email protected]"<<std::endl;
std::cout<<"* => more informations and papers at : http://sites.google.com/site/benoitalexandrevision/"<<std::endl;
std::cout<<"*********************************************************************************"<<std::endl;
std::cout<<"** WARNING : this sample requires OpenCV to be configured with OpenEXR support **"<<std::endl;
std::cout<<"*********************************************************************************"<<std::endl;
std::cout<<"*** You can use free tools to generate OpenEXR images from images sets : ***"<<std::endl;
std::cout<<"*** => 1. take a set of photos from the same viewpoint using bracketing ***"<<std::endl;
std::cout<<"*** => 2. generate an OpenEXR image with tools like qtpfsgui.sourceforge.net ***"<<std::endl;
std::cout<<"*** => 3. apply tone mapping with this program ***"<<std::endl;
std::cout<<"*********************************************************************************"<<std::endl;
// basic input arguments checking
if (argc<4)
{
help("bad number of parameter");
return -1;
}
bool useLogSampling = !strcmp(argv[argc-1], "log"); // check if user wants retina log sampling processing
int startFrameIndex=0, endFrameIndex=0, currentFrameIndex=0;
sscanf(argv[2], "%d", &startFrameIndex);
sscanf(argv[3], "%d", &endFrameIndex);
std::string inputImageNamePrototype(argv[1]);
//////////////////////////////////////////////////////////////////////////////
// checking input media type (still image, video file, live video acquisition)
std::cout<<"RetinaDemo: setting up system with first image..."<<std::endl;
loadNewFrame(inputImageNamePrototype, startFrameIndex, true);
if (inputImage.empty())
{
help("could not load image, program end");
return -1;
}
//////////////////////////////////////////////////////////////////////////////
// Program start in a try/catch safety context (Retina may throw errors)
try
{
/* create a retina instance with default parameters setup, uncomment the initialisation you want to test
* -> if the last parameter is 'log', then activate log sampling (favour foveal vision and subsamples peripheral vision)
*/
if (useLogSampling)
{
retina = cv::bioinspired::createRetina(inputImage.size(),true, cv::bioinspired::RETINA_COLOR_BAYER, true, 2.0, 10.0);
}
else// -> else allocate "classical" retina :
retina = cv::bioinspired::createRetina(inputImage.size());
// save default retina parameters file in order to let you see this and maybe modify it and reload using method "setup"
retina->write("RetinaDefaultParameters.xml");
// deactivate magnocellular pathway processing (motion information extraction) since it is not useful here
retina->activateMovingContoursProcessing(false);
// declare retina output buffers
cv::Mat retinaOutput_parvo;
/////////////////////////////////////////////
// prepare displays and interactions
histogramClippingValue=0; // default value... updated with interface slider
std::string retinaInputCorrected("Retina input image (with cut edges histogram for basic pixels error avoidance)");
cv::namedWindow(retinaInputCorrected,1);
cv::createTrackbar("histogram edges clipping limit", "Retina input image (with cut edges histogram for basic pixels error avoidance)",&histogramClippingValue,50,callBack_rescaleGrayLevelMat);
std::string RetinaParvoWindow("Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping");
cv::namedWindow(RetinaParvoWindow, 1);
colorSaturationFactor=3;
cv::createTrackbar("Color saturation", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", &colorSaturationFactor,5,callback_saturateColors);
retinaHcellsGain=40;
cv::createTrackbar("Hcells gain", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping",&retinaHcellsGain,100,callBack_updateRetinaParams);
localAdaptation_photoreceptors=197;
localAdaptation_Gcells=190;
cv::createTrackbar("Ph sensitivity", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", &localAdaptation_photoreceptors,199,callBack_updateRetinaParams);
cv::createTrackbar("Gcells sensitivity", "Retina Parvocellular pathway output : 16bit=>8bit image retina tonemapping", &localAdaptation_Gcells,199,callBack_updateRetinaParams);
std::string powerTransformedInput("EXR image with basic processing : 16bits=>8bits with gamma correction");
/////////////////////////////////////////////
// apply default parameters of user interaction variables
callBack_updateRetinaParams(1,NULL); // first call for default parameters setup
//......... part of the code is omitted here .........
Author: 23pointsNorth, project: opencv_contrib, lines of code: 101, source file: OpenEXRimages_HDR_Retina_toneMapping_video.cpp
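loadNewFrame() used above is defined elsewhere in the sample. Below is a hedged sketch of what such a helper could look like, assuming the first argument is a printf-style file name prototype (e.g. "frame_%04d.exr"); this is not the original implementation.
#include <cstdio>
#include <iostream>
#include <string>
#include <opencv2/imgcodecs.hpp>

cv::Mat inputImage;   // global frame buffer consumed by main()

void loadNewFrame(const std::string& filenamePrototype, int frameIndex, bool firstTimeErrorReport)
{
    char frameName[1024];
    snprintf(frameName, sizeof(frameName), filenamePrototype.c_str(), frameIndex);
    inputImage = cv::imread(frameName, -1);   // -1 keeps the original depth/channels (OpenEXR float data)
    if (inputImage.empty() && firstTimeErrorReport)
        std::cerr << "loadNewFrame: could not load " << frameName << std::endl;
}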
Example 10: processImage
void processImage(cv::Mat& image) {
if (image.empty())
return;
#ifdef _OPENCV3
pMOG->apply(image, fgMaskMOG, 0.05);
#else
pMOG->operator()(image, fgMaskMOG, 0.05);
#endif
cv::dilate(fgMaskMOG,fgMaskMOG,cv::getStructuringElement(cv::MORPH_ELLIPSE,cv::Size(15,15)));
bin = new IplImage(fgMaskMOG);
frame = new IplImage(image);
labelImg = cvCreateImage(cvSize(image.cols,image.rows),IPL_DEPTH_LABEL,1);
unsigned int result = cvLabel(bin, labelImg, blobs);
cvRenderBlobs(labelImg, blobs, frame, frame, CV_BLOB_RENDER_BOUNDING_BOX|CV_BLOB_RENDER_CENTROID|CV_BLOB_RENDER_ANGLE);
cvFilterByArea(blobs, 1500, 40000);
cvUpdateTracks(blobs, tracks, 200., 5);
cvRenderTracks(tracks, frame, frame, CV_TRACK_RENDER_ID);
for (std::map<CvID, CvTrack*>::iterator track_it = tracks.begin(); track_it!=tracks.end(); track_it++) {
CvID id = (*track_it).first;
CvTrack* track = (*track_it).second;
cur_pos = track->centroid;
if (track->inactive == 0) {
if (last_poses.count(id)) {
std::map<CvID, CvPoint2D64f>::iterator pose_it = last_poses.find(id);
last_pos = pose_it -> second;
last_poses.erase(pose_it);
}
last_poses.insert(std::pair<CvID, CvPoint2D64f>(id, cur_pos));
if (line_pos+25>cur_pos.y && cur_pos.y>line_pos && line_pos-25<last_pos.y && last_pos.y<line_pos) {
count++;
countUD++;
}
if (line_pos-25<cur_pos.y && cur_pos.y<line_pos && line_pos+25>last_pos.y && last_pos.y>line_pos) {
count++;
countDU++;
}
if ( cur_pos.y<line_pos+50 && cur_pos.y>line_pos-50) {
avg_vel += abs(cur_pos.y-last_pos.y);
count_active++;
}
//update heatmapfg
heat_mapfg = cv::Mat::zeros(FR_H, FR_W, CV_8UC3);
count_arr[lmindex] = count;
avg_vel_arr[lmindex] = avg_vel/count_active ;
for (int i=0; i<landmarks.size(); i++) {
cv::circle(heat_mapfg, cv::Point((landmarks[i].y + 50)*2.4, (landmarks[i].x + 50)*2.4), count_arr[i]*3, cv::Scalar(0, 16*avg_vel_arr[i], 255 - 16*avg_vel_arr[i]), -1);
}
cv::GaussianBlur(heat_mapfg, heat_mapfg, cv::Size(15, 15), 5);
} else {
if (last_poses.count(id)) {
last_poses.erase(last_poses.find(id));
}
}
}
cv::line(image, cv::Point(0, line_pos), cv::Point(FR_W, line_pos), cv::Scalar(0,255,0),2);
cv::putText(image, "COUNT: "+to_string(count), cv::Point(10, 15), cv::FONT_HERSHEY_PLAIN, 1, cv::Scalar(255,255,255));
cv::putText(image, "UP->DOWN: "+to_string(countUD), cv::Point(10, 30), cv::FONT_HERSHEY_PLAIN, 1, cv::Scalar(255,255,255));
cv::putText(image, "DOWN->UP: "+to_string(countDU), cv::Point(10, 45), cv::FONT_HERSHEY_PLAIN, 1, cv::Scalar(255,255,255));
cv::imshow("BLOBS", image);
cv::imshow("HEATMAP", heat_map + heat_mapfg);
cv::waitKey(33);
}
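The routine assumes a global pMOG background subtractor matching the #ifdef branches above; a hedged sketch of how it is typically allocated in each API generation:
#ifdef _OPENCV3
cv::Ptr<cv::BackgroundSubtractorMOG2> pMOG = cv::createBackgroundSubtractorMOG2();
#else
cv::Ptr<cv::BackgroundSubtractorMOG2> pMOG = new cv::BackgroundSubtractorMOG2();   // OpenCV 2.4 style
#endif
cv::Mat fgMaskMOG;   // foreground mask written by pMOG in processImage()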
Example 11: detectAndTrackFace
void CHumanTracker::detectAndTrackFace()
{
static ros::Time probe;
// Do ROI
debugFrame = rawFrame.clone();
Mat img = this->rawFrame(searchROI);
faces.clear();
ostringstream txtstr;
const static Scalar colors[] = { CV_RGB(0,0,255),
CV_RGB(0,128,255),
CV_RGB(0,255,255),
CV_RGB(0,255,0),
CV_RGB(255,128,0),
CV_RGB(255,255,0),
CV_RGB(255,0,0),
CV_RGB(255,0,255)} ;
Mat gray;
Mat frame( cvRound(img.rows), cvRound(img.cols), CV_8UC1 );
cvtColor( img, gray, CV_BGR2GRAY );
resize( gray, frame, frame.size(), 0, 0, INTER_LINEAR );
//equalizeHist( frame, frame );
// This if for internal usage
const ros::Time _n = ros::Time::now();
double dt = (_n - probe).toSec();
probe = _n;
CvMat _image = frame;
if (!storage.empty())
{
cvClearMemStorage(storage);
}
CvSeq* _objects = cvHaarDetectObjects(&_image, cascade, storage,
1.2, initialScoreMin, CV_HAAR_DO_CANNY_PRUNING|CV_HAAR_SCALE_IMAGE, minFaceSize, maxFaceSize);
vector<CvAvgComp> vecAvgComp;
Seq<CvAvgComp>(_objects).copyTo(vecAvgComp);
// End of using C API
isFaceInCurrentFrame = (vecAvgComp.size() > 0);
// This is a hack
bool isProfileFace = false;
if ((profileHackEnabled) && (!isFaceInCurrentFrame) && ((trackingState == STATE_LOST) || (trackingState == STATE_REJECT)))
{
ROS_DEBUG("Using Profile Face hack ...");
if (!storageProfile.empty()) {
cvClearMemStorage(storageProfile);
}
CvSeq* _objectsProfile = cvHaarDetectObjects(&_image, cascadeProfile, storageProfile,
1.2, initialScoreMin, CV_HAAR_DO_CANNY_PRUNING|CV_HAAR_SCALE_IMAGE, minFaceSize, maxFaceSize);
vecAvgComp.clear();
Seq<CvAvgComp>(_objectsProfile).copyTo(vecAvgComp);
isFaceInCurrentFrame = (vecAvgComp.size() > 0);
if (isFaceInCurrentFrame)
{
ROS_DEBUG("The hack seems to work!");
}
isProfileFace = true;
}
if (trackingState == STATE_LOST)
{
if (isFaceInCurrentFrame)
{
stateCounter++;
trackingState = STATE_DETECT;
}
}
if (trackingState == STATE_DETECT)
{
if (isFaceInCurrentFrame)
{
stateCounter++;
}
else
{
stateCounter = 0;
trackingState = STATE_LOST;
}
if (stateCounter > minDetectFrames)
{
stateCounter = 0;
trackingState = STATE_TRACK;
}
}
if (trackingState == STATE_TRACK)
{
if (!isFaceInCurrentFrame)
{
//......... part of the code is omitted here .........
Example 12: calcLocation
cv::Point2f calcLocation(cv::Mat query_img) {
std::vector<cv::KeyPoint> kp_query; // Keypoints of the query image
cv::Mat des_query;
cv::Mat query_img_gray;
cv::cvtColor(query_img,
query_img_gray,
cv::COLOR_BGR2GRAY);
detector->detectAndCompute(query_img_gray,
cv::noArray(),
kp_query,
des_query);
std::vector< std::vector<cv::DMatch> > matches;
matcher.knnMatch(des_ref,
des_query,
matches,
2);
std::vector<cv::KeyPoint> matched_query, matched_ref, inliers_query, inliers_ref;
std::vector<cv::DMatch> good_matches;
//-- Localize the object
std::vector<cv::Point2f> pts_query;
std::vector<cv::Point2f> pts_ref;
for(size_t i = 0; i < matches.size(); i++) {
cv::DMatch first = matches[i][0];
float dist_query = matches[i][0].distance;
float dist_ref = matches[i][1].distance;
if (dist_query < match_ratio * dist_ref) {
matched_query.push_back(kp_query[first.queryIdx]);
matched_ref.push_back(kp_ref[first.trainIdx]);
pts_query.push_back(kp_query[first.queryIdx].pt);
pts_ref.push_back(kp_ref[first.trainIdx].pt);
}
}
cv::Mat mask;
// Homograpy
cv::Mat homography;
homography = cv::findHomography(pts_query,
pts_ref,
cv::RANSAC,
5,
mask);
// Input Quadilateral or Image plane coordinates
std::vector<cv::Point2f> centers(1), centers_transformed(1);
cv::Point2f center(query_img_gray.rows / 2,
query_img_gray.cols / 2);
cv::Point2f center_transformed(query_img.rows / 2,
query_img.cols / 2);
centers[0] = center; // Workaround for using perspective transform
cv::perspectiveTransform(centers,
centers_transformed,
homography);
center_transformed = centers_transformed[0];
return center_transformed;
}
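calcLocation() uses several globals that are not shown: detector, matcher, kp_ref, des_ref and match_ratio. A hedged sketch of how they could be set up from a reference image; the names follow the snippet, while the concrete choices (ORB features, Hamming matcher, 0.8 ratio) are assumptions.
cv::Ptr<cv::Feature2D> detector = cv::ORB::create(1000);
cv::BFMatcher matcher(cv::NORM_HAMMING);
std::vector<cv::KeyPoint> kp_ref;
cv::Mat des_ref;
float match_ratio = 0.8f;

void setReferenceImage(const cv::Mat& ref_img)   // hypothetical helper, not in the original
{
    cv::Mat ref_gray;
    cv::cvtColor(ref_img, ref_gray, cv::COLOR_BGR2GRAY);
    detector->detectAndCompute(ref_gray, cv::noArray(), kp_ref, des_ref);
}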
Example 13: reset
void AllignedFrameSource::reset()
{
base_->reset();
}
Example 14: simpleMatching
void simpleMatching(
const cv::Mat& descriptors_0, const cv::Mat& descriptors_1,
std::vector<cv::DMatch>& matches)
{
matcher_->match(descriptors_0, descriptors_1, matches);
}
Example 15: initialize
/*
* Initializes annotator
*/
TyErrorId initialize(AnnotatorContext &ctx)
{
outInfo("initialize");
if(ctx.isParameterDefined("keypointDetector"))
{
ctx.extractValue("keypointDetector", keypointDetector);
}
else
{
outError("no keypoint detector provided!");
return UIMA_ERR_ANNOTATOR_MISSING_INIT;
}
if(ctx.isParameterDefined("featureExtractor"))
{
ctx.extractValue("featureExtractor", featureExtractor);
}
else
{
outError("no feature extractor provided!");
return UIMA_ERR_ANNOTATOR_MISSING_INIT;
}
outDebug("creating " << keypointDetector << " key points detector...");
detector = cv::FeatureDetector::create(keypointDetector);
if(detector.empty())
{
outError("creation failed!");
return UIMA_ERR_ANNOTATOR_MISSING_INIT;
}
#if OUT_LEVEL == OUT_LEVEL_DEBUG
printParams(detector);
#endif
setupAlgorithm(detector);
outDebug("creating " << featureExtractor << " feature extractor...");
extractor = cv::DescriptorExtractor::create(featureExtractor);
if(extractor.empty())
{
outError("creation failed!");
return UIMA_ERR_ANNOTATOR_MISSING_INIT;
}
#if OUT_LEVEL == OUT_LEVEL_DEBUG
printParams(extractor);
#endif
setupAlgorithm(extractor);
if(featureExtractor == "SIFT" || featureExtractor == "SURF")
{
featureType = "numerical";
}
else
{
featureType = "binary";
}
return UIMA_ERR_NONE;
}