本文整理汇总了C++中cv::Ptr::apply方法的典型用法代码示例。如果您正苦于以下问题:C++ Ptr::apply方法的具体用法?C++ Ptr::apply怎么用?C++ Ptr::apply使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类cv::Ptr
的用法示例。
在下文中一共展示了Ptr::apply方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: WrapPhase
cv::Mat NFringeStructuredLight::WrapPhase( vector<cv::Mat> fringeImages, cv::Ptr<cv::FilterEngine> filter )
{
    // Computes the wrapped phase map (-pi, pi] from a set of phase-shifted
    // fringe images using the N-step phase-shifting formula.
    //
    // fringeImages - one 8-bit image per fringe; must contain exactly
    //                m_numberOfFringes entries (throws otherwise).
    // filter       - optional smoothing filter applied to the sine/cosine
    //                components before the atan2 (skipped when empty).
    // Returns a CV_32F image the size of the inputs.
    Utils::AssertOrThrowIfFalse(fringeImages.size() == m_numberOfFringes,
        "Invalid number of fringes passed into phase wrapper");

    // Same size as the fringe images, floating point for fractional phase values.
    cv::Mat sine(fringeImages[0].size(), CV_32F, 0.0f);
    cv::Mat cosine(fringeImages[0].size(), CV_32F, 0.0f);
    cv::Mat phase(fringeImages[0].size(), CV_32F, 0.0f);

    // The phase-shift weights depend only on the fringe index, never on the
    // pixel, so compute them once. (The original recomputed sin/cos for every
    // pixel of every fringe — rows*cols redundant transcendental calls each.)
    std::vector<float> sinWeight(m_numberOfFringes);
    std::vector<float> cosWeight(m_numberOfFringes);
    for(int fringe = 0; fringe < m_numberOfFringes; ++fringe)
    {
        const double shift = 2.0 * M_PI * double(fringe) / double(m_numberOfFringes);
        sinWeight[fringe] = float( sin( shift ) );
        cosWeight[fringe] = float( cos( shift ) );
    }

    // Accumulate the weighted sine/cosine components per pixel.
    for(int row = 0; row < phase.rows; ++row)
    {
        for(int col = 0; col < phase.cols; ++col)
        {
            for(int fringe = 0; fringe < m_numberOfFringes; ++fringe)
            {
                // Normalize the 8-bit intensity to [0,1] before accumulating.
                const float intensity = float( fringeImages[fringe].at<uchar>(row, col) ) / 255.0f;
                sine.at<float>(row, col)   += intensity * sinWeight[fringe];
                cosine.at<float>(row, col) += intensity * cosWeight[fringe];
            }
        }
    }

    // Filter out noise in the sine and cosine components
    if( !filter.empty( ) )
    {
        filter->apply( sine, sine );
        filter->apply( cosine, cosine );
    }

    // Now perform phase wrapping. Negated so that our phase gradient
    // increases from 0 -> rows (or 0 -> cols).
    for(int row = 0; row < phase.rows; ++row)
    {
        for(int col = 0; col < phase.cols; ++col)
        {
            phase.at<float>(row, col) = -atan2( sine.at<float>( row, col ), cosine.at<float>( row, col ) );
        }
    }
    return phase;
}
示例2: imageCallback
void BackgroundSubtraction::imageCallback(const sensor_msgs::ImageConstPtr& msg)
{
  // Per-frame callback: diff the incoming image against a captured background
  // and publish the live pixels wherever the difference exceeds a threshold.
  cv_bridge::CvImageConstPtr cv_ptr;
  try
  {
    // Convert to RGB8 so the absdiff/cvtColor pipeline below sees 3 channels.
    cv_ptr = cv_bridge::toCvShare(msg, sensor_msgs::image_encodings::RGB8);
  }
  catch (cv_bridge::Exception& e)
  {
    ROS_ERROR("cv_bridge exception: %s", e.what());
    return;
  }

  cv::Mat live_frame = cv_ptr->image.clone();
  // Keep the MOG2 model updated even though its mask is currently unused
  // in the published output below.
  mog2_->apply(live_frame, fg_mask_mog2_);

  if (config_.capture_background)
  {
    // live_frame is a fresh clone every callback, so sharing its buffer is safe.
    bg_image_ = live_frame;
  }

  // Fix: cv::absdiff asserts on an empty Mat — before the user ever toggles
  // capture_background, bg_image_ is empty and the node would crash. Seed the
  // background from the first frame instead (output is then all-black until a
  // real background is captured, which is the sensible degenerate case).
  if (bg_image_.empty())
  {
    bg_image_ = live_frame;
  }

  cv::Mat diff;
  cv::absdiff(bg_image_, live_frame, diff);
  cv::Mat diff_gray;
  cv::cvtColor(diff, diff_gray, CV_RGB2GRAY);
  cv::Mat mask = diff_gray > 20;  // TODO(lucasw) make this a config_ dr param
  cv::Mat mask_rgb;
  cv::cvtColor(mask, mask_rgb, CV_GRAY2RGB);
  // Keep only the live pixels that differ from the background.
  cv::Mat image = mask_rgb & live_frame;

  cv_bridge::CvImage cv_image;
  cv_image.image = image;
  cv_image.encoding = "rgb8";
  // Propagate the input stamp/frame so downstream consumers can correlate.
  cv_image.header.stamp = msg->header.stamp;
  cv_image.header.frame_id = msg->header.frame_id;
  image_pub_.publish(cv_image.toImageMsg());
}
示例3: convertIr
// Converts a 16-bit IR image to an 8-bit grey image by linearly rescaling the
// [minIr, maxIr] window (file-scope globals) to [0, 255], then applying CLAHE
// (file-scope `clahe`) for local contrast enhancement.
// NOTE(review): assumes maxIr > minIr — a zero or negative span makes `factor`
// inf/negative; confirm the globals are validated by the caller.
void convertIr(const cv::Mat &ir, cv::Mat &grey)
{
// Scale chosen so minIr -> 0 and maxIr -> 255.
const float factor = 255.0f / (maxIr - minIr);
grey.create(ir.rows, ir.cols, CV_8U);
// Rows are independent, so process them in parallel.
// NOTE(review): unsigned loop variables need OpenMP 3.0+ (gcc/clang); MSVC's
// OpenMP 2.x would reject this pragma with size_t — confirm target toolchain.
#pragma omp parallel for
for(size_t r = 0; r < (size_t)ir.rows; ++r)
{
// Raw row pointers: itI walks the 16-bit input, itO the 8-bit output.
const uint16_t *itI = ir.ptr<uint16_t>(r);
uint8_t *itO = grey.ptr<uint8_t>(r);
for(size_t c = 0; c < (size_t)ir.cols; ++c, ++itI, ++itO)
{
// Clamp below at minIr, rescale, clamp above at 255, then narrow to uint8_t.
*itO = std::min(std::max(*itI - minIr, 0) * factor, 255.0f);
}
}
// Local-contrast equalization of the rescaled image, in place.
clahe->apply(grey, grey);
}
示例4: processImage
void processImage(cv::Mat& image) {
    // Background-subtracts the frame, labels/tracks the resulting blobs, and
    // counts tracks crossing the horizontal line at `line_pos` in each
    // direction. Draws the annotated frame and a heat map via HighGUI.
    // Relies on file-scope state: pMOG, fgMaskMOG, bin, frame, labelImg,
    // blobs, tracks, last_poses, counters, landmarks, heat maps.
    if (image.empty())
        return;

#ifdef _OPENCV3
    pMOG->apply(image, fgMaskMOG, 0.05);
#else
    pMOG->operator()(image, fgMaskMOG, 0.05);
#endif
    cv::dilate(fgMaskMOG, fgMaskMOG, cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(15,15)));

    // IplImage headers over the cv::Mat data (no pixel copy is made).
    bin = new IplImage(fgMaskMOG);
    frame = new IplImage(image);
    labelImg = cvCreateImage(cvSize(image.cols, image.rows), IPL_DEPTH_LABEL, 1);

    cvLabel(bin, labelImg, blobs);
    cvRenderBlobs(labelImg, blobs, frame, frame, CV_BLOB_RENDER_BOUNDING_BOX|CV_BLOB_RENDER_CENTROID|CV_BLOB_RENDER_ANGLE);
    cvFilterByArea(blobs, 1500, 40000);
    cvUpdateTracks(blobs, tracks, 200., 5);
    cvRenderTracks(tracks, frame, frame, CV_TRACK_RENDER_ID);

    for (std::map<CvID, CvTrack*>::iterator track_it = tracks.begin(); track_it != tracks.end(); ++track_it) {
        CvID id = track_it->first;
        CvTrack* track = track_it->second;
        cur_pos = track->centroid;
        if (track->inactive == 0) {
            // Recall this track's previous centroid (if any), then record the new one.
            if (last_poses.count(id)) {
                std::map<CvID, CvPoint2D64f>::iterator pose_it = last_poses.find(id);
                last_pos = pose_it->second;
                last_poses.erase(pose_it);
            }
            last_poses.insert(std::pair<CvID, CvPoint2D64f>(id, cur_pos));

            // Crossed top -> bottom through the 25px band around the line.
            if (line_pos+25 > cur_pos.y && cur_pos.y > line_pos && line_pos-25 < last_pos.y && last_pos.y < line_pos) {
                count++;
                countUD++;
            }
            // Crossed bottom -> top.
            if (line_pos-25 < cur_pos.y && cur_pos.y < line_pos && line_pos+25 > last_pos.y && last_pos.y > line_pos) {
                count++;
                countDU++;
            }
            // Accumulate vertical speed for tracks near the counting line.
            if (cur_pos.y < line_pos+50 && cur_pos.y > line_pos-50) {
                // Fix: centroids are doubles — the original unqualified abs()
                // resolved to int abs() and truncated sub-pixel motion.
                avg_vel += std::fabs(cur_pos.y - last_pos.y);
                count_active++;
            }

            // Redraw the heat-map foreground from the per-landmark counters.
            heat_mapfg = cv::Mat::zeros(FR_H, FR_W, CV_8UC3);
            count_arr[lmindex] = count;
            avg_vel_arr[lmindex] = avg_vel / count_active;
            for (size_t i = 0; i < landmarks.size(); i++) {
                cv::circle(heat_mapfg, cv::Point((landmarks[i].y + 50)*2.4, (landmarks[i].x + 50)*2.4), count_arr[i]*3, cv::Scalar(0, 16*avg_vel_arr[i], 255 - 16*avg_vel_arr[i]), -1);
            }
            cv::GaussianBlur(heat_mapfg, heat_mapfg, cv::Size(15, 15), 5);
        } else {
            // Inactive track: forget its last position.
            if (last_poses.count(id)) {
                last_poses.erase(last_poses.find(id));
            }
        }
    }

    cv::line(image, cv::Point(0, line_pos), cv::Point(FR_W, line_pos), cv::Scalar(0,255,0), 2);
    cv::putText(image, "COUNT: "+to_string(count), cv::Point(10, 15), cv::FONT_HERSHEY_PLAIN, 1, cv::Scalar(255,255,255));
    cv::putText(image, "UP->DOWN: "+to_string(countUD), cv::Point(10, 30), cv::FONT_HERSHEY_PLAIN, 1, cv::Scalar(255,255,255));
    cv::putText(image, "DOWN->UP: "+to_string(countDU), cv::Point(10, 45), cv::FONT_HERSHEY_PLAIN, 1, cv::Scalar(255,255,255));
    cv::imshow("BLOBS", image);
    cv::imshow("HEATMAP", heat_map + heat_mapfg);
    cv::waitKey(33);

    // Fix: the original leaked both IplImage headers and the label image on
    // every frame. The headers do not own the cv::Mat pixel data, so deleting
    // them is safe; the label image must be released with cvReleaseImage.
    delete bin;
    bin = NULL;
    delete frame;
    frame = NULL;
    cvReleaseImage(&labelImg);
}
示例5: backgroundSubstractionDetection
void backgroundSubstractionDetection(cv::Mat sequence, std::vector<cv::Rect> &detectedPedestrianFiltered, cv::Ptr<cv::BackgroundSubtractor> &pMOG2, trackingOption &tracking)
{
    // Detects moving pedestrians via MOG2 background subtraction: thresholds
    // the foreground mask, cleans it morphologically, and returns the bounding
    // boxes of the external contours.
    //
    // sequence                  - current frame.
    // detectedPedestrianFiltered- out: bounding boxes (replaced only when at
    //                             least one detection was found).
    // pMOG2                     - background-subtractor model, updated in place.
    // tracking                  - out: GOOD_FEATURES_TO_TRACK when detections
    //                             exist, NOTHING_TO_TRACK otherwise.
    const int maskThreshold = 150;
    cv::Mat mask;
    cv::Mat sequenceGrayDiff;
    std::vector<std::vector<cv::Point> > contours;
    std::vector<cv::Vec4i> hierarchy;
    std::vector<cv::Rect> detectedPedestrian;

    pMOG2->apply(sequence, sequenceGrayDiff);
    // Keep only confident foreground (MOG2 marks shadows with lower values).
    cv::threshold(sequenceGrayDiff, mask, maskThreshold, 255, cv::THRESH_BINARY);
    // Erode noise, then dilate with a tall ellipse to merge body fragments
    // into single person-shaped blobs, then trim the growth slightly.
    cv::erode(mask, mask, cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(6,6)));
    cv::dilate(mask, mask, cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(25,55)));
    cv::erode(mask, mask, cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(3,6)));

    cv::findContours(mask, contours, hierarchy, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE, cv::Point(0,0));

    // One bounding box per external contour, via a simplified polygon.
    detectedPedestrian.reserve(contours.size());
    for(size_t j = 0; j < contours.size(); j++)
    {
        std::vector<cv::Point> poly;
        cv::approxPolyDP(cv::Mat(contours[j]), poly, 3, true);
        detectedPedestrian.push_back(cv::boundingRect(cv::Mat(poly)));
    }

    if(!detectedPedestrian.empty())
    {
        tracking = GOOD_FEATURES_TO_TRACK;
        // Fix: the original cleared, resized, and then assigned — the first
        // two steps were dead work. Hand the buffer over in O(1) instead.
        detectedPedestrianFiltered.swap(detectedPedestrian);
    }
    else
        tracking = NOTHING_TO_TRACK;
}