This article collects and summarizes typical usage examples of the C++ Mat::clone method. If you have been struggling with questions such as what exactly C++ Mat::clone does, how to use it, or where to find examples of it, the curated code samples here may help. You can also explore further usage examples of Mat, the class this method belongs to.
The following presents 15 code examples of the Mat::clone method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
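Before diving into the examples, here is a minimal standalone sketch (not taken from any example below) of the core point: Mat::clone performs a deep copy of the pixel data, while plain assignment only copies the header and shares the underlying buffer.
#include <opencv2/opencv.hpp>
#include <iostream>
int main() {
    cv::Mat a(2, 2, CV_8UC1, cv::Scalar(0));
    cv::Mat shallow = a;         // header copy: shares a's pixel buffer
    cv::Mat deep    = a.clone(); // deep copy: owns its own buffer
    a.at<uchar>(0, 0) = 255;
    std::cout << (int)shallow.at<uchar>(0, 0) << std::endl; // 255 (aliased)
    std::cout << (int)deep.at<uchar>(0, 0)    << std::endl; // 0 (independent)
    return 0;
}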
Example 1: vanishPointDecide
//......... part of the code omitted here .........
}
if (!skyVpt_turnoff)
{
int middlex=skyVpts[index].x;
int middley=skyVpts[index].y;
for (int x=middlex-halfSearchSizeX/10;x<middlex+halfSearchSizeX/10;x+=firstSearchStepx/3)
{
for (int y = middley-halfSearchSizeY/10; y < middley+halfSearchSizeY/10; y+=firstSearchStepy/3)
{
candidates.push_back(Point(x,y));
}
}
}
else
{
int middlex=img.cols/2;
int middley=img.rows/2;
for (int x=middlex-halfSearchSizeX;x<middlex+halfSearchSizeX;x+=firstSearchStepx)
{
for (int y = middley-halfSearchSizeY; y < middley+halfSearchSizeY; y+=firstSearchStepy)
{
candidates.push_back(Point(x,y));
}
}
}
vector<double> candidateConfidence(candidates.size(),0.0);
double largeConfi=0.0;
int largindex=0;
double largeConfi_motion=0.0;
double largeConfi_structure=0.0;
for (int i = 0; i < candidates.size(); i++)
{
for (int j = 0; j < linesOfTrjs.size(); j++)
{
if(trjlbs[j])
{
if(pointToLineDis(get<0>(linesOfTrjs[j]),get<1>(linesOfTrjs[j]),candidates[i])<candidateSelectThres_traj)
candidateConfidence[i]+=traj_weight;
}
}
if(vFrmEchCue)
{
if (candidateConfidence[i]>largeConfi_structure)
{
largeConfi_structure=candidateConfidence[i];
vCues[1]=candidates[i];
}
}
double tem_Confidence_structure=candidateConfidence[i]; // confidence accumulated so far, from the trajectory cues
for (int j = 0; j < abln_hlines.size(); j++)
{
if(hlnlbs[j])
{
if(pointToLineDis(abln_hlines[j].first,abln_hlines[j].second,candidates[i])<candidateSelectThres_hline)
candidateConfidence[i]+=hline_weight;
}
}
if(vFrmEchCue)
{
if((candidateConfidence[i]-tem_Confidence_structure)>largeConfi_motion)
{
largeConfi_motion=candidateConfidence[i]-tem_Confidence_structure;
vCues[2]=candidates[i];
}
}
if (candidateConfidence[i]>largeConfi)
{
largeConfi=candidateConfidence[i];
largindex=i;
}
}
#ifdef presentationMode_on
Mat copy=img.clone();
drawPoint(copy,skyVpts[index]);
drawHlines(copy,hlines,hlnlbs);
drawTrajs(copy,trjs,trjlbs);
imshow("rslt",copy);
copy=img.clone();
drawHlines(copy,hlines,hlnlbs,false);
drawTrajs(copy,trjs,trjlbs,false);
drawPoint(copy,candidates[largindex]);
imshow("newrslt",copy);
#endif
return candidates[largindex];
}
Example 2: proc
bool f_stabilizer::proc(){
ch_image * pgryin = dynamic_cast<ch_image*>(m_chin[0]);
if(pgryin == NULL)
return false;
ch_image * pclrin = dynamic_cast<ch_image*>(m_chin[1]);
if(pclrin == NULL)
return false;
ch_image * pgryout = dynamic_cast<ch_image*>(m_chout[0]);
if(pgryout == NULL)
return false;
ch_image * pclrout = dynamic_cast<ch_image*>(m_chout[1]);
if(pclrout == NULL)
return false;
long long tgry;
Mat gry = pgryin->get_img(tgry);
if(gry.empty())
return true;
long long tclr;
Mat clr = pclrin->get_img(tclr);
if(clr.empty())
return true;
if(m_bthrough){
pgryout->set_img(gry, tgry);
pclrout->set_img(clr, tclr);
return true;
}
if(m_bclr)
{
long long timg;
Mat img = pgryin->get_img(timg);
pgryout->set_img(img, timg);
m_num_conv_frms = m_num_frms = 0;
m_bWinit = false;
m_bclr = false;
return true;
}
if(tgry != tclr)
return true;
// getting new gray image to be stabilized
//Mat & gry = m_pgryin->get_img();
// roi saturation
if(m_roi.x < 0 || m_roi.x > gry.cols)
m_roi.x = 0;
if(m_roi.y < 0 || m_roi.y > gry.rows)
m_roi.y = 0;
if(m_roi.width <= 0 || (m_roi.x + m_roi.width) >= gry.cols)
m_roi.width = gry.cols - m_roi.x;
if(m_roi.height <= 0 || (m_roi.y + m_roi.height) >= gry.rows)
m_roi.height = gry.rows - m_roi.y;
// making image pyramid of a new image
buildPyramid(gry, m_pyrimg[0], m_num_pyr_level);
// making a template and its pyramid
if(m_refimg.rows != gry.rows ||
m_refimg.cols != gry.cols )
m_refimg = gry.clone();
if(m_bmask && m_Tmask.size() != m_num_pyr_level)
buildPyramid(m_mask, m_Tmask, m_num_pyr_level);
// making image pyramid of the template image sampled from ROI
// in the previous image stabilized with warp parameters m_W
buildPyramid(m_refimg(m_roi), m_pyrimg[1], m_num_pyr_level);
if(!m_bWinit){
init();
m_bWinit = true;
}
// calculating the new warp (which subsumes the previous warp). The warp
// function essentially satisfies: ROI of Wnew(current image) = ROI of Wold(previous image)
Mat Wnew;
if(m_bmask)
Wnew = m_core.calc_warp(m_pyrimg[1] /* previous image template*/,
m_Tmask /* Mask image. Pixels with value zero are ignored in the error calculation.*/,
m_pyrimg[0] /* image the warp to be calculated */,
m_roi /* roi for template sampling */,
m_W /* previous warp */);
else
Wnew = m_core.calc_warp(m_pyrimg[1] /* previous image */,
m_pyrimg[0] /* image the warp to be calculated */,
m_roi/* roi for template sampling */,
m_W /* previous warp */);
// convergence check
if(m_core.is_conv())
m_num_conv_frms++;
else // if not, the previous warp parameters are used.
m_W.copyTo(Wnew);
//......... part of the code omitted here .........
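Example 2 builds its image pyramids with cv::buildPyramid; the following is a minimal standalone sketch of that call (the file name is a placeholder, not from the example):
#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>
int main() {
    cv::Mat img = cv::imread("frame.png", cv::IMREAD_GRAYSCALE);
    if (img.empty()) return 1;
    std::vector<cv::Mat> pyr;
    cv::buildPyramid(img, pyr, 3); // pyr[0] is full size, pyr[3] is 1/8 scale
    for (size_t i = 0; i < pyr.size(); ++i)
        std::cout << "level " << i << ": " << pyr[i].size() << std::endl;
    return 0;
}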
Example 3: smallSource
//=============================================================================
// Assumes that source image exists and numDownPyrs > 1, no ROIs for either
// image, and both images have the same depth and number of channels
bool
CFastMatchTemplate::FastMatchTemplate( const Mat& source,
const Mat& target,
vector<Point>* foundPointsList,
vector<double>* confidencesList,
int matchPercentage,
bool findMultipleTargets,
int numMaxima,
int numDownPyrs,
int searchExpansion )
{
// make sure that the template image is smaller than the source
if(target.size().width > source.size().width ||
target.size().height > source.size().height)
{
printf( "\nSource image must be larger than target image.\n" );
return false;
}
if(source.depth() != target.depth())
{
printf( "\nSource image and target image must have same depth.\n" );
return false;
}
if(source.channels() != target.channels())
{
printf("%d %d\n",source.channels() , target.channels());
printf("\nSource image and target image must have same number of channels.\n" );
return false;
}
Size sourceSize = source.size();
Size targetSize = target.size();
// create copies of the images to modify
Mat copyOfSource = source.clone();
Mat copyOfTarget = target.clone();
// down pyramid the images
for(int ii = 0; ii < numDownPyrs; ii++)
{
// start with the source image
sourceSize.width = (sourceSize.width + 1) / 2;
sourceSize.height = (sourceSize.height + 1) / 2;
Mat smallSource(sourceSize, source.type());
pyrDown(copyOfSource, smallSource);
// prepare for next loop, if any
copyOfSource = smallSource.clone();
// next, do the target
targetSize.width = (targetSize.width + 1) / 2;
targetSize.height = (targetSize.height + 1) / 2;
Mat smallTarget(targetSize, target.type());
pyrDown(copyOfTarget, smallTarget);
// prepare for next loop, if any
copyOfTarget = smallTarget.clone();
}
// perform the match on the shrunken images
Size smallTargetSize = copyOfTarget.size();
Size smallSourceSize = copyOfSource.size();
Size resultSize;
resultSize.width = smallSourceSize.width - smallTargetSize.width + 1;
resultSize.height = smallSourceSize.height - smallTargetSize.height + 1;
Mat result(resultSize, CV_32FC1);
matchTemplate(copyOfSource, copyOfTarget, result, CV_TM_CCOEFF_NORMED);
// find the top match locations
Point* locations = NULL;
MultipleMaxLoc(result, &locations, numMaxima);
// search the large images at the returned locations
sourceSize = source.size();
targetSize = target.size();
// create a copy of the source in order to adjust its ROI for searching
for(int currMax = 0; currMax < numMaxima; currMax++)
{
// transform the point to its corresponding point in the larger image
locations[currMax].x *= (int)pow(2.0f, numDownPyrs);
locations[currMax].y *= (int)pow(2.0f, numDownPyrs);
locations[currMax].x += targetSize.width / 2;
locations[currMax].y += targetSize.height / 2;
const Point& searchPoint = locations[currMax];
// if we are searching for multiple targets and we have found a target or
// multiple targets, we don't want to search in the same location(s) again
if(findMultipleTargets && !foundPointsList->empty())
{
//......... part of the code omitted here .........
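The core single-scale step in Example 3 is cv::matchTemplate followed by a peak search; here is a minimal sketch of that pairing, assuming grayscale inputs (file paths are placeholders):
#include <opencv2/opencv.hpp>
#include <iostream>
int main() {
    cv::Mat source = cv::imread("source.png", cv::IMREAD_GRAYSCALE);
    cv::Mat target = cv::imread("target.png", cv::IMREAD_GRAYSCALE);
    if (source.empty() || target.empty()) return 1;
    cv::Mat result;
    cv::matchTemplate(source, target, result, CV_TM_CCOEFF_NORMED);
    double maxVal;
    cv::Point maxLoc;
    cv::minMaxLoc(result, 0, &maxVal, 0, &maxLoc);
    // maxLoc is the top-left corner of the best match;
    // CCOEFF_NORMED scores lie in [-1, 1]
    std::cout << maxLoc << " score " << maxVal << std::endl;
    return 0;
}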
Example 4: callback
void callback(
const sensor_msgs::ImageConstPtr& imgmsg,
const humans_msgs::HumansConstPtr& kinect
)
{
int okao_i = 0;
int no_okao_i = 0;
humans_msgs::Humans okao_human, no_okao_human;
cv_bridge::CvImagePtr cv_ptr;
try
{
cv_ptr = cv_bridge::toCvCopy(imgmsg, sensor_msgs::image_encodings::BGR8);
for(int i = 0; i < kinect->num; i++)
{
//POS head2d, head3d, neck2d;
Point top, bottom;
geometry_msgs::Point head2d, neck2d;//, top, bottom;
head2d.x
= kinect->human[i].body.joints[HEAD].position_color_space.x;
head2d.y
= kinect->human[i].body.joints[HEAD].position_color_space.y;
neck2d.x
= kinect->human[i].body.joints[SPINE_S].position_color_space.x;
neck2d.y
= kinect->human[i].body.joints[SPINE_S].position_color_space.y;
double diff_w = fabs(head2d.y-neck2d.y);
double diff_h = fabs(head2d.y-neck2d.y);
top.x = head2d.x - diff_w;
top.y = head2d.y - diff_h;
bottom.x = head2d.x + diff_w;
bottom.y = head2d.y + diff_h;
/*
cout << "cut (" << cut.x << "," << cut.y << ")"<<endl;
if( cut.x < 0 )
cut.x = 0;
else if( cut.x >= (cv_ptr->image.cols - diff_w*2) )
cut.x = cv_ptr->image.cols-diff_w*2-1;
if( cut.y < 0 )
cut.y = 0;
else if( cut.y >= (cv_ptr->image.rows - diff_h*2) )
cut.y = cv_ptr->image.rows-diff_h*2-1;
*/
top.x = max(0, top.x);
top.y = max(0, top.y);
bottom.x = min(cv_ptr->image.cols-1, bottom.x);
bottom.y = min(cv_ptr->image.rows-1, bottom.y);
if (top.x >= bottom.x || top.y >= bottom.y) // degenerate or inverted crop
continue;
/*
cout << "(" << top.x << "," << top.y << ")"
<< "-"
<< "(" << bottom.x << "," << bottom.y << ")"<<endl;
cout << "diff:" << "(" << diff_w << "," << diff_h<< ")" << endl;
cout << "image:" << "(" << cv_ptr->image.cols << "," << cv_ptr->image.rows << ")" << endl;
*/
Mat cutRgbImage;
try
{
cutRgbImage = Mat(cv_ptr->image, cv::Rect(top, bottom));
}
catch(cv::Exception& e)
{
ROS_ERROR("cv exception: %s", e.what());
}
Mat rgbImage = cutRgbImage.clone();
if( rgbImage.cols > 1280 )
{
cv::resize( rgbImage, rgbImage,
cv::Size(1280, cutRgbImage.rows*1280/cutRgbImage.cols) );
}
if( rgbImage.rows > 1024 )
{
cv::resize( rgbImage, rgbImage,
cv::Size(cutRgbImage.cols*1024/cutRgbImage.rows , 1024) );
}
//rgbImage = cutRgbImage;
Mat grayImage;
cv::cvtColor(rgbImage,grayImage,CV_BGR2GRAY);
//test
//cv::rectangle( cv_ptr->image, top,
// bottom,
// cv::Scalar(0,200,0), 5, 8);
try
{
cv::Mat img = grayImage;
std::vector<unsigned char>
buf(img.data, img.data + img.cols * img.rows * img.channels());
std::vector<int> encodeParam(2);
encodeParam[0] = CV_IMWRITE_PNG_COMPRESSION;
encodeParam[1] = 3;
//......... part of the code omitted here .........
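The clone() after the Rect crop in Example 4 is what detaches the face patch from the full frame, since Mat(img, rect) only creates a view into the same buffer. A minimal sketch of the distinction (synthetic data, not from the node):
#include <opencv2/opencv.hpp>
#include <iostream>
int main() {
    cv::Mat frame(480, 640, CV_8UC3, cv::Scalar(0, 0, 0));
    cv::Mat view = cv::Mat(frame, cv::Rect(10, 10, 100, 100)); // shares frame's buffer
    cv::Mat copy = view.clone();                               // owns its own buffer
    view.setTo(cv::Scalar(0, 0, 255)); // also modifies 'frame'
    std::cout << "frame changed: " << (int)frame.at<cv::Vec3b>(10, 10)[2] << std::endl; // 255
    std::cout << "copy intact:   " << (int)copy.at<cv::Vec3b>(0, 0)[2]   << std::endl;  // 0
    return 0;
}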
Example 5: main
int main(int argc, char** argv)
{
Mat currentFrame;
Mat current;
Mat grayFrame;
Mat gaussianblurFrame;
Mat undistorted;
Size frameSz;
Mat dst;
Mat dst2;
Mat dst3;
Mat dst4;
Mat dst5;
Mat dst6;
Mat frame;
VideoCapture cap(0);
cap.set(CV_CAP_PROP_FRAME_WIDTH, 640);
cap.set(CV_CAP_PROP_FRAME_HEIGHT, 480);
ROBOT_VISUAL_S Robot_Visual;
int fileid;
/** initialize the serial-port resource **/
fileid = SerialPort_open("/dev/ttyAMA3", 0);
if (fileid < 0)
{
return fileid;
}
cap >> frame;
const int IMAGE_HEIGHT = 480;
Mat huitu;
huitu.create(Size(640, 640), CV_8UC3);
frameSz.height = 640;
frameSz.width = 480;
Mat ImgUndistort = frame.clone();
Mat CM = Mat(3, 3, CV_32FC1);
Mat D;
FileStorage fs2("left.yml", FileStorage::READ);
fs2["cameraMatrix"] >> CM;
fs2["distortionCoefficients"] >> D;
fs2.release();
float * laserDotArr = new float[frameSz.height];
grayFrame.create(Size(frameSz.width, frameSz.height), CV_8UC1);
undistorted.create(Size(frameSz.width, frameSz.height), CV_8UC3);
MeasureResult result = { 0, 0, 0 };
char buf[1024];
unsigned int count = 0;
unsigned int cycle_count = 0;
while (true)
{
cap >> currentFrame;
undistort(currentFrame, ImgUndistort, CM, D);
transpose(ImgUndistort, dst);
flip(dst, dst2, 1);
transpose(huitu, dst4);
flip(dst4, dst5, 1);
cvtColor(dst2, grayFrame, COLOR_BGR2GRAY);
GaussianBlur(grayFrame, gaussianblurFrame, Size(3, 3), 0, 0);
for (int y = 0; y < frameSz.height; ++y)
{
laserDotArr[y] = findLaserCenterByCol(gaussianblurFrame.ptr<uchar>(y), frameSz.width);
}
for (int y = 0; y < frameSz.height; ++y)
{
if (laserDotArr[y] != -1)
{
circle(dst2, cvPoint(laserDotArr[y], y), 2, Scalar(0, 255, 255));
calcDistanceByPos(laserDotArr[y], y, IMAGE_HEIGHT, &result);
if ((y+1)%TRANSMIT_FREQ != 0)
{
count = sprintf(buf, "%-d ", y);
count += sprintf(buf + count, "%-d\n", (unsigned short)result.distance);
}
if ((0 == (y+1)%TRANSMIT_FREQ) && (0 == cycle_count%5))
{
count += sprintf(buf + count, "%-d ", y);
count += sprintf(buf + count, "%-d\n", (unsigned short)result.distance);
printf("%s", buf);
Robot_Visual_Data_Write(fileid, buf, count);
count = 0;
memset((void*)buf, '\0', sizeof(buf));
cycle_count = 0;
}
//printf("%d %.2f\n", y, result.distance);
circle(dst5, cvPoint(result.distance, y), 2, Scalar(255, 255, 255));
//......... part of the code omitted here .........
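Example 5 loads its camera calibration from a YAML file and undistorts each frame; a minimal sketch of that pattern, reusing the same file and key names (assumed to exist on disk):
#include <opencv2/opencv.hpp>
int main() {
    cv::Mat frame = cv::imread("frame.png");
    if (frame.empty()) return 1;
    cv::Mat CM, D;
    cv::FileStorage fs("left.yml", cv::FileStorage::READ);
    fs["cameraMatrix"] >> CM;          // 3x3 intrinsic matrix
    fs["distortionCoefficients"] >> D; // distortion vector
    fs.release();
    cv::Mat undistorted;
    cv::undistort(frame, undistorted, CM, D);
    cv::imwrite("undistorted.png", undistorted);
    return 0;
}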
Example 6: captureTheFace
void captureTheFace(int picNum, Mat frame, VideoCapture cap, Mat *theFacePtr){
vector<Mat> images;
vector<int> labels;
// Read in the data (fails if no valid input filename is given, but you'll get an error message):
try {
read_csv("libs/faceLearn.csv", images, labels);
} catch (cv::Exception& e) {
cerr << "Error opening file \"" << "\". Reason: " << e.msg << endl;
// nothing more we can do
exit(1);
}
// Get the height from the first image. We'll need this
// later in code to reshape the images to their original
// size AND we need to reshape incoming faces to this size:
int im_width = 92;
int im_height = 112;
if(images.size()){
im_width = images[0].cols;
im_height = images[0].rows;
}
CascadeClassifier haar_cascade;
haar_cascade.load(face_cascade_name);
std::cout << "Press SPACE to take picture number " << picNum << "..." << endl;
Mat original, gray, norm, float_gray, blur, num, den;
for(;;) {
cap >> frame;
// Clone the current frame
original = frame.clone();
// Convert the current frame to grayscale and perform illumination normalization
cvtColor(original, gray, CV_BGR2GRAY);
// Find the faces in the frame:
vector< Rect_<int> > faces;
haar_cascade.detectMultiScale(gray, faces);
if(faces.size()){
// Process face:
Rect face_i = faces[0];
// Crop the face from the image:
Mat face = gray(face_i);
cv::resize(face, face, Size(im_width, im_height), 1.0, 1.0, INTER_CUBIC);
// convert to floating-point image
//face.convertTo(float_gray, CV_32F, 1.0/255.0);
// numerator = img - gauss_blur(img)
//cv::GaussianBlur(float_gray, blur, Size(0,0), 2, 2);
//num = float_gray - blur;
// denominator = sqrt(gauss_blur(img^2))
//cv::GaussianBlur(num.mul(num), blur, Size(0,0), 20, 20);
//cv::pow(blur, 0.5, den);
// output = numerator / denominator
//norm = num / den;
// normalize output into [0,1]
//cv::normalize(norm, norm, 0.0, 1.0, NORM_MINMAX, -1);
*theFacePtr = face;
// First of all draw a green rectangle around the detected face:
rectangle(original, face_i, CV_RGB(0, 255,0), 1);
imshow("Face", face);
}
// Show the result:
imshow("NewFaceCapture", original);
// And display it:
char key = (char) waitKey(32);
// Exit this loop when SPACE is pressed
if(key == 32)
break;
}
}
Example 7: xoffset
//==============================================================================
void
face_detector::
train(ft_data &data,
const string fname,
const Mat &ref,
const bool mirror,
const bool visi,
const float frac,
const float scaleFactor,
const int minNeighbours,
const Size minSize)
{
detector.load(fname.c_str()); detector_fname = fname; reference = ref.clone();
vector<float> xoffset(0),yoffset(0),zoffset(0);
for(int i = 0; i < data.n_images(); i++){
Mat im = data.get_image(i,0); if(im.empty())continue;
vector<Point2f> p = data.get_points(i,false); int n = p.size();
Mat pt = Mat(p).reshape(1,2*n);
vector<Rect> faces; Mat eqIm; equalizeHist(im,eqIm);
detector.detectMultiScale(eqIm,faces,scaleFactor,minNeighbours,0
|CV_HAAR_FIND_BIGGEST_OBJECT
|CV_HAAR_SCALE_IMAGE,minSize);
if(faces.size() >= 1){
if(visi){
Mat I; cvtColor(im,I,CV_GRAY2RGB);
for(int i = 0; i < n; i++)circle(I,p[i],1,CV_RGB(0,255,0),2,CV_AA);
rectangle(I,faces[0].tl(),faces[0].br(),CV_RGB(255,0,0),3);
imshow("face detector training",I); waitKey(10);
}
//check if enough points are in detected rectangle
if(this->enough_bounded_points(pt,faces[0],frac)){
Point2f center = this->center_of_mass(pt); float w = faces[0].width;
xoffset.push_back((center.x - (faces[0].x+0.5*faces[0].width ))/w);
yoffset.push_back((center.y - (faces[0].y+0.5*faces[0].height))/w);
zoffset.push_back(this->calc_scale(pt)/w);
}
}
if(mirror){
im = data.get_image(i,1); if(im.empty())continue;
p = data.get_points(i,true);
pt = Mat(p).reshape(1,2*n);
equalizeHist(im,eqIm);
detector.detectMultiScale(eqIm,faces,scaleFactor,minNeighbours,0
|CV_HAAR_FIND_BIGGEST_OBJECT
|CV_HAAR_SCALE_IMAGE,minSize);
if(faces.size() >= 1){
if(visi){
Mat I; cvtColor(im,I,CV_GRAY2RGB);
for(int i = 0; i < n; i++)circle(I,p[i],1,CV_RGB(0,255,0),2,CV_AA);
rectangle(I,faces[0].tl(),faces[0].br(),CV_RGB(255,0,0),3);
imshow("face detector training",I); waitKey(10);
}
//check if enough points are in detected rectangle
if(this->enough_bounded_points(pt,faces[0],frac)){
Point2f center = this->center_of_mass(pt); float w = faces[0].width;
xoffset.push_back((center.x - (faces[0].x+0.5*faces[0].width ))/w);
yoffset.push_back((center.y - (faces[0].y+0.5*faces[0].height))/w);
zoffset.push_back(this->calc_scale(pt)/w);
}
}
}
}
//choose median value
Mat X = Mat(xoffset),Xsort,Y = Mat(yoffset),Ysort,Z = Mat(zoffset),Zsort;
cv::sort(X,Xsort,CV_SORT_EVERY_COLUMN|CV_SORT_ASCENDING); int nx = Xsort.rows;
cv::sort(Y,Ysort,CV_SORT_EVERY_COLUMN|CV_SORT_ASCENDING); int ny = Ysort.rows;
cv::sort(Z,Zsort,CV_SORT_EVERY_COLUMN|CV_SORT_ASCENDING); int nz = Zsort.rows;
detector_offset = Vec3f(Xsort.fl(nx/2),Ysort.fl(ny/2),Zsort.fl(nz/2)); return;
}
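The median selection at the end of train() sorts each single-column Mat and reads its middle row; a minimal sketch of that trick on made-up offsets:
#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>
int main() {
    std::vector<float> offsets = {0.4f, -0.1f, 0.2f, 0.9f, 0.0f};
    cv::Mat X(offsets), Xsort;
    cv::sort(X, Xsort, CV_SORT_EVERY_COLUMN | CV_SORT_ASCENDING);
    std::cout << "median = " << Xsort.at<float>(Xsort.rows / 2) << std::endl; // 0.2
    return 0;
}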
Example 8: cannySegmentation
Mat skinDetector::cannySegmentation(Mat img0, int minPixelSize)
{
// Segments items in gray image (img0)
// minPixelSize=
// -1, returns largest region only
// pixels, threshold for removing smaller regions, with less than minPixelSize pixels
// 0, returns all detected segments
// LB: Zero pad image to remove edge effects when getting regions....
int padPixels=20;
// Rect border added at start...
Rect tempRect;
tempRect.x=padPixels;
tempRect.y=padPixels;
tempRect.width=img0.cols;
tempRect.height=img0.rows;
Mat img1 = Mat::zeros(img0.rows+(padPixels*2), img0.cols+(padPixels*2), CV_8UC1);
img0.copyTo(img1(tempRect));
// apply your filter
Canny(img1, img1, 100, 200, 3);
// find the contours
vector< vector<Point> > contours;
findContours(img1, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
// Mask for segmented regions
Mat mask = Mat::zeros(img1.rows, img1.cols, CV_8UC1);
vector<double> areas(contours.size());
if (minPixelSize==-1)
{ // Case of taking largest region
for(int i = 0; i < contours.size(); i++)
areas[i] = contourArea(Mat(contours[i]));
double max;
Point maxPosition;
minMaxLoc(Mat(areas),0,&max,0,&maxPosition);
drawContours(mask, contours, maxPosition.y, Scalar(1), CV_FILLED);
}
else
{ // Case for using minimum pixel size
for (int i = 0; i < contours.size(); i++)
{
if (contourArea(Mat(contours[i]))>minPixelSize)
drawContours(mask, contours, i, Scalar(1), CV_FILLED);
}
}
// normalize so imwrite(...)/imshow(...) shows the mask correctly!
normalize(mask.clone(), mask, 0.0, 255.0, CV_MINMAX, CV_8UC1);
Mat returnMask;
returnMask=mask(tempRect);
// show the images
if (verboseOutput) imshow("Canny Skin: Img in", img0);
if (verboseOutput) imshow("Canny Skin: Mask", returnMask);
if (verboseOutput) imshow("Canny Skin: Output", img1);
return returnMask;
}
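The normalize call above passes mask.clone() as the source so the same Mat can be reused as the destination; a minimal sketch of the idiom:
#include <opencv2/opencv.hpp>
int main() {
    cv::Mat mask(100, 100, CV_8UC1, cv::Scalar(0));
    cv::rectangle(mask, cv::Rect(20, 20, 40, 40), cv::Scalar(1), CV_FILLED);
    // Stretch {0,1} to {0,255}; clone() detaches the source buffer so the
    // destination can be reallocated safely over the same variable
    cv::normalize(mask.clone(), mask, 0.0, 255.0, CV_MINMAX, CV_8UC1);
    cv::imwrite("mask.png", mask);
    return 0;
}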
Example 9: handleStart
void ProcAbsPos::handleStart(ProjAcq& pAcq, AbsPos2D<float> const& pos) const {
Acq* acq = pAcq.getAcq();
Mat rgb = acq->getMat(BGR);
#ifdef WRITE_IMAGES
imwrite("rgb.png", rgb);
#endif
#ifdef COMP_HSV
Mat im_hsv = acq->getMat(HSV);
#ifdef HSV_TO_HGRAY
for (Mat_<Vec3b>::iterator it = im_hsv.begin<Vec3b>();
it != im_hsv.end<Vec3b>(); it++) {
(*it)[1] = (*it)[0];
(*it)[2] = (*it)[0];
}
#elif defined(HSV_TO_VGRAY)
for (Mat_<Vec3b>::iterator it = im_hsv.begin<Vec3b>();
it != im_hsv.end<Vec3b>(); it++) {
(*it)[0] = (*it)[2];
(*it)[1] = (*it)[2];
}
#endif
#ifdef WRITE_IMAGES
imwrite("hsv.png", im_hsv);
#endif /* WRITE_IMAGES */
#ifdef SHOW_IMAGES
imshow("hsv", im_hsv);
#endif /* SHOW_IMAGES */
#endif /* COMP_HSV */
#ifdef COMP_SIMULATED
// simulate acquisition
Mat _im3 = getSimulatedAt(pAcq, pos);
string _bn("_im3-sa");
#ifdef WRITE_IMAGES
imwrite(_bn + ".png", _im3);
#endif /* WRITE_IMAGES */
#ifdef SHOW_IMAGES
imshow(_bn, _im3);
#endif /* SHOW_IMAGES */
#ifdef COMP_HSV
// show simulated acquisition in hsv
Mat im3_hsv = _im3.clone();
cvtColor(_im3, im3_hsv, COLOR_BGR2HSV);
#ifdef HSV_TO_HGRAY
for (Mat_<Vec3b>::iterator it = im3_hsv.begin<Vec3b>();
it != im3_hsv.end<Vec3b>(); it++) {
(*it)[1] = (*it)[0];
(*it)[2] = (*it)[0];
}
#elif defined(HSV_TO_VGRAY)
for(Mat_<Vec3b>::iterator it = im3_hsv.begin<Vec3b>(); it != im3_hsv.end<Vec3b>(); it++) {
(*it)[0] = (*it)[2];
(*it)[1] = (*it)[2];
}
#endif
#ifdef WRITE_IMAGES
imwrite(bn + "_hsv.png", im3_hsv);
#endif /* WRITE_IMAGES */
#ifdef SHOW_IMAGES
imshow(bn + "_hsv", im3_hsv);
#endif /* SHOW_IMAGES */
Mat diff_hsv = im_hsv - im3_hsv;
#ifdef WRITE_IMAGES
imwrite(bn + "_diff_hsv.png", diff_hsv);
#endif /* WRITE_IMAGES */
#ifdef SHOW_IMAGES
imshow(bn + "_diff_hsv", diff_hsv);
#endif /* SHOW_IMAGES */
#endif /* COMP_HSV */
#ifdef COMP_TESTPOINTS
Mat _im3_tp = getTestPointsAt(pAcq, pos.getTransform().getReverse());
_bn = "_im3_tp-sa";
#ifdef WRITE_IMAGES
imwrite(_bn + ".png", _im3_tp);
#endif /* WRITE_IMAGES */
#ifdef SHOW_IMAGES
imshow(_bn, _im3_tp);
#endif /* SHOW_IMAGES */
#endif /* COMP_TESTPOINTS */
#endif /* COMP_SIMULATED */
#ifdef COMP_TESTPOINTS
Mat _rgb_tp = rgb.clone();
addTestPointsAtTo(_rgb_tp, pAcq, pos.getTransform().getReverse());
_bn = "_im_tp-sa";
#ifdef WRITE_IMAGES
imwrite(_bn + ".png", _rgb_tp);
#endif /* WRITE_IMAGES */
#ifdef SHOW_IMAGES
imshow(_bn, _rgb_tp);
#endif /* SHOW_IMAGES */
//......... part of the code omitted here .........
Example 10: detectFrontFaces
void detectFrontFaces(Mat front_body_roi, vector<Rect>boundRectFront,
size_t f, Mat image1, Mat frame1_gray, Ptr<FaceRecognizer> modelFront)
{
vector<Rect> frontalFaces; //Detected object(s)
float searchScaleFactor = 1.1; //How many sizes to search
int minNeighbors = 4; //Reliability vs many faces
int flags = 0 | CASCADE_SCALE_IMAGE; //Search for many faces
Size minFeatureSize(30, 30); //Smallest face size
frontFaceDetector.detectMultiScale(front_body_roi, frontalFaces,
searchScaleFactor, minNeighbors, flags, minFeatureSize);
if(frontalFaces.size() != 0){ //If faces are detected
isFrontFaceDetected = true;
for(size_t i = 0; i < 1; i++)
{
int faces_y1 = frontalFaces[i].y + boundRectFront[f].y;
int faces_y2 = frontalFaces[i].y + frontalFaces[i].height + boundRectFront[f].y;
Point f1(frontalFaces[i].x + boundRectFront[f].x, faces_y1);
Point f2(frontalFaces[i].x + frontalFaces[i].width + boundRectFront[f].x, faces_y2);
rectangle(image1, f1, f2, Scalar(0,0,255), 2, 8, 0);
//~ cout << "asdf: " << frontalFaces[i].x + frontalFaces[i].width + boundRectFront[f].x << endl;
Rect ROI(f1, f2);
Mat faceROI = frame1_gray(ROI);
Mat face_scaled;
if(!faceROI.empty())
{
frontFrameCounter++;
if(faceROI.cols != 50 && faceROI.rows!= 50){
resize(faceROI.clone(), face_scaled, Size(50,50));
}
else{
face_scaled = faceROI.clone();
}
equalizeHist(face_scaled, face_scaled);
imshow("Detected front face", face_scaled);
if(frontFrameCounter == 1){
time_t currentTime;
struct tm *localTime;
time( &currentTime ); // Get the current time
localTime = localtime( &currentTime ); // Convert the current time to the local time
int Hour = localTime->tm_hour;
int Min = localTime->tm_min;
int Sec = localTime->tm_sec;
stringstream ss;
ss << "Time: " << Hour << ":" << Min << ":" << Sec;
timeStamp = ss.str();
ss.str("");
}
recognizeFrontFaces(face_scaled, modelFront);
}
}
}
else{
frontFrameCounter = 0;
int array[10] =
{
a_1, b_1, c_1, d_1, e_1, f_1, g_1, h_1, i_1, j_1
};
for(int i=0;i<10;i++)
{
if(array[i] > maxLabel1)
maxLabel1 = array[i];
}
}
}
Example 11: p
//
// エッジ抽出実行
//
int OillineDetector2::Execute(Mat &src, double *dist, double *gl_theta, int *step, Mat &result_img, bool debug){
// initialization
*dist = *gl_theta = 0.0;
// if the input is the same image as last time, return the previous result
if( is_same_image( src, _last_img ) ){
*dist = _last_dist;
*gl_theta = _last_gl_theta;
result_img = _last_result_img.clone();
return _last_exec_ret;
}
// source image
int w = src.cols, h = src.rows;
// Red Channel
vector<Mat> bgr;
split( src, bgr);
Mat red_img = bgr[2];
// preprocessing ----->
// gamma correction
Mat gamma_img = auto_gamma_correction(red_img, _gamma_base, _gamma_scale);
// Gaussian filter
Mat gauss;
cv::GaussianBlur(gamma_img, gauss, Size( _gaussian_window, _gaussian_window), _gaussian_sigma, _gaussian_sigma);
// <---- end of preprocessing
Mat gray = gauss;
//
// get the peaks along each vertical line
//
vector< vector<int> > peaks;
for( int x=_peak_margin; x<gray.cols-_peak_margin; x+=_peak_interval ){
vector<int> pks = find_vertical_peaks( gray, x, _peak_isolate_interval, _peak_count, _peak_min );
peaks.push_back(pks);
}
//
// chain the peaks together
//
vector< vector<Point> > chains;
chains = make_chains( peaks, _chain_max_interval, _peak_margin, _peak_interval );
//
// remove messy, tangled chains
//
remove_noisy_chains( chains, _chain_noisy_th );
//
// remove flat peaks (keep only line-like peaks)
//
remove_flat_peaks( chains, gray, _flat_peak_th );
//
// remove short chains
//
for( int i=0; i<chains.size(); i++ ){
if(chains[i].size() < _chain_min_size ){
chains.erase( chains.begin() + i );
i--;
}
}
//
// keep only the chains with the highest mean intensity
//
focus_top_chains( chains, gray, _max_slit_cnt );
//
// linearly interpolate the missing sections
//
vector< vector<double> > intplChains;
intplChains = interpolate_chains( chains );
// create an image of the chains found so far
Mat chain_img = Mat::zeros( h, w, CV_8UC3);
for( int i=0; i<chains.size(); i++ )
polylines( chain_img, chains[i], false, Scalar(215,176,255) );
// smooth out fine oscillations
vector< vector<double> > smoothChains;
for( int i=0; i<intplChains.size(); i++ ){
vector<double> chain = smoothing( intplChains[i], _smooth_win_size );
smoothChains.push_back(chain);
}
// examine the steps on both sides
//......... part of the code omitted here .........
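Execute() caches its previous result and hands back _last_result_img.clone() on a repeated input; a minimal sketch of that caching idiom (the class and members here are hypothetical, not from OillineDetector2):
#include <opencv2/opencv.hpp>
// Hypothetical cache: store and hand out clones so neither the caller's
// later edits nor the cache's own state can corrupt the other.
class ResultCache {
    cv::Mat last_;
public:
    void store(const cv::Mat& result) { last_ = result.clone(); }
    cv::Mat fetch() const { return last_.clone(); }
};
int main() {
    ResultCache cache;
    cache.store(cv::Mat::eye(3, 3, CV_32F));
    cv::Mat r = cache.fetch(); // caller may modify r freely
    return 0;
}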
Example 12: main
//......... part of the code omitted here .........
GWDataset train(trainFileArg.getValue(),imageDirArg.getValue());
GWDataset test(testFileArg.getValue(),imageDirArg.getValue());
spotter.setTraining_dataset(&train);
spotter.setCorpus_dataset(&test);
int ex=imageArg.getValue();
if ( image2Arg.getValue()>=0 )
{
int ex2= image2Arg.getValue();
double score = spotter.compare(test.image(ex),test.image(ex2));
cout <<"score: "<<score<<endl;
}
else
{
cout <<"test word "<<test.labels()[ex]<<endl;
imshow("test word",test.image(ex));
vector<float> scores = spotter.spot(test.image(ex), "", 1);
multimap<float, int> ranked;
for (int i=0; i<scores.size(); i++)
ranked.emplace(scores[i],i);
auto iter = ranked.end();
for (int i=1; i<=5; i++)
{
iter--;
cout<<"I rank "<<i<<" is "<<iter->second<<" with score "<<iter->first<<endl;
imshow("I "+to_string(i),test.image(iter->second));
}
scores = spotter.spot(test.image(ex), test.labels()[ex], 0);
ranked.clear();
for (int i=0; i<scores.size(); i++)
ranked.emplace(scores[i],i);
iter = ranked.end();
for (int i=1; i<=5; i++)
{
iter--;
cout<<"T rank "<<i<<" is "<<iter->second<<" with score "<<iter->first<<endl;
imshow("T "+to_string(i),test.image(iter->second));
}
waitKey();
}
}
if (compare1Arg.getValue().length()>0 && compare2Arg.getValue().length()>0)
{
EmbAttSpotter spotter(modelArg.getValue());
Mat im1 = imread(compare1Arg.getValue(),CV_LOAD_IMAGE_GRAYSCALE);
Mat im2 = imread(compare2Arg.getValue(),CV_LOAD_IMAGE_GRAYSCALE);
//im1 = GWDataset::preprocess(im1);
//im2 = GWDataset::preprocess(im2);
cout<<spotter.compare(im1,im2)<<endl;
}
if (fullFileArg.getValue().length()>0)
{
assert(compare1Arg.getValue().length()>0);
EmbAttSpotter spotter(modelArg.getValue());
StrideDataset im(fullFileArg.getValue());
spotter.setCorpus_dataset_fullSub(&im);
Mat ex = imread(compare1Arg.getValue(),CV_LOAD_IMAGE_GRAYSCALE);
clock_t start = clock();
time_t start2 = time(0);
vector< SubwordSpottingResult > res = spotter.subwordSpot_full(ex,"",1);
clock_t end = clock();
time_t end2 = time(0);
double time = (double) (end-start) / CLOCKS_PER_SEC;
double time2 = difftime(end2, start2); // difftime already returns seconds
cout<<"Took "<<time<<" secs."<<endl;
cout<<"Took "<<time2<<" secs."<<endl;
Mat img = imread(fullFileArg.getValue());
Mat orig = img.clone();
int top=100;
float maxS = res[0].score;
float minS = res[top].score;
for (int i=0; i<top; i++)
{
float s = 1-((res[i].score-minS)/(maxS-minS));
//cout <<res[i].score<<": "<<s<<endl;
for (int x=res[i].startX; x<=res[i].endX; x++)
for (int y=res[i].imIdx*3; y<res[i].imIdx*3 +65; y++)
{
Vec3b& p = img.at<Vec3b>(y,x);
Vec3b o = orig.at<Vec3b>(y,x);
p[0] = min(p[0],(unsigned char)(o[0]*s));
}
}
imwrite("spotting.png",img);
imshow("spotting",img);
waitKey();
}
} catch (TCLAP::ArgException &e) // catch any exceptions
{ std::cerr << "error: " << e.error() << " for arg " << e.argId() << std::endl; }
}
Example 13: BilateralFilter
Mat BilateralFilter(const Mat& ImRef, const Mat& ImMsk, const Mat& ImSrc, const int wndSZ, double sig_sp, const double sig_clr)
{
// filter signal must be 1 channel
CV_Assert(ImSrc.type() == CV_32FC1 );
// range parameter is half window size
sig_sp = wndSZ / 2.0f;
int H = ImRef.rows;
int W = ImRef.cols;
int H_WD = wndSZ / 2;
Mat ImDst = ImSrc.clone();
if( 1 == ImRef.channels() )
{
for( int y = 0; y < H; y ++ )
{
uchar * pmsk = (uchar *)(ImMsk.ptr<uchar>( y )); //imMsk
float * psrc = (float *)(ImSrc.ptr<float>( y )); //imSrc;
float * pdst = (float *)(ImDst.ptr<float>( y )); //imDst
double * pref = (double *)(ImRef.ptr<double>( y )); //imRef
for( int x = 0; x < W; x ++ )
{
if(pmsk[x] == 0) continue;
double sum = 0.0f;
double sumWgt = 0.0f;
for( int wy = - H_WD; wy <= H_WD; wy ++ )
{
int qy = y + wy;
if( qy < 0 )
{
qy += H;
}
if( qy >= H )
{
qy -= H;
}
uchar * qmsk = (uchar *)(ImMsk.ptr<uchar>( qy ));
float * qsrc = (float *)(ImSrc.ptr<float>( qy )); //imSrc
double * qref = (double *)(ImRef.ptr<double>( qy )); //imRef
for( int wx = - H_WD; wx <= H_WD; wx ++ )
{
int qx = x + wx;
if( qx < 0 )
{
qx += W;
}
if( qx >= W )
{
qx -= W;
}
if( qmsk[qx] == 0) continue;
double spDis = wx * wx + wy * wy;
double clrDis = fabs( qref[ qx ] - pref[ x ] );
double wgt = exp( - spDis / ( sig_sp * sig_sp ) - clrDis * clrDis / ( sig_clr * sig_clr ) );
sum += wgt * qsrc[ qx ];
sumWgt += wgt;
}
}
pdst[ x ] = sum / sumWgt;
}
}
}
else if( 3 == ImRef.channels() )
{
for( int y = 0; y < H; y ++ )
{
uchar * pmsk = (uchar *)(ImMsk.ptr<uchar>(y));
float * psrc = (float *)(ImSrc.ptr<float>(y));
float * pdst = (float *)(ImDst.ptr<float>(y));
double * pref = (double *)(ImRef.ptr<double>(y));
for( int x = 0; x < W; x ++ )
{
if(pmsk[x] == 0) continue;
double * pClr = pref + 3 * x;
double sum = 0.0f;
double sumWgt = 0.0f;
for( int wy = - H_WD; wy <= H_WD; wy ++ )
{
int qy = y + wy;
if( qy < 0 )
{
qy += H;
}
if( qy >= H )
{
qy -= H;
}
uchar * qmsk = (uchar *)(ImMsk.ptr<uchar>(qy));
float * qsrc = (float *)(ImSrc.ptr<float>(qy));
double * qref = (double *)(ImRef.ptr<double>(qy));
//......... part of the code omitted here .........
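For comparison only: OpenCV ships a built-in cv::bilateralFilter that covers the unmasked case handled above; a minimal sketch with illustrative parameter values:
#include <opencv2/opencv.hpp>
int main() {
    cv::Mat src = cv::imread("input.png"), dst;
    if (src.empty()) return 1;
    // d = pixel neighborhood diameter; the two sigmas play the roles of
    // sig_clr and sig_sp in the hand-rolled version above
    cv::bilateralFilter(src, dst, 9, 75.0, 75.0);
    cv::imwrite("filtered.png", dst);
    return 0;
}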
Example 14: ProcessInput
tResult cParkingDirection::ProcessInput(IMediaSample* pSample)
{
// VideoInput
RETURN_IF_POINTER_NULL(pSample);
cObjectPtr<IMediaSample> pNewRGBSample;
const tVoid* l_pSrcBuffer;
if (IS_OK(pSample->Lock(&l_pSrcBuffer)))
{
Mat image;
image = Mat(m_sInputFormat.nHeight,m_sInputFormat.nWidth,CV_8UC1,(tVoid*)l_pSrcBuffer,m_sInputFormat.nBytesPerLine);
Mat transformedImage = image(Rect(0,480,200,PROJECTED_IMAGE_HEIGTH)).clone();
Mat sobeledImage = image(Rect(0,480+250,200,PROJECTED_IMAGE_HEIGTH)).clone();
Mat groundPlane = image(Rect(0,480+2*250,200,PROJECTED_IMAGE_HEIGTH)).clone();
pSample->Unlock(l_pSrcBuffer);
//create an output image for debugging
Mat generalOutputImage(300,800,CV_8UC3,Scalar(0,0,0));
Mat transformedCol;
cvtColor(transformedImage,transformedCol,CV_GRAY2BGR);
/*
tUInt32 stamp = 0;
SendSignalValueMessage(&m_oDistanceOutputPin,cModel.getDistToCrossing(),stamp);*/
pModel.cameraUpdate(transformedImage,sobeledImage,groundPlane);
tFloat32 parallel = pModel.certaintyParallel / (pModel.certaintyParallel+pModel.certaintyCross);
tFloat32 cross = pModel.certaintyCross / (pModel.certaintyParallel+pModel.certaintyCross);
//LOG_INFO(cString::Format("ParallelC: %f, CrossC: %f",parallel,cross));
if(parallel > cross)
SendSignalValueMessage(&m_oDirectionOutputPin, 2, 0);
else if(cross > parallel)
SendSignalValueMessage(&m_oDirectionOutputPin, 1, 0);
else
SendSignalValueMessage(&m_oDirectionOutputPin, 0, 0);
#ifdef PAINT_OUTPUT
Point2d carCenter(100,0);
Mat ipmColor = transformedCol.clone();
pModel.paintDebugInfo(ipmColor);
circle(ipmColor,Point(3,ipmColor.rows-1-80),2,Scalar(0,255,255),-1);
circle(ipmColor,Point(3,ipmColor.rows-1),2,Scalar(0,255,255),-1);
circle(ipmColor,Point(ipmColor.cols-3,ipmColor.rows-1),2,Scalar(0,255,255),-1);
circle(ipmColor,Point(ipmColor.cols-3,ipmColor.rows-1-80),2,Scalar(0,255,255),-1);
circle(ipmColor,Point(126,50),2,Scalar(0,255,255),-1);
circle(ipmColor,Point(74,50),2,Scalar(0,255,255),-1);
ipmColor.copyTo(generalOutputImage(Rect(0,0,ipmColor.cols,ipmColor.rows)));
#endif
#ifdef PAINT_OUTPUT
cObjectPtr<IMediaSample> pNewRGBSample;
if (IS_OK(AllocMediaSample(&pNewRGBSample)))
{
tTimeStamp tmStreamTime = _clock ? _clock->GetStreamTime() : adtf_util::cHighResTimer::GetTime();
pNewRGBSample->Update(tmStreamTime, generalOutputImage.data, tInt32(3*800*300), 0);
RETURN_IF_FAILED(m_oVideoOutputPin.Transmit(pNewRGBSample));
}
#endif
}
RETURN_NOERROR;
}
Example 15: main
int main( int argc, const char* argv[] )
{
cv::Mat sourceImage = cv::imread(fileName4);
namedWindow("Original Image", WINDOW_AUTOSIZE); // Create a window for display.
imshow("Original Image", sourceImage); // Show our image inside it.
cvtColor(sourceImage, sourceImage, CV_BGR2GRAY);
Mat img = sourceImage.clone();
Mat tmp, dst;
IplImage ipl = sourceImage;
cv::Mat m = cv::cvarrToMat(&ipl); // default additional arguments: don't copy data.
cvSmooth(&ipl, &ipl, CV_GAUSSIAN, 9, 9, 0);
cv::threshold(m, dst, 50, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);
namedWindow("Binarized2", CV_WINDOW_AUTOSIZE); //create a window with the name "Binarized"
imshow("Binarized2", dst);
//cv::threshold(img, img, 50, 255, cv::THRESH_BINARY | cv::THRESH_OTSU);
CvMat cvmat = m;
cvAdaptiveThreshold(&cvmat, &cvmat, 255, CV_ADAPTIVE_THRESH_GAUSSIAN_C, CV_THRESH_BINARY, 13, 1);
localThreshold::binarisation(img, 41, 56);
Mat binImg = img.clone();
ideka::binOptimisation(img);
namedWindow( "Binarised Image", WINDOW_AUTOSIZE ); // Create a window for display.
imshow( "Binarised Image", binImg ); // Show our image inside it.
namedWindow( "Optimised Image", WINDOW_AUTOSIZE ); // Create a window for display.
imshow( "Optimised Image", img ); // Show our image inside it.
// Create markers image
/*cv::Mat markers(m.size(), CV_8U, cv::Scalar(-1));
//Rect(topleftcornerX, topleftcornerY, width, height);
//top rectangle
markers(Rect(0, 0, m.cols, 10)) = Scalar::all(1);
//bottom rectangle
markers(Rect(0, m.rows - 10, m.cols, 10)) = Scalar::all(1);
//left rectangle
markers(Rect(0, 0, 10, m.rows)) = Scalar::all(1);
//right rectangle
markers(Rect(m.cols - 10, 0, 10, m.rows)) = Scalar::all(1);
//centre rectangle
int centreW = m.cols / 3;
int centreH = m.rows / 3;
markers(Rect((m.cols / 2) - (centreW / 2), (m.rows / 2) - (centreH / 2), centreW, centreH)) = Scalar::all(3);
markers.convertTo(markers, CV_BGR2GRAY);
imshow("markers", markers);
//Create watershed segmentation object
Mat dest1;
WatershedSegmenter segmenter;
segmenter.setMarkers(markers);
cv::Mat wshedMask = segmenter.process(sourceImage);
cv::Mat mask;
convertScaleAbs(wshedMask, mask, 1, 0);
double thresh = threshold(mask, mask, 1, 255, THRESH_BINARY);
bitwise_and(m, m, dest1, mask);
dest1.convertTo(dest1, CV_8U);
imshow("final_result", dest1);*/
//skeletionizing
cv::bitwise_not(img, img); //Inverse for bit-operations
GuoHall::thinning(img);
cv::bitwise_not(img, img);
namedWindow( "Skeletenised Image", WINDOW_AUTOSIZE ); // Create a window for display.
imshow( "Skeletenised Image", img ); // Show our image inside it.
//Minutiae-Extraction
vector<Minutiae> minutiae;
crossingNumber::getMinutiae(img, minutiae, 30);
cout<<"Number of Minutiae Results : " << minutiae.size() << endl;
//Visualisation
Mat minutImg = img.clone();
cvtColor(img, minutImg, CV_GRAY2RGB);
for(std::vector<Minutiae>::size_type i = 0; i<minutiae.size(); i++){
//add an transparent square at each minutiae-location
int squareSize = 5; //has to be uneven
Mat roi = minutImg(Rect(minutiae[i].getLocX()-squareSize/2, minutiae[i].getLocY()-squareSize/2, squareSize, squareSize));
double alpha = 0.3;
if(minutiae[i].getType() == Minutiae::Type::RIDGEENDING){
Mat color(roi.size(), CV_8UC3, cv::Scalar(255,0,0)); //blue square for ridgeending
addWeighted(color, alpha, roi, 1.0 - alpha , 0.0, roi);
}else if(minutiae[i].getType() == Minutiae::Type::BIFURCATION){
Mat color(roi.size(), CV_8UC3, cv::Scalar(0,0,255)); //red square for bifurcation
addWeighted(color, alpha, roi, 1.0 - alpha , 0.0, roi);
}
}
namedWindow( "Minutie", WINDOW_AUTOSIZE ); // Create a window for display.
imshow( "Minutie", minutImg ); // Show our image inside it.
//......... part of the code omitted here .........