本文整理汇总了C++中Mat::adjustROI方法的典型用法代码示例。如果您正苦于以下问题:C++ Mat::adjustROI方法的具体用法?C++ Mat::adjustROI怎么用?C++ Mat::adjustROI使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Mat
的用法示例。
在下文中一共展示了Mat::adjustROI方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: process_image
// Convert one raw camera frame to grayscale, adjust the ROI, and hand the
// result to the single-argument process_image() overload for OCR.
//
// @param img_data        raw frame in packed YUYV format (Y0 U Y1 V ...),
//                        e.g. read from a camera
// @param bytes_per_pixel unused in this overload
// @param bytes_per_line  stride of one input row in bytes
// @param left, top       origin of the region of interest
// @param width, height   frame dimensions in pixels
//
// NOTE(review): the de-interleaving loops below assume
// bytes_per_line == 2 * width; verify against the capture settings.
void OCRer::process_image(unsigned char *img_data, int bytes_per_pixel, int bytes_per_line, int left, int top, int width, int height) {
// one single-channel plane per YUV component
Mat y (height, width, CV_8UC1);
Mat u (height, width, CV_8UC1);
Mat v (height, width, CV_8UC1);
Mat yuyv (height, width, CV_8UC3);
int i, j;
int n = bytes_per_line * height;
// every second byte of a YUYV stream is a luma (Y) sample
for (i = 0, j = 0; j < n; i++, j += 2)
y.data[i] = img_data[j];
// U sits at byte offset 1 of every 4-byte YUYV group; it is shared by the
// two pixels of the pair, so duplicate it
for (i = 0, j = 1; j < n; i += 2, j += 4) {
u.data[i] = img_data[j];
u.data[i+1] = img_data[j];
}
// V sits at byte offset 3 of every 4-byte group; duplicated likewise
for (i = 0, j = 3; j < n; i += 2, j += 4) {
v.data[i] = img_data[j];
v.data[i+1] = img_data[j];
}
// interleave the three planes into one 3-channel image
Mat arr[3] = {y, u, v};
merge(arr, 3, yuyv);
Mat rgb (height, width, CV_8UC3);
Mat gray;
// NOTE(review): channels are packed Y,U(Cb),V(Cr) but CV_YCrCb2RGB expects
// Y,Cr,Cb -- Cr/Cb look swapped; since only grayscale is used downstream
// (luma dominates), this may be harmless, but confirm.
cvtColor(yuyv, rgb, CV_YCrCb2RGB);
cvtColor(rgb, gray, CV_RGB2GRAY);
// NOTE(review): adjustROI takes border *deltas* (dtop, dbottom, dleft,
// dright), not an absolute rectangle; passing (top, height, left, width)
// only grows the ROI (clamped to the parent image). A crop such as
// gray(Rect(left, top, width, height)) was likely intended -- verify.
gray.adjustROI(top, height, left, width);
process_image(gray);
}
示例2: processNewFrame
// Process one incoming camera frame: posterize it, extract blob
// candidates, pick the one whose pixels best match the configured HSV
// colour range, and publish the detected ball position (if any).
// Intermediate stages are shown in debug windows along the way.
void processNewFrame(Mat &frame) {
    // Discard everything above the horizon line.
    frame.adjustROI(-horizonPt.y, 0, 0, 0);
    imshow("Original", frame);

    // Posterize with mean-shift filtering to flatten colour regions.
    Mat posterized;
    pyrMeanShiftFiltering(frame, posterized, 4, 20, 2);
    imshow("posterized", posterized);

    // Flood-fill post-processing yields a binary candidate mask.
    Mat filteredMask = floodFillPostprocess(posterized);
    imshow("flood", posterized);
    imshow("flood fill filtered", filteredMask);

    ballContour previousBall(-1);
    vector<ballContour> candidates = doContours(filteredMask);
    ballContour bestCandidate((double) -1);
    Mat ballImage = Mat::zeros(posterized.size(), posterized.type());

    for (vector<ballContour>::iterator cand = candidates.begin();
         cand != candidates.end(); ++cand) {
        try {
            // Pixels belonging to this blob, taken from the original frame.
            Mat blobPixels = getContourPixels(cand->contour, frame);

            // Count how many of them fall inside the configured HSV range.
            Mat hsv(blobPixels.size(), blobPixels.type());
            cvtColor(blobPixels, hsv, CV_BGR2HSV);
            Mat rangeMask(blobPixels.size(), CV_8UC1);
            inRange(hsv,
                    Scalar((low_b/100.0)*255, (low_g/100.0)*255, (low_r/100.0)*255, 0),
                    Scalar((high_b/100.0)*255, (high_g/100.0)*255, (high_r/100.0)*255, 0),
                    rangeMask);

            int matching = countNonZero(rangeMask);
            cand->sizeMeasure = matching;
            // Keep the candidate with the most in-range pixels.
            if (matching > 0 && matching > bestCandidate.sizeMeasure) {
                bestCandidate = *cand;
            }
        } catch (...) {
            continue;
        }
    }

    if (bestCandidate.sizeMeasure == -1) {
        ROS_INFO("no ball found");
    } else {
        // Translate back into full-frame coordinates (the frame was
        // cropped at the horizon above).
        Point ballPosition = Point(bestCandidate.pixelPosition.x,
                                   bestCandidate.pixelPosition.y
                                       + (actualFrameSize.height - ballImage.size().height));
        publishMessage(ballPosition);
        ellipse(ballImage, bestCandidate.pixelPosition, Size(5,5),
                0, 0, 360, Scalar(0,0,255), CV_FILLED, 8, 0);
    }

    imshow("ball detected", ballImage + posterized);
    waitKey(3);
}
示例3: getUMat
// Build a UMat header that shares this Mat's data (truncated excerpt --
// a complete copy of this function appears later in this listing).
UMat Mat::getUMat(int accessFlags, UMatUsageFlags usageFlags) const
{
UMat hdr;
// An empty Mat maps to an empty UMat.
if(!data)
return hdr;
// If this Mat is a ROI into a larger allocation, build the UMat for the
// whole parent matrix and return the corresponding sub-rectangle, so the
// device buffer covers the complete parent data.
Size wholeSize;
Point ofs;
locateROI(wholeSize, ofs);
Size sz(cols, rows);
if (ofs.x != 0 || ofs.y != 0)
{
Mat src = *this;
// Border deltas that expand the ROI back to the full parent extent.
int dtop = ofs.y;
int dbottom = wholeSize.height - src.rows - ofs.y;
int dleft = ofs.x;
int dright = wholeSize.width - src.cols - ofs.x;
src.adjustROI(dtop, dbottom, dleft, dright);
return src.getUMat(accessFlags, usageFlags)(cv::Rect(ofs.x, ofs.y, sz.width, sz.height));
}
// NOTE(review): the example is cut off here mid-statement.
CV_Assert(data =
示例4: detectFacesInROI
// Optimised detection for smaller regions: scan only (slightly enlarged)
// rectangles where faces were found in the previous frame, and fall back
// to a full-frame scan when nothing is found there.
//
// @param lastRects face rectangles from the previous frame
// @param frame     current frame to search
void FaceDetector::detectFacesInROI(vector<Rect>& lastRects, const Mat& frame)
{
vector<Rect> newRects;
for(vector<Rect>::iterator i = lastRects.begin(); i != lastRects.end(); ++i)
{
vector<Rect> tempRects;
// skip degenerate / out-of-bounds rectangles
if( i->x < 0 || i->y < 0 || i->width < 2 || i->height < 2)
{
continue;
}
// OpenCV detects (near-)square faces, so the width alone determines the
// margin; never grow by less than 35 px per side
int adjustFactor = max((static_cast<int>(i->width * roiScaleFactor) - i->width) / 2, 35);
// the vertical margin is 10% larger than the horizontal one; compute it
// once so the expansion and the coordinate correction below agree
int vertAdjust = static_cast<int>(adjustFactor * 1.10);
// view onto the previous face rectangle
Mat roiFrame = frame(*i);
// enlarge the view (adjustROI clamps at the image border, so near the
// edges the actual expansion may be smaller than requested)
roiFrame.adjustROI(vertAdjust, vertAdjust, adjustFactor, adjustFactor);
// detect on the smaller sub-image
runFaceDetect(roiFrame, tempRects);
// map detections back to full-frame coordinates and collect them
for(vector<Rect>::iterator iRect = tempRects.begin(); iRect != tempRects.end(); ++iRect)
{
iRect->x += i->x - adjustFactor;
// BUG FIX: the vertical offset must undo the *vertical* expansion;
// subtracting only adjustFactor left detections shifted down by 10%
// of the margin
iRect->y += i->y - vertAdjust;
newRects.push_back(*iRect);
}
}
// nothing found in any ROI -> scan the whole frame
if(newRects.empty())
{
runFaceDetect(frame, newRects);
}
// publish the updated results
setAreas(newRects);
}
示例5: ImageReconstruct
// Grayscale morphological reconstruction of `marker` under `mask`, done in
// place: on return `marker` holds the reconstructed image and `mask` is
// left unchanged. The structure (one forward raster scan, one backward
// raster scan that also seeds a FIFO queue, then queue-driven propagation)
// matches the classic fast-hybrid reconstruction algorithm.
// NOTE(review): `T` is the pixel type of a template this snippet was
// extracted from, and Pixel / PropagationStep<T> are defined elsewhere in
// that file.
void ImageReconstruct(Mat& marker, Mat& mask)
{
// FIFO of pixels whose change may still need propagating
// (NOTE: the variable shadows the std::queue type name -- legal, but
// confusing to read)
queue<Pixel> queue;
// pad both images with a constant-0 border of `offset` pixels so the
// neighbour accesses below never read outside the image
int offset = 4;
Scalar scalar = Scalar(0);
Mat tempMarkerImage = Mat(marker.rows + 2 * offset, marker.cols + 2 * offset, marker.depth());
copyMakeBorder(marker, tempMarkerImage, offset, offset, offset, offset, BORDER_CONSTANT, scalar);
Mat tempMaskImage = Mat(mask.rows + 2 * offset, mask.cols + 2 * offset, mask.depth());
copyMakeBorder(mask, tempMaskImage, offset, offset, offset, offset, BORDER_CONSTANT, scalar);
// Forward (top-left to bottom-right) raster scan: raise each pixel to the
// maximum of itself and its already-visited neighbours (W, NW, N, NE),
// then clamp the result to the mask value.
for(int row = offset; row < tempMarkerImage.rows - offset; row++) {
for(int col = offset; col < tempMarkerImage.cols - offset; col++) {
T currentPixel = tempMarkerImage.at<T>(row, col);
if(currentPixel < tempMarkerImage.at<T>(row, col-1))
currentPixel = tempMarkerImage.at<T>(row, col-1);
if(currentPixel < tempMarkerImage.at<T>(row-1, col-1))
currentPixel = tempMarkerImage.at<T>(row-1, col-1);
if(currentPixel < tempMarkerImage.at<T>(row-1, col))
currentPixel = tempMarkerImage.at<T>(row-1, col);
if(currentPixel < tempMarkerImage.at<T>(row-1, col+1))
currentPixel = tempMarkerImage.at<T>(row-1, col+1);
// clamp: the reconstruction may never exceed the mask
if(currentPixel > tempMaskImage.at<T>(row, col))
tempMarkerImage.at<T>(row, col) = tempMaskImage.at<T>(row, col);
else
tempMarkerImage.at<T>(row, col) = currentPixel;
}
}
// Backward (bottom-right to top-left) raster scan: symmetric to the
// forward pass, using the E, SE, S, SW neighbours, again clamped by the
// mask. While scanning, seed the queue for the propagation phase.
for(int row = tempMarkerImage.rows - offset - 1; row > offset - 1; row--) {
for(int col = tempMarkerImage.cols - offset - 1; col > offset - 1; col--) {
T currentPixel = tempMarkerImage.at<T>(row, col);
if(currentPixel < tempMarkerImage.at<T>(row, col+1))
currentPixel = tempMarkerImage.at<T>(row, col+1);
if(currentPixel < tempMarkerImage.at<T>(row+1, col+1))
currentPixel = tempMarkerImage.at<T>(row+1, col+1);
if(currentPixel < tempMarkerImage.at<T>(row+1, col))
currentPixel = tempMarkerImage.at<T>(row+1, col);
if(currentPixel < tempMarkerImage.at<T>(row+1, col-1))
currentPixel = tempMarkerImage.at<T>(row+1, col-1);
if(currentPixel > tempMaskImage.at<T>(row, col))
tempMarkerImage.at<T>(row, col) = tempMaskImage.at<T>(row, col);
else
tempMarkerImage.at<T>(row, col) = currentPixel;
// Enqueue this pixel if any E/SE/S/SW neighbour could still be raised:
// i.e. the neighbour is below the current value AND below its own mask
// value (so propagation would not violate the clamp).
currentPixel = tempMarkerImage.at<T>(row, col);
if(tempMarkerImage.at<T>(row, col+1) < currentPixel &&
tempMarkerImage.at<T>(row, col+1) < tempMaskImage.at<T>(row, col+1))
{
Pixel p = { row, col };
queue.push(p);
}
else if(tempMarkerImage.at<T>(row+1, col+1) < currentPixel &&
tempMarkerImage.at<T>(row+1, col+1) < tempMaskImage.at<T>(row+1, col+1))
{
Pixel p = { row, col };
queue.push(p);
}
else if(tempMarkerImage.at<T>(row+1, col) < currentPixel &&
tempMarkerImage.at<T>(row+1, col) < tempMaskImage.at<T>(row+1, col))
{
Pixel p = { row, col };
queue.push(p);
}
else if(tempMarkerImage.at<T>(row+1, col-1) < currentPixel &&
tempMarkerImage.at<T>(row+1, col-1) < tempMaskImage.at<T>(row+1, col-1))
{
Pixel p = { row, col };
queue.push(p);
}
}
}
// Propagation phase: pop pixels and push their value out to all eight
// neighbours (PropagationStep enqueues any neighbour it changes) until
// the image is stable.
while(!queue.empty()) {
Pixel pixel = queue.front(); queue.pop();
for(int i = -1; i <= 1; i++) {
for(int k = -1; k <= 1; k++) {
if(i == 0 && k == 0)
continue;
PropagationStep<T>(tempMarkerImage, tempMaskImage, pixel.row, pixel.col, i, k, queue);
}
}
}
// Shrink the ROI back to the original extent (drops the padding border)
// and copy the result over the caller's marker image.
tempMarkerImage.adjustROI(-offset, -offset, -offset, -offset);
tempMarkerImage.copyTo(marker);
tempMarkerImage.release();
tempMaskImage.release();
}
示例6: clip
// Resize `image` into `dest_image` for a dest_width x dest_height crop,
// using face detection (configured via config_path) or, failing that,
// character detection to pick an interesting vertical position.
// Returns -1 for the very-tall special case below; the remainder of the
// function is cut off in this excerpt, so the other return values are not
// visible here.
// NOTE(review): `start`, `clt`, `detectFace`, `detectCharacter` and LOGD
// are defined elsewhere in the file; `param` is declared but never used in
// the visible part.
int clip(Mat & image, Mat & dest_image, const char * config_path, int dest_width, int dest_height)
{
Size tmp_size;
float ratio_width = 0;
float ratio_height = 0;
float ratio = 0;
int clip_top = 0;
int clip_bottom = 0;
int clip_left = 0;
int clip_right = 0;
int result = 0;
int param;
LOGD("using config: %s", config_path);
LOGD("start to read image ");
start = clock();
LOGD("start to resize");
LOGD("width of dest image %d", dest_width);
LOGD("height of dest image %d", dest_height);
LOGD("width of origin image %d", image.size().width);
LOGD("height of origin image %d", image.size().height);
// Extremely tall image (height >= 3x width): just scale to the target
// width and adjust the bottom edge to the target height -- no detection.
if (image.size().width * 3 <= image.size().height)
{
LOGD("type is 1");
ratio = (float)dest_width / image.size().width;
LOGD("ratio is %f", ratio);
tmp_size = Size((int)(image.size().width * ratio), (int)(image.size().height * ratio));
dest_image = Mat(tmp_size, CV_32S);
resize(image, dest_image, tmp_size);
clip_top = 0;
clip_bottom = dest_height - dest_image.size().height;
clip_left = 0;
clip_right = 0;
// Mat& Mat::adjustROI(int dtop, int dbottom, int dleft, int dright)
// NOTE(review): a negative clip_bottom crops the bottom; a positive one
// requests growth, which adjustROI clamps to the allocation -- confirm
// the intended behaviour when dest_height exceeds the resized height.
dest_image.adjustROI(clip_top, clip_bottom, clip_left, clip_right);
return -1;
}
// Otherwise normalise the image to 300 px wide for the detectors.
ratio = (float)300.0 / image.size().width;
LOGD("ratio is %f", ratio);
tmp_size = Size((int)(image.size().width * ratio), (int)(image.size().height * ratio));
dest_image = Mat(tmp_size, CV_32S);
resize(image, dest_image, tmp_size);
LOGD("start to detectFace");
start = clock();
result = detectFace( dest_image, config_path);
clt = clock() - start;
LOGD("detectFace cost time %g", (double)clt/CLOCKS_PER_SEC);
LOGD("detectFace Y is %d", result);
LOGD("detectFace end");
// No face found -> fall back to character (text) detection.
if ( result == -1 )
{
LOGD("start to detectCahracter");
start = clock();
result = detectCharacter( dest_image );
clt = clock() - start;
LOGD("detectCharacter cost time %g", (double)clt/CLOCKS_PER_SEC);
LOGD("detectCharacter Y is %d", result);
LOGD("detectCharacter end");
}
// Map the detected Y coordinate from the 300-px-wide working image back
// to original-image coordinates (-1 means nothing was detected).
result = result == -1 ? -1 : (int)((float)result / ratio);
LOGD("the origin result is %d", result);
// Choose the larger scale factor so the resized image covers the target
// rectangle in both dimensions.
ratio_width = (float)dest_width / image.size().width;
ratio_height = (float)dest_height / image.size().height;
LOGD("ratio of width %f", ratio_width);
LOGD("ratio of height %f", ratio_height);
if (ratio_width > ratio_height)
{
ratio = ratio_width;
}
else
{
ratio = ratio_height;
}
// Rescale the detected Y coordinate into the final resized image.
result = result == -1 ? -1 : (int)((float)result * ratio);
LOGD("ratio is %f", ratio);
tmp_size = Size((int)(image.size().width * ratio), (int)(image.size().height * ratio));
dest_image = Mat(tmp_size, CV_32S);
resize(image, dest_image, tmp_size);
LOGD("width of resize image %d", dest_image.size().width);
LOGD("height of resize image %d", dest_image.size().height);
// source bitmap width less than height
// ... remainder of the function omitted in this excerpt ...
示例7: getUMat
// Build a UMat header that shares this Mat's data, allocating (or reusing)
// a device-side buffer through the configured allocators.
UMat Mat::getUMat(int accessFlags, UMatUsageFlags usageFlags) const
{
UMat hdr;
// An empty Mat maps to an empty UMat.
if(!data)
return hdr;
// A ROI (data != datastart) is handled by recursing on the whole parent
// matrix and slicing the resulting UMat, so the device buffer covers the
// complete parent allocation.
if (data != datastart)
{
Size wholeSize;
Point ofs;
locateROI(wholeSize, ofs);
Size sz(cols, rows);
if (ofs.x != 0 || ofs.y != 0)
{
Mat src = *this;
// Border deltas that expand the ROI back to the full parent extent.
int dtop = ofs.y;
int dbottom = wholeSize.height - src.rows - ofs.y;
int dleft = ofs.x;
int dright = wholeSize.width - src.cols - ofs.x;
src.adjustROI(dtop, dbottom, dleft, dright);
return src.getUMat(accessFlags, usageFlags)(cv::Rect(ofs.x, ofs.y, sz.width, sz.height));
}
}
// From here on we must be looking at the full (non-ROI) matrix.
CV_Assert(data == datastart);
// The host keeps read/write access to the shared data.
accessFlags |= ACCESS_RW;
UMatData* new_u = NULL;
{
// Wrap the existing host memory via this Mat's allocator (or the
// default one if none is set).
MatAllocator *a = allocator, *a0 = getDefaultAllocator();
if(!a)
a = a0;
new_u = a->allocate(dims, size.p, type(), data, step.p, accessFlags, usageFlags);
}
// Try the UMat standard allocator first; fall back to the default
// allocator if that fails or throws.
bool allocated = false;
try
{
allocated = UMat::getStdAllocator()->allocate(new_u, accessFlags, usageFlags);
}
catch (const cv::Exception& e)
{
fprintf(stderr, "Exception: %s\n", e.what());
}
if (!allocated)
{
allocated = getDefaultAllocator()->allocate(new_u, accessFlags, usageFlags);
CV_Assert(allocated);
}
// If this Mat already has a UMatData, link the new one back to it and
// bump both reference counts so neither side is freed while the
// temporary UMat is alive.
if (u != NULL)
{
#ifdef HAVE_OPENCL
// NOTE(review): with the OpenCL allocator the new UMatData is expected
// to be a temporary wrapper around the host data -- confirmed only by
// this assertion, not visible from this snippet.
if (ocl::useOpenCL() && new_u->currAllocator == ocl::getOpenCLAllocator())
{
CV_Assert(new_u->tempUMat());
}
#endif
new_u->originalUMatData = u;
CV_XADD(&(u->refcount), 1);
CV_XADD(&(u->urefcount), 1);
}
// Finalise the UMat header: copy shape/step/flags from this Mat, attach
// the new UMatData, and take a reference.
hdr.flags = flags;
setSize(hdr, dims, size.p, step.p);
finalizeHdr(hdr);
hdr.u = new_u;
hdr.offset = 0; //data - datastart;
hdr.addref();
return hdr;
}