This page collects typical usage examples of the C++ method Image::Buffer. If you are wondering what exactly Image::Buffer does, how to call it, or where to find working examples of it, the hand-picked code samples below may help. You can also explore further usage examples of the containing class Image.
The following 5 code examples of Image::Buffer are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
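Before the individual examples, the following sketch condenses the two recurring Image::Buffer() patterns seen on this page: attaching an externally allocated pixel buffer, and querying the buffer format and data pointer. The helper BufferUsageSketch is hypothetical; it only reuses type and method names that appear in the examples below and assumes they belong to the same UIC-style image framework. It is not a complete program.
// A minimal sketch, not a complete program: it only combines calls that appear
// in the examples below and assumes the same UIC-style image framework.
void BufferUsageSketch(Image& imageCn,
                       ImageDataPtr& dataPtr,
                       ImageDataOrder& dataOrder,
                       ImageSamplingGeometry& geometry,
                       BMPDecoder& decoder)
{
    // Writing: attach an externally allocated pixel buffer to the image
    // (see ReadImageBMP and SaveImageBMP below).
    imageCn.Buffer().Attach(&dataPtr, dataOrder, geometry);

    // Reading: inspect the buffer format and hand the raw data pointer to a codec
    // (see AttachImage and ReadImageBMP below).
    if (imageCn.Buffer().BufferFormat().DataOrder().DataType() == T8u)
        decoder.ReadData(imageCn.Buffer().DataPtr(), dataOrder);
}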
Example 1: AttachImage
ExcStatus AttachImage(const Image& image)
{
    m_image = &image;

    const ImageSamplingGeometry& imgSamplingGeometry = image.Buffer().BufferFormat().SamplingGeometry();
    const ImageColorSpec&        imgColorSpec        = image.ColorSpec();
    const ImageDataOrder&        dataOrder           = image.Buffer().BufferFormat().DataOrder();

    // Only interleaved 8-bit unsigned data is accepted.
    if(dataOrder.ComponentOrder() != UIC::Interleaved)
        return ExcStatusFail;

    if(dataOrder.DataType() != T8u)
        return ExcStatusFail;

    Ipp32u nOfComponents = imgSamplingGeometry.NOfComponents();
    Ipp32u component;
    unsigned int bitDepth = 0;

    for(component = 0; component < nOfComponents; component++)
    {
        // Every component must be sampled at full resolution (no subsampling).
        RectSize& sampleSize = imgSamplingGeometry.SampleSize()[component];

        if(sampleSize.Width() != 1)
            return ExcStatusFail;

        if(sampleSize.Height() != 1)
            return ExcStatusFail;

        // All components must be unsigned and share the same bit depth.
        ImageDataRange& dataRange = imgColorSpec.DataRange()[component];

        if(dataRange.IsSigned())
            return ExcStatusFail;

        if(component)
        {
            if(dataRange.BitDepth() != bitDepth)
                return ExcStatusFail;
        }

        bitDepth = dataRange.BitDepth();
    }

    if(imgColorSpec.ColorSpecMethod() != Enumerated)
        return ExcStatusFail;

    if(imgColorSpec.ComponentToColorMap() != Direct)
        return ExcStatusFail;

    return ExcStatusOk;
}
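As a hedged usage sketch, this is how such a validation routine might be driven by a caller. MyEncoder is a hypothetical class exposing the AttachImage() above; compare encoder.AttachImage(imageCn) in Example 4.
// Sketch only: `MyEncoder` is a hypothetical class exposing the AttachImage()
// shown above; the error handling mirrors the ExcStatus pattern of the example.
ExcStatus PrepareEncoder(MyEncoder& encoder, const Image& image)
{
    // AttachImage() rejects anything that is not interleaved, 8-bit, unsigned,
    // unsubsampled data with a uniform bit depth across components.
    if (ExcStatusOk != encoder.AttachImage(image))
        return ExcStatusFail;
    return ExcStatusOk;
}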
Example 2: checkZone
/*! \fn FaceDetectorPlugin::checkZone(Zone *zone, const Image *zmImage)
 *  \param zone is the zone in which faces will be detected
 *  \param zmImage is the image on which face detection is performed (as a ZM Image)
 *  \return true if objects were detected in the given image,
 *          false otherwise
 */
bool FaceDetectorPlugin::checkZone(Zone *zone, const Image *zmImage)
{
    //log(LOG_DEBUG, "Entering checkZone.");
    double score;
    Polygon zone_polygon = Polygon(zone->GetPolygon()); // Polygon of interest of the processed zone.

    //char szMessage[50];
    //sprintf(szMessage, "Polygon of the zone has %d vertices.", zone_polygon.getNumCoords());
    //log(LOG_WARNING, szMessage);

    //zone->ResetStats();
    /*
    log(LOG_WARNING, "precheck");
    if ( !zone->CheckOverloadCount() )
    {
        log(LOG_WARNING, "CheckOverloadCount() returned false, we'll return false.");
        return(false);
    }
    */
    //zmLoadConfig();

    // An image for highlighting detected objects.
    Image *pMaskImage = new Image(zmImage->Width(), zmImage->Height(), ZM_COLOUR_GRAY8, ZM_SUBPIX_ORDER_NONE);
    pMaskImage->Fill(BLACK);
    //log(LOG_WARNING, "FILLBLACK.");

    // A temporary ZM image used to build the CvMat from.
    // Without the temporary copy, the original would be altered by the RGB->BGR conversion.
    Image *tempZmImage = new Image(*zmImage);

    CvMat* cvInputImage = NULL;
    CvMat* pScaledImage = NULL;
    bool bDoResizing = (m_fImageScaleFactor != 1.0); // resize image or not

    if (tempZmImage->Colours() == ZM_COLOUR_GRAY8)
    {
        // If the image is not coloured, create a one-channel CvMat.
        cvInputImage = cvCreateMat(tempZmImage->Height(), tempZmImage->Width(), CV_8UC1);
        unsigned char *buffer = (unsigned char*)tempZmImage->Buffer();
        cvSetData(cvInputImage, buffer, tempZmImage->Width());
    }
    // NEXTIME XXX TODO: manage also 32 bit images!
    else
    {
        // Otherwise create a three-channel CvMat and convert colours from RGB to BGR.
        cvInputImage = cvCreateMat(tempZmImage->Height(), tempZmImage->Width(), CV_8UC3);
        unsigned char *buffer = (unsigned char*)tempZmImage->Buffer();
        cvSetData(cvInputImage, buffer, tempZmImage->Width() * 3);
        cvCvtColor(cvInputImage, cvInputImage, CV_RGB2BGR);
    }

    if (bDoResizing)
    {
        int nNewWidth      = int(m_fImageScaleFactor * zmImage->Width());
        int nNewHeight     = int(m_fImageScaleFactor * tempZmImage->Height());
        int nImageElemType = cvGetElemType(cvInputImage);
        pScaledImage = cvCreateMat(nNewHeight, nNewWidth, nImageElemType);
        cvResize(cvInputImage, pScaledImage, CV_INTER_LINEAR);
    }

    // Process image.
    vector<CvRect> foundObjects;
    if (bDoResizing)
        foundObjects = _opencvHaarDetect(pScaledImage);
    else
        foundObjects = _opencvHaarDetect(cvInputImage);

    if (foundObjects.size() > 0)
        log(LOG_INFO, "OBJECTS WERE DETECTED");

    score = 0;
    for (vector<CvRect>::iterator it = foundObjects.begin(); it < foundObjects.end(); it++)
    {
        // Process found objects.
        // Scale the object's coordinates back if the image has been scaled.
        int x1 = int(it->x / m_fImageScaleFactor),
            x2 = int((it->x + it->width) / m_fImageScaleFactor),
            y1 = int(it->y / m_fImageScaleFactor),
            y2 = int((it->y + it->height) / m_fImageScaleFactor);

        // Check whether the object's rectangle is inside the zone's polygon of interest:
        // if at least three rectangle vertices lie inside the polygon, the rectangle is
        // considered to belong to the zone; otherwise process the next object.
        Coord rectVertCoords[4] = {Coord(x1, y1), Coord(x1, y2), Coord(x2, y1), Coord(x2, y2)};
        int nNumVertInside = 0;
        for (int i = 0; i < 4; i++)
        {
            nNumVertInside += zone_polygon.isInside(rectVertCoords[i]);
        }

        if (nNumVertInside < 3)
            continue;

        // Fill a box with the object in the mask.
        Box *faceBox = new Box(x1, y1, x2, y2);
        pMaskImage->Fill(WHITE, faceBox);
//......... the rest of the code is omitted here .........
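The Image::Buffer() call in this example serves one purpose: exposing the ZoneMinder pixel buffer so it can be wrapped in a CvMat without copying. Below is a stripped-down sketch of just that step; the function name is illustrative, and it deliberately uses cvCreateMatHeader() instead of cvCreateMat() so that no second pixel block is allocated before cvSetData() repoints the data (an adjustment, not part of the original example).
// Sketch: wrap a ZoneMinder Image's pixel buffer in a CvMat header without copying.
// WrapZmImage is an illustrative helper, not part of the plugin above.
CvMat* WrapZmImage(Image* zmImage)
{
    const int channels = (zmImage->Colours() == ZM_COLOUR_GRAY8) ? 1 : 3;
    CvMat* mat = cvCreateMatHeader(zmImage->Height(), zmImage->Width(),
                                   (channels == 1) ? CV_8UC1 : CV_8UC3);

    // Image::Buffer() exposes the raw pixels; the row step is width * channels bytes.
    cvSetData(mat, (unsigned char*)zmImage->Buffer(), zmImage->Width() * channels);
    return mat; // release with cvReleaseMat(&mat); the pixels stay owned by zmImage
}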
Example 3: ReadImageBMP
IM_ERROR ReadImageBMP(
    BaseStreamInput& in,
    PARAMS_BMP&,
    CIppImage& image)
{
    int                   i;
    Image                 imageCn;
    IppiSize              roi;
    ImageDataPtr          dataPtr;
    ImageDataOrder        dataOrder;
    ImageColorSpec        colorSpec;
    ImageSamplingGeometry geometry;
    BMPDecoder            decoder;

    if(ExcStatusOk != decoder.Init())
        return IE_INIT;

    if(ExcStatusOk != decoder.AttachStream(in))
        return IE_ASTREAM;

    if(ExcStatusOk != decoder.ReadHeader(colorSpec, geometry))
        return IE_RHEADER;

    int nOfComponents = geometry.NOfComponents();

    // Describe the destination layout: interleaved 8-bit samples, one pixel
    // step per component and a padded line step.
    dataOrder.SetDataType(T8u);
    dataOrder.ReAlloc(Interleaved, nOfComponents);
    dataOrder.PixelStep()[0] = nOfComponents;
    dataOrder.LineStep() [0] = geometry.RefGridRect().Width() * nOfComponents +
                               BYTES_PAD(geometry.RefGridRect().Width(), nOfComponents, 1);

    imageCn.ColorSpec().ReAlloc(nOfComponents);
    imageCn.ColorSpec().SetColorSpecMethod(Enumerated);
    imageCn.ColorSpec().SetComponentToColorMap(Direct);

    for(i = 0; i < nOfComponents; i++)
        imageCn.ColorSpec().DataRange()[i].SetAsRange8u(255);

    ImageEnumColorSpace in_color;
    ImageEnumColorSpace out_color;

    in_color  = colorSpec.EnumColorSpace();
    out_color = in_color;

    imageCn.ColorSpec().SetEnumColorSpace(out_color);
    image.Color((IM_COLOR)image.UicToIppColor(out_color));

    roi.width  = geometry.RefGridRect().Width();
    roi.height = geometry.RefGridRect().Height();

    if(0 != image.Alloc(roi, nOfComponents, 8, 1))
        return IE_ALLOC;

    // Attach the externally allocated pixel buffer to the UIC image and let
    // the decoder write straight into it.
    dataPtr.p8u = image;
    imageCn.Buffer().Attach(&dataPtr, dataOrder, geometry);

    if(ExcStatusOk != decoder.ReadData(imageCn.Buffer().DataPtr(), dataOrder))
        return IE_RDATA;

    return IE_OK;
} // ReadImageBMP()
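A hedged usage sketch for ReadImageBMP(): FileStreamInput stands in for whatever concrete BaseStreamInput implementation the surrounding framework provides, and PARAMS_BMP is assumed to be default-constructible as in the example.
// Sketch only: FileStreamInput is a placeholder for a concrete BaseStreamInput.
IM_ERROR LoadBmp(FileStreamInput& in, CIppImage& image)
{
    PARAMS_BMP params;

    IM_ERROR err = ReadImageBMP(in, params, image);
    if (err != IE_OK)
        return err; // IE_INIT, IE_ASTREAM, IE_RHEADER, IE_ALLOC or IE_RDATA

    // On success, `image` owns the 8-bit interleaved pixels whose layout was
    // described to the decoder through imageCn.Buffer().Attach(...).
    return IE_OK;
}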
Example 4: SaveImageBMP
IM_ERROR SaveImageBMP(
    CIppImage& image,
    PARAMS_BMP&,
    BaseStreamOutput& out)
{
    int                   i;
    int                   nOfComponents;
    Image                 imageCn;
    Rect                  refgrid;
    Point                 origin;
    RectSize              size;
    ImageDataPtr          dataPtr;
    ImageColorSpec        colorSpec;
    ImageDataOrder        dataOrder;
    ImageSamplingGeometry geometry;
    BMPEncoder            encoder;

    if(ExcStatusOk != encoder.Init())
        return IE_INIT;

    if(ExcStatusOk != encoder.AttachStream(out))
        return IE_ASTREAM;

    nOfComponents = image.NChannels();

    dataOrder.SetDataType(T8u);

    // Describe the source geometry: full-resolution (4:4:4) sampling over a
    // reference grid that matches the image dimensions.
    size.SetWidth(image.Width());
    size.SetHeight(image.Height());
    origin.SetX(0);
    origin.SetY(0);
    refgrid.SetOrigin(origin);
    refgrid.SetSize(size);

    geometry.SetRefGridRect(refgrid);
    geometry.ReAlloc(nOfComponents);
    geometry.SetEnumSampling(S444);

    dataOrder.ReAlloc(Interleaved, nOfComponents);
    dataOrder.PixelStep()[0] = nOfComponents;
    dataOrder.LineStep() [0] = image.Step();

    imageCn.ColorSpec().ReAlloc(nOfComponents);
    imageCn.ColorSpec().SetColorSpecMethod(Enumerated);
    imageCn.ColorSpec().SetComponentToColorMap(Direct);

    for(i = 0; i < nOfComponents; i++)
        imageCn.ColorSpec().DataRange()[i].SetAsRange8u(255);

    switch(image.NChannels())
    {
    case 1: imageCn.ColorSpec().SetEnumColorSpace(Grayscale); break;
    case 3: imageCn.ColorSpec().SetEnumColorSpace(BGR);       break;
    case 4: imageCn.ColorSpec().SetEnumColorSpace(BGRA);      break;
    default:
        break;
    }

    // BMP stores pixels as BGR(A), so swap channels if the source is RGB(A).
    int dstOrder_c3[3] = {2, 1, 0};
    int dstOrder_c4[4] = {2, 1, 0, 3};

    if(image.NChannels() == 3 && image.Color() == IC_RGB)
    {
        image.SwapChannels(dstOrder_c3);
        image.Color((IM_COLOR)IC_BGR);
    }
    else if(image.NChannels() == 4 && image.Color() == IC_RGBA)
    {
        image.SwapChannels(dstOrder_c4);
        image.Color((IM_COLOR)IC_BGRA);
    }

    // Attach the pixel buffer to the UIC image and hand it to the encoder.
    dataPtr.p8u = image;
    imageCn.Buffer().Attach(&dataPtr, dataOrder, geometry);

    if(ExcStatusOk != encoder.AttachImage(imageCn))
        return IE_AIMAGE;

    if(ExcStatusOk != encoder.WriteHeader())
        return IE_WHEADER;

    if(ExcStatusOk != encoder.WriteData())
        return IE_WDATA;

    return IE_OK;
} // SaveImageBMP()
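Combining Examples 3 and 4 gives a simple round trip. This is a hedged sketch: FileStreamInput and FileStreamOutput are placeholders for concrete stream implementations, and the BMP parameters are left at their defaults.
// Sketch only: the stream types are placeholders for concrete implementations.
IM_ERROR CopyBmp(FileStreamInput& in, FileStreamOutput& out)
{
    CIppImage  image;
    PARAMS_BMP params;

    IM_ERROR err = ReadImageBMP(in, params, image);
    if (err != IE_OK)
        return err;

    // Note: SaveImageBMP() may swap RGB(A) to BGR(A) in place before attaching
    // the buffer with imageCn.Buffer().Attach(...), so `image` can be modified.
    return SaveImageBMP(image, params, out);
}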
Example 5: checkZone
/*! \fn ANPRPlugin::checkZone(Zone *zone, const Image *zmImage)
 *  \param zone is the zone in which license plates will be detected
 *  \param zmImage is the image on which plate detection is performed (as a ZM Image)
 *  \return true if objects were detected in the given image,
 *          false otherwise
 */
bool ANPRPlugin::checkZone(Zone *zone, const Image *zmImage)
{
    double score;
    Polygon zone_polygon = Polygon(zone->GetPolygon()); // Polygon of interest of the processed zone.

    Image *pMaskImage = new Image(zmImage->Width(), zmImage->Height(), ZM_COLOUR_GRAY8, ZM_SUBPIX_ORDER_NONE);
    pMaskImage->Fill(BLACK);

    // A temporary ZM image used to build the cv::Mat from.
    // Without the temporary copy, the original would be altered by the RGB->BGR conversion.
    Image *tempZmImage = new Image(*zmImage);

    int imgtype = CV_8UC1;
    if (tempZmImage->Colours() == ZM_COLOUR_RGB24)
        imgtype = CV_8UC3;

    Mat cvInputImage = Mat(
        tempZmImage->Height(),
        tempZmImage->Width(),
        imgtype, (unsigned char*)tempZmImage->Buffer()).clone();
    //Mat cvInputImage = cvtmpInputImage.reshape(0, tempZmImage->Colours());
    //Mat cvInputImage = cvtmpInputImage.reshape(0, tempZmImage->Height());

    if (tempZmImage->Colours() == ZM_COLOUR_RGB24)
    {
        cvtColor(cvInputImage, cvInputImage, CV_RGB2BGR);
    }
    //imwrite("/tmp/sarca.jpg", cvInputImage);

    // Process image.
    liprec::LiPRec plateDetector;
    liprec::PlatesImage plates;
    plateDetector.detectPlates(cvInputImage, &plates);

    score = 0;
    if (plates.plates.size() > 0) {
        log(LOG_INFO, "PLATES WERE DETECTED");

        for (unsigned int i = 0; i < plates.plates.size(); i++) {
            // Check whether the plate's rectangle is inside the zone's polygon of interest:
            // if at least three rectangle vertices lie inside the polygon, the rectangle is
            // considered to belong to the zone; otherwise process the next object.
            int x1 = plates.plates[i].rect.x, x2 = int(plates.plates[i].rect.x + plates.plates[i].rect.width);
            int y1 = plates.plates[i].rect.y, y2 = int(plates.plates[i].rect.y + plates.plates[i].rect.height);

            Coord rectVertCoords[4] = {Coord(x1, y1), Coord(x1, y2), Coord(x2, y1), Coord(x2, y2)};
            int nNumVertInside = 0;
            for (int p = 0; p < 4; p++)
            {
                nNumVertInside += zone_polygon.isInside(rectVertCoords[p]);
            }

            if (nNumVertInside < 3)
                continue;

            log(LOG_INFO, plates.plates[i].platetxt);

            // Fill a box with the object in the mask.
            Box *plateBox = new Box(x1, y1, x2, y2);
            pMaskImage->Fill(WHITE, plateBox);
            score = m_nAlarmScore;
            delete plateBox;
        }
    }

    if (score == 0)
    {
        //log(LOG_DEBUG, "No objects found. Exit.");
        delete pMaskImage;
        delete tempZmImage;
        // XXX Do we need to delete the Mats? (cv::Mat releases its data via reference counting.)
        return( false );
    }
    /*
    else
    {
        zone->SetOverloadCount(zone->GetOverloadFrames());
        delete pMaskImage;
        delete tempZmImage;
        return( false );
    }*/

    zone->SetScore((int)score);

    // Get the mask by highlighting the contours of the objects and overlaying them with previous contours.
    Rgb alarm_colour = RGB_GREEN;
    Image *hlZmImage = pMaskImage->HighlightEdges(alarm_colour, ZM_COLOUR_RGB24,
                                                  ZM_SUBPIX_ORDER_RGB, &zone_polygon.Extent());

    if (zone->Alarmed())
    {
        // If there was a previous detection and it has already set up an alarm image,
        // overlay it with the current mask.
        Image* pPrevZoneMask = new Image(*(zone->AlarmImage()));
        pPrevZoneMask->Overlay(*hlZmImage);
        zone->SetAlarmImage(pPrevZoneMask);
        delete pPrevZoneMask;
//......... the rest of the code is omitted here .........
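For reference, the Image::Buffer() step of this last example in isolation: the cv::Mat constructor only wraps the buffer returned by Image::Buffer(), and clone() makes a deep copy so the later colour conversion cannot modify the ZoneMinder image. ZmImageToMat is an illustrative helper, not part of the plugin.
// Sketch: copy a ZoneMinder Image into an independent cv::Mat.
cv::Mat ZmImageToMat(Image* zmImage)
{
    const int type = (zmImage->Colours() == ZM_COLOUR_RGB24) ? CV_8UC3 : CV_8UC1;

    // The Mat constructor wraps Image::Buffer() without copying; clone() then
    // detaches the result from the ZoneMinder-owned pixels.
    cv::Mat wrapped(zmImage->Height(), zmImage->Width(), type,
                    (unsigned char*)zmImage->Buffer());
    return wrapped.clone();
}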