本文整理汇总了C++中ofxCvColorImage::getCvImage方法的典型用法代码示例。如果您正苦于以下问题:C++ ofxCvColorImage::getCvImage方法的具体用法?C++ ofxCvColorImage::getCvImage怎么用?C++ ofxCvColorImage::getCvImage使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类ofxCvColorImage
的用法示例。
在下文中一共展示了ofxCvColorImage::getCvImage方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1:
//--------------------------------------------------------------------------------
// Assigns the contents of a color image to this float image, collapsing
// RGB to a single gray channel. Sizes must match; mismatches are reported
// and the assignment is skipped.
void ofxCvFloatImage::operator = ( ofxCvColorImage& mom ) {
    // Guard clause: refuse to convert between differently sized images.
    if( mom.width != width || mom.height != height ) {
        cout << "error in =, images are different sizes" << endl;
        return;
    }
    cvCvtColor( mom.getCvImage(), cvImage, CV_RGB2GRAY );
}
示例2: cvFilterCartoon
/// ****************************************************
///
/// CARTOON FILTER
///
/// ****************************************************
/// Renders `src` into `dst` with a cartoon-style look:
/// mean-shift filtering posterizes flat color regions, then Canny edges
/// are subtracted so outlines appear as dark strokes.
/// @param src  input color image (w x h)
/// @param dst  output color image; receives the filtered pixels
/// @param w    image width in pixels
/// @param h    image height in pixels
/// @return always true
bool testApp::cvFilterCartoon(ofxCvColorImage &src, ofxCvColorImage &dst, int w, int h)
{
    // Temporary storage. FIX: the original version never released these
    // three images, leaking 7 bytes/pixel on every call.
    IplImage* pyr      = cvCreateImage( cvSize(w,h), IPL_DEPTH_8U, 3 );
    IplImage* edges    = cvCreateImage( cvSize(w,h), IPL_DEPTH_8U, 1 );
    IplImage* edgesRgb = cvCreateImage( cvSize(w,h), IPL_DEPTH_8U, 3 );

    // Grayscale copy of the source, used for edge detection below.
    ofxCvGrayscaleImage tempGrayImg;
    tempGrayImg.allocate(w, h);
    tempGrayImg.setFromColorImage(src);

    // Posterize: mean-shift filtering flattens color regions (sp = sr = 10).
    cvPyrMeanShiftFiltering(src.getCvImage(), pyr, 10, 10);
    // Find strong edges on the grayscale copy (both Canny thresholds 150).
    cvCanny(tempGrayImg.getCvImage(), edges, 150, 150);
    cvCvtColor(edges, edgesRgb, CV_GRAY2RGB);
    // Subtract the (white) edges from the posterized image so edges go dark.
    cvAbsDiff(pyr, edgesRgb, pyr);

    dst.setFromPixels((unsigned char *)pyr->imageData, w, h);

    // Release the temporaries (plugs the leak in the original).
    cvReleaseImage(&pyr);
    cvReleaseImage(&edges);
    cvReleaseImage(&edgesRgb);
    return true;
}
示例3:
//--------------------------------------------------------------------------------
// Pixel-wise saturating addition of another color image into this one.
// Images must be the same size; otherwise an error is printed and
// nothing changes.
void ofxCvColorImage::operator += ( ofxCvColorImage& mom ) {
    // Guard clause: both images must have identical dimensions.
    if( mom.width != width || mom.height != height ) {
        cout << "error in +=, images are different sizes" << endl;
        return;
    }
    // Sum into the scratch buffer, then swap it in as the current image.
    cvAdd( cvImage, mom.getCvImage(), cvImageTemp );
    swapTemp();
}
示例4: setFromCvColorImage
//--------------------------------------------------------------------------------
// Converts a color image into this grayscale image (RGB -> gray).
// The two images' ROIs must match; on mismatch an error is logged and
// this image is left untouched.
void ofxCvGrayscaleImage::setFromCvColorImage( ofxCvColorImage& mom ) {
    if( matchingROI(getROI(), mom.getROI()) ) {
        cvCvtColor( mom.getCvImage(), cvImage, CV_RGB2GRAY );
        // Mark pixels dirty so dependent caches/textures refresh.
        flagImageChanged();
    } else {
        // FIX: the original message said "in =", copy-pasted from the
        // assignment operator; name the actual function instead.
        ofLog(OF_LOG_ERROR, "in setFromCvColorImage, ROI mismatch");
    }
}
示例5: feedImg
// Feeds a new camera frame into the display: reallocates the working
// image if the frame size changed, then warps the frame through the
// `translate` perspective matrix into `image`.
// @param _source  incoming color frame (not modified)
void margDisplay::feedImg(ofxCvColorImage& _source) {
    // FIX: the original compared widths only (a height-only change never
    // triggered reallocation) and allocated from `source` — a different
    // identifier than the `_source` parameter actually being fed in.
    if (image.getWidth() != _source.getWidth() || image.getHeight() != _source.getHeight()) {
        image.clear();
        image.allocate(_source.getWidth(), _source.getHeight());
    }
    cvWarpPerspective(_source.getCvImage(), image.getCvImage(), translate);
    image.flagImageChanged();
}
示例6: keyImage
// Chroma-keys `src` into `dst`: pixels whose hue is within tH of H and
// whose saturation is within tS of S are masked out (set to 0 in the
// mask); all other pixels are copied through.
// @param src  input color image (RGB)
// @param dst  output color image; keyed result (RGB)
// @param w,h  image dimensions in pixels
void chromaKeyer::keyImage( ofxCvColorImage & src, ofxCvColorImage & dst, int w, int h )
{
    // Resize the scratch images if the incoming frame size changed.
    if( hsvImage.width != w || hsvImage.height != h)
    {
        hsvImage.allocate(w,h);
        hueImg.allocate(w,h);
        satImg.allocate(w,h);
        valImg.allocate(w,h);
    }
    // Convert src into HSV color space.
    hsvImage.setFromPixels(src.getPixels(),w,h);
    hsvImage.convertRgbToHsv();
    // Split the HSV channels into grayscale planes.
    hsvImage.convertToGrayscalePlanarImages(hueImg,satImg,valImg);
    // Interleaved HSV pixels: [i*3] = hue, [i*3+1] = saturation.
    unsigned char * pixelsHue = hsvImage.getPixels();
    unsigned char * dstMask = new unsigned char[w*h];
    // Build the key mask: 0 where hue AND saturation fall inside the
    // keyed window, 255 elsewhere.
    for( int i = 0; i < w*h; i++)
    {
        if( pixelsHue[i*3] >= H-tH && pixelsHue[i*3] <= H+tH&&
            pixelsHue[i*3+1] >= S-tS && pixelsHue[i*3+1] <= S+tS
            ){
            dstMask[i] = 0;
        }else{
            dstMask[i] = 255;
        }
    }
    hueImg.setFromPixels(dstMask,w,h);
    // Copy HSV pixels into dst only where the mask is non-zero.
    cvCopy( hsvImage.getCvImage(),dst.getCvImage(),hueImg.getCvImage());
    dst.flagImageChanged();
    dst.convertHsvToRgb();
    // FIX: storage came from new[]; plain `delete` here was undefined
    // behavior — must use the array form.
    delete [] dstMask;
}
示例7: scaleIntoMe
//--------------------------------------------------------------------------------
// Scales `mom` into this image using the requested OpenCV interpolation
// method. Invalid methods fall back to CV_INTER_NN with a warning.
//
// Valid methods:
//   CV_INTER_NN     - nearest-neighbor interpolation
//   CV_INTER_LINEAR - bilinear interpolation (OpenCV's default)
//   CV_INTER_AREA   - pixel-area resampling; preferred for decimation
//                     (moire-free); similar to NN when zooming
//   CV_INTER_CUBIC  - bicubic interpolation
void ofxCvColorImage::scaleIntoMe( ofxCvColorImage& mom, int interpolationMethod){
    // FIX: the original chained the inequalities with ||, which is true
    // for EVERY value (no value equals all four constants), so every call
    // warned and was forced to CV_INTER_NN. The method is invalid only if
    // it differs from ALL of the known constants — hence &&.
    if ((interpolationMethod != CV_INTER_NN) &&
        (interpolationMethod != CV_INTER_LINEAR) &&
        (interpolationMethod != CV_INTER_AREA) &&
        (interpolationMethod != CV_INTER_CUBIC) ){
        printf("error in scaleIntoMe / interpolationMethod, setting to CV_INTER_NN \n");
        interpolationMethod = CV_INTER_NN;
    }
    cvResize( mom.getCvImage(), cvImage, interpolationMethod );
}
示例8: update
// Per-frame driver for background subtraction. Feeds `input` to two
// models at once — an average/abs-diff model and a YUV codebook model —
// learning for the first LEARNING_TIME ms, then segmenting foreground.
// The four public IplImage members are refreshed at the end of each call.
void ofxBackground::update(ofxCvColorImage& input){
// Timestamp in ms; stored as float (loses precision on very long runs).
float now = ofGetElapsedTimeMillis();
// get width/height disregarding ROI
IplImage* ipltemp = input.getCvImage();
_width = ipltemp->width;
_height = ipltemp->height;
// Lazy (re)allocation. NOTE(review): on the frame where allocation
// happens, the else-branch is skipped, so that frame is NOT processed —
// presumably intentional (timeStartedLearning must be set first), but
// it does drop one frame on every size change; confirm.
if( inputCopy.getWidth() == 0 ) {
allocate( _width, _height );
} else if( inputCopy.getWidth() != _width || inputCopy.getHeight() != _height ) {
// reallocate to new size
clear();
allocate( _width, _height );
} else { //don't do anything unless we have allocated! (and therefore set timeStartedLearning to a safe, non zero value)
// Keep a copy of the frame (with ROI) and a YUV conversion of it.
inputCopy = input;
inputCopy.setROI( input.getROI() );
yuvImage.setROI( input.getROI() ); //pass on ROI'ness
yuvImage.setFromPixels(inputCopy.getPixels(), _width, _height);
yuvImage.convertRgbToYuv();
if((now-timeStartedLearning) < LEARNING_TIME){
//then we should be learning
//LEARNING THE AVERAGE AND AVG DIFF BACKGROUND
accumulateBackground(inputCopy.getCvImage());
//LEARNING THE CODEBOOK BACKGROUND
// Walk the YUV pixel buffer 3 bytes (one pixel) at a time, updating
// one codebook entry per pixel.
pColor = (uchar *)((yuvImage.getCvImage())->imageData);
for(int c=0; c<imageLen; c++)
{
cvupdateCodeBook(pColor, cB[c], cbBounds, nChannels);
pColor += 3;
}
//TODO: clear stale entries
bStatsDone = false;
bLearning = true;
}else {
//its either time to do stats or not
bLearning = false;
if(!bStatsDone){
//do the stats, just the once (first frame after learning ends)
createModelsfromStats(); //create the background model
bStatsDone = true;
}else {
//learn as normal, find the foreground if any
//FIND FOREGROUND BY AVG METHOD:
backgroundDiff(inputCopy.getCvImage(),ImaskAVG);
cvCopy(ImaskAVG,ImaskAVGCC);
cvconnectedComponents(ImaskAVGCC);
//FIND FOREGROUND BY CODEBOOK METHOD
// Per-pixel codebook comparison: write a mask byte for each pixel.
uchar maskPixelCodeBook;
pColor = (uchar *)((yuvImage.getCvImage())->imageData); //3 channel yuv image
uchar *pMask = (uchar *)((ImaskCodeBook)->imageData); //1 channel image
for(int c=0; c<imageLen; c++)
{
maskPixelCodeBook = cvbackgroundDiff(pColor, cB[c], nChannels, minMod, maxMod);
*pMask++ = maskPixelCodeBook;
pColor += 3;
}
//This part just to visualize bounding boxes and centers if desired
cvCopy(ImaskCodeBook,ImaskCodeBookCC);
cvconnectedComponents(ImaskCodeBookCC);
//TODO: update the learned background pixels....
//TODO: clear stale codebook entries on a much slower frequency
}
}
// Publish the current masks through the public member aliases.
backgroundAverage = ImaskAVG;
backgroundAverageConnectedComponents = ImaskAVGCC;
backgroundCodebook = ImaskCodeBook;
backgroundCodeBookConnectedComponents = ImaskCodeBookCC;
}
}
示例9: cvCopy
//--------------------------------------------------------------------------------
// Copy constructor: duplicates mom's pixel buffer into this image.
// NOTE(review): cvCopy requires `cvImage` to already be a valid, allocated
// header of matching size/depth — presumably set up by a delegated/base
// initialization not visible here; verify, otherwise this copies into an
// uninitialized image.
// NOTE(review): getCvImage() is invoked on a const reference, which only
// compiles if that accessor is const-qualified — confirm in the header.
ofxCvColorImage::ofxCvColorImage( const ofxCvColorImage& mom ) {
cvCopy( mom.getCvImage(), cvImage, 0 );
}