This article collects typical usage examples of the C++ method ofxCvGrayscaleImage::flagImageChanged. If you are wondering what ofxCvGrayscaleImage::flagImageChanged does, how to call it, or where to find examples of it in use, the curated code samples below may help. You can also read more about the class it belongs to, ofxCvGrayscaleImage.
Six code examples of ofxCvGrayscaleImage::flagImageChanged are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
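Before the individual examples, here is a minimal sketch of the pattern they all share: modify the IplImage returned by getCvImage() with an OpenCV C-API call, then call flagImageChanged() so the wrapper knows its cached pixels and texture are stale. The helper name thresholdInPlace and the cvThreshold parameters are illustrative assumptions, not taken from any of the examples below.

#include "ofxOpenCv.h"

// Minimal usage sketch (assumed helper name and threshold values):
// operate on the wrapped IplImage directly, then flag the change so
// ofxOpenCv refreshes its cached pixels/texture before the next draw.
void thresholdInPlace(ofxCvGrayscaleImage &grayImg){
    cvThreshold(grayImg.getCvImage(), grayImg.getCvImage(), 128, 255, CV_THRESH_BINARY);
    grayImg.flagImageChanged();   // without this, draw() may still show the old image data
}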
Example 1: convertToGrayscalePlanarImage
//--------------------------------------------------------------------------------
void ofxCvColorImage::convertToGrayscalePlanarImage (ofxCvGrayscaleImage& grayImage, int whichPlane){
    if( !bAllocated ){
        ofLogError("ofxCvColorImage") << "convertToGrayscalePlanarImage(): image not allocated";
        return;
    }
    if( !grayImage.bAllocated ){
        grayImage.allocate(width, height);
    }
    ofRectangle roi = getROI();
    ofRectangle grayRoi = grayImage.getROI();
    if( grayRoi.width == roi.width && grayRoi.height == roi.height ){
        switch (whichPlane){
            case 0:
                cvCvtPixToPlane(cvImage, grayImage.getCvImage(), NULL, NULL, NULL);
                grayImage.flagImageChanged();
                break;
            case 1:
                cvCvtPixToPlane(cvImage, NULL, grayImage.getCvImage(), NULL, NULL);
                grayImage.flagImageChanged();
                break;
            case 2:
                cvCvtPixToPlane(cvImage, NULL, NULL, grayImage.getCvImage(), NULL);
                grayImage.flagImageChanged();
                break;
        }
    } else {
        ofLogError("ofxCvColorImage") << "convertToGrayscalePlanarImages(): image size or region of interest mismatch";
    }
}
Example 2: convertToGrayscalePlanarImages
//--------------------------------------------------------------------------------
void ofxCvColorImage::convertToGrayscalePlanarImages(ofxCvGrayscaleImage& red, ofxCvGrayscaleImage& green, ofxCvGrayscaleImage& blue){
    if( !bAllocated ){
        ofLogError("ofxCvColorImage") << "convertToGrayscalePlanarImages(): image not allocated";
        return;
    }
    ofRectangle roi = getROI();
    ofRectangle redRoi = red.getROI();
    ofRectangle greenRoi = green.getROI();
    ofRectangle blueRoi = blue.getROI();
    if( !red.bAllocated ){
        red.allocate(width, height);
    }
    if( !green.bAllocated ){
        green.allocate(width, height);
    }
    if( !blue.bAllocated ){
        blue.allocate(width, height);
    }
    if( redRoi.width == roi.width && redRoi.height == roi.height &&
        greenRoi.width == roi.width && greenRoi.height == roi.height &&
        blueRoi.width == roi.width && blueRoi.height == roi.height )
    {
        cvCvtPixToPlane(cvImage, red.getCvImage(), green.getCvImage(), blue.getCvImage(), NULL);
        red.flagImageChanged();
        green.flagImageChanged();
        blue.flagImageChanged();
    } else {
        ofLogError("ofxCvColorImage") << "convertToGrayscalePlanarImages(): image size or region of interest mismatch";
    }
}
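As a usage sketch for the method above (the image size and variable names are assumptions, not part of the example): split a color image into its three planes; the version shown here allocates the grayscale targets if needed and calls flagImageChanged() on each of them internally, so they can be drawn right away.

ofxCvColorImage colorImg;
ofxCvGrayscaleImage redPlane, greenPlane, bluePlane;

colorImg.allocate(320, 240);          // assume colorImg is filled elsewhere, e.g. from a camera
redPlane.allocate(320, 240);
greenPlane.allocate(320, 240);
bluePlane.allocate(320, 240);

// Each target is flagged as changed inside the call, so no extra bookkeeping is needed here.
colorImg.convertToGrayscalePlanarImages(redPlane, greenPlane, bluePlane);
redPlane.draw(0, 0);                  // safe to draw: the texture will be refreshed first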
Example 3: convertToGrayscalePlanarImage
//--------------------------------------------------------------------------------
void ofxCvColorImage::convertToGrayscalePlanarImage (ofxCvGrayscaleImage& grayImage, int whichPlane){
    ofRectangle roi = getROI();
    ofRectangle grayRoi = grayImage.getROI();
    if( grayRoi.width == roi.width && grayRoi.height == roi.height ){
        switch (whichPlane){
            case 0:
                cvCvtPixToPlane(cvImage, grayImage.getCvImage(), NULL, NULL, NULL);
                grayImage.flagImageChanged();
                break;
            case 1:
                cvCvtPixToPlane(cvImage, NULL, grayImage.getCvImage(), NULL, NULL);
                grayImage.flagImageChanged();
                break;
            case 2:
                cvCvtPixToPlane(cvImage, NULL, NULL, grayImage.getCvImage(), NULL);
                grayImage.flagImageChanged();
                break;
        }
    } else {
        ofLog(OF_LOG_ERROR, "in convertToGrayscalePlanarImages, ROI/size mismatch");
    }
}
Example 4: convertToGrayscalePlanarImages
//--------------------------------------------------------------------------------
void ofxCvColorImage::convertToGrayscalePlanarImages(ofxCvGrayscaleImage& red, ofxCvGrayscaleImage& green, ofxCvGrayscaleImage& blue){
    ofRectangle roi = getROI();
    ofRectangle redRoi = red.getROI();
    ofRectangle greenRoi = green.getROI();
    ofRectangle blueRoi = blue.getROI();
    if( redRoi.width == roi.width && redRoi.height == roi.height &&
        greenRoi.width == roi.width && greenRoi.height == roi.height &&
        blueRoi.width == roi.width && blueRoi.height == roi.height )
    {
        cvCvtPixToPlane(cvImage, red.getCvImage(), green.getCvImage(), blue.getCvImage(), NULL);
        red.flagImageChanged();
        green.flagImageChanged();
        blue.flagImageChanged();
    } else {
        ofLog(OF_LOG_ERROR, "in convertToGrayscalePlanarImages, ROI/size mismatch");
    }
}
Example 5: fillHolesUsingContourFinder
//--------------------------------------------------------------
void DepthHoleFiller::fillHolesUsingContourFinder (ofxCvGrayscaleImage &depthImage,
                                                   int maxContourArea,
                                                   int maxNContours ){
    // Find just the holes, as geometric contours.
    computeBlobContours (depthImage, maxContourArea, maxNContours);

    // Rasterize those holes as filled polys, white on a black background.
    computeBlobImage();

    // Re-color the blobs with graylevels interpolated from the depth image.
    fillBlobsWithInterpolatedData (depthImage);

    // Add the interpolated holes back into the depth image.
    cvMax(depthImage.getCvImage(),
          ofxCv8uC1_Blobs.getCvImage(),
          depthImage.getCvImage());

    // Flag changes to the images.
    ofxCv8uC1_Blobs.flagImageChanged();
    depthImage.flagImageChanged();
}
Example 6: update
//--------------------------------------------------------------
void testApp::update(){

    /************ UPDATE BALL ***********************/
    //Update ball position
    ballPositionX += ballVelocityX;
    ballPositionY += ballVelocityY;
    if(ballPositionX < 0 || ballPositionX > ofGetWidth()) {
        ballVelocityX *= -1;
    }
    if (ballPositionY < 0 || ballPositionY > ofGetHeight()) {
        ballVelocityY *= -1;
    }

    /************ UPDATE KINECT ***********************/
    kinect.update();

    // get color pixels
    colorImageRGB = kinect.getPixels();

    // get depth pixels
    depthOrig = kinect.getDepthPixels();

    // save original depth, and do some preprocessing
    depthProcessed = depthOrig;
    if(invert) depthProcessed.invert();
    if(mirror) {
        depthOrig.mirror(false, true);
        depthProcessed.mirror(false, true);
        colorImageRGB.mirror(false, true);
    }
    if(preBlur) cvSmooth(depthProcessed.getCvImage(), depthProcessed.getCvImage(), CV_BLUR, preBlur*2+1);
    if(topThreshold) cvThreshold(depthProcessed.getCvImage(), depthProcessed.getCvImage(), topThreshold * 255, 255, CV_THRESH_TRUNC);
    if(bottomThreshold) cvThreshold(depthProcessed.getCvImage(), depthProcessed.getCvImage(), bottomThreshold * 255, 255, CV_THRESH_TOZERO);
    if(dilateBeforeErode) {
        if(dilateAmount) cvDilate(depthProcessed.getCvImage(), depthProcessed.getCvImage(), 0, dilateAmount);
        if(erodeAmount) cvErode(depthProcessed.getCvImage(), depthProcessed.getCvImage(), 0, erodeAmount);
    } else {
        if(erodeAmount) cvErode(depthProcessed.getCvImage(), depthProcessed.getCvImage(), 0, erodeAmount);
        if(dilateAmount) cvDilate(depthProcessed.getCvImage(), depthProcessed.getCvImage(), 0, dilateAmount);
    }
    depthProcessed.flagImageChanged();

    // find contours
    depthContours.findContours(depthProcessed,
                               minBlobSize * minBlobSize * depthProcessed.getWidth() * depthProcessed.getHeight(),
                               maxBlobSize * maxBlobSize * depthProcessed.getWidth() * depthProcessed.getHeight(),
                               maxNumBlobs, findHoles, useApproximation);

    // Clear old attraction points
    attractPts.clear();

    // Find centroid point for each blob area and add an attraction force to it
    for (int i=0; i<depthContours.blobs.size(); i++) {
        attractPts.push_back(new ofPoint(depthContours.blobs[i].centroid));
        //printf("Blob %d: %f %f \n", i, depthContours.blobs[i].centroid.x, depthContours.blobs[i].centroid.y);
    }

    // if one blob found, find nearest point in blob area
    static ofxVec3f newPoint;
    if(depthContours.blobs.size() == 1) {
        ofxCvBlob &blob = depthContours.blobs[0];

        depthOrig.setROI(blob.boundingRect);

        double minValue, maxValue;
        CvPoint minLoc, maxLoc;
        cvMinMaxLoc(depthOrig.getCvImage(), &minValue, &maxValue, &minLoc, &maxLoc, NULL);

        depthOrig.resetROI();

        newPoint.x = maxLoc.x + blob.boundingRect.x;
        newPoint.y = maxLoc.y + blob.boundingRect.y;
        // newPoint.z = (maxValue + offset) * depthScale;    // read from depth map
        //printf("Min: %f %f Max: %f %f \n", minLoc.x, minLoc.y, maxLoc.x, maxLoc.y);

        // read directly from distance (in cm)
        // this doesn't seem to work, need to look into it
        newPoint.z = (kinect.getDistanceAt(newPoint) + depthOffset) * depthScale;

        // apply kalman filtering
        if(doKalman) {
            newPoint.x = updateKalman(0, newPoint.x);
            newPoint.y = updateKalman(1, newPoint.y);
            newPoint.z = updateKalman(2, newPoint.z);
        }
    } else {
        clearKalman(0);
        clearKalman(1);
        clearKalman(2);
    }
//......... the rest of the code is omitted here .........