This article collects typical code examples of the C++ method ofxCvGrayscaleImage::getHeight. If you are unsure what ofxCvGrayscaleImage::getHeight does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage of the containing class, ofxCvGrayscaleImage.
The following shows 10 code examples of the ofxCvGrayscaleImage::getHeight method, sorted by popularity by default.
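Before the examples, here is a minimal sketch of the most common reason to call getWidth()/getHeight(): sizing one ofxCvGrayscaleImage to match another before a per-pixel operation. The helper name allocateToMatch and its surrounding context are assumptions for illustration, not part of the ofxOpenCv API.
//--------------------------------------------------------------------------------
#include "ofxOpenCv.h"

// Hypothetical helper: (re)allocate dst so its dimensions match src.
// Assumes src is already allocated; dst may be empty or differently sized.
static void allocateToMatch(ofxCvGrayscaleImage& src, ofxCvGrayscaleImage& dst){
    if( !dst.bAllocated ||
        dst.getWidth()  != src.getWidth() ||
        dst.getHeight() != src.getHeight() ){
        dst.allocate(src.getWidth(), src.getHeight());
    }
}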
Example 1: absDiff
//--------------------------------------------------------------------------------
void ofxCvGrayscaleImage::absDiff( ofxCvGrayscaleImage& mom,
                                   ofxCvGrayscaleImage& dad ) {
    if( !mom.bAllocated ){
        ofLogError("ofxCvGrayscaleImage") << "absDiff(): first source image (mom) not allocated";
        return;
    }
    if( !dad.bAllocated ){
        ofLogError("ofxCvGrayscaleImage") << "absDiff(): second source image (dad) not allocated";
        return;
    }
    if( !bAllocated ){
        ofLogNotice("ofxCvGrayscaleImage") << "absDiff(): allocating to match dimensions: "
                                           << mom.getWidth() << " " << mom.getHeight();
        allocate(mom.getWidth(), mom.getHeight());
    }

    ofRectangle roi = getROI();
    ofRectangle momRoi = mom.getROI();
    ofRectangle dadRoi = dad.getROI();
    if( (momRoi.width == roi.width && momRoi.height == roi.height ) &&
        (dadRoi.width == roi.width && dadRoi.height == roi.height ) )
    {
        cvAbsDiff( mom.getCvImage(), dad.getCvImage(), cvImage );
        flagImageChanged();
    } else {
        ofLogError("ofxCvGrayscaleImage") << "absDiff(): source image size mismatch between first (mom) & second (dad) image";
    }
}
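For context, absDiff() is the core of the classic ofxOpenCv background-subtraction pattern, where the difference image's own getWidth()/getHeight() typically bound the blob sizes passed to the contour finder. Below is a minimal sketch modeled on the stock openFrameworks opencvExample; the argument names and the minimum blob area are assumptions.
//--------------------------------------------------------------------------------
#include "ofxOpenCv.h"

// Sketch (assumed names): one background-subtraction step.
void updateBackgroundSubtraction(ofxCvGrayscaleImage& grayImage,   // current frame
                                 ofxCvGrayscaleImage& grayBg,      // captured background
                                 ofxCvGrayscaleImage& grayDiff,    // scratch image
                                 ofxCvContourFinder&  contourFinder,
                                 int thresholdValue){
    grayDiff.absDiff(grayBg, grayImage);   // per-pixel |background - current|
    grayDiff.threshold(thresholdValue);    // binarize the difference
    // bound blob areas by the image's own dimensions
    int maxArea = (int)(grayDiff.getWidth() * grayDiff.getHeight()) / 3;
    contourFinder.findContours(grayDiff, 20, maxArea, 10, false);
}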
Example 2: absDiff
//--------------------------------------------------------------------------------
void ofxCvGrayscaleImage::absDiff( ofxCvGrayscaleImage& mom,
                                   ofxCvGrayscaleImage& dad ) {
    if( !mom.bAllocated ){
        ofLog(OF_LOG_ERROR, "in absDiff, mom needs to be allocated");
        return;
    }
    if( !dad.bAllocated ){
        ofLog(OF_LOG_ERROR, "in absDiff, dad needs to be allocated");
        return;
    }
    if( !bAllocated ){
        ofLog(OF_LOG_NOTICE, "in absDiff, allocating to match dimensions");
        allocate(mom.getWidth(), mom.getHeight());
    }

    ofRectangle roi = getROI();
    ofRectangle momRoi = mom.getROI();
    ofRectangle dadRoi = dad.getROI();
    if( (momRoi.width == roi.width && momRoi.height == roi.height ) &&
        (dadRoi.width == roi.width && dadRoi.height == roi.height ) )
    {
        cvAbsDiff( mom.getCvImage(), dad.getCvImage(), cvImage );
        flagImageChanged();
    } else {
        ofLog(OF_LOG_ERROR, "in absDiff, images are different sizes");
    }
}
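Example 2 is an older revision of the same ofxCvGrayscaleImage::absDiff method: it reports errors through the legacy ofLog(OF_LOG_ERROR, ...) call style rather than the ofLogError("module") stream style used in Example 1, but the allocation checks and the ROI comparison are otherwise identical.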
Example 3: resetRoiMask
void testApp::resetRoiMask(ofxCvGrayscaleImage img){
    int w = img.getWidth();
    int h = img.getHeight();

    roiMask[0] = ofPoint(0,     0);
    roiMask[1] = ofPoint(w - 1, 0);
    roiMask[2] = ofPoint(w - 1, h - 1);
    roiMask[3] = ofPoint(0,     h - 1);
}
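resetRoiMask() simply restores the four mask corners to the full image rectangle reported by getWidth()/getHeight(); with this default quadrilateral, the masking performed in drawRoiMask() (Example 10) blacks out essentially nothing.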
Example 4: draw
//--------------------------------------------------------------
void testApp::draw(){
    ofSetColor(255, 255, 255);
    colorImg.draw(0, 0, ofGetWidth(), ofGetHeight());

    glPushMatrix();
    glScalef(ofGetWidth() / (float)greyImageSmall.getWidth(), ofGetHeight() / (float)greyImageSmall.getHeight(), 1);
    // haarTracker.draw(0, 0);
    ofNoFill();
    for(int i = 0; i < haarFinder.blobs.size(); i++) {
        ofRectangle cur = haarFinder.blobs[i].boundingRect;
        // ofRect(cur.x, cur.y, cur.width, cur.height);
        int iw = cur.width * 1.4;
        img.draw(haarFinder.blobs[i].centroid, iw, iw * img.getHeight() / img.getWidth());
    }
    glPopMatrix();
}
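Here getWidth()/getHeight() of the downscaled grayscale image (greyImageSmall) supply the scale factor between the detection resolution and the window size, so the Haar blobs found at that resolution line up with the full-screen color image, while img.getHeight() / img.getWidth() preserves the aspect ratio of the overlay image drawn at each detection.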
Example 5: addWeighted
//--------------------------------------------------------------------------------
void ofxCvFloatImage::addWeighted( ofxCvGrayscaleImage& mom, float f ) {
    if( mom.getWidth() == 0 || mom.getHeight() == 0 ){
        ofLog(OF_LOG_ERROR, "in addWeighted, mom width or height is 0");
        return;
    }
    if( !bAllocated ){
        ofLog(OF_LOG_ERROR, "in addWeighted, image is not allocated");
        return;
    }

    if( matchingROI(getROI(), mom.getROI()) ) {
        convertGrayToFloat(mom.getCvImage(), cvImageTemp);
        cvAddWeighted( cvImageTemp, f, cvImage, 1.0f-f, 0, cvImage );
        flagImageChanged();
    } else {
        ofLog(OF_LOG_ERROR, "in addWeighted, ROI mismatch");
    }
}
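addWeighted() is most often used to learn a slowly adapting background into a float accumulator, which is then converted back to 8-bit for differencing. A hedged sketch of that pattern follows; the variable names and the learn rate are assumptions.
//--------------------------------------------------------------------------------
#include "ofxOpenCv.h"

// Sketch (assumed names): blend the current grayscale frame into a float
// background accumulator, then read it back as an 8-bit background image.
void updateAdaptiveBackground(ofxCvGrayscaleImage& grayImage,
                              ofxCvFloatImage&     floatBg,
                              ofxCvGrayscaleImage& grayBg,
                              float learnRate){               // e.g. 0.01f
    if( !floatBg.bAllocated ){
        floatBg.allocate(grayImage.getWidth(), grayImage.getHeight());
    }
    // floatBg = learnRate * grayImage + (1 - learnRate) * floatBg
    floatBg.addWeighted(grayImage, learnRate);
    grayBg = floatBg;   // convert back to 8-bit for absDiff()/threshold()
}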
Example 6: setFromGrayscalePlanarImages
//--------------------------------------------------------------------------------
void ofxCvColorImage::setFromGrayscalePlanarImages( ofxCvGrayscaleImage& red, ofxCvGrayscaleImage& green, ofxCvGrayscaleImage& blue){
    ofRectangle roi = getROI();
    ofRectangle redRoi = red.getROI();
    ofRectangle greenRoi = green.getROI();
    ofRectangle blueRoi = blue.getROI();

    if( !bAllocated ){
        ofLogNotice("ofxCvColorImage") << "setFromGrayscalePlanarImages(): allocating to match dimensions";
        allocate(red.getWidth(), red.getHeight());
    }

    if( redRoi.width == roi.width && redRoi.height == roi.height &&
        greenRoi.width == roi.width && greenRoi.height == roi.height &&
        blueRoi.width == roi.width && blueRoi.height == roi.height )
    {
        cvCvtPlaneToPix(red.getCvImage(), green.getCvImage(), blue.getCvImage(), NULL, cvImage);
        flagImageChanged();
    } else {
        ofLogError("ofxCvColorImage") << "setFromGrayscalePlanarImages(): image size or region of interest mismatch";
    }
}
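setFromGrayscalePlanarImages() is the counterpart of ofxCvColorImage::convertToGrayscalePlanarImages(). Below is a hedged round-trip sketch that processes one channel and recombines; the function and variable names are assumptions.
//--------------------------------------------------------------------------------
#include "ofxOpenCv.h"

// Sketch (assumed names): split a color image into R/G/B planes, process one
// plane, then recombine into a color image of the same dimensions.
void splitAndRecombine(ofxCvColorImage& color, ofxCvColorImage& result){
    ofxCvGrayscaleImage r, g, b;
    r.allocate(color.getWidth(), color.getHeight());
    g.allocate(color.getWidth(), color.getHeight());
    b.allocate(color.getWidth(), color.getHeight());

    color.convertToGrayscalePlanarImages(r, g, b);  // one 8-bit plane per channel
    g.threshold(128);                               // example per-plane operation

    result.allocate(color.getWidth(), color.getHeight());
    result.setFromGrayscalePlanarImages(r, g, b);
}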
Example 7: mergeGrayImages
void pointCloudStitcher::mergeGrayImages(ofxCvGrayscaleImage imgOne, ofxCvGrayscaleImage imgTwo, float ** adj){
    ofPoint tempPointCurrent;
    ofPixelsRef imgOnePix = imgOne.getPixelsRef();
    ofPixelsRef imgTwoPix = imgTwo.getPixelsRef();
    int offset;
    int brightnessOne, brightness;

    for(int y = 0; y < imgTwo.getHeight(); ++y) {
        for(int x = 0; x < imgTwo.getWidth(); ++x) {
            brightnessOne = imgTwoPix.getColor(x, y).getBrightness();
            if(brightnessOne > 0) {
                offset = int(x * adj[x][y]);
                if(offset < imgOne.getWidth() && offset > 0) {
                    if( (brightness = imgOnePix.getColor(offset, y + kinectDistanceY).r) < brightnessOne ){
                        imgOnePix.setColor(offset, y + kinectDistanceY, imgTwoPix.getColor(x, y));
                    }
                }
            }
        }
    }

    patchedImageCv.setFromPixels(imgOnePix.getPixels(), imgOne.width, imgOne.height);
    patchedImageCv.flagImageChanged();
}
Example 8: draw
//--------------------------------------------------------------
void testApp::draw(){
    // Display background
    ofSetColor(255, 255, 255, 50);
    img.draw(0, 0, ofGetWidth(), ofGetHeight());

    // Draw ball
    ofSetColor(255, 150, 0);
    ofCircle(ballPositionX, ballPositionY, 10);

    /************ DRAW PARTICLE SYSTEM ***********************/
    particleSystem.setTimeStep(timeStep);
    ofEnableAlphaBlending();
    ofSetColor(251, 236, 93, lineOpacity);
    particleSystem.setupForces();
    // apply per-particle forces
    glBegin(GL_LINES);
    for(int i = 0; i < particleSystem.size(); i++) {
        Particle& cur = particleSystem[i];
        // global force on other particles
        particleSystem.addRepulsionForce(cur, particleNeighborhood, particleRepulsion);
        // forces on this particle
        cur.bounceOffWalls(0, 0, ofGetWidth(), ofGetHeight());
        cur.addDampingForce();
    }
    glEnd();

    // Apply attraction to selected points
    for (int i = 0; i < attractPts.size(); i++) {
        particleSystem.addAttractionForce(attractPts[i]->x, attractPts[i]->y, ofGetWidth(), centerAttraction);
    }

    // Add repulsion force by mouse click
    if(isMousePressed)
        particleSystem.addRepulsionForce(mouseX, mouseY, 100, 10);

    // Set the ball to repulse
    particleSystem.addRepulsionForce(ballPositionX, ballPositionY, 25, 10);

    particleSystem.update();
    ofSetColor(255, 255, 255, pointOpacity);
    particleSystem.draw();
    ofDisableAlphaBlending();

    ofSetColor(255, 255, 255);
    //ofDrawBitmapString(ofToString(kParticles) + "k particles", 32, 32);
    //ofDrawBitmapString(ofToString((int) ofGetFrameRate()) + " fps", 32, 52);

    // draw depth and color view
    //kinect.drawDepth(10, 10, 200, 150);
    //kinect.draw(220, 10, 200, 150);
    //colorImg.mirror(false, true);
    //colorImg.draw(100,100);

    /************ DRAW POINT CLOUD ***********************/
    // draw point cloud
    pointCloudX = 800;
    pointCloudY = 800;
    ofSetColor(0, 0, 0);
    ofPushMatrix();
    ofTranslate(pointCloudX, pointCloudY);
    drawPointCloud();
    ofPopMatrix();

    // Make attraction with point
    /*for (int i = 0; i < cloudPts.size(); i++) {
        particleSystem.addAttractionForce(cloudPts[i]->x + 800, cloudPts[i]->y + 650, 25, 1);
        //printf("Point Clouds: %f %f \n", cloudPts[i]->x, cloudPts[i]->y);
    } */

    // Rotate drawing
    if(depthContours.blobs.size() == 2) {
        ofxVec2f center = depthContours.blobs[0].centroid + depthContours.blobs[1].centroid;
        ofxVec2f targetViewRot;
        targetViewRot.x = ofMap(center.x, 0, ofGetWidth(), -360, 360);
        targetViewRot.y = ofMap(center.y, 0, ofGetHeight(), 360, -360);
        viewRot += (targetViewRot - viewRot) * 0.05;
    } else {
        viewRot -= viewRot * 0.05;
    }

    ofxVec3f camToWorld(ofGetWidth()/depthOrig.getWidth(), ofGetHeight()/depthOrig.getHeight(), 1);
    ofxVec3f up(0, 1, 0);

    gui.draw();
    //......... the rest of the code is omitted .........
Example 9: update
//--------------------------------------------------------------
void testApp::update(){
    /************ UPDATE BALL ***********************/
    // Update ball position
    ballPositionX += ballVelocityX;
    ballPositionY += ballVelocityY;
    if(ballPositionX < 0 || ballPositionX > ofGetWidth()) {
        ballVelocityX *= -1;
    }
    if (ballPositionY < 0 || ballPositionY > ofGetHeight()) {
        ballVelocityY *= -1;
    }

    /************ UPDATE KINECT ***********************/
    kinect.update();

    // get color pixels
    colorImageRGB = kinect.getPixels();

    // get depth pixels
    depthOrig = kinect.getDepthPixels();

    // save original depth, and do some preprocessing
    depthProcessed = depthOrig;
    if(invert) depthProcessed.invert();
    if(mirror) {
        depthOrig.mirror(false, true);
        depthProcessed.mirror(false, true);
        colorImageRGB.mirror(false, true);
    }
    if(preBlur) cvSmooth(depthProcessed.getCvImage(), depthProcessed.getCvImage(), CV_BLUR, preBlur*2+1);
    if(topThreshold) cvThreshold(depthProcessed.getCvImage(), depthProcessed.getCvImage(), topThreshold * 255, 255, CV_THRESH_TRUNC);
    if(bottomThreshold) cvThreshold(depthProcessed.getCvImage(), depthProcessed.getCvImage(), bottomThreshold * 255, 255, CV_THRESH_TOZERO);
    if(dilateBeforeErode) {
        if(dilateAmount) cvDilate(depthProcessed.getCvImage(), depthProcessed.getCvImage(), 0, dilateAmount);
        if(erodeAmount) cvErode(depthProcessed.getCvImage(), depthProcessed.getCvImage(), 0, erodeAmount);
    } else {
        if(erodeAmount) cvErode(depthProcessed.getCvImage(), depthProcessed.getCvImage(), 0, erodeAmount);
        if(dilateAmount) cvDilate(depthProcessed.getCvImage(), depthProcessed.getCvImage(), 0, dilateAmount);
    }
    depthProcessed.flagImageChanged();

    // find contours
    depthContours.findContours(depthProcessed,
                               minBlobSize * minBlobSize * depthProcessed.getWidth() * depthProcessed.getHeight(),
                               maxBlobSize * maxBlobSize * depthProcessed.getWidth() * depthProcessed.getHeight(),
                               maxNumBlobs, findHoles, useApproximation);

    // Clear old attraction points
    attractPts.clear();

    // Find centroid point for each blob area and add an attraction force to it
    for (int i = 0; i < depthContours.blobs.size(); i++) {
        attractPts.push_back(new ofPoint(depthContours.blobs[i].centroid));
        //printf("Blob %d: %f %f \n", i, depthContours.blobs[i].centroid.x, depthContours.blobs[i].centroid.y);
    }

    // if one blob found, find nearest point in blob area
    static ofxVec3f newPoint;
    if(depthContours.blobs.size() == 1) {
        ofxCvBlob &blob = depthContours.blobs[0];
        depthOrig.setROI(blob.boundingRect);

        double minValue, maxValue;
        CvPoint minLoc, maxLoc;
        cvMinMaxLoc(depthOrig.getCvImage(), &minValue, &maxValue, &minLoc, &maxLoc, NULL);

        depthOrig.resetROI();

        newPoint.x = maxLoc.x + blob.boundingRect.x;
        newPoint.y = maxLoc.y + blob.boundingRect.y;
        // newPoint.z = (maxValue + offset) * depthScale;    // read from depth map
        //printf("Min: %f %f Max: %f %f \n", minLoc.x, minLoc.y, maxLoc.x, maxLoc.y);

        // read directly from distance (in cm)
        // this doesn't seem to work, need to look into it
        newPoint.z = (kinect.getDistanceAt(newPoint) + depthOffset) * depthScale;

        // apply kalman filtering
        if(doKalman) {
            newPoint.x = updateKalman(0, newPoint.x);
            newPoint.y = updateKalman(1, newPoint.y);
            newPoint.z = updateKalman(2, newPoint.z);
        }
    } else {
        clearKalman(0);
        clearKalman(1);
        clearKalman(2);
    }
    //......... the rest of the code is omitted .........
Example 10: drawRoiMask
//--------------------------------------------------------------
void testApp::drawRoiMask(ofxCvGrayscaleImage &img){
    int nPts = 4;
    int c = 0;                          // fill color (black)
    int w = img.getWidth();
    int h = img.getHeight();
    CvPoint* pts = new CvPoint[nPts];

    // top band: image top edge down to roiMask[0]-roiMask[1]
    pts[0].x = 0;             pts[0].y = 0;
    pts[1].x = w - 1;         pts[1].y = 0;
    pts[2].x = roiMask[1].x;  pts[2].y = roiMask[1].y;
    pts[3].x = roiMask[0].x;  pts[3].y = roiMask[0].y;
    cvFillPoly( img.getCvImage(), &pts, &nPts, 1, CV_RGB(c,c,c) );

    // left band: image left edge across to roiMask[0]-roiMask[3]
    pts[0].x = 0;             pts[0].y = 0;
    pts[1].x = roiMask[0].x;  pts[1].y = roiMask[0].y;
    pts[2].x = roiMask[3].x;  pts[2].y = roiMask[3].y;
    pts[3].x = 0;             pts[3].y = h - 1;
    cvFillPoly( img.getCvImage(), &pts, &nPts, 1, CV_RGB(c,c,c) );

    // right band: image right edge across to roiMask[1]-roiMask[2]
    pts[0].x = w - 1;         pts[0].y = 0;
    pts[1].x = w - 1;         pts[1].y = h - 1;
    pts[2].x = roiMask[2].x;  pts[2].y = roiMask[2].y;
    pts[3].x = roiMask[1].x;  pts[3].y = roiMask[1].y;
    cvFillPoly( img.getCvImage(), &pts, &nPts, 1, CV_RGB(c,c,c) );

    // bottom band: image bottom edge (starting at the bottom-right corner)
    // up to roiMask[2]-roiMask[3]
    pts[0].x = w - 1;         pts[0].y = h - 1;
    pts[1].x = 0;             pts[1].y = h - 1;
    pts[2].x = roiMask[3].x;  pts[2].y = roiMask[3].y;
    pts[3].x = roiMask[2].x;  pts[3].y = roiMask[2].y;
    cvFillPoly( img.getCvImage(), &pts, &nPts, 1, CV_RGB(c,c,c) );

    delete[] pts;                       // cvFillPoly does not take ownership
}