本文整理汇总了C++中ofxCvGrayscaleImage::getPixels方法的典型用法代码示例。如果您正苦于以下问题:C++ ofxCvGrayscaleImage::getPixels方法的具体用法?C++ ofxCvGrayscaleImage::getPixels怎么用?C++ ofxCvGrayscaleImage::getPixels使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类 ofxCvGrayscaleImage 的用法示例。
在下文中一共展示了ofxCvGrayscaleImage::getPixels方法的12个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: checkForCommonFill
void testApp :: checkForCommonFill ( ofxCvGrayscaleImage& imageOut, ofxCvGrayscaleImage& image1, ofxCvGrayscaleImage& image2 )
{
    // Writes into imageOut the binary intersection of the two input masks:
    // a pixel becomes white (255) only where BOTH inputs are exactly 255,
    // and black (0) everywhere else. imageOut's own dimensions define the
    // pixel count, so all three images are expected to share the same size.
    int totalPixels = imageOut.width * imageOut.height;
    unsigned char* resultPixels = new unsigned char[ totalPixels ];
    unsigned char* maskA = image1.getPixels();
    unsigned char* maskB = image2.getPixels();
    for( int idx = 0; idx < totalPixels; idx++ )
    {
        resultPixels[ idx ] = ( maskA[ idx ] == 255 && maskB[ idx ] == 255 ) ? 255 : 0;
    }
    // setFromPixels copies the buffer, so the temporary can be freed here.
    imageOut.setFromPixels( resultPixels, imageOut.width, imageOut.height );
    delete[] resultPixels;
}
示例2: convert
// Copies a binary grayscale image into an RGBA ofImage: for each source
// pixel, byte i*4 (the R channel) of the destination is set to 0 if the
// source pixel is black and 255 otherwise. Note that only the R byte of
// each destination pixel is written; G, B, and A are left untouched,
// matching the original behavior. dst is assumed to be allocated as an
// RGBA image at least as large as src.
void convert(ofxCvGrayscaleImage &src, ofImage &dst) {
    for(int i = 0; i < src.height*src.width; i += 1) {
        if(src.getPixels()[i]==0) {
            dst.getPixels()[i*4] = 0;
        } else {
            // FIX: was 0xffffffff, which silently truncates to 0xff when
            // stored into an unsigned char; write 255 explicitly so the
            // intent (a single saturated byte) is visible.
            dst.getPixels()[i*4] = 255;
        }
    }
    dst.update();
}
示例3: drawPointCloud
// Renders a Kinect point cloud, using grayImage as a mask: only pixels that
// are non-black in the mask AND have a valid (> 0) Kinect depth reading are
// emitted. Points are either drawn directly as a mesh of vertices or fed
// into a Delaunay triangulation and drawn as triangles, depending on the
// member flag `makeTriangles`.
// NOTE(review): `step`, `stepOffset`, `makeTriangles`, `triangulation`, and
// `kinect` are class members not visible in this excerpt — semantics
// inferred from usage; confirm against the class declaration.
void ofApp::drawPointCloud(ofxCvGrayscaleImage grayImage) {
// Hard-coded Kinect frame size (640x480); assumes grayImage matches.
int w = 640;
int h = 480;
ofMesh mesh;
mesh.setMode(OF_PRIMITIVE_POINTS);
//int step = 8;
int counter = 0;
unsigned char * pix = grayImage.getPixels();
for(int y = 0; y < h; y += step) {
counter++;
int start = 0;
if (stepOffset) { // if we are using the step offset
// Stagger every other row by half a step to get a hex-like sampling grid.
if ((counter)%2) {
start = step/2;
}
}
for(int x = start; x < w; x += step) {
// Keep the sample only if the mask is lit and the depth sensor has data here.
if(kinect.getDistanceAt(x, y) > 0 && pix[x+y*w]>0) {
mesh.addColor(kinect.getColorAt(x,y));
mesh.addVertex(kinect.getWorldCoordinateAt(x, y));
ofPoint test = kinect.getWorldCoordinateAt(x, y);
if (makeTriangles){ // add to the triangles if required
triangulation.addPoint(test);
}
}
}
}
if (makeTriangles){ // add to the triangles if required
triangulation.triangulate();
}
glPointSize(3);
ofPushMatrix();
// the projected points are 'upside down' and 'backwards'
ofScale(1, -1, -1);
ofTranslate(0, 0, -1000); // center the points a bit
ofEnableDepthTest(); if (makeTriangles){ // add to the triangles if required
triangulation.draw();
} else {
mesh.drawVertices();
}
ofNoFill();
ofDisableDepthTest();
ofPopMatrix();
}
示例4: drawBrightnessScanGraph
// Draws, at screen position (x, y), a brightness-profile graph of a single
// scan line of img: pixel row `scanY` when bIsVertical is false, or pixel
// column `scanX` when it is true. The profile is plotted with the y axis
// flipped (255 - value) so brighter pixels appear higher on screen, and
// horizontal reference lines are overlaid for the pupil threshold (red),
// glint threshold (blue), and several derived brightness levels (gray/blue).
// NOTE(review): `scanY`, `scanX`, getMinInWhite(), getPupilAvg(), and
// getGlintThreshold() are members not shown in this excerpt.
void thresholdCalculator::drawBrightnessScanGraph(int x, int y, ofxCvGrayscaleImage & img, bool bIsVertical, float threshold_p,
float threshold_g, string graphname)
{
unsigned char * tempPixels = img.getPixels();
ofPushMatrix();
ofTranslate(x, y, 0);
ofSetColor(255, 255, 255, 100);
ofBeginShape();
if (!bIsVertical) {
// Horizontal scan: walk across row scanY.
int nLine = scanY;
for (int i = 0; i < img.width; i++){
ofVertex(i, 255 - (int)tempPixels[i + img.width * nLine]);
}
} else {
// Vertical scan: walk down column scanX.
int nLine = scanX;
for (int i = 0; i < img.height; i++){
ofVertex(i, 255 - (int)tempPixels[nLine + i * img.width]);
}
}
ofEndShape(false);
// Pupil threshold reference line (red).
ofSetColor(255, 0, 0, 80);
ofLine(0, 255 - threshold_p, img.width, 255 - threshold_p);
// Glint threshold reference line (blue).
ofSetColor(0, 0, 255, 80);
ofLine(0, 255 - threshold_g, img.width, 255 - threshold_g);
// Derived brightness references (gray).
ofSetColor(120, 120, 120, 80);
ofLine(0, 255 - getMinInWhite(), img.width, 255 - getMinInWhite());
ofLine(0, 255 - getPupilAvg(), img.width, 255 - getPupilAvg());
ofSetColor(120, 120, 255, 80);
ofLine(0, 255 - getGlintThreshold(true), img.width, 255 - getGlintThreshold(true));
// Frame the graph area and label it below the 0-255 range.
ofSetColor(255, 255, 255);
ofRect(0, 0, img.width, img.height);
ofDrawBitmapString(graphname, 1, 255 + 12);
ofPopMatrix();
}
示例5: set
//---------------------------------------------------------------------------------
//---------------------------------------------------------------------------------
// Captures a tracked blob's appearance: crops the blob's bounding rect out
// of the color image into an RGBA buffer, using the grayscale mask as the
// alpha channel, and records the blob's centroid as this videoBlob's
// position. (The texture-upload calls are currently commented out, so the
// crop is built and immediately discarded.)
void videoBlob::set(ofxCvBlob myBlob, ofxCvColorImage myImage, ofxCvGrayscaleImage myMask){
    // FIX: the blob was previously copied with memcpy(); ofxCvBlob contains
    // a std::vector of contour points, so a raw byte copy aliases the
    // vector's internal pointers (undefined behavior / double free). Use
    // copy assignment instead.
    blob = myBlob;
    // now, let's get the data in,
    int w = blob.boundingRect.width;
    int h = blob.boundingRect.height;
    int imgw = myImage.width;
    int imgx = blob.boundingRect.x;
    int imgy = blob.boundingRect.y;
    unsigned char * blobRGBA = new unsigned char [ w * h * 4 ];
    unsigned char * colorPixels = myImage.getPixels();
    unsigned char * grayPixels = myMask.getPixels();
    for (int i = 0; i < w; i++){
        for (int j = 0; j < h; j++){
            int posTex = (j * w + i)*4;                 // RGBA index inside the crop
            int posGray = ((j+imgy)*imgw + (i + imgx)); // index into the full-frame mask
            int posCol = posGray * 3;                   // RGB index into the full-frame color image
            blobRGBA[posTex + 0] = colorPixels[posCol + 0];
            blobRGBA[posTex + 1] = colorPixels[posCol + 1];
            blobRGBA[posTex + 2] = colorPixels[posCol + 2];
            blobRGBA[posTex + 3] = grayPixels[posGray]; // mask value becomes alpha
        }
    }
    // myTexture.clear();
    // myTexture.allocate(w,h,GL_RGBA);
    // Power-of-two sized black canvas for the (disabled) texture upload.
    unsigned char * black = new unsigned char [ofNextPow2(w) * ofNextPow2(h) * 4];
    memset(black, 0, ofNextPow2(w) * ofNextPow2(h) * 4);
    // myTexture.loadData(black, ofNextPow2(w), ofNextPow2(h), GL_RGBA);
    // myTexture.loadData(blobRGBA, w, h, GL_RGBA);
    // FIX: both buffers were allocated with new[], so they must be released
    // with delete[]; plain delete on an array is undefined behavior.
    delete[] black;
    delete[] blobRGBA;
    pos.x = blob.centroid.x;
    pos.y = blob.centroid.y;
    scale = 1;
    angle = 0;
}
示例6: learnBackground
void BackgroundLearner::learnBackground( ofxCvGrayscaleImage & graySrc, float rate )
{
    // Blends the incoming frame into the running-average background model:
    // imageFloat = (1 - rate) * imageFloat + rate * src, then mirrors the
    // result into the byte buffer and the grayscale background image.
    // On the very first frame the rate is forced to 1 so the model is
    // initialized directly from the source image.
    unsigned char * srcPixels = graySrc.getPixels();
    if( framesLearned == 0 ) {
        rate = 1;
    }
    const float keep = 1 - rate;
    const int total = w * h;
    for( int idx = 0; idx < total; idx++ )
    {
        imageFloat[idx] = keep * imageFloat[idx] + rate * srcPixels[idx];
        imageByte[idx] = (unsigned char)(imageFloat[idx]);
    }
    grayImg.setFromPixels(imageByte,w,h);
}
示例7: update
void ofxOpticalFlowLK :: update ( ofxCvGrayscaleImage& source )
{
    // Convenience overload: forwards the raw grayscale pixel buffer and the
    // image dimensions to the pixel-based update().
    const int srcWidth = source.width;
    const int srcHeight = source.height;
    update( source.getPixels(), srcWidth, srcHeight, OF_IMAGE_GRAYSCALE );
}
示例8: update
// When exactly two glints are present, scans each row of the thresholded eye
// image for horizontal runs of black pixels bracketed by white (a 255->0
// edge starts a segment, a 0->255 edge ends it) — these runs are candidate
// "bridges" between the two glints. Candidate segments are optionally
// filtered (by range, and by crossing a glint found in the bright-eye
// image), then rasterized as white stripes into myStripesImage. A contour
// finder is run on the stripe image, and the first stripe blob's bounding
// rect is matched against the glint contours to assign leftGlintID /
// rightGlintID.
// NOTE(review): `bDeleteLine`, `lineSegments`, `myStripesImage`,
// `linesFinder`, lineInRange(), and lineCrossGlintInBrightEye() are members
// not shown in this excerpt.
void glintLineChecker::update(ofxCvGrayscaleImage & eyeImg, int nGlints, ofxCvContourFinder & contourFinder, bool bUseGlintInBrightEye, ofxCvContourFinder & contourFinderBright){
if (nGlints == 2) {
lineSegments.clear();
unsigned char * pixels = eyeImg.getPixels();
for (int j = 0; j < eyeImg.height; j++){
lineSegment temp;
bool bStarted = false;
for (int i = 0; i < eyeImg.width - 1; i++) {
// Compare each pixel with its right-hand neighbor to detect edges.
int pixela = pixels [ j * eyeImg.width + i];
int pixelb = pixels [ j * eyeImg.width + i + 1];
if ((pixela == 255) && (pixelb == 0)) {
// yeah!! we are starting !!
temp.startx = i;
temp.starty = j;
bStarted = true;
}
if ((pixela == 0) && (pixelb == 255)) {
if (bStarted == true) {
// cool we are ending :)
temp.endx = i;
temp.endy = j;
temp.distance = temp.endx - temp.startx;
lineSegments.push_back(temp);
//printf("adding line segment %i %i %i %i -- %i \n", temp.startx, temp.starty, temp.endx, temp.endy, temp.distance );
bStarted = false;
}
}
}
}
if (bDeleteLine) {
// remove_if doesn't work now, so for now..
// lineSegments.erase(remove_if(lineSegments.begin(), lineSegments.end(), glintChecker::lineInRange), lineSegments.end());
// Manual erase loop; i-- compensates for the shift after each erase.
for (int i = 0; i < lineSegments.size(); i++) {
if (lineInRange(lineSegments[i])) {
lineSegments.erase(lineSegments.begin() + i);
i--;
}
}
for (int i = 0; i < lineSegments.size(); i++) {
if (bUseGlintInBrightEye) {
if (lineCrossGlintInBrightEye(lineSegments[i], contourFinderBright)) {
lineSegments.erase(lineSegments.begin() + i);
i--;
}
}
}
}
// Rasterize the surviving segments as white stripes on a cleared image.
cvSetZero(myStripesImage.getCvImage());
unsigned char * stripepixels = myStripesImage.getPixels();
for (int i = 0; i < lineSegments.size(); i++) {
int startx = lineSegments[i].startx;
int endx = lineSegments[i].endx;
int y = lineSegments[i].starty;
for (int j = startx; j < endx; j++){
stripepixels[y * myStripesImage.width + j] = 255;
}
}
// Pixels were modified directly, so flag the image before contour finding.
myStripesImage.flagImageChanged();
int nBlobs = linesFinder.findContours(myStripesImage, 100, 10000, 1, false, true);
leftGlintID = -1;
rightGlintID = -1;
if (nBlobs > 0) {
// Match the stripe blob's left/right edges against each glint contour's
// bounding rect to decide which glint is left and which is right.
ofRectangle foundLinesRect = linesFinder.blobs[0].boundingRect;
for (int i = 0; i < contourFinder.blobs.size(); i++){
ofRectangle blobRect = contourFinder.blobs[i].boundingRect;
if (ofInRange(foundLinesRect.x, blobRect.x, blobRect.x + blobRect.width) &&
(ofInRange(foundLinesRect.y, blobRect.y, blobRect.y + blobRect.height) ||
ofInRange(foundLinesRect.y + foundLinesRect.height, blobRect.y, blobRect.y + blobRect.height))){
leftGlintID = i;
} else if (ofInRange(foundLinesRect.x + foundLinesRect.width, blobRect.x, blobRect.x + blobRect.width) &&
(ofInRange(foundLinesRect.y, blobRect.y, blobRect.y + blobRect.height) ||
ofInRange(foundLinesRect.y + foundLinesRect.height, blobRect.y, blobRect.y + blobRect.height))) {
rightGlintID = i;
}
}
}
}
}
示例9: update
//--------------------------------------------------------------
void eyeTracker::update(ofxCvGrayscaleImage & grayImgFromCam, float threshold, float minSize, float maxSize, float minSquareness) {
//threshold?
//threshold = thresh;
grayImgPreWarp.setFromPixels(grayImgFromCam.getPixels(), grayImgFromCam.width, grayImgFromCam.height); // TODO: there's maybe an unnecessary grayscale image (and copy) here...
if( flipX || flipY ) {
grayImgPreWarp.mirror(flipY, flipX);
}
/* // before we were scaling and translating, but this is removed for now
if (fabs(xoffset-1) > 0.1f || fabs(yoffset-1) > 0.1f){
grayImgPreWarp.translate(xoffset, yoffset);
}
if (fabs(scalef-1) > 0.1f){
grayImgPreWarp.scale(scalef, scalef);
}*/
grayImg = grayImgPreWarp;
grayImgPreModification = grayImg;
grayImg.blur(5);
if (bUseContrast == true) {
grayImg.applyBrightnessContrast(brightness,contrast);
}
if (bUseGamma == true) {
grayImg.applyMinMaxGamma(gamma);
}
grayImg += edgeMask;
threshImg = grayImg;
threshImg.contrastStretch();
threshImg.threshold(threshold, true);
// the dilation of a 640 x 480 image is very slow, so let's just do a ROI near the thing we like:
threshImg.setROI(currentEyePoint.x-50, currentEyePoint.y-50, 100,100); // 200 pix ok?
if (bUseDilate == true) {
for (int i = 0; i < nDilations; i++) {
threshImg.dilate();
}
}
threshImg.resetROI();
bFoundOne = false;
int whoFound = -1;
int num = contourFinder.findContours(threshImg, minSize, maxSize, 100, false, true);
if( num ) {
for(int k = 0; k < num; k++) {
float ratio = contourFinder.blobs[k].boundingRect.width < contourFinder.blobs[k].boundingRect.height ?
contourFinder.blobs[k].boundingRect.width / contourFinder.blobs[k].boundingRect.height :
contourFinder.blobs[k].boundingRect.height / contourFinder.blobs[k].boundingRect.width;
float arcl = contourFinder.blobs[k].length;
float area = contourFinder.blobs[k].area;
float compactness = (float)((arcl*arcl/area)/FOUR_PI);
if (bUseCompactnessTest == true && compactness > maxCompactness) {
continue;
}
//printf("compactness %f \n", compactness);
//lets ignore rectangular blobs
if( ratio > minSquareness) {
currentEyePoint = contourFinder.blobs[k].centroid;
currentNormPoint.x = currentEyePoint.x;
currentNormPoint.y = currentEyePoint.y;
currentNormPoint.x /= w;
currentNormPoint.y /= h;
bFoundOne = true;
whoFound = k;
break;
}
}
//.........这里部分代码省略.........
示例10: update
//----------------------------------------------------
void eyeTracker::update(ofxCvGrayscaleImage & grayImgFromCam) {
// get the image from input manager.
currentImg.setFromPixels(grayImgFromCam.getPixels(), grayImgFromCam.width, grayImgFromCam.height);
// get the small size image to find eye position.
if (divisor !=1) smallCurrentImg.scaleIntoMe(currentImg, CV_INTER_LINEAR);
// get the eye position
bFoundOne = eFinder.update(smallCurrentImg, threshold_e, minBlobSize_e, maxBlobSize_e, true);
if (eFinder.centroid.x > w - (targetRect.width/2) || eFinder.centroid.x < (targetRect.width/2) ||
eFinder.centroid.y > h - (targetRect.height/2) || eFinder.centroid.y < (targetRect.height/2)){
bFoundOne = false;
}
bFoundEye = false;
bool bFoundPupil = false;
if (bFoundOne){
targetRect.x = eFinder.centroid.x - (targetRect.width/2);
targetRect.y = eFinder.centroid.y - (targetRect.height/2);
// make big eye image
currentImg.setROI(targetRect);
if (magRatio != 1) {
magCurrentImg.scaleIntoMe(currentImg, CV_INTER_CUBIC); // magnify by bicubic
} else {
magCurrentImg.setFromPixels(currentImg.getRoiPixels(), targetRect.width, targetRect.height);
}
currentImg.resetROI();
// get current bright eye image & dark eye image
bIsBrightEye = getBrightEyeDarkEye();
// get glint position in a bright eye image, if needed. <= shoul be here..? think about it.
if (bIsBrightEye && bUseGlintinBrightEye) {
thresCal.update(smallCurrentImg, eFinder.diffImg, currentImg, targetRect, true);
gFinder.findGlintCandidates(magCurrentImg, thresCal.getGlintThreshold(true), minBlobSize_g, maxBlobSize_g, true);
targetRectBright = targetRect;
}
// find Pupil image again with the big eye image. (try double ellipse fit later?)
if (!bIsBrightEye){
// get the averages of pupil & white part.
if (bUseAutoThreshold_g || bUseAutoThreshold_p){
thresCal.update(smallCurrentImg, eFinder.diffImg, currentImg, targetRect, false);
}
if (bUseAutoThreshold_g) threshold_g = thresCal.getGlintThreshold(false);
else threshold_g = threshold_g_frompanel;
// get glint position with dark eye image.
gFinder.update(magCurrentImg, threshold_g, minBlobSize_g, maxBlobSize_g, bUseGlintinBrightEye);
if (gFinder.bFound){
if (bUseAutoThreshold_p) threshold_p = thresCal.getPupilThreshold();
else threshold_p = threshold_p_frompanel;
if (bUseHomography && gFinder.bFourGlints){
// Homography..
ofxCvGrayscaleAdvanced* temp = homographyCal.getWarpedImage(magCurrentImg, gFinder, magRatio);
bFoundPupil = pFinder.update(*temp, threshold_p, minBlobSize_p, maxBlobSize_p, targetRect);
} else {
bFoundPupil = pFinder.update(magCurrentImg, threshold_p, minBlobSize_p, maxBlobSize_p, targetRect);
}
if (bFoundPupil){
pupilCentroid = pFinder.currentPupilCenter;
bFoundEye = true;
targetRectDark = targetRect;
}
}
}
}
// cout << "bFoundOne: " << bFoundOne << " bFoundPupil: " << bFoundPupil << " bFoundGlint: " << gFinder.bFound << endl;
}
示例11: fillBlobsWithInterpolatedData
//==============================================================
//==============================================================
// Scans each row of the blobs mask (ofxCv8uC1_Blobs, where HOLE = 255 marks
// hole pixels) for horizontal runs of hole pixels, and fills each run with
// values linearly interpolated between the depth values at the pixels just
// before and just after the run.
// NOTE(review): the interpolated values are written into blobsPixels (the
// mask), not depthPixels — confirm a later pass merges the mask back into
// the depth image, or whether depthPixels was the intended target.
// NOTE(review): KW/KH (image dimensions) and ofxCv8uC1_Blobs are members
// not shown in this excerpt.
void DepthHoleFiller::fillBlobsWithInterpolatedData (ofxCvGrayscaleImage &depthImage){
// interpolates between one edge of the hole and the other
unsigned char *depthPixels = depthImage.getPixels();
unsigned char *blobsPixels = ofxCv8uC1_Blobs.getPixels();
const unsigned char HOLE = 255;
int row = 0;
int index=0;
int runIndexA;              // first pixel of the current hole run
int runIndexB;              // last pixel of the current hole run
unsigned char runValueA;    // depth value just before the run
unsigned char runValueB;    // depth value just after the run
for (int y=0; y<KH; y++){
runIndexA = 0;
runIndexB = 0;
bool bRunStarted = false;
unsigned char bval0;
unsigned char bval1;
row = y*KW;
for (int x=1; x<KW; x++){
// Compare each mask pixel with its left neighbor to find run edges.
index = row + x;
bval0 = blobsPixels[index-1];
bval1 = blobsPixels[index ];
if ((bval0 != HOLE) && (bval1 == HOLE)){
// Non-hole -> hole transition: a run begins at this pixel.
runIndexA = index;
runValueA = depthPixels[index-1];
bRunStarted = true;
}
if (bRunStarted){
if ((bval0 == HOLE) && (bval1 != HOLE)){
// Hole -> non-hole transition: the run ended at the previous pixel.
runIndexB = index-1;
runValueB = depthPixels[index];
bRunStarted = false;
// since we have identified a hole run, fill it appropriately
int runlen = (runIndexB - runIndexA);
int vallen = (runValueB - runValueA);
// interpolate between the values.
if (runlen <= 1){
// Degenerate run: use the midpoint of the two edge values.
int va = runValueA;
int vb = runValueB;
unsigned char val = (unsigned char)((va+vb)/2);
for (int i=runIndexA; i<=runIndexB; i++){
blobsPixels[i] = val;
}
} else {
// Linear ramp from runValueA to runValueB across the run.
float runlenf = (float)(runlen);
float vallenf = (float)(vallen);
for (int i=runIndexA; i<=runIndexB; i++){
float tf = (float)(i-runIndexA)/runlenf;
float vf = runValueA + tf*vallenf;
unsigned char val = (unsigned char)(vf + 0.5);
blobsPixels[i] = val;
}
}
}
}
}
}
}
示例12: calculateTiny
//--------------------------------------------------------------
void Wind::calculateTiny( ofxCvGrayscaleImage& img )
{
int img_cell_height = img.height/(TINY_HEIGHT+2);
int img_cell_width = img.width/(TINY_WIDTH+2);
int img_row_count = img.height/img_cell_height;
int img_col_count = img.width/img_cell_width;
unsigned char* pixels = img.getPixels();
/*
int tx=-1;
int ty=-1;
if ( mouse_x_pct >= 0.0f && mouse_x_pct <= 1.0f && mouse_y_pct >= 0.0f && mouse_y_pct <= 1.0f )
{
tx = mouse_x_pct*TINY_WIDTH;
ty = mouse_y_pct*TINY_HEIGHT;
}*/
for ( int i=1; i<img_row_count-1; i++ )
{
for ( int j=1; j<img_col_count-1; j++ )
{
// loop through everything in this cell, average, and max
int start_x = j*img_cell_width - img_cell_width/2;
int end_x = start_x + 2.0f*img_cell_width;
int start_y = i*img_cell_height - img_cell_height/2;
int end_y = start_y + 2.0f*img_cell_height;
float max = 0;
int total = 0;
for ( int u = start_y; u<end_y; u++ )
{
int base = u*img.width;
// calculate u falloff factor
float u_factor = 1.0f-(2.0f*fabsf( ((float)(u-start_y))/(end_y-start_y) - 0.5f ));
for ( int v = start_x; v<end_x; v++ )
{
// get value
int index = base + v;
float val = (float)pixels[index];
// calculate v falloff facotr
float v_factor = 1.0f-(2.0f*fabsf( ((float)(v-start_x))/(end_x-start_x) - 0.5f ));
// apply falloff factors
val *= (u_factor+v_factor);
// average
total += val;
// max
if ( max < val )
max = val;
}
}
float average = (float)total/(img_cell_height*img_cell_width*4);
/*
if ( i-1 == ty && j-1 == tx )
tiny[(i-1)*TINY_WIDTH+(j-1)] = 255;
else*/
tiny[(i-1)*TINY_WIDTH+(j-1)] = (unsigned char)(average*0.5f+max*0.5f);
}
}
}