This page collects and summarizes typical usage examples of the C++ method ofxCvGrayscaleImage::allocate. If you are unsure what ofxCvGrayscaleImage::allocate does, how to call it, or how it is used in real code, the hand-picked examples below should help. You can also read further about its enclosing class, ofxCvGrayscaleImage.
The following 6 code examples of ofxCvGrayscaleImage::allocate are shown below, sorted by popularity by default.
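Before the examples, a minimal sketch of the usual allocate-then-fill pattern may be helpful. It assumes an openFrameworks project with the ofxOpenCv addon and the older (pre-0.9) API style that the examples below use; the member names cam, colorImg and grayImg are placeholders rather than names taken from any example on this page.
// Assumed members in ofApp.h: ofVideoGrabber cam; ofxCvColorImage colorImg; ofxCvGrayscaleImage grayImg;
void ofApp::setup(){
    cam.initGrabber(320, 240);
    colorImg.allocate(320, 240);   // reserve pixel storage once, before the image is first used
    grayImg.allocate(320, 240);    // size must match the image that will be assigned to it
}

void ofApp::update(){
    cam.update();
    if(cam.isFrameNew()){
        colorImg.setFromPixels(cam.getPixels(), 320, 240);
        grayImg = colorImg;        // assigning a color image converts it to grayscale
    }
}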
Example 1: convertToGrayscalePlanarImages
//--------------------------------------------------------------------------------
void ofxCvColorImage::convertToGrayscalePlanarImages(ofxCvGrayscaleImage& red, ofxCvGrayscaleImage& green, ofxCvGrayscaleImage& blue){
    if( !bAllocated ){
        ofLogError("ofxCvColorImage") << "convertToGrayscalePlanarImages(): image not allocated";
        return;
    }

    // Allocate any destination image the caller has not allocated yet.
    if( !red.bAllocated ){
        red.allocate(width, height);
    }
    if( !green.bAllocated ){
        green.allocate(width, height);
    }
    if( !blue.bAllocated ){
        blue.allocate(width, height);
    }

    // Read the regions of interest after allocation so freshly allocated
    // destinations report their full size.
    ofRectangle roi = getROI();
    ofRectangle redRoi = red.getROI();
    ofRectangle greenRoi = green.getROI();
    ofRectangle blueRoi = blue.getROI();

    if( redRoi.width == roi.width && redRoi.height == roi.height &&
        greenRoi.width == roi.width && greenRoi.height == roi.height &&
        blueRoi.width == roi.width && blueRoi.height == roi.height )
    {
        // Split the interleaved RGB image into three single-channel planes.
        cvCvtPixToPlane(cvImage, red.getCvImage(), green.getCvImage(), blue.getCvImage(), NULL);
        red.flagImageChanged();
        green.flagImageChanged();
        blue.flagImageChanged();
    } else {
        ofLogError("ofxCvColorImage") << "convertToGrayscalePlanarImages(): image size or region of interest mismatch";
    }
}
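A hedged usage sketch for the method above: given a filled ofxCvColorImage, the three destination images may be passed in unallocated and will be allocated on demand, as long as the sizes and regions of interest end up matching. The variable names here are illustrative only.
ofxCvColorImage colorImg;                 // assumed to be allocated and filled elsewhere
ofxCvGrayscaleImage redPlane, greenPlane, bluePlane;
colorImg.convertToGrayscalePlanarImages(redPlane, greenPlane, bluePlane);
// redPlane, greenPlane and bluePlane each now hold one 8-bit channel of colorImg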
Example 2: setup
void setup()
{
    ofSetWindowShape(width, height);     // Set the window size.
    grabber.initGrabber(width, height);  // Set the grabber size.

    // Allocate each of our helper images.
    colorImage.allocate(width, height);
    grayscaleImage.allocate(width, height);
    grayscaleBackgroundImage.allocate(width, height);
    grayscaleAbsoluteDifference.allocate(width, height);
    grayscaleBinary.allocate(width, height);
}
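The allocations above prepare a classic background-subtraction pipeline. A hedged sketch of the matching update() step follows, reusing the members from Example 2 and assuming an int threshold variable that is not shown in the original snippet.
void update()
{
    grabber.update();
    if (grabber.isFrameNew())
    {
        colorImage.setFromPixels(grabber.getPixels(), width, height);
        grayscaleImage = colorImage;                  // convert the camera frame to grayscale
        grayscaleAbsoluteDifference.absDiff(grayscaleBackgroundImage, grayscaleImage);
        grayscaleBinary = grayscaleAbsoluteDifference;
        grayscaleBinary.threshold(threshold);         // binarize the per-pixel difference
    }
}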
Example 3: setup
//--------------------------------------------------------------
void testApp::setup(){
    vidGrabber.setVerbose(true);
    vidGrabber.initGrabber(640, 480);

    colorImg.allocate(vidGrabber.getWidth(), vidGrabber.getHeight());
    greyImage.allocate(vidGrabber.getWidth(), vidGrabber.getHeight());
    greyImageSmall.allocate(120, 90);

    haarFinder.setup("haarcascade_frontalface_alt2.xml");

    img.loadImage("stevejobs.png");
    img.setAnchorPercent(0.5, 0.5);

    ofEnableAlphaBlending();
}
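Example 3 allocates greyImageSmall purely so that face detection can run on a downscaled frame. A hedged sketch of the corresponding update() step, using standard ofxOpenCv and ofxCvHaarFinder calls, might look like this:
void testApp::update(){
    vidGrabber.update();
    if(vidGrabber.isFrameNew()){
        colorImg.setFromPixels(vidGrabber.getPixels(), 640, 480);
        greyImage = colorImg;                        // grayscale copy at full resolution
        greyImageSmall.scaleIntoMe(greyImage);       // shrink to 120x90 for faster detection
        haarFinder.findHaarObjects(greyImageSmall);  // detect faces in the small image
    }
}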
Example 4: convertToGrayscalePlanarImage
//--------------------------------------------------------------------------------
void ofxCvColorImage::convertToGrayscalePlanarImage (ofxCvGrayscaleImage& grayImage, int whichPlane){
    if( !bAllocated ){
        ofLogError("ofxCvColorImage") << "convertToGrayscalePlanarImage(): image not allocated";
        return;
    }

    if( !grayImage.bAllocated ){
        grayImage.allocate(width, height);
    }

    ofRectangle roi = getROI();
    ofRectangle grayRoi = grayImage.getROI();

    if( grayRoi.width == roi.width && grayRoi.height == roi.height ){
        // Copy the requested channel into the single-channel destination image.
        switch (whichPlane){
            case 0:
                cvCvtPixToPlane(cvImage, grayImage.getCvImage(), NULL, NULL, NULL);
                grayImage.flagImageChanged();
                break;
            case 1:
                cvCvtPixToPlane(cvImage, NULL, grayImage.getCvImage(), NULL, NULL);
                grayImage.flagImageChanged();
                break;
            case 2:
                cvCvtPixToPlane(cvImage, NULL, NULL, grayImage.getCvImage(), NULL);
                grayImage.flagImageChanged();
                break;
        }
    } else {
        ofLogError("ofxCvColorImage") << "convertToGrayscalePlanarImage(): image size or region of interest mismatch";
    }
}
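A hedged usage sketch for extracting a single channel with the method above; the variable names are illustrative only.
ofxCvColorImage colorImg;          // assumed to be allocated and filled elsewhere
ofxCvGrayscaleImage redPlane;
colorImg.convertToGrayscalePlanarImage(redPlane, 0);   // plane 0 = red, 1 = green, 2 = blue
// redPlane is allocated on demand and now contains only the red channel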
Example 5: setup
void ofApp::setup() {
    ofEnableSmoothing();
    ofEnableAlphaBlending();
    ofSetFrameRate(60);
    ofSetVerticalSync(true);
    ofEnableDepthTest();
    ofEnableAntiAliasing();

    memset( dmxData_, 0, DMX_DATA_LENGTH );

    // Open the DMX device; check for a valid interface before calling open().
    dmxInterface_ = ofxGenericDmx::createDevice(DmxDevice::DMX_DEVICE_RAW);
    bool opened = ( dmxInterface_ != 0 ) && dmxInterface_->open();
    if ( dmxInterface_ == 0 || !opened ) {
        printf( "No FTDI Device Found\n" );
    } else {
        printf( "isOpen: %i\n", dmxInterface_->isOpen() );
    }
    printf("ofxGenericDmx addon version: %s.%s\n", ofxGenericDmx::VERSION_MAJOR, ofxGenericDmx::VERSION_MINOR);

    std::string file = "Lightweave_loops2.json";
    std::string columnsFile = "Lightweave_columns2.json";
    std::string facesFile = "Lightweave_faces2.json";
    bool parsingSuccessful = result.open(file);
    bool parsingSuccessfulColumn = columnGeometry.open(columnsFile);
    bool parsingSuccessfulFaces = faceGeometry.open(facesFile);

    for (int region = 0; region < 6; region++) {
        string blah = "region" + ofToString(region);
        for (int rings = 0; rings < result[blah].size(); rings++) {
            string ring = "ring" + ofToString(rings);
            for (int pointPos = 0; pointPos < 3; pointPos++) {
                string point = "point" + ofToString(pointPos);
            }
        }
    }

    //setupUDP();

    camWidth = 320;
    camHeight = 240;

    vector<ofVideoDevice> devices = vidGrabber.listDevices();
    for (int i = 0; i < devices.size(); i++) {
        if (devices[i].bAvailable) {
            ofLogNotice() << devices[i].id << ": " << devices[i].deviceName;
        } else {
            ofLogNotice() << devices[i].id << ": " << devices[i].deviceName << " - unavailable ";
        }
    }
    for (int i = 0; i < devices.size(); i++) {
        if (!devices[i].deviceName.find("USB")) {
            cout << devices[i].id << endl;
            pcCams.push_back(devices[i].id);
        }
    }

    vidGrabber.setDeviceID(pcCams[0]);
    // vidGrabber.setDeviceID(0);
    vidGrabber.initGrabber(320, 240);

    vidGrabber1.setDeviceID(pcCams[1]);
    // vidGrabber1.setDeviceID(0);
    vidGrabber1.initGrabber(320, 240);

    // Allocate color, grayscale, background and difference images for both cameras.
    colorImg1.allocate(320, 240);
    grayImage1.allocate(320, 240);
    grayBg1.allocate(320, 240);
    grayDiff1.allocate(320, 240);

    colorImg.allocate(320, 240);
    grayImage.allocate(320, 240);
    grayBg.allocate(320, 240);
    grayDiff.allocate(320, 240);

    bLearnBackground = true;
    bLearnBackground1 = true;
    threshold = 80;
    drawOne = false;

    bottomSwarm.a = 1.1f;
    bottomSwarm.b = (curWidth / 4.0);
    bottomSwarm.c = 100.0;
    bottomSwarm.bVel = 1.0;

    xPos = 0;
    yPos = 0;
    zPos = 0;

    cam.setPosition(result["region0"]["ring0"]["point0"][0].asFloat(),
                    result["region0"]["ring0"]["point0"][1].asFloat(),
                    result["region0"]["ring0"]["point0"][2].asFloat());
//......... part of the code is omitted here .........
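The eight allocate calls in Example 5 set up two parallel background-subtraction pipelines, one per camera. For the first camera, the per-frame step would roughly follow the standard openFrameworks opencvExample pattern; a hedged sketch using only members that appear in the example:
vidGrabber.update();
if (vidGrabber.isFrameNew()){
    colorImg.setFromPixels(vidGrabber.getPixels(), 320, 240);
    grayImage = colorImg;                    // grayscale copy of the current frame
    if (bLearnBackground){
        grayBg = grayImage;                  // store the current frame as the background
        bLearnBackground = false;
    }
    grayDiff.absDiff(grayBg, grayImage);     // difference against the stored background
    grayDiff.threshold(threshold);           // keep only pixels that changed enough
}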
Example 6: setup
//--------------------------------------------------------------
void testApp::setup(){
    /************ SET UP BALL ***********************/
    // Initialize Ball
    ballPositionX = 150;
    ballPositionY = 150;
    ballVelocityX = ofRandom(-5, 5);
    ballVelocityY = ofRandom(-5, 5);

    /************ SET UP BACKGROUND ***********************/
    // Set the background
    ofBackground(0, 0, 0);

    // Load background picture
    //mirrorTexture.allocate(camWidth, camHeight, GL_RGB);
    img.loadImage("space.jpg");

    // Load background movie
    //video.loadMovie("MilkywayLow.mov");
    //video.play();

    /************ SET UP PARTICLE SYSTEM ***********************/
    // Initialize bins
    int binPower = 2;
    particleSystem.setup(ofGetWidth(), ofGetHeight(), binPower);

    kParticles = 2;
    float padding = 1;
    float maxVelocity = 1.5;
    for(int i = 0; i < kParticles * (ofGetWidth() - padding); i++) {
        float x = ofRandom(padding, ofGetWidth() - padding);
        float y = ofRandom(padding, ofGetHeight() - padding);
        float xv = ofRandom(-maxVelocity, maxVelocity);
        float yv = ofRandom(-maxVelocity, maxVelocity);
        Particle particle(x, y, xv, yv);
        particleSystem.add(particle);
    }

    timeStep = 1;
    lineOpacity = 100;
    pointOpacity = 255;
    isMousePressed = false;
    keyAIsDown = false;
    slowMotion = false;
    particleNeighborhood = 4;
    particleRepulsion = 1;
    centerAttraction = .05;

    /************ SET UP KINECT ***********************/
    ofSetVerticalSync(true);

    // Initialize Kinect
    kinect.init();
    kinect.setVerbose(true);
    kinect.enableDepthNearValueWhite(true);
    kinect.open();

    // Set up different camera views
    colorImg.allocate(kinect.width, kinect.height);
    grayImage.allocate(kinect.width, kinect.height);
    grayThresh.allocate(kinect.width, kinect.height);
    grayThreshFar.allocate(kinect.width, kinect.height);
    depthOrig.allocate(kinect.getWidth(), kinect.getHeight());
    depthProcessed.allocate(kinect.getWidth(), kinect.getHeight());
    colorImageRGB.allocate(kinect.getWidth(), kinect.getHeight());

    // Establish depth and threshold settings
    zDepth = 1.0;
    nearThreshold = 50;
    farThreshold = 180;
    bThreshWithOpenCV = true;

    /******** SET UP GUI ***********************/
    gui.setup();
    gui.config->gridSize.x = 250;
    gui.addTitle("DEPTH PRE PROCESSING");
    gui.addToggle("invert", invert);
    gui.addToggle("mirror", mirror);
    gui.addSlider("preBlur", preBlur, 0, 20);
    gui.addSlider("bottomThreshold", bottomThreshold, 0, 1);
    gui.addSlider("topThreshold", topThreshold, 0, 1);
    gui.addSlider("erodeAmount", erodeAmount, 0, 10);
    gui.addSlider("dilateAmount", dilateAmount, 0, 10);
    gui.addToggle("dilateBeforeErode", dilateBeforeErode);
//......... part of the code is omitted here .........
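The Kinect-related allocations in Example 6 mirror the stock ofxKinect example, where update() thresholds the depth image from both ends and combines the two masks. A hedged sketch of that step, assuming the same pre-0.9 API as the example (getDepthPixels() returning unsigned char*):
kinect.update();
if (kinect.isFrameNew()){
    grayImage.setFromPixels(kinect.getDepthPixels(), kinect.width, kinect.height);
    if (bThreshWithOpenCV){
        grayThreshFar = grayImage;
        grayThresh = grayImage;
        grayThresh.threshold(nearThreshold, true);   // mask out everything nearer than nearThreshold
        grayThreshFar.threshold(farThreshold);       // mask out everything farther than farThreshold
        cvAnd(grayThresh.getCvImage(), grayThreshFar.getCvImage(),
              grayImage.getCvImage(), NULL);         // keep only the band in between
    }
}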