本文整理汇总了C++中ofxCvColorImage::allocate方法的典型用法代码示例。如果您正苦于以下问题:C++ ofxCvColorImage::allocate方法的具体用法?C++ ofxCvColorImage::allocate怎么用?C++ ofxCvColorImage::allocate使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类ofxCvColorImage的用法示例。
在下文中一共展示了ofxCvColorImage::allocate方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: setup
void setup(){
    // Load the sample image from the data folder.
    sampleImg.load("sample_img.jpg");
    // Allocate the OpenCV color buffer to match the loaded image's actual
    // dimensions instead of the original hard-coded 900x900 — a size
    // mismatch would force ofxOpenCv to reallocate (or warn) on the
    // assignment below.
    colorImg.allocate(sampleImg.getWidth(), sampleImg.getHeight());
    // Copy the image pixels into the OpenCV color image.
    colorImg = sampleImg;
    // Grayscale conversion happens inside the ofxCvGrayscaleImage assignment.
    grayImg = colorImg;
    // Binarize: pixels brighter than 200 become white, the rest black.
    grayImg.threshold(200);
}
示例2: setup
void setup() {
    // Size the application window and the camera grabber identically.
    ofSetWindowShape(width, height);
    grabber.initGrabber(width, height);

    // Reserve pixel storage up front for every helper image, all at the
    // same resolution as the grabber.
    colorImage.allocate(width, height);
    grayscaleImage.allocate(width, height);
    grayscaleBackgroundImage.allocate(width, height);
    grayscaleAbsoluteDifference.allocate(width, height);
    grayscaleBinary.allocate(width, height);
}
示例3: setup
//--------------------------------------------------------------
void testApp::setup(){
vidGrabber.setVerbose(true);
vidGrabber.initGrabber(640, 480);
colorImg.allocate(vidGrabber.getWidth(), vidGrabber.getHeight());
greyImage.allocate(vidGrabber.getWidth(), vidGrabber.getHeight());
greyImageSmall.allocate(120, 90);
haarFinder.setup("haarcascade_frontalface_alt2.xml");
img.loadImage("stevejobs.png");
img.setAnchorPercent(0.5, 0.5);
ofEnableAlphaBlending();
}
示例4: setup
// NOTE(review): this excerpt is truncated — the function body continues past
// the omission marker at the end, so the closing brace is not visible here.
void ofApp::setup() {
// Global render state for the whole app.
ofEnableSmoothing();
ofEnableAlphaBlending();
ofSetFrameRate(60);
ofSetVerticalSync(true);
ofEnableDepthTest();
ofEnableAntiAliasing();
// Zero the DMX universe buffer before first use.
memset( dmxData_, 0, DMX_DATA_LENGTH );
// Open the DMX device.
dmxInterface_ = ofxGenericDmx::createDevice(DmxDevice::DMX_DEVICE_RAW);
// NOTE(review): open() is called before the null check below — if
// createDevice() returned null this dereferences a null pointer; the
// `dmxInterface_ == 0` test should guard the open() call instead.
bool opened = dmxInterface_->open();
if ( dmxInterface_ == 0 || !opened ) {
printf( "No FTDI Device Found\n" );
} else {
printf( "isOpen: %i\n", dmxInterface_->isOpen() );
}
// NOTE(review): "%s.%s" assumes VERSION_MAJOR/VERSION_MINOR are C strings;
// if the addon defines them as integers this is undefined behavior — confirm.
printf("ofxGenericDmx addon version: %s.%s\n", ofxGenericDmx::VERSION_MAJOR, ofxGenericDmx::VERSION_MINOR);
// JSON description files for the installation geometry.
std::string file = "Lightweave_loops2.json";
std::string columnsFile = "Lightweave_columns2.json";
std::string facesFile = "Lightweave_faces2.json";
// NOTE(review): none of the three parse flags is checked in the visible
// code — a failed open() silently leaves the JSON objects empty.
bool parsingSuccessful = result.open(file);
bool parsingSuccessfulColumn = columnGeometry.open(columnsFile);
bool parsingSuccessfulFaces = faceGeometry.open(facesFile);
// Walk region/ring/point keys of the loops JSON. The innermost loop only
// builds the key string and discards it, so this traversal has no visible
// effect — presumably leftover or debug code; verify before removing.
for (int region = 0; region < 6; region++) {
string blah = "region" + ofToString(region);
for (int rings = 0; rings < result[blah].size(); rings++) {
string ring = "ring" + ofToString(rings);
for (int pointPos = 0; pointPos < 3; pointPos++) {
string point = "point" + ofToString(pointPos);
}
}
}
//setupUDP();
camWidth = 320;
camHeight = 240;
// Enumerate capture devices, logging each one's availability.
vector<ofVideoDevice> devices = vidGrabber.listDevices();
for (int i = 0; i < devices.size(); i++) {
if (devices[i].bAvailable) {
ofLogNotice() << devices[i].id << ": " << devices[i].deviceName;
} else {
ofLogNotice() << devices[i].id << ": " << devices[i].deviceName << " - unavailable ";
}
}
// Collect ids of devices whose name STARTS WITH "USB":
// NOTE(review): `!find("USB")` is true only when find() returns 0, i.e. the
// name begins with "USB" — a prefix test, not a contains test; confirm intent.
for (int i = 0; i < devices.size(); i++) {
if (!devices[i].deviceName.find("USB")) {
cout << devices[i].id << endl;
pcCams.push_back(devices[i].id);
}
}
// NOTE(review): pcCams[0]/pcCams[1] are indexed without a size check —
// fewer than two matching USB cameras would read out of bounds here.
vidGrabber.setDeviceID(pcCams[0]);
// vidGrabber.setDeviceID(0);
vidGrabber.initGrabber(320,240);
vidGrabber1.setDeviceID(pcCams[1]);
// vidGrabber1.setDeviceID(0);
vidGrabber1.initGrabber(320,240);
// OpenCV working images for each camera: color, gray, background, diff.
colorImg1.allocate(320,240);
grayImage1.allocate(320,240);
grayBg1.allocate(320,240);
grayDiff1.allocate(320,240);
colorImg.allocate(320,240);
grayImage.allocate(320,240);
grayBg.allocate(320,240);
grayDiff.allocate(320,240);
// Background-subtraction state: request a fresh background capture for both
// cameras, with a fixed binarization threshold.
bLearnBackground = true;
bLearnBackground1 = true;
threshold = 80;
drawOne = false;
// Swarm motion parameters — semantics defined elsewhere in the project.
bottomSwarm.a = 1.1f;
bottomSwarm.b = (curWidth/4.0);
bottomSwarm.c = 100.0;
bottomSwarm.bVel = 1.0;
xPos = 0;
yPos = 0;
zPos = 0;
// Position the camera at the first point of region0/ring0 from the JSON.
cam.setPosition(result["region0"]["ring0"]["point0"][0].asFloat(),result["region0"]["ring0"]["point0"][1].asFloat(),result["region0"]["ring0"]["point0"][2].asFloat());
//......... part of the code is omitted here .........