本文整理汇总了C++中ofVideoGrabber::initGrabber方法的典型用法代码示例。如果您正苦于以下问题:C++ ofVideoGrabber::initGrabber方法的具体用法?C++ ofVideoGrabber::initGrabber怎么用?C++ ofVideoGrabber::initGrabber使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类 ofVideoGrabber 的用法示例。
在下文中一共展示了ofVideoGrabber::initGrabber方法的12个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: setup
void setup() {
vid.initGrabber(w, h);
mesh.setMode(OF_PRIMITIVE_POINTS);
mesh.addVertices(vector<ofVec3f>(n));
mesh.addColors(vector<ofFloatColor>(n));
}
示例2: setup
//--------------------------------------------------------------
void ofApp::setup(){
//we can now get back a list of devices.
vector<ofVideoDevice> devices = vidGrabber.listDevices();
for(int i = 0; i < devices.size(); i++){
cout << devices[i].id << ": " << devices[i].deviceName;
if( devices[i].bAvailable ){
cout << endl;
}else{
cout << " - unavailable " << endl;
}
}
vidGrabber.setDeviceID(1);
vidGrabber.setVerbose(true);
vidGrabber.initGrabber(width, height);
recorded = (unsigned char*)malloc(3 * height * width * record_size);
tmp = (unsigned char*)malloc(3 * height * width);
back = (unsigned char*)malloc(3 * height * width);
merged = (unsigned char*)malloc(3 * height * width);
show = vidGrabber.getPixels();
udpConnection.Create();
udpConnection.Bind(1511);
udpConnection.SetNonBlocking(true);
for (int i = 0; i < trackers_cnt; i++) {
tracker t = tracker(i);
trackers.push_back(t);
}
outfile.open("/users/yui/desktop/yellow_imgs/teacher/log.txt", std::ios_base::app);
}
示例3: openCamera
// AIR native-extension entry point: start the camera capture at 720p.
// The extension context and the ActionScript arguments are unused; the
// function always returns no value to the ActionScript side.
FREObject openCamera(FREContext ctx, void* funcData, uint32_t argc, FREObject argv[])
{
	gGrabber.initGrabber(1280, 720);
	// FREObject is a pointer type; prefer nullptr over NULL in C++11+.
	return nullptr;
}
示例4: setup
// Set up the sketch: open the camera and allocate every OpenCV image
// used by the background-subtraction pipeline, all at the same size.
void setup()
{
    ofSetWindowShape(width, height); // Set the window size.
    grabber.initGrabber(width, height); // Set the grabber size.
    // Allocate each of our helper images.
    colorImage.allocate(width, height);               // raw camera frame
    grayscaleImage.allocate(width, height);           // grayscale of current frame
    grayscaleBackgroundImage.allocate(width, height); // captured background reference
    grayscaleAbsoluteDifference.allocate(width, height); // |frame - background|
    grayscaleBinary.allocate(width, height);          // thresholded difference mask
}
示例5: setup
void setup()
{
	// Basic app state: vsync on, 60 fps cap, black background.
	ofSetVerticalSync(true);
	ofSetFrameRate(60);
	ofBackground(0);

	// Camera and ISF render target share the same 720p size;
	// the target uses a float RGB texture.
	const int capW = 1280;
	const int capH = 720;
	video.initGrabber(capW, capH);
	isf.setup(capW, capH, GL_RGB32F);

	// Load the shader and feed it the live camera texture.
	isf.load("isf-test");
	isf.setImage("inputImage", video.getTexture());
}
示例6: setup
// Set up the sketch: 720p camera feeding a two-pass shader chain
// (zoom blur, then cubic lens distortion).
void setup()
{
	ofSetFrameRate(60);
	ofSetVerticalSync(true);
	ofBackground(0);
	video.initGrabber(1280, 720);
	chain.setup(1280, 720);
	chain.load("ZoomBlur.fs");
	chain.load("CubicLensDistortion.fs");
	// getTextureReference() is the deprecated alias of getTexture();
	// use the current API, consistent with the other examples here.
	chain.setImage(video.getTexture());
}
示例7: setup
//--------------------------------------------------------------
void testApp::setup(){
vidGrabber.setVerbose(true);
vidGrabber.initGrabber(640, 480);
colorImg.allocate(vidGrabber.getWidth(), vidGrabber.getHeight());
greyImage.allocate(vidGrabber.getWidth(), vidGrabber.getHeight());
greyImageSmall.allocate(120, 90);
haarFinder.setup("haarcascade_frontalface_alt2.xml");
img.loadImage("stevejobs.png");
img.setAnchorPercent(0.5, 0.5);
ofEnableAlphaBlending();
}
示例8: setup
//--------------------------------------------------------------
void testApp::setup(){
ofBackground(50, 50, 50);
// dump everything to console
ofSetLogLevel(OF_LOG_VERBOSE);
// disable vsync (to allow >60fps)
ofSetVerticalSync(false);
// init grabber
videoGrabber.initGrabber(640, 480);
vidWidth = videoGrabber.getWidth();
vidHeight = videoGrabber.getHeight();
// allocate temp buffer
pixels = new unsigned char[vidWidth * vidHeight * 4];
// init OpenCL from OpenGL context to enable GL-CL data sharing
openCL.setupFromOpenGL();
// create OpenCL textures and related OpenGL textures
clImage[0].initWithTexture(vidWidth, vidHeight, GL_RGBA);
clImage[1].initWithTexture(vidWidth, vidHeight, GL_RGBA);
// load and compile OpenCL program
openCL.loadProgramFromFile("MSAOpenCL/ImageProcessing.cl");
// load kernels
openCL.loadKernel("msa_boxblur");
openCL.loadKernel("msa_flipx");
openCL.loadKernel("msa_flipy");
openCL.loadKernel("msa_greyscale");
openCL.loadKernel("msa_invert");
openCL.loadKernel("msa_threshold");
}
示例9: setup
void ofApp::setup() {
ofEnableSmoothing();
ofEnableAlphaBlending();
ofSetFrameRate(60);
ofSetVerticalSync(true);
ofEnableDepthTest();
ofEnableAntiAliasing();
memset( dmxData_, 0, DMX_DATA_LENGTH );
//open the device
dmxInterface_ = ofxGenericDmx::createDevice(DmxDevice::DMX_DEVICE_RAW);
bool opened = dmxInterface_->open();
if ( dmxInterface_ == 0 || !opened ) {
printf( "No FTDI Device Found\n" );
} else {
printf( "isOpen: %i\n", dmxInterface_->isOpen() );
}
printf("ofxGenericDmx addon version: %s.%s\n", ofxGenericDmx::VERSION_MAJOR, ofxGenericDmx::VERSION_MINOR);
std::string file = "Lightweave_loops2.json";
std::string columnsFile = "Lightweave_columns2.json";
std::string facesFile = "Lightweave_faces2.json";
bool parsingSuccessful = result.open(file);
bool parsingSuccessfulColumn = columnGeometry.open(columnsFile);
bool parsingSuccessfulFaces = faceGeometry.open(facesFile);
for (int region = 0; region < 6; region++) {
string blah = "region" + ofToString(region);
for (int rings = 0; rings < result[blah].size(); rings++) {
string ring = "ring" + ofToString(rings);
for (int pointPos = 0; pointPos < 3; pointPos++) {
string point = "point" + ofToString(pointPos);
}
}
}
//setupUDP();
camWidth = 320;
camHeight = 240;
vector<ofVideoDevice> devices = vidGrabber.listDevices();
for (int i = 0; i < devices.size(); i++) {
if (devices[i].bAvailable) {
ofLogNotice() << devices[i].id << ": " << devices[i].deviceName;
} else {
ofLogNotice() << devices[i].id << ": " << devices[i].deviceName << " - unavailable ";
}
}
for (int i = 0; i < devices.size(); i++) {
if (!devices[i].deviceName.find("USB")) {
cout << devices[i].id << endl;
pcCams.push_back(devices[i].id);
}
}
vidGrabber.setDeviceID(pcCams[0]);
// vidGrabber.setDeviceID(0);
vidGrabber.initGrabber(320,240);
vidGrabber1.setDeviceID(pcCams[1]);
// vidGrabber1.setDeviceID(0);
vidGrabber1.initGrabber(320,240);
colorImg1.allocate(320,240);
grayImage1.allocate(320,240);
grayBg1.allocate(320,240);
grayDiff1.allocate(320,240);
colorImg.allocate(320,240);
grayImage.allocate(320,240);
grayBg.allocate(320,240);
grayDiff.allocate(320,240);
bLearnBackground = true;
bLearnBackground1 = true;
threshold = 80;
drawOne = false;
bottomSwarm.a = 1.1f;
bottomSwarm.b = (curWidth/4.0);
bottomSwarm.c = 100.0;
bottomSwarm.bVel = 1.0;
xPos = 0;
yPos = 0;
zPos = 0;
cam.setPosition(result["region0"]["ring0"]["point0"][0].asFloat(),result["region0"]["ring0"]["point0"][1].asFloat(),result["region0"]["ring0"]["point0"][2].asFloat());
//.........这里部分代码省略.........
示例10: setup
void setup() {
    // Pull shared data/assets used across the example projects.
    useSharedData();
    // Initialize the face tracker before frames start arriving.
    tracker.setup();
    // Open the camera at 720p.
    cam.initGrabber(1280, 720);
}
示例11: setup
// Set up our sketch.
void setup()
{
	// Window, grabber and pixel buffer all share the same 720p size.
	const int camW = 1280;
	const int camH = 720;

	ofSetWindowShape(camW, camH);               // Set the window size.
	grabber.initGrabber(camW, camH);            // Set the grabber size.
	pixels.allocate(camW, camH, OF_PIXELS_RGB); // Allocate memory for our pixels.
}
示例12: setup
void setup() {
	// Load the pretrained face model, then capture at half of full HD.
	ccv.setupFace("face.sqlite3");
	const int halfWidth = 1920 / 2;   // 960
	const int halfHeight = 1080 / 2;  // 540
	cam.initGrabber(halfWidth, halfHeight);
}