This article collects typical usage examples of the C++ method ofVideoGrabber::setDeviceID. If you are unsure what ofVideoGrabber::setDeviceID does, how to call it, or what it looks like in real code, the curated examples below may help. You can also look further into the usage of the enclosing class, ofVideoGrabber.
Two code examples of ofVideoGrabber::setDeviceID are shown below, ordered by popularity.
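Before the full examples, here is a minimal sketch of the usual calling pattern, assuming vidGrabber is an ofVideoGrabber member of ofApp; the device id 0 and the 640x480 size are placeholders, not values taken from the examples below. The key point is that setDeviceID() is called after listing the devices and before initGrabber().
void ofApp::setup(){
    // Enumerate the capture devices so the right id can be chosen.
    vector<ofVideoDevice> devices = vidGrabber.listDevices();
    for (size_t i = 0; i < devices.size(); i++) {
        ofLogNotice() << devices[i].id << ": " << devices[i].deviceName;
    }
    // Placeholder id 0: pick whichever id the listing above reported.
    vidGrabber.setDeviceID(0);
    vidGrabber.initGrabber(640, 480);
}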
Example 1: setup
//--------------------------------------------------------------
void ofApp::setup(){
    // We can now get back a list of devices.
    vector<ofVideoDevice> devices = vidGrabber.listDevices();
    for(int i = 0; i < devices.size(); i++){
        cout << devices[i].id << ": " << devices[i].deviceName;
        if( devices[i].bAvailable ){
            cout << endl;
        }else{
            cout << " - unavailable " << endl;
        }
    }

    // Select device id 1 and start grabbing at the configured size.
    vidGrabber.setDeviceID(1);
    vidGrabber.setVerbose(true);
    vidGrabber.initGrabber(width, height);

    // Raw RGB buffers: space for record_size recorded frames plus per-frame scratch buffers.
    recorded = (unsigned char*)malloc(3 * height * width * record_size);
    tmp      = (unsigned char*)malloc(3 * height * width);
    back     = (unsigned char*)malloc(3 * height * width);
    merged   = (unsigned char*)malloc(3 * height * width);
    show     = vidGrabber.getPixels();

    // Non-blocking UDP socket bound to port 1511.
    udpConnection.Create();
    udpConnection.Bind(1511);
    udpConnection.SetNonBlocking(true);

    for (int i = 0; i < trackers_cnt; i++) {
        tracker t = tracker(i);
        trackers.push_back(t);
    }

    outfile.open("/users/yui/desktop/yellow_imgs/teacher/log.txt", std::ios_base::app);
}
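A setup() like Example 1 is normally paired with update() and draw() callbacks that pull and display new frames. The short sketch below is not part of the original example; it assumes the same vidGrabber, width and height members.
//--------------------------------------------------------------
void ofApp::update(){
    // Poll the grabber; isFrameNew() reports whether fresh pixels arrived.
    vidGrabber.update();
    if (vidGrabber.isFrameNew()) {
        // New pixel data would be copied or processed here.
    }
}

//--------------------------------------------------------------
void ofApp::draw(){
    vidGrabber.draw(0, 0, width, height);
}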
Example 2: setup
void ofApp::setup() {
    ofEnableSmoothing();
    ofEnableAlphaBlending();
    ofSetFrameRate(60);
    ofSetVerticalSync(true);
    ofEnableDepthTest();
    ofEnableAntiAliasing();

    memset( dmxData_, 0, DMX_DATA_LENGTH );

    // Open the DMX device; check for a null pointer before calling open() on it.
    dmxInterface_ = ofxGenericDmx::createDevice(DmxDevice::DMX_DEVICE_RAW);
    if ( dmxInterface_ == 0 || !dmxInterface_->open() ) {
        printf( "No FTDI Device Found\n" );
    } else {
        printf( "isOpen: %i\n", dmxInterface_->isOpen() );
    }
    printf("ofxGenericDmx addon version: %s.%s\n", ofxGenericDmx::VERSION_MAJOR, ofxGenericDmx::VERSION_MINOR);

    // Load the geometry description files (JSON).
    std::string file        = "Lightweave_loops2.json";
    std::string columnsFile = "Lightweave_columns2.json";
    std::string facesFile   = "Lightweave_faces2.json";
    bool parsingSuccessful       = result.open(file);
    bool parsingSuccessfulColumn = columnGeometry.open(columnsFile);
    bool parsingSuccessfulFaces  = faceGeometry.open(facesFile);

    // Walk the region/ring/point keys of the loaded JSON.
    for (int region = 0; region < 6; region++) {
        string blah = "region" + ofToString(region);
        for (int rings = 0; rings < result[blah].size(); rings++) {
            string ring = "ring" + ofToString(rings);
            for (int pointPos = 0; pointPos < 3; pointPos++) {
                string point = "point" + ofToString(pointPos);
            }
        }
    }

    //setupUDP();

    camWidth = 320;
    camHeight = 240;

    // List the capture devices and log their availability.
    vector<ofVideoDevice> devices = vidGrabber.listDevices();
    for (int i = 0; i < devices.size(); i++) {
        if (devices[i].bAvailable) {
            ofLogNotice() << devices[i].id << ": " << devices[i].deviceName;
        } else {
            ofLogNotice() << devices[i].id << ": " << devices[i].deviceName << " - unavailable ";
        }
    }

    // Collect the ids of devices whose names start with "USB"
    // (find() == 0 means the substring is at position 0).
    for (int i = 0; i < devices.size(); i++) {
        if (!devices[i].deviceName.find("USB")) {
            cout << devices[i].id << endl;
            pcCams.push_back(devices[i].id);
        }
    }

    // Bind each grabber to one of the USB cameras found above.
    vidGrabber.setDeviceID(pcCams[0]);
    // vidGrabber.setDeviceID(0);
    vidGrabber.initGrabber(320, 240);

    vidGrabber1.setDeviceID(pcCams[1]);
    // vidGrabber1.setDeviceID(0);
    vidGrabber1.initGrabber(320, 240);

    // OpenCV images for background subtraction, one set per camera.
    colorImg1.allocate(320, 240);
    grayImage1.allocate(320, 240);
    grayBg1.allocate(320, 240);
    grayDiff1.allocate(320, 240);

    colorImg.allocate(320, 240);
    grayImage.allocate(320, 240);
    grayBg.allocate(320, 240);
    grayDiff.allocate(320, 240);

    bLearnBackground = true;
    bLearnBackground1 = true;
    threshold = 80;
    drawOne = false;

    bottomSwarm.a = 1.1f;
    bottomSwarm.b = (curWidth / 4.0);
    bottomSwarm.c = 100.0;
    bottomSwarm.bVel = 1.0;

    xPos = 0;
    yPos = 0;
    zPos = 0;

    cam.setPosition(result["region0"]["ring0"]["point0"][0].asFloat(),
                    result["region0"]["ring0"]["point0"][1].asFloat(),
                    result["region0"]["ring0"]["point0"][2].asFloat());
//......... the rest of this example is omitted here .........