本文整理汇总了C++中ofVideoGrabber类的典型用法代码示例。如果您正苦于以下问题:C++ ofVideoGrabber类的具体用法?C++ ofVideoGrabber怎么用?C++ ofVideoGrabber使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了ofVideoGrabber类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: setup
void ModeFour::setup( ofVideoGrabber &vidGrabber, vector<MovingBackground> &bkgListFour ) {
    width = ofGetWindowWidth();
    height = ofGetWindowHeight();

    // Build the "curtain" grid: one polyline per column (10 px apart) with one
    // control point per row (8 px apart). The grid extends 15 cells beyond the
    // window on every side so motion at the edges still has material to pull.
    const int columnCount = ofGetWindowWidth() / 10;
    const int rowCount = ofGetWindowHeight() / 8;
    for (int col = -15; col < columnCount + 15; col++) {
        ofPolyline columnLine;
        lineList.push_back(columnLine);
        for (int row = -15; row < rowCount + 15; row++) {
            Curtain curtain;
            curtain.setup(ofVec2f(col * 10, row * 8));
            pList.push_back(curtain);
            // col + 15 maps the offset loop index back to the polyline just added.
            lineList[col + 15].addVertex(ofVec2f(curtain.pos.x, curtain.pos.y));
        }
    }

    background.loadImage("tint.jpg");

    // Optical-flow solver sized to the camera feed.
    flowSolver.setup(vidGrabber.getWidth(), vidGrabber.getHeight(), 0.5, 3, 10, 1, 7, 1.5, false, false);

    // NOTE(review): assumes bkgListFour already holds at least 4 entries — confirm at caller.
    bkgListFour[0].setup("bkg_modeFour_0.png");
    bkgListFour[1].setup("bkg_modeFour_1.png");
    bkgListFour[2].setup("bkg_modeFour_2.png");
    bkgListFour[3].setup("bkg_modeFour_3.png");
}
示例2: update
void update() {
cam->update();
if(cam->isFrameNew()) {
ofPixels& pix = cam->getPixels();
int skip = 2;
int range = mouseX / 25;
for(int y = 0; y < pix.getHeight(); y += skip) {
for(int x = 0; x < pix.getWidth(); x += skip) {
ofColor cur = pix.getColor(x, y);
ofColor result(0, 0, 0, 0);
if(cur.r < range || cur.r > 255-range) {
result.r = 255;
result.a = 255;
}
if(cur.g < range || cur.g > 255-range) {
result.g = 255;
result.a = 255;
}
if(cur.b < range || cur.b > 255-range) {
result.b = 255;
result.a = 255;
}
clipping.setColor(x, y, result);
}
}
clipping.update();
if(recording) {
string fn = "images/" + ofToString(frameCount, 6, '0') + ".jpg";
imageSaver.saveImage(pix, fn);
frameCount++;
}
}
}
示例3: setup
//--------------------------------------------------------------
// Initializes the camera grabber, raw pixel buffers used for frame
// recording/compositing, a UDP listener, the tracker list, and a log file.
// NOTE(review): the malloc'd buffers are never freed here — presumably they
// live for the whole app; confirm, or move to std::vector/ofPixels.
void ofApp::setup(){
//we can now get back a list of devices.
vector<ofVideoDevice> devices = vidGrabber.listDevices();
for(int i = 0; i < devices.size(); i++){
cout << devices[i].id << ": " << devices[i].deviceName;
if( devices[i].bAvailable ){
cout << endl;
}else{
cout << " - unavailable " << endl;
}
}
// NOTE(review): device ID 1 is hard-coded (skips the default device 0) —
// will fail silently on machines with a single camera; confirm intent.
vidGrabber.setDeviceID(1);
vidGrabber.setVerbose(true);
vidGrabber.initGrabber(width, height);
// Raw RGB buffers (3 bytes per pixel): a ring of `record_size` recorded
// frames plus single-frame scratch, background, and merged buffers.
recorded = (unsigned char*)malloc(3 * height * width * record_size);
tmp = (unsigned char*)malloc(3 * height * width);
back = (unsigned char*)malloc(3 * height * width);
merged = (unsigned char*)malloc(3 * height * width);
show = vidGrabber.getPixels();
// Listen for incoming tracking data on UDP port 1511 without blocking update().
udpConnection.Create();
udpConnection.Bind(1511);
udpConnection.SetNonBlocking(true);
for (int i = 0; i < trackers_cnt; i++) {
tracker t = tracker(i);
trackers.push_back(t);
}
// Append-mode log file. NOTE(review): absolute user-specific path — will
// break on any other machine; consider ofToDataPath().
outfile.open("/users/yui/desktop/yellow_imgs/teacher/log.txt", std::ios_base::app);
}
示例4: updateCameraFrame
// Copies the latest camera frame from the global grabber into the AS3
// BitmapData passed as argv[0], converting packed RGB bytes to 32-bit ARGB.
// Returns NULL (no AS3 return value) whether or not a new frame was copied.
//
// Fixes vs. original: removed the unused `offset` local and the placeholder
// rand() initializers for r/g/b (the values were overwritten before use), and
// made `alpha` a uint32_t so `alpha << 24` no longer shifts a set bit into the
// sign of a signed int (undefined behavior).
FREObject updateCameraFrame(FREContext ctx, void* funcData, uint32_t argc, FREObject argv[])
{
	gGrabber.update();
	if( !gGrabber.isFrameNew() ) return NULL;

	FREObject as3Bitmap = argv[0];
	FREBitmapData bitmapData;
	FREAcquireBitmapData(as3Bitmap, &bitmapData);

	unsigned char *pixel = gGrabber.getPixels();
	const uint32_t alpha = 255; // camera has no alpha channel; force opaque
	// AS3 BitmapData rows are stored bottom-up relative to the camera frame,
	// so write rows in reverse. lineStride32 may exceed width (row padding).
	for( uint32_t j = 0; j < bitmapData.height; j++ ){
		uint32_t* ptr = bitmapData.bits32 + bitmapData.lineStride32*(bitmapData.height-j-1);
		for( uint32_t i = 0; i < bitmapData.width; i++ ){
			const uint32_t r = *pixel++;
			const uint32_t g = *pixel++;
			const uint32_t b = *pixel++;
			*ptr++ = (alpha << 24) | (r << 16) | (g << 8) | b;
		}
	}

	FREInvalidateBitmapDataRect(as3Bitmap, 0, 0, bitmapData.width, bitmapData.height);
	FREReleaseBitmapData(as3Bitmap);
	return NULL;
}
示例5: update
// Pulls a camera frame and, when new, runs the face tracker on it.
void update() {
	cam.update();
	if(cam.isFrameNew()) {
		// Wrap the camera pixels as a cv::Mat (no copy) for ofxCv.
		Mat camMat = toCv(cam);
		tracker.update(camMat);
		// NOTE(review): load() on an object named trackerDataSave reads
		// tracker state INTO trackerDataSave — presumably capturing the
		// current tracking result each frame; confirm against its class.
		trackerDataSave.load(tracker);
	}
}
示例6: update
// Classic background-subtraction pipeline: grab a frame, diff it against a
// learned background, threshold to binary, find blobs, and record the biggest
// blob's center into `holePositions`.
void update()
{
    // Update our little offset thingy (wraps 0..1 every ~100 frames).
    offset += 0.01;
    if (offset > 1)
    {
        offset = 0;
    }
    // Update our camera.
    grabber.update();
    // If the camera has a new frame to offer us ...
    if (grabber.isFrameNew())
    {
        // Make a copy of our grabber pixels in the colorImage.
        colorImage.setFromPixels(grabber.getPixelsRef());
        // When we assign a color image to a grayscale image, it is converted automatically.
        grayscaleImage = colorImage;
        // If we set learnBackground to true using the keyboard, we'll take a snapshot of
        // the background and use it to create a clean foreground image.
        if (learnBackground == true)
        {
            // We assign the grayscaleImage to the grayscaleBackgroundImage.
            grayscaleBackgroundImage = grayscaleImage;
            // Now we set learnBakground so we won't set a background unless
            // explicitly directed to with a keyboard command.
            learnBackground = false;
        }
        // Create a difference image by comparing the background and the current grayscale images.
        grayscaleAbsoluteDifference.absDiff(grayscaleBackgroundImage, grayscaleImage);
        // Assign grayscaleAbsoluteDifference to the grayscaleBinary image.
        grayscaleBinary = grayscaleAbsoluteDifference;
        // Then threshold the grayscale image to create a binary image.
        grayscaleBinary.threshold(threshold, invert);
        // Find up to 10 contours (blobs) between 100 pixels and
        // 1 / 3 * (width * height) of the camera in area. Also find holes.
        // (Comment previously claimed a 20-pixel minimum; the code passes 100.)
        contourFinder.findContours(grayscaleBinary, 100, (width * height) / 3.0, 10, true);
        // Get the biggest blob and use it to draw.
        if (contourFinder.nBlobs > 0)
        {
            holePositions.addVertex(contourFinder.blobs[0].boundingRect.getCenter());
        }
        else
        {
            holePositions.clear();
        }
    }
}
示例7: update
// Per-frame update: show the frame rate in the window title and, while any
// key is held down, run face classification on each new camera frame.
void update() {
    ofSetWindowTitle(ofToString(ofGetFrameRate()));
    cam.update();
    // Classification is expensive, so it only runs on new frames and only
    // while a key is pressed — merged into a single guard.
    if (cam.isFrameNew() && ofGetKeyPressed()) {
        results = ccv.classifyFace(cam);
    }
}
示例8: setup
// One-time setup: fixed-rate vsynced rendering on black, a 720p camera feed,
// and an ISF shader chain that reads the camera texture as "inputImage".
void setup()
{
    ofSetVerticalSync(true);
    ofSetFrameRate(60);
    ofBackground(0);

    const int captureWidth = 1280;
    const int captureHeight = 720;
    video.initGrabber(captureWidth, captureHeight);

    // Float render target so the shader keeps precision between passes.
    isf.setup(captureWidth, captureHeight, GL_RGB32F);
    isf.load("isf-test");
    isf.setImage("inputImage", video.getTexture());
}
示例9: update
// On each new camera frame, re-position and re-color all n mesh points from
// the frame's RGB bytes: vertex i sits at (r+20, g+20, b+20) in color space.
void update() {
    vid.update();
    if (!vid.isFrameNew()) {
        return;
    }
    unsigned char* pixels = vid.getPixels();
    for (int idx = 0; idx < n; idx++) {
        // Packed RGB: three consecutive bytes per point.
        const unsigned char r = pixels[3 * idx];
        const unsigned char g = pixels[3 * idx + 1];
        const unsigned char b = pixels[3 * idx + 2];
        mesh.setVertex(idx, ofVec3f(r + 20, g + 20, b + 20));
        mesh.setColor(idx, ofColor(r + 20, g + 20, b + 20));
    }
}
示例10: setup
// One-time setup: fixed-rate vsynced rendering on black, a 720p camera feed,
// and a two-pass shader chain (zoom blur, then lens distortion) fed by the
// camera texture.
void setup()
{
    ofSetFrameRate(60);
    ofSetVerticalSync(true);
    ofBackground(0);

    video.initGrabber(1280, 720);

    // Shaders run in load order: blur first, then distortion.
    chain.setup(1280, 720);
    chain.load("ZoomBlur.fs");
    chain.load("CubicLensDistortion.fs");

    // getTexture() replaces the deprecated getTextureReference(); example 8
    // in this collection already uses the modern accessor.
    chain.setImage(video.getTexture());
}
示例11: update
//--------------------------------------------------------------
void testApp::update(){
vidGrabber.grabFrame();
if(vidGrabber.isFrameNew()) {
colorImg.setFromPixels(vidGrabber.getPixels(), vidGrabber.getWidth(), vidGrabber.getHeight());
colorImg.mirror(false, true);
greyImage = colorImg;
greyImageSmall.scaleIntoMe(greyImage);
haarFinder.findHaarObjects(greyImageSmall);
}
}
示例12: setup
void setup() {
vid.initGrabber(w, h);
mesh.setMode(OF_PRIMITIVE_POINTS);
mesh.addVertices(vector<ofVec3f>(n));
mesh.addColors(vector<ofFloatColor>(n));
}
示例13: openCamera
// FRE entry point: opens the global camera grabber at 720p. Frames are
// consumed by updateCameraFrame(). Always returns NULL to ActionScript.
FREObject openCamera(FREContext ctx, void* funcData, uint32_t argc, FREObject argv[])
{
	const int captureWidth = 1280;
	const int captureHeight = 720;
	gGrabber.initGrabber(captureWidth, captureHeight);
	return NULL;
}
示例14: getCameraFrameSize
// FRE entry point: reports the grabber's frame size by setting integer "w"
// and "h" properties on the ActionScript object passed as argv[0].
// Always returns NULL (the result travels through argv[0]).
FREObject getCameraFrameSize(FREContext ctx, void* funcData, uint32_t argc, FREObject argv[])
{
	const int frameWidth = gGrabber.getWidth();
	const int frameHeight = gGrabber.getHeight();

	FREObject widthObj;
	FREObject heightObj;
	FRENewObjectFromInt32(frameWidth, &widthObj);
	FRENewObjectFromInt32(frameHeight, &heightObj);

	FREObject target = argv[0];
	FRESetObjectProperty(target, (const uint8_t*)"w", widthObj, NULL);
	FRESetObjectProperty(target, (const uint8_t*)"h", heightObj, NULL);
	return NULL;
}
示例15: update
void update()
{
video.update();
float t = ofGetElapsedTimef() * 2;
isf.setUniform<float>("blurAmount", ofNoise(1, 0, 0, t) * 1.5);
isf.update();
}