本文整理汇总了C++中ofVideoGrabber::getPixels方法的典型用法代码示例。如果您正苦于以下问题:C++ ofVideoGrabber::getPixels方法的具体用法?C++ ofVideoGrabber::getPixels怎么用?C++ ofVideoGrabber::getPixels使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类ofVideoGrabber
的用法示例。
在下文中一共展示了ofVideoGrabber::getPixels方法的13个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: update
void update() {
cam->update();
if(cam->isFrameNew()) {
ofPixels& pix = cam->getPixels();
int skip = 2;
int range = mouseX / 25;
for(int y = 0; y < pix.getHeight(); y += skip) {
for(int x = 0; x < pix.getWidth(); x += skip) {
ofColor cur = pix.getColor(x, y);
ofColor result(0, 0, 0, 0);
if(cur.r < range || cur.r > 255-range) {
result.r = 255;
result.a = 255;
}
if(cur.g < range || cur.g > 255-range) {
result.g = 255;
result.a = 255;
}
if(cur.b < range || cur.b > 255-range) {
result.b = 255;
result.a = 255;
}
clipping.setColor(x, y, result);
}
}
clipping.update();
if(recording) {
string fn = "images/" + ofToString(frameCount, 6, '0') + ".jpg";
imageSaver.saveImage(pix, fn);
frameCount++;
}
}
}
示例2: setup
//--------------------------------------------------------------
// One-time app setup: lists the available capture devices, opens the
// grabber, allocates raw RGB frame buffers, opens a UDP socket for
// incoming tracker data, builds the tracker objects, and opens the
// log file in append mode.
void ofApp::setup(){
//we can now get back a list of devices.
vector<ofVideoDevice> devices = vidGrabber.listDevices();
for(int i = 0; i < devices.size(); i++){
cout << devices[i].id << ": " << devices[i].deviceName;
if( devices[i].bAvailable ){
cout << endl;
}else{
cout << " - unavailable " << endl;
}
}
// NOTE(review): device index 1 is hard-coded; on machines with a single
// camera this may fail to open — confirm the intended device.
vidGrabber.setDeviceID(1);
vidGrabber.setVerbose(true);
vidGrabber.initGrabber(width, height);
// Raw RGB working buffers (3 bytes per pixel). NOTE(review): allocated
// with malloc and never freed here — presumably released on app teardown.
recorded = (unsigned char*)malloc(3 * height * width * record_size);
tmp = (unsigned char*)malloc(3 * height * width);
back = (unsigned char*)malloc(3 * height * width);
merged = (unsigned char*)malloc(3 * height * width);
// Keep a handle to the grabber's pixel data for display.
show = vidGrabber.getPixels();
// Listen for incoming tracker packets on UDP port 1511, non-blocking.
udpConnection.Create();
udpConnection.Bind(1511);
udpConnection.SetNonBlocking(true);
// One tracker object per expected tracker, indexed 0..trackers_cnt-1.
for (int i = 0; i < trackers_cnt; i++) {
tracker t = tracker(i);
trackers.push_back(t);
}
// Append-mode log file (path is machine-specific).
outfile.open("/users/yui/desktop/yellow_imgs/teacher/log.txt", std::ios_base::app);
}
示例3: updateCameraFrame
// AIR native-extension bridge: copies the newest camera frame into the
// AS3 BitmapData passed as argv[0], converting packed RGB to 32-bit ARGB.
// Returns NULL (no AS3 return value).
//
// Fixes vs. original: removed the unused `offset` local and the dead
// rand() initialisers of r/g/b (they were overwritten before first use),
// and made the channel values/alpha unsigned so `alpha << 24` no longer
// shifts into the sign bit of a signed int (undefined behaviour).
FREObject updateCameraFrame(FREContext ctx, void* funcData, uint32_t argc, FREObject argv[])
{
    gGrabber.update();
    // Nothing to do until the grabber actually has a fresh frame.
    if( !gGrabber.isFrameNew() ) return NULL;
    FREObject as3Bitmap = argv[0];
    FREBitmapData bitmapData;
    FREAcquireBitmapData(as3Bitmap, &bitmapData);
    // Camera frames are tightly packed RGB, 3 bytes per pixel.
    unsigned char *pixel = gGrabber.getPixels();
    uint32_t* ptr = bitmapData.bits32;
    const uint32_t alpha = 255;  // frames are fully opaque
    for( uint32_t j = 0; j < bitmapData.height; j++ ){
        // BitmapData rows are stored bottom-up, so write rows in reverse;
        // lineStride32 may exceed width (row padding), hence the per-row seek.
        ptr = bitmapData.bits32 + bitmapData.lineStride32*(bitmapData.height-j-1);
        for( uint32_t i = 0; i < bitmapData.width; i++ ){
            const uint32_t r = *pixel++;
            const uint32_t g = *pixel++;
            const uint32_t b = *pixel++;
            *ptr++ = (alpha << 24) | (r << 16) | (g << 8) | b;  // ARGB
        }
    }
    FREInvalidateBitmapDataRect(as3Bitmap, 0, 0, bitmapData.width, bitmapData.height);
    FREReleaseBitmapData(as3Bitmap);
    return NULL;
}
示例4: update
// Maps each of the n camera pixels to one mesh vertex: the pixel's RGB
// values (each offset by +20) become both the vertex position and colour.
void update() {
    vid.update();
    if(!vid.isFrameNew()) {
        return;
    }
    unsigned char* data = vid.getPixels();
    int byteIdx = 0;
    for(int v = 0; v < n; v++) {
        unsigned char& r = data[byteIdx++];
        unsigned char& g = data[byteIdx++];
        unsigned char& b = data[byteIdx++];
        mesh.setVertex(v, ofVec3f(r + 20, g + 20, b + 20));
        mesh.setColor(v, ofColor(r + 20, g + 20, b + 20));
    }
}
示例5: update
//--------------------------------------------------------------
// Grabs a camera frame; when a new one arrives, mirrors it horizontally,
// converts it to greyscale, downsamples it, and runs the Haar object
// detector on the small image.
void testApp::update(){
    vidGrabber.grabFrame();
    if(!vidGrabber.isFrameNew()) {
        return;
    }
    colorImg.setFromPixels(vidGrabber.getPixels(), vidGrabber.getWidth(), vidGrabber.getHeight());
    colorImg.mirror(false, true);  // flip on the vertical axis only
    greyImage = colorImg;
    greyImageSmall.scaleIntoMe(greyImage);
    haarFinder.findHaarObjects(greyImageSmall);
}
示例6: draw
void draw() {
ofPushMatrix();
float screenWidth = config["screen"]["width"];
float screenHeight = config["screen"]["height"];
float camWidth = config["camera"]["width"];
float camHeight = config["camera"]["height"];
ofTranslate(screenWidth / 2, screenHeight / 2);
ofRotateZDeg(config["camera"]["rotate"]);
ofTranslate(-camWidth / 2, -camHeight / 2);
if(cam->isInitialized()) {
cam->draw(0,0);
}
clipping.draw(0, 0);
ofPopMatrix();
if(cam->isInitialized()) {
drawHistogram(cam->getPixels(), mouseY);
}
}
示例7: update
// Per-frame update. Periodically re-learns the background for both camera
// feeds, runs background-subtraction blob detection on each feed, then
// advances the swarm animation state machine.
// NOTE: this listing is truncated — see the omission marker at the end.
void ofApp::update() {
// Every timeToReset ms, schedule a fresh background capture for both feeds.
if (ofGetElapsedTimeMillis() - lastTime >= timeToReset) {
lastTime = ofGetElapsedTimeMillis();
bLearnBackground = true;
bLearnBackground1 = true;
}
// Blob counts feed the level inputs consumed by the animation state machine.
micLevelsTopNew[4] = contourFinder.nBlobs;
micLevelsTopNew[5] = contourFinder1.nBlobs;
/* NEW CAMERA CODE */
bool bNewFrame = false;
bool bNewFrame1 = false;
vidGrabber.update();
bNewFrame = vidGrabber.isFrameNew();
vidGrabber1.update();
// BUG(review): this should almost certainly be vidGrabber1.isFrameNew();
// as written, camera 1's processing is gated on camera 0's frame arrival.
bNewFrame1 = vidGrabber.isFrameNew();
if (bNewFrame){
colorImg.setFromPixels(vidGrabber.getPixels(), 320,240);
grayImage = colorImg;
if (bLearnBackground == true){
grayBg = grayImage; // the = sign copies the pixels from grayImage into grayBg (operator overloading)
bLearnBackground = false;
}
// Threshold the |current - background| difference, then find blobs
// between 20 px and a third of the frame area, up to 10 blobs, with holes.
grayDiff.absDiff(grayBg, grayImage);
grayDiff.threshold(threshold);
// NOTE(review): (340*240)/3 looks like a typo for (320*240)/3 — the frame
// is 320x240 above. Confirm before changing.
contourFinder.findContours(grayDiff, 20, (340*240)/3, 10, true);
}
if (bNewFrame1){
colorImg1.setFromPixels(vidGrabber1.getPixels(), 320,240);
grayImage1 = colorImg1;
if (bLearnBackground1 == true){
grayBg1 = grayImage1;
bLearnBackground1 = false;
}
grayDiff1.absDiff(grayBg1, grayImage1);
grayDiff1.threshold(threshold);
// NOTE(review): same suspected (340*240)/3 typo as above.
contourFinder1.findContours(grayDiff1, 20, (340*240)/3, 10, true);
}
switch (ANIMATION_STATE) {
case ACTIVATED: {
// Find the input channel with the highest level.
int max_pos = 0;
int max_element = -1000;
for (int i = 0; i < 12; i++) {
if (micLevelsTopNew[i] > max_element) {
max_pos = i;
max_element = micLevelsTopNew[i];
}
}
// Rebuild the bottom swarm's Gaussian curve: a = amplitude,
// b = centre (pixels), c = width.
for (int x = 0; x < 1280; x++) {
float top = pow(x-bottomSwarm.b,2);
float bottom = 2*pow(bottomSwarm.c,2);
bottomSwarm.curve[x] = bottomSwarm.a*exp(-(top/bottom));
}
ofVec2f norm = swarmPosition;
bottomSwarm.b = norm.normalize().x*1280-160;
// ofVec2f btm = absColumnPositionTop[max_pos];
ofVec2f btm = cameraPositionsTop[max_pos];
// Steer the swarm toward the strongest camera position, slowing as it
// approaches (arrive behaviour with a 100 px easing radius).
ofVec2f desired = btm - swarmPosition;
// BUG(review): Euclidean distance should be x*x + y*y — the second term
// here is y+y. Likely meant desired.y*desired.y; confirm before fixing.
float d = sqrt((desired.x*desired.x) + (desired.y+desired.y));
desired.normalize();
if (d < 100) {
float m = ofMap(d, 0.0, 100.0, 0.0, 4.0);
desired *= m;
} else {
desired *= 4.0;
}
swarmPosition += desired;
/* UPDATE WAVES */
// Map the curve onto display brightness between ambientLevel and 255.
for (int x = 0; x < 1280; x++) {
gaussianBottom[x] = ofMap(bottomSwarm.curve[x], 0.0, 1.1, ambientLevel, 255.0);
}
break;
}
case DEACTIVATED: {
// Same Gaussian rebuild as the ACTIVATED case, without the steering.
for (int x = 0; x < 1280; x++) {
float top = pow(x-bottomSwarm.b,2);
float bottom = 2*pow(bottomSwarm.c,2);
bottomSwarm.curve[x] = bottomSwarm.a*exp(-(top/bottom));
}
//......... remainder of this function omitted in this listing .........
示例8: update
//--------------------------------------------------------------
// Polls the grabber for a new frame, then exposes its pixel data via `show`.
void ofApp::update(){
    // BUG FIX: update() must run before getPixels(); the original fetched
    // the pixels first, so `show` referred to the previous frame's data
    // for the whole of the current frame.
    vidGrabber.update();
    show = vidGrabber.getPixels();
}
示例9: update
//--------------------------------------------------------------
void testApp::update(){
// grab new frame
videoGrabber.update();
// if there is a new frame....
if(videoGrabber.isFrameNew()) {
// RGB textures don't seem to work well. so need to copy the vidgrabber data into a RGBA texture
int pixelIndex = 0;
for(int i=0; i<vidWidth; i++) {
for(int j=0; j<vidHeight; j++) {
int indexRGB = pixelIndex * 3;
int indexRGBA = pixelIndex * 4;
pixels[indexRGBA ] = videoGrabber.getPixels()[indexRGB ];
pixels[indexRGBA+1] = videoGrabber.getPixels()[indexRGB+1];
pixels[indexRGBA+2] = videoGrabber.getPixels()[indexRGB+2];
pixels[indexRGBA+3] = 255;
pixelIndex++;
}
}
// write the new pixel data into the OpenCL Image (and thus the OpenGL texture)
clImage[activeImageIndex].write(pixels);
if(doBlur) {
msa::OpenCLKernel *kernel = openCL.kernel("msa_boxblur");
for(int i=0; i<blurAmount; i++) {
cl_int offset = i * i / 2 + 1;
kernel->setArg(0, clImage[activeImageIndex].getCLMem());
kernel->setArg(1, clImage[1-activeImageIndex].getCLMem());
kernel->setArg(2, offset);
kernel->run2D(vidWidth, vidHeight);
activeImageIndex = 1 - activeImageIndex;
}
}
if(doFlipX) {
msa::OpenCLKernel *kernel = openCL.kernel("msa_flipx");
kernel->setArg(0, clImage[activeImageIndex].getCLMem());
kernel->setArg(1, clImage[1-activeImageIndex].getCLMem());
kernel->run2D(vidWidth, vidHeight);
activeImageIndex = 1 - activeImageIndex;
}
if(doFlipY) {
msa::OpenCLKernel *kernel = openCL.kernel("msa_flipy");
kernel->setArg(0, clImage[activeImageIndex].getCLMem());
kernel->setArg(1, clImage[1-activeImageIndex].getCLMem());
kernel->run2D(vidWidth, vidHeight);
activeImageIndex = 1 - activeImageIndex;
}
if(doGreyscale) {
msa::OpenCLKernel *kernel = openCL.kernel("msa_greyscale");
kernel->setArg(0, clImage[activeImageIndex].getCLMem());
kernel->setArg(1, clImage[1-activeImageIndex].getCLMem());
kernel->run2D(vidWidth, vidHeight);
activeImageIndex = 1 - activeImageIndex;
}
if(doInvert) {
msa::OpenCLKernel *kernel = openCL.kernel("msa_invert");
kernel->setArg(0, clImage[activeImageIndex].getCLMem());
kernel->setArg(1, clImage[1-activeImageIndex].getCLMem());
kernel->run2D(vidWidth, vidHeight);
activeImageIndex = 1 - activeImageIndex;
}
if(doThreshold) {
msa::OpenCLKernel *kernel = openCL.kernel("msa_threshold");
kernel->setArg(0, clImage[activeImageIndex].getCLMem());
kernel->setArg(1, clImage[1-activeImageIndex].getCLMem());
kernel->setArg(2, threshLevel);
kernel->run2D(vidWidth, vidHeight);
activeImageIndex = 1 - activeImageIndex;
}
// calculate capture fps
static float lastTime = 0;
float nowTime = ofGetElapsedTimef();
float timeDiff = nowTime - lastTime;
if(timeDiff > 0 ) captureFPS = 0.9f * captureFPS + 0.1f / timeDiff;
lastTime = nowTime;
}
}
示例10: update
// Convenience overload for live camera input: forwards the grabber's raw
// frame to the pixel-based update. A colour (RGB) image type is assumed.
void ofxOpticalFlowLK :: update ( ofVideoGrabber& source )
{
	auto&& framePixels = source.getPixels();  // grabber's raw frame data
	update( framePixels, source.width, source.height, OF_IMAGE_COLOR );
}
示例11: pixelate
// Pixelates the grabber's current frame: copies its pixels into a local
// RGB buffer and runs the pixel-based pixelate() overload on the copy.
// (`video` stays by value to preserve the existing public signature.)
//
// Fix vs. original: removed the unused locals R, G and B.
void ofxImageTS::pixelate(ofVideoGrabber video, int pixelRatio) {
    ofPixels copy;
    copy.allocate(video.getWidth(), video.getHeight(), OF_PIXELS_RGB);
    copy = video.getPixels();
    pixelate(copy, pixelRatio);
}
示例12: update
// Copies the grabber's current frame into the working colour image and
// then runs this object's internal processing pass.
void TTimbre::update(ofVideoGrabber input){
	auto frameW = input.getWidth();
	auto frameH = input.getHeight();
	originalImage.setFromPixels(input.getPixels(), frameW, frameH, OF_IMAGE_COLOR);
	internalUpdate();
}
示例13: update
// Convenience overload for live camera input: hands the frame's raw byte
// buffer to the pixel-based update. A colour (RGB) image type is assumed.
void ofxOpticalFlowFarneback::update(ofVideoGrabber& source) {
	auto&& framePixels = source.getPixels();
	update(framePixels.getData(), source.getWidth(), source.getHeight(), OF_IMAGE_COLOR);
}