This article collects typical usage examples of the C++ method ofVideoGrabber::getWidth. If you have been wondering what exactly ofVideoGrabber::getWidth does, how to use it, or what it looks like in real code, the curated examples here may help. You can also explore further usage examples of ofVideoGrabber, the class this method belongs to.
Below you will find 9 code examples of the ofVideoGrabber::getWidth method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
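Before the collected examples, here is a minimal sketch of the pattern they all share: initialize the grabber, then call getWidth()/getHeight() to size whatever buffers depend on the capture resolution. The ofApp class, the frameCopy member, and the 640x480 request below are illustrative assumptions, not taken from the examples that follow.
#include "ofMain.h"

// Illustrative app skeleton (assumed, not part of the examples below).
class ofApp : public ofBaseApp {
public:
    void setup();
    ofVideoGrabber vidGrabber; // camera capture device
    ofImage frameCopy;         // buffer sized from the grabber
};

void ofApp::setup(){
    // Request a 640x480 capture; the device may deliver a different size,
    // which is why the buffer is allocated from getWidth()/getHeight().
    vidGrabber.initGrabber(640, 480);
    frameCopy.allocate(vidGrabber.getWidth(), vidGrabber.getHeight(), OF_IMAGE_COLOR);
}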
Example 1: setup
//--------------------------------------------------------------
void testApp::setup(){
    vidGrabber.setVerbose(true);
    vidGrabber.initGrabber(640, 480);

    // allocate the OpenCV image buffers at the grabber's actual capture size
    colorImg.allocate(vidGrabber.getWidth(), vidGrabber.getHeight());
    greyImage.allocate(vidGrabber.getWidth(), vidGrabber.getHeight());
    greyImageSmall.allocate(120, 90);

    // load the Haar cascade used for face detection
    haarFinder.setup("haarcascade_frontalface_alt2.xml");

    img.loadImage("stevejobs.png");
    img.setAnchorPercent(0.5, 0.5);

    ofEnableAlphaBlending();
}
Example 2: setup
void ModeFour::setup( ofVideoGrabber &vidGrabber, vector<MovingBackground> &bkgListFour ) {
    width = ofGetWindowWidth();
    height = ofGetWindowHeight();

    // setting up lines and control points
    int numLines = ofGetWindowWidth() / 10;
    int numBreaks = ofGetWindowHeight() / 8;
    for (int i = -15; i < numLines + 15; i++){
        ofPolyline line;
        lineList.push_back(line);
        for (int j = -15; j < numBreaks + 15; j++){
            Curtain c;
            c.setup(ofVec2f(i * 10, j * 8));
            pList.push_back(c);
            lineList[i + 15].addVertex(ofVec2f(c.pos.x, c.pos.y));
        }
    }

    background.loadImage("tint.jpg");

    // setting up flowsolver
    flowSolver.setup(vidGrabber.getWidth(), vidGrabber.getHeight(), 0.5, 3, 10, 1, 7, 1.5, false, false);

    bkgListFour[0].setup("bkg_modeFour_0.png");
    bkgListFour[1].setup("bkg_modeFour_1.png");
    bkgListFour[2].setup("bkg_modeFour_2.png");
    bkgListFour[3].setup("bkg_modeFour_3.png");
}
Example 3: update
//--------------------------------------------------------------
void testApp::update(){
    vidGrabber.grabFrame();
    if(vidGrabber.isFrameNew()) {
        colorImg.setFromPixels(vidGrabber.getPixels(), vidGrabber.getWidth(), vidGrabber.getHeight());
        colorImg.mirror(false, true);
        greyImage = colorImg;
        greyImageSmall.scaleIntoMe(greyImage);
        haarFinder.findHaarObjects(greyImageSmall);
    }
}
Example 4: getCameraFrameSize
FREObject getCameraFrameSize(FREContext ctx, void* funcData, uint32_t argc, FREObject argv[])
{
    // Adobe AIR native extension call: report the grabber's frame size by
    // writing "w" and "h" properties onto the ActionScript object in argv[0].
    int w = gGrabber.getWidth();
    int h = gGrabber.getHeight();

    FREObject fX, fY;
    FRENewObjectFromInt32(w, &fX);
    FRENewObjectFromInt32(h, &fY);

    FREObject value = argv[0];
    FRESetObjectProperty(value, (const uint8_t*)"w", fX, NULL);
    FRESetObjectProperty(value, (const uint8_t*)"h", fY, NULL);
    return NULL;
}
Example 5: setup
//--------------------------------------------------------------
void testApp::setup(){
    ofBackground(50, 50, 50);

    // dump everything to console
    ofSetLogLevel(OF_LOG_VERBOSE);

    // disable vsync (to allow >60fps)
    ofSetVerticalSync(false);

    // init grabber
    videoGrabber.initGrabber(640, 480);
    vidWidth = videoGrabber.getWidth();
    vidHeight = videoGrabber.getHeight();

    // allocate temp buffer
    pixels = new unsigned char[vidWidth * vidHeight * 4];

    // init OpenCL from OpenGL context to enable GL-CL data sharing
    openCL.setupFromOpenGL();

    // create OpenCL textures and related OpenGL textures
    clImage[0].initWithTexture(vidWidth, vidHeight, GL_RGBA);
    clImage[1].initWithTexture(vidWidth, vidHeight, GL_RGBA);

    // load and compile OpenCL program
    openCL.loadProgramFromFile("MSAOpenCL/ImageProcessing.cl");

    // load kernels
    openCL.loadKernel("msa_boxblur");
    openCL.loadKernel("msa_flipx");
    openCL.loadKernel("msa_flipy");
    openCL.loadKernel("msa_greyscale");
    openCL.loadKernel("msa_invert");
    openCL.loadKernel("msa_threshold");
}
Example 6: update
// Called every frame.
void update() {
    // Update our camera.
    grabber.update();

    // If the camera has a new frame to offer us ...
    if (grabber.isFrameNew())
    {
        // Get a reference (denoted by &) to the camera's pixels. Getting a
        // reference means that we won't have to make a copy of all of the
        // frame's pixels (since we only need one column anyway). This means our
        // program requires less processing power.
        //
        // const prevents us from accidentally modifying the camera's pixels.
        const ofPixels& cameraPixels = grabber.getPixelsRef();

        // Choose a slit location. In this case we'll collect slits from the
        // column in the middle of the camera feed.
        int slitPositionX = grabber.getWidth() / 2;

        // Cycle through each pixel in the selected column and place that pixel
        // at a position x = xPosition and y equal to the same position as the
        // original.
        for (int y = 0; y < grabber.getHeight(); y++)
        {
            // Get the pixel as a color at x / y from the grabber.
            ofColor pixelFromGrabber = cameraPixels.getColor(slitPositionX, y);

            // Set that pixel color to the x / y position in the output pixels.
            pixels.setColor(xPosition, y, pixelFromGrabber);
        }

        // Increment our xPosition so next update we'll draw a column shifted to
        // the right by one pixel.
        xPosition = xPosition + 1;

        // If our xPosition is greater than or equal to the width of the display
        // pixels, reset our x position to 0.
        if (xPosition >= pixels.getWidth())
        {
            xPosition = 0;
        }
    }
}
Example 7: pixelate
void ofxImageTS::pixelate(ofVideoGrabber video, int pixelRatio) {
    ofPixels R, G, B, copy;
    copy.allocate(video.getWidth(), video.getHeight(), OF_PIXELS_RGB);
    copy = video.getPixels();
    pixelate(copy, pixelRatio);
}
Example 8: update
void TTimbre::update(ofVideoGrabber input){
    // copy the current camera frame into originalImage as an RGB color image
    originalImage.setFromPixels(input.getPixels(), input.getWidth(), input.getHeight(), OF_IMAGE_COLOR);
    internalUpdate();
}
Example 9: update
void ofxOpticalFlowFarneback::update(ofVideoGrabber& source) {
    update(source.getPixels().getData(), source.getWidth(), source.getHeight(), OF_IMAGE_COLOR); // assume colour image type.
}