本文整理汇总了C++中 CaptureRef::checkNewFrame 方法的典型用法代码示例。如果您正苦于以下问题:C++ CaptureRef::checkNewFrame 方法的具体用法?C++ CaptureRef::checkNewFrame 怎么用?C++ CaptureRef::checkNewFrame 使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类 CaptureRef 的用法示例。
在下文中一共展示了CaptureRef::checkNewFrame方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: update
// Advance the scan-line cursor: once enough app frames have elapsed to be
// worth at least one new line (per mLinesPerFrame), and the camera has a new
// frame, consume that many lines, wrapping at the FBO height.
void SlytherinApp::update() {
    uint32_t elapsedFrames = getElapsedFrames();
    // mLastUpdateFrame == UINT32_MAX is the "never updated" sentinel; force the first pass.
    bool needsFrame = mLastUpdateFrame == UINT32_MAX || (elapsedFrames - mLastUpdateFrame) * mLinesPerFrame >= 1.0f;
    if (mCapture && needsFrame && mCapture->checkNewFrame()) {
        // NOTE(review): on the sentinel first pass this unsigned subtraction wraps
        // to elapsedFrames + 1, so the very first update processes that many
        // lines' worth — presumably acceptable, but confirm it is intended.
        uint32_t lineCount = (uint32_t)floorf((elapsedFrames - mLastUpdateFrame) * mLinesPerFrame);
        // (fix) The original fetched the capture surface and constructed a local
        // gl::Texture from it here, then discarded both — an unused GPU upload on
        // every processed frame. Removed as dead code; nothing below used them.
        if (mLineIndex + lineCount < mFBO.getHeight()) {
            // Single segment: lines [mLineIndex, mLineIndex + lineCount) fit before the bottom.
            console() << "process (" << mLineIndex << "-" << mLineIndex+lineCount << ") on frame " << elapsedFrames << endl;
            mLineIndex += lineCount;
        } else {
            // Two segments: finish [mLineIndex, height - 1], then wrap to the top
            // and continue with the overflow.
            uint32_t overflowLineCount = mLineIndex + lineCount - mFBO.getHeight() + 1;
            console() << "process (" << mLineIndex << "-" << mFBO.getHeight() - 1 << ") and (0-" << overflowLineCount << ") on frame " << elapsedFrames << endl;
            mLineIndex = overflowLineCount;
        }
        mLastUpdateFrame = elapsedFrames;
    }
}
示例2: update
// Push the newest camera frame to the streaming-server queue and refresh the
// on-screen preview texture plus the status line. Two compile-time paths:
// JPEG-compressed streaming or raw pixel streaming.
void CinderVideoStreamServerApp::update()
{
if( mCapture && mCapture->checkNewFrame() ) {
Surface8uRef surf = mCapture->getSurface();
#ifdef USE_JPEG_COMPRESSION
// JPEG path: encode the frame into an in-memory stream at mQuality, then
// decode it back into a Surface so the preview shows the actual compressed result.
OStreamMemRef os = OStreamMem::create();
DataTargetRef target = DataTargetStream::createRef( os );
writeImage( target, *surf, ImageTarget::Options().quality(mQuality), "jpeg" );
const void *data = os->getBuffer();
// tell() after writeImage == number of bytes encoded.
size_t dataSize = os->tell();
// Running byte total, used for the kB/sec readout below.
totalStreamSize += dataSize;
// Copy the encoded bytes into an owned Buffer before the stream goes away.
BufferRef bufRef = Buffer::create(dataSize);
memcpy(bufRef->getData(), data, dataSize);
SurfaceRef jpeg = Surface::create(loadImage( DataSourceBuffer::create(bufRef)), SurfaceConstraintsDefault(), false );
// NOTE(review): pushes a raw pixel pointer into the queue — presumably the
// queue copies the data before `jpeg` is released; confirm against the server code.
queueToServer->push(jpeg->getData());
mTexture = gl::Texture::create( *jpeg );
// Status line: quality %, average throughput since launch, current fps.
mStatus.assign("Streaming JPG (")
.append(std::to_string((int)(mQuality*100.0f)))
.append("%) ")
.append(std::to_string((int)(totalStreamSize*0.001/getElapsedSeconds())))
.append(" kB/sec ")
.append(std::to_string((int)getFrameRate()))
.append(" fps ");
#else
// Raw path: stream the uncompressed surface pixels directly.
// NOTE(review): same raw-pointer lifetime caveat as the JPEG path above.
queueToServer->push(surf->getData());
mTexture = gl::Texture::create( *surf );
mStatus.assign("Streaming ").append(std::to_string((int)getFrameRate())).append(" fps");
#endif
}
}
示例3: update
// Refresh the preview texture whenever the camera delivers a new frame.
void projections_balletApp::update(){
    if( !mCapture || !mCapture->checkNewFrame() )
        return;
    mTexture = gl::Texture::create( mCapture->getSurface() );
}
示例4: update
void HexagonMirrorApp::update()
{
	// Show the newest webcam frame if one is available; otherwise fall back
	// to the placeholder texture (note: this resets to the dummy on every
	// update without a fresh frame, matching the original behavior).
	const bool hasNewFrame = mCapture && mCapture->checkNewFrame();
	mCaptureTexture = hasNewFrame ? gl::Texture( mCapture->getSurface() ) : mDummyTexture;
}
示例5: update
// Grab the newest camera frame, feed it to the calibration image collector,
// and — once calibrated — undistort each incoming frame.
void CinderCalibrationApp::update()
{
    // (fix) Guard against a null capture device before dereferencing,
    // consistent with the other update() examples in this file.
    if ( mCapture && mCapture->checkNewFrame() ) {
        mCaptureSurf = mCapture->getSurface();
        mCaptureTex = gl::Texture::create( mCaptureSurf );
        mCaptureMat = toOcv( mCaptureSurf );
        collectImages();
        switch ( mState ) {
        case STATE_CALIBRATING:
            if ( callibrate() ) {
                mState = STATE_CALIBRATED;
            } else {
                // Calibration failure is treated as fatal in this sample app.
                console() << "Calibration failed." << endl;
                exit(1);
            }
            // deliberate fall-through: undistort immediately after a
            // successful calibration rather than waiting for the next frame
        case STATE_CALIBRATED:
            undistort( mCaptureMat, mUndistortedMat, intrinsic, distortion );
        }
    }
}
示例6: update
void camerasApp::update()
{
	// Nothing to do unless the camera exists and has produced a new frame.
	if( !mCapture )
		return;
	if( mCapture->checkNewFrame() )
		mTexture = gl::Texture::create( mCapture->getSurface());
}
示例7: update
void WayFinderApp::update()
{
if(getElapsedFrames() % FRAME_COUNT_THRESHOLD == 0) {
detected = false;
// TODO: Consider converting capture to grayscale or blurring then thresholding to improve performance.
if(capture && capture->checkNewFrame()) {
frame = toOcv(capture->getSurface());
//cv::Mat frameGray, frameBlurred, frameThresh, foreGray, backGray;
//cvtColor(frame, frameGray, CV_BGR2GRAY);
int blurAmount = 10;
//cv::blur(frame, frameBlurred, cv::Size(blurAmount, blurAmount));
//threshold(frameBlurred, frameThresh, 100, 255, CV_THRESH_BINARY);
// Get all contours.
//bg.operator()(frameThresh,fore);
bg.operator()(frame, fore);
bg.getBackgroundImage(back);
cv::erode(fore, fore, cv::Mat());
cv::dilate(fore, fore, cv::Mat());
cv::findContours(fore, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
// Get largest contour: http://stackoverflow.com/questions/15012073/opencv-draw-draw-contours-of-2-largest-objects
unsigned largestIndex = 0;
unsigned largestContour = 0;
for(unsigned i = 0; i < contours.size(); i++) {
if(contours[i].size() > largestContour) {
largestContour = contours[i].size();
largestIndex = i;
}
}
vector<std::vector<cv::Point>> hack;
cv::Rect rect;
cv::Point center;
if(contours.size() > 0) {
hack.push_back(contours[largestIndex]);
// Find bounding rectangle for largest countour.
rect = boundingRect(contours[largestIndex]);
// Make sure the blog is large enough to be a track-worthy.
println("Rext area = " + boost::lexical_cast<std::string>(rect.area()));
if(rect.area() >= 5000) { // TODO: Tweak this value.
// Get center of rectangle.
center = cv::Point(
rect.x + (rect.width / 2),
rect.y + (rect.height / 2)
);
// Show guide.
spotlightCenter2D.x = (float)center.x;
spotlightCenter2D.y = (float)center.y;
spotlightCenter3D.x = (float)center.x;
spotlightCenter3D.y = (float)center.y;
//spotlightRadius = (rect.width + rect.y) / 2;
detected = true;
}
}
// When debug mode is off, the background should be black.
if(debugView) {
if(contours.size() > 0) {
cv::drawContours(frame, contours, -1, cv::Scalar(0, 0, 255), 2);
cv::drawContours(frame, hack, -1, cv::Scalar(255, 0, 0), 2);
rectangle(frame, rect, cv::Scalar(0, 255, 0), 3);
circle(frame, center, 10, cv::Scalar(0, 255, 0), 3);
}
mTexture = gl::Texture(fromOcv(frame));
}
}
// TODO: Create control panel for all inputs.
}
}