本文整理汇总了C++中VideoFrame::getWidth方法的典型用法代码示例。如果您正苦于以下问题:C++ VideoFrame::getWidth方法的具体用法?C++ VideoFrame::getWidth怎么用?C++ VideoFrame::getWidth使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类VideoFrame
的用法示例。
在下文中一共展示了VideoFrame::getWidth方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: draw
void RecLoopRender::draw(){
	// Composite the live feed with the looped buffer frame using a MIN/MAX
	// blend, then grab the composited result back into the buffer frame,
	// forming a video feedback loop.
	VideoFrame * bufferFrame = buffer->getNextVideoFrame();
	VideoFrame * liveFrame = live->getNextVideoFrame();
	if(bufferFrame!=nullptr && liveFrame!=nullptr){
		if(!imageAllocated){
			// Lazily size the read-back image to the live stream's dimensions.
			image.allocate(liveFrame->getWidth(),liveFrame->getHeight(),OF_IMAGE_COLOR);
			imageAllocated=true;
		}
		ofEnableAlphaBlending();
		ofSetColor(tintR,tintG,tintB,alpha);
		// GL_MAX keeps the brighter of live/buffer per channel, GL_MIN the darker.
		if(minmaxBlend)
			glBlendEquationEXT(GL_MAX);
		else
			glBlendEquationEXT(GL_MIN);
		liveFrame->getTextureRef().draw(0,0);
		if(!stopped){
			bufferFrame->getTextureRef().draw(0,0);
			// Read the blended framebuffer back and push it into the buffer
			// frame's texture so the next iteration re-blends it (feedback).
			image.grabScreen(0,0,liveFrame->getWidth(),liveFrame->getHeight());
			bufferFrame->getTextureRef().loadData(image.getPixelsRef());
		}
		ofDisableAlphaBlending();
		glBlendEquationEXT(GL_FUNC_ADD); // restore the default blend equation
	}
	// BUGFIX: release whichever frames were actually obtained. Previously the
	// releases lived inside the two-frame branch, so whenever exactly one
	// stream produced a frame that frame's reference was leaked every draw.
	if(liveFrame!=nullptr)
		liveFrame->release();
	if(bufferFrame!=nullptr)
		bufferFrame->release();
}
示例2: render
void Window::render(const VideoFrame& frame)
{
LogDebug("Rendering frame " << frame.getId());
glClear(GL_COLOR_BUFFER_BIT);
glEnableVertexAttribArray(0);
glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer);
glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 0, (void*) 0);
glEnableVertexAttribArray(1);
glBindBuffer(GL_ARRAY_BUFFER, uvBuffer);
glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, (void*) 0);
// TODO: consider linesize padding here
// TODO: use glTexSubImage2D for more performance
glTexImage2D(GL_TEXTURE_2D,
0,
GL_RED,
frame.getWidth(),
frame.getHeight(),
0,
GL_RED,
GL_UNSIGNED_BYTE,
frame.getLumaData());
glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, (void*) 0);
glDisableVertexAttribArray(1);
glDisableVertexAttribArray(0);
glfwSwapBuffers(glfwWindow);
}
示例3: doProcessFrame
bool VideoEncoderX264or5::doProcessFrame(Frame *org, Frame *dst)
{
	// Encode one raw video frame (org) into the coded output frame (dst).
	// Returns false, after logging, if any stage of the pipeline fails.
	if (org == nullptr || dst == nullptr) {
		utils::errorMsg("Error encoding video frame: org or dst are NULL");
		return false;
	}

	// Both frames must actually be VideoFrames, not some other Frame subtype.
	VideoFrame* rawFrame = dynamic_cast<VideoFrame*>(org);
	VideoFrame* codedFrame = dynamic_cast<VideoFrame*>(dst);
	if (rawFrame == nullptr || codedFrame == nullptr) {
		utils::errorMsg("Error encoding video frame: org and dst MUST be VideoFrame");
		return false;
	}

	// Pipeline: adapt encoder settings -> stage input picture -> encode.
	if (!reconfigure(rawFrame, codedFrame)) {
		utils::errorMsg("Error encoding video frame: reconfigure failed");
		return false;
	}
	if (!fill_x264or5_picture(rawFrame)) {
		utils::errorMsg("Could not fill x264_picture_t from frame");
		return false;
	}
	if (!encodeFrame(codedFrame)) {
		utils::errorMsg("Could not encode video frame");
		return false;
	}

	// Propagate the raw frame's dimensions onto the coded output.
	codedFrame->setSize(rawFrame->getWidth(), rawFrame->getHeight());
	return true;
}
示例4: render_frame
void SHMSink::render_frame(VideoFrame& src)
{
	// Scale the source frame into the shared-memory area as BGRA and signal
	// the consumer via the notification semaphore.
	VideoFrame dst;
	VideoScaler scaler;

	const int width = src.getWidth();
	const int height = src.getHeight();
	const int format = VIDEO_PIXFMT_BGRA;
	size_t bytes = dst.getSize(width, height, format);

	shm_lock();

	if (!resize_area(sizeof(SHMHeader) + bytes)) {
		ERROR("Could not resize area");
		// BUGFIX: the failure path previously returned while still holding
		// the shm lock, deadlocking every later writer/reader of the area.
		shm_unlock();
		return;
	}

	// Scale directly into the shared-memory buffer.
	dst.setDestination(shm_area_->data, width, height, format);
	scaler.scale(src, dst);

#ifdef DEBUG_FPS
	// Log the observed frame rate roughly once per second.
	const std::chrono::time_point<std::chrono::system_clock> currentTime = std::chrono::system_clock::now();
	const std::chrono::duration<double> seconds = currentTime - lastFrameDebug_;
	frameCount_++;
	if (seconds.count() > 1) {
		DEBUG("%s: FPS %f", shm_name_.c_str(), frameCount_ / seconds.count());
		frameCount_ = 0;
		lastFrameDebug_ = currentTime;
	}
#endif

	// Publish: record the payload size, bump the generation counter so the
	// reader can detect a new frame, then wake it.
	shm_area_->buffer_size = bytes;
	shm_area_->buffer_gen++;
	sem_post(&shm_area_->notification);
	shm_unlock();
}