This article collects typical usage examples of the C++ VideoFrame class, drawn from several open-source projects. If you are wondering what VideoFrame is for, or how it is used in practice, the curated class examples below may help.
The following shows 15 code examples of the VideoFrame class, ordered by popularity by default.
Example 1: clear_if
VideoFrame VideoDecoderContext::decodeVideo(OptionalErrorCode ec, const Packet &packet, size_t offset, size_t *decodedBytes, bool autoAllocateFrame)
{
    clear_if(ec);

    VideoFrame outFrame;
    if (!autoAllocateFrame)
    {
        outFrame = {pixelFormat(), width(), height(), 32};

        if (!outFrame.isValid())
        {
            throws_if(ec, Errors::FrameInvalid);
            return VideoFrame();
        }
    }

    int gotFrame = 0;
    auto st = decodeCommon(outFrame, packet, offset, gotFrame, avcodec_decode_video_legacy);

    if (get<1>(st)) {
        throws_if(ec, get<0>(st), *get<1>(st));
        return VideoFrame();
    }

    if (!gotFrame)
        return VideoFrame();

    outFrame.setPictureType(AV_PICTURE_TYPE_I);

    if (decodedBytes)
        *decodedBytes = get<0>(st);

    return outFrame;
}
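A minimal calling sketch derived from the signature above; the surrounding objects (a configured VideoDecoderContext ctx and a demuxed Packet pkt) are hypothetical, and only the decodeVideo() call itself comes from the example:

    std::error_code ec;
    size_t decodedBytes = 0;
    // autoAllocateFrame = true lets the decoder allocate the output frame
    VideoFrame frame = ctx.decodeVideo(ec, pkt, 0, &decodedBytes, true);
    if (ec || !frame.isValid())
        return;            // decoding error, or no frame ready yet
    // ...consume the frame (scale, render, encode, ...)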
Example 2: newVideoFrame
VideoFrame VideoFrame::newVideoFrame(VideoFrame videoFrame){
    if(videoFrame.data->createdTexPixels){
        return newVideoFrame(videoFrame.getPixelsRef());
    }else{
        return newVideoFrame(videoFrame.getFboRef());
    }
}
Example 3: locker
void VideoBuffers::CheckDecodedFrames(void)
{
    QMutexLocker locker(m_lock);

    QList<VideoFrame*> recovered;
    QList<VideoFrame*>::iterator it = m_reference.begin();
    for ( ; it != m_reference.end(); ++it)
        if (!m_decoded.contains((*it)))
            recovered.append((*it));

    while (!recovered.isEmpty())
    {
        VideoFrame* frame = recovered.takeFirst();
        m_reference.removeOne(frame);
        if (frame->Discard())
        {
            delete frame;
            m_frameCount--;
        }
        else
        {
            m_unused.append(frame);
        }
    }
}
Example 4: doProcessFrame
bool VideoEncoderX264or5::doProcessFrame(Frame *org, Frame *dst)
{
    if (!(org && dst)) {
        utils::errorMsg("Error encoding video frame: org or dst are NULL");
        return false;
    }

    VideoFrame* rawFrame = dynamic_cast<VideoFrame*>(org);
    VideoFrame* codedFrame = dynamic_cast<VideoFrame*>(dst);

    if (!rawFrame || !codedFrame) {
        utils::errorMsg("Error encoding video frame: org and dst MUST be VideoFrame");
        return false;
    }

    if (!reconfigure(rawFrame, codedFrame)) {
        utils::errorMsg("Error encoding video frame: reconfigure failed");
        return false;
    }

    if (!fill_x264or5_picture(rawFrame)) {
        utils::errorMsg("Could not fill x264_picture_t from frame");
        return false;
    }

    if (!encodeFrame(codedFrame)) {
        utils::errorMsg("Could not encode video frame");
        return false;
    }

    codedFrame->setSize(rawFrame->getWidth(), rawFrame->getHeight());

    return true;
}
Example 5: addFramesToDeinterlace
void DiscardDeint::filter(QQueue<FrameBuffer> &framesQueue)
{
    int insertAt = addFramesToDeinterlace(framesQueue);
    while (!internalQueue.isEmpty())
    {
        FrameBuffer dequeued = internalQueue.dequeue();
        VideoFrame *videoFrame = VideoFrame::fromData(dequeued.data);
        const bool TFF = isTopFieldFirst(videoFrame);
        videoFrame->setNoInterlaced();
        for (int p = 0; p < 3; ++p) // luma plane + two chroma planes
        {
            const int linesize = videoFrame->linesize[p];
            quint8 *src = videoFrame->data[p];
            quint8 *dst = videoFrame->data[p];
            const int lines = (p ? h >> 2 : h >> 1) - 1; // field height - 1; chroma planes are half height (4:2:0)
            if (!TFF) // bottom field first: start by copying the second line over the first
            {
                memcpy(dst, src + linesize, linesize);
                src += linesize;
                dst += linesize;
            }
            dst += linesize;
            src += linesize;
            // rebuild the discarded field by averaging the two neighbouring kept lines
            for (int i = 0; i < lines; ++i)
            {
                VideoFilters::averageTwoLines(dst, src - linesize, src + linesize, linesize);
                src += linesize << 1;
                dst += linesize << 1;
            }
            if (TFF) // top field first: duplicate the last kept line
                memcpy(dst, src - linesize, linesize);
        }
        framesQueue.insert(insertAt++, dequeued);
    }
}
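VideoFilters::averageTwoLines itself is not shown in the example. A plain scalar equivalent of such line averaging could look like the sketch below; the real routine is presumably SIMD-optimized, so this only illustrates the technique:

    // Hypothetical scalar stand-in: write the rounded per-byte average of
    // two source lines into the destination line.
    static void averageTwoLines(quint8 *dst, const quint8 *src1,
                                const quint8 *src2, int linesize)
    {
        for (int i = 0; i < linesize; ++i)
            dst[i] = (src1[i] + src2[i] + 1) >> 1;
    }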
Example 6: drawNextFrame
void VideoRenderer::drawNextFrame(){
    VideoFrame * frame = source->getNextVideoFrame();
    if(frame != NULL){
        frame->getTextureRef().draw(0, 0);
        frame->release();
    }
}
Example 7: render
void Window::render(const VideoFrame& frame)
{
    LogDebug("Rendering frame " << frame.getId());

    glClear(GL_COLOR_BUFFER_BIT);

    glEnableVertexAttribArray(0);
    glBindBuffer(GL_ARRAY_BUFFER, vertexBuffer);
    glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 0, (void*) 0);

    glEnableVertexAttribArray(1);
    glBindBuffer(GL_ARRAY_BUFFER, uvBuffer);
    glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 0, (void*) 0);

    // TODO: consider linesize padding here
    // TODO: use glTexSubImage2D for more performance
    glTexImage2D(GL_TEXTURE_2D,
                 0,
                 GL_RED,
                 frame.getWidth(),
                 frame.getHeight(),
                 0,
                 GL_RED,
                 GL_UNSIGNED_BYTE,
                 frame.getLumaData());

    glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, (void*) 0);

    glDisableVertexAttribArray(1);
    glDisableVertexAttribArray(0);

    glfwSwapBuffers(glfwWindow);
}
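The second TODO refers to a standard OpenGL optimization: glTexImage2D re-specifies the texture storage on every call, while allocating once and updating with glTexSubImage2D avoids the reallocation. A sketch, assuming the frame dimensions stay constant between frames:

    // At setup time: allocate storage once, with no initial data.
    glTexImage2D(GL_TEXTURE_2D, 0, GL_RED, width, height, 0,
                 GL_RED, GL_UNSIGNED_BYTE, nullptr);

    // Per frame: update the existing storage in place.
    glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0,
                    frame.getWidth(), frame.getHeight(),
                    GL_RED, GL_UNSIGNED_BYTE, frame.getLumaData());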
Example 8: upload
bool VaApiMixer::upload(const VideoFrame &frame, bool deint) {
    if (!m_glSurface)
        return false;
    static const int specs[MP_CSP_COUNT] = {
        0,                // MP_CSP_AUTO
        VA_SRC_BT601,     // MP_CSP_BT_601
        VA_SRC_BT709,     // MP_CSP_BT_709
        VA_SRC_SMPTE_240, // MP_CSP_SMPTE_240M
        0,                // MP_CSP_RGB
        0,                // MP_CSP_XYZ
        0,                // MP_CSP_YCGCO
    };
    static const int field[] = {
        // Picture = 0, Top = 1, Bottom = 2
        VA_FRAME_PICTURE, VA_TOP_FIELD, VA_BOTTOM_FIELD, VA_FRAME_PICTURE
    };
    const auto id = (VASurfaceID)(quintptr)frame.data(3);
    int flags = specs[frame.format().colorspace()];
    if (deint)
        flags |= field[frame.field() & VideoFrame::Interlaced];
    if (!check(vaCopySurfaceGLX(VaApi::glx(), m_glSurface, id, flags),
               "Cannot copy OpenGL surface."))
        return false;
    if (!check(vaSyncSurface(VaApi::glx(), id), "Cannot sync video surface."))
        return false;
    return true;
}
Example 9: preparePixmap
bool QPainterRenderer::preparePixmap(const VideoFrame &frame)
{
    DPTR_D(QPainterRenderer);
    // already locked in a larger scope of receive()
    QImage::Format imgfmt = frame.imageFormat();
    if (frame.constBits(0)) {
        d.video_frame = frame;
    } else {
        if (imgfmt == QImage::Format_Invalid) {
            d.video_frame = frame.to(VideoFormat::Format_RGB32);
            imgfmt = d.video_frame.imageFormat();
        } else {
            d.video_frame = frame.to(frame.pixelFormat());
        }
    }
    const bool swapRGB = (int)imgfmt < 0;
    if (swapRGB) {
        imgfmt = (QImage::Format)(-imgfmt);
    }
    // DO NOT use frameData().data(): that is a temporary pointer, and the
    // QImage below does not deep-copy the data
    QImage image = QImage((uchar*)d.video_frame.constBits(),
                          d.video_frame.width(), d.video_frame.height(),
                          d.video_frame.bytesPerLine(), imgfmt);
    if (swapRGB)
        image = image.rgbSwapped();
    d.pixmap = QPixmap::fromImage(image);
    // Format_RGB32 is fast; see the QImage documentation
    return true;
}
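Note that the QImage constructor used above wraps the frame's pixel buffer without copying it, which is why the frame is kept alive in d.video_frame. If an image that outlives the frame were needed, a deep copy would detach it (standard Qt API, not part of this example):

    QImage owned = image.copy(); // detaches from the frame's buffer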
Example 10: draw
void avLooperRenderer::draw(int x, int y, int w, int h)
{
    // audio -> video sync
    //////////////////////////
    // 1
    //VideoFrame * frame = vHeader.getVideoFrame(int(float(aHeader2.getIndex())/float(aBuffer->sizeInSamples()))*vBuffer->getMaxSize());
    //printf("index %d of size %d = %d\n",aHeader2.getIndex(),aBuffer->sizeInSamples(),int(float(aHeader2.getIndex())/float(aBuffer->sizeInSamples()))*vBuffer->getMaxSize());
    // 2
    float delayToVideo = (float(aHeader2.getIndex()) / float(audioSampleRate)) * 1000.0;
    vHeader.setDelayMs(float(maximumSizeInMs) - delayToVideo - float(videoOffsetInMs));
    //printf("avR ::DELAY is = %f || maxSize %d delayToVideo in ms = %f / index %d\n",float(maximumSizeInMs)-delayToVideo-float(videoOffsetInMs),maximumSizeInMs,delayToVideo,aHeader2.getIndex());
    //printf("AVLR:: videoDelayMs :: %f \n",float(maximumSizeInMs)-delayToVideo-float(videoOffsetInMs));

    VideoFrame frame = vHeader.getNextVideoFrame();
    if (frame != NULL) {
        // draw the frame texture to screen
        ofSetColor(vHeader.getOpacity(), vHeader.getOpacity(), vHeader.getOpacity());
        frame.getTextureRef().draw(x, y, w, h);
    }

    // draw av header interfaces
    aBuffer->draw();
    vBuffer->draw();
    vHeader.draw();
    aHeader2.draw();
    ofSetColor(255, 255, 255);
}
Example 11: VideoDecoder
void VideoReader::init()
{
    // analyse InputFile
    avtranscoder::NoDisplayProgress p;
    _inputFile->analyse( p );
    _streamProperties = &_inputFile->getProperties().getStreamPropertiesWithIndex( _streamIndex );
    _videoStreamProperties = static_cast<const VideoProperties*>( _streamProperties );
    _inputFile->activateStream( _streamIndex );

    // setup decoder
    _decoder = new VideoDecoder( _inputFile->getStream( _streamIndex ) );
    _decoder->setupDecoder();

    // create src frame
    _srcFrame = new VideoFrame( _inputFile->getStream( _streamIndex ).getVideoCodec().getVideoFrameDesc() );
    VideoFrame* srcFrame = static_cast<VideoFrame*>( _srcFrame );

    // create dst frame
    if( _width == 0 )
        _width = srcFrame->desc().getWidth();
    if( _height == 0 )
        _height = srcFrame->desc().getHeight();
    VideoFrameDesc videoFrameDescToDisplay( _width, _height, getPixelFormat() );
    _dstFrame = new VideoFrame( videoFrameDescToDisplay );

    // create transform
    _transform = new VideoTransform();
}
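A read step that uses these members would presumably decode into _srcFrame and rescale into _dstFrame. The sketch below assumes avtranscoder's decodeNextFrame()/convert() calls and is not part of the example:

    // Hypothetical companion method to init().
    VideoFrame* VideoReader::readNextFrame()
    {
        if( !_decoder->decodeNextFrame( *_srcFrame ) )  // assumed API
            return NULL;                                // end of stream
        _transform->convert( *_srcFrame, *_dstFrame );  // assumed API
        return static_cast<VideoFrame*>( _dstFrame );
    }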
Example 12: main
int main(int argc, char *argv[])
{
    QCoreApplication a(argc, argv);
    FrameReader r;
    r.setMedia(a.arguments().last());
    QQueue<qint64> t;
    int count = 0;
    qint64 t0 = QDateTime::currentMSecsSinceEpoch();
    while (r.readMore()) {
        while (r.hasEnoughVideoFrames()) {
            const VideoFrame f = r.getVideoFrame(); //TODO: if eof
            if (!f)
                continue;
            count++;
            //r.readMore();
            const qint64 now = QDateTime::currentMSecsSinceEpoch();
            const qint64 dt = now - t0;
            t.enqueue(now);
            printf("decode @%.3f count: %d, elapsed: %lld, fps: %.1f/%.1f\r",
                   f.timestamp(), count, dt,
                   count*1000.0/dt, t.size()*1000.0/(now - t.first()));
            fflush(0);
            if (t.size() > 10)
                t.dequeue();
        }
    }
    while (r.hasVideoFrame()) {
        const VideoFrame f = r.getVideoFrame();
        qDebug("pts: %.3f", f.timestamp());
    }
    qDebug("read done");
    return 0;
}
Example 13: videoFrameSize
void
ShmHolder::renderFrame(VideoFrame& src) noexcept
{
    const auto width = src.width();
    const auto height = src.height();
    const auto format = VIDEO_PIXFMT_BGRA;
    const auto frameSize = videoFrameSize(format, width, height);

    if (!resizeArea(frameSize)) {
        RING_ERR("ShmHolder[%s]: could not resize area",
                 openedName_.c_str());
        return;
    }

    {
        // scale the source frame directly into the shared-memory write buffer
        VideoFrame dst;
        VideoScaler scaler;

        dst.setFromMemory(area_->data + area_->writeOffset, format, width, height);
        scaler.scale(src, dst);
    }

    {
        // publish: swap the read/write buffers and signal waiting readers
        SemGuardLock lk {area_->mutex};

        ++area_->frameGen;
        std::swap(area_->readOffset, area_->writeOffset);
        ::sem_post(&area_->frameGenMutex);
    }
}
Example 14: pushNewVideoFrame
void VideoBuffer::pushNewVideoFrame(VideoFrame & frame){
    int64_t time = frame.getTimestamp().epochMicroseconds();
    if(microsOneSec == -1) microsOneSec = time;
    framesOneSec++;
    int64_t diff = time - microsOneSec;
    if(diff >= 1000000){
        // one second elapsed: update the measured fps and restart the window
        realFps = double(framesOneSec*1000000.)/double(diff);
        framesOneSec = 0;
        microsOneSec = time - (diff - 1000000);
    }
    totalFrames++;
    if(size() == 0) initTime = frame.getTimestamp();
    //timeMutex.lock();
    if (size() >= maxSize) {
        // Buffer full: overwrite the frame at framePos instead of growing the
        // vector. (This line occasionally crashed; the clamp guards framePos
        // against going out of range.)
        frames[ofClamp(framePos, 0, size()-1)] = frame;
    }
    else if (size() < maxSize) {
        frames.push_back(frame);
    }
    while(size() > maxSize){
        frames.erase(frames.begin() + framePos);
    }
}
Example 15: getNextVideoFrame
void FileGrabber::update(){
    ofVideoPlayer::update();
    if(isFrameNew()){
        VideoFrame * frame = getNextVideoFrame();
        newFrameEvent.notify(this, *frame);
        frame->release();
    }
}