This article collects typical usage examples of the C++ QVideoFrame class, for anyone wondering what QVideoFrame is for, how to use it, and what working code looks like. The curated class examples below should help.
The following presents 15 QVideoFrame code examples, sorted by popularity.
Example 1: qDebug
/*!
  Stores the frame as a member to allow it to be processed on paint.
  Returns false when there is an error; otherwise returns true.
*/
bool CustomCamera::updateFrame(const QVideoFrame &frame)
{
    if (!frame.isValid()) {
        qDebug() << "CustomCamera::updateFrame: Invalid frame";
        return false;
    }

    if (m_processedFrameCounter != m_incomingFrameCounter) {
        // Discard the frame: the previous one has not been processed yet.
        return true;
    }

    m_incomingFrameCounter++;

    QVideoFrame f = frame;

    if (f.map(QAbstractVideoBuffer::ReadOnly)) {
        if (m_imageFrame.isNull() || m_imageFrame.width() != f.width() ||
                m_imageFrame.height() != f.height()) {
            m_imageFrame = QImage(f.width(), f.height(), QImage::Format_RGB32);
        }

        // Assumes the mapped frame data is laid out compatibly with Format_RGB32.
        memcpy(m_imageFrame.bits(), f.bits(), f.mappedBytes());

        f.unmap();
        update();
    }

    return true;
}
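A slot like updateFrame() is typically fed from a frame probe. The following is a minimal sketch assuming Qt 5's QVideoProbe and a CustomCamera instance exposing the slot above; the helper function itself is illustrative and not part of the original example.

#include <QCamera>
#include <QVideoProbe>

// Hypothetical wiring helper: attach a probe to the camera and forward
// every probed frame to CustomCamera::updateFrame().
void attachFrameProbe(QCamera *camera, CustomCamera *view)
{
    QVideoProbe *probe = new QVideoProbe(view);
    if (probe->setSource(camera)) { // not every backend supports probing
        QObject::connect(probe, &QVideoProbe::videoFrameProbed,
                         view, &CustomCamera::updateFrame);
    }
}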
Example 2: painter
void DCameraView::paint(const QVideoFrame &frame)
{
    QPainter painter(this);

    // frame.bits() only returns pixel data while the frame is mapped,
    // so the caller is expected to hand in an already mapped frame.
    QImage image(
        frame.bits(),
        frame.width(),
        frame.height(),
        frame.bytesPerLine(),
        QVideoFrame::imageFormatFromPixelFormat(frame.pixelFormat()));

    painter.drawImage(0, 0, image.mirrored(m_mirroredHorizontal, m_mirroredVertical));
}
Example 3: QVERIFY
void tst_QVideoFrame::createNull()
{
    QVideoFrame frame;

    QVERIFY(!frame.isValid());
    QCOMPARE(frame.handleType(), QAbstractVideoBuffer::NoHandle);
    QCOMPARE(frame.pixelFormat(), QVideoFrame::Format_Invalid);
    QCOMPARE(frame.size(), QSize());
    QCOMPARE(frame.width(), -1);
    QCOMPARE(frame.height(), -1);
    QCOMPARE(frame.fieldType(), QVideoFrame::ProgressiveFrame);
    QCOMPARE(frame.startTime(), qint64(-1));
    QCOMPARE(frame.endTime(), qint64(-1));
}
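As a counterpart to the null-frame checks above, here is a minimal sketch (Qt 5 API, arbitrarily chosen size) of constructing a frame whose buffer QVideoFrame allocates itself; the assertions mirror the test style but are not part of the original test.

// A valid frame: buffer size, frame size, bytes per line, pixel format.
QVideoFrame frame(640 * 480 * 4,              // buffer size in bytes
                  QSize(640, 480),            // frame dimensions
                  640 * 4,                    // bytes per line for RGB32
                  QVideoFrame::Format_RGB32);
QVERIFY(frame.isValid());
QCOMPARE(frame.pixelFormat(), QVideoFrame::Format_RGB32);
QCOMPARE(frame.size(), QSize(640, 480));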
Example 4: mat
bool ChilitagsSurface::present(const QVideoFrame &frame) {
    //qDebug("time: %d", m_timer.elapsed());
    //qDebug("newFrame: %dx%d", frame.width(), frame.height());
    //m_timer.restart();

    QVideoFrame copy = frame;
    //if (m_frame.pixelFormat() == QVideoFrame::Format_UYVY) {
    if (copy.map(QAbstractVideoBuffer::ReadOnly)) {
        // Wraps the mapped data without copying; assumes a 3-channel (BGR) layout.
        const cv::Mat mat(copy.height(), copy.width(), CV_8UC3,
                          copy.bits());

        if (m_videoSurface) {
            m_converted.create(copy.height(), copy.width(), CV_8UC4);
            cv::cvtColor(mat, m_converted, CV_BGR2RGBA);
        }

        m_item.setTags(m_chilitags.estimate(mat));
        copy.unmap();
    }

    //qDebug("%lu tags", m_tags.size());

    if (m_videoSurface) {
        QImage image(m_converted.data,
                     m_converted.cols, m_converted.rows,
                     QImage::Format_ARGB32);
        return m_videoSurface->present(QVideoFrame(image));
    }
    return true;
}
Example 5: present
bool AndroidVideoSurface::present(const QVideoFrame &frame)
{
    if (surfaceFormat().pixelFormat() != frame.pixelFormat()
            || surfaceFormat().frameSize() != frame.size()) {
        setError(IncorrectFormatError);
        stop();
        return false;
    } else {
        paintLock.lock();
        m_currentFrame = frame;
        m_widget->update(m_targetRect);
        paintLock.unlock();
        return true;
    }
}
Example 6: qDebug
void MainWindow::onImageAvailable( int id, const QVideoFrame& buffer )
{
    qDebug() << "Capture image available...";

    QImage::Format imageFormat = QVideoFrame::imageFormatFromPixelFormat( buffer.pixelFormat() );
    QImage img( buffer.bits(), buffer.width(), buffer.height(), buffer.bytesPerLine(), imageFormat );

    // QPixmap::fromImage() performs a deep copy, so the pixmap stays valid
    // after the frame's buffer goes away.
    QPixmap image = QPixmap::fromImage( img );

    QLabel* l = new QLabel();
    ui->tabWidget->addTab( l, QString( "%1" ).arg( id ) );
    l->setPixmap( image );
    l->show();
}
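For context, a handler like this is usually connected to QCameraImageCapture's imageAvailable() signal with buffer capture enabled. The following is a minimal sketch under that assumption (Qt 5, e.g. in MainWindow's constructor); the member name m_imageCapture is illustrative.

// Deliver captured images to a buffer instead of a file, then forward
// each captured QVideoFrame to the slot above.
m_imageCapture->setCaptureDestination(QCameraImageCapture::CaptureToBuffer);
connect(m_imageCapture, &QCameraImageCapture::imageAvailable,
        this, &MainWindow::onImageAvailable);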
Example 7: supportedPixelFormats
bool CaptureBuffer::present(const QVideoFrame &frame) {
    QList<QVideoFrame::PixelFormat> formatos = supportedPixelFormats();
    if (!formatos.contains(frame.pixelFormat())) {
        return false;
    } else {
        // Copy of the frame
        QVideoFrame f(frame);
        // Map the buffer so its contents can be read
        f.map(QAbstractVideoBuffer::ReadOnly);
        // Build an image from the frame; take a deep copy, because the
        // wrapped data becomes invalid once the frame is unmapped
        QImage imagen = QImage(f.bits(),
                               f.width(),
                               f.height(),
                               f.bytesPerLine(),
                               QVideoFrame::imageFormatFromPixelFormat(f.pixelFormat())).copy();
        // Release the buffer mapping
        f.unmap();
        // Emit the signal
        emit transmitirImagen(imagen);
        return true;
    }
}
Example 8: present
//! [4]
bool VideoWidgetSurface::present(const QVideoFrame &frame)
{
    if (surfaceFormat().pixelFormat() != frame.pixelFormat()
            || surfaceFormat().frameSize() != frame.size()) {
        setError(IncorrectFormatError);
        stop();
        return false;
    } else {
        currentFrame = frame;
        widget->repaint(targetRect);
        return true;
    }
}
Example 9: clonedFrame
bool WebcamCapture::present(QVideoFrame const & frame)
{
    if (frame.isValid())
    {
        // QVideoFrame::map() is a non-const method, so we cannot call it on a const frame object.
        // Therefore, we need to clone the original frame to get a non-const object.
        //
        QVideoFrame clonedFrame(frame);
        if (clonedFrame.map(QAbstractVideoBuffer::ReadOnly))
        {
            QImage const frameImage(clonedFrame.bits(), clonedFrame.width(), clonedFrame.height(),
                                    QVideoFrame::imageFormatFromPixelFormat(clonedFrame.pixelFormat()));

            // The QImage constructed above doesn't copy the data provided by the QVideoFrame.
            // Instead, it relies on that data remaining valid throughout the image's lifetime.
            // Unfortunately, the QVideoFrame will be destructed once it leaves this scope,
            // which forces us to create a deep copy of the existing QImage object.
            //
            // Keeping in mind that format RGBA8888 will later be used for QOpenGLTexture objects,
            // this circumstance can be used to create the above-mentioned deep copy. Additionally,
            // the conversion is performed in advance, sparing some time in the main thread.
            //
            auto capturedImage = std::make_shared<QImage>(frameImage.convertToFormat(QImage::Format_RGBA8888));

            emit imageCaptured(capturedImage);
            clonedFrame.unmap();
            return true;
        }
    }
    return false;
}
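On the receiving side, the pre-converted image can be uploaded to the GPU directly. This is only a sketch under assumptions: the Renderer class, the m_texture member (e.g. a std::unique_ptr<QOpenGLTexture>), and the presence of a current OpenGL context are all illustrative; QOpenGLTexture accepting a QImage is standard Qt 5 API.

#include <QOpenGLTexture>

// Hypothetical slot connected to WebcamCapture::imageCaptured().
void Renderer::onImageCaptured(std::shared_ptr<QImage> image)
{
    // Requires a current OpenGL context. Format_RGBA8888 uploads without an
    // extra per-pixel swizzle, which is why the capture side converts early.
    m_texture.reset(new QOpenGLTexture(*image));
}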
Example 10: cloneFrame
bool QCustomVideoSurface::present(const QVideoFrame &frame){
    if(frame.isValid()) {
        QVideoFrame cloneFrame(frame); // shallow copy (QVideoFrame is explicitly shared) to gain access to the pixel data
        cloneFrame.map(QAbstractVideoBuffer::ReadOnly);
#ifdef Q_OS_ANDROID
        cv::Mat mat(cloneFrame.height(), cloneFrame.width(), CV_8UC4, (void *)cloneFrame.bits());
        emit frameAvailable(mat, QImage::Format_RGBX8888);
#else
        QImage::Format format = QVideoFrame::imageFormatFromPixelFormat(cloneFrame.pixelFormat());
        int cvtype = CV_8UC1;
        switch(format) {
            case QImage::Format_RGB32:
                cvtype = CV_8UC4;
                break;
            case QImage::Format_RGB888:
                cvtype = CV_8UC3;
                break;
            case QImage::Format_Invalid:
                qWarning("QCustomVideoSurface Warning: image format is QImage::Format_Invalid");
                cloneFrame.unmap();
                return false;
            default:
                // TODO: add further formats as they are encountered
                qWarning("QCustomVideoSurface Warning: image format is not implemented (QImage::Format %d)", format);
                cloneFrame.unmap();
                return false;
        }
        cv::Mat mat(cloneFrame.height(), cloneFrame.width(), cvtype, (void *)cloneFrame.bits());
        cv::flip(mat, mat, 0);
        emit frameAvailable(mat, format);
#endif
        cloneFrame.unmap();
        return true;
    }
    return false;
}
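A sketch of one possible receiver for the frameAvailable(cv::Mat, QImage::Format) signal; the class and member names are assumptions. Note that the emitted mat wraps frame memory that is unmapped right after the signal returns, so a direct connection plus an immediate deep copy is required.

// Hypothetical slot, assumed to be attached with Qt::DirectConnection.
void FrameProcessor::onFrameAvailable(cv::Mat mat, QImage::Format format)
{
    // Wrap the pixel data without copying, then detach with copy()
    // before the underlying mapping disappears.
    QImage image(mat.data, mat.cols, mat.rows,
                 static_cast<int>(mat.step), format);
    m_lastFrame = image.copy();
}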
Example 11: cloneFrame
bool MyVideoSurface::present(const QVideoFrame& frame){
    if (frame.isValid()) {
        QVideoFrame cloneFrame(frame);
        cloneFrame.map(QAbstractVideoBuffer::ReadOnly);
        img = QImage(cloneFrame.bits(),
                     cloneFrame.width(),
                     cloneFrame.height(),
                     QVideoFrame::imageFormatFromPixelFormat(cloneFrame.pixelFormat()));
        //do something with the image ...
        //img = &img1;
        //counter++;
        //if (counter % 100 == 0) {
        counter = 0;
        //qDebug() << "PrintImage";
        QRect rect(400, 240, 800, 480);
        img = img.copy(rect);        // copy() detaches, so the data survives unmap()
        img = img.mirrored(true, false);
        //qDebug() << img.width() << " " << img.height();
        QImage image = show->fit500(&img);
        show->setImage(image);
        show->computeMostFitTemplateX(10);
        GT.m_TV = show->getTV();
        GT.m_image = image;
        show->update();
        //}
        cloneFrame.unmap();
        return true;
    }
    return false;
}
Example 12: avpicture_fill
// On success the caller owns outFrame and must call avpicture_free(&outFrame)
// to release the memory once it is done with it.
// Returns 0 on success and a non-zero value on failure.
int CTool::ConvertFormat(/*[in]*/ const QVideoFrame &inFrame,
                         /*[out]*/ AVPicture &outFrame,
                         /*[in]*/  int nOutWidth,
                         /*[in]*/  int nOutHeight,
                         /*[in]*/  AVPixelFormat pixelFormat)
{
    int nRet = 0;

    AVPicture pic;
    nRet = avpicture_fill(&pic, (uint8_t*) inFrame.bits(),
                          QVideoFrameFormatToFFMpegPixFormat(inFrame.pixelFormat()),
                          inFrame.width(),
                          inFrame.height());
    if(nRet < 0)
    {
        LOG_MODEL_DEBUG("Tool", "avpicture_fill fail:%x", nRet);
        return nRet;
    }

    nRet = ConvertFormat(pic, inFrame.width(), inFrame.height(),
                         QVideoFrameFormatToFFMpegPixFormat(inFrame.pixelFormat()),
                         outFrame, nOutWidth, nOutHeight, pixelFormat);

    return nRet;
}
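A hedged usage sketch: `frame` is assumed to be an already mapped QVideoFrame, and the 320x240 target size and YUV420P format are arbitrary choices. Per the comment above, the caller releases the converted picture with avpicture_free().

AVPicture out;
if (CTool::ConvertFormat(frame, out, 320, 240, AV_PIX_FMT_YUV420P) == 0) {
    // ... hand `out` to an encoder or renderer ...
    avpicture_free(&out);   // caller owns the buffers on success
}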
Example 13: MFGetService
void D3DPresentEngine::presentSample(void *opaque, qint64)
{
    HRESULT hr = S_OK;

    IMFSample *sample = reinterpret_cast<IMFSample*>(opaque);
    IMFMediaBuffer* buffer = NULL;
    IDirect3DSurface9* surface = NULL;

    if (m_surface && m_surface->isActive()) {
        if (sample) {
            // Get the buffer from the sample.
            hr = sample->GetBufferByIndex(0, &buffer);
            if (FAILED(hr))
                goto done;

            // Get the surface from the buffer.
            hr = MFGetService(buffer, MR_BUFFER_SERVICE, IID_PPV_ARGS(&surface));
            if (FAILED(hr))
                goto done;
        }

        if (surface && updateTexture(surface)) {
            QVideoFrame frame = QVideoFrame(new TextureVideoBuffer(m_glTexture),
                                            m_surfaceFormat.frameSize(),
                                            m_surfaceFormat.pixelFormat());

            // WMF uses 100-nanosecond units, Qt uses microseconds
            LONGLONG startTime = -1;
            if (SUCCEEDED(sample->GetSampleTime(&startTime))) {
                frame.setStartTime(startTime * 0.1);

                LONGLONG duration = -1;
                if (SUCCEEDED(sample->GetSampleDuration(&duration)))
                    frame.setEndTime((startTime + duration) * 0.1);
            }

            m_surface->present(frame);
        }
    }

done:
    qt_wmf_safeRelease(&surface);
    qt_wmf_safeRelease(&buffer);
    qt_wmf_safeRelease(&sample);
}
Example 14: QVideoFrame
void DataController::concatenateFrames(DataController::WhichFrame which, QVideoFrame frame)
{
    if (!concatenatingFrameInitialized) {
        int width = frame.width();
        int height = frame.height();
        //concatenatingImage = new QImage(width*2, height, QImage::Format_RGB888);
        //concatenationPainter = new QPainter(concatenatingImage);

        // Side-by-side RGB24 frame: 3 bytes per pixel, so a full line of the
        // doubled-width frame is width * 2 * 3 bytes (matching the stride
        // used in the memcpy below).
        concatenatingFrame = new QVideoFrame(width * 2 * height * 3,
                                             QSize(width * 2, height), width * 2 * 3,
                                             QVideoFrame::Format_RGB24);
        qDebug() << "Creating a concatenating frame of size " << 2 * width << " x " << height;
        concatenatingFrameInitialized = true;
    }

    if (!frame.map(QAbstractVideoBuffer::ReadOnly))
        qDebug() << "Failed to map current frame";
    else {
        if (!concatenatingFrame->map(QAbstractVideoBuffer::WriteOnly))
            qDebug() << "Failed to map concatenating frame";
        else {
            //concatenationPainter->drawImage(frame.width() * (which==right),0,frame);
            for (int i = 0; i < frame.height(); i++)
                memcpy(concatenatingFrame->bits() + concatenatingFrame->width() * 3 * i
                           + frame.width() * 3 * (which == right),
                       frame.bits() + frame.width() * 3 * i, frame.width() * 3);
            concatenatingFrame->unmap();

            if (frameConcatenationState == NOT_STARTED) {
                frameConcatenationState = (which == left) ? LEFT_READY : RIGHT_READY;
            } else if (frameConcatenationState == LEFT_READY) {
                if (which == left)
                    qDebug() << "Two left frames received before a right frame";
                else {
                    frameConcatenationState = NOT_STARTED;
                    emit newFrame(*concatenatingFrame);
                }
            } else if (frameConcatenationState == RIGHT_READY) {
                if (which == right)
                    qDebug() << "Two right frames received before a left frame";
                else {
                    frameConcatenationState = NOT_STARTED;
                    emit newFrame(*concatenatingFrame);
                }
            }
        }
        frame.unmap();
    }
}
Example 15: doRenderFrame
void QPxaVideoOutput::doRenderFrame( const QVideoFrame& frame )
{
    //qWarning() << "QPxaVideoOutput::renderFrame" << geometry();
    if ( frame.isNull() ) {
        if ( d->overlay )
            d->overlay->fill( 16, 128, 128 ); // yuv:black
        return;
    }

    if ( frame.size() != d->videoSize ) {
        d->videoSize = frame.size();
        setModified(true);
    }

    //if something has changed, recalculate position of the image:
    if ( isModified() ) {
        setModified(false);

        QRegion paintRegion = deviceMappedClipRegion();
        QRect geometry = deviceMappedGeometry();

        QSize imageSize = frame.size();

        //respect frame aspect ratio
        if ( frame.hasCustomAspectRatio() ) {
            imageSize.setWidth( int(imageSize.height() * frame.aspectRatio()) );
        }

        switch ( effectiveRotation() ) {
        case QtopiaVideo::Rotate0:
        case QtopiaVideo::Rotate180:
            break;
        case QtopiaVideo::Rotate90:
        case QtopiaVideo::Rotate270:
            imageSize = QSize( imageSize.height(), imageSize.width() );
        }

        if ( scaleMode() == QtopiaVideo::FitWindow ) {
            double scaleFactor = qMin( double(geometry.width())/imageSize.width(),
                                       double(geometry.height())/imageSize.height() );

            //don't scale if the size is close to required
            if ( scaleFactor < 0.95 || scaleFactor > 1.1 ) {
                imageSize *= scaleFactor;
            }
        }

        d->imageRect = QRect( QPoint(0,0), imageSize );
        d->imageRect.moveCenter( QPoint( geometry.width()/2, geometry.height()/2 ) );

        if ( d->overlay )
            d->overlay->fill( 16, 128, 128 ); //black color in yuv
    }

    if ( d->overlay )
        d->overlay->drawFrame( frame,
                               QRect( QPoint(0,0), frame.size() ),
                               d->imageRect,
                               effectiveRotation() );
}