本文整理汇总了C++中VideoFrame::displayAspectRatio方法的典型用法代码示例。如果您正苦于以下问题:C++ VideoFrame::displayAspectRatio方法的具体用法?C++ VideoFrame::displayAspectRatio怎么用?C++ VideoFrame::displayAspectRatio使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类VideoFrame
的用法示例。
在下文中一共展示了VideoFrame::displayAspectRatio方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: frame
// Build a QtAV VideoFrame from the VDA-decoded CVPixelBuffer held by the
// current AVFrame. Returns an invalid (default-constructed) VideoFrame on
// any error.
// NOTE(review): this definition is truncated in the visible excerpt -- the
// local interop class and the enclosing function are not closed here.
VideoFrame VideoDecoderVDA::frame()
{
DPTR_D(VideoDecoderVDA);
// For VDA hw decoding, FFmpeg stores the CVPixelBufferRef in AVFrame::data[3].
CVPixelBufferRef cv_buffer = (CVPixelBufferRef)d.frame->data[3];
if (!cv_buffer) {
qDebug("Frame buffer is empty.");
return VideoFrame();
}
if (CVPixelBufferGetDataSize(cv_buffer) <= 0) {
qDebug("Empty frame buffer");
return VideoFrame();
}
// Map the CoreVideo pixel format to a QtAV pixel format; bail out if unsupported.
VideoFormat::PixelFormat pixfmt = format_from_cv(CVPixelBufferGetPixelFormatType(cv_buffer));
if (pixfmt == VideoFormat::Format_Invalid) {
qWarning("unsupported vda pixel format: %#x", CVPixelBufferGetPixelFormatType(cv_buffer));
return VideoFrame();
}
// we can map the cv buffer addresses to video frame in SurfaceInteropCVBuffer. (may need VideoSurfaceInterop::mapToTexture()
// Local interop helper: holds a reference to the CVPixelBuffer for the
// lifetime of the produced VideoFrame and copies its planes to host memory
// on demand via mapToHost().
class SurfaceInteropCVBuffer Q_DECL_FINAL: public VideoSurfaceInterop {
bool glinterop;
CVPixelBufferRef cvbuf; // keep ref until video frame is destroyed
public:
SurfaceInteropCVBuffer(CVPixelBufferRef cv, bool gl) : glinterop(gl), cvbuf(cv) {
//CVPixelBufferRetain(cvbuf);
}
~SurfaceInteropCVBuffer() {
// Balances the reference taken when this interop object was created.
CVPixelBufferRelease(cvbuf);
}
// Copy the buffer's pixel data into host memory as a VideoFrame of 'format'.
// 'handle' points at a VideoFrame that receives the result; 'plane' is unused.
void* mapToHost(const VideoFormat &format, void *handle, int plane) {
Q_UNUSED(plane);
// Plane base addresses are only valid while the buffer is locked.
CVPixelBufferLockBaseAddress(cvbuf, 0);
const VideoFormat fmt(format_from_cv(CVPixelBufferGetPixelFormatType(cvbuf)));
if (!fmt.isValid()) {
// Unsupported format: unlock before failing to avoid leaving the buffer locked.
CVPixelBufferUnlockBaseAddress(cvbuf, 0);
return NULL;
}
const int w = CVPixelBufferGetWidth(cvbuf);
const int h = CVPixelBufferGetHeight(cvbuf);
uint8_t *src[3];
int pitch[3];
for (int i = 0; i <fmt.planeCount(); ++i) {
// get address results in internal copy
src[i] = (uint8_t*)CVPixelBufferGetBaseAddressOfPlane(cvbuf, i);
pitch[i] = CVPixelBufferGetBytesPerRowOfPlane(cvbuf, i);
}
CVPixelBufferUnlockBaseAddress(cvbuf, 0);
//CVPixelBufferRelease(cv_buffer); // release when video frame is destroyed
// fromGPU copies the mapped planes into a newly allocated host frame.
VideoFrame frame(VideoFrame::fromGPU(fmt, w, h, h, src, pitch));
if (fmt != format)
frame = frame.to(format);
VideoFrame *f = reinterpret_cast<VideoFrame*>(handle);
// Carry over timing and display aspect ratio from the destination frame.
frame.setTimestamp(f->timestamp());
frame.setDisplayAspectRatio(f->displayAspectRatio());
*f = frame;
return f;
}
示例2: convert
/*!
 * Convert \a frame to the FFmpeg pixel format \a fffmt using the lazily
 * created software scaler (sws). Brightness/contrast/saturation from m_eq
 * are applied during conversion. Returns an invalid VideoFrame on failure.
 */
VideoFrame VideoFrameConverter::convert(const VideoFrame &frame, int fffmt) const
{
    // Nothing to do for invalid input or an unspecified target format.
    if (!frame.isValid() || fffmt == QTAV_PIX_FMT_C(NONE))
        return VideoFrame();
    // No host-accessible bits means a hw surface; delegate to the frame itself.
    if (!frame.constBits(0)) // hw surface
        return frame.to(VideoFormat::pixelFormatFromFFmpeg(fffmt));
    const VideoFormat srcFormat(frame.format());
    //if (fffmt == format.pixelFormatFFmpeg())
    //    return *this;
    // Create the converter on first use and keep it for later calls.
    if (!m_cvt)
        m_cvt = new ImageConverterSWS();
    m_cvt->setBrightness(m_eq[0]);
    m_cvt->setContrast(m_eq[1]);
    m_cvt->setSaturation(m_eq[2]);
    m_cvt->setInFormat(srcFormat.pixelFormatFFmpeg());
    m_cvt->setOutFormat(fffmt);
    m_cvt->setInSize(frame.width(), frame.height());
    m_cvt->setOutSize(frame.width(), frame.height());
    m_cvt->setInRange(frame.colorRange());
    // Paletted formats carry the palette as an extra pseudo-plane.
    const int paletteCount = srcFormat.hasPalette();
    QVector<const uchar*> srcPlanes(srcFormat.planeCount() + paletteCount);
    QVector<int> srcStrides(srcFormat.planeCount() + paletteCount);
    for (int p = 0; p < srcFormat.planeCount(); ++p) {
        srcPlanes[p] = frame.constBits(p);
        srcStrides[p] = frame.bytesPerLine(p);
    }
    // NOTE(review): "pallete" is the key actually used by the project's
    // metadata producer -- keep the spelling as-is.
    const QByteArray paldata(frame.metaData(QStringLiteral("pallete")).toByteArray());
    if (paletteCount > 0) {
        srcPlanes[1] = (const uchar*)paldata.constData();
        srcStrides[1] = paldata.size();
    }
    if (!m_cvt->convert(srcPlanes.constData(), srcStrides.constData()))
        return VideoFrame();
    // Wrap the converter's output buffers in a new frame.
    const VideoFormat outFormat(fffmt);
    VideoFrame result(frame.width(), frame.height(), outFormat, m_cvt->outData());
    result.setBits(m_cvt->outPlanes());
    result.setBytesPerLine(m_cvt->outLineSizes());
    result.setTimestamp(frame.timestamp());
    result.setDisplayAspectRatio(frame.displayAspectRatio());
    // metadata?
    if (outFormat.isRGB()) {
        result.setColorSpace(outFormat.isPlanar() ? ColorSpace_GBR : ColorSpace_RGB);
    } else {
        result.setColorSpace(ColorSpace_Unknown);
    }
    // TODO: color range
    return result;
}
示例3: mapToHost
// Map a CUVID-decoded picture to host memory and store it into the VideoFrame
// pointed to by 'handle', converting to 'format' if it differs from NV12.
// Returns 'handle' on success, NULL on any CUDA error (via CUDA_ENSURE).
// FIX(review): the parameter "&param" had been corrupted to the mojibake
// "¶m" (HTML entity &para;) here and in the const_cast below, which
// cannot compile; restored in both places.
void* InteropResource::mapToHost(const VideoFormat &format, void *handle, int picIndex, const CUVIDPROCPARAMS &param, int width, int height, int coded_height)
{
    // Hold the CUDA context lock for the whole map/copy sequence.
    AutoCtxLock locker((cuda_api*)this, lock);
    Q_UNUSED(locker);
    CUdeviceptr devptr;
    unsigned int pitch;
    CUDA_ENSURE(cuvidMapVideoFrame(dec, picIndex, &devptr, &pitch, const_cast<CUVIDPROCPARAMS*>(&param)), NULL);
    // RAII helper: unmaps the picture when it goes out of scope.
    CUVIDAutoUnmapper unmapper(this, dec, devptr);
    Q_UNUSED(unmapper);
    uchar* host_data = NULL;
    // NV12 layout: full-size luma plane plus half-size interleaved chroma.
    const size_t host_size = pitch*coded_height*3/2;
    CUDA_ENSURE(cuMemAllocHost((void**)&host_data, host_size), NULL);
    // copy to the memory not allocated by cuda is possible but much slower
    CUDA_ENSURE(cuMemcpyDtoH(host_data, devptr, host_size), NULL);
    VideoFrame frame(width, height, VideoFormat::Format_NV12);
    uchar *planes[] = {
        host_data,                        // Y plane
        host_data + pitch * coded_height  // interleaved UV plane
    };
    frame.setBits(planes);
    int pitches[] = { (int)pitch, (int)pitch };
    frame.setBytesPerLine(pitches);
    VideoFrame *f = reinterpret_cast<VideoFrame*>(handle);
    // Preserve timing and display aspect ratio of the destination frame.
    frame.setTimestamp(f->timestamp());
    frame.setDisplayAspectRatio(f->displayAspectRatio());
    // clone()/to() deep-copy the data: host_data is freed right below.
    if (format == frame.format())
        *f = frame.clone();
    else
        *f = frame.to(format);
    cuMemFreeHost(host_data);
    return f;
}