This article collects typical usage examples of the C++ method VideoFrame::setBits. If you are wondering what VideoFrame::setBits does, how to call it, or what real calling code looks like, the curated examples here may help. You can also look further into usage examples of the enclosing VideoFrame class.
Two code examples of VideoFrame::setBits are shown below, sorted by popularity by default.
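Before the full examples, here is a minimal sketch of the pattern they both follow: construct a VideoFrame for a given VideoFormat, attach externally owned plane pointers with setBits(), record the per-plane strides with setBytesPerLine(), and clone() when the frame must outlive the source buffers. The sketch assumes QtAV's VideoFrame API as it appears in the examples below; the 640x480 YUV420P layout and the makeFrame helper are hypothetical.

// Minimal sketch, assuming QtAV's VideoFrame API as used in the examples
// below. The 640x480 YUV420P layout and this helper are hypothetical.
#include "QtAV/VideoFrame.h"
using namespace QtAV;

VideoFrame makeFrame(uchar *y, uchar *u, uchar *v)
{
    const VideoFormat fmt(VideoFormat::Format_YUV420P);
    uchar *planes[] = { y, u, v };        // one pointer per plane
    int strides[] = { 640, 320, 320 };    // bytes per line of each plane
    VideoFrame frame(640, 480, fmt);
    frame.setBits(planes);                // attach external plane data (not copied)
    frame.setBytesPerLine(strides);
    // setBits() only stores the pointers; clone() deep-copies the data,
    // which both examples below do when the source memory is transient.
    return frame.clone();
}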
Example 1: copyToFrame
VideoFrame VideoDecoderFFmpegHW::copyToFrame(const VideoFormat& fmt, int surface_h, quint8 *src[], int pitch[], bool swapUV)
{
    DPTR_D(VideoDecoderFFmpegHW);
    Q_ASSERT_X(src[0] && pitch[0] > 0, "VideoDecoderFFmpegHW::copyToFrame", "src[0] and pitch[0] must be set");
    const int nb_planes = fmt.planeCount();
    const int chroma_pitch = nb_planes > 1 ? fmt.bytesPerLine(pitch[0], 1) : 0;
    const int chroma_h = fmt.chromaHeight(surface_h);
    int h[] = { surface_h, 0, 0 };
    for (int i = 1; i < nb_planes; ++i) {
        h[i] = chroma_h;
        // set chroma address and pitch if not set
        if (pitch[i] <= 0)
            pitch[i] = chroma_pitch;
        if (!src[i])
            src[i] = src[i-1] + pitch[i-1]*h[i-1];
    }
    if (swapUV) {
        std::swap(src[1], src[2]);
        std::swap(pitch[1], pitch[2]);
    }
    VideoFrame frame;
    if (copyMode() == VideoDecoderFFmpegHW::OptimizedCopy && d.gpu_mem.isReady()) {
        int yuv_size = 0;
        for (int i = 0; i < nb_planes; ++i) {
            yuv_size += pitch[i]*h[i];
        }
        // allocate 15 extra bytes so the payload start can be rounded up
        // to a 16-byte boundary
        QByteArray buf(15 + yuv_size, 0);
        const int offset_16 = (16 - ((uintptr_t)buf.data() & 0x0f)) & 0x0f;
        // TODO: are planes 1, 2, ... also 16-byte aligned?
        uchar* plane_ptr = (uchar*)buf.data() + offset_16;
        QVector<uchar*> dst(nb_planes, 0);
        for (int i = 0; i < nb_planes; ++i) {
            dst[i] = plane_ptr;
            // TODO: add VideoFormat::planeWidth/Height()?
            // pitch instead of surface_width
            plane_ptr += pitch[i] * h[i];
            d.gpu_mem.copyFrame(src[i], dst[i], pitch[i], h[i], pitch[i]);
        }
        frame = VideoFrame(buf, width(), height(), fmt);
        frame.setBits(dst);
        frame.setBytesPerLine(pitch);
    } else {
        frame = VideoFrame(width(), height(), fmt);
        frame.setBits(src);
        frame.setBytesPerLine(pitch);
        // TODO: why is clone() faster?
        // TODO: use a buffer pool and create the VideoFrame on demand to avoid the copy? also for other va
        frame = frame.clone();
    }
    frame.setTimestamp(double(d.frame->pkt_pts)/1000.0);
    frame.setDisplayAspectRatio(d.getDAR(d.frame));
    d.updateColorDetails(&frame);
    return frame;
}
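Both examples share the same alignment trick: the QByteArray is over-allocated by 15 bytes, and offset_16 rounds the payload start up to the next 16-byte boundary so that the USWC copy in gpu_mem.copyFrame works on aligned memory. A standalone sketch of that computation (the helper name is mine, not QtAV's):

// Round an arbitrary pointer up to the next 16-byte boundary.
// The worst case needs 15 extra bytes, which is why the examples
// allocate 15 + yuv_size. Helper name is hypothetical.
#include <cstdint>
#include <cassert>

static unsigned char *alignUp16(unsigned char *p)
{
    const uintptr_t mask = 0x0f;
    unsigned char *aligned = p + ((16 - (reinterpret_cast<uintptr_t>(p) & mask)) & mask);
    assert((reinterpret_cast<uintptr_t>(aligned) & mask) == 0);
    return aligned;
}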
Example 2: frame
//......... part of the code is omitted here .........
    case VA_FOURCC_NV12:
        pixfmt = VideoFormat::Format_NV12;
        break;
    default:
        break;
    }
    if (pixfmt == VideoFormat::Format_Invalid) {
        qWarning("unsupported vaapi pixel format: %#x", d.image.format.fourcc);
        return VideoFrame();
    }
    const VideoFormat fmt(pixfmt);
    uint8_t *src[3];
    int pitch[3];
    for (int i = 0; i < fmt.planeCount(); ++i) {
        src[i] = (uint8_t*)p_base + d.image.offsets[i];
        pitch[i] = d.image.pitches[i];
    }
    if (swap_uv) {
        std::swap(src[1], src[2]);
        std::swap(pitch[1], pitch[2]);
    }
    VideoFrame frame;
    if (d.copy_uswc && d.gpu_mem.isReady()) {
        int yuv_size = 0;
        if (pixfmt == VideoFormat::Format_NV12)
            yuv_size = pitch[0]*d.surface_height*3/2;
        else
            yuv_size = pitch[0]*d.surface_height + pitch[1]*d.surface_height/2 + pitch[2]*d.surface_height/2;
        // allocate 15 extra bytes so the payload start can be rounded up
        // to a 16-byte boundary
        QByteArray buf(15 + yuv_size, 0);
        const int offset_16 = (16 - ((uintptr_t)buf.data() & 0x0f)) & 0x0f;
        // TODO: are planes 1, 2, ... also 16-byte aligned?
        uchar* plane_ptr = (uchar*)buf.data() + offset_16;
        QVector<uchar*> dst(fmt.planeCount(), 0);
        for (int i = 0; i < dst.size(); ++i) {
            dst[i] = plane_ptr;
            // TODO: add VideoFormat::planeWidth/Height()?
            const int plane_w = pitch[i]; //(i == 0 || pixfmt == VideoFormat::Format_NV12) ? d.surface_width : fmt.chromaWidth(d.surface_width);
            const int plane_h = i == 0 ? d.surface_height : fmt.chromaHeight(d.surface_height);
            plane_ptr += pitch[i] * plane_h;
            d.gpu_mem.copyFrame(src[i], dst[i], plane_w, plane_h, pitch[i]);
        }
        frame = VideoFrame(buf, d.width, d.height, fmt);
        frame.setBits(dst);
        frame.setBytesPerLine(pitch);
    } else {
        frame = VideoFrame(d.width, d.height, fmt);
        frame.setBits(src);
        frame.setBytesPerLine(pitch);
        // TODO: why is clone() faster?
        frame = frame.clone();
    }
    if ((status = vaUnmapBuffer(d.display, d.image.buf)) != VA_STATUS_SUCCESS) {
        qWarning("vaUnmapBuffer(VADisplay:%p, VABufferID:%#x) == %#x", d.display, d.image.buf, status);
        return VideoFrame();
    }
    if (!d.disable_derive && d.supports_derive) {
        vaDestroyImage(d.display, d.image.image_id);
        d.image.image_id = VA_INVALID_ID;
    }
    return frame;
}
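For context around this fragment: the elided beginning of the function maps a VAImage so that p_base points at the decoded pixels, and the tail above unmaps it and, when the image was derived directly from the surface, destroys it. A hedged sketch of the standard VA-API readback pattern that surrounds such code (this is generic VA-API usage, not the omitted QtAV code itself):

// Hedged sketch of standard VA-API readback: derive an image from the
// decoded surface, map its buffer to get a CPU pointer, then (after the
// copy) unmap and destroy it as the fragment above does.
#include <va/va.h>

VAStatus mapSurface(VADisplay dpy, VASurfaceID surface, VAImage *image, void **p_base)
{
    VAStatus status = vaDeriveImage(dpy, surface, image);
    if (status != VA_STATUS_SUCCESS)
        return status; // a real decoder falls back to vaCreateImage + vaGetImage
    status = vaMapBuffer(dpy, image->buf, p_base);
    if (status != VA_STATUS_SUCCESS)
        vaDestroyImage(dpy, image->image_id);
    return status;
}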
struct display_names_t {
    VideoDecoderVAAPI::DisplayType display;
    QString name;
};
static const display_names_t display_names[] = {
    { VideoDecoderVAAPI::GLX, "GLX" },
    { VideoDecoderVAAPI::X11, "X11" },
    { VideoDecoderVAAPI::DRM, "DRM" }
};
static VideoDecoderVAAPI::DisplayType displayFromName(QString name) {
    for (unsigned int i = 0; i < sizeof(display_names)/sizeof(display_names[0]); ++i) {
        if (name.toUpper().contains(display_names[i].name.toUpper())) {
            return display_names[i].display;
        }
    }
    return VideoDecoderVAAPI::X11;
}
static QString displayToName(VideoDecoderVAAPI::DisplayType t) {
    for (unsigned int i = 0; i < sizeof(display_names)/sizeof(display_names[0]); ++i) {
        if (t == display_names[i].display) {
            return display_names[i].name;
        }
    }
    return QString();
}
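displayFromName() does a case-insensitive substring match against the table and falls back to X11; displayToName() is the exact inverse lookup, returning a null QString for unknown values. A hypothetical call site:

// Hypothetical usage of the two helpers above.
VideoDecoderVAAPI::DisplayType t = displayFromName(QStringLiteral("vaapi-drm")); // contains "DRM" -> DRM
QString name = displayToName(t); // "DRM"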
void VideoDecoderVAAPI::setDisplayPriority(const QStringList &priority)
{
    DPTR_D(VideoDecoderVAAPI);
    d.display_priority.clear();
    foreach (QString disp, priority) {
        d.display_priority.push_back(displayFromName(disp));
    }
}