This article collects typical usage examples of the C++ method VideoFrame::format. If you are wondering how VideoFrame::format is used in practice, the curated examples below may help. You can also explore further usage examples of the VideoFrame class itself.
Below are 13 code examples of the VideoFrame::format method, sorted by popularity by default.
Example 1: tryGraph
void tryGraph() {
    if (type != Graph || !graph.initialize(option, in->format().size(), in->format().imgfmt())
            || !graph.push(in->mpi()))
        return;
    while (auto out = graph.pull())
        push(out);
}
Example 2: setCurrentFrame
void VideoMaterial::setCurrentFrame(const VideoFrame &frame)
{
    DPTR_D(VideoMaterial);
    d.update_texure = true;
    d.bpp = frame.format().bitsPerPixel(0);
    d.width = frame.width();
    d.height = frame.height();
    const VideoFormat fmt(frame.format());
    // http://forum.doom9.org/archive/index.php/t-160211.html
    ColorTransform::ColorSpace cs = ColorTransform::RGB;
    if (fmt.isRGB()) {
        if (fmt.isPlanar())
            cs = ColorTransform::GBR;
    } else {
        if (frame.width() >= 1280 || frame.height() > 576) // values from mpv
            cs = ColorTransform::BT709;
        else
            cs = ColorTransform::BT601;
    }
    d.colorTransform.setInputColorSpace(cs);
    d.frame = frame;
    if (fmt != d.video_format) {
        qDebug("pixel format changed: %s => %s", qPrintable(d.video_format.name()), qPrintable(fmt.name()));
        d.video_format = fmt;
    }
}
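A note on the SD/HD branch above: the thresholds come from mpv, treating frames at least 1280 pixels wide or more than 576 pixels tall as BT.709 and everything else as BT.601. A standalone sketch of the same rule; the helper name is illustrative and not part of QtAV:

static ColorTransform::ColorSpace guessColorSpace(int width, int height)
{
    // HD material (>= 1280 wide or > 576 tall) defaults to BT.709,
    // SD material to BT.601, the same heuristic setCurrentFrame() uses.
    return (width >= 1280 || height > 576) ? ColorTransform::BT709
                                           : ColorTransform::BT601;
}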
Example 3: tryPostProc
void tryPostProc() {
    if (type != PP || !pp.initialize(option, in->format().size(), in->format().imgfmt()))
        return;
    const bool topFirst = in->mpi()->fields & MP_IMGFIELD_TOP_FIRST;
    push(topFirst ? topField() : bottomField());
    if (deint.doubler)
        push(!topFirst ? topField() : bottomField());
}
Example 4: push
void push(mp_image *mpi) {
    mpi->colorspace = in->format().colorspace();
    mpi->levels = in->format().range();
    mpi->display_w = in->format().displaySize().width();
    mpi->display_h = in->format().displaySize().height();
    mpi->pts = p->nextPTS();
    queue->push_back(VideoFrame(true, mpi, in->field()));
    ++pushed;
}
Example 5: upload
bool VaApiMixer::upload(const VideoFrame &frame, bool deint) {
    if (!m_glSurface)
        return false;
    static const int specs[MP_CSP_COUNT] = {
        0,                // MP_CSP_AUTO
        VA_SRC_BT601,     // MP_CSP_BT_601
        VA_SRC_BT709,     // MP_CSP_BT_709
        VA_SRC_SMPTE_240, // MP_CSP_SMPTE_240M
        0,                // MP_CSP_RGB
        0,                // MP_CSP_XYZ
        0,                // MP_CSP_YCGCO
    };
    static const int field[] = {
        // Picture = 0, Top = 1, Bottom = 2
        VA_FRAME_PICTURE, VA_TOP_FIELD, VA_BOTTOM_FIELD, VA_FRAME_PICTURE
    };
    const auto id = (VASurfaceID)(quintptr)frame.data(3);
    int flags = specs[frame.format().colorspace()];
    if (deint)
        flags |= field[frame.field() & VideoFrame::Interlaced];
    if (!check(vaCopySurfaceGLX(VaApi::glx(), m_glSurface, id, flags), "Cannot copy OpenGL surface."))
        return false;
    if (!check(vaSyncSurface(VaApi::glx(), id), "Cannot sync video surface."))
        return false;
    return true;
}
Example 6: setCurrentFrame
void VideoMaterial::setCurrentFrame(const VideoFrame &frame)
{
    DPTR_D(VideoMaterial);
    // TODO: lock?
    d.frame = frame;
    d.bpp = frame.format().bitsPerPixel(0);
    // http://forum.doom9.org/archive/index.php/t-160211.html
    ColorTransform::ColorSpace cs = ColorTransform::RGB;
    if (!frame.format().isRGB()) {
        if (d.frame.width() >= 1280 || d.frame.height() > 576) // values from mpv
            cs = ColorTransform::BT709;
        else
            cs = ColorTransform::BT601;
    }
    d.colorTransform.setInputColorSpace(cs);
    d.update_texure = true;
}
Example 7: encode
bool VideoEncoderFFmpeg::encode(const VideoFrame &frame)
{
    DPTR_D(VideoEncoderFFmpeg);
    AVFrame *f = NULL;
    if (frame.isValid()) {
        f = av_frame_alloc();
        f->format = frame.format().pixelFormatFFmpeg();
        f->width = frame.width();
        f->height = frame.height();
        // f->quality = d.avctx->global_quality;
        switch (timestampMode()) {
        case TimestampCopy:
            f->pts = int64_t(frame.timestamp()*frameRate()); // TODO: check that pts increases monotonically and fix it if not. Or add another mode?
            break;
        case TimestampMonotonic:
            f->pts = d.nb_encoded + 1;
            break;
        default:
            break;
        }
        // pts is set in the muxer
        const int nb_planes = frame.planeCount();
        for (int i = 0; i < nb_planes; ++i) {
            f->linesize[i] = frame.bytesPerLine(i);
            f->data[i] = (uint8_t*)frame.constBits(i);
        }
        if (d.avctx->width <= 0) {
            d.avctx->width = frame.width();
        }
        if (d.avctx->height <= 0) {
            d.avctx->height = frame.height();
        }
    }
    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = (uint8_t*)d.buffer.constData();
    pkt.size = d.buffer.size();
    int got_packet = 0;
    int ret = avcodec_encode_video2(d.avctx, &pkt, f, &got_packet);
    av_frame_free(&f);
    if (ret < 0) {
        qWarning("avcodec_encode_video2 error: %s", av_err2str(ret));
        return false;
    }
    d.nb_encoded++;
    if (!got_packet) {
        qWarning("no packet produced");
        d.packet = Packet();
        // an invalid frame means EOF
        return frame.isValid();
    }
    // qDebug("enc avpkt.pts: %lld, dts: %lld.", pkt.pts, pkt.dts);
    d.packet = Packet::fromAVPacket(&pkt, av_q2d(d.avctx->time_base));
    // qDebug("enc packet.pts: %.3f, dts: %.3f.", d.packet.pts, d.packet.dts);
    return true;
}
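Note that avcodec_encode_video2(), used above, is deprecated since FFmpeg 3.1. A minimal sketch of the replacement send/receive API; this is not QtAV code, and error handling is trimmed to bare return codes:

// Requires <libavcodec/avcodec.h> from FFmpeg 3.1 or later.
static int encodeFrame(AVCodecContext *ctx, AVFrame *f, AVPacket *pkt)
{
    int ret = avcodec_send_frame(ctx, f); // f == NULL flushes the encoder
    if (ret < 0)
        return ret;
    // Returns 0 on success, AVERROR(EAGAIN) if more input is needed,
    // or AVERROR_EOF once the encoder has been fully flushed.
    return avcodec_receive_packet(ctx, pkt);
}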
Example 8: doDeepCopy
void VideoFrame::doDeepCopy(const VideoFrame &frame) {
    d.detach();
    Q_ASSERT(d->format == frame.format());
    auto p = d->buffer.data();
    for (int i = 0; i < d->format.planes(); ++i) {
        const int len = d->format.bytesPerPlain(i);
        memcpy(p, frame.data(i), len);
        p += len;
    }
}
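The copy above assumes all planes are laid out back-to-back in a single buffer, so the destination must hold at least the sum of the per-plane sizes. A sketch of that sizing; the helper name is illustrative, and bytesPerPlain is spelled as in the snippet's codebase:

static int totalPlaneBytes(const VideoFormat &fmt)
{
    int total = 0;
    for (int i = 0; i < fmt.planes(); ++i)
        total += fmt.bytesPerPlain(i); // sum of all plane sizes
    return total;
}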
Example 9: convert
VideoFrame VideoFrameConverter::convert(const VideoFrame &frame, int fffmt) const
{
    if (!frame.isValid() || fffmt == QTAV_PIX_FMT_C(NONE))
        return VideoFrame();
    if (!frame.constBits(0)) // hw surface
        return frame.to(VideoFormat::pixelFormatFromFFmpeg(fffmt));
    const VideoFormat format(frame.format());
    //if (fffmt == format.pixelFormatFFmpeg())
    //    return *this;
    if (!m_cvt) {
        m_cvt = new ImageConverterSWS();
    }
    m_cvt->setBrightness(m_eq[0]);
    m_cvt->setContrast(m_eq[1]);
    m_cvt->setSaturation(m_eq[2]);
    m_cvt->setInFormat(format.pixelFormatFFmpeg());
    m_cvt->setOutFormat(fffmt);
    m_cvt->setInSize(frame.width(), frame.height());
    m_cvt->setOutSize(frame.width(), frame.height());
    m_cvt->setInRange(frame.colorRange());
    const int pal = format.hasPalette();
    QVector<const uchar*> pitch(format.planeCount() + pal);
    QVector<int> stride(format.planeCount() + pal);
    for (int i = 0; i < format.planeCount(); ++i) {
        pitch[i] = frame.constBits(i);
        stride[i] = frame.bytesPerLine(i);
    }
    const QByteArray paldata(frame.metaData(QStringLiteral("pallete")).toByteArray());
    if (pal > 0) {
        pitch[1] = (const uchar*)paldata.constData();
        stride[1] = paldata.size();
    }
    if (!m_cvt->convert(pitch.constData(), stride.constData())) {
        return VideoFrame();
    }
    const VideoFormat fmt(fffmt);
    VideoFrame f(frame.width(), frame.height(), fmt, m_cvt->outData());
    f.setBits(m_cvt->outPlanes());
    f.setBytesPerLine(m_cvt->outLineSizes());
    f.setTimestamp(frame.timestamp());
    f.setDisplayAspectRatio(frame.displayAspectRatio());
    // metadata?
    if (fmt.isRGB()) {
        f.setColorSpace(fmt.isPlanar() ? ColorSpace_GBR : ColorSpace_RGB);
    } else {
        f.setColorSpace(ColorSpace_Unknown);
    }
    // TODO: color range
    return f;
}
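A minimal usage sketch, assuming src is a valid decoded VideoFrame; AV_PIX_FMT_YUV420P is the FFmpeg pixel-format constant, and convert() returns an invalid frame on failure:

VideoFrameConverter conv;
VideoFrame yuv = conv.convert(src, AV_PIX_FMT_YUV420P);
if (yuv.isValid()) {
    // the converted frame owns its own buffer, plane pointers and line sizes
}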
Example 10: upload
bool VdaMixer::upload(const VideoFrame &frame, bool /*deint*/) {
    Q_ASSERT(frame.format().imgfmt() == IMGFMT_VDA);
    CGLError error = kCGLNoError;
    for (auto &texture : m_textures) {
        const auto cgl = CGLGetCurrentContext();
        const auto surface = CVPixelBufferGetIOSurface((CVPixelBufferRef)frame.data(3));
        texture.bind();
        const auto w = IOSurfaceGetWidthOfPlane(surface, texture.plane());
        const auto h = IOSurfaceGetHeightOfPlane(surface, texture.plane());
        if (_Change(error, CGLTexImageIOSurface2D(cgl, texture.target(), texture.format(), w, h,
                                                  texture.transfer().format, texture.transfer().type,
                                                  surface, texture.plane()))) {
            _Error("CGLError: %%(0x%%)", CGLErrorString(error), _N(error, 16));
            return false;
        }
    }
    return true;
}
Example 11: encode
bool VideoEncoderFFmpeg::encode(const VideoFrame &frame)
{
    DPTR_D(VideoEncoderFFmpeg);
    AVFrame *f = NULL;
    if (frame.isValid()) {
        f = av_frame_alloc();
        f->format = frame.format().pixelFormatFFmpeg();
        f->width = frame.width();
        f->height = frame.height();
        // TODO: record last pts
        f->pts = int64_t(frame.timestamp()*frameRate());
        // pts is set in the muxer
        const int nb_planes = frame.planeCount();
        for (int i = 0; i < nb_planes; ++i) {
            f->linesize[i] = frame.bytesPerLine(i);
            f->data[i] = (uint8_t*)frame.bits(i);
        }
        if (d.avctx->width <= 0) {
            d.avctx->width = frame.width();
        }
        if (d.avctx->height <= 0) {
            d.avctx->height = frame.height();
        }
    }
    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = (uint8_t*)d.buffer.constData();
    pkt.size = d.buffer.size();
    int got_packet = 0;
    int ret = avcodec_encode_video2(d.avctx, &pkt, f, &got_packet);
    av_frame_free(&f);
    if (ret < 0) {
        //qWarning("avcodec_encode_video2 error: %s", av_err2str(ret));
        return false;
    }
    if (!got_packet) {
        qWarning("no packet produced");
        return false;
    }
    qDebug("enc avpkt.pts: %lld, dts: %lld.", pkt.pts, pkt.dts);
    d.packet = Packet::fromAVPacket(&pkt, av_q2d(d.avctx->time_base));
    qDebug("enc packet.pts: %.3f, dts: %.3f.", d.packet.pts, d.packet.dts);
    return true;
}
Example 12: initializeOnFrame
void QPainterFilterContext::initializeOnFrame(Frame *frame)
{
    if (!frame) {
        if (!painter) {
            painter = new QPainter(); // warning: more than one painter on one device
        }
        if (!paint_device) {
            paint_device = painter->device();
        }
        if (!paint_device && !painter->isActive()) {
            qWarning("No paint device and painter is not active. No painting!");
            return;
        }
        if (!painter->isActive())
            painter->begin(paint_device);
        return;
    }
    VideoFrame *vframe = static_cast<VideoFrame*>(frame);
    VideoFormat format = vframe->format();
    if (!format.isValid()) {
        qWarning("Not a valid format");
        return;
    }
    if (format.imageFormat() == QImage::Format_Invalid) {
        format.setPixelFormat(VideoFormat::Format_RGB32);
        vframe->convertTo(format);
    }
    if (paint_device) {
        if (painter && painter->isActive()) {
            painter->end(); // destroying a paint device that is being painted on is not allowed!
        }
        delete paint_device;
        paint_device = 0;
    }
    Q_ASSERT(video_width > 0 && video_height > 0);
    // draw directly on the frame data, so use VideoFrame::bits()
    paint_device = new QImage((uchar*)vframe->bits(0), video_width, video_height, vframe->bytesPerLine(0), format.imageFormat());
    if (!painter)
        painter = new QPainter();
    own_painter = true;
    own_paint_device = true; // TODO: what if the renderer is not a widget?
    painter->begin((QImage*)paint_device);
}
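The QImage built above wraps the frame's own bits, so anything painted through it lands directly in the video data. A standalone sketch of the same idea, assuming an RGB32 frame; the overlay text is purely illustrative:

QImage canvas((uchar*)vframe->bits(0), vframe->width(), vframe->height(),
              vframe->bytesPerLine(0), QImage::Format_RGB32);
QPainter p(&canvas);
p.drawText(16, 16, QStringLiteral("overlay")); // drawn into the frame itself
p.end();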
Example 13: convert
VideoFrame VideoFrameConverter::convert(const VideoFrame &frame, int fffmt) const
{
    if (!frame.isValid() || fffmt == QTAV_PIX_FMT_C(NONE))
        return VideoFrame();
    if (!frame.bits(0)) // hw surface
        return frame.to(VideoFormat::pixelFormatFromFFmpeg(fffmt));
    const VideoFormat format(frame.format());
    //if (fffmt == format.pixelFormatFFmpeg())
    //    return *this;
    if (!m_cvt) {
        m_cvt = new ImageConverterSWS();
    }
    m_cvt->setBrightness(m_eq[0]);
    m_cvt->setContrast(m_eq[1]);
    m_cvt->setSaturation(m_eq[2]);
    m_cvt->setInFormat(format.pixelFormatFFmpeg());
    m_cvt->setOutFormat(fffmt);
    m_cvt->setInSize(frame.width(), frame.height());
    m_cvt->setOutSize(frame.width(), frame.height());
    QVector<const uchar*> pitch(format.planeCount());
    QVector<int> stride(format.planeCount());
    for (int i = 0; i < format.planeCount(); ++i) {
        pitch[i] = frame.bits(i);
        stride[i] = frame.bytesPerLine(i);
    }
    if (!m_cvt->convert(pitch.constData(), stride.constData())) {
        return VideoFrame();
    }
    const VideoFormat fmt(fffmt);
    VideoFrame f(m_cvt->outData(), frame.width(), frame.height(), fmt);
    f.setBits(m_cvt->outPlanes());
    f.setBytesPerLine(m_cvt->outLineSizes());
    f.setTimestamp(frame.timestamp());
    // metadata?
    if (fmt.isRGB()) {
        f.setColorSpace(fmt.isPlanar() ? ColorSpace_GBR : ColorSpace_RGB);
    } else {
        f.setColorSpace(ColorSpace_Unknow);
    }
    return f;
}