本文整理汇总了C++中VideoFormat类的典型用法代码示例。如果您正苦于以下问题:C++ VideoFormat类的具体用法?C++ VideoFormat怎么用?C++ VideoFormat使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了VideoFormat类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: print_active_format
// Dump the capture device's currently active format (fourcc, resolution,
// framerate) to stdout in a human-readable, tab-aligned layout.
void print_active_format (const VideoFormat& format)
{
    const auto dim = format.get_size();
    std::cout << "Active format:\n"
              << "Format: \t" << fourcc_to_description(format.get_fourcc())
              << "\nResolution: \t" << dim.width << "x" << dim.height
              << "\nFramerate: \t" << format.get_framerate() << "\n" << std::endl;
}
示例2: fmt
/*!
 * Upload per-frame uniforms for \a material: texture unit indices for every
 * plane, the color conversion matrix and bits-per-pixel. Binds the material's
 * textures and the shader program first; returns early if binding fails.
 */
void VideoShader::update(VideoMaterial *material)
{
    if (!material->bind())
        return;
    const VideoFormat fmt(material->currentFormat());
    //format is out of date because we may use the same shader for different formats
    setVideoFormat(fmt);
    // uniforms begin
    program()->bind(); //glUseProgram(id). for glUniform
    // all texture ids should be binded when renderering even for packed plane!
    const int nb_planes = fmt.planeCount(); //number of texture id
    for (int i = 0; i < nb_planes; ++i) {
        // use glUniform1i to swap planes. swap uv: i => (3-i)%3
        // TODO: in shader, use uniform sample2D u_Texture[], and use glUniform1iv(u_Texture, 3, {...})
        program()->setUniformValue(textureLocation(i), (GLint)i);
    }
    // Point any surplus sampler uniforms at the last real plane. The loop body
    // never runs when nb_planes >= textureLocationCount(), so the redundant
    // `if (nb_planes < textureLocationCount())` guard around it was removed.
    for (int i = nb_planes; i < textureLocationCount(); ++i) {
        program()->setUniformValue(textureLocation(i), (GLint)(nb_planes - 1));
    }
    //qDebug() << "color mat " << material->colorMatrix();
    program()->setUniformValue(colorMatrixLocation(), material->colorMatrix());
    program()->setUniformValue(bppLocation(), (GLfloat)material->bpp());
    //program()->setUniformValue(matrixLocation(), material->matrix()); //what about sgnode? state.combindMatrix()?
    // uniform end. attribute begins
}
示例3: DPTR_D
// Cache the new frame and everything the shaders need from it: bpp, size,
// input color space, and the (possibly changed) pixel format.
void VideoMaterial::setCurrentFrame(const VideoFrame &frame)
{
    DPTR_D(VideoMaterial);
    const VideoFormat fmt(frame.format());
    d.update_texure = true; // member name typo ("texure") is part of the private class API
    d.bpp = fmt.bitsPerPixel(0);
    d.width = frame.width();
    d.height = frame.height();
    // Pick the input color space.
    // http://forum.doom9.org/archive/index.php/t-160211.html
    ColorTransform::ColorSpace cs;
    if (fmt.isRGB()) {
        cs = fmt.isPlanar() ? ColorTransform::GBR : ColorTransform::RGB;
    } else {
        const bool is_hd = frame.width() >= 1280 || frame.height() > 576; //values from mpv
        cs = is_hd ? ColorTransform::BT709 : ColorTransform::BT601;
    }
    d.colorTransform.setInputColorSpace(cs);
    d.frame = frame;
    if (d.video_format != fmt) {
        qDebug("pixel format changed: %s => %s", qPrintable(d.video_format.name()), qPrintable(fmt.name()));
        d.video_format = fmt;
    }
}
示例4:
// Stream a compact one-line summary of a VideoFormat to QDebug.
QDebug operator<< ( QDebug os, const VideoFormat & videoFormat ){
    os << "w: " << videoFormat.frameWidth();
    os << ", h: " << videoFormat.frameHeight();
    os << ", fps: " << videoFormat.framesPerSecond();
    os << ", count: " << videoFormat.frameCount();
    os << ", type: " << videoFormat.type();
    return os;
}
示例5: DPTR_D
/*!
 * Produce a VideoFrame from the current DXVA-decoded D3D surface.
 * ZeroCopy mode wraps the surface in an interop object for the GL renderer;
 * otherwise the surface is locked read-only and its pixels copied to host
 * memory via copyToFrame().
 */
VideoFrame VideoDecoderDXVA::frame()
{
    DPTR_D(VideoDecoderDXVA);
    //qDebug("frame size: %dx%d", d.frame->width, d.frame->height);
    if (!d.frame->opaque || !d.frame->data[0])
        return VideoFrame();
    if (d.frame->width <= 0 || d.frame->height <= 0 || !d.codec_ctx)
        return VideoFrame();
    // For DXVA2, AVFrame::data[3] carries the IDirect3DSurface9 pointer.
    IDirect3DSurface9 *d3d = (IDirect3DSurface9*)(uintptr_t)d.frame->data[3];
    if (copyMode() == ZeroCopy && d.interop_res) {
        // Zero-copy: hand the surface to the renderer through a metadata-attached interop.
        dxva::SurfaceInteropDXVA *interop = new dxva::SurfaceInteropDXVA(d.interop_res);
        interop->setSurface(d3d, width(), height());
        VideoFrame f(width(), height(), VideoFormat::Format_RGB32); //p->width()
        f.setBytesPerLine(d.width * 4); //used by gl to compute texture size
        f.setMetaData(QStringLiteral("surface_interop"), QVariant::fromValue(VideoSurfaceInteropPtr(interop)));
        f.setTimestamp(d.frame->pkt_pts/1000.0);
        f.setDisplayAspectRatio(d.getDAR(d.frame));
        return f;
    }
    // RAII guard: LockRect on construction, UnlockRect on destruction.
    class ScopedD3DLock {
        IDirect3DSurface9 *mpD3D;
    public:
        ScopedD3DLock(IDirect3DSurface9* d3d, D3DLOCKED_RECT *rect) : mpD3D(d3d) {
            if (FAILED(mpD3D->LockRect(rect, NULL, D3DLOCK_READONLY))) {
                qWarning("Failed to lock surface");
                mpD3D = 0;
            }
        }
        ~ScopedD3DLock() {
            if (mpD3D)
                mpD3D->UnlockRect();
        }
    };
    D3DLOCKED_RECT lock;
    // BUG FIX: the original wrote `ScopedD3DLock(d3d, &lock);` — an unnamed
    // temporary destroyed at the end of that statement, which unlocked the
    // surface BEFORE lock.pBits was read below. A named guard keeps the lock
    // held until this function (and the copy inside copyToFrame) returns.
    ScopedD3DLock d3d_lock(d3d, &lock);
    if (lock.Pitch == 0) {
        return VideoFrame();
    }
    //picth >= desc.Width
    D3DSURFACE_DESC desc;
    d3d->GetDesc(&desc);
    const VideoFormat fmt = VideoFormat(pixelFormatFromD3D(desc.Format));
    if (!fmt.isValid()) {
        qWarning("unsupported dxva pixel format: %#x", desc.Format);
        return VideoFrame();
    }
    //YV12 need swap, not imc3?
    // imc3 U V pitch == Y pitch, but half of the U/V plane is space. we convert to yuv420p here
    // nv12 bpp(1)==1
    // 3rd plane is not used for nv12
    int pitch[3] = { lock.Pitch, 0, 0}; //compute chroma later
    uint8_t *src[] = { (uint8_t*)lock.pBits, 0, 0}; //compute chroma later
    const bool swap_uv = desc.Format == MAKEFOURCC('I','M','C','3');
    return copyToFrame(fmt, d.surface_height, src, pitch, swap_uv);
}
示例6: DPTR_D
// Copy a hardware-decoded surface (already mapped into host memory) into a
// VideoFrame. src/pitch describe up to 3 planes; missing chroma addresses and
// pitches are derived from the luma plane assuming planes are contiguous in
// the surface. swapUV exchanges the U and V planes (needed e.g. for IMC3).
VideoFrame VideoDecoderFFmpegHW::copyToFrame(const VideoFormat& fmt, int surface_h, quint8 *src[], int pitch[], bool swapUV)
{
DPTR_D(VideoDecoderFFmpegHW);
Q_ASSERT_X(src[0] && pitch[0] > 0, "VideoDecoderFFmpegHW::copyToFrame", "src[0] and pitch[0] must be set");
const int nb_planes = fmt.planeCount();
// chroma pitch derived from the luma pitch through the format's plane scaling
const int chroma_pitch = nb_planes > 1 ? fmt.bytesPerLine(pitch[0], 1) : 0;
const int chroma_h = fmt.chromaHeight(surface_h);
int h[] = { surface_h, 0, 0};
for (int i = 1; i < nb_planes; ++i) {
h[i] = chroma_h;
// set chroma address and pitch if not set
if (pitch[i] <= 0)
pitch[i] = chroma_pitch;
if (!src[i])
src[i] = src[i-1] + pitch[i-1]*h[i-1]; // plane i starts right after plane i-1
}
if (swapUV) {
std::swap(src[1], src[2]);
std::swap(pitch[1], pitch[2]);
}
VideoFrame frame;
if (copyMode() == VideoDecoderFFmpegHW::OptimizedCopy && d.gpu_mem.isReady()) {
// Optimized path: copy through GPU-memory helper into one 16-byte-aligned buffer.
int yuv_size = 0;
for (int i = 0; i < nb_planes; ++i) {
yuv_size += pitch[i]*h[i];
}
// additional 15 bytes to ensure 16 bytes aligned
QByteArray buf(15 + yuv_size, 0);
const int offset_16 = (16 - ((uintptr_t)buf.data() & 0x0f)) & 0x0f;
// plane 1, 2... is aligned?
uchar* plane_ptr = (uchar*)buf.data() + offset_16;
QVector<uchar*> dst(nb_planes, 0);
for (int i = 0; i < nb_planes; ++i) {
dst[i] = plane_ptr;
// TODO: add VideoFormat::planeWidth/Height() ?
// pitch instead of surface_width
plane_ptr += pitch[i] * h[i];
d.gpu_mem.copyFrame(src[i], dst[i], pitch[i], h[i], pitch[i]);
}
frame = VideoFrame(buf, width(), height(), fmt);
frame.setBits(dst);
frame.setBytesPerLine(pitch);
} else {
// Fallback: wrap the surface pointers, then deep-copy — src points into the
// mapped surface, which will not outlive this call.
frame = VideoFrame(width(), height(), fmt);
frame.setBits(src);
frame.setBytesPerLine(pitch);
// TODO: why clone is faster()?
// TODO: buffer pool and create VideoFrame when needed to avoid copy? also for other va
frame = frame.clone();
}
// NOTE(review): pkt_pts appears to be in milliseconds here (divided by 1000
// to get seconds) — confirm against the demuxer's timestamp units.
frame.setTimestamp(double(d.frame->pkt_pts)/1000.0);
frame.setDisplayAspectRatio(d.getDAR(d.frame));
d.updateColorDetails(&frame);
return frame;
}
示例7: to
// Convert this frame to format fmt, optionally rescaling to dstSize.
// Hardware frames (no host bits) are first mapped to host memory through the
// "surface_interop" metadata object; software frames go through swscale.
// roi is accepted but not yet honored (see TODO below).
VideoFrame VideoFrame::to(const VideoFormat &fmt, const QSize& dstSize, const QRectF& roi) const
{
if (!isValid() || !constBits(0)) {// hw surface. map to host. only supports rgb packed formats now
Q_D(const VideoFrame);
const QVariant v = d->metadata.value(QStringLiteral("surface_interop"));
if (!v.isValid())
return VideoFrame();
VideoSurfaceInteropPtr si = v.value<VideoSurfaceInteropPtr>();
if (!si)
return VideoFrame();
VideoFrame f;
f.setDisplayAspectRatio(displayAspectRatio());
f.setTimestamp(timestamp());
if (si->map(HostMemorySurface, fmt, &f)) {
// If no further scaling/cropping is requested, the mapped frame is final;
// otherwise recurse on the now-software frame.
if ((!dstSize.isValid() ||dstSize == QSize(width(), height())) && (!roi.isValid() || roi == QRectF(0, 0, width(), height()))) //roi is not supported now
return f;
return f.to(fmt, dstSize, roi);
}
return VideoFrame();
}
// Software path: non-positive dstSize components mean "keep source dimension".
const int w = dstSize.width() > 0 ? dstSize.width() : width();
const int h = dstSize.height() > 0 ? dstSize.height() : height();
if (fmt.pixelFormatFFmpeg() == pixelFormatFFmpeg()
&& w == width() && h == height()
// TODO: roi check.
)
return *this;
Q_D(const VideoFrame);
ImageConverterSWS conv;
conv.setInFormat(pixelFormatFFmpeg());
conv.setOutFormat(fmt.pixelFormatFFmpeg());
conv.setInSize(width(), height());
conv.setOutSize(w, h);
conv.setInRange(colorRange());
if (!conv.convert(d->planes.constData(), d->line_sizes.constData())) {
qWarning() << "VideoFrame::to error: " << format() << "=>" << fmt;
return VideoFrame();
}
VideoFrame f(w, h, fmt, conv.outData());
f.setBits(conv.outPlanes());
f.setBytesPerLine(conv.outLineSizes());
// Planar RGB is treated as GBR plane order; non-RGB color space is left
// unknown here (the renderer decides BT601/BT709 from size).
if (fmt.isRGB()) {
f.setColorSpace(fmt.isPlanar() ? ColorSpace_GBR : ColorSpace_RGB);
} else {
f.setColorSpace(ColorSpace_Unknown);
}
// TODO: color range
f.setTimestamp(timestamp());
f.setDisplayAspectRatio(displayAspectRatio());
f.d_ptr->metadata = d->metadata; // need metadata?
return f;
}
示例8: channelMap
// Build a 4x4 channel-swizzle matrix that rearranges the sampled texture
// channels of a packed pixel format into (Y,U,V,A) order for the shader.
// Each row selects/combines source channels for one output component; e.g.
// for YUYV the texture holds (Y0,U,Y1,V), so Y = 0.5*ch0 + 0.5*ch2, U = ch1,
// V = ch3 (hence rows 3 and 4 are both (0,0,0,1): V and alpha both come from
// the 4th channel). Formats not special-cased below fall back to the
// gl_channel_maps permutation table.
static QMatrix4x4 channelMap(const VideoFormat& fmt)
{
if (fmt.isPlanar()) //currently only for planar
return QMatrix4x4();
switch (fmt.pixelFormat()) {
case VideoFormat::Format_UYVY:
return QMatrix4x4(0.0f, 0.5f, 0.0f, 0.5f,
1.0f, 0.0f, 0.0f, 0.0f,
0.0f, 0.0f, 1.0f, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f);
case VideoFormat::Format_YUYV:
return QMatrix4x4(0.5f, 0.0f, 0.5f, 0.0f,
0.0f, 1.0f, 0.0f, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f,
0.0f, 0.0f, 0.0f, 1.0f);
case VideoFormat::Format_VYUY:
return QMatrix4x4(0.0f, 0.5f, 0.0f, 0.5f,
0.0f, 0.0f, 1.0f, 0.0f,
1.0f, 0.0f, 0.0f, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f);
case VideoFormat::Format_YVYU:
return QMatrix4x4(0.5f, 0.0f, 0.5f, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f,
0.0f, 1.0f, 0.0f, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f);
case VideoFormat::Format_VYU:
return QMatrix4x4(0.0f, 1.0f, 0.0f, 0.0f,
0.0f, 0.0f, 1.0f, 0.0f,
1.0f, 0.0f, 0.0f, 0.0f,
0.0f, 0.0f, 0.0f, 1.0f);
default:
break;
}
// Fall back to a simple channel permutation from the lookup table.
const quint8 *channels = NULL;//{ 0, 1, 2, 3};
for (int i = 0; gl_channel_maps[i].pixfmt != VideoFormat::Format_Invalid; ++i) {
if (gl_channel_maps[i].pixfmt == fmt.pixelFormat()) {
channels = gl_channel_maps[i].channels;
break;
}
}
QMatrix4x4 m;
// No table entry: identity (no swizzle needed).
if (!channels)
return m;
// Permutation matrix: output component i reads source channel channels[i].
m.fill(0);
for (int i = 0; i < 4; ++i) {
m(i, channels[i]) = 1;
}
qDebug() << m;
return m;
}
示例9: DPTR_D
/*!
 * Copy the current DXVA-decoded D3D surface into a host-memory VideoFrame.
 * Locks the surface read-only, determines the pixel format from the DXVA
 * render fourcc, and delegates the plane copy to copyToFrame().
 */
VideoFrame VideoDecoderDXVA::frame()
{
    DPTR_D(VideoDecoderDXVA);
    if (!d.frame->opaque || !d.frame->data[0])
        return VideoFrame();
    if (d.width <= 0 || d.height <= 0 || !d.codec_ctx)
        return VideoFrame();
    // RAII guard: LockRect on construction, UnlockRect on destruction.
    class ScopedD3DLock {
    public:
        ScopedD3DLock(IDirect3DSurface9* d3d, D3DLOCKED_RECT *rect)
            : mpD3D(d3d)
        {
            if (FAILED(mpD3D->LockRect(rect, NULL, D3DLOCK_READONLY))) {
                qWarning("Failed to lock surface");
                mpD3D = 0;
            }
        }
        ~ScopedD3DLock() {
            if (mpD3D)
                mpD3D->UnlockRect();
        }
    private:
        IDirect3DSurface9 *mpD3D;
    };
    // For DXVA2, AVFrame::data[3] carries the IDirect3DSurface9 pointer.
    IDirect3DSurface9 *d3d = (IDirect3DSurface9*)(uintptr_t)d.frame->data[3];
    //picth >= desc.Width
    //D3DSURFACE_DESC desc;
    //d3d->GetDesc(&desc);
    D3DLOCKED_RECT lock;
    // BUG FIX: the original wrote `ScopedD3DLock(d3d, &lock);` — an unnamed
    // temporary destroyed at the end of that statement, which unlocked the
    // surface BEFORE lock.pBits was read below. A named guard keeps the lock
    // held until this function (and the copy inside copyToFrame) returns.
    ScopedD3DLock d3d_lock(d3d, &lock);
    if (lock.Pitch == 0) {
        return VideoFrame();
    }
    const VideoFormat fmt = VideoFormat((int)D3dFindFormat(d.render)->avpixfmt);
    if (!fmt.isValid()) {
        qWarning("unsupported dxva pixel format: %#x", d.render);
        return VideoFrame();
    }
    //YV12 need swap, not imc3?
    // imc3 U V pitch == Y pitch, but half of the U/V plane is space. we convert to yuv420p here
    // nv12 bpp(1)==1
    // 3rd plane is not used for nv12
    int pitch[3] = { lock.Pitch, 0, 0}; //compute chroma later
    uint8_t *src[] = { (uint8_t*)lock.pBits, 0, 0}; //compute chroma later
    const bool swap_uv = d.render == MAKEFOURCC('I','M','C','3');
    return copyToFrame(fmt, d.surface_height, src, pitch, swap_uv);
}
示例10: VideoFrame
// Convert a frame to the given FFmpeg pixel format (fffmt) through swscale,
// applying this converter's brightness/contrast/saturation equalizer values.
// Hardware frames are delegated to VideoFrame::to(); paletted formats pass
// the palette as an extra pseudo-plane.
VideoFrame VideoFrameConverter::convert(const VideoFrame &frame, int fffmt) const
{
if (!frame.isValid() || fffmt == QTAV_PIX_FMT_C(NONE))
return VideoFrame();
if (!frame.constBits(0)) // hw surface
return frame.to(VideoFormat::pixelFormatFromFFmpeg(fffmt));
const VideoFormat format(frame.format());
//if (fffmt == format.pixelFormatFFmpeg())
// return *this;
// Converter is created lazily and reused across calls (m_cvt is mutable state).
if (!m_cvt) {
m_cvt = new ImageConverterSWS();
}
m_cvt->setBrightness(m_eq[0]);
m_cvt->setContrast(m_eq[1]);
m_cvt->setSaturation(m_eq[2]);
m_cvt->setInFormat(format.pixelFormatFFmpeg());
m_cvt->setOutFormat(fffmt);
m_cvt->setInSize(frame.width(), frame.height());
m_cvt->setOutSize(frame.width(), frame.height());
m_cvt->setInRange(frame.colorRange());
// Paletted formats carry the palette as one extra input "plane".
const int pal = format.hasPalette();
QVector<const uchar*> pitch(format.planeCount() + pal);
QVector<int> stride(format.planeCount() + pal);
for (int i = 0; i < format.planeCount(); ++i) {
pitch[i] = frame.constBits(i);
stride[i] = frame.bytesPerLine(i);
}
// NOTE: the metadata key is spelled "pallete" (sic) — it must match the
// producer side exactly, so do not "fix" the spelling here.
const QByteArray paldata(frame.metaData(QStringLiteral("pallete")).toByteArray());
if (pal > 0) {
pitch[1] = (const uchar*)paldata.constData();
stride[1] = paldata.size();
}
if (!m_cvt->convert(pitch.constData(), stride.constData())) {
return VideoFrame();
}
const VideoFormat fmt(fffmt);
VideoFrame f(frame.width(), frame.height(), fmt, m_cvt->outData());
f.setBits(m_cvt->outPlanes());
f.setBytesPerLine(m_cvt->outLineSizes());
f.setTimestamp(frame.timestamp());
f.setDisplayAspectRatio(frame.displayAspectRatio());
// metadata?
// Planar RGB is treated as GBR plane order; non-RGB color space left unknown.
if (fmt.isRGB()) {
f.setColorSpace(fmt.isPlanar() ? ColorSpace_GBR : ColorSpace_RGB);
} else {
f.setColorSpace(ColorSpace_Unknown);
}
// TODO: color range
return f;
}
示例11: Q_UNUSED
/*!
 * Map the wrapped D3D surface to host memory as a VideoFrame in \a format.
 * \a handle points to the destination VideoFrame; \a plane is ignored.
 * Returns \a handle on success, NULL on lock failure or unsupported format.
 */
void* SurfaceInteropDXVA::mapToHost(const VideoFormat &format, void *handle, int plane)
{
    Q_UNUSED(plane);
    // RAII guard: LockRect on construction, UnlockRect on destruction.
    class ScopedD3DLock {
        IDirect3DSurface9 *mpD3D;
    public:
        ScopedD3DLock(IDirect3DSurface9* d3d, D3DLOCKED_RECT *rect) : mpD3D(d3d) {
            if (FAILED(mpD3D->LockRect(rect, NULL, D3DLOCK_READONLY))) {
                qWarning("Failed to lock surface");
                mpD3D = 0;
            }
        }
        ~ScopedD3DLock() {
            if (mpD3D)
                mpD3D->UnlockRect();
        }
    };
    D3DLOCKED_RECT lock;
    // BUG FIX: the original wrote `ScopedD3DLock(m_surface, &lock);` — an
    // unnamed temporary destroyed at the end of that statement, which unlocked
    // the surface BEFORE lock.pBits was read below. A named guard keeps the
    // surface locked for the rest of this function (fromGPU reads the pixels).
    ScopedD3DLock surface_lock(m_surface, &lock);
    if (lock.Pitch == 0)
        return NULL;
    //picth >= desc.Width
    D3DSURFACE_DESC desc;
    m_surface->GetDesc(&desc);
    const VideoFormat fmt = VideoFormat(pixelFormatFromFourcc(desc.Format));
    if (!fmt.isValid()) {
        qWarning("unsupported dxva pixel format: %#x", desc.Format);
        return NULL;
    }
    //YV12 need swap, not imc3?
    // imc3 U V pitch == Y pitch, but half of the U/V plane is space. we convert to yuv420p here
    // nv12 bpp(1)==1
    // 3rd plane is not used for nv12
    int pitch[3] = { lock.Pitch, 0, 0}; //compute chroma later
    quint8 *src[] = { (quint8*)lock.pBits, 0, 0}; //compute chroma later
    Q_ASSERT(src[0] && pitch[0] > 0);
    const bool swap_uv = desc.Format == MAKEFOURCC('I','M','C','3');
    // try to use SSE. fallback to normal copy if SSE is not supported
    VideoFrame frame(VideoFrame::fromGPU(fmt, frame_width, frame_height, desc.Height, src, pitch, true, swap_uv));
    // TODO: check rgb32 because d3d can use hw to convert
    if (format != fmt)
        frame = frame.to(format);
    VideoFrame *f = reinterpret_cast<VideoFrame*>(handle);
    frame.setTimestamp(f->timestamp());
    *f = frame;
    return f;
}
示例12: fmt
// Rebuild the GL shader program when the current frame's material type
// differs from the active one. The color space is chosen from the format
// (RGB/GBR) or, for YUV, from the frame size (BT709 for HD, BT601 for SD).
void GLWidgetRendererPrivate::updateShaderIfNeeded()
{
const VideoFormat& fmt(video_frame.format());
// NOTE(review): the format change is only logged here — video_format is never
// assigned in this function. Presumably it is updated elsewhere; verify,
// otherwise this logs on every frame after a format change.
if (fmt != video_format) {
qDebug("pixel format changed: %s => %s", qPrintable(video_format.name()), qPrintable(fmt.name()));
}
VideoMaterialType *newType = materialType(fmt);
// Same material type => current shader program still valid, nothing to do.
if (material_type == newType)
return;
material_type = newType;
// http://forum.doom9.org/archive/index.php/t-160211.html
ColorTransform::ColorSpace cs = ColorTransform::RGB;
if (fmt.isRGB()) {
if (fmt.isPlanar())
cs = ColorTransform::GBR;
} else {
if (video_frame.width() >= 1280 || video_frame.height() > 576) //values from mpv
cs = ColorTransform::BT709;
else
cs = ColorTransform::BT601;
}
if (!prepareShaderProgram(fmt, cs)) {
qWarning("shader program create error...");
return;
} else {
qDebug("shader program created!!!");
}
}
示例13: set_active_format
// Parse a textual format description and apply it to the capture device.
// Returns the device's result, or false (with a message) if parsing fails.
bool set_active_format (std::shared_ptr<CaptureDevice> dev, const std::string& new_format)
{
    VideoFormat v;
    if (!v.from_string(new_format))
    {
        std::cout << "Invalid string description!" << std::endl;
        return false;
    }
    return dev->set_video_format(v);
}
示例14: allocate
void VideoFrame::allocate(const VideoFormat &format) {
if (format.isEmpty() && d->buffer.isEmpty())
return;
if (!d->buffer.isEmpty() && d->format == format)
return;
d.detach();
d->format = format;
int len = 0;
int offsets[4] = {0};
for (int i=0; i<format.planes(); ++i) {
offsets[i] = len;
len += format.bytesPerPlain(i);
}
d->buffer.resize(len);
for (int i=0; i< format.planes(); ++i)
d->data[i] = (uchar*)d->buffer.data() + offsets[i];
}
示例15: QPainter
// Prepare the QPainter/paint device for drawing a filter onto vframe.
// With a null vframe, (re)activate the existing painter on the current
// device. With a frame, convert it to a QImage-compatible format if needed
// and rebuild the paint device as a QImage that aliases the frame's pixels,
// so painting writes directly into the frame data.
void QPainterFilterContext::initializeOnFrame(VideoFrame *vframe)
{
if (!vframe) {
if (!painter) {
painter = new QPainter(); //warning: more than 1 painter on 1 device
}
if (!paint_device) {
paint_device = painter->device();
}
if (!paint_device && !painter->isActive()) {
qWarning("No paint device and painter is not active. No painting!");
return;
}
if (!painter->isActive())
painter->begin(paint_device);
return;
}
VideoFormat format = vframe->format();
if (!format.isValid()) {
qWarning("Not a valid format");
return;
}
// QImage cannot represent this pixel format: convert the frame to RGB32 first.
if (format.imageFormat() == QImage::Format_Invalid) {
format.setPixelFormat(VideoFormat::Format_RGB32);
if (!cvt) {
cvt = new VideoFrameConverter();
}
*vframe = cvt->convert(*vframe, format);
}
// Tear down the previous paint device; the painter must end first.
if (paint_device) {
if (painter && painter->isActive()) {
painter->end(); //destroy a paint device that is being painted is not allowed!
}
delete paint_device;
paint_device = 0;
}
Q_ASSERT(video_width > 0 && video_height > 0);
// direct draw on frame data, so use VideoFrame::constBits()
paint_device = new QImage((uchar*)vframe->constBits(0), video_width, video_height, vframe->bytesPerLine(0), format.imageFormat());
if (!painter)
painter = new QPainter();
own_painter = true;
own_paint_device = true; //TODO: what about renderer is not a widget?
painter->begin((QImage*)paint_device);
}