This article collects typical usage examples of the C++ method VideoFormat::pixelFormatFFmpeg. If you are wondering how exactly VideoFormat::pixelFormatFFmpeg is used in practice, or what real-world examples of it look like, the hand-picked code samples below may help. You can also explore further usage examples of VideoFormat, the class this method belongs to.
The following presents 9 code examples of VideoFormat::pixelFormatFFmpeg, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code samples.
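Before the examples, a minimal sketch of what the method does: pixelFormatFFmpeg() maps QtAV's VideoFormat to the matching FFmpeg AVPixelFormat value (returned as an int), and the static pixelFormatFromFFmpeg() converts back. The include path and enum value below follow QtAV's public headers but are assumptions, not part of the examples themselves.

#include <QtAV/VideoFormat.h>

using namespace QtAV;

int main()
{
    const VideoFormat fmt(VideoFormat::Format_YUV420P);
    // The FFmpeg pixel format as an int (AV_PIX_FMT_YUV420P for this format).
    const int fffmt = fmt.pixelFormatFFmpeg();
    // The static helper converts the FFmpeg value back to a VideoFormat::PixelFormat.
    const VideoFormat::PixelFormat pf = VideoFormat::pixelFormatFromFFmpeg(fffmt);
    return pf == VideoFormat::Format_YUV420P ? 0 : 1;
}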
Example 1: to
VideoFrame VideoFrame::to(const VideoFormat &fmt, const QSize& dstSize, const QRectF& roi) const
{
    if (!isValid() || !constBits(0)) { // hw surface. map to host. only supports rgb packed formats now
        Q_D(const VideoFrame);
        const QVariant v = d->metadata.value(QStringLiteral("surface_interop"));
        if (!v.isValid())
            return VideoFrame();
        VideoSurfaceInteropPtr si = v.value<VideoSurfaceInteropPtr>();
        if (!si)
            return VideoFrame();
        VideoFrame f;
        f.setDisplayAspectRatio(displayAspectRatio());
        f.setTimestamp(timestamp());
        if (si->map(HostMemorySurface, fmt, &f)) {
            if ((!dstSize.isValid() || dstSize == QSize(width(), height()))
                && (!roi.isValid() || roi == QRectF(0, 0, width(), height()))) // roi is not supported now
                return f;
            return f.to(fmt, dstSize, roi);
        }
        return VideoFrame();
    }
    const int w = dstSize.width() > 0 ? dstSize.width() : width();
    const int h = dstSize.height() > 0 ? dstSize.height() : height();
    if (fmt.pixelFormatFFmpeg() == pixelFormatFFmpeg()
            && w == width() && h == height()
            // TODO: roi check.
            )
        return *this;
    Q_D(const VideoFrame);
    ImageConverterSWS conv;
    conv.setInFormat(pixelFormatFFmpeg());
    conv.setOutFormat(fmt.pixelFormatFFmpeg());
    conv.setInSize(width(), height());
    conv.setOutSize(w, h);
    conv.setInRange(colorRange());
    if (!conv.convert(d->planes.constData(), d->line_sizes.constData())) {
        qWarning() << "VideoFrame::to error: " << format() << "=>" << fmt;
        return VideoFrame();
    }
    VideoFrame f(w, h, fmt, conv.outData());
    f.setBits(conv.outPlanes());
    f.setBytesPerLine(conv.outLineSizes());
    if (fmt.isRGB()) {
        f.setColorSpace(fmt.isPlanar() ? ColorSpace_GBR : ColorSpace_RGB);
    } else {
        f.setColorSpace(ColorSpace_Unknown);
    }
    // TODO: color range
    f.setTimestamp(timestamp());
    f.setDisplayAspectRatio(displayAspectRatio());
    f.d_ptr->metadata = d->metadata; // need metadata?
    return f;
}
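A hypothetical call site for the to() method above (the wrapper function is illustrative, not from the source): convert a decoded frame to packed RGB32 at its original size. An invalid QSize keeps the source dimensions and an invalid QRectF skips the roi, which the code above does not support yet.

#include <QtAV/VideoFrame.h>

using namespace QtAV;

// 'frame' is assumed to be a valid, already decoded VideoFrame.
VideoFrame toRgb32(const VideoFrame &frame)
{
    return frame.to(VideoFormat(VideoFormat::Format_RGB32), QSize(), QRectF());
}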
Example 2: convert
VideoFrame VideoFrameConverter::convert(const VideoFrame &frame, int fffmt) const
{
    if (!frame.isValid() || fffmt == QTAV_PIX_FMT_C(NONE))
        return VideoFrame();
    if (!frame.constBits(0)) // hw surface
        return frame.to(VideoFormat::pixelFormatFromFFmpeg(fffmt));
    const VideoFormat format(frame.format());
    //if (fffmt == format.pixelFormatFFmpeg())
    //    return *this;
    if (!m_cvt) {
        m_cvt = new ImageConverterSWS();
    }
    m_cvt->setBrightness(m_eq[0]);
    m_cvt->setContrast(m_eq[1]);
    m_cvt->setSaturation(m_eq[2]);
    m_cvt->setInFormat(format.pixelFormatFFmpeg());
    m_cvt->setOutFormat(fffmt);
    m_cvt->setInSize(frame.width(), frame.height());
    m_cvt->setOutSize(frame.width(), frame.height());
    m_cvt->setInRange(frame.colorRange());
    const int pal = format.hasPalette();
    QVector<const uchar*> pitch(format.planeCount() + pal);
    QVector<int> stride(format.planeCount() + pal);
    for (int i = 0; i < format.planeCount(); ++i) {
        pitch[i] = frame.constBits(i);
        stride[i] = frame.bytesPerLine(i);
    }
    // note: the palette metadata key is spelled "pallete" by the code that sets it
    const QByteArray paldata(frame.metaData(QStringLiteral("pallete")).toByteArray());
    if (pal > 0) {
        pitch[1] = (const uchar*)paldata.constData();
        stride[1] = paldata.size();
    }
    if (!m_cvt->convert(pitch.constData(), stride.constData())) {
        return VideoFrame();
    }
    const VideoFormat fmt(fffmt);
    VideoFrame f(frame.width(), frame.height(), fmt, m_cvt->outData());
    f.setBits(m_cvt->outPlanes());
    f.setBytesPerLine(m_cvt->outLineSizes());
    f.setTimestamp(frame.timestamp());
    f.setDisplayAspectRatio(frame.displayAspectRatio());
    // metadata?
    if (fmt.isRGB()) {
        f.setColorSpace(fmt.isPlanar() ? ColorSpace_GBR : ColorSpace_RGB);
    } else {
        f.setColorSpace(ColorSpace_Unknown);
    }
    // TODO: color range
    return f;
}
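A hedged usage sketch for the converter above. In QtAV, VideoFrameConverter is declared alongside VideoFrame (header path is an assumption); the wrapper in Example 6 below forwards a VideoFormat to this int-based overload, so both spellings are equivalent.

#include <QtAV/VideoFrame.h>

using namespace QtAV;

VideoFrame toYuv420p(const VideoFrameConverter &cvt, const VideoFrame &frame)
{
    // Same as cvt.convert(frame, VideoFormat(VideoFormat::Format_YUV420P))
    // via the VideoFormat overload shown in Example 6.
    return cvt.convert(frame, VideoFormat(VideoFormat::Format_YUV420P).pixelFormatFFmpeg());
}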
Example 3: convert
VideoFrame VideoFrameConverter::convert(const VideoFrame &frame, int fffmt) const
{
    if (!frame.isValid() || fffmt == QTAV_PIX_FMT_C(NONE))
        return VideoFrame();
    if (!frame.bits(0)) // hw surface
        return frame.to(VideoFormat::pixelFormatFromFFmpeg(fffmt));
    const VideoFormat format(frame.format());
    //if (fffmt == format.pixelFormatFFmpeg())
    //    return *this;
    if (!m_cvt) {
        m_cvt = new ImageConverterSWS();
    }
    m_cvt->setBrightness(m_eq[0]);
    m_cvt->setContrast(m_eq[1]);
    m_cvt->setSaturation(m_eq[2]);
    m_cvt->setInFormat(format.pixelFormatFFmpeg());
    m_cvt->setOutFormat(fffmt);
    m_cvt->setInSize(frame.width(), frame.height());
    m_cvt->setOutSize(frame.width(), frame.height());
    QVector<const uchar*> pitch(format.planeCount());
    QVector<int> stride(format.planeCount());
    for (int i = 0; i < format.planeCount(); ++i) {
        pitch[i] = frame.bits(i);
        stride[i] = frame.bytesPerLine(i);
    }
    if (!m_cvt->convert(pitch.constData(), stride.constData())) {
        return VideoFrame();
    }
    const VideoFormat fmt(fffmt);
    VideoFrame f(m_cvt->outData(), frame.width(), frame.height(), fmt);
    f.setBits(m_cvt->outPlanes());
    f.setBytesPerLine(m_cvt->outLineSizes());
    f.setTimestamp(frame.timestamp());
    // metadata?
    if (fmt.isRGB()) {
        f.setColorSpace(fmt.isPlanar() ? ColorSpace_GBR : ColorSpace_RGB);
    } else {
        f.setColorSpace(ColorSpace_Unknown);
    }
    return f;
}
Example 4: initTextures
bool GLWidgetRendererPrivate::initTextures(const VideoFormat &fmt)
{
    // isSupported(pixfmt)
    if (!fmt.isValid())
        return false;
    video_format.setPixelFormatFFmpeg(fmt.pixelFormatFFmpeg());
    //http://www.berkelium.com/OpenGL/GDC99/internalformat.html
    //NV12: UV is 1 plane. 16 bits as a unit. GL_LUMINANCE4, 8, 16, ... 32?
    //GL_LUMINANCE, GL_LUMINANCE_ALPHA are deprecated in GL3, removed in GL3.1,
    //replaced by GL_RED, GL_RG, GL_RGB, GL_RGBA for 1, 2, 3, 4 channel images
    //http://www.gamedev.net/topic/634850-do-luminance-textures-still-exist-to-opengl/
    //https://github.com/kivy/kivy/issues/1738: GL_LUMINANCE does work on a Galaxy Tab 2. LUMINANCE_ALPHA very slow on Linux
    //ALPHA: vec4(1,1,1,A), LUMINANCE: (L,L,L,1), LUMINANCE_ALPHA: (L,L,L,A)
    /*
     * To support both planar and packed formats, use GL_ALPHA and sample r,g,a in the shader as xbmc does,
     * or use a swizzle mask to lay out the channels: http://www.opengl.org/wiki/Texture#Swizzle_mask
     * GL ES2 support: GL_RGB, GL_RGBA, GL_LUMINANCE, GL_LUMINANCE_ALPHA, GL_ALPHA
     * http://stackoverflow.com/questions/18688057/which-opengl-es-2-0-texture-formats-are-color-depth-or-stencil-renderable
     */
    internal_format = QVector<GLint>(fmt.planeCount(), FMT_INTERNAL);
    data_format = QVector<GLenum>(fmt.planeCount(), FMT);
    data_type = QVector<GLenum>(fmt.planeCount(), GL_UNSIGNED_BYTE);
    if (fmt.isPlanar()) {
        /*!
         * In GLES internal_format == data_format. GL_LUMINANCE_ALPHA is 2 bytes,
         * so NV12 uses GL_LUMINANCE_ALPHA and YV12 uses GL_ALPHA.
         */
        qDebug("bpp %d", fmt.bytesPerPixel());
        internal_format[0] = data_format[0] = GL_LUMINANCE; // or GL_RED for desktop GL
        if (fmt.planeCount() == 2) {
            internal_format[1] = data_format[1] = GL_LUMINANCE_ALPHA;
        } else {
            if (fmt.bytesPerPixel(1) == 2) {
                // read 16 bits and compute the real luminance in the shader
                internal_format[0] = data_format[0] = GL_LUMINANCE_ALPHA;
                internal_format[1] = data_format[1] = GL_LUMINANCE_ALPHA; //vec4(L,L,L,A)
                internal_format[2] = data_format[2] = GL_LUMINANCE_ALPHA;
            } else {
                internal_format[1] = data_format[1] = GL_LUMINANCE; //vec4(L,L,L,1)
                internal_format[2] = data_format[2] = GL_ALPHA;
            }
        }
        for (int i = 0; i < internal_format.size(); ++i) {
            // xbmc uses bpp, not bpp(plane)
            //internal_format[i] = GetGLInternalFormat(data_format[i], fmt.bytesPerPixel(i));
            //data_format[i] = internal_format[i];
        }
    } else {
        //glPixelStorei(GL_UNPACK_ALIGNMENT, fmt.bytesPerPixel());
        // TODO: if no alpha, data_fmt is not GL_BGRA. align at every upload?
    }
    for (int i = 0; i < fmt.planeCount(); ++i) {
        //qDebug("format: %#x GL_LUMINANCE_ALPHA=%#x", data_format[i], GL_LUMINANCE_ALPHA);
        if (fmt.bytesPerPixel(i) == 2 && fmt.planeCount() == 3) {
            //data_type[i] = GL_UNSIGNED_SHORT;
        }
        const int bpp_gl = bytesOfGLFormat(data_format[i], data_type[i]);
        const int pad = qCeil((qreal)(texture_size[i].width() - effective_tex_width[i])/(qreal)bpp_gl);
        texture_size[i].setWidth(qCeil((qreal)texture_size[i].width()/(qreal)bpp_gl));
        effective_tex_width[i] /= bpp_gl; //fmt.bytesPerPixel(i);
        //effective_tex_width_ratio =
        qDebug("texture width: %d - %d = pad: %d. bpp(gl): %d", texture_size[i].width(), effective_tex_width[i], pad, bpp_gl);
    }
    /*
     * There are two fragment shaders: rgb and yuv.
     * Packed rgb needs only one texture; planar rgb behaves like yuv.
     * To support packed yuv, planar yuv and mixed formats (NV12), we provide a texture
     * sample for each channel. For packed formats every channel samples the same texture;
     * for planar formats with a packed plane (e.g. the UV plane of NV12) those channels
     * share one texture sample.
     * But the number of actual textures we upload is the plane count, i.e. the number
     * of texture ids equals the plane count.
     */
    if (textures.size() != fmt.planeCount()) {
        glDeleteTextures(textures.size(), textures.data());
        qDebug("delete %d textures", textures.size());
        textures.clear();
        textures.resize(fmt.planeCount());
        glGenTextures(textures.size(), textures.data());
    }
    if (!hasGLSL) {
        initTexture(textures[0], internal_format[0], data_format[0], data_type[0], texture_size[0].width(), texture_size[0].height());
        // more than 1?
        qWarning("Does not support GLSL!");
        return false;
    }
    qDebug("init textures...");
    initTexture(textures[0], internal_format[0], data_format[0], data_type[0], texture_size[0].width(), texture_size[0].height());
    for (int i = 1; i < textures.size(); ++i) {
        initTexture(textures[i], internal_format[i], data_format[i], data_type[i], texture_size[i].width(), texture_size[i].height());
    }
    return true;
}
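The initTexture() helper called above is not part of this listing. A plausible minimal version, with a signature inferred from the call sites (id, internal format, data format, data type, size), allocates storage with a null data pointer so per-frame uploads can use glTexSubImage2D later. Treat it as a sketch, not QtAV's actual implementation.

bool GLWidgetRendererPrivate::initTexture(GLuint tex, GLint internalFormat, GLenum format, GLenum dataType, int width, int height)
{
    glBindTexture(GL_TEXTURE_2D, tex);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    // Allocate storage only; pixel data is uploaded per frame with glTexSubImage2D.
    glTexImage2D(GL_TEXTURE_2D, 0, internalFormat, width, height, 0, format, dataType, NULL);
    glBindTexture(GL_TEXTURE_2D, 0);
    return true;
}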
Example 5: prepareShaderProgram
bool GLWidgetRendererPrivate::prepareShaderProgram(const VideoFormat &fmt)
{
    // isSupported(pixfmt)
    if (!fmt.isValid())
        return false;
    releaseShaderProgram();
    video_format.setPixelFormatFFmpeg(fmt.pixelFormatFFmpeg());
    // TODO: only two kinds: packed.glsl, planar.glsl
    QString frag;
    if (fmt.isPlanar()) {
        frag = getShaderFromFile("shaders/yuv_rgb.f.glsl");
    } else {
        frag = getShaderFromFile("shaders/rgb.f.glsl");
    }
    if (frag.isEmpty())
        return false;
    if (!fmt.isRGB() && fmt.isPlanar() && fmt.bytesPerPixel(0) == 2) {
        if (fmt.isBigEndian())
            frag.prepend("#define YUV16BITS_BE_LUMINANCE_ALPHA\n");
        else
            frag.prepend("#define YUV16BITS_LE_LUMINANCE_ALPHA\n");
        frag.prepend(QString("#define YUV%1P\n").arg(fmt.bitsPerPixel(0)));
    }
#if NO_QGL_SHADER
    program = createProgram(kVertexShader, frag.toUtf8().constData());
    if (!program) {
        qWarning("Could not create shader program.");
        return false;
    }
    // vertex shader
    a_Position = glGetAttribLocation(program, "a_Position");
    a_TexCoords = glGetAttribLocation(program, "a_TexCoords");
    u_matrix = glGetUniformLocation(program, "u_MVP_matrix");
    // fragment shader
    u_colorMatrix = glGetUniformLocation(program, "u_colorMatrix");
#else
    if (!shader_program->addShaderFromSourceCode(QGLShader::Vertex, kVertexShader)) {
        qWarning("Failed to add vertex shader: %s", shader_program->log().toUtf8().constData());
        return false;
    }
    if (!shader_program->addShaderFromSourceCode(QGLShader::Fragment, frag)) {
        qWarning("Failed to add fragment shader: %s", shader_program->log().toUtf8().constData());
        return false;
    }
    if (!shader_program->link()) {
        qWarning("Failed to link shader program: %s", shader_program->log().toUtf8().constData());
        return false;
    }
    // vertex shader
    a_Position = shader_program->attributeLocation("a_Position");
    a_TexCoords = shader_program->attributeLocation("a_TexCoords");
    u_matrix = shader_program->uniformLocation("u_MVP_matrix");
    // fragment shader
    u_colorMatrix = shader_program->uniformLocation("u_colorMatrix");
#endif //NO_QGL_SHADER
    qDebug("glGetAttribLocation(\"a_Position\") = %d\n", a_Position);
    qDebug("glGetAttribLocation(\"a_TexCoords\") = %d\n", a_TexCoords);
    qDebug("glGetUniformLocation(\"u_MVP_matrix\") = %d\n", u_matrix);
    qDebug("glGetUniformLocation(\"u_colorMatrix\") = %d\n", u_colorMatrix);
    if (fmt.isRGB())
        u_Texture.resize(1);
    else
        u_Texture.resize(fmt.channels());
    for (int i = 0; i < u_Texture.size(); ++i) {
        const QString tex_var = QString("u_Texture%1").arg(i);
#if NO_QGL_SHADER
        u_Texture[i] = glGetUniformLocation(program, tex_var.toUtf8().constData());
#else
        u_Texture[i] = shader_program->uniformLocation(tex_var);
#endif
        qDebug("glGetUniformLocation(\"%s\") = %d\n", tex_var.toUtf8().constData(), u_Texture[i]);
    }
    return true;
}
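kVertexShader is referenced above but not shown in this listing. A minimal sketch, written as a C++ string constant and kept consistent with the attribute and uniform names the code queries (a_Position, a_TexCoords, u_MVP_matrix); the varying name is an assumption.

static const char kVertexShader[] =
    "attribute vec4 a_Position;\n"
    "attribute vec2 a_TexCoords;\n"
    "uniform mat4 u_MVP_matrix;\n"
    "varying vec2 v_TexCoords;\n"
    "void main() {\n"
    "  gl_Position = u_MVP_matrix * a_Position;\n"
    "  v_TexCoords = a_TexCoords;\n"
    "}\n";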
Example 6: convert
VideoFrame VideoFrameConverter::convert(const VideoFrame& frame, const VideoFormat &fmt) const
{
    return convert(frame, fmt.pixelFormatFFmpeg());
}
Example 7: convertTo
bool convertTo(const VideoFormat& fmt) {
    return convertTo(fmt.pixelFormatFFmpeg());
}
Example 8: setOutFormat
void ImageConverter::setOutFormat(const VideoFormat& format)
{
    setOutFormat(format.pixelFormatFFmpeg());
}
Example 9: setInFormat
void ImageConverter::setInFormat(const VideoFormat& format)
{
    d_func().fmt_in = (AVPixelFormat)format.pixelFormatFFmpeg();
}
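Putting Examples 8 and 9 together, a hypothetical conversion setup mirroring what Examples 1 and 2 do internally. ImageConverterSWS is a QtAV-internal class, so this is illustrative only; the size setters and output accessors are the ones used in the examples above.

ImageConverterSWS conv;
conv.setInFormat(VideoFormat(VideoFormat::Format_YUV420P));  // the Example 9 path
conv.setOutFormat(VideoFormat(VideoFormat::Format_RGB32));   // forwards per Example 8
conv.setInSize(1920, 1080);
conv.setOutSize(1920, 1080);
// conv.convert(planes, lineSizes) would then run the swscale conversion,
// with the result available via conv.outData()/outPlanes()/outLineSizes().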