This article collects typical usage examples of the C++ method VideoFrame::timestamp. If you are wondering how VideoFrame::timestamp is used in C++, how to call it, or what real-world usage looks like, the hand-picked code examples here may help. You can also explore further usage examples of the containing class, VideoFrame.
Below are 10 code examples of the VideoFrame::timestamp method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
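A note on units that all of the examples share: VideoFrame::timestamp() returns the presentation time in seconds as a floating-point value, setTimestamp() copies it onto a newly built frame, and the encoder examples derive an integer pts from it by multiplying by the frame rate. The short sketch below only illustrates that pattern; the helper name restampAsPts and the default 25 fps are illustrative assumptions, not QtAV API.

// Minimal sketch, assuming the QtAV VideoFrame API used in the examples below.
#include <QtAV/VideoFrame.h>

using namespace QtAV;

// Copy the source timestamp (seconds) onto a derived frame and
// convert it to an integer pts counted in frames, as the encoder examples do.
static qint64 restampAsPts(const VideoFrame &src, VideoFrame &dst, qreal fps = 25.0)
{
    dst.setTimestamp(src.timestamp());    // timestamp() is in seconds
    return qint64(src.timestamp() * fps); // pts in frame units
}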
Example 1: main
int main(int argc, char *argv[])
{
    QCoreApplication a(argc, argv);
    FrameReader r;
    r.setMedia(a.arguments().last());
    QQueue<qint64> t;
    int count = 0;
    qint64 t0 = QDateTime::currentMSecsSinceEpoch();
    while (r.readMore()) {
        while (r.hasEnoughVideoFrames()) {
            const VideoFrame f = r.getVideoFrame(); //TODO: if eof
            if (!f)
                continue;
            count++;
            //r.readMore();
            const qint64 now = QDateTime::currentMSecsSinceEpoch();
            const qint64 dt = now - t0;
            t.enqueue(now);
            printf("decode @%.3f count: %d, elapsed: %lld, fps: %.1f/%.1f\r", f.timestamp(), count, dt, count*1000.0/dt, t.size()*1000.0/(now - t.first()));
            fflush(0);
            if (t.size() > 10)
                t.dequeue();
        }
    }
    while (r.hasVideoFrame()) {
        const VideoFrame f = r.getVideoFrame();
        qDebug("pts: %.3f", f.timestamp());
    }
    qDebug("read done");
    return 0;
}
Example 2: frame
VideoFrame VideoDecoderVDA::frame()
{
    DPTR_D(VideoDecoderVDA);
    CVPixelBufferRef cv_buffer = (CVPixelBufferRef)d.frame->data[3];
    if (!cv_buffer) {
        qDebug("Frame buffer is empty.");
        return VideoFrame();
    }
    if (CVPixelBufferGetDataSize(cv_buffer) <= 0) {
        qDebug("Empty frame buffer");
        return VideoFrame();
    }
    VideoFormat::PixelFormat pixfmt = format_from_cv(CVPixelBufferGetPixelFormatType(cv_buffer));
    if (pixfmt == VideoFormat::Format_Invalid) {
        qWarning("unsupported vda pixel format: %#x", CVPixelBufferGetPixelFormatType(cv_buffer));
        return VideoFrame();
    }
    // we can map the cv buffer addresses to video frame in SurfaceInteropCVBuffer. (may need VideoSurfaceInterop::mapToTexture())
    class SurfaceInteropCVBuffer Q_DECL_FINAL: public VideoSurfaceInterop {
        bool glinterop;
        CVPixelBufferRef cvbuf; // keep ref until video frame is destroyed
    public:
        SurfaceInteropCVBuffer(CVPixelBufferRef cv, bool gl) : glinterop(gl), cvbuf(cv) {
            //CVPixelBufferRetain(cvbuf);
        }
        ~SurfaceInteropCVBuffer() {
            CVPixelBufferRelease(cvbuf);
        }
        void* mapToHost(const VideoFormat &format, void *handle, int plane) {
            Q_UNUSED(plane);
            CVPixelBufferLockBaseAddress(cvbuf, 0);
            const VideoFormat fmt(format_from_cv(CVPixelBufferGetPixelFormatType(cvbuf)));
            if (!fmt.isValid()) {
                CVPixelBufferUnlockBaseAddress(cvbuf, 0);
                return NULL;
            }
            const int w = CVPixelBufferGetWidth(cvbuf);
            const int h = CVPixelBufferGetHeight(cvbuf);
            uint8_t *src[3];
            int pitch[3];
            for (int i = 0; i < fmt.planeCount(); ++i) {
                // get address results in internal copy
                src[i] = (uint8_t*)CVPixelBufferGetBaseAddressOfPlane(cvbuf, i);
                pitch[i] = CVPixelBufferGetBytesPerRowOfPlane(cvbuf, i);
            }
            CVPixelBufferUnlockBaseAddress(cvbuf, 0);
            //CVPixelBufferRelease(cv_buffer); // release when video frame is destroyed
            VideoFrame frame(VideoFrame::fromGPU(fmt, w, h, h, src, pitch));
            if (fmt != format)
                frame = frame.to(format);
            VideoFrame *f = reinterpret_cast<VideoFrame*>(handle);
            frame.setTimestamp(f->timestamp());
            frame.setDisplayAspectRatio(f->displayAspectRatio());
            *f = frame;
            return f;
        }
Example 3: encode
bool VideoEncoderFFmpeg::encode(const VideoFrame &frame)
{
    DPTR_D(VideoEncoderFFmpeg);
    AVFrame *f = NULL;
    if (frame.isValid()) {
        f = av_frame_alloc();
        f->format = frame.format().pixelFormatFFmpeg();
        f->width = frame.width();
        f->height = frame.height();
        // f->quality = d.avctx->global_quality;
        switch (timestampMode()) {
        case TimestampCopy:
            f->pts = int64_t(frame.timestamp()*frameRate()); // TODO: check monotonically increasing and fix if not. or another mode?
            break;
        case TimestampMonotonic:
            f->pts = d.nb_encoded+1;
            break;
        default:
            break;
        }
        // pts is set in muxer
        const int nb_planes = frame.planeCount();
        for (int i = 0; i < nb_planes; ++i) {
            f->linesize[i] = frame.bytesPerLine(i);
            f->data[i] = (uint8_t*)frame.constBits(i);
        }
        if (d.avctx->width <= 0) {
            d.avctx->width = frame.width();
        }
        if (d.avctx->height <= 0) {
            d.avctx->height = frame.height();
        }
    }
    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = (uint8_t*)d.buffer.constData();
    pkt.size = d.buffer.size();
    int got_packet = 0;
    int ret = avcodec_encode_video2(d.avctx, &pkt, f, &got_packet);
    av_frame_free(&f);
    if (ret < 0) {
        qWarning("error avcodec_encode_video2: %s", av_err2str(ret));
        return false;
    }
    d.nb_encoded++;
    if (!got_packet) {
        qWarning("no packet got");
        d.packet = Packet();
        // invalid frame means eof
        return frame.isValid();
    }
    // qDebug("enc avpkt.pts: %lld, dts: %lld.", pkt.pts, pkt.dts);
    d.packet = Packet::fromAVPacket(&pkt, av_q2d(d.avctx->time_base));
    // qDebug("enc packet.pts: %.3f, dts: %.3f.", d.packet.pts, d.packet.dts);
    return true;
}
Example 4: convert
VideoFrame VideoFrameConverter::convert(const VideoFrame &frame, int fffmt) const
{
    if (!frame.isValid() || fffmt == QTAV_PIX_FMT_C(NONE))
        return VideoFrame();
    if (!frame.constBits(0)) // hw surface
        return frame.to(VideoFormat::pixelFormatFromFFmpeg(fffmt));
    const VideoFormat format(frame.format());
    //if (fffmt == format.pixelFormatFFmpeg())
    //    return *this;
    if (!m_cvt) {
        m_cvt = new ImageConverterSWS();
    }
    m_cvt->setBrightness(m_eq[0]);
    m_cvt->setContrast(m_eq[1]);
    m_cvt->setSaturation(m_eq[2]);
    m_cvt->setInFormat(format.pixelFormatFFmpeg());
    m_cvt->setOutFormat(fffmt);
    m_cvt->setInSize(frame.width(), frame.height());
    m_cvt->setOutSize(frame.width(), frame.height());
    m_cvt->setInRange(frame.colorRange());
    const int pal = format.hasPalette();
    QVector<const uchar*> pitch(format.planeCount() + pal);
    QVector<int> stride(format.planeCount() + pal);
    for (int i = 0; i < format.planeCount(); ++i) {
        pitch[i] = frame.constBits(i);
        stride[i] = frame.bytesPerLine(i);
    }
    const QByteArray paldata(frame.metaData(QStringLiteral("pallete")).toByteArray());
    if (pal > 0) {
        pitch[1] = (const uchar*)paldata.constData();
        stride[1] = paldata.size();
    }
    if (!m_cvt->convert(pitch.constData(), stride.constData())) {
        return VideoFrame();
    }
    const VideoFormat fmt(fffmt);
    VideoFrame f(frame.width(), frame.height(), fmt, m_cvt->outData());
    f.setBits(m_cvt->outPlanes());
    f.setBytesPerLine(m_cvt->outLineSizes());
    f.setTimestamp(frame.timestamp());
    f.setDisplayAspectRatio(frame.displayAspectRatio());
    // metadata?
    if (fmt.isRGB()) {
        f.setColorSpace(fmt.isPlanar() ? ColorSpace_GBR : ColorSpace_RGB);
    } else {
        f.setColorSpace(ColorSpace_Unknown);
    }
    // TODO: color range
    return f;
}
Example 5: mapToHost
void* SurfaceInteropDXVA::mapToHost(const VideoFormat &format, void *handle, int plane)
{
    Q_UNUSED(plane);
    class ScopedD3DLock {
        IDirect3DSurface9 *mpD3D;
    public:
        ScopedD3DLock(IDirect3DSurface9* d3d, D3DLOCKED_RECT *rect) : mpD3D(d3d) {
            if (FAILED(mpD3D->LockRect(rect, NULL, D3DLOCK_READONLY))) {
                qWarning("Failed to lock surface");
                mpD3D = 0;
            }
        }
        ~ScopedD3DLock() {
            if (mpD3D)
                mpD3D->UnlockRect();
        }
    };
    D3DLOCKED_RECT lock;
    ScopedD3DLock d3d_lock(m_surface, &lock); // named so the surface stays locked while we read it
    if (lock.Pitch == 0)
        return NULL;
    // pitch >= desc.Width
    D3DSURFACE_DESC desc;
    m_surface->GetDesc(&desc);
    const VideoFormat fmt = VideoFormat(pixelFormatFromFourcc(desc.Format));
    if (!fmt.isValid()) {
        qWarning("unsupported dxva pixel format: %#x", desc.Format);
        return NULL;
    }
    // YV12 needs swap, not imc3?
    // imc3 U V pitch == Y pitch, but half of the U/V plane is space. we convert to yuv420p here
    // nv12 bpp(1)==1
    // 3rd plane is not used for nv12
    int pitch[3] = { lock.Pitch, 0, 0 }; // compute chroma later
    quint8 *src[] = { (quint8*)lock.pBits, 0, 0 }; // compute chroma later
    Q_ASSERT(src[0] && pitch[0] > 0);
    const bool swap_uv = desc.Format == MAKEFOURCC('I','M','C','3');
    // try to use SSE. fallback to normal copy if SSE is not supported
    VideoFrame frame(VideoFrame::fromGPU(fmt, frame_width, frame_height, desc.Height, src, pitch, true, swap_uv));
    // TODO: check rgb32 because d3d can use hw to convert
    if (format != fmt)
        frame = frame.to(format);
    VideoFrame *f = reinterpret_cast<VideoFrame*>(handle);
    frame.setTimestamp(f->timestamp());
    *f = frame;
    return f;
}
Example 6: encode
bool VideoEncoderFFmpeg::encode(const VideoFrame &frame)
{
    DPTR_D(VideoEncoderFFmpeg);
    AVFrame *f = NULL;
    if (frame.isValid()) {
        f = av_frame_alloc();
        f->format = frame.format().pixelFormatFFmpeg();
        f->width = frame.width();
        f->height = frame.height();
        // TODO: record last pts
        f->pts = int64_t(frame.timestamp()*frameRate());
        // pts is set in muxer
        const int nb_planes = frame.planeCount();
        for (int i = 0; i < nb_planes; ++i) {
            f->linesize[i] = frame.bytesPerLine(i);
            f->data[i] = (uint8_t*)frame.bits(i);
        }
        if (d.avctx->width <= 0) {
            d.avctx->width = frame.width();
        }
        if (d.avctx->height <= 0) {
            d.avctx->height = frame.height();
        }
    }
    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = (uint8_t*)d.buffer.constData();
    pkt.size = d.buffer.size();
    int got_packet = 0;
    int ret = avcodec_encode_video2(d.avctx, &pkt, f, &got_packet);
    av_frame_free(&f);
    if (ret < 0) {
        //qWarning("error avcodec_encode_video2: %s", av_err2str(ret));
        return false;
    }
    if (!got_packet) {
        qWarning("no packet got");
        return false;
    }
    qDebug("enc avpkt.pts: %lld, dts: %lld.", pkt.pts, pkt.dts);
    d.packet = Packet::fromAVPacket(&pkt, av_q2d(d.avctx->time_base));
    qDebug("enc packet.pts: %.3f, dts: %.3f.", d.packet.pts, d.packet.dts);
    return true;
}
Example 7: convert
VideoFrame VideoFrameConverter::convert(const VideoFrame &frame, int fffmt) const
{
    if (!frame.isValid() || fffmt == QTAV_PIX_FMT_C(NONE))
        return VideoFrame();
    if (!frame.bits(0)) // hw surface
        return frame.to(VideoFormat::pixelFormatFromFFmpeg(fffmt));
    const VideoFormat format(frame.format());
    //if (fffmt == format.pixelFormatFFmpeg())
    //    return *this;
    if (!m_cvt) {
        m_cvt = new ImageConverterSWS();
    }
    m_cvt->setBrightness(m_eq[0]);
    m_cvt->setContrast(m_eq[1]);
    m_cvt->setSaturation(m_eq[2]);
    m_cvt->setInFormat(format.pixelFormatFFmpeg());
    m_cvt->setOutFormat(fffmt);
    m_cvt->setInSize(frame.width(), frame.height());
    m_cvt->setOutSize(frame.width(), frame.height());
    QVector<const uchar*> pitch(format.planeCount());
    QVector<int> stride(format.planeCount());
    for (int i = 0; i < format.planeCount(); ++i) {
        pitch[i] = frame.bits(i);
        stride[i] = frame.bytesPerLine(i);
    }
    if (!m_cvt->convert(pitch.constData(), stride.constData())) {
        return VideoFrame();
    }
    const VideoFormat fmt(fffmt);
    VideoFrame f(m_cvt->outData(), frame.width(), frame.height(), fmt);
    f.setBits(m_cvt->outPlanes());
    f.setBytesPerLine(m_cvt->outLineSizes());
    f.setTimestamp(frame.timestamp());
    // metadata?
    if (fmt.isRGB()) {
        f.setColorSpace(fmt.isPlanar() ? ColorSpace_GBR : ColorSpace_RGB);
    } else {
        f.setColorSpace(ColorSpace_Unknow);
    }
    return f;
}
Example 8: mapToHost
void* InteropResource::mapToHost(const VideoFormat &format, void *handle, int picIndex, const CUVIDPROCPARAMS &param, int width, int height, int coded_height)
{
    AutoCtxLock locker((cuda_api*)this, lock);
    Q_UNUSED(locker);
    CUdeviceptr devptr;
    unsigned int pitch;
    CUDA_ENSURE(cuvidMapVideoFrame(dec, picIndex, &devptr, &pitch, const_cast<CUVIDPROCPARAMS*>(&param)), NULL);
    CUVIDAutoUnmapper unmapper(this, dec, devptr);
    Q_UNUSED(unmapper);
    uchar* host_data = NULL;
    const size_t host_size = pitch*coded_height*3/2;
    CUDA_ENSURE(cuMemAllocHost((void**)&host_data, host_size), NULL);
    // copy to memory not allocated by cuda is possible but much slower
    CUDA_ENSURE(cuMemcpyDtoH(host_data, devptr, host_size), NULL);
    VideoFrame frame(width, height, VideoFormat::Format_NV12);
    uchar *planes[] = {
        host_data,
        host_data + pitch * coded_height
    };
    frame.setBits(planes);
    int pitches[] = { (int)pitch, (int)pitch };
    frame.setBytesPerLine(pitches);
    VideoFrame *f = reinterpret_cast<VideoFrame*>(handle);
    frame.setTimestamp(f->timestamp());
    frame.setDisplayAspectRatio(f->displayAspectRatio());
    if (format == frame.format())
        *f = frame.clone();
    else
        *f = frame.to(format);
    cuMemFreeHost(host_data);
    return f;
}
Example 9: mapToHost
void* SurfaceInteropVAAPI::mapToHost(const VideoFormat &format, void *handle, int plane)
{
    Q_UNUSED(plane);
    VAImage image;
    static const unsigned int fcc[] = { VA_FOURCC_NV12, VA_FOURCC_YV12, VA_FOURCC_IYUV, 0 };
    va_new_image(m_surface->vadisplay(), fcc, &image, m_surface->width(), m_surface->height());
    if (image.image_id == VA_INVALID_ID)
        return NULL;
    void *p_base;
    VA_ENSURE(vaGetImage(m_surface->vadisplay(), m_surface->get(), 0, 0, m_surface->width(), m_surface->height(), image.image_id), NULL);
    VA_ENSURE(vaMapBuffer(m_surface->vadisplay(), image.buf, &p_base), NULL); //TODO: destroy image before return
    VideoFormat::PixelFormat pixfmt = pixelFormatFromVA(image.format.fourcc);
    bool swap_uv = image.format.fourcc != VA_FOURCC_NV12;
    if (pixfmt == VideoFormat::Format_Invalid) {
        qWarning("unsupported vaapi pixel format: %#x", image.format.fourcc);
        VA_ENSURE(vaDestroyImage(m_surface->vadisplay(), image.image_id), NULL);
        return NULL;
    }
    const VideoFormat fmt(pixfmt);
    uint8_t *src[3];
    int pitch[3];
    for (int i = 0; i < fmt.planeCount(); ++i) {
        src[i] = (uint8_t*)p_base + image.offsets[i];
        pitch[i] = image.pitches[i];
    }
    VideoFrame frame = VideoFrame::fromGPU(fmt, frame_width, frame_height, m_surface->height(), src, pitch, true, swap_uv);
    if (format != fmt)
        frame = frame.to(format);
    VAWARN(vaUnmapBuffer(m_surface->vadisplay(), image.buf));
    VAWARN(vaDestroyImage(m_surface->vadisplay(), image.image_id));
    image.image_id = VA_INVALID_ID;
    VideoFrame *f = reinterpret_cast<VideoFrame*>(handle);
    frame.setTimestamp(f->timestamp());
    *f = frame;
    return f;
}
Example 10: encode
bool VideoEncoderFFmpeg::encode(const VideoFrame &frame)
{
    DPTR_D(VideoEncoderFFmpeg);
    QScopedPointer<AVFrame, ScopedAVFrameDeleter> f;
    // hwupload
    AVPixelFormat pixfmt = AVPixelFormat(frame.pixelFormatFFmpeg());
    if (frame.isValid()) {
        f.reset(av_frame_alloc());
        f->format = pixfmt;
        f->width = frame.width();
        f->height = frame.height();
        // f->quality = d.avctx->global_quality;
        switch (timestampMode()) {
        case TimestampCopy:
            f->pts = int64_t(frame.timestamp()*frameRate()); // TODO: check monotonically increasing and fix if not. or another mode?
            break;
        case TimestampMonotonic:
            f->pts = d.nb_encoded+1;
            break;
        default:
            break;
        }
        // pts is set in muxer
        const int nb_planes = frame.planeCount();
        for (int i = 0; i < nb_planes; ++i) {
            f->linesize[i] = frame.bytesPerLine(i);
            f->data[i] = (uint8_t*)frame.constBits(i);
        }
        if (d.avctx->width <= 0) {
            d.avctx->width = frame.width();
        }
        if (d.avctx->height <= 0) {
            d.avctx->height = frame.height();
        }
#ifdef HAVE_AVHWCTX
        if (d.avctx->hw_frames_ctx) {
            // TODO: try to map to SourceSurface
            // check valid sw_formats
            if (!d.hwframes_ref) {
                qWarning("no hw frame context for uploading");
                return false;
            }
            if (pixfmt != d.hwframes->sw_format) {
                // reinit or got an unsupported format. assume parameters will not change, so it's the 1st init
                // check constraints
                bool init_frames_ctx = d.hwframes->sw_format == AVPixelFormat(-1);
                if (d.sw_fmts.contains(pixfmt)) { // format changed
                    init_frames_ctx = true;
                } else { // convert to supported sw format
                    pixfmt = d.sw_fmts[0];
                    f->format = pixfmt;
                    VideoFrame converted = frame.to(VideoFormat::pixelFormatFromFFmpeg(pixfmt));
                    for (int i = 0; i < converted.planeCount(); ++i) {
                        f->linesize[i] = converted.bytesPerLine(i);
                        f->data[i] = (uint8_t*)converted.constBits(i);
                    }
                }
                if (init_frames_ctx) {
                    d.hwframes->sw_format = pixfmt;
                    d.hwframes->width = frame.width();
                    d.hwframes->height = frame.height();
                    AV_ENSURE(av_hwframe_ctx_init(d.hwframes_ref), false);
                }
            }
            // upload
            QScopedPointer<AVFrame, ScopedAVFrameDeleter> hwf(av_frame_alloc());
            AV_ENSURE(av_hwframe_get_buffer(d.hwframes_ref, hwf.data(), 0), false);
            //hwf->format = d.hwframes->format; // not necessary
            //hwf->width = f->width;
            //hwf->height = f->height;
            AV_ENSURE(av_hwframe_transfer_data(hwf.data(), f.data(), 0), false);
            AV_ENSURE(av_frame_copy_props(hwf.data(), f.data()), false);
            av_frame_unref(f.data());
            av_frame_move_ref(f.data(), hwf.data());
        }
#endif //HAVE_AVHWCTX
    }
    AVPacket pkt;
    av_init_packet(&pkt);
    pkt.data = (uint8_t*)d.buffer.constData();
    pkt.size = d.buffer.size();
    int got_packet = 0;
    int ret = avcodec_encode_video2(d.avctx, &pkt, f.data(), &got_packet);
    if (ret < 0) {
        qWarning("error avcodec_encode_video2: %s", av_err2str(ret));
        return false;
    }
    d.nb_encoded++;
    if (!got_packet) {
        qWarning("no packet got");
        d.packet = Packet();
        // invalid frame means eof
        return frame.isValid();
    }
    // qDebug("enc avpkt.pts: %lld, dts: %lld.", pkt.pts, pkt.dts);
    d.packet = Packet::fromAVPacket(&pkt, av_q2d(d.avctx->time_base));
    // qDebug("enc packet.pts: %.3f, dts: %.3f.", d.packet.pts, d.packet.dts);
    return true;
}