本文整理汇总了C++中VideoDecoder类的典型用法代码示例。如果您正苦于以下问题:C++ VideoDecoder类的具体用法?C++ VideoDecoder怎么用?C++ VideoDecoder使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了VideoDecoder类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: readFunction
static int readFunction(void* opaque, uint8_t* buffer, int bufferSize)
{
VideoDecoder* decoder = (VideoDecoder*) opaque;
//Call implemented function
return decoder->getFillFileBufferFunc()(decoder->getCustomFileBufferFuncData(), buffer, bufferSize);
}
示例2: vdec_flush
/**
 * Flush all frames pending inside the decoder core.
 *
 * @param dec            wrapper handle; may be NULL
 * @param nFlushedFrames out: number of frames the core flushed
 * @return VDEC_SUCCESS on success, VDEC_EFAILED on a NULL handle or core
 */
Vdec_ReturnType vdec_flush(struct VDecoder* dec, int *nFlushedFrames)
{
    QTV_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: flush \n");
    // Validate the wrapper BEFORE touching dec->core (the original read
    // dec->core first, crashing on a NULL dec; sibling vdec_* entry points
    // all check dec).
    if(!dec) return VDEC_EFAILED;
    VideoDecoder* pDec = (VideoDecoder*)(dec->core);
    if(!pDec) return VDEC_EFAILED;
    pDec->Flush( nFlushedFrames );
    return VDEC_SUCCESS;
}
示例3: GetFormat
static AVPixelFormat GetFormat(AVCodecContext *Context, const AVPixelFormat *Formats)
{
    // Without a valid context or candidate list, fall back to a safe default.
    if (!Context || !Formats)
        return AV_PIX_FMT_YUV420P;

    // The owning VideoDecoder (stashed in 'opaque') negotiates the format
    // itself when present.
    if (VideoDecoder* owner = static_cast<VideoDecoder*>(Context->opaque))
        return owner->AgreePixelFormat(Context, Formats);

    return GetFormatDefault(Context, Formats);
}
示例4: gstvideo_has_codec
G_GNUC_INTERNAL
gboolean gstvideo_has_codec(int codec_type)
{
    /* Probe for support by attempting to build a decoder for this codec;
     * a non-NULL result means the required GStreamer elements exist. */
    VideoDecoder *probe = create_gstreamer_decoder(codec_type, NULL);
    if (probe == NULL)
        return FALSE;
    probe->destroy(probe);
    return TRUE;
}
示例5: ReleaseBuffer
static void ReleaseBuffer(AVCodecContext *Context, AVFrame *Frame)
{
    // Frames allocated internally by FFmpeg go back through its default path.
    if (FF_BUFFER_TYPE_INTERNAL == Frame->type)
    {
        avcodec_default_release_buffer(Context, Frame);
        return;
    }

    // Otherwise the buffer belongs to the VideoDecoder stored in 'opaque'.
    VideoDecoder *owner = static_cast<VideoDecoder*>(Context->opaque);
    if (!owner)
    {
        LOG(VB_GENERAL, LOG_ERR, "Invalid context");
        return;
    }
    owner->ReleaseAVBuffer(Context, Frame);
}
示例6: GetBuffer
static int GetBuffer(struct AVCodecContext *Context, AVFrame *Frame)
{
if (!Context->codec)
return -1;
if (!(Context->codec->capabilities & CODEC_CAP_DR1))
return avcodec_default_get_buffer(Context, Frame);
VideoDecoder *parent = static_cast<VideoDecoder*>(Context->opaque);
if (parent)
return parent->GetAVBuffer(Context, Frame);
LOG(VB_GENERAL, LOG_ERR, "Invalid context");
return -1;
}
示例7: main
int main(int argc, char *argv[])
{
QCoreApplication a(argc, argv);
QString file = "test.avi";
int idx = a.arguments().indexOf("-f");
if (idx > 0)
file = a.arguments().at(idx + 1);
QString decName("FFmpeg");
idx = a.arguments().indexOf("-vc");
if (idx > 0)
decName = a.arguments().at(idx + 1);
VideoDecoderId cid = VideoDecoderFactory::id(decName.toStdString());
if (cid <= 0) {
qWarning("Can not find decoder: %s", decName.toUtf8().constData());
return 1;
}
VideoDecoder *dec = VideoDecoderFactory::create(cid);
AVDemuxer demux;
if (!demux.loadFile(file)) {
qWarning("Failed to load file: %s", file.toUtf8().constData());
return 1;
}
dec->setCodecContext(demux.videoCodecContext());
dec->prepare();
dec->open();
QElapsedTimer timer;
timer.start();
int count = 0;
VideoFrame frame;
while (!demux.atEnd()) {
if (!demux.readFrame())
continue;
if (dec->decode(demux.packet()->data)) {
/*
* TODO: may contains more than 1 frames
* map from gpu or not?
*/
//frame = dec->frame().clone();
count++;
}
}
qint64 elapsed = timer.elapsed();
int msec = elapsed/1000LL;
qDebug("decoded frames: %d, time: %d, average speed: %d", count, msec, count/msec);
return 0;
}
示例8: vdec_release_frame
/**
 * Return a previously delivered output frame to the decoder core so its
 * buffer can be reused.
 *
 * @param dec   wrapper handle; may be NULL (rejected)
 * @param frame frame being released; may be NULL (rejected)
 * @return VDEC_SUCCESS on success, VDEC_EFAILED on NULL parameters or core
 */
Vdec_ReturnType vdec_release_frame(struct VDecoder *dec, struct vdec_frame *frame)
{
    QTV_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: release_frame %p\n", frame);
    // Validate parameters BEFORE dereferencing dec (the original read
    // dec->core ahead of this NULL check, defeating its purpose).
    if (NULL == dec || NULL == frame)
    {
        QTV_MSG_PRIO2(QTVDIAG_GENERAL,QTVDIAG_PRIO_ERROR,"vdec: error: encountered NULL parameter vdec: 0x%x frame: 0x%x", (unsigned int)dec, (unsigned int)frame);
        return VDEC_EFAILED;
    }
    VideoDecoder *pDec = (VideoDecoder*)(dec->core);
    if (NULL == pDec)
        return VDEC_EFAILED;

    VDEC_FRAME vdecFrame;
    //vdecFrame.pBuf = (VDEC_BYTE*)frame->phys;
    vdecFrame.pBuf = (VDEC_BYTE*)frame->base;
    vdecFrame.timestamp = (unsigned long long)frame->timestamp;
    pDec->ReuseFrameBuffer(&vdecFrame);
    QTV_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: released_frame with ptr: 0x%x", (unsigned int)vdecFrame.pBuf);
    return VDEC_SUCCESS;
}
示例9: FFPlayer
// Construct the player pipeline: the demuxing stream, the audio/video
// decoders, the renderers, and the scheduler that paces playback.
FFPlayer()
{
stream_ = new FFStream();
audio_decoder_ = new AudioDecoder();
video_decoder_ = new VideoDecoder();
// Route decode-finished notifications from the video decoder back to
// this player instance.
video_decoder_->setOnDecodeFinished( bind(&FFPlayer::video_decoder_OnDecodeFinished, this, placeholders::_1) );
audio_renderer_ = new AudioRenderer();
video_renderer_ = new VideoRenderer();
scheduler_ = new Scheduler();
// Scheduler timer ticks call back into the player to drive rendering.
// NOTE(review): members allocated here are presumably released in the
// destructor — confirm, as it is outside this view.
scheduler_->setOnTime( bind(&FFPlayer::scheduler_OnTimer, this) );
}
示例10: onData
// Live555 data callback: lazily creates an H.264 VideoDecoder, primes it
// with the track's extradata (SPS/PPS), then feeds the payload in chunks,
// advancing by however many bytes the decoder reports consumed.
// Non-H.264 tracks are ignored.
virtual void onData(LiveTrack* track, uint8_t* p_buffer, int i_size,
                    int i_truncated_bytes, int64_t pts, int64_t dts) {
    if (track->getFormat().i_codec != VLC_CODEC_H264)
        return;
    // Initialize the out-parameter: the original left it uninitialized,
    // so the first loop iteration could read garbage if decode() ever
    // returns without setting it.
    int consumed = 0;
    if (!decoder) {
        decoder = new VideoDecoder();
        decoder->openCodec(0);
        if (track->getFormat().p_extra) {
            decoder->decode(track->getFormat().p_extra,
                            track->getFormat().i_extra, consumed);
        }
    }
    uint8_t* tmp = p_buffer;
    int left = i_size;
    while (left > 0) {
        AVFrame* ret = decoder->decode(tmp, left, consumed);
        if (ret) {
            av_frame_unref(ret);
#ifdef TEST_MULTI_CLIENT
            std::cout << "client " << this << " got frame!!!\n";
#endif
        }
        // Guard against zero/negative consumption: the original looped on
        // 'while (left)' and would spin forever (or drive 'left' negative)
        // if the decoder made no forward progress.
        if (consumed <= 0)
            break;
        tmp += consumed;
        left -= consumed;
    }
}
示例11: RecVideo
/****************************************
* RecVideo
* Obtiene los packetes y los muestra
*****************************************/
int MediaBridgeSession::RecVideo()
{
//Coders
VideoDecoder* decoder = NULL;
VideoEncoder* encoder = VideoCodecFactory::CreateEncoder(VideoCodec::SORENSON);
//Create new video frame
RTMPVideoFrame frame(0,262143);
//Set codec
frame.SetVideoCodec(RTMPVideoFrame::FLV1);
int width=0;
int height=0;
DWORD numpixels=0;
Log(">RecVideo\n");
//Mientras tengamos que capturar
while(receivingVideo)
{
///Obtenemos el paquete
RTPPacket* packet = rtpVideo.GetPacket();
//Check
if (!packet)
//Next
continue;
//Get type
VideoCodec::Type type = (VideoCodec::Type)packet->GetCodec();
if ((decoder==NULL) || (type!=decoder->type))
{
//Si habia uno nos lo cargamos
if (decoder!=NULL)
delete decoder;
//Creamos uno dependiendo del tipo
decoder = VideoCodecFactory::CreateDecoder(type);
//Check
if (!decoder)
{
delete(packet);
continue;
}
}
//Lo decodificamos
if(!decoder->DecodePacket(packet->GetMediaData(),packet->GetMediaLength(),0,packet->GetMark()))
{
delete(packet);
continue;
}
//Get mark
bool mark = packet->GetMark();
//Delete packet
delete(packet);
//Check if it is last one
if(!mark)
continue;
//Check size
if (decoder->GetWidth()!=width || decoder->GetHeight()!=height)
{
//Get dimension
width = decoder->GetWidth();
height = decoder->GetHeight();
//Set size
numpixels = width*height*3/2;
//Set also frame rate and bps
encoder->SetFrameRate(25,300,500);
//Set them in the encoder
encoder->SetSize(width,height);
}
//Encode next frame
VideoFrame *encoded = encoder->EncodeFrame(decoder->GetFrame(),numpixels);
//Check
if (!encoded)
break;
//Check size
if (frame.GetMaxMediaSize()<encoded->GetLength())
//Not enougth space
return Error("Not enought space to copy FLV encodec frame [frame:%d,encoded:%d",frame.GetMaxMediaSize(),encoded->GetLength());
//Get full frame
frame.SetVideoFrame(encoded->GetData(),encoded->GetLength());
//.........这里部分代码省略.........
示例12: main
/**
 * Hand-tracking demo: loads open/closed-hand Adaboost classifiers, decodes
 * an input video, tracks per frame, draws results, and encodes the annotated
 * frames to an output video.
 *
 * Usage: prog <open classifier> <closed classifier> <input video> <output video>
 */
int main(int argc, char *argv[])
{
#if 0
    QCoreApplication a(argc, argv);
    return a.exec();
#endif
    VideoDecoder* videoDecoder = new VideoDecoder;
    VideoEncoder* videoEncoder = 0;
    AdaboostClassifier* openClassifier = new AdaboostClassifier;
    AdaboostClassifier* closedClassifier = new AdaboostClassifier;
    HandyTracker tracker;
    if ( argc != 5 )
    {
        printf("Usage: %s <open classifier> <closed classifier> <input video> <output video>\n", argv[0]);
        return 0;
    }
    // Format strings now include the <%s> conversion — the originals passed
    // argv[n] with no matching specifier, so the filename was never printed.
    if ( !openClassifier->Load(argv[1]) )
    {
        fprintf(stderr, "Failed loading open classifier <%s>\n", argv[1]);
        return 1;
    }
    if ( !tracker.SetOpenClassifier(openClassifier) )
    {
        fprintf(stderr, "Failed setting open classifier\n");
        return 1;
    }
    if ( !closedClassifier->Load(argv[2]) )
    {
        fprintf(stderr, "Failed loading closed classifier <%s>\n", argv[2]);
        return 1;
    }
    if ( !tracker.SetClosedClassifier(closedClassifier) )
    {
        fprintf(stderr, "Failed setting closed classifier\n");
        return 1;
    }
    videoDecoder->SetFilename(argv[3]);
    if ( !videoDecoder->Load() )
    {
        fprintf(stderr, "Failed loading video <%s>\n", argv[3]);
        return 1;
    }
    if ( !videoDecoder->UpdateFrame() )
    {
        fprintf(stderr, "Failed updating frame\n");
        return 1;
    }
    int frameNumber = 0;
    bool trackingInitialized = false;
    Image* img = videoDecoder->GetFrame();
    while ( img )
    {
        // Open the encoder lazily, once the first frame's dimensions are known.
        if ( !videoEncoder )
        {
            videoEncoder = new VideoEncoder;
            if ( !videoEncoder->Open(argv[4], img->GetWidth(), img->GetHeight(), 25) )
            {
                fprintf(stderr, "Failed opening output video <%s>\n", argv[4]);
                return 1;
            }
        }
        ProcessFrame(img, &tracker, trackingInitialized, frameNumber);
        if ( trackingInitialized )
            DrawResults(img, &tracker, frameNumber);
        videoEncoder->AddFrame(img);
        // Drop tracker state older than two frames to bound memory use.
        if ( frameNumber > 1 )
            tracker.PurgeRegion(frameNumber - 2);
        frameNumber++;
        videoDecoder->UpdateFrame();
        img = videoDecoder->GetFrame();
    }
    // Guard the close: the original called videoEncoder->Close() even when
    // the video yielded no frames, in which case videoEncoder is still NULL.
    if ( videoEncoder )
        videoEncoder->Close();
    return 0;
}
示例13: vdec_post_input_buffer
/**
 * Submit one compressed input buffer to the decoder core.
 *
 * Handles the zero-length EOS buffer as a special case, loops Decode() so a
 * single input buffer containing multiple NALs is fully consumed, and raises
 * a fatal-error frame callback on unsupported dimensions.
 *
 * @param dec    wrapper handle; may be NULL (rejected)
 * @param frame  input descriptor; frame and frame->data may be NULL (rejected)
 * @param cookie opaque client token echoed back via buffer_done
 * @return VDEC_SUCCESS, VDEC_EOUTOFBUFFERS, or VDEC_EFAILED
 */
Vdec_ReturnType vdec_post_input_buffer(struct VDecoder *dec, video_input_frame_info *frame, void *cookie)
{
#ifdef LOG_INPUT_BUFFERS
    static int take_input = 1;
#endif
    int fatal_err = 0;
    /*checkBufAvail flag is needed since we do not need to checkout
     * YUV/Slice buffer incase the NAL corresponds to same frame.
     * This is required for multiple NALs in one input buffer
     */
    bool checkBufAvail = true;
    VDEC_INPUT_BUFFER input;
    VDEC_ERROR err = VDEC_ERR_EVERYTHING_FINE;
    // Validate parameters BEFORE any dereference: the original logged
    // frame->data and read dec->core ahead of this NULL check.
    if (NULL == dec || NULL == frame || NULL == frame->data )
    {
        QTV_MSG_PRIO3(QTVDIAG_GENERAL,QTVDIAG_PRIO_ERROR,"vdec: error: encountered NULL parameter dec: 0x%x frame: 0x%x data: 0x%x\n",
                      (unsigned int)dec,
                      (unsigned int)frame,
                      (unsigned int)frame->data);
        return VDEC_EFAILED;
    }
    QTV_MSG_PRIO3(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: post_input data=%p len=%d cookie=%p\n", frame->data, frame->len, cookie);
    VideoDecoder *pDec = (VideoDecoder*)(dec->core);
    // Single-layer input descriptor for the core decoder.
    input.buffer[0] = (unsigned char*)frame->data;
    input.timestamp[0] = (long long)frame->timestamp;
    input.buffer_size[0] = (unsigned long int)frame->len;
    input.buffer_pos[0] = 0;
    input.layers = 1;
    input.eOSIndicator[0]= false;
    QTV_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_MED,"vdec: received ts: %lld", frame->timestamp);
    // Out-of-order timestamps are logged but not rejected.
    if (frame->timestamp < timestamp )
    {
        QTV_MSG_PRIO2(QTVDIAG_GENERAL,QTVDIAG_PRIO_MED,"vdec: error: out of order stamp! %d < %d\n",
                      (int)(frame->timestamp&0xFFFFFFFF), timestamp);
    }
    timestamp = (int)frame->timestamp;
    QTV_MSG_PRIO2(QTVDIAG_GENERAL,QTVDIAG_PRIO_MED,"vdec: vdec_core_post_input. buffer_size[0]: %ld frame->flags: 0x%x\n",
                  input.buffer_size[0], frame->flags);
    // A zero-length buffer flagged EOS signals end-of-stream to the core.
    if (input.buffer_size[0] == 0 && frame->flags & FRAME_FLAG_EOS)
    {
        QTV_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: Zero-length buffer with EOS bit set\n");
        input.eOSIndicator[0] = true;
        if(pDec)
            err = pDec->EOS( );
        else
            err = VDEC_ERR_NULL_STREAM_ID;
        if(VDEC_ERR_OUT_OF_BUFFERS == err) return VDEC_EOUTOFBUFFERS;
        vdec_decoder_info->ctxt->buffer_done(vdec_decoder_info->ctxt, cookie);
        if (VDEC_ERR_EVERYTHING_FINE == err) return VDEC_SUCCESS;
        return VDEC_EFAILED;
    }
    QTV_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: vdec_core_post_input\n");
#ifdef LOG_INPUT_BUFFERS
    if (take_input)
    {
        fwritex((unsigned char*)frame->data, frame->len, pInputFile);
        QTV_MSG_PRIO2(QTVDIAG_GENERAL,QTVDIAG_PRIO_HIGH,"vdec: frame %d frame->len %d\n", counter++, frame->len);
    }
#endif
    // Keep decoding until the buffer is drained or the core reports an error.
    // Only the first pass checks buffer availability (see checkBufAvail note).
    do {
        QPERF_TIME(arm_decode, err = pDec->Decode( &input, checkBufAvail ));
        if (VDEC_ERR_EVERYTHING_FINE != err)
        {
            QTV_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_HIGH,"vdec: vdec_decoder error: %d\n", (int)err);
            if(VDEC_ERR_UNSUPPORTED_DIMENSIONS == err) {
                fatal_err = 1;
                break;
            }
        }
        checkBufAvail = false;
    } while( ( VDEC_ERR_EVERYTHING_FINE == err ) && ( 0 != input.buffer_size[0] ) );
#ifdef LOG_INPUT_BUFFERS
    take_input = (err==14?0:1);
#endif
    if(VDEC_ERR_OUT_OF_BUFFERS == err) return VDEC_EOUTOFBUFFERS;
    vdec_input_buffer_release_cb_handler(pDec,&input,cookie);
    if(VDEC_ERR_EVERYTHING_FINE == err) return VDEC_SUCCESS;
    // Unsupported dimensions: deliver a synthetic frame flagged fatal so the
    // client can tear down.
    if(fatal_err) {
        static struct vdec_frame frame;
        memset(&frame, 0, sizeof(frame));
        frame.flags |= FRAME_FLAG_FATAL_ERROR;
        QPERF_END(frame_data);
        vdec_decoder_info->ctxt->frame_done(vdec_decoder_info->ctxt, &frame);
    }
    return VDEC_EFAILED;
}
示例14: sizeof
struct VDecoder *vdec_open(struct vdec_context *ctxt)
{
struct VDecoder *dec = NULL;
VDEC_ERROR err = 0;
const VDEC_CONCURRENCY_CONFIG concurrencyConfig = VDEC_CONCURRENT_NONE;
VideoDecoder* pDec = NULL;
dec = (VDecoder*)calloc(1, sizeof(struct VDecoder));
if (!dec) {
return 0;
}
dec->ctxt = ctxt;
dec->width = ctxt->width;
dec->height = ctxt->height;
dec->ctxt->outputBuffer.numBuffers = ctxt->outputBuffer.numBuffers;
if(VDEC_SUCCESS != vdec_commit_memory(dec)) {
return 0;
}
QPERF_RESET(arm_decode);
QPERF_RESET(frame_data);
nFrameDoneCnt = 0;
nGoodFrameCnt = 0;
#ifdef PROFILE_DECODER
qperf_total_frame_cnt = 0;
#endif
vdec_output_frame_index = 0;
timestamp = 0;
int i;
VDEC_PARAMETER_DATA codeDetectionEnable;
codeDetectionEnable.startCodeDetection.bStartCodeDetectionEnable = false; // by default set to false; MPEG4 doesnt require it
QTV_MSG_PRIO2(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: vdec_open(). width: %d, height: %d\n", dec->width, dec->height);
vdec_decoder_info = dec;
QTV_MSG_PRIO3(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: vdec_open(). width: %d, height: %d kind[%s]\n",
vdec_decoder_info->ctxt->width, vdec_decoder_info->ctxt->height,
vdec_decoder_info->ctxt->kind);
if(!strcmp(vdec_decoder_info->ctxt->kind,"OMX.qcom.video.decoder.avc"))
{
dec->core = reinterpret_cast<VDEC_STREAM_ID>(pCreateFnH264(&err));
QTV_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: Creating H264 Decoder [%p]\n",dec->core);
VDEC_PARAMETER_DATA sizeOfNalLengthField;
sizeOfNalLengthField.sizeOfNalLengthField.sizeOfNalLengthField = dec->ctxt->size_of_nal_length_field;
QTV_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: NAL lenght [%d]\n",dec->ctxt->size_of_nal_length_field);
pDec = (VideoDecoder*)(dec->core);
if (0 == dec->ctxt->size_of_nal_length_field)
{
QTV_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: START CODE....\n");
codeDetectionEnable.startCodeDetection.bStartCodeDetectionEnable = true;
if(!pDec)
err = VDEC_ERR_NULL_STREAM_ID;
else
err = pDec->SetParameter(VDEC_PARM_START_CODE_DETECTION,&codeDetectionEnable);
if (VDEC_ERR_EVERYTHING_FINE != err)
{
// TBD- printx("[vdec_core] set start code detection parameter failed: %d", (int)err);
QTV_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_ERROR,"[vdec_core] set start code detection parameter failed: %d", (int)err);
goto fail_initialize;
}
}
else if(dec->ctxt->size_of_nal_length_field > 0 && dec->ctxt->size_of_nal_length_field <= 4)
{
QTV_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_ERROR,"vdec: NALU LENGTH[%d]\n",dec->ctxt->size_of_nal_length_field);
// test size of NAL length field decoder support
if(!pDec)
err = VDEC_ERR_NULL_STREAM_ID;
else
err = pDec->SetParameter( VDEC_PARM_SIZE_OF_NAL_LENGTH_FIELD, &sizeOfNalLengthField );
if (VDEC_ERR_EVERYTHING_FINE != err)
{
// TBD- printx("[vdec_core] set start code detection parameter failed: %d", (int)err);
goto fail_initialize;
}
}
else
{
QTV_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_ERROR,"vdec: Invalid size of nal length field: %d\n", dec->ctxt->size_of_nal_length_field);
goto fail_core;
}
}
else if ((!strcmp(vdec_decoder_info->ctxt->kind,"OMX.qcom.video.decoder.mpeg4"))
|| (!strcmp(vdec_decoder_info->ctxt->kind,"OMX.qcom.video.decoder.h263")))
{
dec->core = reinterpret_cast<VDEC_STREAM_ID>(pCreateFnMpeg4(&err));
pDec = (VideoDecoder*)(dec->core);
QTV_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: Creating MP4 Decoder [%p]\n",dec->core);
}
else if (!strcmp(vdec_decoder_info->ctxt->kind,"OMX.qcom.video.decoder.vc1"))
{
dec->core = reinterpret_cast<VDEC_STREAM_ID>(pCreateFnWmv(&err));
pDec = (VideoDecoder*)(dec->core);
QTV_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: Creating WMV Decoder [%p]\n",dec->core);
}
else
{
QTV_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_ERROR,"Incorrect codec kind\n");
goto fail_core;
}
//.........这里部分代码省略.........
示例15: vdec_close
/**
 * Tear down a decoder instance: dump profiling statistics, close log files,
 * suspend and destroy the core decoder, release the pmem arena, and free
 * the wrapper.
 *
 * @param dec wrapper handle; must be non-NULL (dec->core may be NULL)
 * @return VDEC_SUCCESS always
 */
Vdec_ReturnType vdec_close(struct VDecoder *dec)
{
VDEC_ERROR err;
VideoDecoder* pDec = (VideoDecoder*)(dec->core);
QTV_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: vdec_close()\n");
#ifdef PROFILE_DECODER
// ARM-side decode time and frame-done throughput gathered via QPERF counters.
usecs_t time_taken_by_arm = QPERF_TERMINATE(arm_decode);
float avArmTime = (float)time_taken_by_arm/(qperf_total_frame_cnt*1000);
usecs_t frame_data_time = QPERF_TERMINATE(frame_data);
QTV_PERF_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_FATAL,"===========================================================\n");
QTV_PERF_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_FATAL," Arm Statistics \n");
QTV_PERF_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_FATAL,"===========================================================\n");
QTV_PERF_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_FATAL,"Total number of frames decoded = %ld\n",qperf_total_frame_cnt);
QTV_PERF_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_FATAL,"Average Arm time/frame(ms) = %f\n",avArmTime);
QTV_PERF_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_FATAL,"Frames Arm Decoded/sec = %f\n",1000/avArmTime);
QTV_PERF_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_FATAL,"===========================================================\n");
QTV_PERF_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_FATAL," Frame Done Statistics \n");
QTV_PERF_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_FATAL,"===========================================================\n");
QTV_PERF_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_FATAL,"Frame done cumulative time = %lld\n",frame_data_time);
QTV_PERF_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_FATAL,"Frames Done per second = %f\n",(float)(qperf_total_frame_cnt-1)*1000000/frame_data_time);
QTV_PERF_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_FATAL,"===========================================================\n");
#endif
#ifdef LOG_YUV_FRAMES
// Close the YUV dump file opened elsewhere during decoding.
if (pYUVFile)
{
fclose (pYUVFile);
pYUVFile = NULL;
}
#endif
#ifdef LOG_INPUT_BUFFERS
// NOTE(review): pInputFile is closed but not reset to NULL, unlike pYUVFile
// above — presumably harmless at teardown; verify if close can be re-entered.
if (pInputFile)
{
fclose (pInputFile);
}
#endif
vdec_output_frame_index = 0;
#if NEED_VDEC_LP
// NOTE(review): 'vdec' is not declared in this function — when NEED_VDEC_LP
// is enabled this presumably refers to a global; confirm before enabling.
if (vdec->fake)
{
//jlk - adsp_close() calls adsp_disable right now. Calling adsp_disable() twice causes problems
//Renable this line when we fix the kernel driver
//adsp_disable((adsp_module*)vdec->fake);
adsp_close((adsp_module*)vdec->fake);
}
else
{
QTV_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_HIGH,"vdec: adsp modules is NULL\n");
}
#endif
nFrameDoneCnt = 0;
nGoodFrameCnt = 0;
if (dec->core)
{
// Suspend the core before destroying it; a failed suspend is logged but
// teardown proceeds regardless.
QTV_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: calling Suspend");
err = pDec->Suspend( );
if (VDEC_ERR_EVERYTHING_FINE != err)
{
QTV_MSG_PRIO1(QTVDIAG_GENERAL,QTVDIAG_PRIO_ERROR,"vdec: Suspend returned error: %d\n", (int)err);
}
QTV_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_LOW,"vdec: calling vdec_destroy");
QTV_Delete( (VideoDecoder*)(dec->core) );
}
else
{
QTV_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_HIGH,"vdec: core is NULL");
}
// Release the physically-contiguous memory arena, then the wrapper itself.
pmem_free(&dec->arena);
free(dec);
QTV_MSG_PRIO(QTVDIAG_GENERAL,QTVDIAG_PRIO_MED,"vdec: closed\n");
return VDEC_SUCCESS;
}