

C++ packet_queue_get Function Code Examples

This article collects and summarizes typical usage examples of the C++ packet_queue_get function. If you have been wondering how packet_queue_get is actually used, what its call patterns look like, or where to find real-world examples, the hand-picked snippets below should help.


Fifteen code examples of the packet_queue_get function are shown below, sorted by popularity by default.
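All fifteen examples follow the same pattern, popularized by the dranger FFmpeg tutorials: a demuxing thread pushes AVPackets into a mutex-protected queue, and the audio/video decoding threads call packet_queue_get to pop them, optionally blocking until one arrives. Since none of the snippets include the function itself, here is a minimal sketch of what the callers assume. The struct layout, the SDL mutex/condition variable, and the global quit flag are assumptions taken from the common tutorial pattern, not from any particular project quoted below.

#include <libavcodec/avcodec.h>
#include <SDL.h>

/* Minimal sketch of the tutorial-style packet queue and its blocking get.
 * Nodes are assumed to be av_malloc'ed by the matching packet_queue_put(). */
typedef struct PacketNode {
  AVPacket pkt;
  struct PacketNode *next;
} PacketNode;

typedef struct PacketQueue {
  PacketNode *first_pkt, *last_pkt;
  int nb_packets;
  int size;
  SDL_mutex *mutex;
  SDL_cond *cond;
} PacketQueue;

static int quit = 0;

/* Returns 1 when a packet was popped into *pkt, 0 if the queue is empty and
 * block == 0, and a negative value when the player is quitting. */
int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block) {
  PacketNode *node;
  int ret;

  SDL_LockMutex(q->mutex);
  for (;;) {
    if (quit) {                      /* abort on the global quit flag */
      ret = -1;
      break;
    }
    node = q->first_pkt;
    if (node) {                      /* pop the head of the singly linked list */
      q->first_pkt = node->next;
      if (!q->first_pkt)
        q->last_pkt = NULL;
      q->nb_packets--;
      q->size -= node->pkt.size;
      *pkt = node->pkt;
      av_free(node);
      ret = 1;
      break;
    } else if (!block) {             /* non-blocking call on an empty queue */
      ret = 0;
      break;
    } else {                         /* wait until packet_queue_put() signals */
      SDL_CondWait(q->cond, q->mutex);
    }
  }
  SDL_UnlockMutex(q->mutex);
  return ret;
}

The block argument explains the two call styles seen in the examples: passing 1 blocks until a packet is available (the audio and video threads), while example 4 passes 0 for its serial-data queue so the render loop is never stalled when that queue is empty.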

Example 1: audio_decode_frame

int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size)
{
    static AVPacket pkt;
    static uint8_t *audio_pkt_data = NULL;
    static int audio_pkt_size = 0;
    int len1, data_size;
    for(;;)
    {
        if(packet_queue_get(audioq, &pkt, 1) < 0)
        {
            return -1;
        }
        audio_pkt_data = pkt.data;
        audio_pkt_size = pkt.size;
        while(audio_pkt_size > 0)
        {
            data_size = buf_size;
            len1 = avcodec_decode_audio2(aCodecCtx, (int16_t *)audio_buf, &data_size, audio_pkt_data, audio_pkt_size);
            if(len1 < 0)
            {
                audio_pkt_size = 0;
                break;
            }
            audio_pkt_data += len1;
            audio_pkt_size -= len1;
            if(data_size <= 0)
            {
                continue;
            }
            return data_size;
        }
        if(pkt.data)
            av_free_packet(&pkt);
    }
}
Developer: kestiny, Project: Demos, Lines: 35, Source: videoplayer.cpp
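Example 1 relies on avcodec_decode_audio2, which, like avcodec_decode_audio3/4 and av_free_packet used in later examples, has since been removed from FFmpeg. On FFmpeg 4.x and newer the same step maps onto the send/receive API. The sketch below is a hedged adaptation, not the original author's code: it reuses the packet_queue_get/audioq convention of the examples (see the queue sketch above) and, like example 1, copies only frame->data[0], which is only correct for packed (interleaved) sample formats; planar decoder output needs resampling, as in example 14.

#include <string.h>
#include <libavcodec/avcodec.h>
#include <libavutil/samplefmt.h>

/* packet_queue_get() and the global audio queue are assumed to exist as in
 * the examples above; they are only declared here. */
typedef struct PacketQueue PacketQueue;
extern PacketQueue audioq;
int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block);

/* Hedged adaptation of example 1 to the modern decode API. Returns the number
 * of bytes written into audio_buf, or a negative value on quit/error. */
int audio_decode_frame_modern(AVCodecContext *aCodecCtx,
                              uint8_t *audio_buf, int buf_size) {
  AVPacket *pkt = av_packet_alloc();
  AVFrame *frame = av_frame_alloc();
  int ret, data_size = -1;

  if (!pkt || !frame)
    goto done;

  for (;;) {
    /* Drain any frame the decoder already has buffered. */
    ret = avcodec_receive_frame(aCodecCtx, frame);
    if (ret == 0) {
      data_size = av_samples_get_buffer_size(NULL,
                      aCodecCtx->ch_layout.nb_channels, /* ->channels before FFmpeg 5.1 */
                      frame->nb_samples, aCodecCtx->sample_fmt, 1);
      if (data_size > 0 && data_size <= buf_size)
        memcpy(audio_buf, frame->data[0], data_size); /* packed formats only */
      break;
    }
    if (ret != AVERROR(EAGAIN)) {    /* decode error or EOF */
      data_size = -1;
      break;
    }

    /* Decoder wants more input: pull the next packet from the queue. */
    if (packet_queue_get(&audioq, pkt, 1) < 0)
      break;
    ret = avcodec_send_packet(aCodecCtx, pkt);
    av_packet_unref(pkt);
    if (ret < 0)
      break;
  }

done:
  av_frame_free(&frame);
  av_packet_free(&pkt);
  return data_size;
}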

Example 2: video_thread

int video_thread(void *arg) {
        VideoState *is = (VideoState *) arg;
        AVPacket pkt1, *packet = &pkt1;
        //int len1;
        int frameFinished;
        AVFrame *pFrame;

        pFrame = av_frame_alloc();

        for (;;) {
                if (packet_queue_get(&is->videoq, packet, 1) < 0) {
                        // means we quit getting packets
                        break;
                }
                // Decode video frame
                //len1 =
                avcodec_decode_video2(is->video_st->codec, pFrame, &frameFinished,
                                packet);

                // Did we get a video frame?
                if (frameFinished) {
                        if (queue_picture(is, pFrame) < 0) {
                                break;
                        }
                }
                av_free_packet(packet);
        }
        av_free(pFrame);
        return 0;
}
Developer: GNUDimarik, Project: dranger-ffmpeg-tuto, Lines: 30, Source: tutorial04.c

Example 3: audio_decode_frame

int audio_decode_frame(VideoState *is, double *pts_ptr) {

    int len1, data_size = 0, n;
    AVPacket *pkt = &is->audio_pkt;
    double pts;

    for (; ;) {
        while (is->audio_pkt_size > 0) {
            int got_frame = 0;
            len1 = avcodec_decode_audio4(is->audio_st->codec, &is->audio_frame, &got_frame, pkt);
            if (len1 < 0) {
                /* if error, skip frame */
                is->audio_pkt_size = 0;
                break;
            }
            if (got_frame) {
                data_size = av_samples_get_buffer_size(NULL,
                        is->audio_st->codec->channels,
                        is->audio_frame.nb_samples,
                        is->audio_st->codec->sample_fmt, 1);
                memcpy(is->audio_buf, is->audio_frame.data[0], data_size);
            }
            is->audio_pkt_data += len1;
            is->audio_pkt_size -= len1;
            if (data_size <= 0) {
                /* No data yet, get more frames */
                continue;
            }
            pts = is->audio_clock;
            *pts_ptr = pts;
            n = 2 * is->audio_st->codec->channels;
            is->audio_clock += (double) data_size /
                    (double) (n * is->audio_st->codec->sample_rate);

            /* We have data, return it and come back for more later */
            return data_size;
        }
        if (pkt->data)
            av_free_packet(pkt);

        if (is->quit) {
            return -1;
        }
        /* next packet */
        if (packet_queue_get(&is->audioq, pkt, 1) < 0) {
            return -1;
        }
        is->audio_pkt_data = pkt->data;
        is->audio_pkt_size = pkt->size;
        /* if update, update the audio clock w/pts */
        if (pkt->pts != AV_NOPTS_VALUE) {
            is->audio_clock = av_q2d(is->audio_st->time_base) * pkt->pts;
        }
    }
}
Developer: Akagi201, Project: learning-ffmpeg, Lines: 60, Source: main.c

Example 4: fprintf

void Player::ScheduleNextFrame(bool prerolling)
{
    AVPacket pkt;
    AVPicture picture;

    if (serial_fd > 0 && packet_queue_get(&dataqueue, &pkt, 0)) {
        if (pkt.data[0] != ' '){
            fprintf(stderr,"written %.*s  \n", pkt.size, pkt.data);
            write(serial_fd, pkt.data, pkt.size);
        }
        av_free_packet(&pkt);
    }

    if (packet_queue_get(&videoqueue, &pkt, 1) < 0)
        return;

    IDeckLinkMutableVideoFrame *videoFrame;
    m_deckLinkOutput->CreateVideoFrame(m_frameWidth,
                                       m_frameHeight,
                                       m_frameWidth * 2,
                                       pix,
                                       bmdFrameFlagDefault,
                                       &videoFrame);
    void *frame;
    int got_picture;
    videoFrame->GetBytes(&frame);

    avcodec_decode_video2(video_st->codec, avframe, &got_picture, &pkt);
    if (got_picture) {
        avpicture_fill(&picture, (uint8_t *)frame, pix_fmt,
                       m_frameWidth, m_frameHeight);

        sws_scale(sws, avframe->data, avframe->linesize, 0, avframe->height,
                  picture.data, picture.linesize);

        if (m_deckLinkOutput->ScheduleVideoFrame(videoFrame,
                                                 pkt.pts *
                                                 video_st->time_base.num,
                                                 pkt.duration *
                                                 video_st->time_base.num,
                                                 video_st->time_base.den) !=
            S_OK)
            fprintf(stderr, "Error scheduling frame\n");
    }
    videoFrame->Release();
    av_free_packet(&pkt);
}
Developer: djlancelot, Project: bmdtools, Lines: 47, Source: bmdplay.cpp

Example 5: LOGE

void *video_thread(void *arg) {
  JNIEnv *env;
  if((*g_jvm)->AttachCurrentThread(g_jvm, &env, NULL) != JNI_OK) {
       LOGE(1, "### start video thead error");
	   return;
  }
  VideoState *is = (VideoState*)arg;
  AVPacket pkt1, *packet = &pkt1;
  int len1, frameFinished;
  AVFrame *pFrame;
  double pts;
  int numBytes;
  pFrame=avcodec_alloc_frame();
  int ret;
  for(;;) {
	if(is->quit == 1 || is->quit == 2) {
		break;
	}
    if(packet_queue_get(&is->videoq, packet, 1) < 0) {
	  if(debug) LOGI(10,"video_thread get packet exit");
      break;
    }
    pts = 0;
    global_video_pkt_pts = packet->pts;
    len1 = avcodec_decode_video2(is->video_st->codec,
				pFrame,
				&frameFinished,
				packet);	
    if(packet->dts == AV_NOPTS_VALUE
       && pFrame->opaque
       && *(uint64_t*)pFrame->opaque
       != AV_NOPTS_VALUE) {
      pts = *(uint64_t*) pFrame->opaque;
    } else if (packet->dts != AV_NOPTS_VALUE) {
      pts = packet->dts;
    } else {
      pts = 0;
    }
    pts *= av_q2d(is->video_st->time_base);
	//pts *= av_q2d(pCodecCtx->time_base);
    if (frameFinished) {
       pts = synchronize_video(is, pFrame, pts);
       if (queue_picture(is, pFrame, pts) < 0) {
			break;
       }
    }
    av_free_packet(packet);
  }
  av_free(pFrame);
  if((*g_jvm)->DetachCurrentThread(g_jvm) != JNI_OK) {
	LOGE(1,"### detach video thread error");
  }
  if(debug) {
    LOGI(1,"### video_thread exit");
  }
  pthread_exit(0);
  return ((void *)0);
}
Developer: dalvik, Project: Drovik, Lines: 58, Source: ffmpeg-jni.c

Example 6: audio_decode_frame

int audio_decode_frame(VideoState *is, uint8_t *audio_buf, int buf_size, double *pts_ptr) {

  int len1, data_size, n;
  AVPacket *pkt = &is->audio_pkt;
  double pts;

  for(;;) {
    while(is->audio_pkt_size > 0) {
      data_size = buf_size;
      len1 = avcodec_decode_audio3(is->audio_ctx, 
				  (int16_t *)audio_buf, &data_size, pkt);
				  //is->audio_pkt_data, is->audio_pkt_size);

      //len1 = avcodec_decode_audio2(aCodecCtx, (int16_t *)audio_buf, &data_size, 
			//	  audio_pkt_data, audio_pkt_size);
      if(len1 < 0) {
        // if error, skip frame
        is->audio_pkt_size = 0;
        break;
      }
      is->audio_pkt_data += len1;
      is->audio_pkt_size -= len1;
      if(data_size <= 0) {
        // No data yet, get more frames
        continue;
      }
      pts = is->audio_clock;
      *pts_ptr = pts;
      //n = 2 * is->audio_ctx->channels;
      n = 2;
      is->audio_clock += (double)data_size /
      (double)(n * is->audio_ctx->sample_rate);

      // We have data, return it and come back for more later
      return data_size;
    }
    if(pkt->data)
      av_free_packet(pkt);

    if(is->quit) {
      return -1;
    }

    // next packet
    if(packet_queue_get(&is->audioq, pkt, 1) < 0) {
      return -1;
    }

    is->audio_pkt_data = pkt->data;
    is->audio_pkt_size = pkt->size;

    // if update, update the audio clock w/pts
    if(pkt->pts != AV_NOPTS_VALUE) {
      is->audio_clock = av_q2d(is->audio_ctx->time_base)*pkt->pts;
    }

  }
}
Developer: 26597925, Project: SmileTime, Lines: 58, Source: player.c

Example 7: audio_decode_frame

int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf,
                       int buf_size) {

	static AVPacket pkt;
	static uint8_t *audio_pkt_data = NULL;
	static int audio_pkt_size = 0;

	int len1, data_size = 0;

	for(;;) {
		while(audio_pkt_size > 0) {
			int got_frame = 0;
			len1 = avcodec_decode_audio4(aCodecCtx, m_audio_frame, &got_frame, &pkt);

			if(len1 < 0)
			{
				audio_pkt_size = 0;
				break;
			}

			audio_pkt_data += len1;
			audio_pkt_size -= len1;
			data_size = 0;

			if(got_frame)
			{
				data_size = av_samples_get_buffer_size(NULL, aCodecCtx->channels,
						m_audio_frame->nb_samples,
						aCodecCtx->sample_fmt, 1);
				//assert(data_size <= buf_size);
				memcpy(audio_buf, m_audio_frame->data[0], data_size);
			}

			if(data_size <= 0)
			{
				/* No data yet, get more frames */
				continue;
			}
			/* We have data, return it and come back for more later */
			return data_size;
		}

		if(pkt.data)
			av_packet_unref(&pkt);

		if(quit) {
			return -1;
		}

		if(packet_queue_get(&m_audio_q, &pkt, 1) < 0) {
			return -1;
		}
		audio_pkt_data = pkt.data;
		audio_pkt_size = pkt.size;
	}
}
Developer: hoseogame, Project: 20101898_SDL, Lines: 58, Source: DWVideo.cpp

Example 8: audio_decode_frame

int audio_decode_frame( void )
{
        //printf( "In Audio decode frame : Thread \n" );
        SDL_Event quit_audio_event;

        quit_audio_event.type = QUIT_AUDIO_EVENT;

        PacketQueue *pAQueue = &( gMedia->audioQueue ) ;
        static AVPacket packet;
        AVFrame *pFrame = avcodec_alloc_frame() ;
        int pkt_bytes_decd = 0;
        int audio_data_size = 0;
        int frame_fin = 0;

        if ( packet.size == 0 ) {
                if ( !packet_queue_get( pAQueue, &packet ) ) {
                        SDL_PushEvent( &quit_audio_event );
                        av_free( pFrame );
                        return -1;
                }
        }

        while ( packet.size > 0 ) {
                // printf("Size of packet is %d\n",packet.size);
                pkt_bytes_decd = avcodec_decode_audio4( gMedia->aCodecContext,
                                                        pFrame,
                                                        &frame_fin,
                                                        &packet );

                printf( "%d bytes from packet decoded\n", pkt_bytes_decd );
                // printf("Format of Decoded frame is %d\n",pFrame->format);
                // printf("Format of audio is %d\n",pFrame->nb_samples);

                //               aud_frame_pts = pFrame->pkt_pts ;
                //printf( " audio frame : pts is %" PRId64 "\n", aud_frame_pts );

                if ( pkt_bytes_decd < 0 ) {
                        /* if error, skip packet */
                        break;
                }

                if ( frame_fin ) {
                        audio_data_size = create_channel_data( pFrame );
                        packet.size -= pkt_bytes_decd;
                        av_free( pFrame );
                        return audio_data_size ;
                }
        }

        /*        if ( pkt->pts != AV_NOPTS_VALUE ) {
                        gMedia->audio_clock = av_q2d( gMedia->pFormatContext->
                                                      streams[aud_stream_index] )
                                              * pkt->pts;
                }
        */
        return 1; //Never comes here
}
Developer: super11, Project: JSPlayer, Lines: 57, Source: audiofuncs.c

Example 9: audio_decode_frame

int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size) 
{
	//ffplay_info("Start.\n");

	static AVPacket pkt;
	static uint8_t *audio_pkt_data = NULL;
	static int audio_pkt_size = 0;

	int len1, data_size;

	for(;;) 
	{
		while(audio_pkt_size > 0) 
		{
			data_size = buf_size;

			len1 = avcodec_decode_audio3(aCodecCtx, (int16_t *)audio_buf, &data_size, 
					  &pkt);
			ffplay_info("audio_buf = 0x%8x, data_size = %d, pkt = 0x%8x\n",audio_buf,data_size,&pkt);
			if(len1 < 0) 
			{
				/* if error, skip frame */
				audio_pkt_size = 0;
				break;
			}
			audio_pkt_data += len1;
			audio_pkt_size -= len1;
			if(data_size <= 0) 
			{
				/* No data yet, get more frames */
				continue;
			}
			/* We have data, return it and come back for more later */
			return data_size;
		}
		if(pkt.data)
		{
			ffplay_info("Here.\n");
			av_free_packet(&pkt);
		}
		if(quit) 
		{
			ffplay_info("Here.\n");
			return -1;
		}

		if(packet_queue_get(&audioq, &pkt, 1) < 0) 
		{
			ffplay_info("Here.\n");
			return -1;
		}
		audio_pkt_data = pkt.data;
		audio_pkt_size = pkt.size;
	}
	//ffplay_info("end.\n");
}
Developer: beizhong2501, Project: ffmpeg_tutorial_modify, Lines: 56, Source: ffmpeg_tutorial_modify_03.c

Example 10: video_thread

int video_thread(void *arg) {
    VideoState *is = (VideoState *)arg;
    AVPacket pkt1, *packet = &pkt1;
    int frameFinished;
    AVFrame *pFrame;
    double pts;

    pFrame = avcodec_alloc_frame();

    for(;;) {
        if(packet_queue_get(&is->videoq, packet, 1) < 0) {
            // means we quit getting packets
            break;
        }

        if(packet->data == flush_pkt.data) {
            avcodec_flush_buffers(is->video_st->codec);
            continue;
        }

        pts = 0;

        // Save global pts to be stored in pFrame in first call
        global_video_pkt_pts = packet->pts;
        // Decode video frame
        avcodec_decode_video2(is->video_st->codec, pFrame, &frameFinished,
                              packet);

        if(packet->dts == AV_NOPTS_VALUE
                && pFrame->opaque && *(uint64_t*)pFrame->opaque != AV_NOPTS_VALUE) {
            pts = *(uint64_t *)pFrame->opaque;

        } else if(packet->dts != AV_NOPTS_VALUE) {
            pts = packet->dts;

        } else {
            pts = 0;
        }

        pts *= av_q2d(is->video_st->time_base);

        // Did we get a video frame?
        if(frameFinished) {
            pts = synchronize_video(is, pFrame, pts);

            if(queue_picture(is, pFrame, pts) < 0) {
                break;
            }
        }

        av_free_packet(packet);
    }

    av_free(pFrame);
    return 0;
}
Developer: elad-perets, Project: dranger_ffmpeg_ubuntu_trusty, Lines: 56, Source: tutorial07.c

Example 11: audio_decode_frame

int audio_decode_frame(AVCodecContext *aCodecCtx, AVPacket *pkt, AVPacket *pkt_temp, AVFrame *frame, uint8_t *audio_buf)
{
	int len1, data_size;
	int got_frame = 0;
	int new_packet = 0;

	while(1)
	{
		while(pkt_temp->size > 0 || (!pkt_temp->data && new_packet))
		{
			if(!frame)
			{
				if(!(frame = avcodec_alloc_frame()))
					return AVERROR(ENOMEM);
			}
			else
			{
				avcodec_get_frame_defaults(frame);
			}
			new_packet = 0;
			got_frame = 0;
			len1 = avcodec_decode_audio4(aCodecCtx, frame, &got_frame, pkt_temp);
			if(len1 < 0)
			{
				/*if error, skip frame*/
				pkt_temp->size = 0;
				av_free_packet(pkt_temp);
				continue;
			}
			pkt_temp->data += len1;
			pkt_temp->size -= len1;
			if(!got_frame)
			{
				/*stop sending empty packets if the decoder is finished*/
				continue;
				//break;
			}
			data_size = av_samples_get_buffer_size(NULL, aCodecCtx->channels, frame->nb_samples, aCodecCtx->sample_fmt, 1);
			memcpy(audio_buf, frame->data[0], frame->linesize[0]);
			return data_size;
		}
		if(pkt->data)
			av_free_packet(pkt);
		memset(pkt_temp, 0, sizeof(*pkt_temp));
		if(quit)
		{
			return -1;
		}
		if((new_packet = packet_queue_get(&audioq, pkt, 1)) < 0)
		{
			return -1;
		}
		*pkt_temp = *pkt;
	}
}
Developer: elmagroud00, Project: Experiments, Lines: 55, Source: main.c

Example 12: video_thread

int video_thread(void *arg) {
  VideoState *is = (VideoState *)arg;
  AVPacket pkt1, *packet = &pkt1;
  int len1, frameFinished;
  AVFrame *pFrame;
  double pts;

  pFrame = avcodec_alloc_frame();

  is->rgbaFrame = avcodec_alloc_frame();
  avpicture_alloc ((AVPicture *)is->rgbaFrame, PIX_FMT_RGBA, is->video_st->codec->width, is->video_st->codec->height);


  for(;;) {
    if(packet_queue_get(&is->videoq, packet, 1) < 0) {
      // means we quit getting packets
      break;
    }
    pts = 0;

    // Save global pts to be stored in pFrame
    global_video_pkt_pts = packet->pts;
    // Decode video frame
    len1 = avcodec_decode_video2(is->video_st->codec, pFrame, &frameFinished,
        packet);
    if(packet->dts == AV_NOPTS_VALUE
       && pFrame->opaque && *(uint64_t*)pFrame->opaque != AV_NOPTS_VALUE) {
      pts = (double)(*(uint64_t *)pFrame->opaque);
    } else if(packet->dts != AV_NOPTS_VALUE) {
		pts = (double)packet->dts;
    } else {
      pts = 0;
    }
    pts *= av_q2d(is->video_st->time_base);


    // Did we get a video frame?
    if(frameFinished) {
      pts = synchronize_video(is, pFrame, pts);
      if(queue_picture(is, pFrame, pts) < 0) {
        break;
      }
    }
    av_free_packet(packet);
  }

  SDL_CloseAudio();

  av_free(pFrame);

  avpicture_free((AVPicture *)is->rgbaFrame);
  av_free(is->rgbaFrame);

  return 0;
}
Developer: pdpdds, Project: Win32OpenSourceSample, Lines: 55, Source: VideoPlayer.cpp

Example 13: decode_audio_frame

int decode_audio_frame (PlayerContext *ctx, uint8_t **buf)
{
    static AVPacket pkt, cur_pkt;
    static AVFrame *frame;
    int got_frame, decoded_bytes;

    if (!frame)
    {
        frame = avcodec_alloc_frame ();
        if (!frame)
            return AVERROR (ENOMEM);
    }

    for (;;)
    {
        while (pkt.size > 0)
        {
            avcodec_get_frame_defaults (frame);

            decoded_bytes = avcodec_decode_audio4 (ctx->audio_codec,
                                                   frame, &got_frame,
                                                   &pkt);
            if (decoded_bytes < 0)
            {
                // error, skip the frame
                pkt.size = 0;
                break;
            }

            pkt.data += decoded_bytes;
            pkt.size -= decoded_bytes;

            *buf = frame->data[0];

            return av_samples_get_buffer_size(NULL,
                                              frame->channels,
                                              frame->nb_samples,
                                              frame->format, 1);
        }

        // free the current packet
        if (cur_pkt.data)
            av_free_packet (&cur_pkt);
        memset (&pkt, 0, sizeof (pkt));

        if (quit)
            return -1;

        // read next packet
        if (packet_queue_get (&ctx->audioq, &cur_pkt, 1) < 0)
            return -1;

        pkt = cur_pkt;
    }
}
Developer: bazurbat, Project: ffmpeg_tutorial, Lines: 55, Source: tutorial04.c

Example 14: audio_decode_frame

int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size) {

	static AVPacket pkt;
	static uint8_t *audio_pkt_data = NULL;
	static int audio_pkt_size = 0;
	static AVFrame frame;
	int len1, resampled_data_size=0;

	for (;;) {
		while (audio_pkt_size > 0) {
			int got_frame = 0;
			len1 = avcodec_decode_audio4(aCodecCtx, &frame, &got_frame, &pkt);
			if (len1 < 0) {
				/* if error, skip frame */
				audio_pkt_size = 0;
				break;
			}
			audio_pkt_data += len1;
			audio_pkt_size -= len1;

			if (got_frame) {

				// ---------------

				// Prepare the other four required swr_convert arguments: out, out_samples_per_ch, in, in_samples_per_ch
				uint8_t **out = &audio_buf;
				const uint8_t **in = (const uint8_t **)frame.extended_data;
				//int out_samples_per_ch = buf_size/ (av_get_bytes_per_sample(AV_SAMPLE_FMT_S16)*2);
				// Call swr_convert to do the conversion
				int len2 = 0;
				len2 = swr_convert(swr_ctx, out, frame.nb_samples, in, frame.nb_samples);
				resampled_data_size = len2 * 2 * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);
				//memcpy(audio_buf, frame.data[0], data_size);

				// ----------------
				
			}
			/* We have data, return it and come back for more later */
			return resampled_data_size;
		}
		if (pkt.data)
			av_free_packet(&pkt);

		if (quit) {
			return -1;
		}

		if (packet_queue_get(&audioq, &pkt, 1) < 0) {
			return -1;
		}
		audio_pkt_data = pkt.data;
		audio_pkt_size = pkt.size;
	}
	
}
Developer: shileiz, Project: notes, Lines: 55, Source: tutorial03.01_ConvertRawAudio.cpp
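Example 14 assumes a global swr_ctx that has already been configured to convert whatever the decoder produces into packed stereo S16, which is exactly what the len2 * 2 * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) size calculation relies on. Below is a sketch of how such a context is typically created with the same pre-5.1 channel-layout API used by the rest of the example; the helper name and error handling are assumptions, not part of the original code.

#include <libswresample/swresample.h>
#include <libavutil/channel_layout.h>

SwrContext *swr_ctx = NULL;  /* the global used by example 14 */

/* Hypothetical helper: resample from the decoder's native format to packed
 * stereo S16 at the decoder's sample rate. */
int init_swr(AVCodecContext *aCodecCtx) {
  int64_t in_layout = aCodecCtx->channel_layout
                          ? aCodecCtx->channel_layout
                          : av_get_default_channel_layout(aCodecCtx->channels);

  swr_ctx = swr_alloc_set_opts(NULL,
                               AV_CH_LAYOUT_STEREO,      /* out: 2 channels    */
                               AV_SAMPLE_FMT_S16,        /* out: packed 16-bit */
                               aCodecCtx->sample_rate,   /* out: same rate     */
                               in_layout,
                               aCodecCtx->sample_fmt,
                               aCodecCtx->sample_rate,
                               0, NULL);
  if (!swr_ctx || swr_init(swr_ctx) < 0)
    return -1;
  return 0;
}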

Example 15: video_thread

int video_thread(void *arg) {
  VideoState *is = (VideoState *)arg;
  AVPacket pkt1, *packet = &pkt1;
  int frameFinished;
  AVFrame *pFrame;
  double pts;

  pFrame = av_frame_alloc();

  for(;;) {
    if(packet_queue_get(&is->videoq, packet, 1) < 0) {
      // means we quit getting packets
      break;
    }
    pts = 0;

    // Decode video frame
    avcodec_decode_video2(is->video_ctx, pFrame, &frameFinished, packet);

    if((pts = av_frame_get_best_effort_timestamp(pFrame)) == AV_NOPTS_VALUE) {
      pts = 0;
    }
    pts *= av_q2d(is->video_st->time_base);

    // Did we get a video frame?
    if(frameFinished) {
      pts = synchronize_video(is, pFrame, pts);
      if(queue_picture(is, pFrame, pts) < 0) {
        break;
      }
    }
    av_free_packet(packet);
  }
  av_frame_free(&pFrame);
  return 0;
}
Developer: shileiz, Project: notes, Lines: 42, Source: tutorial07.c
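The video-thread examples (2, 5, 10, 12, and 15) all revolve around avcodec_decode_video2, which was removed in FFmpeg 5 along with av_free_packet. A hedged sketch of the same loop on the send/receive API follows; VideoState, packet_queue_get, synchronize_video, and queue_picture are assumed to behave as in the examples above, and the video_ctx/video_st field names follow example 15. It is not verbatim from any of the quoted projects.

/* Hedged adaptation of the video_thread loop to the modern decode API. */
int video_thread_modern(void *arg) {
  VideoState *is = (VideoState *)arg;
  AVPacket *packet = av_packet_alloc();
  AVFrame *frame = av_frame_alloc();
  double pts;

  if (!packet || !frame)
    goto done;

  for (;;) {
    if (packet_queue_get(&is->videoq, packet, 1) < 0)
      break;                               /* we are quitting */

    /* Feed one packet; since every frame is drained below, EAGAIN cannot
     * occur here and any negative return is treated as fatal. */
    int ret = avcodec_send_packet(is->video_ctx, packet);
    av_packet_unref(packet);
    if (ret < 0)
      break;

    /* One packet can produce zero, one, or several frames. */
    while (avcodec_receive_frame(is->video_ctx, frame) == 0) {
      pts = (frame->best_effort_timestamp == AV_NOPTS_VALUE)
                ? 0
                : frame->best_effort_timestamp * av_q2d(is->video_st->time_base);
      pts = synchronize_video(is, frame, pts);
      if (queue_picture(is, frame, pts) < 0)
        goto done;
      av_frame_unref(frame);
    }
  }

done:
  av_frame_free(&frame);
  av_packet_free(&packet);
  return 0;
}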


Note: The packet_queue_get examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Please consult the corresponding project's license before distributing or using the code. Do not reproduce this article without permission.