本文整理汇总了C++中MediaSubsession::fmtp_spropparametersets方法的典型用法代码示例。如果您正苦于以下问题：C++ MediaSubsession::fmtp_spropparametersets方法的具体用法？C++ MediaSubsession::fmtp_spropparametersets怎么用？C++ MediaSubsession::fmtp_spropparametersets使用的例子？那么，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类MediaSubsession
的用法示例。
在下文中一共展示了MediaSubsession::fmtp_spropparametersets方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: bad_alloc
// Constructs a sink that records the codec's out-of-band parameter-set NAL
// units (taken from the subsession's SDP "fmtp" attributes) so they can be
// prepended to the elementary stream before the first frame is written.
//
// @param env        live555 usage environment, forwarded to MediaSink.
// @param subsession the RTSP media subsession being received; must be
//                   H264 or H265.
// @param sink_url   destination URL handed to the sink factory.
// @param stream_id  identifier stored for this stream.
// @throws std::bad_alloc if the subsession's codec is neither H264 nor
//         H265. NOTE(review): std::invalid_argument would describe the
//         condition better, but the exception type is kept so existing
//         catch sites keep working.
DefaultSink::DefaultSink(UsageEnvironment & env,
                         MediaSubsession & subsession,
                         char const * sink_url,
                         char const * stream_id)
    : MediaSink(env),
      _subsession(subsession),
      _receive_buffer(RECEIVE_BUFFER_SIZE),
      _stream_id(stream_id),
      _writer(libc2rtsp::sink::SinkFactory().gen(sink_url)),
      _verbose(true),
      _sprop_parameter_sets(),
      _have_written_first_frame(false)
{
    // Append one parameter-set string, guarding against a missing fmtp
    // attribute: constructing std::string from a null char* is undefined
    // behavior. (Whether live555 returns NULL or "" for an absent
    // attribute depends on the live555 version — the guard is safe in
    // either case.)
    auto append_sprop = [this](char const * value) {
        if (value != nullptr) {
            _sprop_parameter_sets.emplace_back(value);
        }
    };

    if (::strcmp(subsession.codecName(), "H264") == 0) {
        // H.264: the sink later prepends 'start codes' plus these
        // SPS/PPS NAL units at the start of the stream.
        append_sprop(subsession.fmtp_spropparametersets());
    } else if (::strcmp(subsession.codecName(), "H265") == 0) {
        // H.265: same idea, but VPS, SPS and PPS arrive as three
        // separate fmtp attributes.
        append_sprop(subsession.fmtp_spropvps()); // VPS
        append_sprop(subsession.fmtp_spropsps()); // SPS
        append_sprop(subsession.fmtp_sproppps()); // PPS
    } else {
        crLogE("DefaultSink::DefaultSink() Unsupported subsession: {}/{}",
               subsession.mediumName(), subsession.codecName());
        throw std::bad_alloc();
    }
}
示例2: runtime_error
// Constructs a sink that decodes an H.264 subsession with ffmpeg and
// renders it into an SDL (1.2) YUV overlay.
//
// @param env        live555 usage environment, forwarded to MediaSink.
// @param subsession the RTSP media subsession being received; its
//                   videoWidth()/videoHeight() size the SDL window.
// @param streamId   identifier; duplicated into m_fStreamId.
// @throws std::runtime_error if SDL, the ffmpeg decoder, or the video
//         surface cannot be initialized. All resources acquired before
//         the failure are released first — a throwing constructor never
//         runs its destructor, so cleanup must happen here.
StreamMediaSink::StreamMediaSink(UsageEnvironment& env, MediaSubsession& subsession, char const* streamId)
    : MediaSink(env)
    , m_fSubsession(subsession)
    , m_idx(0)
    , m_avCodec(NULL)
    , m_avCodecContext(NULL)
    , m_avFrame(NULL)
    , m_bmp(NULL)
    , m_screen(NULL)
    , img_convert_ctx(NULL)
{
    if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_TIMER)) {
        std::stringstream ss;
        ss << "Could not initialize SDL - " << SDL_GetError();
        throw std::runtime_error(ss.str().c_str());
    }
    m_fStreamId = strDup(streamId);       // live555 strDup allocates with new[]
    m_buffer = new u_int8_t[MEDIA_SINK_RECEIVE_BUFFER_SIZE + 4];
    av_init_packet(&m_avPacket);

    // Release everything acquired so far, then throw. Fixes the resource
    // leaks the original error paths had (buffer, stream id, codec
    // context, frame, SDL) when construction fails partway through.
    auto fail = [this](char const* msg) {
        if (m_avFrame) {
            av_frame_free(&m_avFrame);
        }
        if (m_avCodecContext) {
            avcodec_close(m_avCodecContext);
            av_free(m_avCodecContext);
            m_avCodecContext = NULL;
        }
        delete[] m_buffer;
        m_buffer = NULL;
        delete[] m_fStreamId;
        m_fStreamId = NULL;
        SDL_Quit();
        throw std::runtime_error(msg);
    };

    m_avCodec = avcodec_find_decoder(AV_CODEC_ID_H264);
    if (!m_avCodec) {
        fail("Failed to find H264 ffmpeg codec");
    }
    m_avCodecContext = avcodec_alloc_context3(m_avCodec);
    if (!m_avCodecContext) {
        fail("Failed to allocate codec context");
    }
    m_avCodecContext->pix_fmt = PIX_FMT_YUV420P;
    if (m_avCodec->capabilities & CODEC_CAP_TRUNCATED) {
        // Decoder accepts packets that are not aligned on frame boundaries.
        m_avCodecContext->flags |= CODEC_FLAG_TRUNCATED;
    }
    if (avcodec_open2(m_avCodecContext, m_avCodec, NULL) < 0) {
        fail("Failed to open codec");
    }
    m_avFrame = av_frame_alloc();
    if (!m_avFrame) {
        fail("Failed to allocate video frame");
    }
    m_screen = SDL_SetVideoMode(m_fSubsession.videoWidth(), m_fSubsession.videoHeight(), 0, 0);
    if (!m_screen) {
        fail("SDL: could not set video mode - exiting");
    }
    // Allocate a place to put our YUV image on that screen. The original
    // never checked this result before using m_bmp later.
    m_bmp = SDL_CreateYUVOverlay(m_screen->w, m_screen->h, SDL_YV12_OVERLAY, m_screen);
    if (!m_bmp) {
        fail("SDL: could not create YUV overlay");
    }
    if (img_convert_ctx == NULL) {
        int w = m_screen->w;
        int h = m_screen->h;
        img_convert_ctx = sws_getContext(w, h, m_avCodecContext->pix_fmt, w, h, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
    }

    // Prime the receive buffer with one Annex-B start code; incoming NAL
    // units are then written directly after it (m_fReceiveBuffer).
    // (A disabled, non-compiling #if 0 block that tried to pre-feed the
    // decoder with the SDP sprop parameter sets was removed.)
    const u_int8_t start_code[] = {0x00, 0x00, 0x00, 0x01};
    u_int8_t idx = 0;
    memcpy(&m_buffer[idx], start_code, 4);
    idx += 4;
    m_fReceiveBuffer = &m_buffer[idx];
}