本文整理汇总了C++中GST_AUDIO_DECODER函数的典型用法代码示例。如果您正苦于以下问题:C++ GST_AUDIO_DECODER函数的具体用法?C++ GST_AUDIO_DECODER怎么用?C++ GST_AUDIO_DECODER使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了GST_AUDIO_DECODER函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: gst_amc_audio_dec_init
/* Instance init: configure the GstAudioDecoder base class and create the
 * synchronization primitives used while draining the Android MediaCodec. */
static void
gst_amc_audio_dec_init (GstAmcAudioDec * self)
{
  GstAudioDecoder *adec = GST_AUDIO_DECODER (self);

  /* the base class must receive caps before data and should drain on EOS */
  gst_audio_decoder_set_needs_format (adec, TRUE);
  gst_audio_decoder_set_drainable (adec, TRUE);

  /* lock/cond pair protecting the drain handshake with the codec loop */
  g_mutex_init (&self->drain_lock);
  g_cond_init (&self->drain_cond);
}
示例2: gst_ffmpegauddec_init
/* Instance init: allocate the libav codec context for this element's codec
 * and configure the GstAudioDecoder base-class behavior. */
static void
gst_ffmpegauddec_init (GstFFMpegAudDec * ffmpegdec)
{
  GstAudioDecoder *adec = GST_AUDIO_DECODER (ffmpegdec);
  GstFFMpegAudDecClass *klass;

  klass = (GstFFMpegAudDecClass *) G_OBJECT_GET_CLASS (ffmpegdec);

  /* some ffmpeg data */
  ffmpegdec->context = avcodec_alloc_context3 (klass->in_plugin);
  ffmpegdec->opened = FALSE;

  /* base class should drain on EOS and must get caps before data */
  gst_audio_decoder_set_drainable (adec, TRUE);
  gst_audio_decoder_set_needs_format (adec, TRUE);
}
示例3: gst_ffmpegauddec_drain
/* Drain the decoder: for codecs that buffer frames internally (CODEC_CAP_DELAY),
 * keep feeding empty packets until libav stops producing output, then flush,
 * and finally push any pending output buffer downstream. */
static void
gst_ffmpegauddec_drain (GstFFMpegAudDec * ffmpegdec)
{
  GstFFMpegAudDecClass *klass;

  klass = (GstFFMpegAudDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));

  if (klass->in_plugin->capabilities & CODEC_CAP_DELAY) {
    gint got_data;
    gint consumed;

    GST_LOG_OBJECT (ffmpegdec,
        "codec has delay capabilities, calling until libav has drained everything");

    do {
      GstFlowReturn flow;

      /* NULL/0 input tells the codec to emit whatever it still holds */
      consumed = gst_ffmpegauddec_frame (ffmpegdec, NULL, 0, &got_data, &flow);
    } while (consumed >= 0 && got_data == 1);

    avcodec_flush_buffers (ffmpegdec->context);
  }

  /* hand any leftover decoded buffer to the base class */
  if (ffmpegdec->outbuf != NULL)
    gst_audio_decoder_finish_frame (GST_AUDIO_DECODER (ffmpegdec),
        ffmpegdec->outbuf, 1);
  ffmpegdec->outbuf = NULL;
}
示例4: gst_dtsdec_renegotiate
/* Reconfigure the output format from the current DTS stream parameters.
 * Builds a valid channel ordering, records the reorder map used when
 * pushing samples, and negotiates the format downstream.
 * Returns TRUE on success, FALSE if the channel layout is unknown or
 * the output format could not be set. */
static gboolean
gst_dtsdec_renegotiate (GstDtsDec * dts)
{
  GstAudioChannelPosition from[6], to[6];
  GstAudioInfo info;
  gint channels;

  channels = gst_dtsdec_channels (dts->using_channels, from);
  if (!channels)
    return FALSE;

  GST_INFO_OBJECT (dts, "dtsdec renegotiate, channels=%d, rate=%d",
      channels, dts->sample_rate);

  /* derive a GStreamer-valid ordering and the map to convert into it */
  memcpy (to, from, sizeof (GstAudioChannelPosition) * channels);
  gst_audio_channel_positions_to_valid_order (to, channels);
  gst_audio_get_channel_reorder_map (channels, from, to,
      dts->channel_reorder_map);

  gst_audio_info_init (&info);
  gst_audio_info_set_format (&info,
      SAMPLE_TYPE, dts->sample_rate, channels, (channels > 1 ? to : NULL));

  return gst_audio_decoder_set_output_format (GST_AUDIO_DECODER (dts), &info);
}
示例5: gst_mulawdec_init
/* Instance init: require caps before data and let the base class handle
 * accept-caps queries against the sink pad template. */
static void
gst_mulawdec_init (GstMuLawDec * mulawdec)
{
  GstAudioDecoder *adec = GST_AUDIO_DECODER (mulawdec);

  gst_audio_decoder_set_needs_format (adec, TRUE);
  gst_audio_decoder_set_use_default_pad_acceptcaps (GST_AUDIO_DECODER_CAST
      (mulawdec), TRUE);
  GST_PAD_SET_ACCEPT_TEMPLATE (GST_AUDIO_DECODER_SINK_PAD (mulawdec));
}
示例6: gst_droidadec_init
/* Instance init: configure the base class, reset all codec state to a
 * known-empty starting point, and create the EOS synchronization pair
 * plus the input adapter. */
static void
gst_droidadec_init (GstDroidADec * dec)
{
  GstAudioDecoder *adec = GST_AUDIO_DECODER (dec);

  gst_audio_decoder_set_needs_format (adec, TRUE);
  gst_audio_decoder_set_drainable (adec, TRUE);

  /* no codec instance yet; everything starts cleared */
  dec->codec = NULL;
  dec->codec_type = NULL;
  dec->codec_data = NULL;
  dec->downstream_flow_ret = GST_FLOW_OK;
  dec->eos = FALSE;
  dec->channels = 0;
  dec->rate = 0;

  /* lock/cond pair used to wait for end-of-stream completion */
  g_mutex_init (&dec->eos_lock);
  g_cond_init (&dec->eos_cond);

  dec->adapter = gst_adapter_new ();
}
示例7: gst_wavpack_dec_init
/* Instance init: create the wavpack stream reader, require caps before
 * data, and reset the decoder state. */
static void
gst_wavpack_dec_init (GstWavpackDec * dec)
{
  dec->context = NULL;
  dec->stream_reader = gst_wavpack_stream_reader_new ();

  gst_audio_decoder_set_needs_format (GST_AUDIO_DECODER (dec), TRUE);

  gst_wavpack_dec_reset (dec);
}
示例8: gst_opus_dec_init
/* Instance init: set property defaults, require caps before data, and
 * reset the decoder state. */
static void
gst_opus_dec_init (GstOpusDec * dec)
{
  /* property defaults */
  dec->use_inband_fec = FALSE;
  dec->apply_gain = DEFAULT_APPLY_GAIN;

  gst_audio_decoder_set_needs_format (GST_AUDIO_DECODER (dec), TRUE);

  gst_opus_dec_reset (dec);
}
示例9: gst_sbc_dec_init
/* Instance init: configure base-class caps handling and clear the
 * per-stream frame geometry. */
static void
gst_sbc_dec_init (GstSbcDec * dec)
{
  GstAudioDecoder *adec = GST_AUDIO_DECODER (dec);

  gst_audio_decoder_set_needs_format (adec, TRUE);
  gst_audio_decoder_set_use_default_pad_acceptcaps (GST_AUDIO_DECODER_CAST
      (dec), TRUE);
  GST_PAD_SET_ACCEPT_TEMPLATE (GST_AUDIO_DECODER_SINK_PAD (dec));

  /* unknown until the first frame header is parsed */
  dec->samples_per_frame = 0;
  dec->frame_len = 0;
}
示例10: gst_ffmpegauddec_init
/* Instance init: allocate the libav codec context and frame, wire the
 * context back to this element via ->opaque (used by the get_buffer
 * callback), and configure the GstAudioDecoder base class.
 *
 * Fix: the original called GST_PAD_SET_ACCEPT_TEMPLATE on
 * GST_VIDEO_DECODER_SINK_PAD — a copy-paste from the video decoder. This
 * is an audio decoder, so the flag must be set on the *audio* decoder's
 * sink pad. */
static void
gst_ffmpegauddec_init (GstFFMpegAudDec * ffmpegdec)
{
  GstFFMpegAudDecClass *klass =
      (GstFFMpegAudDecClass *) G_OBJECT_GET_CLASS (ffmpegdec);

  /* some ffmpeg data */
  ffmpegdec->context = avcodec_alloc_context3 (klass->in_plugin);
  /* let libav callbacks find their way back to the GStreamer element */
  ffmpegdec->context->opaque = ffmpegdec;
  ffmpegdec->opened = FALSE;

  ffmpegdec->frame = av_frame_alloc ();

  GST_PAD_SET_ACCEPT_TEMPLATE (GST_AUDIO_DECODER_SINK_PAD (ffmpegdec));
  gst_audio_decoder_set_use_default_pad_acceptcaps (GST_AUDIO_DECODER_CAST
      (ffmpegdec), TRUE);

  gst_audio_decoder_set_drainable (GST_AUDIO_DECODER (ffmpegdec), TRUE);
  gst_audio_decoder_set_needs_format (GST_AUDIO_DECODER (ffmpegdec), TRUE);
}
示例11: gst_ffmpegauddec_get_buffer
/* called when ffmpeg wants us to allocate a buffer to write the decoded frame
 * into. We try to give it memory from our pool.
 *
 * Fix: gst_audio_decoder_allocate_output_buffer() can return NULL; the
 * original passed the result unchecked to gst_buffer_map(), crashing on
 * allocation failure. We now free the BufferInfo slice and fall back to
 * libav's default allocator in that case. */
static int
gst_ffmpegauddec_get_buffer (AVCodecContext * context, AVFrame * frame)
{
  GstFFMpegAudDec *ffmpegdec;
  GstAudioInfo *info;
  BufferInfo *buffer_info;

  /* ->opaque was set to the element in _init */
  ffmpegdec = (GstFFMpegAudDec *) context->opaque;

  if (G_UNLIKELY (!gst_ffmpegauddec_negotiate (ffmpegdec, FALSE)))
    goto negotiate_failed;

  /* Always use the default allocator for planar audio formats because
   * we will have to copy and deinterleave later anyway */
  if (av_sample_fmt_is_planar (ffmpegdec->context->sample_fmt))
    goto fallback;

  info = gst_audio_decoder_get_audio_info (GST_AUDIO_DECODER (ffmpegdec));

  buffer_info = g_slice_new (BufferInfo);
  buffer_info->buffer =
      gst_audio_decoder_allocate_output_buffer (GST_AUDIO_DECODER (ffmpegdec),
      frame->nb_samples * info->bpf);
  if (G_UNLIKELY (buffer_info->buffer == NULL)) {
    /* allocation failed: release the bookkeeping struct and let libav
     * allocate the frame itself */
    g_slice_free (BufferInfo, buffer_info);
    goto fallback;
  }
  gst_buffer_map (buffer_info->buffer, &buffer_info->map, GST_MAP_WRITE);

  /* stash the mapping so it can be unmapped/released in release_buffer */
  frame->opaque = buffer_info;
  frame->data[0] = buffer_info->map.data;
  frame->extended_data = frame->data;
  frame->linesize[0] = buffer_info->map.size;
  frame->type = FF_BUFFER_TYPE_USER;

  return 0;

  /* fallbacks */
negotiate_failed:
  {
    GST_DEBUG_OBJECT (ffmpegdec, "negotiate failed");
    goto fallback;
  }
fallback:
  {
    return avcodec_default_get_buffer (context, frame);
  }
}
示例12: gst_speex_dec_init
/* Instance init: configure base-class caps handling, set the property
 * default for perceptual enhancement, and reset the decoder state. */
static void
gst_speex_dec_init (GstSpeexDec * dec)
{
  GstAudioDecoder *adec = GST_AUDIO_DECODER (dec);

  gst_audio_decoder_set_needs_format (adec, TRUE);
  gst_audio_decoder_set_use_default_pad_acceptcaps (GST_AUDIO_DECODER_CAST
      (dec), TRUE);
  GST_PAD_SET_ACCEPT_TEMPLATE (GST_AUDIO_DECODER_SINK_PAD (dec));

  /* property default */
  dec->enh = DEFAULT_ENH;

  gst_speex_dec_reset (dec);
}
示例13: gst_opus_dec_init
/* Instance init: set property defaults, configure base-class caps
 * handling, and reset the decoder state. */
static void
gst_opus_dec_init (GstOpusDec * dec)
{
  GstAudioDecoder *adec = GST_AUDIO_DECODER (dec);

  /* property defaults */
  dec->use_inband_fec = FALSE;
  dec->apply_gain = DEFAULT_APPLY_GAIN;

  gst_audio_decoder_set_needs_format (adec, TRUE);
  gst_audio_decoder_set_use_default_pad_acceptcaps (GST_AUDIO_DECODER_CAST
      (dec), TRUE);
  GST_PAD_SET_ACCEPT_TEMPLATE (GST_AUDIO_DECODER_SINK_PAD (dec));

  gst_opus_dec_reset (dec);
}
示例14: gst_wavpack_dec_init
/* Instance init: create the wavpack stream reader, configure base-class
 * caps handling, and reset the decoder state. */
static void
gst_wavpack_dec_init (GstWavpackDec * dec)
{
  GstAudioDecoder *adec = GST_AUDIO_DECODER (dec);

  dec->context = NULL;
  dec->stream_reader = gst_wavpack_stream_reader_new ();

  gst_audio_decoder_set_needs_format (adec, TRUE);
  gst_audio_decoder_set_use_default_pad_acceptcaps (GST_AUDIO_DECODER_CAST
      (dec), TRUE);
  GST_PAD_SET_ACCEPT_TEMPLATE (GST_AUDIO_DECODER_SINK_PAD (dec));

  gst_wavpack_dec_reset (dec);
}
示例15: gst_a52dec_update_streaminfo
/* Publish the current bitrate as stream tags via the base class. */
static void
gst_a52dec_update_streaminfo (GstA52Dec * a52dec)
{
  GstTagList *tags = gst_tag_list_new_empty ();

  gst_tag_list_add (tags, GST_TAG_MERGE_APPEND, GST_TAG_BITRATE,
      (guint) a52dec->bit_rate, NULL);

  /* the base class takes its own ref; drop ours afterwards */
  gst_audio_decoder_merge_tags (GST_AUDIO_DECODER (a52dec), tags,
      GST_TAG_MERGE_REPLACE);
  gst_tag_list_unref (tags);
}