本文整理汇总了C++中GST_LOG_OBJECT函数的典型用法代码示例。如果您正苦于以下问题:C++ GST_LOG_OBJECT函数的具体用法?C++ GST_LOG_OBJECT怎么用?C++ GST_LOG_OBJECT使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了GST_LOG_OBJECT函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: gst_wavpack_parse_scan_to_find_sample
/* returns TRUE on success, with byte_offset set to the offset of the
* wavpack chunk containing the sample requested. start_sample will be
* set to the first sample in the chunk starting at byte_offset.
* Scanning from the last known header offset to the wanted position
* when seeking forward isn't very clever, but seems fast enough in
* practice and has the nice side effect of populating our index
* table */
static gboolean
gst_wavpack_parse_scan_to_find_sample (GstWavpackParse * parse,
    gint64 sample, gint64 * byte_offset, gint64 * start_sample)
{
  GstWavpackParseIndexEntry *entry;
  /* initialized so the failure log below never reads an indeterminate
   * value, even if control flow changes later */
  GstFlowReturn ret = GST_FLOW_OK;
  gint64 off = 0;

  /* first, check if we have to scan at all */
  entry = gst_wavpack_parse_index_get_entry_from_sample (parse, sample);
  if (entry) {
    *byte_offset = entry->byte_offset;
    *start_sample = entry->sample_offset;
    GST_LOG_OBJECT (parse, "Found index entry: sample %" G_GINT64_FORMAT
        " @ offset %" G_GINT64_FORMAT, entry->sample_offset,
        entry->byte_offset);
    return TRUE;
  }

  GST_LOG_OBJECT (parse, "No matching entry in index, scanning file ...");

  /* if we have an index, we can start scanning from the last known offset
   * in there, after all we know our wanted sample is not in the index */
  if (parse->entries) {
    /* renamed from 'entry' to avoid shadowing the outer variable */
    GstWavpackParseIndexEntry *last_entry;

    last_entry = gst_wavpack_parse_index_get_last_entry (parse);
    off = last_entry->byte_offset;
  }

  /* now scan forward until we find the chunk we're looking for or hit EOS */
  do {
    WavpackHeader header;
    GstBuffer *buf;

    buf = gst_wavpack_parse_pull_buffer (parse, off, sizeof (WavpackHeader),
        &ret);
    if (buf == NULL)
      break;

    gst_wavpack_read_header (&header, GST_BUFFER_DATA (buf));
    gst_buffer_unref (buf);

    /* only INITIAL_BLOCK chunks carry a block_index usable for seeking;
     * the match test is restricted to them, as before.  BUGFIX: the old
     * code did 'else continue;' which skipped the offset increment and
     * re-read the same non-initial chunk forever.  The offset must
     * advance for every chunk, matched or not. */
    if (header.flags & INITIAL_BLOCK) {
      gst_wavpack_parse_index_append_entry (parse, off, header.block_index,
          header.block_samples);

      if (header.block_index <= sample &&
          sample < (header.block_index + header.block_samples)) {
        *byte_offset = off;
        *start_sample = header.block_index;
        return TRUE;
      }
    }

    /* skip to the next chunk: 8 bytes of chunk preamble plus payload */
    off += header.ckSize + 8;
  } while (1);

  GST_DEBUG_OBJECT (parse, "scan failed: %s (off=0x%08" G_GINT64_MODIFIER "x)",
      gst_flow_get_name (ret), off);

  return FALSE;
}
示例2: gst_rtp_h264_pay_getcaps
static GstCaps *
gst_rtp_h264_pay_getcaps (GstRTPBasePayload * payload, GstPad * pad,
GstCaps * filter)
{
GstCaps *template_caps;
GstCaps *allowed_caps;
GstCaps *caps, *icaps;
gboolean append_unrestricted;
guint i;
allowed_caps =
gst_pad_peer_query_caps (GST_RTP_BASE_PAYLOAD_SRCPAD (payload), NULL);
if (allowed_caps == NULL)
return NULL;
template_caps =
gst_static_pad_template_get_caps (&gst_rtp_h264_pay_sink_template);
if (gst_caps_is_any (allowed_caps)) {
caps = gst_caps_ref (template_caps);
goto done;
}
if (gst_caps_is_empty (allowed_caps)) {
caps = gst_caps_ref (allowed_caps);
goto done;
}
caps = gst_caps_new_empty ();
append_unrestricted = FALSE;
for (i = 0; i < gst_caps_get_size (allowed_caps); i++) {
GstStructure *s = gst_caps_get_structure (allowed_caps, i);
GstStructure *new_s = gst_structure_new_empty ("video/x-h264");
const gchar *profile_level_id;
profile_level_id = gst_structure_get_string (s, "profile-level-id");
if (profile_level_id && strlen (profile_level_id) == 6) {
const gchar *profile;
const gchar *level;
long int spsint;
guint8 sps[3];
spsint = strtol (profile_level_id, NULL, 16);
sps[0] = spsint >> 16;
sps[1] = spsint >> 8;
sps[2] = spsint;
profile = gst_codec_utils_h264_get_profile (sps, 3);
level = gst_codec_utils_h264_get_level (sps, 3);
if (profile && level) {
GST_LOG_OBJECT (payload, "In caps, have profile %s and level %s",
profile, level);
if (!strcmp (profile, "constrained-baseline"))
gst_structure_set (new_s, "profile", G_TYPE_STRING, profile, NULL);
else {
GValue val = { 0, };
GValue profiles = { 0, };
g_value_init (&profiles, GST_TYPE_LIST);
g_value_init (&val, G_TYPE_STRING);
g_value_set_static_string (&val, profile);
gst_value_list_append_value (&profiles, &val);
g_value_set_static_string (&val, "constrained-baseline");
gst_value_list_append_value (&profiles, &val);
gst_structure_take_value (new_s, "profile", &profiles);
}
if (!strcmp (level, "1"))
gst_structure_set (new_s, "level", G_TYPE_STRING, level, NULL);
else {
GValue levels = { 0, };
GValue val = { 0, };
int j;
g_value_init (&levels, GST_TYPE_LIST);
g_value_init (&val, G_TYPE_STRING);
for (j = 0; j < G_N_ELEMENTS (all_levels); j++) {
g_value_set_static_string (&val, all_levels[j]);
gst_value_list_prepend_value (&levels, &val);
if (!strcmp (level, all_levels[j]))
break;
}
gst_structure_take_value (new_s, "level", &levels);
}
} else {
/* Invalid profile-level-id means baseline */
gst_structure_set (new_s,
"profile", G_TYPE_STRING, "constrained-baseline", NULL);
}
} else {
//.........这里部分代码省略.........
示例3: gst_soup_http_client_sink_start
/* GstBaseSink::start vmethod.
 * Either adopts an application-supplied session (prop_session) or creates
 * a private GMainContext plus a worker thread running a GMainLoop, and
 * then an async SoupSession attached to that context.
 * Returns TRUE on success, FALSE if the worker thread failed to start. */
static gboolean
gst_soup_http_client_sink_start (GstBaseSink * sink)
{
GstSoupHttpClientSink *souphttpsink = GST_SOUP_HTTP_CLIENT_SINK (sink);
/* an externally provided session takes precedence; no private thread then */
if (souphttpsink->prop_session) {
souphttpsink->session = souphttpsink->prop_session;
} else {
GSource *source;
GError *error = NULL;
souphttpsink->context = g_main_context_new ();
/* set up idle source to signal when the main loop is running and
* it's safe for ::stop() to call g_main_loop_quit() */
source = g_idle_source_new ();
g_source_set_callback (source, thread_ready_idle_cb, sink, NULL);
g_source_attach (source, souphttpsink->context);
g_source_unref (source);
souphttpsink->loop = g_main_loop_new (souphttpsink->context, TRUE);
/* mutex held across thread creation so the cond signal from the idle
* callback cannot fire before we reach g_cond_wait below */
g_mutex_lock (&souphttpsink->mutex);
souphttpsink->thread = g_thread_try_new ("souphttpclientsink-thread",
thread_func, souphttpsink, &error);
if (error != NULL) {
GST_DEBUG_OBJECT (souphttpsink, "failed to start thread, %s",
error->message);
g_error_free (error);
g_mutex_unlock (&souphttpsink->mutex);
return FALSE;
}
GST_LOG_OBJECT (souphttpsink, "waiting for main loop thread to start up");
/* NOTE(review): g_cond_wait is not wrapped in a predicate loop, so a
* spurious wakeup could let us continue before the loop actually runs;
* presumably thread_ready_idle_cb is the only signaller — confirm */
g_cond_wait (&souphttpsink->cond, &souphttpsink->mutex);
g_mutex_unlock (&souphttpsink->mutex);
GST_LOG_OBJECT (souphttpsink, "main loop thread running");
/* create the async session on our private context; the two branches
* differ only in the optional SOUP_SESSION_PROXY_URI option */
if (souphttpsink->proxy == NULL) {
souphttpsink->session =
soup_session_async_new_with_options (SOUP_SESSION_ASYNC_CONTEXT,
souphttpsink->context, SOUP_SESSION_USER_AGENT,
souphttpsink->user_agent, SOUP_SESSION_TIMEOUT, souphttpsink->timeout,
NULL);
} else {
souphttpsink->session =
soup_session_async_new_with_options (SOUP_SESSION_ASYNC_CONTEXT,
souphttpsink->context, SOUP_SESSION_USER_AGENT,
souphttpsink->user_agent, SOUP_SESSION_TIMEOUT, souphttpsink->timeout,
SOUP_SESSION_PROXY_URI, souphttpsink->proxy, NULL);
}
g_signal_connect (souphttpsink->session, "authenticate",
G_CALLBACK (authenticate), souphttpsink);
}
/* Set up logging */
gst_soup_util_log_setup (souphttpsink->session, souphttpsink->log_level,
GST_ELEMENT (souphttpsink));
return TRUE;
}
示例4: gst_gnome_vfs_src_received_headers_callback
/* GnomeVFS module callback invoked with the HTTP response headers.
 * In internet-radio mode this parses Icecast ("icy-*") headers: the
 * metadata interval becomes application/x-icy caps on the src pad, and
 * icy-name / icy-genre / icy-url are stored on the element with a
 * g_object_notify for each property that was set. */
static void
gst_gnome_vfs_src_received_headers_callback (gconstpointer in,
    gsize in_size, gpointer out, gsize out_size, gpointer callback_data)
{
  GstGnomeVFSSrc *src = GST_GNOME_VFS_SRC (callback_data);
  GnomeVFSModuleCallbackReceivedHeadersIn *in_args =
      (GnomeVFSModuleCallbackReceivedHeadersIn *) in;
  GList *walk;
  gint icy_metaint;

  /* This is only used for internet radio stuff right now */
  if (!src->iradio_mode)
    return;

  GST_DEBUG_OBJECT (src, "receiving internet radio metadata\n");

  /* FIXME: Could we use "Accept-Ranges: bytes"
   * http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.5
   * to enable pull-mode?
   */
  for (walk = in_args->headers; walk != NULL; walk = walk->next) {
    char *header = (char *) walk->data;
    char *field;
    char *val = strchr (header, ':');

    /* skip malformed lines and lines whose value is empty after trimming */
    if (val == NULL)
      continue;
    ++val;
    g_strstrip (val);
    if (*val == '\0')
      continue;

    GST_LOG_OBJECT (src, "data %s", header);

    /* Icecast stuff */
    if (strncmp (header, "icy-metaint:", 12) == 0) {    /* ugh */
      if (sscanf (header + 12, "%d", &icy_metaint) == 1 && icy_metaint > 0) {
        GstCaps *icy_caps = gst_caps_new_simple ("application/x-icy",
            "metadata-interval", G_TYPE_INT, icy_metaint, NULL);

        gst_pad_set_caps (GST_BASE_SRC_PAD (src), icy_caps);
        gst_caps_unref (icy_caps);
      }
      continue;
    }

    /* anything else we care about starts with the "icy-" prefix */
    if (strncmp (header, "icy-", 4) != 0)
      continue;
    field = header + 4;

    GST_DEBUG_OBJECT (src, "key: %s", field);

    if (strncmp (field, "name", 4) == 0) {
      g_free (src->iradio_name);
      src->iradio_name = gst_gnome_vfs_src_unicodify (val);
      if (src->iradio_name)
        g_object_notify (G_OBJECT (src), "iradio-name");
    } else if (strncmp (field, "genre", 5) == 0) {
      g_free (src->iradio_genre);
      src->iradio_genre = gst_gnome_vfs_src_unicodify (val);
      if (src->iradio_genre)
        g_object_notify (G_OBJECT (src), "iradio-genre");
    } else if (strncmp (field, "url", 3) == 0) {
      g_free (src->iradio_url);
      src->iradio_url = gst_gnome_vfs_src_unicodify (val);
      if (src->iradio_url)
        g_object_notify (G_OBJECT (src), "iradio-url");
    }
  }
}
示例5: gst_raw_parse_handle_seek_pull
static gboolean
gst_raw_parse_handle_seek_pull (GstRawParse * rp, GstEvent * event)
{
gdouble rate;
GstFormat format;
GstSeekFlags flags;
GstSeekType start_type, stop_type;
gint64 start, stop;
gint64 last_stop;
gboolean ret = FALSE;
gboolean flush;
GstSegment seeksegment;
if (event) {
gst_event_parse_seek (event, &rate, &format, &flags, &start_type, &start,
&stop_type, &stop);
/* convert input offsets to time */
ret = gst_raw_parse_convert (rp, format, start, GST_FORMAT_TIME, &start);
ret &= gst_raw_parse_convert (rp, format, stop, GST_FORMAT_TIME, &stop);
if (!ret)
goto convert_failed;
GST_DEBUG_OBJECT (rp, "converted start - stop to time");
format = GST_FORMAT_TIME;
gst_event_unref (event);
} else {
format = GST_FORMAT_TIME;
flags = 0;
}
flush = ((flags & GST_SEEK_FLAG_FLUSH) != 0);
/* start flushing up and downstream so that the loop function pauses and we
* can acquire the STREAM_LOCK. */
if (flush) {
GST_LOG_OBJECT (rp, "flushing");
gst_pad_push_event (rp->sinkpad, gst_event_new_flush_start ());
gst_pad_push_event (rp->srcpad, gst_event_new_flush_start ());
} else {
GST_LOG_OBJECT (rp, "pause task");
gst_pad_pause_task (rp->sinkpad);
}
GST_PAD_STREAM_LOCK (rp->sinkpad);
memcpy (&seeksegment, &rp->segment, sizeof (GstSegment));
if (event) {
/* configure the seek values */
gst_segment_do_seek (&seeksegment, rate, format, flags,
start_type, start, stop_type, stop, NULL);
}
/* get the desired position */
last_stop = seeksegment.position;
GST_LOG_OBJECT (rp, "seeking to %" GST_TIME_FORMAT,
GST_TIME_ARGS (last_stop));
/* convert the desired position to bytes */
ret =
gst_raw_parse_convert (rp, format, last_stop, GST_FORMAT_BYTES,
&last_stop);
/* prepare for streaming */
if (flush) {
GST_LOG_OBJECT (rp, "stop flush");
gst_pad_push_event (rp->sinkpad, gst_event_new_flush_stop (TRUE));
gst_pad_push_event (rp->srcpad, gst_event_new_flush_stop (TRUE));
}
if (ret) {
/* seek done */
/* Seek on a frame boundary */
last_stop -= last_stop % rp->framesize;
rp->offset = last_stop;
rp->n_frames = last_stop / rp->framesize;
GST_LOG_OBJECT (rp, "seeking to bytes %" G_GINT64_FORMAT, last_stop);
memcpy (&rp->segment, &seeksegment, sizeof (GstSegment));
if (rp->segment.flags & GST_SEEK_FLAG_SEGMENT) {
gst_element_post_message (GST_ELEMENT_CAST (rp),
gst_message_new_segment_start (GST_OBJECT_CAST (rp),
rp->segment.format, rp->segment.position));
}
/* for deriving a stop position for the playback segment from the seek
* segment, we must take the duration when the stop is not set */
if ((stop = rp->segment.stop) == -1)
stop = rp->segment.duration;
GST_DEBUG_OBJECT (rp, "preparing newsegment from %" G_GINT64_FORMAT
" to %" G_GINT64_FORMAT, rp->segment.start, stop);
//.........这里部分代码省略.........
示例6: gst_tcp_server_src_create
static GstFlowReturn
gst_tcp_server_src_create (GstPushSrc * psrc, GstBuffer ** outbuf)
{
GstTCPServerSrc *src;
GstFlowReturn ret = GST_FLOW_OK;
gssize rret, avail;
gsize read;
GError *err = NULL;
GstMapInfo map;
src = GST_TCP_SERVER_SRC (psrc);
if (!GST_OBJECT_FLAG_IS_SET (src, GST_TCP_SERVER_SRC_OPEN))
goto wrong_state;
if (!src->client_socket) {
/* wait on server socket for connections */
src->client_socket =
g_socket_accept (src->server_socket, src->cancellable, &err);
if (!src->client_socket)
goto accept_error;
/* now read from the socket. */
}
/* if we have a client, wait for read */
GST_LOG_OBJECT (src, "asked for a buffer");
/* read the buffer header */
avail = g_socket_get_available_bytes (src->client_socket);
if (avail < 0) {
goto get_available_error;
} else if (avail == 0) {
GIOCondition condition;
if (!g_socket_condition_wait (src->client_socket,
G_IO_IN | G_IO_PRI | G_IO_ERR | G_IO_HUP, src->cancellable, &err))
goto select_error;
condition =
g_socket_condition_check (src->client_socket,
G_IO_IN | G_IO_PRI | G_IO_ERR | G_IO_HUP);
if ((condition & G_IO_ERR)) {
GST_ELEMENT_ERROR (src, RESOURCE, READ, (NULL),
("Socket in error state"));
*outbuf = NULL;
ret = GST_FLOW_ERROR;
goto done;
} else if ((condition & G_IO_HUP)) {
GST_DEBUG_OBJECT (src, "Connection closed");
*outbuf = NULL;
ret = GST_FLOW_EOS;
goto done;
}
avail = g_socket_get_available_bytes (src->client_socket);
if (avail < 0)
goto get_available_error;
}
if (avail > 0) {
read = MIN (avail, MAX_READ_SIZE);
*outbuf = gst_buffer_new_and_alloc (read);
gst_buffer_map (*outbuf, &map, GST_MAP_READWRITE);
rret =
g_socket_receive (src->client_socket, (gchar *) map.data, read,
src->cancellable, &err);
} else {
/* Connection closed */
rret = 0;
*outbuf = NULL;
read = 0;
}
if (rret == 0) {
GST_DEBUG_OBJECT (src, "Connection closed");
ret = GST_FLOW_EOS;
if (*outbuf) {
gst_buffer_unmap (*outbuf, &map);
gst_buffer_unref (*outbuf);
}
*outbuf = NULL;
} else if (rret < 0) {
if (g_error_matches (err, G_IO_ERROR, G_IO_ERROR_CANCELLED)) {
ret = GST_FLOW_FLUSHING;
GST_DEBUG_OBJECT (src, "Cancelled reading from socket");
} else {
ret = GST_FLOW_ERROR;
GST_ELEMENT_ERROR (src, RESOURCE, READ, (NULL),
("Failed to read from socket: %s", err->message));
}
gst_buffer_unmap (*outbuf, &map);
gst_buffer_unref (*outbuf);
*outbuf = NULL;
} else {
ret = GST_FLOW_OK;
gst_buffer_unmap (*outbuf, &map);
gst_buffer_resize (*outbuf, 0, rret);
GST_LOG_OBJECT (src,
"Returning buffer from _get of size %" G_GSIZE_FORMAT ", ts %"
//.........这里部分代码省略.........
示例7: gst_spectrum_transform_ip
static GstFlowReturn
gst_spectrum_transform_ip (GstBaseTransform * trans, GstBuffer * buffer)
{
GstSpectrum *spectrum = GST_SPECTRUM (trans);
guint rate = GST_AUDIO_FILTER_RATE (spectrum);
guint channels = GST_AUDIO_FILTER_CHANNELS (spectrum);
guint bps = GST_AUDIO_FILTER_BPS (spectrum);
guint bpf = GST_AUDIO_FILTER_BPF (spectrum);
guint output_channels = spectrum->multi_channel ? channels : 1;
guint c;
gfloat max_value = (1UL << ((bps << 3) - 1)) - 1;
guint bands = spectrum->bands;
guint nfft = 2 * bands - 2;
guint input_pos;
gfloat *input;
GstMapInfo map;
const guint8 *data;
gsize size;
guint fft_todo, msg_todo, block_size;
gboolean have_full_interval;
GstSpectrumChannel *cd;
GstSpectrumInputData input_data;
g_mutex_lock (&spectrum->lock);
gst_buffer_map (buffer, &map, GST_MAP_READ);
data = map.data;
size = map.size;
GST_LOG_OBJECT (spectrum, "input size: %" G_GSIZE_FORMAT " bytes", size);
if (GST_BUFFER_IS_DISCONT (buffer)) {
GST_DEBUG_OBJECT (spectrum, "Discontinuity detected -- flushing");
gst_spectrum_flush (spectrum);
}
/* If we don't have a FFT context yet (or it was reset due to parameter
* changes) get one and allocate memory for everything
*/
if (spectrum->channel_data == NULL) {
GST_DEBUG_OBJECT (spectrum, "allocating for bands %u", bands);
gst_spectrum_alloc_channel_data (spectrum);
/* number of sample frames we process before posting a message
* interval is in ns */
spectrum->frames_per_interval =
gst_util_uint64_scale (spectrum->interval, rate, GST_SECOND);
spectrum->frames_todo = spectrum->frames_per_interval;
/* rounding error for frames_per_interval in ns,
* aggregated it in accumulated_error */
spectrum->error_per_interval = (spectrum->interval * rate) % GST_SECOND;
if (spectrum->frames_per_interval == 0)
spectrum->frames_per_interval = 1;
GST_INFO_OBJECT (spectrum, "interval %" GST_TIME_FORMAT ", fpi %"
G_GUINT64_FORMAT ", error %" GST_TIME_FORMAT,
GST_TIME_ARGS (spectrum->interval), spectrum->frames_per_interval,
GST_TIME_ARGS (spectrum->error_per_interval));
spectrum->input_pos = 0;
gst_spectrum_flush (spectrum);
}
if (spectrum->num_frames == 0)
spectrum->message_ts = GST_BUFFER_TIMESTAMP (buffer);
input_pos = spectrum->input_pos;
input_data = spectrum->input_data;
while (size >= bpf) {
/* run input_data for a chunk of data */
fft_todo = nfft - (spectrum->num_frames % nfft);
msg_todo = spectrum->frames_todo - spectrum->num_frames;
GST_LOG_OBJECT (spectrum,
"message frames todo: %u, fft frames todo: %u, input frames %"
G_GSIZE_FORMAT, msg_todo, fft_todo, (size / bpf));
block_size = msg_todo;
if (block_size > (size / bpf))
block_size = (size / bpf);
if (block_size > fft_todo)
block_size = fft_todo;
for (c = 0; c < output_channels; c++) {
cd = &spectrum->channel_data[c];
input = cd->input;
/* Move the current frames into our ringbuffers */
input_data (data + c * bps, input, block_size, channels, max_value,
input_pos, nfft);
}
data += block_size * bpf;
size -= block_size * bpf;
input_pos = (input_pos + block_size) % nfft;
spectrum->num_frames += block_size;
have_full_interval = (spectrum->num_frames == spectrum->frames_todo);
GST_LOG_OBJECT (spectrum,
"size: %" G_GSIZE_FORMAT ", do-fft = %d, do-message = %d", size,
(spectrum->num_frames % nfft == 0), have_full_interval);
//.........这里部分代码省略.........
示例8: vorbis_handle_data_packet
static GstFlowReturn
vorbis_handle_data_packet (GstVorbisDec * vd, ogg_packet * packet,
GstClockTime timestamp, GstClockTime duration)
{
#ifdef USE_TREMOLO
vorbis_sample_t *pcm;
#else
vorbis_sample_t **pcm;
#endif
guint sample_count;
GstBuffer *out = NULL;
GstFlowReturn result;
gint size;
if (G_UNLIKELY (!vd->initialized)) {
result = vorbis_dec_handle_header_caps (vd);
if (result != GST_FLOW_OK)
goto not_initialized;
}
/* normal data packet */
/* FIXME, we can skip decoding if the packet is outside of the
* segment, this is however not very trivial as we need a previous
* packet to decode the current one so we must be careful not to
* throw away too much. For now we decode everything and clip right
* before pushing data. */
#ifdef USE_TREMOLO
if (G_UNLIKELY (vorbis_dsp_synthesis (&vd->vd, packet, 1)))
goto could_not_read;
#else
if (G_UNLIKELY (vorbis_synthesis (&vd->vb, packet)))
goto could_not_read;
if (G_UNLIKELY (vorbis_synthesis_blockin (&vd->vd, &vd->vb) < 0))
goto not_accepted;
#endif
/* assume all goes well here */
result = GST_FLOW_OK;
/* count samples ready for reading */
#ifdef USE_TREMOLO
if ((sample_count = vorbis_dsp_pcmout (&vd->vd, NULL, 0)) == 0)
#else
if ((sample_count = vorbis_synthesis_pcmout (&vd->vd, NULL)) == 0)
goto done;
#endif
size = sample_count * vd->vi.channels * vd->width;
GST_LOG_OBJECT (vd, "%d samples ready for reading, size %d", sample_count,
size);
/* alloc buffer for it */
result =
gst_pad_alloc_buffer_and_set_caps (GST_AUDIO_DECODER_SRC_PAD (vd),
GST_BUFFER_OFFSET_NONE, size,
GST_PAD_CAPS (GST_AUDIO_DECODER_SRC_PAD (vd)), &out);
if (G_UNLIKELY (result != GST_FLOW_OK))
goto done;
/* get samples ready for reading now, should be sample_count */
#ifdef USE_TREMOLO
pcm = GST_BUFFER_DATA (out);
if (G_UNLIKELY (vorbis_dsp_pcmout (&vd->vd, pcm, sample_count) !=
sample_count))
#else
if (G_UNLIKELY (vorbis_synthesis_pcmout (&vd->vd, &pcm) != sample_count))
#endif
goto wrong_samples;
#ifndef USE_TREMOLO
/* copy samples in buffer */
vd->copy_samples ((vorbis_sample_t *) GST_BUFFER_DATA (out), pcm,
sample_count, vd->vi.channels, vd->width);
#endif
GST_LOG_OBJECT (vd, "setting output size to %d", size);
GST_BUFFER_SIZE (out) = size;
done:
/* whether or not data produced, consume one frame and advance time */
result = gst_audio_decoder_finish_frame (GST_AUDIO_DECODER (vd), out, 1);
#ifdef USE_TREMOLO
vorbis_dsp_read (&vd->vd, sample_count);
#else
vorbis_synthesis_read (&vd->vd, sample_count);
#endif
return result;
/* ERRORS */
not_initialized:
{
GST_ELEMENT_ERROR (GST_ELEMENT (vd), STREAM, DECODE,
(NULL), ("no header sent yet"));
return GST_FLOW_NOT_NEGOTIATED;
}
could_not_read:
//.........这里部分代码省略.........
示例9: vorbis_dec_handle_frame
/* GstAudioDecoder::handle_frame vmethod.
 * Wraps the incoming GstBuffer as an ogg_packet and dispatches it: packets
 * whose first byte has the LSB set are vorbis headers, everything else is
 * audio data.  A NULL buffer (drain) is a no-op.  Empty data packets are
 * skipped; an empty header packet is a fatal stream error. */
static GstFlowReturn
vorbis_dec_handle_frame (GstAudioDecoder * dec, GstBuffer * buffer)
{
  GstVorbisDec *self = GST_VORBIS_DEC (dec);
  ogg_packet_wrapper wrapper;
  ogg_packet *pkt;
  GstFlowReturn ret = GST_FLOW_OK;

  /* no draining etc */
  if (G_UNLIKELY (buffer == NULL))
    return GST_FLOW_OK;

  /* make ogg_packet out of the buffer */
  gst_ogg_packet_wrapper_from_buffer (&wrapper, buffer);
  pkt = gst_ogg_packet_from_wrapper (&wrapper);

  /* set some more stuff */
  pkt->granulepos = -1;
  pkt->packetno = 0;            /* we don't care */
  /* EOS does not matter, it is used in vorbis to implement clipping the last
   * block of samples based on the granulepos. We clip based on segments. */
  pkt->e_o_s = 0;

  GST_LOG_OBJECT (self, "decode buffer of size %ld", pkt->bytes);

  /* error out on empty header packets, but just skip empty data packets */
  if (G_UNLIKELY (pkt->bytes == 0)) {
    if (!self->initialized)
      goto empty_header;
    goto empty_buffer;
  }

  /* switch depending on packet type: LSB set => header packet */
  if ((gst_ogg_packet_data (pkt))[0] & 1) {
    if (self->initialized) {
      GST_WARNING_OBJECT (self, "Already initialized, so ignoring header packet");
      goto done;
    }
    ret = vorbis_handle_header_packet (self, pkt);
    /* consumer header packet/frame */
    gst_audio_decoder_finish_frame (GST_AUDIO_DECODER (self), NULL, 1);
  } else {
    ret = vorbis_handle_data_packet (self, pkt,
        GST_BUFFER_TIMESTAMP (buffer), GST_BUFFER_DURATION (buffer));
  }

done:
  return ret;

empty_buffer:
  {
    /* don't error out here, just ignore the buffer, it's invalid for vorbis
     * but not fatal. */
    GST_WARNING_OBJECT (self, "empty buffer received, ignoring");
    ret = GST_FLOW_OK;
    goto done;
  }

/* ERRORS */
empty_header:
  {
    GST_ELEMENT_ERROR (self, STREAM, DECODE, (NULL), ("empty header received"));
    ret = GST_FLOW_ERROR;
    goto done;
  }
}
示例10: fs_funnel_chain
/* Chain function for every funnel sink pad: forwards buffers from any sink
 * pad to the single src pad, rewriting buffer timestamps to running time so
 * that streams from different input segments merge into one [0,inf[ TIME
 * segment downstream. */
static GstFlowReturn
fs_funnel_chain (GstPad * pad, GstBuffer * buffer)
{
GstFlowReturn res;
/* gst_pad_get_parent refs the element; released at 'out' below */
FsFunnel *funnel = FS_FUNNEL (gst_pad_get_parent (pad));
FsFunnelPadPrivate *priv = gst_pad_get_element_private (pad);
GstEvent *event = NULL;
GstClockTime newts;
GstCaps *padcaps;
GST_DEBUG_OBJECT (funnel, "received buffer %p", buffer);
GST_OBJECT_LOCK (funnel);
/* no newsegment seen on this sink pad yet: assume an open-ended
 * TIME segment starting at 0 */
if (priv->segment.format == GST_FORMAT_UNDEFINED) {
GST_WARNING_OBJECT (funnel, "Got buffer without segment,"
" setting segment [0,inf[");
gst_segment_set_newsegment_full (&priv->segment, FALSE, 1.0, 1.0,
GST_FORMAT_TIME, 0, -1, 0);
}
/* track our position inside this pad's segment */
if (GST_CLOCK_TIME_IS_VALID (GST_BUFFER_TIMESTAMP (buffer)))
gst_segment_set_last_stop (&priv->segment, priv->segment.format,
GST_BUFFER_TIMESTAMP (buffer));
/* retimestamp to running time so all sink pads share one timeline;
 * only take a writable copy when the timestamp actually changes */
newts = gst_segment_to_running_time (&priv->segment,
priv->segment.format, GST_BUFFER_TIMESTAMP (buffer));
if (newts != GST_BUFFER_TIMESTAMP (buffer)) {
buffer = gst_buffer_make_metadata_writable (buffer);
GST_BUFFER_TIMESTAMP (buffer) = newts;
}
/* first buffer through the element: prepare one downstream newsegment
 * event, pushed below after the lock is dropped */
if (!funnel->has_segment)
{
event = gst_event_new_new_segment_full (FALSE, 1.0, 1.0, GST_FORMAT_TIME,
0, -1, 0);
funnel->has_segment = TRUE;
}
GST_OBJECT_UNLOCK (funnel);
if (event) {
if (!gst_pad_push_event (funnel->srcpad, event))
GST_WARNING_OBJECT (funnel, "Could not push out newsegment event");
}
/* NOTE(review): this locks the sink pad while reading the src pad's
 * caps — looks like it may have been meant to lock funnel->srcpad;
 * confirm before changing */
GST_OBJECT_LOCK (pad);
padcaps = GST_PAD_CAPS (funnel->srcpad);
GST_OBJECT_UNLOCK (pad);
/* propagate an upstream caps change to the src pad before pushing */
if (GST_BUFFER_CAPS (buffer) && GST_BUFFER_CAPS (buffer) != padcaps) {
if (!gst_pad_set_caps (funnel->srcpad, GST_BUFFER_CAPS (buffer))) {
/* NOTE(review): buffer does not appear to be unreffed on this error
 * path even though the chain function owns it — confirm (leak?) */
res = GST_FLOW_NOT_NEGOTIATED;
goto out;
}
}
res = gst_pad_push (funnel->srcpad, buffer);
GST_LOG_OBJECT (funnel, "handled buffer %s", gst_flow_get_name (res));
out:
gst_object_unref (funnel);
return res;
}
示例11: gst_openh264dec_handle_frame
static GstFlowReturn gst_openh264dec_handle_frame(GstVideoDecoder *decoder, GstVideoCodecFrame *frame)
{
GstOpenh264Dec *openh264dec = GST_OPENH264DEC(decoder);
GstMapInfo map_info;
GstVideoCodecState *state;
SBufferInfo dst_buf_info;
DECODING_STATE ret;
guint8 *yuvdata[3];
GstFlowReturn flow_status;
GstVideoFrame video_frame;
guint actual_width, actual_height;
guint i;
guint8 *p;
guint row_stride, component_width, component_height, src_width, row;
if (frame) {
if (!gst_buffer_map(frame->input_buffer, &map_info, GST_MAP_READ)) {
GST_ERROR_OBJECT(openh264dec, "Cannot map input buffer!");
return GST_FLOW_ERROR;
}
GST_LOG_OBJECT(openh264dec, "handle frame, %d", map_info.size > 4 ? map_info.data[4] & 0x1f : -1);
memset (&dst_buf_info, 0, sizeof (SBufferInfo));
ret = openh264dec->priv->decoder->DecodeFrame2(map_info.data, map_info.size, yuvdata, &dst_buf_info);
if (ret == dsNoParamSets) {
GST_DEBUG_OBJECT(openh264dec, "Requesting a key unit");
gst_pad_push_event(GST_VIDEO_DECODER_SINK_PAD(decoder),
gst_video_event_new_upstream_force_key_unit(GST_CLOCK_TIME_NONE, FALSE, 0));
}
if (ret != dsErrorFree && ret != dsNoParamSets) {
GST_DEBUG_OBJECT(openh264dec, "Requesting a key unit");
gst_pad_push_event(GST_VIDEO_DECODER_SINK_PAD(decoder),
gst_video_event_new_upstream_force_key_unit(GST_CLOCK_TIME_NONE, FALSE, 0));
GST_LOG_OBJECT(openh264dec, "error decoding nal, return code: %d", ret);
}
gst_buffer_unmap(frame->input_buffer, &map_info);
gst_video_codec_frame_unref (frame);
frame = NULL;
} else {
memset (&dst_buf_info, 0, sizeof (SBufferInfo));
ret = openh264dec->priv->decoder->DecodeFrame2(NULL, 0, yuvdata, &dst_buf_info);
if (ret != dsErrorFree)
return GST_FLOW_EOS;
}
/* FIXME: openh264 has no way for us to get a connection
* between the input and output frames, we just have to
* guess based on the input. Fortunately openh264 can
* only do baseline profile. */
frame = gst_video_decoder_get_oldest_frame (decoder);
if (!frame) {
/* Can only happen in finish() */
return GST_FLOW_EOS;
}
/* No output available yet */
if (dst_buf_info.iBufferStatus != 1) {
return (frame ? GST_FLOW_OK : GST_FLOW_EOS);
}
actual_width = dst_buf_info.UsrData.sSystemBuffer.iWidth;
actual_height = dst_buf_info.UsrData.sSystemBuffer.iHeight;
if (!gst_pad_has_current_caps (GST_VIDEO_DECODER_SRC_PAD (openh264dec)) || actual_width != openh264dec->priv->width || actual_height != openh264dec->priv->height) {
state = gst_video_decoder_set_output_state(decoder,
GST_VIDEO_FORMAT_I420,
actual_width,
actual_height,
openh264dec->priv->input_state);
openh264dec->priv->width = actual_width;
openh264dec->priv->height = actual_height;
if (!gst_video_decoder_negotiate(decoder)) {
GST_ERROR_OBJECT(openh264dec, "Failed to negotiate with downstream elements");
return GST_FLOW_NOT_NEGOTIATED;
}
} else {
state = gst_video_decoder_get_output_state(decoder);
}
flow_status = gst_video_decoder_allocate_output_frame(decoder, frame);
if (flow_status != GST_FLOW_OK) {
gst_video_codec_state_unref (state);
return flow_status;
}
if (!gst_video_frame_map(&video_frame, &state->info, frame->output_buffer, GST_MAP_WRITE)) {
GST_ERROR_OBJECT(openh264dec, "Cannot map output buffer!");
gst_video_codec_state_unref (state);
return GST_FLOW_ERROR;
}
for (i = 0; i < 3; i++) {
p = GST_VIDEO_FRAME_COMP_DATA(&video_frame, i);
row_stride = GST_VIDEO_FRAME_COMP_STRIDE(&video_frame, i);
component_width = GST_VIDEO_FRAME_COMP_WIDTH(&video_frame, i);
//.........这里部分代码省略.........
示例12: rsn_audiomunge_sink_event
static gboolean
rsn_audiomunge_sink_event (GstPad * pad, GstEvent * event)
{
gboolean ret = FALSE;
RsnAudioMunge *munge = RSN_AUDIOMUNGE (gst_pad_get_parent (pad));
switch (GST_EVENT_TYPE (event)) {
case GST_EVENT_FLUSH_STOP:
rsn_audiomunge_reset (munge);
ret = gst_pad_push_event (munge->srcpad, event);
break;
case GST_EVENT_NEWSEGMENT:
{
GstSegment *segment;
gboolean update;
GstFormat format;
gdouble rate, arate;
gint64 start, stop, time;
gst_event_parse_new_segment_full (event, &update, &rate, &arate, &format,
&start, &stop, &time);
/* we need TIME format */
if (format != GST_FORMAT_TIME)
goto newseg_wrong_format;
/* now configure the values */
segment = &munge->sink_segment;
gst_segment_set_newsegment_full (segment, update,
rate, arate, format, start, stop, time);
/*
* FIXME:
* If this is a segment update and accum >= threshold,
* or we're in a still frame and there's been no audio received,
* then we need to generate some audio data.
*
* If caused by a segment start update (time advancing in a gap) adjust
* the new-segment and send the buffer.
*
* Otherwise, send the buffer before the newsegment, so that it appears
* in the closing segment.
*/
if (!update) {
GST_DEBUG_OBJECT (munge,
"Sending newsegment: update %d start %" GST_TIME_FORMAT " stop %"
GST_TIME_FORMAT " accum now %" GST_TIME_FORMAT, update,
GST_TIME_ARGS (start), GST_TIME_ARGS (stop),
GST_TIME_ARGS (segment->accum));
ret = gst_pad_push_event (munge->srcpad, event);
}
if (!munge->have_audio) {
if ((update && segment->accum >= AUDIO_FILL_THRESHOLD)
|| munge->in_still) {
GST_DEBUG_OBJECT (munge,
"Sending audio fill with ts %" GST_TIME_FORMAT ": accum = %"
GST_TIME_FORMAT " still-state=%d", GST_TIME_ARGS (segment->start),
GST_TIME_ARGS (segment->accum), munge->in_still);
/* Just generate a 200ms silence buffer for now. FIXME: Fill the gap */
if (rsn_audiomunge_make_audio (munge, segment->start,
GST_SECOND / 5) == GST_FLOW_OK)
munge->have_audio = TRUE;
} else {
GST_LOG_OBJECT (munge, "Not sending audio fill buffer: "
"Not segment update, or segment accum below thresh: accum = %"
GST_TIME_FORMAT, GST_TIME_ARGS (segment->accum));
}
}
if (update) {
GST_DEBUG_OBJECT (munge,
"Sending newsegment: update %d start %" GST_TIME_FORMAT " stop %"
GST_TIME_FORMAT " accum now %" GST_TIME_FORMAT, update,
GST_TIME_ARGS (start), GST_TIME_ARGS (stop),
GST_TIME_ARGS (segment->accum));
ret = gst_pad_push_event (munge->srcpad, event);
}
break;
}
case GST_EVENT_CUSTOM_DOWNSTREAM:
{
gboolean in_still;
if (gst_video_event_parse_still_frame (event, &in_still)) {
/* Remember the still-frame state, so we can generate a pre-roll
* buffer when a new-segment arrives */
munge->in_still = in_still;
GST_INFO_OBJECT (munge, "AUDIO MUNGE: still-state now %d",
munge->in_still);
}
ret = gst_pad_push_event (munge->srcpad, event);
break;
}
//.........这里部分代码省略.........
示例13: gst_wavpack_parse_push_buffer
/* Accounts for the chunk just read, flushes any pending newsegment and
 * queued events, then accumulates sub-blocks that share the same
 * block_index until a FINAL_BLOCK-flagged header arrives, at which point
 * the joined buffer is timestamped from block_index/samplerate and pushed
 * on the src pad.  Takes ownership of @buf. */
static GstFlowReturn
gst_wavpack_parse_push_buffer (GstWavpackParse * wvparse, GstBuffer * buf,
WavpackHeader * header)
{
GstFlowReturn ret;
/* advance the read position past this chunk (8-byte preamble + payload) */
wvparse->current_offset += header->ckSize + 8;
wvparse->segment.last_stop = header->block_index;
/* a pending newsegment must go out before any buffer */
if (wvparse->need_newsegment) {
if (gst_wavpack_parse_send_newsegment (wvparse, FALSE))
wvparse->need_newsegment = FALSE;
}
/* send any queued events */
if (wvparse->queued_events) {
GList *l;
for (l = wvparse->queued_events; l != NULL; l = l->next) {
gst_pad_push_event (wvparse->srcpad, GST_EVENT (l->data));
}
g_list_free (wvparse->queued_events);
wvparse->queued_events = NULL;
}
/* collect sub-blocks belonging to the same frame (same block_index);
 * a mismatching index means the previous frame never completed, so it
 * is dropped and we start over with the new block */
if (wvparse->pending_buffer == NULL) {
wvparse->pending_buffer = buf;
wvparse->pending_offset = header->block_index;
} else if (wvparse->pending_offset == header->block_index) {
wvparse->pending_buffer = gst_buffer_join (wvparse->pending_buffer, buf);
} else {
GST_ERROR ("Got incomplete block, dropping");
gst_buffer_unref (wvparse->pending_buffer);
wvparse->pending_buffer = buf;
wvparse->pending_offset = header->block_index;
}
/* keep accumulating until the last sub-block of the frame */
if (!(header->flags & FINAL_BLOCK))
return GST_FLOW_OK;
/* hand the assembled frame off; pending_buffer ownership moves to buf */
buf = wvparse->pending_buffer;
wvparse->pending_buffer = NULL;
/* timestamps/durations: sample counts scaled to nanoseconds */
GST_BUFFER_TIMESTAMP (buf) = gst_util_uint64_scale_int (header->block_index,
GST_SECOND, wvparse->samplerate);
GST_BUFFER_DURATION (buf) = gst_util_uint64_scale_int (header->block_samples,
GST_SECOND, wvparse->samplerate);
GST_BUFFER_OFFSET (buf) = header->block_index;
GST_BUFFER_OFFSET_END (buf) = header->block_index + header->block_samples;
/* flag discontinuities (explicit, or a gap in the sample stream) */
if (wvparse->discont || wvparse->next_block_index != header->block_index) {
GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DISCONT);
wvparse->discont = FALSE;
}
wvparse->next_block_index = header->block_index + header->block_samples;
gst_buffer_set_caps (buf, GST_PAD_CAPS (wvparse->srcpad));
GST_LOG_OBJECT (wvparse, "Pushing buffer with time %" GST_TIME_FORMAT,
GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)));
ret = gst_pad_push (wvparse->srcpad, buf);
wvparse->segment.last_stop = wvparse->next_block_index;
return ret;
}
示例14: gst_wavpack_parse_create_src_pad
static gboolean
gst_wavpack_parse_create_src_pad (GstWavpackParse * wvparse, GstBuffer * buf,
WavpackHeader * header)
{
GstWavpackMetadata meta;
GstCaps *caps = NULL;
guchar *bufptr;
g_assert (wvparse->srcpad == NULL);
bufptr = GST_BUFFER_DATA (buf) + sizeof (WavpackHeader);
while (gst_wavpack_read_metadata (&meta, GST_BUFFER_DATA (buf), &bufptr)) {
switch (meta.id) {
case ID_WVC_BITSTREAM:{
caps = gst_caps_new_simple ("audio/x-wavpack-correction",
"framed", G_TYPE_BOOLEAN, TRUE, NULL);
wvparse->srcpad =
gst_pad_new_from_template (gst_element_class_get_pad_template
(GST_ELEMENT_GET_CLASS (wvparse), "wvcsrc"), "wvcsrc");
break;
}
case ID_WV_BITSTREAM:
case ID_WVX_BITSTREAM:{
WavpackStreamReader *stream_reader = gst_wavpack_stream_reader_new ();
WavpackContext *wpc;
gchar error_msg[80];
read_id rid;
gint channel_mask;
rid.buffer = GST_BUFFER_DATA (buf);
rid.length = GST_BUFFER_SIZE (buf);
rid.position = 0;
wpc =
WavpackOpenFileInputEx (stream_reader, &rid, NULL, error_msg, 0, 0);
if (!wpc)
return FALSE;
wvparse->samplerate = WavpackGetSampleRate (wpc);
wvparse->channels = WavpackGetNumChannels (wpc);
wvparse->total_samples =
(header->total_samples ==
0xffffffff) ? G_GINT64_CONSTANT (-1) : header->total_samples;
caps = gst_caps_new_simple ("audio/x-wavpack",
"width", G_TYPE_INT, WavpackGetBitsPerSample (wpc),
"channels", G_TYPE_INT, wvparse->channels,
"rate", G_TYPE_INT, wvparse->samplerate,
"framed", G_TYPE_BOOLEAN, TRUE, NULL);
#ifdef WAVPACK_OLD_API
channel_mask = wpc->config.channel_mask;
#else
channel_mask = WavpackGetChannelMask (wpc);
#endif
if (channel_mask == 0)
channel_mask =
gst_wavpack_get_default_channel_mask (wvparse->channels);
if (channel_mask != 0) {
if (!gst_wavpack_set_channel_layout (caps, channel_mask)) {
GST_WARNING_OBJECT (wvparse, "Failed to set channel layout");
gst_caps_unref (caps);
caps = NULL;
WavpackCloseFile (wpc);
g_free (stream_reader);
break;
}
}
wvparse->srcpad =
gst_pad_new_from_template (gst_element_class_get_pad_template
(GST_ELEMENT_GET_CLASS (wvparse), "src"), "src");
WavpackCloseFile (wpc);
g_free (stream_reader);
break;
}
default:{
GST_LOG_OBJECT (wvparse, "unhandled ID: 0x%02x", meta.id);
break;
}
}
if (caps != NULL)
break;
}
if (caps == NULL || wvparse->srcpad == NULL)
return FALSE;
GST_DEBUG_OBJECT (wvparse, "Added src pad with caps %" GST_PTR_FORMAT, caps);
gst_pad_set_query_function (wvparse->srcpad,
GST_DEBUG_FUNCPTR (gst_wavpack_parse_src_query));
gst_pad_set_query_type_function (wvparse->srcpad,
GST_DEBUG_FUNCPTR (gst_wavpack_parse_get_src_query_types));
//......... (remaining code of this example omitted) .........
Example 15: gst_vdp_video_yuv_transform
GstFlowReturn
gst_vdp_video_yuv_transform (GstBaseTransform * trans, GstBuffer * inbuf,
GstBuffer * outbuf)
{
GstVdpVideoYUV *video_yuv = GST_VDP_VIDEO_YUV (trans);
GstVdpDevice *device;
VdpVideoSurface surface;
device = GST_VDP_VIDEO_BUFFER (inbuf)->device;
surface = GST_VDP_VIDEO_BUFFER (inbuf)->surface;
switch (video_yuv->format) {
case GST_MAKE_FOURCC ('Y', 'V', '1', '2'):
{
VdpStatus status;
guint8 *data[3];
guint32 stride[3];
data[0] = GST_BUFFER_DATA (outbuf) +
gst_video_format_get_component_offset (GST_VIDEO_FORMAT_YV12,
0, video_yuv->width, video_yuv->height);
data[1] = GST_BUFFER_DATA (outbuf) +
gst_video_format_get_component_offset (GST_VIDEO_FORMAT_YV12,
2, video_yuv->width, video_yuv->height);
data[2] = GST_BUFFER_DATA (outbuf) +
gst_video_format_get_component_offset (GST_VIDEO_FORMAT_YV12,
1, video_yuv->width, video_yuv->height);
stride[0] = gst_video_format_get_row_stride (GST_VIDEO_FORMAT_YV12,
0, video_yuv->width);
stride[1] = gst_video_format_get_row_stride (GST_VIDEO_FORMAT_YV12,
2, video_yuv->width);
stride[2] = gst_video_format_get_row_stride (GST_VIDEO_FORMAT_YV12,
1, video_yuv->width);
GST_LOG_OBJECT (video_yuv, "Entering vdp_video_surface_get_bits_ycbcr");
status =
device->vdp_video_surface_get_bits_ycbcr (surface,
VDP_YCBCR_FORMAT_YV12, (void *) data, stride);
GST_LOG_OBJECT (video_yuv,
"Got status %d from vdp_video_surface_get_bits_ycbcr", status);
if (G_UNLIKELY (status != VDP_STATUS_OK)) {
GST_ELEMENT_ERROR (video_yuv, RESOURCE, READ,
("Couldn't get data from vdpau"),
("Error returned from vdpau was: %s",
device->vdp_get_error_string (status)));
return GST_FLOW_ERROR;
}
break;
}
case GST_MAKE_FOURCC ('I', '4', '2', '0'):
{
VdpStatus status;
guint8 *data[3];
guint32 stride[3];
data[0] = GST_BUFFER_DATA (outbuf) +
gst_video_format_get_component_offset (GST_VIDEO_FORMAT_I420,
0, video_yuv->width, video_yuv->height);
data[1] = GST_BUFFER_DATA (outbuf) +
gst_video_format_get_component_offset (GST_VIDEO_FORMAT_I420,
2, video_yuv->width, video_yuv->height);
data[2] = GST_BUFFER_DATA (outbuf) +
gst_video_format_get_component_offset (GST_VIDEO_FORMAT_I420,
1, video_yuv->width, video_yuv->height);
stride[0] = gst_video_format_get_row_stride (GST_VIDEO_FORMAT_I420,
0, video_yuv->width);
stride[1] = gst_video_format_get_row_stride (GST_VIDEO_FORMAT_I420,
2, video_yuv->width);
stride[2] = gst_video_format_get_row_stride (GST_VIDEO_FORMAT_I420,
1, video_yuv->width);
GST_LOG_OBJECT (video_yuv, "Entering vdp_video_surface_get_bits_ycbcr");
status =
device->vdp_video_surface_get_bits_ycbcr (surface,
VDP_YCBCR_FORMAT_YV12, (void *) data, stride);
GST_LOG_OBJECT (video_yuv,
"Got status %d from vdp_video_surface_get_bits_ycbcr", status);
if (G_UNLIKELY (status != VDP_STATUS_OK)) {
GST_ELEMENT_ERROR (video_yuv, RESOURCE, READ,
("Couldn't get data from vdpau"),
("Error returned from vdpau was: %s",
device->vdp_get_error_string (status)));
return GST_FLOW_ERROR;
}
break;
}
case GST_MAKE_FOURCC ('N', 'V', '1', '2'):
{
VdpStatus status;
guint8 *data[2];
guint32 stride[2];
data[0] = GST_BUFFER_DATA (outbuf);
data[1] = GST_BUFFER_DATA (outbuf) + video_yuv->width * video_yuv->height;
stride[0] = video_yuv->width;
stride[1] = video_yuv->width;
//......... (remaining code of this example omitted) .........