本文整理汇总了C++中GST_BUFFER_FLAG_SET函数的典型用法代码示例。如果您正苦于以下问题:C++ GST_BUFFER_FLAG_SET函数的具体用法?C++ GST_BUFFER_FLAG_SET怎么用?C++ GST_BUFFER_FLAG_SET使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了GST_BUFFER_FLAG_SET函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: gst_rtp_mpa_depay_process
/* Depayload one MPEG-audio RTP packet (RFC 2250): drop the fixed 4-byte
 * MBZ/Frag_offset header and hand the remaining payload back as a buffer.
 * Returns NULL (after emitting a warning) when the packet carries no data
 * beyond that header. */
static GstBuffer *
gst_rtp_mpa_depay_process (GstRTPBaseDepayload * depayload, GstBuffer * buf)
{
  GstRtpMPADepay *self = GST_RTP_MPA_DEPAY (depayload);
  GstRTPBuffer rtp = { NULL };
  GstBuffer *outbuf = NULL;
  gint plen;
#if 0
  guint8 *payload;
  guint16 frag_offset;
#endif

  gst_rtp_buffer_map (buf, GST_MAP_READ, &rtp);

  plen = gst_rtp_buffer_get_payload_len (&rtp);
  if (plen <= 4) {
    /* nothing after the fixed header: warn and drop the packet */
    GST_ELEMENT_WARNING (self, STREAM, DECODE, ("Empty Payload."), (NULL));
    goto out;
  }
#if 0
  payload = gst_rtp_buffer_get_payload (&rtp);
  /* strip off header
   *
   *  0                   1                   2                   3
   *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
   * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
   * |             MBZ               |          Frag_offset          |
   * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
   */
  frag_offset = (payload[2] << 8) | payload[3];
#endif

  /* subbuffer skipping the 4 header bytes */
  outbuf = gst_rtp_buffer_get_payload_subbuffer (&rtp, 4, -1);

  if (gst_rtp_buffer_get_marker (&rtp)) {
    /* mark start of talkspurt with RESYNC */
    GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_RESYNC);
  }

  GST_DEBUG_OBJECT (self,
      "gst_rtp_mpa_depay_chain: pushing buffer of size %" G_GSIZE_FORMAT "",
      gst_buffer_get_size (outbuf));

  /* FIXME, we can push half mpeg frames when they are split over multiple
   * RTP packets */
out:
  gst_rtp_buffer_unmap (&rtp);
  return outbuf;
}
示例2: gst_vaapidecode_push_decoded_frame
static GstFlowReturn
gst_vaapidecode_push_decoded_frame (GstVideoDecoder * vdec,
GstVideoCodecFrame * out_frame)
{
GstVaapiDecode *const decode = GST_VAAPIDECODE (vdec);
GstVaapiSurfaceProxy *proxy;
GstVaapiSurface *surface;
GstFlowReturn ret;
const GstVaapiRectangle *crop_rect;
GstVaapiVideoMeta *meta;
GstBufferPoolAcquireParams *params = NULL;
GstVaapiVideoBufferPoolAcquireParams vaapi_params = { {0,}, };
guint flags, out_flags = 0;
gboolean alloc_renegotiate, caps_renegotiate;
if (!GST_VIDEO_CODEC_FRAME_IS_DECODE_ONLY (out_frame)) {
proxy = gst_video_codec_frame_get_user_data (out_frame);
surface = GST_VAAPI_SURFACE_PROXY_SURFACE (proxy);
crop_rect = gst_vaapi_surface_proxy_get_crop_rect (proxy);
/* in theory, we are not supposed to check the surface resolution
* change here since it should be advertised before from ligstvaapi.
* But there are issues with it especially for some vp9 streams where
* upstream element set un-cropped values in set_format() which make
* everything a mess. So better doing the explicit check here irrespective
* of what notification we get from upstream or libgstvaapi.Also, even if
* we received notification from libgstvaapi, the frame we are going to
* be pushed at this point might not have the notified resolution if there
* are queued frames in decoded picture buffer. */
alloc_renegotiate = is_surface_resolution_changed (decode, surface);
caps_renegotiate = is_display_resolution_changed (decode, crop_rect);
if (gst_pad_needs_reconfigure (GST_VIDEO_DECODER_SRC_PAD (vdec))
|| alloc_renegotiate || caps_renegotiate || decode->do_renego) {
g_atomic_int_set (&decode->do_renego, FALSE);
if (!gst_vaapidecode_negotiate (decode))
return GST_FLOW_ERROR;
}
gst_vaapi_surface_proxy_set_destroy_notify (proxy,
(GDestroyNotify) gst_vaapidecode_release, gst_object_ref (decode));
if (is_src_allocator_dmabuf (decode)) {
vaapi_params.proxy = gst_vaapi_surface_proxy_ref (proxy);
params = (GstBufferPoolAcquireParams *) & vaapi_params;
}
ret = gst_video_decoder_allocate_output_frame_with_params (vdec, out_frame,
params);
if (params)
gst_vaapi_surface_proxy_unref (vaapi_params.proxy);
if (ret != GST_FLOW_OK)
goto error_create_buffer;
/* if not dmabuf is negotiated set the vaapi video meta in the
* proxy */
if (!params) {
meta = gst_buffer_get_vaapi_video_meta (out_frame->output_buffer);
if (!meta)
goto error_get_meta;
gst_vaapi_video_meta_set_surface_proxy (meta, proxy);
}
flags = gst_vaapi_surface_proxy_get_flags (proxy);
if (flags & GST_VAAPI_SURFACE_PROXY_FLAG_CORRUPTED)
out_flags |= GST_BUFFER_FLAG_CORRUPTED;
if (flags & GST_VAAPI_SURFACE_PROXY_FLAG_INTERLACED) {
out_flags |= GST_VIDEO_BUFFER_FLAG_INTERLACED;
if (flags & GST_VAAPI_SURFACE_PROXY_FLAG_TFF)
out_flags |= GST_VIDEO_BUFFER_FLAG_TFF;
if (flags & GST_VAAPI_SURFACE_PROXY_FLAG_RFF)
out_flags |= GST_VIDEO_BUFFER_FLAG_RFF;
if (flags & GST_VAAPI_SURFACE_PROXY_FLAG_ONEFIELD)
out_flags |= GST_VIDEO_BUFFER_FLAG_ONEFIELD;
}
GST_BUFFER_FLAG_SET (out_frame->output_buffer, out_flags);
if (flags & GST_VAAPI_SURFACE_PROXY_FLAG_FFB) {
GST_BUFFER_FLAG_SET (out_frame->output_buffer,
GST_VIDEO_BUFFER_FLAG_FIRST_IN_BUNDLE);
}
#if (USE_GLX || USE_EGL)
if (decode->has_texture_upload_meta)
gst_buffer_ensure_texture_upload_meta (out_frame->output_buffer);
#endif
}
if (decode->in_segment.rate < 0.0
&& !GST_VIDEO_CODEC_FRAME_IS_SYNC_POINT (out_frame)) {
GST_TRACE_OBJECT (decode, "drop frame in reverse playback");
gst_video_decoder_release_frame (GST_VIDEO_DECODER (decode), out_frame);
return GST_FLOW_OK;
}
ret = gst_video_decoder_finish_frame (vdec, out_frame);
if (ret != GST_FLOW_OK)
goto error_commit_buffer;
return GST_FLOW_OK;
//.........这里部分代码省略.........
示例3: gst_decklink_audio_src_create
//.........这里部分代码省略.........
capture_packet_free (p);
GST_DEBUG_OBJECT (self, "Flushing");
return GST_FLOW_FLUSHING;
}
p->packet->GetBytes ((gpointer *) & data);
sample_count = p->packet->GetSampleFrameCount ();
data_size = self->info.bpf * sample_count;
ap = (AudioPacket *) g_malloc0 (sizeof (AudioPacket));
*buffer =
gst_buffer_new_wrapped_full ((GstMemoryFlags) GST_MEMORY_FLAG_READONLY,
(gpointer) data, data_size, 0, data_size, ap,
(GDestroyNotify) audio_packet_free);
ap->packet = p->packet;
p->packet->AddRef ();
ap->input = self->input->input;
ap->input->AddRef ();
timestamp = p->capture_time;
// Jitter and discontinuity handling, based on audiobasesrc
start_time = timestamp;
// Convert to the sample numbers
start_offset =
gst_util_uint64_scale (start_time, self->info.rate, GST_SECOND);
end_offset = start_offset + sample_count;
end_time = gst_util_uint64_scale_int (end_offset, GST_SECOND,
self->info.rate);
duration = end_time - start_time;
if (self->next_offset == (guint64) - 1) {
discont = TRUE;
} else {
guint64 diff, max_sample_diff;
// Check discont
if (start_offset <= self->next_offset)
diff = self->next_offset - start_offset;
else
diff = start_offset - self->next_offset;
max_sample_diff =
gst_util_uint64_scale_int (self->alignment_threshold, self->info.rate,
GST_SECOND);
// Discont!
if (G_UNLIKELY (diff >= max_sample_diff)) {
if (self->discont_wait > 0) {
if (self->discont_time == GST_CLOCK_TIME_NONE) {
self->discont_time = start_time;
} else if (start_time - self->discont_time >= self->discont_wait) {
discont = TRUE;
self->discont_time = GST_CLOCK_TIME_NONE;
}
} else {
discont = TRUE;
}
} else if (G_UNLIKELY (self->discont_time != GST_CLOCK_TIME_NONE)) {
// we have had a discont, but are now back on track!
self->discont_time = GST_CLOCK_TIME_NONE;
}
}
if (discont) {
// Have discont, need resync and use the capture timestamps
if (self->next_offset != (guint64) - 1)
GST_INFO_OBJECT (self, "Have discont. Expected %"
G_GUINT64_FORMAT ", got %" G_GUINT64_FORMAT,
self->next_offset, start_offset);
GST_BUFFER_FLAG_SET (*buffer, GST_BUFFER_FLAG_DISCONT);
self->next_offset = end_offset;
} else {
// No discont, just keep counting
self->discont_time = GST_CLOCK_TIME_NONE;
timestamp =
gst_util_uint64_scale (self->next_offset, GST_SECOND, self->info.rate);
self->next_offset += sample_count;
duration =
gst_util_uint64_scale (self->next_offset, GST_SECOND,
self->info.rate) - timestamp;
}
GST_BUFFER_TIMESTAMP (*buffer) = timestamp;
GST_BUFFER_DURATION (*buffer) = duration;
GST_DEBUG_OBJECT (self,
"Outputting buffer %p with timestamp %" GST_TIME_FORMAT " and duration %"
GST_TIME_FORMAT, *buffer, GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (*buffer)),
GST_TIME_ARGS (GST_BUFFER_DURATION (*buffer)));
capture_packet_free (p);
return flow_ret;
}
示例4: gst_video_rate_flush_prev
/* Push out (a metadata-writable copy of) the oldest stored buffer.
 * @duplicate: TRUE when this push repeats the previous frame, in which
 * case the buffer is tagged with the GAP flag so downstream knows it
 * carries no new data. */
static GstFlowReturn
gst_video_rate_flush_prev (GstVideoRate * videorate, gboolean duplicate)
{
  GstBuffer *outbuf;
  GstClockTime out_ts;

  if (!videorate->prevbuf) {
    /* nothing was ever queued: EOS arrived before the first buffer */
    GST_INFO_OBJECT (videorate, "got EOS before any buffer was received");
    return GST_FLOW_OK;
  }

  /* take a private copy so we may rewrite the metadata below */
  outbuf = gst_buffer_make_writable (gst_buffer_ref (videorate->prevbuf));

  GST_BUFFER_OFFSET (outbuf) = videorate->out;
  GST_BUFFER_OFFSET_END (outbuf) = videorate->out + 1;

  /* propagate a pending discont exactly once */
  if (videorate->discont) {
    GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_DISCONT);
    videorate->discont = FALSE;
  } else {
    GST_BUFFER_FLAG_UNSET (outbuf, GST_BUFFER_FLAG_DISCONT);
  }

  if (duplicate)
    GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_GAP);
  else
    GST_BUFFER_FLAG_UNSET (outbuf, GST_BUFFER_FLAG_GAP);

  /* this is the timestamp we put on the buffer */
  out_ts = videorate->next_ts;

  videorate->out++;
  videorate->out_frame_count++;

  if (videorate->to_rate_numerator) {
    /* interpolate next expected timestamp in the segment */
    videorate->next_ts =
        videorate->segment.base + videorate->segment.start +
        videorate->base_ts + gst_util_uint64_scale (videorate->out_frame_count,
        videorate->to_rate_denominator * GST_SECOND,
        videorate->to_rate_numerator);
    GST_BUFFER_DURATION (outbuf) = videorate->next_ts - out_ts;
  }

  /* We do not need to update time in VFR (variable frame rate) mode */
  if (!videorate->drop_only) {
    /* adapt for looping, bring back to time in current segment. */
    GST_BUFFER_TIMESTAMP (outbuf) = out_ts - videorate->segment.base;
  }

  GST_LOG_OBJECT (videorate,
      "old is best, dup, pushing buffer outgoing ts %" GST_TIME_FORMAT,
      GST_TIME_ARGS (out_ts));

  return gst_pad_push (GST_BASE_TRANSFORM_SRC_PAD (videorate), outbuf);
}
示例5: gst_timidity_loop
//.........这里部分代码省略.........
timidity->time_per_frame;
gst_segment_set_newsegment (timidity->o_segment, FALSE, 1.0,
GST_FORMAT_DEFAULT, 0, GST_CLOCK_TIME_NONE, 0);
gst_pad_push_event (timidity->srcpad,
gst_timidity_get_new_segment_event (timidity, GST_FORMAT_TIME, FALSE));
/* extract tags */
text = mid_song_get_meta (timidity->song, MID_SONG_TEXT);
if (text) {
tags = gst_tag_list_new ();
gst_tag_list_add (tags, GST_TAG_MERGE_APPEND, GST_TAG_TITLE, text, NULL);
//g_free (text);
}
text = mid_song_get_meta (timidity->song, MID_SONG_COPYRIGHT);
if (text) {
if (tags == NULL)
tags = gst_tag_list_new ();
gst_tag_list_add (tags, GST_TAG_MERGE_APPEND,
GST_TAG_COPYRIGHT, text, NULL);
//g_free (text);
}
if (tags) {
gst_element_found_tags (GST_ELEMENT (timidity), tags);
}
GST_DEBUG_OBJECT (timidity, "Parsing song done");
return;
}
if (timidity->o_segment_changed) {
GstSegment *segment = gst_timidity_get_segment (timidity, GST_FORMAT_TIME,
!timidity->o_new_segment);
GST_LOG_OBJECT (timidity,
"sending newsegment from %" GST_TIME_FORMAT "-%" GST_TIME_FORMAT
", pos=%" GST_TIME_FORMAT, GST_TIME_ARGS ((guint64) segment->start),
GST_TIME_ARGS ((guint64) segment->stop),
GST_TIME_ARGS ((guint64) segment->time));
if (timidity->o_segment->flags & GST_SEEK_FLAG_SEGMENT) {
gst_element_post_message (GST_ELEMENT (timidity),
gst_message_new_segment_start (GST_OBJECT (timidity),
segment->format, segment->start));
}
gst_segment_free (segment);
timidity->o_segment_changed = FALSE;
return;
}
if (timidity->o_seek) {
/* perform a seek internally */
timidity->o_segment->last_stop = timidity->o_segment->time;
mid_song_seek (timidity->song,
(timidity->o_segment->last_stop * timidity->time_per_frame) /
GST_MSECOND);
}
out = gst_timidity_get_buffer (timidity);
if (!out) {
GST_LOG_OBJECT (timidity, "Song ended, generating eos");
gst_pad_push_event (timidity->srcpad, gst_event_new_eos ());
timidity->o_seek = FALSE;
goto paused;
}
if (timidity->o_seek) {
GST_BUFFER_FLAG_SET (out, GST_BUFFER_FLAG_DISCONT);
timidity->o_seek = FALSE;
}
gst_buffer_set_caps (out, timidity->out_caps);
ret = gst_pad_push (timidity->srcpad, out);
if (GST_FLOW_IS_FATAL (ret) || ret == GST_FLOW_NOT_LINKED)
goto error;
return;
paused:
{
GST_DEBUG_OBJECT (timidity, "pausing task");
gst_pad_pause_task (timidity->sinkpad);
return;
}
error:
{
GST_ELEMENT_ERROR (timidity, STREAM, FAILED,
("Internal data stream error"),
("Streaming stopped, reason %s", gst_flow_get_name (ret)));
gst_pad_push_event (timidity->srcpad, gst_event_new_eos ());
goto paused;
}
}
示例6: gst_vdp_vpp_drain
static GstFlowReturn
gst_vdp_vpp_drain (GstVdpVideoPostProcess * vpp)
{
GstVdpPicture current_pic;
guint32 video_surfaces_past_count;
VdpVideoSurface video_surfaces_past[MAX_PICTURES];
guint32 video_surfaces_future_count;
VdpVideoSurface video_surfaces_future[MAX_PICTURES];
GstFlowReturn ret;
while (gst_vdp_vpp_get_next_picture (vpp,
¤t_pic,
&video_surfaces_past_count, video_surfaces_past,
&video_surfaces_future_count, video_surfaces_future)) {
GError *err;
GstVdpOutputBuffer *outbuf;
GstStructure *structure;
GstVideoRectangle src_r = { 0, }
, dest_r = {
0,};
VdpRect rect;
GstVdpDevice *device;
VdpStatus status;
err = NULL;
ret =
gst_vdp_output_src_pad_alloc_buffer ((GstVdpOutputSrcPad *) vpp->srcpad,
&outbuf, &err);
if (ret != GST_FLOW_OK)
goto output_pad_error;
src_r.w = vpp->width;
src_r.h = vpp->height;
if (vpp->got_par) {
gint new_width;
new_width = gst_util_uint64_scale_int (src_r.w, vpp->par_n, vpp->par_d);
src_r.x += (src_r.w - new_width) / 2;
src_r.w = new_width;
}
structure = gst_caps_get_structure (GST_BUFFER_CAPS (outbuf), 0);
if (!gst_structure_get_int (structure, "width", &dest_r.w) ||
!gst_structure_get_int (structure, "height", &dest_r.h))
goto invalid_caps;
if (vpp->force_aspect_ratio) {
GstVideoRectangle res_r;
gst_video_sink_center_rect (src_r, dest_r, &res_r, TRUE);
rect.x0 = res_r.x;
rect.x1 = res_r.w + res_r.x;
rect.y0 = res_r.y;
rect.y1 = res_r.h + res_r.y;
} else {
rect.x0 = 0;
rect.x1 = dest_r.w;
rect.y0 = 0;
rect.y1 = dest_r.h;
}
device = vpp->device;
status =
device->vdp_video_mixer_render (vpp->mixer, VDP_INVALID_HANDLE, NULL,
current_pic.structure, video_surfaces_past_count, video_surfaces_past,
current_pic.buf->surface, video_surfaces_future_count,
video_surfaces_future, NULL, outbuf->surface, NULL, &rect, 0, NULL);
if (status != VDP_STATUS_OK)
goto render_error;
GST_BUFFER_TIMESTAMP (outbuf) = current_pic.timestamp;
if (gst_vdp_vpp_is_interlaced (vpp))
GST_BUFFER_DURATION (outbuf) = vpp->field_duration;
else
GST_BUFFER_DURATION (outbuf) = GST_BUFFER_DURATION (current_pic.buf);
if (GST_BUFFER_FLAG_IS_SET (current_pic.buf, GST_BUFFER_FLAG_DISCONT))
GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_DISCONT);
if (GST_BUFFER_FLAG_IS_SET (current_pic.buf, GST_BUFFER_FLAG_PREROLL))
GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_PREROLL);
if (GST_BUFFER_FLAG_IS_SET (current_pic.buf, GST_BUFFER_FLAG_GAP))
GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_GAP);
err = NULL;
ret =
gst_vdp_output_src_pad_push ((GstVdpOutputSrcPad *) vpp->srcpad,
outbuf, &err);
if (ret != GST_FLOW_OK)
goto output_pad_error;
continue;
invalid_caps:
//.........这里部分代码省略.........
示例7: gst_vdp_vpp_chain
static GstFlowReturn
gst_vdp_vpp_chain (GstPad * pad, GstBuffer * buffer)
{
GstVdpVideoPostProcess *vpp =
GST_VDP_VIDEO_POST_PROCESS (gst_pad_get_parent (pad));
GstClockTime qostime;
GstFlowReturn ret = GST_FLOW_OK;
GError *err;
GST_DEBUG ("chain");
/* can only do QoS if the segment is in TIME */
if (vpp->segment.format != GST_FORMAT_TIME)
goto no_qos;
/* QOS is done on the running time of the buffer, get it now */
qostime = gst_segment_to_running_time (&vpp->segment, GST_FORMAT_TIME,
GST_BUFFER_TIMESTAMP (buffer));
if (qostime != -1) {
gboolean need_skip;
GstClockTime earliest_time;
/* lock for getting the QoS parameters that are set (in a different thread)
* with the QOS events */
GST_OBJECT_LOCK (vpp);
earliest_time = vpp->earliest_time;
/* check for QoS, don't perform conversion for buffers
* that are known to be late. */
need_skip = GST_CLOCK_TIME_IS_VALID (earliest_time) && qostime != -1 &&
qostime <= earliest_time;
GST_OBJECT_UNLOCK (vpp);
if (need_skip) {
GST_DEBUG_OBJECT (vpp, "skipping transform: qostime %"
GST_TIME_FORMAT " <= %" GST_TIME_FORMAT,
GST_TIME_ARGS (qostime), GST_TIME_ARGS (earliest_time));
/* mark discont for next buffer */
vpp->discont = TRUE;
gst_buffer_unref (buffer);
return GST_FLOW_OK;
}
}
no_qos:
if (vpp->discont) {
GST_BUFFER_FLAG_SET (buffer, GST_BUFFER_FLAG_DISCONT);
vpp->discont = FALSE;
}
if (G_UNLIKELY (GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_DISCONT))) {
GST_DEBUG_OBJECT (vpp, "Received discont buffer");
gst_vdp_vpp_flush (vpp);
}
if (!vpp->native_input) {
GstVdpVideoBuffer *video_buf;
err = NULL;
video_buf =
(GstVdpVideoBuffer *) gst_vdp_buffer_pool_get_buffer (vpp->vpool, &err);
if (G_UNLIKELY (!video_buf))
goto video_buf_error;
if (!gst_vdp_video_buffer_upload (video_buf, buffer, vpp->fourcc,
vpp->width, vpp->height)) {
gst_buffer_unref (GST_BUFFER (video_buf));
GST_ELEMENT_ERROR (vpp, RESOURCE, READ,
("Couldn't upload YUV data to vdpau"), (NULL));
ret = GST_FLOW_ERROR;
goto error;
}
gst_buffer_copy_metadata (GST_BUFFER (video_buf), buffer,
GST_BUFFER_COPY_FLAGS | GST_BUFFER_COPY_TIMESTAMPS);
gst_buffer_unref (buffer);
buffer = GST_BUFFER (video_buf);
}
if (G_UNLIKELY (vpp->mixer == VDP_INVALID_HANDLE)) {
ret = gst_vdp_vpp_create_mixer (vpp);
if (ret != GST_FLOW_OK)
goto error;
}
gst_vdp_vpp_add_buffer (vpp, GST_VDP_VIDEO_BUFFER (buffer));
ret = gst_vdp_vpp_drain (vpp);
done:
gst_object_unref (vpp);
return ret;
error:
gst_buffer_unref (buffer);
//.........这里部分代码省略.........
示例8: gst_shm_src_create
/* GstPushSrc::create() for shmsrc: block on the control pipe until a
 * shared-memory buffer arrives, then wrap that shm area in a GstBuffer
 * without copying.  Ownership of the shm region is tracked through a
 * GstShmBuffer stored in MALLOCDATA and released via free_buffer.
 * Returns GST_FLOW_WRONG_STATE on unlock/flush, GST_FLOW_ERROR on pipe
 * failure, GST_FLOW_OK with *outbuf set otherwise. */
static GstFlowReturn
gst_shm_src_create (GstPushSrc * psrc, GstBuffer ** outbuf)
{
  GstShmSrc *self = GST_SHM_SRC (psrc);
  gchar *buf = NULL;
  int rv = 0;
  struct GstShmBuffer *gsb;

  /* loop until sp_client_recv actually yields a buffer pointer */
  do {
    if (gst_poll_wait (self->poll, GST_CLOCK_TIME_NONE) < 0) {
      /* EBUSY means the poll was released by set_flushing/unlock */
      if (errno == EBUSY)
        return GST_FLOW_WRONG_STATE;
      GST_ELEMENT_ERROR (self, RESOURCE, READ, ("Failed to read from shmsrc"),
          ("Poll failed on fd: %s", strerror (errno)));
      return GST_FLOW_ERROR;
    }

    /* unlock() was called while we were waiting */
    if (self->unlocked)
      return GST_FLOW_WRONG_STATE;

    if (gst_poll_fd_has_closed (self->poll, &self->pollfd)) {
      GST_ELEMENT_ERROR (self, RESOURCE, READ, ("Failed to read from shmsrc"),
          ("Control socket has closed"));
      return GST_FLOW_ERROR;
    }

    if (gst_poll_fd_has_error (self->poll, &self->pollfd)) {
      GST_ELEMENT_ERROR (self, RESOURCE, READ, ("Failed to read from shmsrc"),
          ("Control socket has error"));
      return GST_FLOW_ERROR;
    }

    if (gst_poll_fd_can_read (self->poll, &self->pollfd)) {
      buf = NULL;
      GST_LOG_OBJECT (self, "Reading from pipe");
      /* object lock protects self->pipe against concurrent teardown */
      GST_OBJECT_LOCK (self);
      rv = sp_client_recv (self->pipe->pipe, &buf);
      GST_OBJECT_UNLOCK (self);
      if (rv < 0) {
        GST_ELEMENT_ERROR (self, RESOURCE, READ, ("Failed to read from shmsrc"),
            ("Error reading control data: %d", rv));
        return GST_FLOW_ERROR;
      }
    }
  } while (buf == NULL);

  GST_LOG_OBJECT (self, "Got buffer %p of size %d", buf, rv);

  /* bookkeeping record that keeps the pipe alive as long as the buffer */
  gsb = g_slice_new0 (struct GstShmBuffer);
  gsb->buf = buf;
  gsb->pipe = self->pipe;
  gst_shm_pipe_inc (self->pipe);

  /* zero-copy: point the buffer straight into the shared-memory area;
   * free_buffer() (via MALLOCDATA/FREE_FUNC) returns the area later */
  *outbuf = gst_buffer_new ();
  GST_BUFFER_FLAG_SET (*outbuf, GST_BUFFER_FLAG_READONLY);
  GST_BUFFER_DATA (*outbuf) = (guint8 *) buf;
  GST_BUFFER_SIZE (*outbuf) = rv;
  GST_BUFFER_MALLOCDATA (*outbuf) = (guint8 *) gsb;
  GST_BUFFER_FREE_FUNC (*outbuf) = free_buffer;

  return GST_FLOW_OK;
}
示例9: gst_rtp_amr_depay_process
//.........这里部分代码省略.........
CMR = (payload[0] & 0xf0) >> 4;
/* strip CMR header now, pack FT and the data for the decoder */
payload_len -= 1;
payload += 1;
GST_DEBUG_OBJECT (rtpamrdepay, "payload len %d", payload_len);
if (rtpamrdepay->interleaving) {
ILL = (payload[0] & 0xf0) >> 4;
ILP = (payload[0] & 0x0f);
payload_len -= 1;
payload += 1;
if (ILP > ILL)
goto wrong_interleaving;
}
/*
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6
* +-+-+-+-+-+-+-+-+..
* |F| FT |Q|P|P| more FT..
* +-+-+-+-+-+-+-+-+..
*/
/* count number of packets by counting the FTs. Also
* count number of amr data bytes and number of non-empty
* packets (this is also the number of CRCs if present). */
amr_len = 0;
num_nonempty_packets = 0;
num_packets = 0;
for (i = 0; i < payload_len; i++) {
gint fr_size;
guint8 FT;
FT = (payload[i] & 0x78) >> 3;
fr_size = frame_size[FT];
GST_DEBUG_OBJECT (rtpamrdepay, "frame size %d", fr_size);
if (fr_size == -1)
goto wrong_framesize;
if (fr_size > 0) {
amr_len += fr_size;
num_nonempty_packets++;
}
num_packets++;
if ((payload[i] & 0x80) == 0)
break;
}
if (rtpamrdepay->crc) {
/* data len + CRC len + header bytes should be smaller than payload_len */
if (num_packets + num_nonempty_packets + amr_len > payload_len)
goto wrong_length_1;
} else {
/* data len + header bytes should be smaller than payload_len */
if (num_packets + amr_len > payload_len)
goto wrong_length_2;
}
outbuf = gst_buffer_new_and_alloc (payload_len);
/* point to destination */
p = GST_BUFFER_DATA (outbuf);
/* point to first data packet */
dp = payload + num_packets;
if (rtpamrdepay->crc) {
/* skip CRC if present */
dp += num_nonempty_packets;
}
for (i = 0; i < num_packets; i++) {
gint fr_size;
/* copy FT, clear F bit */
*p++ = payload[i] & 0x7f;
fr_size = frame_size[(payload[i] & 0x78) >> 3];
if (fr_size > 0) {
/* copy data packet, FIXME, calc CRC here. */
memcpy (p, dp, fr_size);
p += fr_size;
dp += fr_size;
}
}
/* we can set the duration because each packet is 20 milliseconds */
GST_BUFFER_DURATION (outbuf) = num_packets * 20 * GST_MSECOND;
if (gst_rtp_buffer_get_marker (buf)) {
/* marker bit marks a discont buffer after a talkspurt. */
GST_DEBUG_OBJECT (depayload, "marker bit was set");
GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_DISCONT);
}
GST_DEBUG_OBJECT (depayload, "pushing buffer of size %d",
GST_BUFFER_SIZE (outbuf));
}
示例10: gst_decklink_src_task
static void
gst_decklink_src_task (void *priv)
{
GstDecklinkSrc *decklinksrc = GST_DECKLINK_SRC (priv);
GstBuffer *buffer;
GstBuffer *audio_buffer;
IDeckLinkVideoInputFrame *video_frame;
IDeckLinkAudioInputPacket *audio_frame;
void *data;
int n_samples;
GstFlowReturn ret;
const GstDecklinkMode *mode;
GST_DEBUG_OBJECT (decklinksrc, "task");
g_mutex_lock (decklinksrc->mutex);
while (decklinksrc->video_frame == NULL && !decklinksrc->stop) {
g_cond_wait (decklinksrc->cond, decklinksrc->mutex);
}
video_frame = decklinksrc->video_frame;
audio_frame = decklinksrc->audio_frame;
decklinksrc->video_frame = NULL;
decklinksrc->audio_frame = NULL;
g_mutex_unlock (decklinksrc->mutex);
if (decklinksrc->stop) {
GST_DEBUG ("stopping task");
return;
}
/* warning on dropped frames */
if (decklinksrc->dropped_frames - decklinksrc->dropped_frames_old > 0) {
GST_ELEMENT_WARNING (decklinksrc, RESOURCE, READ,
("Dropped %d frame(s), for a total of %d frame(s)",
decklinksrc->dropped_frames - decklinksrc->dropped_frames_old,
decklinksrc->dropped_frames),
(NULL));
decklinksrc->dropped_frames_old = decklinksrc->dropped_frames;
}
mode = gst_decklink_get_mode (decklinksrc->mode);
video_frame->GetBytes (&data);
if (decklinksrc->copy_data) {
buffer = gst_buffer_new_and_alloc (mode->width * mode->height * 2);
memcpy (GST_BUFFER_DATA (buffer), data, mode->width * mode->height * 2);
video_frame->Release ();
} else {
buffer = gst_buffer_new ();
GST_BUFFER_SIZE (buffer) = mode->width * mode->height * 2;
GST_BUFFER_DATA (buffer) = (guint8 *) data;
GST_BUFFER_FREE_FUNC (buffer) = video_frame_free;
GST_BUFFER_MALLOCDATA (buffer) = (guint8 *) video_frame;
}
GST_BUFFER_TIMESTAMP (buffer) =
gst_util_uint64_scale_int (decklinksrc->frame_num * GST_SECOND,
mode->fps_d, mode->fps_n);
GST_BUFFER_DURATION (buffer) =
gst_util_uint64_scale_int ((decklinksrc->frame_num + 1) * GST_SECOND,
mode->fps_d, mode->fps_n) - GST_BUFFER_TIMESTAMP (buffer);
GST_BUFFER_OFFSET (buffer) = decklinksrc->frame_num;
GST_BUFFER_OFFSET_END (buffer) = decklinksrc->frame_num;
if (decklinksrc->frame_num == 0) {
GstEvent *event;
gboolean ret;
GST_BUFFER_FLAG_SET (buffer, GST_BUFFER_FLAG_DISCONT);
event = gst_event_new_new_segment (FALSE, 1.0, GST_FORMAT_TIME, 0,
GST_CLOCK_TIME_NONE, 0);
ret = gst_pad_push_event (decklinksrc->videosrcpad, event);
if (!ret) {
GST_ERROR_OBJECT (decklinksrc, "new segment event ret=%d", ret);
return;
}
}
if (decklinksrc->video_caps == NULL) {
decklinksrc->video_caps = gst_decklink_mode_get_caps (decklinksrc->mode);
}
gst_buffer_set_caps (buffer, decklinksrc->video_caps);
ret = gst_pad_push (decklinksrc->videosrcpad, buffer);
if (ret != GST_FLOW_OK) {
GST_ELEMENT_ERROR (decklinksrc, CORE, NEGOTIATION, (NULL), (NULL));
}
if (gst_pad_is_linked (decklinksrc->audiosrcpad)) {
n_samples = audio_frame->GetSampleFrameCount ();
audio_frame->GetBytes (&data);
audio_buffer = gst_buffer_new_and_alloc (n_samples * 2 * 2);
memcpy (GST_BUFFER_DATA (audio_buffer), data, n_samples * 2 * 2);
GST_BUFFER_TIMESTAMP (audio_buffer) =
//.........这里部分代码省略.........
示例11: gst_audio_rate_chain
//.........这里部分代码省略.........
guint64 fillsamples;
/* We don't want to allocate a single unreasonably huge buffer - it might
be hundreds of megabytes. So, limit each output buffer to one second of
audio */
fillsamples = in_offset - audiorate->next_offset;
while (fillsamples > 0) {
guint64 cursamples = MIN (fillsamples, audiorate->rate);
fillsamples -= cursamples;
fillsize = cursamples * audiorate->bytes_per_sample;
fill = gst_buffer_new_and_alloc (fillsize);
/* FIXME, 0 might not be the silence byte for the negotiated format. */
memset (GST_BUFFER_DATA (fill), 0, fillsize);
GST_DEBUG_OBJECT (audiorate, "inserting %lld samples", cursamples);
GST_BUFFER_OFFSET (fill) = audiorate->next_offset;
audiorate->next_offset += cursamples;
GST_BUFFER_OFFSET_END (fill) = audiorate->next_offset;
/* Use next timestamp, then calculate following timestamp based on
* offset to get duration. Neccesary complexity to get 'perfect'
* streams */
GST_BUFFER_TIMESTAMP (fill) = audiorate->next_ts;
audiorate->next_ts = gst_util_uint64_scale_int (audiorate->next_offset,
GST_SECOND, audiorate->rate);
GST_BUFFER_DURATION (fill) = audiorate->next_ts -
GST_BUFFER_TIMESTAMP (fill);
/* we created this buffer to fill a gap */
GST_BUFFER_FLAG_SET (fill, GST_BUFFER_FLAG_GAP);
/* set discont if it's pending, this is mostly done for the first buffer
* and after a flushing seek */
if (audiorate->discont) {
GST_BUFFER_FLAG_SET (fill, GST_BUFFER_FLAG_DISCONT);
audiorate->discont = FALSE;
}
gst_buffer_set_caps (fill, GST_PAD_CAPS (audiorate->srcpad));
ret = gst_pad_push (audiorate->srcpad, fill);
if (ret != GST_FLOW_OK)
goto beach;
audiorate->out++;
audiorate->add += cursamples;
if (!audiorate->silent)
g_object_notify (G_OBJECT (audiorate), "add");
}
} else if (in_offset < audiorate->next_offset) {
/* need to remove samples */
if (in_offset_end <= audiorate->next_offset) {
guint64 drop = in_size / audiorate->bytes_per_sample;
audiorate->drop += drop;
GST_DEBUG_OBJECT (audiorate, "dropping %lld samples", drop);
/* we can drop the buffer completely */
gst_buffer_unref (buf);
if (!audiorate->silent)
g_object_notify (G_OBJECT (audiorate), "drop");
示例12: gst_rtp_xqt_depay_process
//.........这里部分代码省略.........
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* |S| Reserved | Sample Length |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Sample Timestamp |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* . Sample Data ... .
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* |S| Reserved | Sample Length |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Sample Timestamp |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* . Sample Data ... .
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* . ...... .
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
while (payload_len > 8) {
s = (payload[0] & 0x80) != 0; /* contains sync sample */
slen = (payload[2] << 8) | payload[3];
/* timestamp =
* (payload[4] << 24) | (payload[5] << 16) | (payload[6] << 8) |
* payload[7];
*/
payload += 8;
payload_len -= 8;
if (slen > payload_len)
slen = payload_len;
outbuf = gst_buffer_new_and_alloc (slen);
gst_buffer_fill (outbuf, 0, payload, slen);
if (!s)
GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_DELTA_UNIT);
gst_rtp_base_depayload_push (depayload, outbuf);
/* aligned on 32 bit boundary */
slen = GST_ROUND_UP_4 (slen);
payload += slen;
payload_len -= slen;
}
break;
}
case 3:
{
/* one sample per packet, use adapter to combine based on marker bit. */
outbuf = gst_buffer_new_and_alloc (payload_len);
gst_buffer_fill (outbuf, 0, payload, payload_len);
gst_adapter_push (rtpxqtdepay->adapter, outbuf);
outbuf = NULL;
if (!m)
goto done;
avail = gst_adapter_available (rtpxqtdepay->adapter);
outbuf = gst_adapter_take_buffer (rtpxqtdepay->adapter, avail);
GST_DEBUG_OBJECT (rtpxqtdepay,
"gst_rtp_xqt_depay_chain: pushing buffer of size %u", avail);
goto done;
}
}
示例13: videodecoder_chain
//.........这里部分代码省略.........
{
if (av_new_packet(&decoder->packet, GST_BUFFER_SIZE(buf)) == 0)
{
memcpy(decoder->packet.data, GST_BUFFER_DATA(buf), GST_BUFFER_SIZE(buf));
if (GST_BUFFER_TIMESTAMP_IS_VALID(buf))
base->context->reordered_opaque = GST_BUFFER_TIMESTAMP(buf);
else
base->context->reordered_opaque = AV_NOPTS_VALUE;
num_dec = avcodec_decode_video2(base->context, base->frame, &decoder->frame_finished, &decoder->packet);
av_free_packet(&decoder->packet);
}
else
{
result = GST_FLOW_ERROR;
goto _exit;
}
}
else
{
av_init_packet(&decoder->packet);
decoder->packet.data = GST_BUFFER_DATA(buf);
decoder->packet.size = GST_BUFFER_SIZE(buf);
if (GST_BUFFER_TIMESTAMP_IS_VALID(buf))
base->context->reordered_opaque = GST_BUFFER_TIMESTAMP(buf);
else
base->context->reordered_opaque = AV_NOPTS_VALUE;
num_dec = avcodec_decode_video2(base->context, base->frame, &decoder->frame_finished, &decoder->packet);
}
if (num_dec < 0)
{
// basedecoder_flush(base);
#ifdef DEBUG_OUTPUT
g_print ("videodecoder_chain error: %s\n", avelement_error_to_string(AVELEMENT(decoder), num_dec));
#endif
goto _exit;
}
if (decoder->frame_finished > 0)
{
if (!videodecoder_configure_sourcepad(decoder))
result = GST_FLOW_ERROR;
else
{
GstBuffer *outbuf = NULL;
result = gst_pad_alloc_buffer_and_set_caps(base->srcpad, base->context->frame_number,
decoder->frame_size, GST_PAD_CAPS(base->srcpad), &outbuf);
if (result != GST_FLOW_OK)
{
if (result != GST_FLOW_WRONG_STATE)
{
gst_element_message_full(GST_ELEMENT(decoder), GST_MESSAGE_ERROR,
GST_STREAM_ERROR, GST_STREAM_ERROR_DECODE,
("Decoded video buffer allocation failed"), NULL,
("videodecoder.c"), ("videodecoder_chain"), 0);
}
}
else
{
if (base->frame->reordered_opaque != AV_NOPTS_VALUE)
{
GST_BUFFER_TIMESTAMP(outbuf) = base->frame->reordered_opaque;
GST_BUFFER_DURATION(outbuf) = GST_BUFFER_DURATION(buf); // Duration for video usually same
}
GST_BUFFER_SIZE(outbuf) = decoder->frame_size;
// Copy image by parts from different arrays.
memcpy(GST_BUFFER_DATA(outbuf), base->frame->data[0], decoder->u_offset);
memcpy(GST_BUFFER_DATA(outbuf) + decoder->u_offset, base->frame->data[1], decoder->uv_blocksize);
memcpy(GST_BUFFER_DATA(outbuf) + decoder->v_offset, base->frame->data[2], decoder->uv_blocksize);
GST_BUFFER_OFFSET_END(outbuf) = GST_BUFFER_OFFSET_NONE;
if (decoder->discont || GST_BUFFER_IS_DISCONT(buf))
{
#ifdef DEBUG_OUTPUT
g_print("Video discont: frame size=%dx%d\n", base->context->width, base->context->height);
#endif
GST_BUFFER_FLAG_SET(outbuf, GST_BUFFER_FLAG_DISCONT);
decoder->discont = FALSE;
}
#ifdef VERBOSE_DEBUG
g_print("videodecoder: pushing buffer ts=%.4f sec", (double)GST_BUFFER_TIMESTAMP(outbuf)/GST_SECOND);
#endif
result = gst_pad_push(base->srcpad, outbuf);
#ifdef VERBOSE_DEBUG
g_print(" done, res=%s\n", gst_flow_get_name(result));
#endif
}
}
}
_exit:
// INLINE - gst_buffer_unref()
gst_buffer_unref(buf);
return result;
}
示例14: gst_jasper_dec_chain
/* Sink-pad chain function: feed one compressed buffer to the JasPer
 * JPEG-2000 decoder and, when a full picture comes out, push it on the
 * source pad.  Consumes the reference to @buf on every path. */
static GstFlowReturn
gst_jasper_dec_chain (GstPad * pad, GstBuffer * buf)
{
  GstJasperDec *dec = GST_JASPER_DEC (GST_PAD_PARENT (pad));
  GstFlowReturn flow = GST_FLOW_OK;
  GstBuffer *picture = NULL;
  GstClockTime stamp;
  guint8 *bytes;
  guint nbytes;

  /* output format must have been negotiated before any data flows */
  if (dec->fmt < 0)
    goto not_negotiated;

  stamp = GST_BUFFER_TIMESTAMP (buf);
  GST_LOG_OBJECT (dec, "buffer with ts: %" GST_TIME_FORMAT,
      GST_TIME_ARGS (stamp));

  /* remember an upstream discontinuity so it can be propagated on the
   * next buffer we actually push */
  if (GST_BUFFER_FLAG_IS_SET (buf, GST_BUFFER_FLAG_DISCONT))
    dec->discont = TRUE;

  /* FIXME: do clipping */
  /* QoS decision: when running late, skip decoding this buffer entirely */
  if (G_UNLIKELY (!gst_jasper_dec_do_qos (dec, stamp))) {
    dec->discont = TRUE;
    goto done;
  }

  /* drop a fixed-size prefix, if one was configured */
  if (dec->strip) {
    GstBuffer *trimmed = gst_buffer_create_sub (buf, dec->strip,
        GST_BUFFER_SIZE (buf) - dec->strip);

    gst_buffer_copy_metadata (trimmed, buf, GST_BUFFER_COPY_TIMESTAMPS);
    gst_buffer_unref (buf);
    buf = trimmed;
  }

  /* prefix the stream header (codec_data), if any */
  if (dec->codec_data) {
    GstBuffer *joined =
        gst_buffer_append (gst_buffer_ref (dec->codec_data),
        gst_buffer_ref (buf));

    gst_buffer_copy_metadata (joined, buf, GST_BUFFER_COPY_TIMESTAMPS);
    gst_buffer_unref (buf);
    buf = joined;
  }

  /* hand the assembled data to the decoder proper */
  bytes = GST_BUFFER_DATA (buf);
  nbytes = GST_BUFFER_SIZE (buf);
  flow = gst_jasper_dec_get_picture (dec, bytes, nbytes, &picture);

  if (picture) {
    gst_buffer_copy_metadata (picture, buf, GST_BUFFER_COPY_TIMESTAMPS);
    if (dec->discont) {
      GST_BUFFER_FLAG_SET (picture, GST_BUFFER_FLAG_DISCONT);
      dec->discont = FALSE;
    }
    if (flow == GST_FLOW_OK)
      flow = gst_pad_push (dec->srcpad, picture);
    else
      gst_buffer_unref (picture);
  }

done:
  gst_buffer_unref (buf);
  return flow;

/* ERRORS */
not_negotiated:
  {
    GST_ELEMENT_ERROR (dec, CORE, NEGOTIATION, (NULL),
        ("format wasn't negotiated before chain function"));
    flow = GST_FLOW_NOT_NEGOTIATED;
    goto done;
  }
}
示例15: gst_vaapidecode_push_decoded_frame
static GstFlowReturn
gst_vaapidecode_push_decoded_frame (GstVideoDecoder * vdec,
GstVideoCodecFrame * out_frame)
{
GstVaapiDecode *const decode = GST_VAAPIDECODE (vdec);
GstVaapiSurfaceProxy *proxy;
GstFlowReturn ret;
const GstVaapiRectangle *crop_rect;
GstVaapiVideoMeta *meta;
guint flags, out_flags = 0;
if (!GST_VIDEO_CODEC_FRAME_IS_DECODE_ONLY (out_frame)) {
proxy = gst_video_codec_frame_get_user_data (out_frame);
/* reconfigure if un-cropped surface resolution changed */
if (is_surface_resolution_changed (vdec, GST_VAAPI_SURFACE_PROXY_SURFACE (proxy)))
gst_vaapidecode_negotiate (decode);
gst_vaapi_surface_proxy_set_destroy_notify (proxy,
(GDestroyNotify) gst_vaapidecode_release, gst_object_ref (decode));
ret = gst_video_decoder_allocate_output_frame (vdec, out_frame);
if (ret != GST_FLOW_OK)
goto error_create_buffer;
meta = gst_buffer_get_vaapi_video_meta (out_frame->output_buffer);
if (!meta)
goto error_get_meta;
gst_vaapi_video_meta_set_surface_proxy (meta, proxy);
flags = gst_vaapi_surface_proxy_get_flags (proxy);
if (flags & GST_VAAPI_SURFACE_PROXY_FLAG_CORRUPTED)
out_flags |= GST_BUFFER_FLAG_CORRUPTED;
if (flags & GST_VAAPI_SURFACE_PROXY_FLAG_INTERLACED) {
out_flags |= GST_VIDEO_BUFFER_FLAG_INTERLACED;
if (flags & GST_VAAPI_SURFACE_PROXY_FLAG_TFF)
out_flags |= GST_VIDEO_BUFFER_FLAG_TFF;
if (flags & GST_VAAPI_SURFACE_PROXY_FLAG_RFF)
out_flags |= GST_VIDEO_BUFFER_FLAG_RFF;
if (flags & GST_VAAPI_SURFACE_PROXY_FLAG_ONEFIELD)
out_flags |= GST_VIDEO_BUFFER_FLAG_ONEFIELD;
}
GST_BUFFER_FLAG_SET (out_frame->output_buffer, out_flags);
#if GST_CHECK_VERSION(1,5,0)
/* First-in-bundle flag only appeared in 1.5 dev */
if (flags & GST_VAAPI_SURFACE_PROXY_FLAG_FFB) {
GST_BUFFER_FLAG_SET (out_frame->output_buffer,
GST_VIDEO_BUFFER_FLAG_FIRST_IN_BUNDLE);
}
#endif
crop_rect = gst_vaapi_surface_proxy_get_crop_rect (proxy);
if (crop_rect) {
GstVideoCropMeta *const crop_meta =
gst_buffer_add_video_crop_meta (out_frame->output_buffer);
if (crop_meta) {
crop_meta->x = crop_rect->x;
crop_meta->y = crop_rect->y;
crop_meta->width = crop_rect->width;
crop_meta->height = crop_rect->height;
}
}
#if (USE_GLX || USE_EGL)
if (decode->has_texture_upload_meta)
gst_buffer_ensure_texture_upload_meta (out_frame->output_buffer);
#endif
}
ret = gst_video_decoder_finish_frame (vdec, out_frame);
if (ret != GST_FLOW_OK)
goto error_commit_buffer;
gst_video_codec_frame_unref (out_frame);
return GST_FLOW_OK;
/* ERRORS */
error_create_buffer:
{
const GstVaapiID surface_id =
gst_vaapi_surface_get_id (GST_VAAPI_SURFACE_PROXY_SURFACE (proxy));
GST_ELEMENT_ERROR (vdec, STREAM, FAILED,
("Failed to create sink buffer"),
("video sink failed to create video buffer for proxy'ed "
"surface %" GST_VAAPI_ID_FORMAT, GST_VAAPI_ID_ARGS (surface_id)));
gst_video_decoder_drop_frame (vdec, out_frame);
gst_video_codec_frame_unref (out_frame);
return GST_FLOW_ERROR;
}
error_get_meta:
{
GST_ELEMENT_ERROR (vdec, STREAM, FAILED,
("Failed to get vaapi video meta attached to video buffer"),
("Failed to get vaapi video meta attached to video buffer"));
gst_video_decoder_drop_frame (vdec, out_frame);
gst_video_codec_frame_unref (out_frame);
return GST_FLOW_ERROR;
}
error_commit_buffer:
//.........这里部分代码省略.........