本文整理汇总了C++中GST_BUFFER_SIZE函数的典型用法代码示例。如果您正苦于以下问题:C++ GST_BUFFER_SIZE函数的具体用法?C++ GST_BUFFER_SIZE怎么用?C++ GST_BUFFER_SIZE使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了GST_BUFFER_SIZE函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: incoming_content
/**
* Main working loop for stuff coming in from the CCNx network
*
* The only kind of content we work with are data messages. They are in response to the
* interest messages we send out. The work involves 2 pieces: packing the data from ccn message
* sizes into buffer sizes we use internally, and detecting when the stream of data is done.
*
* The first is fairly simple. Each internal buffer we 'fill' is placed onto the fifo queue
* so the main thread can take it off and reply to the pipeline request for more data.
*
* Determining the end of stream at the moment is a bit of a hack and could use some work.
* \todo volunteers? 8-)
*
* \param selfp -> a context structure we created when registering this call-back
* \param kind specifies the type of call-back being processed, see the \b switch statement
* \param info context information about the call-back itself; interests, data, etc.
* \return a response as to how successful we were in processing the call-back
* \retval CCN_UPCALL_RESULT_OK things went well
* \retval CCN_UPCALL_RESULT_VERIFY need to verify the contents of what we received
* \retval CCN_UPCALL_RESULT_REEXPRESS an interest timedout waiting for data, so we try again
* \retval CCN_UPCALL_RESULT_ERR some error was encountered
*/
static enum ccn_upcall_res
incoming_content (struct ccn_closure *selfp,
enum ccn_upcall_kind kind, struct ccn_upcall_info *info)
{
Gstccnxsrc *me = GST_CCNXSRC (selfp->data);
const unsigned char *ccnb = NULL;
size_t ccnb_size = 0;
const unsigned char *ib = NULL; /* info->interest_ccnb */
struct ccn_indexbuf *ic = NULL;
unsigned int i;
uintmax_t segment;
CcnxInterestState *istate = NULL;
gint res;
const unsigned char *cp;
size_t sz;
const unsigned char *data = NULL;
size_t data_size = 0;
gboolean b_last = FALSE;
GST_INFO ("content has arrived!");
/* Do some basic sanity and type checks to see if we want to process this data */
if (CCN_UPCALL_FINAL == kind) {
GST_LOG_OBJECT (me, "CCN upcall final %p", selfp);
if (me->i_bufoffset > 0) {
GST_BUFFER_SIZE (me->buf) = me->i_bufoffset;
fifo_put (me, me->buf);
me->buf = gst_buffer_new_and_alloc (CCN_FIFO_BLOCK_SIZE);
me->i_bufoffset = 0;
}
/*
* Should emit an eos here instead of the empty buffer
*/
GST_BUFFER_SIZE (me->buf) = 0;
fifo_put (me, me->buf);
me->i_bufoffset = 0;
return (CCN_UPCALL_RESULT_OK);
}
if (!info)
return CCN_UPCALL_RESULT_ERR; // Now why would this happen?
// show_comps( info->content_ccnb, info->content_comps);
if (CCN_UPCALL_INTEREST_TIMED_OUT == kind) {
if (selfp != me->ccn_closure) {
GST_LOG_OBJECT (me, "CCN Interest timed out on dead closure %p", selfp);
return (CCN_UPCALL_RESULT_OK);
}
segment =
ccn_ccnb_fetch_segment (info->interest_ccnb, info->interest_comps);
GST_INFO ("...looks to be for segment: %d", segment);
GST_LOG_OBJECT (me, "CCN upcall reexpress -- timed out");
istate = fetchSegmentInterest (me, segment);
if (istate) {
if (istate->timeouts > 5) {
GST_LOG_OBJECT (me, "CCN upcall reexpress -- too many reexpressions");
if (segment == me->post_seg) // We have been waiting for this one...process as an empty block to trigger other activity
process_or_queue (me, me->post_seg, NULL, 0, FALSE);
else
freeInterestState (me, istate);
post_next_interest (me); // make sure to ask for new stuff if needed, or else we stall waiting for nothing
return (CCN_UPCALL_RESULT_OK);
} else {
istate->timeouts++;
return (CCN_UPCALL_RESULT_REEXPRESS);
}
} else {
GST_LOG_OBJECT (me, "segment not found in cache: %d", segment);
return (CCN_UPCALL_RESULT_OK);
}
} else if (CCN_UPCALL_CONTENT_UNVERIFIED == kind) {
if (selfp != me->ccn_closure) {
GST_LOG_OBJECT (me, "CCN unverified content on dead closure %p", selfp);
return (CCN_UPCALL_RESULT_OK);
//.........这里部分代码省略.........
示例2: gst_goo_encjpeg_chain
static GstFlowReturn
gst_goo_encjpeg_chain (GstPad* pad, GstBuffer* buffer)
{
GST_LOG ("");
GstGooEncJpeg* self = GST_GOO_ENCJPEG (gst_pad_get_parent (pad));
GstGooEncJpegPrivate* priv = GST_GOO_ENCJPEG_GET_PRIVATE (self);
GstFlowReturn ret = GST_FLOW_OK;
GstGooAdapter* adapter = self->adapter;
OMX_BUFFERHEADERTYPE* omx_buffer = NULL;
GstClockTime timestamp, duration;
guint64 offset, offsetend;
GstBuffer* outbuf = NULL;
if (goo_port_is_tunneled (self->inport))
{
GST_INFO ("Inport is tunneled");
ret = GST_FLOW_OK;
priv->incount++;
goto process_output;
}
if (goo_port_is_eos (self->inport))
{
GST_INFO ("port is eos");
ret = GST_FLOW_UNEXPECTED;
goto fail;
}
if (self->component->cur_state != OMX_StateExecuting)
{
goto fail;
}
/* let's copy the timestamp meta data */
timestamp = GST_BUFFER_TIMESTAMP (buffer);
duration = GST_BUFFER_DURATION (buffer);
offset = GST_BUFFER_OFFSET (buffer);
offsetend = GST_BUFFER_OFFSET_END (buffer);
if (GST_IS_GOO_BUFFER (buffer) &&
goo_port_is_my_buffer (self->inport,
GST_GOO_BUFFER (buffer)->omx_buffer))
{
GST_INFO ("My own OMX buffer");
priv->incount++;
gst_buffer_unref (buffer); /* let's push the buffer to omx */
ret = GST_FLOW_OK;
}
else if (GST_IS_GOO_BUFFER (buffer) &&
!goo_port_is_my_buffer (self->inport,
GST_GOO_BUFFER (buffer)->omx_buffer))
{
GST_INFO ("Other OMX buffer");
if (GST_BUFFER_SIZE (buffer) != priv->omxbufsiz)
{
GST_ELEMENT_ERROR (self, STREAM, FORMAT,
("Frame is incomplete (%u!=%u)",
GST_BUFFER_SIZE (buffer),
priv->omxbufsiz),
("Frame is incomplete (%u!=%u)",
GST_BUFFER_SIZE (buffer),
priv->omxbufsiz));
ret = GST_FLOW_ERROR;
}
omx_buffer = goo_port_grab_buffer (self->inport);
memcpy (omx_buffer->pBuffer, GST_BUFFER_DATA (buffer),
priv->omxbufsiz);
omx_buffer->nFilledLen = priv->omxbufsiz;
priv->incount++;
goo_component_release_buffer (self->component, omx_buffer);
gst_buffer_unref (buffer);
ret = GST_FLOW_OK;
}
else
{
if (GST_BUFFER_FLAG_IS_SET (buffer, GST_BUFFER_FLAG_DISCONT))
{
gst_goo_adapter_clear (adapter);
}
GST_LOG ("size = %d bytes", GST_BUFFER_SIZE (buffer));
gst_goo_adapter_push (adapter, buffer);
guint tmp = priv->incount;
while (gst_goo_adapter_available (adapter) >= priv->omxbufsiz &&
ret == GST_FLOW_OK)
{
GST_DEBUG ("Pushing data to OMX");
OMX_BUFFERHEADERTYPE* omx_buffer;
omx_buffer = goo_port_grab_buffer (self->inport);
gst_goo_adapter_peek (adapter, priv->omxbufsiz,
omx_buffer);
omx_buffer->nFilledLen = priv->omxbufsiz;
gst_goo_adapter_flush (adapter, priv->omxbufsiz);
priv->incount++;
goo_component_release_buffer (self->component,
//.........这里部分代码省略.........
示例3: gst_tidmaiaccel_prepare_output_buffer
/*****************************************************************************
* gst_tidmaiaccel_prepare_output_buffer
* Function is used to allocate output buffer
*****************************************************************************/
static GstFlowReturn gst_tidmaiaccel_prepare_output_buffer (GstBaseTransform
*trans, GstBuffer *inBuf, gint size, GstCaps *caps, GstBuffer **outBuf)
{
GstTIDmaiaccel *dmaiaccel = GST_TIDMAIACCEL(trans);
Buffer_Handle hOutBuf;
Bool isContiguous = FALSE;
UInt32 phys = 0;
/* Always check if the buffer is contiguous */
phys = Memory_getBufferPhysicalAddress(
GST_BUFFER_DATA(inBuf),
GST_BUFFER_SIZE(inBuf),
&isContiguous);
if (isContiguous && dmaiaccel->width){
GST_DEBUG("Is contiguous video buffer");
Memory_registerContigBuf((UInt32)GST_BUFFER_DATA(inBuf),
GST_BUFFER_SIZE(inBuf),phys);
/* This is a contiguous buffer, create a dmai buffer transport */
BufferGfx_Attrs gfxAttrs = BufferGfx_Attrs_DEFAULT;
gfxAttrs.bAttrs.reference = TRUE;
gfxAttrs.dim.width = dmaiaccel->width;
gfxAttrs.dim.height = dmaiaccel->height;
gfxAttrs.colorSpace = dmaiaccel->colorSpace;
gfxAttrs.dim.lineLength = dmaiaccel->lineLength;
hOutBuf = Buffer_create(GST_BUFFER_SIZE(inBuf), &gfxAttrs.bAttrs);
BufferGfx_setDimensions(hOutBuf,&gfxAttrs.dim);
BufferGfx_setColorSpace(hOutBuf,gfxAttrs.colorSpace);
Buffer_setUserPtr(hOutBuf, (Int8*)GST_BUFFER_DATA(inBuf));
Buffer_setNumBytesUsed(hOutBuf, GST_BUFFER_SIZE(inBuf));
*outBuf = gst_tidmaibuffertransport_new(hOutBuf, NULL, NULL, FALSE);
gst_buffer_set_data(*outBuf, (guint8*) Buffer_getUserPtr(hOutBuf),
Buffer_getSize(hOutBuf));
gst_buffer_copy_metadata(*outBuf,inBuf,GST_BUFFER_COPY_ALL);
gst_buffer_set_caps(*outBuf, GST_PAD_CAPS(trans->srcpad));
/* We need to grab a reference to the input buffer since we have
* a pointer to his buffer */
gst_buffer_ref(inBuf);
gst_tidmaibuffertransport_set_release_callback(
(GstTIDmaiBufferTransport *)*outBuf,
dmaiaccel_release_cb,inBuf);
return GST_FLOW_OK;
} else {
GST_DEBUG("Copying into contiguous video buffer");
/* This is a contiguous buffer, create a dmai buffer transport */
if (!dmaiaccel->bufTabAllocated){
/* Initialize our buffer tab */
BufferGfx_Attrs gfxAttrs = BufferGfx_Attrs_DEFAULT;
gfxAttrs.dim.width = dmaiaccel->width;
gfxAttrs.dim.height = dmaiaccel->height;
gfxAttrs.colorSpace = dmaiaccel->colorSpace;
gfxAttrs.dim.lineLength = dmaiaccel->lineLength;
dmaiaccel->hOutBufTab =
BufTab_create(2, GST_BUFFER_SIZE(inBuf),
BufferGfx_getBufferAttrs(&gfxAttrs));
pthread_mutex_init(&dmaiaccel->bufTabMutex, NULL);
pthread_cond_init(&dmaiaccel->bufTabCond, NULL);
if (dmaiaccel->hOutBufTab == NULL) {
GST_ELEMENT_ERROR(dmaiaccel,RESOURCE,NO_SPACE_LEFT,(NULL),
("failed to create output buffer tab"));
return GST_FLOW_ERROR;
}
dmaiaccel->bufTabAllocated = TRUE;
}
pthread_mutex_lock(&dmaiaccel->bufTabMutex);
hOutBuf = BufTab_getFreeBuf(dmaiaccel->hOutBufTab);
if (hOutBuf == NULL) {
GST_INFO("Failed to get free buffer, waiting on bufTab\n");
pthread_cond_wait(&dmaiaccel->bufTabCond, &dmaiaccel->bufTabMutex);
hOutBuf = BufTab_getFreeBuf(dmaiaccel->hOutBufTab);
if (hOutBuf == NULL) {
GST_ELEMENT_ERROR(dmaiaccel,RESOURCE,NO_SPACE_LEFT,(NULL),
("failed to get a free contiguous buffer from BufTab"));
pthread_mutex_unlock(&dmaiaccel->bufTabMutex);
return GST_FLOW_ERROR;
}
}
pthread_mutex_unlock(&dmaiaccel->bufTabMutex);
memcpy(Buffer_getUserPtr(hOutBuf),GST_BUFFER_DATA(inBuf),
GST_BUFFER_SIZE(inBuf));
Buffer_setNumBytesUsed(hOutBuf, GST_BUFFER_SIZE(inBuf));
*outBuf = gst_tidmaibuffertransport_new(hOutBuf, &dmaiaccel->bufTabMutex,
&dmaiaccel->bufTabCond, FALSE);
gst_buffer_set_data(*outBuf, (guint8*) Buffer_getUserPtr(hOutBuf),
//.........这里部分代码省略.........
示例4: gst_xvidenc_chain
static GstFlowReturn
gst_xvidenc_chain (GstPad * pad, GstBuffer * buf)
{
GstXvidEnc *xvidenc = GST_XVIDENC (GST_PAD_PARENT (pad));
GstBuffer *outbuf;
xvid_enc_frame_t xframe;
const gint motion_presets[] = {
0, 0, 0, 0,
XVID_ME_HALFPELREFINE16,
XVID_ME_HALFPELREFINE16 | XVID_ME_ADVANCEDDIAMOND16,
XVID_ME_HALFPELREFINE16 | XVID_ME_EXTSEARCH16
| XVID_ME_HALFPELREFINE8 | XVID_ME_USESQUARES16
};
if (!xvidenc->handle) {
GST_ELEMENT_ERROR (xvidenc, CORE, NEGOTIATION, (NULL),
("format wasn't negotiated before chain function"));
gst_buffer_unref (buf);
return GST_FLOW_NOT_NEGOTIATED;
}
GST_DEBUG_OBJECT (xvidenc,
"Received buffer of time %" GST_TIME_FORMAT ", size %d",
GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (buf)), GST_BUFFER_SIZE (buf));
if (xvidenc->xframe_cache)
memcpy (&xframe, xvidenc->xframe_cache, sizeof (xframe));
else { /* need to build some inital xframe to be cached */
/* encode and so ... */
gst_xvid_init_struct (xframe);
if (xvidenc->par_width == xvidenc->par_height)
xframe.par = XVID_PAR_11_VGA;
else {
xframe.par = XVID_PAR_EXT;
xframe.par_width = xvidenc->par_width;
xframe.par_height = xvidenc->par_height;
}
/* handle options */
xframe.vol_flags |= xvidenc->quant_type;
xframe.vop_flags = XVID_VOP_HALFPEL;
xframe.motion = motion_presets[xvidenc->motion];
if (xvidenc->me_chroma) {
xframe.motion |= XVID_ME_CHROMA_PVOP;
xframe.motion |= XVID_ME_CHROMA_BVOP;
}
if (xvidenc->me_vhq >= 1) {
xframe.vop_flags |= XVID_VOP_MODEDECISION_RD;
}
if (xvidenc->me_vhq >= 2) {
xframe.motion |= XVID_ME_HALFPELREFINE16_RD;
xframe.motion |= XVID_ME_QUARTERPELREFINE16_RD;
}
if (xvidenc->me_vhq >= 3) {
xframe.motion |= XVID_ME_HALFPELREFINE8_RD;
xframe.motion |= XVID_ME_QUARTERPELREFINE8_RD;
xframe.motion |= XVID_ME_CHECKPREDICTION_RD;
}
if (xvidenc->me_vhq >= 4) {
xframe.motion |= XVID_ME_EXTSEARCH_RD;
}
/* no motion estimation, then only intra */
if (xvidenc->motion == 0) {
xframe.type = XVID_TYPE_IVOP;
} else {
xframe.type = XVID_TYPE_AUTO;
}
if (xvidenc->motion > 4) {
xframe.vop_flags |= XVID_VOP_INTER4V;
}
if (xvidenc->me_quarterpel) {
xframe.vol_flags |= XVID_VOL_QUARTERPEL;
xframe.motion |= XVID_ME_QUARTERPELREFINE16;
xframe.motion |= XVID_ME_QUARTERPELREFINE8;
}
if (xvidenc->gmc) {
xframe.vol_flags |= XVID_VOL_GMC;
xframe.motion |= XVID_ME_GME_REFINE;
}
if (xvidenc->interlaced) {
xframe.vol_flags |= XVID_VOL_INTERLACING;
}
if (xvidenc->trellis) {
xframe.vop_flags |= XVID_VOP_TRELLISQUANT;
}
if (xvidenc->hqacpred) {
xframe.vop_flags |= XVID_VOP_HQACPRED;
}
//.........这里部分代码省略.........
示例5: gst_rtp_speex_pay_handle_buffer
/*
 * gst_rtp_speex_pay_handle_buffer:
 *
 * Chain function of the Speex RTP payloader.  The Ogg/Speex stream
 * starts with two special packets: packet 0 is the ident header (parsed
 * here to configure the RTP payload properties) and packet 1 is the
 * comment header (silently dropped).  Every later packet is copied
 * verbatim into a freshly allocated RTP buffer and pushed downstream.
 *
 * The input @buffer is always unreffed and the per-stream packet
 * counter is bumped, except on a parse error of the ident header.
 *
 * Returns: GST_FLOW_OK, GST_FLOW_ERROR on a bad ident packet, or the
 * result of pushing the RTP buffer downstream.
 */
static GstFlowReturn
gst_rtp_speex_pay_handle_buffer (GstBaseRTPPayload * basepayload,
    GstBuffer * buffer)
{
  GstRtpSPEEXPay *pay = GST_RTP_SPEEX_PAY (basepayload);
  guint8 *in_data = GST_BUFFER_DATA (buffer);
  guint in_size = GST_BUFFER_SIZE (buffer);
  GstClockTime ts, dur;
  GstBuffer *rtpbuf;
  guint8 *payload;
  guint payload_len;
  GstFlowReturn ret;

  if (pay->packet == 0) {
    /* ident packet. We need to parse the headers to construct the RTP
     * properties. */
    if (!gst_rtp_speex_pay_parse_ident (pay, in_data, in_size))
      goto parse_error;
    ret = GST_FLOW_OK;
    goto done;
  }
  if (pay->packet == 1) {
    /* comment packet, we ignore it */
    ret = GST_FLOW_OK;
    goto done;
  }

  /* every other packet carries audio and goes in the payload */
  ts = GST_BUFFER_TIMESTAMP (buffer);
  dur = GST_BUFFER_DURATION (buffer);

  /* FIXME, only one SPEEX frame per RTP packet for now */
  payload_len = in_size;
  rtpbuf = gst_rtp_buffer_new_allocate (payload_len, 0, 0);
  /* FIXME, assert for now */
  g_assert (payload_len <= GST_BASE_RTP_PAYLOAD_MTU (pay));

  /* carry over the timing metadata */
  GST_BUFFER_TIMESTAMP (rtpbuf) = ts;
  GST_BUFFER_DURATION (rtpbuf) = dur;

  /* copy the packet data into the RTP payload area */
  payload = gst_rtp_buffer_get_payload (rtpbuf);
  memcpy (payload, in_data, in_size);

  ret = gst_basertppayload_push (basepayload, rtpbuf);

done:
  gst_buffer_unref (buffer);
  pay->packet++;
  return ret;

  /* ERRORS */
parse_error:
  {
    GST_ELEMENT_ERROR (pay, STREAM, DECODE, (NULL),
        ("Error parsing first identification packet."));
    gst_buffer_unref (buffer);
    return GST_FLOW_ERROR;
  }
}
示例6: gst_segment_clip
HRESULT AudioFakeSink::DoRenderSample(IMediaSample *pMediaSample)
{
GstBuffer *out_buf = NULL;
gboolean in_seg = FALSE;
GstClockTime buf_start, buf_stop;
gint64 clip_start = 0, clip_stop = 0;
guint start_offset = 0, stop_offset;
GstClockTime duration;
if(pMediaSample)
{
BYTE *pBuffer = NULL;
LONGLONG lStart = 0, lStop = 0;
long size = pMediaSample->GetActualDataLength();
pMediaSample->GetPointer(&pBuffer);
pMediaSample->GetTime(&lStart, &lStop);
if (!GST_CLOCK_TIME_IS_VALID (mDec->timestamp)) {
// Convert REFERENCE_TIME to GST_CLOCK_TIME
mDec->timestamp = (GstClockTime)lStart * 100;
}
duration = (lStop - lStart) * 100;
buf_start = mDec->timestamp;
buf_stop = mDec->timestamp + duration;
/* save stop position to start next buffer with it */
mDec->timestamp = buf_stop;
/* check if this buffer is in our current segment */
in_seg = gst_segment_clip (mDec->segment, GST_FORMAT_TIME,
buf_start, buf_stop, &clip_start, &clip_stop);
/* if the buffer is out of segment do not push it downstream */
if (!in_seg) {
GST_DEBUG_OBJECT (mDec,
"buffer is out of segment, start %" GST_TIME_FORMAT " stop %"
GST_TIME_FORMAT, GST_TIME_ARGS (buf_start), GST_TIME_ARGS (buf_stop));
goto done;
}
/* buffer is entirely or partially in-segment, so allocate a
* GstBuffer for output, and clip if required */
/* allocate a new buffer for raw audio */
mDec->last_ret = gst_pad_alloc_buffer (mDec->srcpad,
GST_BUFFER_OFFSET_NONE,
size,
GST_PAD_CAPS (mDec->srcpad), &out_buf);
if (!out_buf) {
GST_WARNING_OBJECT (mDec, "cannot allocate a new GstBuffer");
goto done;
}
/* set buffer properties */
GST_BUFFER_TIMESTAMP (out_buf) = buf_start;
GST_BUFFER_DURATION (out_buf) = duration;
memcpy (GST_BUFFER_DATA (out_buf), pBuffer,
MIN ((unsigned int)size, GST_BUFFER_SIZE (out_buf)));
/* we have to remove some heading samples */
if ((GstClockTime) clip_start > buf_start) {
start_offset = (guint)gst_util_uint64_scale_int (clip_start - buf_start,
mDec->rate, GST_SECOND) * mDec->depth / 8 * mDec->channels;
}
else
start_offset = 0;
/* we have to remove some trailing samples */
if ((GstClockTime) clip_stop < buf_stop) {
stop_offset = (guint)gst_util_uint64_scale_int (buf_stop - clip_stop,
mDec->rate, GST_SECOND) * mDec->depth / 8 * mDec->channels;
}
else
stop_offset = size;
/* truncating */
if ((start_offset != 0) || (stop_offset != (size_t) size)) {
GstBuffer *subbuf = gst_buffer_create_sub (out_buf, start_offset,
stop_offset - start_offset);
if (subbuf) {
gst_buffer_set_caps (subbuf, GST_PAD_CAPS (mDec->srcpad));
gst_buffer_unref (out_buf);
out_buf = subbuf;
}
}
GST_BUFFER_TIMESTAMP (out_buf) = clip_start;
GST_BUFFER_DURATION (out_buf) = clip_stop - clip_start;
/* replace the saved stop position by the clipped one */
mDec->timestamp = clip_stop;
GST_DEBUG_OBJECT (mDec,
"push_buffer (size %d)=> pts %" GST_TIME_FORMAT " stop %" GST_TIME_FORMAT
" duration %" GST_TIME_FORMAT, size,
GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (out_buf)),
GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (out_buf) +
GST_BUFFER_DURATION (out_buf)),
//.........这里部分代码省略.........
示例7: gst_tta_dec_chain
static GstFlowReturn
gst_tta_dec_chain (GstPad * pad, GstBuffer * in)
{
GstTtaDec *ttadec;
GstBuffer *outbuf, *buf = GST_BUFFER (in);
guchar *data, *p;
decoder *dec;
unsigned long outsize;
unsigned long size;
guint32 frame_samples;
long res;
long *prev;
ttadec = GST_TTA_DEC (GST_OBJECT_PARENT (pad));
data = GST_BUFFER_DATA (buf);
size = GST_BUFFER_SIZE (buf);
ttadec->tta_buf.bit_count = 0;
ttadec->tta_buf.bit_cache = 0;
ttadec->tta_buf.bitpos = ttadec->tta_buf.buffer_end;
ttadec->tta_buf.offset = 0;
decoder_init (ttadec->tta, ttadec->channels, ttadec->bytes);
if (GST_BUFFER_DURATION_IS_VALID (buf)) {
frame_samples =
ceil ((gdouble) (GST_BUFFER_DURATION (buf) * ttadec->samplerate) /
(gdouble) GST_SECOND);
} else {
frame_samples = ttadec->samplerate * FRAME_TIME;
}
outsize = ttadec->channels * frame_samples * ttadec->bytes;
dec = ttadec->tta;
p = ttadec->decdata;
prev = ttadec->cache;
for (res = 0;
p < ttadec->decdata + frame_samples * ttadec->channels * ttadec->bytes;) {
unsigned long unary, binary, depth, k;
long value, temp_value;
fltst *fst = &dec->fst;
adapt *rice = &dec->rice;
long *last = &dec->last;
// decode Rice unsigned
get_unary (&ttadec->tta_buf, data, size, &unary);
switch (unary) {
case 0:
depth = 0;
k = rice->k0;
break;
default:
depth = 1;
k = rice->k1;
unary--;
}
if (k) {
get_binary (&ttadec->tta_buf, data, size, &binary, k);
value = (unary << k) + binary;
} else
value = unary;
switch (depth) {
case 1:
rice->sum1 += value - (rice->sum1 >> 4);
if (rice->k1 > 0 && rice->sum1 < shift_16[rice->k1])
rice->k1--;
else if (rice->sum1 > shift_16[rice->k1 + 1])
rice->k1++;
value += bit_shift[rice->k0];
default:
rice->sum0 += value - (rice->sum0 >> 4);
if (rice->k0 > 0 && rice->sum0 < shift_16[rice->k0])
rice->k0--;
else if (rice->sum0 > shift_16[rice->k0 + 1])
rice->k0++;
}
/* this only uses a temporary variable to silence a gcc warning */
temp_value = DEC (value);
value = temp_value;
// decompress stage 1: adaptive hybrid filter
hybrid_filter (fst, &value);
// decompress stage 2: fixed order 1 prediction
switch (ttadec->bytes) {
case 1:
value += PREDICTOR1 (*last, 4);
break; // bps 8
case 2:
value += PREDICTOR1 (*last, 5);
break; // bps 16
case 3:
value += PREDICTOR1 (*last, 5);
break; // bps 24
case 4:
value += *last;
//.........这里部分代码省略.........
示例8: gst_aiff_parse_parse_comm
/*
 * gst_aiff_parse_parse_comm:
 *
 * Parse the COMM (common) chunk of an AIFF/AIFC file from @buf and
 * store channel count, total frame count, sample depth/width and
 * sample rate in @aiff.  For AIFC files the compression fourcc is
 * inspected as well; only uncompressed PCM (big or little endian) and
 * the 32/64-bit IEEE float variants are accepted.
 *
 * Returns: TRUE on success, FALSE if the chunk is too short or the
 * AIFC compression type is unsupported.
 */
static gboolean
gst_aiff_parse_parse_comm (GstAiffParse * aiff, GstBuffer * buf)
{
  guint8 *data;
  /* AIFC carries 4 extra bytes of compression fourcc after the
   * 18-byte common fields */
  int needed = aiff->is_aifc ? 22 : 18;

  if (GST_BUFFER_SIZE (buf) < needed) {
    GST_WARNING_OBJECT (aiff, "COMM chunk too short, cannot parse header");
    return FALSE;
  }

  data = GST_BUFFER_DATA (buf);

  aiff->channels = GST_READ_UINT16_BE (data);
  aiff->total_frames = GST_READ_UINT32_BE (data + 2);
  aiff->depth = GST_READ_UINT16_BE (data + 6);
  aiff->width = GST_ROUND_UP_8 (aiff->depth);
  /* sample rate is stored as an 80-bit IEEE extended float */
  aiff->rate = (int) gst_aiff_parse_read_IEEE80 (data + 8);
  aiff->floating_point = FALSE;

  if (!aiff->is_aifc) {
    aiff->endianness = G_BIG_ENDIAN;
    return TRUE;
  }

  {
    guint32 fourcc = GST_READ_UINT32_LE (data + 18);

    /* We only support the 'trivial' uncompressed AIFC, but it can be
     * either big or little endian */
    switch (fourcc) {
      case GST_MAKE_FOURCC ('N', 'O', 'N', 'E'):
        aiff->endianness = G_BIG_ENDIAN;
        break;
      case GST_MAKE_FOURCC ('s', 'o', 'w', 't'):
        aiff->endianness = G_LITTLE_ENDIAN;
        break;
      case GST_MAKE_FOURCC ('F', 'L', '3', '2'):
      case GST_MAKE_FOURCC ('f', 'l', '3', '2'):
        aiff->floating_point = TRUE;
        aiff->width = aiff->depth = 32;
        aiff->endianness = G_BIG_ENDIAN;
        break;
      case GST_MAKE_FOURCC ('f', 'l', '6', '4'):
        aiff->floating_point = TRUE;
        aiff->width = aiff->depth = 64;
        aiff->endianness = G_BIG_ENDIAN;
        break;
      default:
        GST_WARNING_OBJECT (aiff, "Unsupported compression in AIFC "
            "file: %" GST_FOURCC_FORMAT,
            GST_FOURCC_ARGS (fourcc));
        return FALSE;
    }
  }

  return TRUE;
}
示例9: gst_aiff_parse_stream_headers
//.........这里部分代码省略.........
size = MIN (datasize, (upstream_size - aiff->datastart));
}
aiff->datasize = (guint64) datasize;
aiff->dataleft = (guint64) datasize;
aiff->end_offset = datasize + aiff->datastart;
if (!aiff->streaming) {
/* We will continue looking at chunks until the end - to read tags,
* etc. */
aiff->offset += datasize;
}
GST_DEBUG_OBJECT (aiff, "datasize = %d", datasize);
if (aiff->streaming) {
done = TRUE;
}
break;
}
case GST_MAKE_FOURCC ('I', 'D', '3', ' '):{
GstTagList *tags;
if (aiff->streaming) {
if (!gst_aiff_parse_peek_chunk (aiff, &tag, &size))
return GST_FLOW_OK;
gst_adapter_flush (aiff->adapter, 8);
aiff->offset += 8;
buf = gst_adapter_take_buffer (aiff->adapter, size);
} else {
if ((res = gst_aiff_parse_read_chunk (aiff,
&aiff->offset, &tag, &buf)) != GST_FLOW_OK)
return res;
}
GST_LOG_OBJECT (aiff, "ID3 chunk of size %u", GST_BUFFER_SIZE (buf));
tags = gst_tag_list_from_id3v2_tag (buf);
gst_buffer_unref (buf);
GST_INFO_OBJECT (aiff, "ID3 tags: %" GST_PTR_FORMAT, tags);
if (aiff->tags == NULL) {
aiff->tags = tags;
} else {
gst_tag_list_insert (aiff->tags, tags, GST_TAG_MERGE_APPEND);
gst_tag_list_free (tags);
}
break;
}
default:
gst_aiff_parse_ignore_chunk (aiff, buf, tag, size);
}
if (upstream_size && (aiff->offset >= upstream_size)) {
/* Now we have gone through the whole file */
done = TRUE;
}
}
/* We read all the chunks (in pull mode) or reached the SSND chunk
* (in push mode). We must have both COMM and SSND now; error out
* otherwise.
*/
if (!aiff->got_comm) {
GST_WARNING_OBJECT (aiff, "Failed to find COMM chunk");
goto no_header;
}
示例10: gst_omx_base_src_create_from_port
/* protected helper method which can be used by derived classes:
*/
/*
 * gst_omx_base_src_create_from_port:
 *
 * Protected helper for derived classes: pull the next non-empty
 * GstBuffer out of @out_port and return it in @ret_buf.  If the OMX
 * core is still Idle it is started first; if it is in any state other
 * than Executing afterwards, GST_FLOW_ERROR is returned.  An event
 * received on the port is treated as EOS and marks the core done.
 *
 * Returns: GST_FLOW_OK with *@ret_buf set, GST_FLOW_ERROR on a failed
 * receive or wrong core state, or GST_FLOW_UNEXPECTED once the port
 * has been disabled.
 */
GstFlowReturn
gst_omx_base_src_create_from_port (GstOmxBaseSrc *self,
                                   GOmxPort *out_port,
                                   GstBuffer **ret_buf)
{
    GOmxCore *core = self->gomx;
    GstFlowReturn result = GST_FLOW_OK;

    GST_LOG_OBJECT (self, "begin");

    if (out_port->enabled)
    {
        /* kick the component out of Idle the first time around */
        if (G_UNLIKELY (core->omx_state == OMX_StateIdle))
        {
            GST_INFO_OBJECT (self, "omx: play");
            g_omx_core_start (core);
        }

        if (G_UNLIKELY (core->omx_state != OMX_StateExecuting))
        {
            GST_ERROR_OBJECT (self, "Whoa! very wrong");
            result = GST_FLOW_ERROR;
            goto beach;
        }

        while (out_port->enabled)
        {
            gpointer obj = g_omx_port_recv (out_port);

            if (G_UNLIKELY (!obj))
            {
                result = GST_FLOW_ERROR;
                break;
            }

            if (G_LIKELY (GST_IS_BUFFER (obj)))
            {
                GstBuffer *buf = GST_BUFFER (obj);

                /* skip empty buffers; hand the first non-empty one
                 * to the caller */
                if (G_LIKELY (GST_BUFFER_SIZE (buf) > 0))
                {
                    PRINT_BUFFER (self, buf);
                    *ret_buf = buf;
                    break;
                }
            }
            else if (GST_IS_EVENT (obj))
            {
                GST_INFO_OBJECT (self, "got eos");
                g_omx_core_set_done (core);
                break;
            }
        }
    }

    if (!out_port->enabled)
    {
        GST_WARNING_OBJECT (self, "done");
        result = GST_FLOW_UNEXPECTED;
    }

beach:
    GST_LOG_OBJECT (self, "end");
    return result;
}
示例11: gst_aiff_parse_stream_data
static GstFlowReturn
gst_aiff_parse_stream_data (GstAiffParse * aiff)
{
GstBuffer *buf = NULL;
GstFlowReturn res = GST_FLOW_OK;
guint64 desired, obtained;
GstClockTime timestamp, next_timestamp, duration;
guint64 pos, nextpos;
iterate_adapter:
GST_LOG_OBJECT (aiff,
"offset: %" G_GINT64_FORMAT " , end: %" G_GINT64_FORMAT " , dataleft: %"
G_GINT64_FORMAT, aiff->offset, aiff->end_offset, aiff->dataleft);
/* Get the next n bytes and output them */
if (aiff->dataleft == 0 || aiff->dataleft < aiff->bytes_per_sample)
goto found_eos;
/* scale the amount of data by the segment rate so we get equal
* amounts of data regardless of the playback rate */
desired =
MIN (gst_guint64_to_gdouble (aiff->dataleft),
MAX_BUFFER_SIZE * aiff->segment.abs_rate);
if (desired >= aiff->bytes_per_sample && aiff->bytes_per_sample > 0)
desired -= (desired % aiff->bytes_per_sample);
GST_LOG_OBJECT (aiff, "Fetching %" G_GINT64_FORMAT " bytes of data "
"from the sinkpad", desired);
if (aiff->streaming) {
guint avail = gst_adapter_available (aiff->adapter);
if (avail < desired) {
GST_LOG_OBJECT (aiff, "Got only %d bytes of data from the sinkpad",
avail);
return GST_FLOW_OK;
}
buf = gst_adapter_take_buffer (aiff->adapter, desired);
} else {
if ((res = gst_pad_pull_range (aiff->sinkpad, aiff->offset,
desired, &buf)) != GST_FLOW_OK)
goto pull_error;
}
/* If we have a pending close/start segment, send it now. */
if (G_UNLIKELY (aiff->close_segment != NULL)) {
gst_pad_push_event (aiff->srcpad, aiff->close_segment);
aiff->close_segment = NULL;
}
if (G_UNLIKELY (aiff->start_segment != NULL)) {
gst_pad_push_event (aiff->srcpad, aiff->start_segment);
aiff->start_segment = NULL;
}
if (G_UNLIKELY (aiff->tags != NULL)) {
gst_element_found_tags_for_pad (GST_ELEMENT_CAST (aiff), aiff->srcpad,
aiff->tags);
aiff->tags = NULL;
}
obtained = GST_BUFFER_SIZE (buf);
/* our positions in bytes */
pos = aiff->offset - aiff->datastart;
nextpos = pos + obtained;
/* update offsets, does not overflow. */
GST_BUFFER_OFFSET (buf) = pos / aiff->bytes_per_sample;
GST_BUFFER_OFFSET_END (buf) = nextpos / aiff->bytes_per_sample;
if (aiff->bps > 0) {
/* and timestamps if we have a bitrate, be careful for overflows */
timestamp =
gst_util_uint64_scale_ceil (pos, GST_SECOND, (guint64) aiff->bps);
next_timestamp =
gst_util_uint64_scale_ceil (nextpos, GST_SECOND, (guint64) aiff->bps);
duration = next_timestamp - timestamp;
/* update current running segment position */
gst_segment_set_last_stop (&aiff->segment, GST_FORMAT_TIME, next_timestamp);
} else {
/* no bitrate, all we know is that the first sample has timestamp 0, all
* other positions and durations have unknown timestamp. */
if (pos == 0)
timestamp = 0;
else
timestamp = GST_CLOCK_TIME_NONE;
duration = GST_CLOCK_TIME_NONE;
/* update current running segment position with byte offset */
gst_segment_set_last_stop (&aiff->segment, GST_FORMAT_BYTES, nextpos);
}
if (aiff->discont) {
GST_DEBUG_OBJECT (aiff, "marking DISCONT");
GST_BUFFER_FLAG_SET (buf, GST_BUFFER_FLAG_DISCONT);
aiff->discont = FALSE;
}
GST_BUFFER_TIMESTAMP (buf) = timestamp;
GST_BUFFER_DURATION (buf) = duration;
//.........这里部分代码省略.........
示例12: gst_tag_list_from_xmp_buffer
/**
* gst_tag_list_from_xmp_buffer:
* @buffer: buffer
*
* Parse a xmp packet into a taglist.
*
* Returns: new taglist or %NULL, free the list when done
*
* Since: 0.10.29
*/
GstTagList *
gst_tag_list_from_xmp_buffer (const GstBuffer * buffer)
{
GstTagList *list = NULL;
const gchar *xps, *xp1, *xp2, *xpe, *ns, *ne;
guint len, max_ft_len;
gboolean in_tag;
gchar *part, *pp;
guint i;
const gchar *last_tag = NULL;
XmpTag *last_xmp_tag = NULL;
GSList *pending_tags = NULL;
xmp_tags_initialize ();
g_return_val_if_fail (GST_IS_BUFFER (buffer), NULL);
g_return_val_if_fail (GST_BUFFER_SIZE (buffer) > 0, NULL);
xps = (const gchar *) GST_BUFFER_DATA (buffer);
len = GST_BUFFER_SIZE (buffer);
xpe = &xps[len + 1];
/* check header and footer */
xp1 = g_strstr_len (xps, len, "<?xpacket begin");
if (!xp1)
goto missing_header;
xp1 = &xp1[strlen ("<?xpacket begin")];
while (*xp1 != '>' && *xp1 != '<' && xp1 < xpe)
xp1++;
if (*xp1 != '>')
goto missing_header;
max_ft_len = 1 + strlen ("<?xpacket end=\".\"?>\n");
if (len < max_ft_len)
goto missing_footer;
GST_DEBUG ("checking footer: [%s]", &xps[len - max_ft_len]);
xp2 = g_strstr_len (&xps[len - max_ft_len], max_ft_len, "<?xpacket ");
if (!xp2)
goto missing_footer;
GST_INFO ("xmp header okay");
/* skip > and text until first xml-node */
xp1++;
while (*xp1 != '<' && xp1 < xpe)
xp1++;
/* no tag can be longer that the whole buffer */
part = g_malloc (xp2 - xp1);
list = gst_tag_list_new ();
/* parse data into a list of nodes */
/* data is between xp1..xp2 */
in_tag = TRUE;
ns = ne = xp1;
pp = part;
while (ne < xp2) {
if (in_tag) {
ne++;
while (ne < xp2 && *ne != '>' && *ne != '<') {
if (*ne == '\n' || *ne == '\t' || *ne == ' ') {
while (ne < xp2 && (*ne == '\n' || *ne == '\t' || *ne == ' '))
ne++;
*pp++ = ' ';
} else {
*pp++ = *ne++;
}
}
*pp = '\0';
if (*ne != '>')
goto broken_xml;
/* create node */
/* {XML, ns, ne-ns} */
if (ns[0] != '/') {
gchar *as = strchr (part, ' ');
/* only log start nodes */
GST_INFO ("xml: %s", part);
if (as) {
gchar *ae, *d;
/* skip ' ' and scan the attributes */
as++;
d = ae = as;
/* split attr=value pairs */
while (*ae != '\0') {
if (*ae == '=') {
/* attr/value delimmiter */
//.........这里部分代码省略.........
示例13: spc_setup
/*
 * spc_setup:
 * One-time initialization of the SPC decoder element.
 *
 * Negotiates caps, extracts the ID666 tag block from the buffered .spc
 * file and posts it as a GstTagList, initializes the OSPC emulator with
 * the file data, sends a new (time) segment, and starts the streaming
 * task (spc_play) on the source pad.
 *
 * Returns: TRUE on success, FALSE if no file buffer is present, caps
 * negotiation fails, or the emulator rejects the data.
 */
static gboolean
spc_setup (GstSpcDec * spc)
{
spc_tag_info *info;
GstTagList *taglist;
guint64 total_duration;
/* need the whole .spc file buffered and downstream caps agreed first */
if (!spc->buf || !spc_negotiate (spc)) {
return FALSE;
}
info = &(spc->tag_info);
spc_tag_get_info (GST_BUFFER_DATA (spc->buf), GST_BUFFER_SIZE (spc->buf),
info);
/* translate each present ID666 field into the corresponding GStreamer tag */
taglist = gst_tag_list_new ();
if (info->title)
gst_tag_list_add (taglist, GST_TAG_MERGE_REPLACE, GST_TAG_TITLE,
info->title, NULL);
if (info->artist)
gst_tag_list_add (taglist, GST_TAG_MERGE_REPLACE, GST_TAG_ARTIST,
info->artist, NULL);
/* Prefer the name of the official soundtrack over the name of the game (since this is
* how track numbers are derived)
*/
if (info->album)
gst_tag_list_add (taglist, GST_TAG_MERGE_REPLACE, GST_TAG_ALBUM,
info->album, NULL);
else if (info->game)
gst_tag_list_add (taglist, GST_TAG_MERGE_REPLACE, GST_TAG_ALBUM, info->game,
NULL);
if (info->year) {
/* only the year is stored in the tag; day/month default to Jan 1st */
GDate *date = g_date_new_dmy (1, 1, info->year);
gst_tag_list_add (taglist, GST_TAG_MERGE_REPLACE, GST_TAG_DATE, date, NULL);
g_date_free (date);
}
if (info->track) {
gst_tag_list_add (taglist, GST_TAG_MERGE_REPLACE, GST_TAG_TRACK_NUMBER,
info->track, NULL);
}
if (info->comment)
gst_tag_list_add (taglist, GST_TAG_MERGE_REPLACE, GST_TAG_COMMENT,
info->comment, NULL);
if (info->disc)
gst_tag_list_add (taglist, GST_TAG_MERGE_REPLACE,
GST_TAG_ALBUM_VOLUME_NUMBER, info->disc, NULL);
if (info->publisher)
gst_tag_list_add (taglist, GST_TAG_MERGE_REPLACE, GST_TAG_ORGANIZATION,
info->publisher, NULL);
if (info->dumper)
gst_tag_list_add (taglist, GST_TAG_MERGE_REPLACE, GST_TAG_CONTACT,
info->dumper, NULL);
if (info->emulator)
gst_tag_list_add (taglist, GST_TAG_MERGE_REPLACE, GST_TAG_ENCODER,
info->emulator == EMU_ZSNES ? "ZSNES" : "Snes9x", NULL);
/* advertised duration includes the fade-out tail */
total_duration = (guint64) (gst_spc_duration (spc) + gst_spc_fadeout (spc));
gst_tag_list_add (taglist, GST_TAG_MERGE_REPLACE,
GST_TAG_DURATION, total_duration,
GST_TAG_GENRE, "Game", GST_TAG_CODEC, "SPC700", NULL);
gst_element_found_tags_for_pad (GST_ELEMENT (spc), spc->srcpad, taglist);
/* spc_tag_info_free(&info); */
/* hand the raw file image to the OSPC emulator; non-zero means failure */
if (OSPC_Init (GST_BUFFER_DATA (spc->buf), GST_BUFFER_SIZE (spc->buf)) != 0) {
return FALSE;
}
/* open-ended time segment starting at 0, normal rate */
gst_pad_push_event (spc->srcpad, gst_event_new_new_segment (FALSE, 1.0,
GST_FORMAT_TIME, 0, -1, 0));
/* start the streaming loop that generates audio (see spc_play) */
gst_pad_start_task (spc->srcpad, (GstTaskFunction) spc_play, spc->srcpad);
/* We can't unreference this buffer because we might need to re-initialize
* the emulator with the original data during a reverse seek
* gst_buffer_unref (spc->buf);
* spc->buf = NULL;
*/
spc->initialized = TRUE;
spc->seeking = FALSE;
spc->seekpoint = 0;
spc->byte_pos = 0;
return spc->initialized;
}
示例14: spc_play
/*
 * spc_play:
 * Streaming task of the SPC decoder, run repeatedly on the source pad.
 *
 * Normal operation: allocates a 1600-sample stereo 16-bit buffer
 * (1600 * 4 bytes), timestamps it from the running byte position
 * (32000 Hz, 2 channels, 2 bytes/sample), renders into it with
 * OSPC_Run and pushes it downstream.
 *
 * Seeking: the emulator can only run forward, so a backward seek
 * re-initializes it from the cached file buffer, then audio is rendered
 * and discarded (OSPC_Run with a NULL output) until the seek target
 * byte position is reached; an empty buffer is pushed meanwhile.
 *
 * Past the nominal duration a parabolic fade-out envelope is applied
 * in-place; at the very end, and on fatal flow returns, the task is
 * paused and EOS is pushed.
 */
static void
spc_play (GstPad * pad)
{
GstSpcDec *spc = GST_SPC_DEC (gst_pad_get_parent (pad));
GstFlowReturn flow_return;
GstBuffer *out;
gboolean seeking = spc->seeking;
gint64 duration, fade, end, position;
if (!seeking) {
/* render one block of audio: 1600 frames of S16 stereo */
out = gst_buffer_new_and_alloc (1600 * 4);
gst_buffer_set_caps (out, GST_PAD_CAPS (pad));
/* byte position -> nanoseconds at 32000 Hz * 2 ch * 2 bytes */
GST_BUFFER_TIMESTAMP (out) =
(gint64) gst_util_uint64_scale ((guint64) spc->byte_pos, GST_SECOND,
32000 * 2 * 2);
spc->byte_pos += OSPC_Run (-1, (short *) GST_BUFFER_DATA (out), 1600 * 4);
} else {
/* the emulator only runs forward: restart it for backward seeks */
if (spc->seekpoint < spc->byte_pos) {
OSPC_Init (GST_BUFFER_DATA (spc->buf), GST_BUFFER_SIZE (spc->buf));
spc->byte_pos = 0;
}
/* burn through audio (NULL output) until the seek target is reached */
spc->byte_pos += OSPC_Run (-1, NULL, 1600 * 4);
if (spc->byte_pos >= spc->seekpoint) {
spc->seeking = FALSE;
}
/* push an empty buffer while seeking so downstream keeps flowing */
out = gst_buffer_new ();
gst_buffer_set_caps (out, GST_PAD_CAPS (pad));
}
duration = gst_spc_duration (spc);
fade = gst_spc_fadeout (spc);
end = duration + fade;
position =
(gint64) gst_util_uint64_scale ((guint64) spc->byte_pos, GST_SECOND,
32000 * 2 * 2);
if (position >= duration) {
gint16 *data = (gint16 *) GST_BUFFER_DATA (out);
guint32 size = GST_BUFFER_SIZE (out) / sizeof (gint16);
unsigned int i;
/* remaining fade time; shrinks to 0 as we approach 'end' */
gint64 num = (fade - (position - duration));
for (i = 0; i < size; i++) {
/* Apply a parabolic volume envelope */
/* NOTE: the interleaved divisions keep the intermediate products
* within range; do not reorder this expression */
data[i] = (gint16) (data[i] * num / fade * num / fade);
}
}
if ((flow_return = gst_pad_push (spc->srcpad, out)) != GST_FLOW_OK) {
GST_DEBUG_OBJECT (spc, "pausing task, reason %s",
gst_flow_get_name (flow_return));
gst_pad_pause_task (pad);
/* fatal errors and not-linked produce EOS downstream */
if (flow_return <= GST_FLOW_UNEXPECTED
|| flow_return == GST_FLOW_NOT_LINKED) {
gst_pad_push_event (pad, gst_event_new_eos ());
}
}
/* track (including fade) fully played: stop and signal EOS */
if (position >= end) {
gst_pad_pause_task (pad);
gst_pad_push_event (pad, gst_event_new_eos ());
}
gst_object_unref (spc);
return;
}
示例15: gst_vdp_h264_dec_set_sink_caps
/*
 * gst_vdp_h264_dec_set_sink_caps:
 * Sink caps handler for the VDPAU H.264 decoder.
 *
 * If the caps carry a "codec_data" buffer the stream is packetized
 * (AVC / avcC format): the avcC header is parsed to obtain the NAL
 * length-field size and the embedded SPS and PPS parameter sets, which
 * are fed to the H.264 parser.  Caps without codec_data (byte-stream
 * input) are accepted as-is.
 *
 * NOTE(review): READ_UINT8/READ_UINT16/SKIP are project macros defined
 * elsewhere in this file; they presumably bail out (return FALSE or
 * goto) when the bit reader runs dry — confirm before relying on the
 * bounds checking here.
 *
 * Returns: TRUE if the caps were accepted, FALSE on a malformed avcC
 * block or parser failure.
 */
static gboolean
gst_vdp_h264_dec_set_sink_caps (GstBaseVideoDecoder * base_video_decoder,
GstCaps * caps)
{
GstVdpH264Dec *h264_dec;
GstStructure *structure;
const GValue *value;
h264_dec = GST_VDP_H264_DEC (base_video_decoder);
structure = gst_caps_get_structure (caps, 0);
/* packetized video has a codec_data */
if ((value = gst_structure_get_value (structure, "codec_data"))) {
GstBuffer *buf;
GstBitReader reader;
guint8 version;
guint8 n_sps, n_pps;
gint i;
GST_DEBUG_OBJECT (h264_dec, "have packetized h264");
h264_dec->packetized = TRUE;
buf = gst_value_get_buffer (value);
GST_MEMDUMP ("avcC:", GST_BUFFER_DATA (buf), GST_BUFFER_SIZE (buf));
/* parse the avcC data */
/* minimal avcC: version + profile/compat/level + lengthSize + SPS count */
if (GST_BUFFER_SIZE (buf) < 7) {
GST_ERROR_OBJECT (h264_dec, "avcC size %u < 7", GST_BUFFER_SIZE (buf));
return FALSE;
}
gst_bit_reader_init_from_buffer (&reader, buf);
/* configurationVersion must be 1 */
READ_UINT8 (&reader, version, 8);
if (version != 1)
return FALSE;
/* skip profile, compatibility, level and reserved bits (30 bits) */
SKIP (&reader, 30);
/* lengthSizeMinusOne: size of the NAL length prefix in bytes */
READ_UINT8 (&reader, h264_dec->nal_length_size, 2);
h264_dec->nal_length_size += 1;
GST_DEBUG_OBJECT (h264_dec, "nal length %u", h264_dec->nal_length_size);
SKIP (&reader, 3);
/* sequence parameter sets */
READ_UINT8 (&reader, n_sps, 5);
for (i = 0; i < n_sps; i++) {
guint16 sps_length;
guint8 *data;
READ_UINT16 (&reader, sps_length, 16);
/* length includes the NAL header byte, which is skipped below */
sps_length -= 1;
SKIP (&reader, 8);
data = GST_BUFFER_DATA (buf) + gst_bit_reader_get_pos (&reader) / 8;
if (!gst_h264_parser_parse_sequence (h264_dec->parser, data, sps_length))
return FALSE;
SKIP (&reader, sps_length * 8);
}
/* picture parameter sets */
READ_UINT8 (&reader, n_pps, 8);
for (i = 0; i < n_pps; i++) {
guint16 pps_length;
guint8 *data;
READ_UINT16 (&reader, pps_length, 16);
pps_length -= 1;
SKIP (&reader, 8);
data = GST_BUFFER_DATA (buf) + gst_bit_reader_get_pos (&reader) / 8;
if (!gst_h264_parser_parse_picture (h264_dec->parser, data, pps_length))
return FALSE;
SKIP (&reader, pps_length * 8);
}
}
return TRUE;
}