本文整理汇总了C++中bytestream2_init函数的典型用法代码示例。如果您正苦于以下问题:C++ bytestream2_init函数的具体用法?C++ bytestream2_init怎么用?C++ bytestream2_init使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了bytestream2_init函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: decompress_chunks_thread
/* Per-chunk decompression worker, run once per chunk by the slice-thread
 * executor. Decompresses chunk chunk_nb from the packet buffer into its
 * slot in the shared texture buffer. Returns 0 or a negative AVERROR. */
static int decompress_chunks_thread(AVCodecContext *avctx, void *arg,
                                    int chunk_nb, int thread_nb)
{
    HapContext *ctx = avctx->priv_data;
    HapChunk *chunk = &ctx->chunks[chunk_nb];
    uint8_t *dst    = ctx->tex_buf + chunk->uncompressed_offset;
    GetByteContext chunk_gbc;

    /* Byte reader over just this chunk's compressed bytes. */
    bytestream2_init(&chunk_gbc,
                     ctx->gbc.buffer + chunk->compressed_offset,
                     chunk->compressed_size);

    switch (chunk->compressor) {
    case HAP_COMP_SNAPPY: {
        int64_t uncompressed_size = ctx->tex_size;
        int ret = ff_snappy_uncompress(&chunk_gbc, dst, &uncompressed_size);
        if (ret < 0) {
            av_log(avctx, AV_LOG_ERROR, "Snappy uncompress error\n");
            return ret;
        }
        break;
    }
    case HAP_COMP_NONE:
        /* Chunk is stored raw: copy it straight into the texture buffer. */
        bytestream2_get_buffer(&chunk_gbc, dst, chunk->compressed_size);
        break;
    }

    return 0;
}
示例2: gif_decode_frame
/* Decode one GIF image from a packet into the AVFrame passed via data.
 * The header is re-parsed on every call, so each packet must carry a
 * complete GIF bitstream. Returns bytes consumed or a negative AVERROR. */
static int gif_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
GifState *s = avctx->priv_data;
AVFrame *picture = data;
int ret;
/* Wrap the whole packet in a byte reader used by all parsing helpers. */
bytestream2_init(&s->gb, buf, buf_size);
if ((ret = gif_read_header1(s)) < 0)
return ret;
/* GIF is always palette-based. */
avctx->pix_fmt = AV_PIX_FMT_PAL8;
if ((ret = ff_set_dimensions(avctx, s->screen_width, s->screen_height)) < 0)
return ret;
if ((ret = ff_get_buffer(avctx, picture, 0)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
/* Point the parser at the output frame's palette plane so it fills it. */
s->image_palette = (uint32_t *)picture->data[1];
ret = gif_parse_next_image(s, picture);
if (ret < 0)
return ret;
*got_frame = 1;
/* Report how many bytes of the packet were actually consumed. */
return bytestream2_tell(&s->gb);
}
示例3: mm_decode_frame
/* Decode one American Laser Games MM frame (legacy get_buffer/AVFrame API).
 * The packet starts with an MM_PREAMBLE_SIZE header whose first 16-bit LE
 * word selects the frame type; the rest is the image payload.
 * Returns bytes consumed or a negative error code. */
static int mm_decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
MmContext *s = avctx->priv_data;
int type, res;
if (buf_size < MM_PREAMBLE_SIZE)
return AVERROR_INVALIDDATA;
/* Frame type is the first little-endian 16-bit word of the preamble. */
type = AV_RL16(&buf[0]);
buf += MM_PREAMBLE_SIZE;
buf_size -= MM_PREAMBLE_SIZE;
bytestream2_init(&s->gb, buf, buf_size);
if (avctx->reget_buffer(avctx, &s->frame) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return -1;
}
switch(type) {
case MM_TYPE_PALETTE :
/* NOTE(review): res is assigned but not checked on this path —
 * a palette decode failure is silently ignored. */
res = mm_decode_pal(s);
return buf_size;
case MM_TYPE_INTRA :
res = mm_decode_intra(s, 0, 0);
break;
case MM_TYPE_INTRA_HH :
/* half-horizontal resolution variant */
res = mm_decode_intra(s, 1, 0);
break;
case MM_TYPE_INTRA_HHV :
/* half-horizontal and half-vertical resolution variant */
res = mm_decode_intra(s, 1, 1);
break;
case MM_TYPE_INTER :
res = mm_decode_inter(s, 0, 0);
break;
case MM_TYPE_INTER_HH :
res = mm_decode_inter(s, 1, 0);
break;
case MM_TYPE_INTER_HHV :
res = mm_decode_inter(s, 1, 1);
break;
default:
res = AVERROR_INVALIDDATA;
break;
}
if (res < 0)
return res;
/* Attach the current palette to the frame's palette plane. */
memcpy(s->frame.data[1], s->palette, AVPALETTE_SIZE);
*data_size = sizeof(AVFrame);
*(AVFrame*)data = s->frame;
return buf_size;
}
示例4: gif_decode_frame
/* Decode one GIF image (legacy AVFrame-in-context API).
 * A packet beginning with a GIF87a/GIF89a signature is treated as a
 * keyframe and triggers a full header re-parse and buffer reallocation;
 * otherwise the previous frame buffer is reused and updated in place.
 * Returns bytes consumed or a negative AVERROR. */
static int gif_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
{
GifState *s = avctx->priv_data;
AVFrame *picture = data;
int ret;
bytestream2_init(&s->gb, avpkt->data, avpkt->size);
/* Propagate packet timing onto the persistent frame. */
s->picture.pts = avpkt->pts;
s->picture.pkt_pts = avpkt->pts;
s->picture.pkt_dts = avpkt->dts;
s->picture.pkt_duration = avpkt->duration;
if (avpkt->size >= 6) {
/* A leading GIF signature marks the start of a new stream/keyframe. */
s->keyframe = memcmp(avpkt->data, gif87a_sig, 6) == 0 ||
memcmp(avpkt->data, gif89a_sig, 6) == 0;
} else {
s->keyframe = 0;
}
if (s->keyframe) {
if ((ret = gif_read_header1(s)) < 0)
return ret;
if ((ret = av_image_check_size(s->screen_width, s->screen_height, 0, avctx)) < 0)
return ret;
avcodec_set_dimensions(avctx, s->screen_width, s->screen_height);
/* Drop the old buffer before allocating one with the new dimensions. */
if (s->picture.data[0])
avctx->release_buffer(avctx, &s->picture);
if ((ret = ff_get_buffer(avctx, &s->picture)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
s->picture.pict_type = AV_PICTURE_TYPE_I;
s->picture.key_frame = 1;
} else {
/* Non-keyframe: keep the previous contents and paint deltas on top. */
if ((ret = avctx->reget_buffer(avctx, &s->picture)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
s->picture.pict_type = AV_PICTURE_TYPE_P;
s->picture.key_frame = 0;
}
ret = gif_parse_next_image(s, got_frame);
if (ret < 0)
return ret;
else if (*got_frame)
*picture = s->picture;
return avpkt->size;
}
示例5: apng_probe
/*
* To be a valid APNG file, we mandate, in this order:
* PNGSIG
* IHDR
* ...
* acTL
* ...
* IDAT
*/
static int apng_probe(AVProbeData *p)
{
GetByteContext gb;
/* state counts the mandatory chunks seen so far: 1 after IHDR, 2 after acTL. */
int state = 0;
uint32_t len, tag;
bytestream2_init(&gb, p->buf, p->buf_size);
/* The 8-byte PNG signature must come first. */
if (bytestream2_get_be64(&gb) != PNGSIG)
return 0;
/* Walk the chunk list: 4-byte length, 4-byte tag, payload, 4-byte CRC. */
for (;;) {
len = bytestream2_get_be32(&gb);
/* PNG chunk lengths must fit in 31 bits. */
if (len > 0x7fffffff)
return 0;
tag = bytestream2_get_le32(&gb);
/* we don't check IDAT size, as this is the last tag
* we check, and it may be larger than the probe buffer */
if (tag != MKTAG('I', 'D', 'A', 'T') &&
len > bytestream2_get_bytes_left(&gb))
return 0;
switch (tag) {
case MKTAG('I', 'H', 'D', 'R'):
/* IHDR payload is exactly 13 bytes: width, height + 5 single bytes. */
if (len != 13)
return 0;
if (av_image_check_size(bytestream2_get_be32(&gb), bytestream2_get_be32(&gb), 0, NULL))
return 0;
/* skip the remaining 5 IHDR bytes plus the 4-byte CRC */
bytestream2_skip(&gb, 9);
state++;
break;
case MKTAG('a', 'c', 'T', 'L'):
/* acTL must follow IHDR (state 1) and carry 8 bytes of payload. */
if (state != 1 ||
len != 8 ||
bytestream2_get_be32(&gb) == 0) /* 0 is not a valid value for number of frames */
return 0;
/* skip num_plays (4 bytes) plus the CRC (4 bytes) */
bytestream2_skip(&gb, 8);
state++;
break;
case MKTAG('I', 'D', 'A', 'T'):
/* First IDAT: only valid once both IHDR and acTL were seen. */
if (state != 2)
return 0;
goto end;
default:
/* skip other tags */
bytestream2_skip(&gb, len + 4);
break;
}
}
end:
return AVPROBE_SCORE_MAX;
}
示例6: tgq_decode_frame
/* Decode one EA TGQ frame (legacy AVFrame-in-context API).
 * The 8-byte packet header is skipped; the payload starts with width and
 * height whose endianness is guessed from the size field at offset 4,
 * followed by a quantizer byte and 3 reserved bytes, then macroblock data. */
static int tgq_decode_frame(AVCodecContext *avctx,
void *data, int *got_frame,
AVPacket *avpkt){
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
TgqContext *s = avctx->priv_data;
int x,y;
int big_endian;
if (buf_size < 16) {
av_log(avctx, AV_LOG_WARNING, "truncated header\n");
return -1;
}
/* Heuristic: a huge LE value at offset 4 means the file is big-endian. */
big_endian = AV_RL32(&buf[4]) > 0x000FFFFF;
bytestream2_init(&s->gb, buf + 8, buf_size - 8);
if (big_endian) {
s->width = bytestream2_get_be16u(&s->gb);
s->height = bytestream2_get_be16u(&s->gb);
} else {
s->width = bytestream2_get_le16u(&s->gb);
s->height = bytestream2_get_le16u(&s->gb);
}
/* On a dimension change, drop the old frame buffer so it is reallocated. */
if (s->avctx->width!=s->width || s->avctx->height!=s->height) {
avcodec_set_dimensions(s->avctx, s->width, s->height);
if (s->frame.data[0])
avctx->release_buffer(avctx, &s->frame);
}
/* Quantizer byte drives the dequant table; 3 following bytes are skipped. */
tgq_calculate_qtable(s, bytestream2_get_byteu(&s->gb));
bytestream2_skip(&s->gb, 3);
if (!s->frame.data[0]) {
/* Every TGQ frame is intra-coded. */
s->frame.key_frame = 1;
s->frame.pict_type = AV_PICTURE_TYPE_I;
s->frame.buffer_hints = FF_BUFFER_HINTS_VALID;
if (ff_get_buffer(avctx, &s->frame)) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
}
/* Decode macroblocks over the 16-pixel-aligned frame area. */
for (y = 0; y < FFALIGN(avctx->height, 16) >> 4; y++)
for (x = 0; x < FFALIGN(avctx->width, 16) >> 4; x++)
if (tgq_decode_mb(s, y, x) < 0)
return AVERROR_INVALIDDATA;
*got_frame = 1;
*(AVFrame*)data = s->frame;
return avpkt->size;
}
示例7: mm_decode_inter
/**
 * Decode an MM inter (delta) frame: only pixels flagged in the replacement
 * bitmask are overwritten with colors from a separate data section.
 *
 * The stream splits at data_off: run headers and bitmasks are read from the
 * front (s->gb), replacement colors from the section starting at data_off.
 *
 * @param half_horiz Half horizontal resolution (0 or 1)
 * @param half_vert Half vertical resolution (0 or 1)
 * @return 0 on success, a negative AVERROR on invalid data
 */
static int mm_decode_inter(MmContext * s, int half_horiz, int half_vert)
{
int data_off = bytestream2_get_le16(&s->gb);
int y = 0;
GetByteContext data_ptr;
if (bytestream2_get_bytes_left(&s->gb) < data_off)
return AVERROR_INVALIDDATA;
/* Second cursor over the color-data section at the end of the buffer. */
bytestream2_init(&data_ptr, s->gb.buffer + data_off, bytestream2_get_bytes_left(&s->gb) - data_off);
/* Consume run headers until the front cursor reaches the color section. */
while (s->gb.buffer < data_ptr.buffer_start) {
int i, j;
int length = bytestream2_get_byte(&s->gb);
/* Bit 7 of length is the 9th bit of the x coordinate. */
int x = bytestream2_get_byte(&s->gb) + ((length & 0x80) << 1);
length &= 0x7F;
if (length==0) {
/* length 0: x is actually a row skip count. */
y += x;
continue;
}
if (y + half_vert >= s->avctx->height)
return 0;
for(i=0; i<length; i++) {
/* Each mask byte covers 8 pixels, MSB first. */
int replace_array = bytestream2_get_byte(&s->gb);
for(j=0; j<8; j++) {
int replace = (replace_array >> (7-j)) & 1;
if (x + half_horiz >= s->avctx->width)
return AVERROR_INVALIDDATA;
if (replace) {
int color = bytestream2_get_byte(&data_ptr);
s->frame->data[0][y*s->frame->linesize[0] + x] = color;
/* In half-resolution modes each sample covers a 2x1/1x2/2x2 block. */
if (half_horiz)
s->frame->data[0][y*s->frame->linesize[0] + x + 1] = color;
if (half_vert) {
s->frame->data[0][(y+1)*s->frame->linesize[0] + x] = color;
if (half_horiz)
s->frame->data[0][(y+1)*s->frame->linesize[0] + x + 1] = color;
}
}
x += 1 + half_horiz;
}
}
y += 1 + half_vert;
}
return 0;
}
示例8: tgq_decode_frame
/* Decode one EA TGQ frame (modern get_buffer2 API).
 * Same bitstream layout as the legacy variant: 8-byte header skipped,
 * endianness of width/height guessed from the size field at offset 4,
 * then a quantizer byte, 3 reserved bytes, and macroblock data. */
static int tgq_decode_frame(AVCodecContext *avctx,
void *data, int *got_frame,
AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
TgqContext *s = avctx->priv_data;
AVFrame *frame = data;
int x, y, ret;
int big_endian;
if (buf_size < 16) {
av_log(avctx, AV_LOG_WARNING, "truncated header\n");
return AVERROR_INVALIDDATA;
}
/* Heuristic: a huge LE value at offset 4 means the file is big-endian. */
big_endian = AV_RL32(&buf[4]) > 0x000FFFFF;
bytestream2_init(&s->gb, buf + 8, buf_size - 8);
if (big_endian) {
s->width = bytestream2_get_be16u(&s->gb);
s->height = bytestream2_get_be16u(&s->gb);
} else {
s->width = bytestream2_get_le16u(&s->gb);
s->height = bytestream2_get_le16u(&s->gb);
}
ret = ff_set_dimensions(s->avctx, s->width, s->height);
if (ret < 0)
return ret;
/* Quantizer byte drives the dequant table; 3 following bytes are skipped. */
tgq_calculate_qtable(s, bytestream2_get_byteu(&s->gb));
bytestream2_skip(&s->gb, 3);
if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
return ret;
/* Every TGQ frame is intra-coded. */
frame->key_frame = 1;
frame->pict_type = AV_PICTURE_TYPE_I;
/* Decode macroblocks over the 16-pixel-aligned frame area. */
for (y = 0; y < FFALIGN(avctx->height, 16) >> 4; y++)
for (x = 0; x < FFALIGN(avctx->width, 16) >> 4; x++)
if (tgq_decode_mb(s, frame, y, x) < 0)
return AVERROR_INVALIDDATA;
*got_frame = 1;
return avpkt->size;
}
示例9: aasc_decode_frame
/* Decode one Autodesk Animator Studio Codec frame.
 * The packet starts with a 32-bit LE compression type: 0 = raw bottom-up
 * BGR24 with rows padded to 4 bytes, 1 = Microsoft RLE. */
static int aasc_decode_frame(AVCodecContext *avctx,
void *data, int *got_frame,
AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
AascContext *s = avctx->priv_data;
int compr, i, stride, ret;
if (buf_size < 4)
return AVERROR_INVALIDDATA;
if ((ret = ff_reget_buffer(avctx, s->frame)) < 0) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return ret;
}
compr = AV_RL32(buf);
buf += 4;
buf_size -= 4;
switch (compr) {
case 0:
/* Raw: rows are stored bottom-up, padded to a 4-byte boundary. */
stride = (avctx->width * 3 + 3) & ~3;
if (buf_size < stride * avctx->height)
return AVERROR_INVALIDDATA;
for (i = avctx->height - 1; i >= 0; i--) {
memcpy(s->frame->data[0] + i * s->frame->linesize[0], buf, avctx->width * 3);
buf += stride;
}
break;
case 1:
/* NOTE(review): ff_msrle_decode's return value is ignored here —
 * a malformed RLE stream is not reported. Confirm intent. */
bytestream2_init(&s->gb, buf, buf_size);
ff_msrle_decode(avctx, (AVPicture*)s->frame, 8, &s->gb);
break;
default:
av_log(avctx, AV_LOG_ERROR, "Unknown compression type %d\n", compr);
return AVERROR_INVALIDDATA;
}
*got_frame = 1;
if ((ret = av_frame_ref(data, s->frame)) < 0)
return ret;
/* report that the buffer was completely consumed */
return avpkt->size;
}
示例10: decode_extradata_ps_mp4
/* There are (invalid) samples in the wild with mp4-style extradata, where the
* parameter sets are stored unescaped (i.e. as RBSP).
* This function catches the parameter set decoding failure and tries again
* after escaping it */
static int decode_extradata_ps_mp4(const uint8_t *buf, int buf_size, H264ParamSets *ps,
int err_recognition, void *logctx)
{
int ret;
/* First attempt: decode the parameter sets as-is. */
ret = decode_extradata_ps(buf, buf_size, ps, 1, logctx);
if (ret < 0 && !(err_recognition & AV_EF_EXPLODE)) {
GetByteContext gbc;
PutByteContext pbc;
uint8_t *escaped_buf;
int escaped_buf_size;
av_log(logctx, AV_LOG_WARNING,
"SPS decoding failure, trying again after escaping the NAL\n");
/* Worst case every 2 input bytes expand to 3; the escaped size must
 * also fit the 16-bit length prefix written by AV_WB16 below. */
if (buf_size / 2 >= (INT16_MAX - AV_INPUT_BUFFER_PADDING_SIZE) / 3)
return AVERROR(ERANGE);
escaped_buf_size = buf_size * 3 / 2 + AV_INPUT_BUFFER_PADDING_SIZE;
escaped_buf = av_mallocz(escaped_buf_size);
if (!escaped_buf)
return AVERROR(ENOMEM);
bytestream2_init(&gbc, buf, buf_size);
bytestream2_init_writer(&pbc, escaped_buf, escaped_buf_size);
while (bytestream2_get_bytes_left(&gbc)) {
/* Any 0x000000..0x000003 triple must get an emulation-prevention
 * 0x03 byte inserted after the two zero bytes. */
if (bytestream2_get_bytes_left(&gbc) >= 3 &&
bytestream2_peek_be24(&gbc) <= 3) {
bytestream2_put_be24(&pbc, 3);
bytestream2_skip(&gbc, 2);
} else
bytestream2_put_byte(&pbc, bytestream2_get_byte(&gbc));
}
escaped_buf_size = bytestream2_tell_p(&pbc);
/* Patch the leading 16-bit NAL size to the escaped payload length. */
AV_WB16(escaped_buf, escaped_buf_size - 2);
ret = decode_extradata_ps(escaped_buf, escaped_buf_size, ps, 1, logctx);
av_freep(&escaped_buf);
if (ret < 0)
return ret;
}
return 0;
}
示例11: decode_init
/* Initialize the ANM decoder: set the pixel format and load the 256-entry
 * palette from the codec extradata. Returns 0 on success, -1 if the
 * extradata is too short. */
static av_cold int decode_init(AVCodecContext *avctx)
{
    AnmContext *s = avctx->priv_data;
    int idx;

    avctx->pix_fmt    = PIX_FMT_PAL8;
    s->frame.reference = 1;

    /* Extradata layout: 16 records of 8 bytes, then 256 LE 32-bit
     * palette entries. */
    bytestream2_init(&s->gb, avctx->extradata, avctx->extradata_size);
    if (bytestream2_get_bytes_left(&s->gb) < 16 * 8 + 4 * 256)
        return -1;

    bytestream2_skipu(&s->gb, 16 * 8);
    for (idx = 0; idx < 256; idx++)
        s->palette[idx] = bytestream2_get_le32u(&s->gb);

    return 0;
}
示例12: hq_hqa_decode_frame
static int hq_hqa_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
HQContext *ctx = avctx->priv_data;
AVFrame *pic = data;
uint32_t info_tag;
unsigned int data_size;
int ret;
unsigned tag;
bytestream2_init(&ctx->gbc, avpkt->data, avpkt->size);
if (bytestream2_get_bytes_left(&ctx->gbc) < 4 + 4) {
av_log(avctx, AV_LOG_ERROR, "Frame is too small (%d).\n", avpkt->size);
return AVERROR_INVALIDDATA;
}
info_tag = bytestream2_peek_le32(&ctx->gbc);
if (info_tag == MKTAG('I', 'N', 'F', 'O')) {
int info_size;
bytestream2_skip(&ctx->gbc, 4);
info_size = bytestream2_get_le32(&ctx->gbc);
if (bytestream2_get_bytes_left(&ctx->gbc) < info_size) {
av_log(avctx, AV_LOG_ERROR, "Invalid INFO size (%d).\n", info_size);
return AVERROR_INVALIDDATA;
}
ff_canopus_parse_info_tag(avctx, ctx->gbc.buffer, info_size);
bytestream2_skip(&ctx->gbc, info_size);
}
data_size = bytestream2_get_bytes_left(&ctx->gbc);
if (data_size < 4) {
av_log(avctx, AV_LOG_ERROR, "Frame is too small (%d).\n", data_size);
return AVERROR_INVALIDDATA;
}
/* HQ defines dimensions and number of slices, and thus slice traversal
* order. HQA has no size constraint and a fixed number of slices, so it
* needs a separate scheme for it. */
tag = bytestream2_get_le32(&ctx->gbc);
if ((tag & 0x00FFFFFF) == (MKTAG('U', 'V', 'C', ' ') & 0x00FFFFFF)) {
ret = hq_decode_frame(ctx, pic, tag >> 24, data_size);
} else if (tag == MKTAG('H', 'Q', 'A', '1')) {
示例13: mm_decode_frame
/* Decode one American Laser Games MM frame.
 * The packet starts with an MM_PREAMBLE_SIZE header whose first 16-bit LE
 * word selects the frame type; the rest is the image payload.
 * Returns bytes consumed or a negative AVERROR. */
static int mm_decode_frame(AVCodecContext *avctx,
                           void *data, int *got_frame,
                           AVPacket *avpkt)
{
    MmContext *s       = avctx->priv_data;
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    int frame_type, ret;

    if (buf_size < MM_PREAMBLE_SIZE)
        return AVERROR_INVALIDDATA;

    /* Frame type is the first little-endian 16-bit word of the preamble. */
    frame_type = AV_RL16(&buf[0]);
    buf       += MM_PREAMBLE_SIZE;
    buf_size  -= MM_PREAMBLE_SIZE;
    bytestream2_init(&s->gb, buf, buf_size);

    if ((ret = ff_reget_buffer(avctx, s->frame)) < 0)
        return ret;

    switch (frame_type) {
    case MM_TYPE_PALETTE:
        mm_decode_pal(s);
        return avpkt->size;
    case MM_TYPE_INTRA:
        ret = mm_decode_intra(s, 0, 0);
        break;
    case MM_TYPE_INTRA_HH:
        ret = mm_decode_intra(s, 1, 0);
        break;
    case MM_TYPE_INTRA_HHV:
        ret = mm_decode_intra(s, 1, 1);
        break;
    case MM_TYPE_INTER:
        ret = mm_decode_inter(s, 0, 0);
        break;
    case MM_TYPE_INTER_HH:
        ret = mm_decode_inter(s, 1, 0);
        break;
    case MM_TYPE_INTER_HHV:
        ret = mm_decode_inter(s, 1, 1);
        break;
    default:
        ret = AVERROR_INVALIDDATA;
        break;
    }
    if (ret < 0)
        return ret;

    /* Attach the current palette to the frame's palette plane. */
    memcpy(s->frame->data[1], s->palette, AVPALETTE_SIZE);

    if ((ret = av_frame_ref(data, s->frame)) < 0)
        return ret;
    *got_frame = 1;

    return avpkt->size;
}
示例14: aasc_decode_frame
/* Decode one Autodesk Animator Studio Codec frame (legacy AVFrame API).
 * The packet starts with a 32-bit LE compression type: 0 = raw bottom-up
 * BGR24 with rows padded to 4 bytes, 1 = Microsoft RLE.
 * Returns bytes consumed or -1 on error. */
static int aasc_decode_frame(AVCodecContext *avctx,
                             void *data, int *data_size,
                             AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    AascContext *s     = avctx->priv_data;
    int compr, i, stride;

    /* AV_RL32() below reads 4 bytes unconditionally; reject packets too
     * small to hold even the compression-type field (guard was missing). */
    if (buf_size < 4)
        return -1;

    s->frame.reference = 1;
    s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
    if (avctx->reget_buffer(avctx, &s->frame)) {
        av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
        return -1;
    }

    compr     = AV_RL32(buf);
    buf      += 4;
    buf_size -= 4;

    switch (compr) {
    case 0:
        /* Raw: rows are stored bottom-up, padded to a 4-byte boundary.
         * Bound-check the payload before copying (check was missing). */
        stride = (avctx->width * 3 + 3) & ~3;
        if (buf_size < stride * avctx->height)
            return -1;
        for (i = avctx->height - 1; i >= 0; i--) {
            memcpy(s->frame.data[0] + i * s->frame.linesize[0], buf, avctx->width * 3);
            buf += stride;
        }
        break;
    case 1:
        /* MS RLE; rewind 4 bytes so the type dword is part of the stream. */
        bytestream2_init(&s->gb, buf - 4, buf_size + 4);
        ff_msrle_decode(avctx, (AVPicture*)&s->frame, 8, &s->gb);
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Unknown compression type %d\n", compr);
        return -1;
    }

    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = s->frame;

    /* report that the buffer was completely consumed */
    return buf_size;
}
示例15: tm2_read_stream
static int tm2_read_stream(TM2Context *ctx, const uint8_t *buf, int stream_id, int buf_size)
{
int i, ret;
int skip = 0;
int len, toks, pos;
TM2Codes codes;
GetByteContext gb;
if (buf_size < 4) {
av_log(ctx->avctx, AV_LOG_ERROR, "not enough space for len left\n");
return AVERROR_INVALIDDATA;
}
/* get stream length in dwords */
bytestream2_init(&gb, buf, buf_size);
len = bytestream2_get_be32(&gb);
skip = len * 4 + 4;
if (len == 0)
return 4;
if (len >= INT_MAX / 4 - 1 || len < 0 || skip > buf_size) {
av_log(ctx->avctx, AV_LOG_ERROR, "Error, invalid stream size.\n");
return AVERROR_INVALIDDATA;
}
toks = bytestream2_get_be32(&gb);
if (toks & 1) {
len = bytestream2_get_be32(&gb);
if (len == TM2_ESCAPE) {
len = bytestream2_get_be32(&gb);
}
if (len > 0) {
pos = bytestream2_tell(&gb);
if (skip <= pos)
return AVERROR_INVALIDDATA;
init_get_bits(&ctx->gb, buf + pos, (skip - pos) * 8);
if ((ret = tm2_read_deltas(ctx, stream_id)) < 0)
return ret;
bytestream2_skip(&gb, ((get_bits_count(&ctx->gb) + 31) >> 5) << 2);
}