本文整理汇总了C++中bytestream2_get_byteu函数的典型用法代码示例。如果您正苦于以下问题:C++ bytestream2_get_byteu函数的具体用法?C++ bytestream2_get_byteu怎么用?C++ bytestream2_get_byteu使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了bytestream2_get_byteu函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: get_cod
/** get coding parameters for a particular tile or whole image*/
static int get_cod(J2kDecoderContext *s, J2kCodingStyle *c, uint8_t *properties)
{
    J2kCodingStyle tmp;
    int compno, ret;

    /* csty(1) + prog order(1) + nlayers(2) + mct(1) = 5 bytes minimum */
    if (bytestream2_get_bytes_left(&s->g) < 5)
        return AVERROR(EINVAL);

    tmp.log2_prec_width  =
    tmp.log2_prec_height = 15;
    tmp.csty = bytestream2_get_byteu(&s->g);

    if (bytestream2_get_byteu(&s->g)) { // progression level
        av_log(s->avctx, AV_LOG_ERROR, "only LRCP progression supported\n");
        return -1;
    }

    tmp.nlayers = bytestream2_get_be16u(&s->g);
    tmp.mct     = bytestream2_get_byteu(&s->g); // multiple component transformation

    /* Fix: propagate parse failures from the shared COD/COC part instead of
     * silently continuing with a partially-initialized tmp. */
    if ((ret = get_cox(s, &tmp)) < 0)
        return ret;

    /* apply the tile/image default to every component that has not been
     * overridden by a component-specific COC segment */
    for (compno = 0; compno < s->ncomponents; compno++)
        if (!(properties[compno] & HAD_COC))
            memcpy(c + compno, &tmp, sizeof(J2kCodingStyle));
    return 0;
}
示例2: gif_read_header1
/* Parse the GIF signature and logical screen descriptor, including the
 * optional global color table. Returns 0 on success, a negative AVERROR
 * code on malformed or truncated input. */
static int gif_read_header1(GifState *s)
{
    uint8_t signature[6];
    int packed, aspect, bg_index;

    /* signature (6 bytes) + logical screen descriptor (7 bytes) */
    if (bytestream2_get_bytes_left(&s->gb) < 13)
        return AVERROR_INVALIDDATA;

    /* the file must start with "GIF87a" or "GIF89a" */
    bytestream2_get_bufferu(&s->gb, signature, 6);
    if (memcmp(signature, gif87a_sig, 6) != 0 &&
        memcmp(signature, gif89a_sig, 6) != 0)
        return AVERROR_INVALIDDATA;

    /* logical screen descriptor */
    s->transparent_color_index = -1;
    s->screen_width  = bytestream2_get_le16u(&s->gb);
    s->screen_height = bytestream2_get_le16u(&s->gb);
    if ((unsigned)s->screen_width  > 32767 ||
        (unsigned)s->screen_height > 32767) {
        av_log(s->avctx, AV_LOG_ERROR, "picture size too large\n");
        return AVERROR_INVALIDDATA;
    }

    av_fast_malloc(&s->idx_line, &s->idx_line_size, s->screen_width);
    if (!s->idx_line)
        return AVERROR(ENOMEM);

    /* packed fields: bit 7 = global palette, bits 6..4 = color resolution,
     * bits 2..0 = palette size exponent */
    packed = bytestream2_get_byteu(&s->gb);
    s->color_resolution   = ((packed & 0x70) >> 4) + 1;
    s->has_global_palette = (packed & 0x80);
    s->bits_per_pixel     = (packed & 0x07) + 1;

    bg_index = bytestream2_get_byteu(&s->gb);

    /* a non-zero aspect byte encodes pixel aspect as (aspect + 15) / 64 */
    aspect = bytestream2_get_byteu(&s->gb);
    if (aspect) {
        s->avctx->sample_aspect_ratio.num = aspect + 15;
        s->avctx->sample_aspect_ratio.den = 64;
    }

    av_dlog(s->avctx, "screen_w=%d screen_h=%d bpp=%d global_palette=%d\n",
            s->screen_width, s->screen_height, s->bits_per_pixel,
            s->has_global_palette);

    if (!s->has_global_palette) {
        s->background_color_index = -1;
        return 0;
    }

    /* global color table: 2^bpp RGB triplets */
    s->background_color_index = bg_index;
    if (bytestream2_get_bytes_left(&s->gb) < (1 << s->bits_per_pixel) * 3)
        return AVERROR_INVALIDDATA;
    gif_read_palette(s, s->global_palette, 1 << s->bits_per_pixel);
    s->bg_color = s->global_palette[s->background_color_index];
    return 0;
}
示例3: gif_read_header1
/* Parse the GIF signature and logical screen descriptor.
 * Consumes 13 header bytes plus, if present, the global color table.
 * Returns 0 on success or a negative AVERROR code on malformed input. */
static int gif_read_header1(GifState *s)
{
uint8_t sig[6];
int v, n;
int background_color_index;
/* signature (6 bytes) + logical screen descriptor (7 bytes) */
if (bytestream2_get_bytes_left(&s->gb) < 13)
return AVERROR_INVALIDDATA;
/* read gif signature */
bytestream2_get_bufferu(&s->gb, sig, 6);
if (memcmp(sig, gif87a_sig, 6) &&
memcmp(sig, gif89a_sig, 6))
return AVERROR_INVALIDDATA;
/* read screen header */
s->transparent_color_index = -1;
s->screen_width = bytestream2_get_le16u(&s->gb);
s->screen_height = bytestream2_get_le16u(&s->gb);
/* NOTE(review): unlike other variants of this parser, the screen
 * dimensions are not range-checked here — confirm callers bound them. */
/* packed fields: bit 7 = global palette flag, bits 6..4 = color
 * resolution - 1, bits 2..0 = palette size exponent - 1 */
v = bytestream2_get_byteu(&s->gb);
s->color_resolution = ((v & 0x70) >> 4) + 1;
s->has_global_palette = (v & 0x80);
s->bits_per_pixel = (v & 0x07) + 1;
background_color_index = bytestream2_get_byteu(&s->gb);
/* a non-zero aspect byte encodes pixel aspect ratio as (n + 15) / 64 */
n = bytestream2_get_byteu(&s->gb);
if (n) {
s->avctx->sample_aspect_ratio.num = n + 15;
s->avctx->sample_aspect_ratio.den = 64;
}
ff_dlog(s->avctx, "screen_w=%d screen_h=%d bpp=%d global_palette=%d\n",
s->screen_width, s->screen_height, s->bits_per_pixel,
s->has_global_palette);
if (s->has_global_palette) {
s->background_color_index = background_color_index;
/* global color table holds 2^bpp RGB triplets */
n = 1 << s->bits_per_pixel;
if (bytestream2_get_bytes_left(&s->gb) < n * 3)
return AVERROR_INVALIDDATA;
gif_read_palette(s, s->global_palette, n);
s->bg_color = s->global_palette[s->background_color_index];
} else
s->background_color_index = -1;
return 0;
}
示例4: read_uncompressed_sgi
/**
 * Read an uncompressed SGI image.
 * @param out_buf output buffer
 * @param s the current image state
 * @return 0 if read success, otherwise return -1.
 */
static int read_uncompressed_sgi(unsigned char *out_buf, SgiState *s)
{
    GetByteContext planes[4];
    unsigned int plane_size = s->height * s->width * s->bytes_per_channel;
    int row, col, chan;

    /* make sure the input holds every plane before touching it */
    if (plane_size * s->depth > bytestream2_get_bytes_left(&s->g))
        return AVERROR_INVALIDDATA;

    /* one independent reader per plane, positioned at the plane's start */
    for (chan = 0; chan < s->depth; chan++) {
        planes[chan] = s->g;
        bytestream2_skip(&planes[chan], chan * plane_size);
    }

    /* SGI stores rows bottom-up; interleave the planes per pixel */
    for (row = s->height - 1; row >= 0; row--) {
        uint8_t *dst = out_buf + row * s->linesize;
        if (s->bytes_per_channel == 1) {
            for (col = 0; col < s->width; col++)
                for (chan = 0; chan < s->depth; chan++)
                    *dst++ = bytestream2_get_byteu(&planes[chan]);
        } else {
            uint16_t *dst16 = (uint16_t *)dst;
            for (col = 0; col < s->width; col++)
                for (chan = 0; chan < s->depth; chan++)
                    *dst16++ = bytestream2_get_ne16u(&planes[chan]);
        }
    }
    return 0;
}
示例5: expand_rle_row
/**
 * Expand an RLE row into a channel.
 * @param s the current image state
 * @param out_buf Points to one line after the output buffer.
 * @param out_end end of line in output buffer
 * @param pixelstride pixel stride of input buffer
 * @return size of output in bytes, -1 if buffer overflows
 */
static int expand_rle_row(SgiState *s, uint8_t *out_buf,
                          uint8_t *out_end, int pixelstride)
{
    unsigned char opcode, run;
    unsigned char *start = out_buf;

    for (;;) {
        if (bytestream2_get_bytes_left(&s->g) < 1)
            return AVERROR_INVALIDDATA;
        opcode = bytestream2_get_byteu(&s->g);
        run    = opcode & 0x7f;
        if (run == 0) /* a zero count terminates the row */
            return (out_buf - start) / pixelstride;

        /* Check for buffer overflow.
         * NOTE(review): this also rejects a run that would end exactly at
         * out_end — possibly stricter than necessary; confirm against
         * how callers compute out_end before relaxing. */
        if (out_buf + pixelstride * run >= out_end)
            return -1;

        if (opcode & 0x80) {
            /* literal run: copy `run` raw bytes from the stream */
            while (run--) {
                *out_buf = bytestream2_get_byte(&s->g);
                out_buf += pixelstride;
            }
        } else {
            /* replicate run: repeat the next byte `run` times */
            unsigned char value = bytestream2_get_byte(&s->g);
            while (run--) {
                *out_buf = value;
                out_buf += pixelstride;
            }
        }
    }
}
示例6: get_cox
/** get common part for COD and COC segments */
static int get_cox(J2kDecoderContext *s, J2kCodingStyle *c)
{
    /* nreslevels(1) + cblk w/h(2) + cblk style(1) + transform(1) */
    if (bytestream2_get_bytes_left(&s->g) < 5)
        return AVERROR(EINVAL);

    c->nreslevels       = bytestream2_get_byteu(&s->g) + 1; // num of resolution levels - 1
    c->log2_cblk_width  = bytestream2_get_byteu(&s->g) + 2; // cblk width
    c->log2_cblk_height = bytestream2_get_byteu(&s->g) + 2; // cblk height

    c->cblk_style = bytestream2_get_byteu(&s->g);
    if (c->cblk_style) // cblk style
        av_log(s->avctx, AV_LOG_WARNING, "extra cblk styles %X\n", c->cblk_style);

    c->transform = bytestream2_get_byteu(&s->g); // transformation

    /* per-level precinct sizes follow only when explicit precincts are
     * signalled in the coding style; consumed but unused here */
    if (c->csty & J2K_CSTY_PREC) {
        int lev;
        for (lev = 0; lev < c->nreslevels; lev++)
            bytestream2_get_byte(&s->g);
    }
    return 0;
}
示例7: gif_read_extension
/* Parse a GIF extension block; fully handles the Graphic Control
 * Extension (transparency, delay, disposal) and skips all other
 * extension types sub-block by sub-block.
 * Returns 0 on success, AVERROR_INVALIDDATA on truncated input. */
static int gif_read_extension(GifState *s)
{
    int ext_code, ext_len, gce_flags, gce_transparent_index;

    /* There must be at least 2 bytes:
     * 1 for extension label and 1 for extension length. */
    if (bytestream2_get_bytes_left(&s->gb) < 2)
        return AVERROR_INVALIDDATA;

    ext_code = bytestream2_get_byteu(&s->gb);
    ext_len  = bytestream2_get_byteu(&s->gb);
    av_dlog(s->avctx, "ext_code=0x%x len=%d\n", ext_code, ext_len);

    switch (ext_code) {
    case GIF_GCE_EXT_LABEL:
        if (ext_len != 4)
            goto discard_ext;

        /* We need at least 5 bytes more: 4 is for extension body
         * and 1 for next block size. */
        if (bytestream2_get_bytes_left(&s->gb) < 5)
            return AVERROR_INVALIDDATA;

        gce_flags = bytestream2_get_byteu(&s->gb);
        bytestream2_skipu(&s->gb, 2); // delay during which the frame is shown
        gce_transparent_index = bytestream2_get_byteu(&s->gb);
        if (gce_flags & 0x01)
            s->transparent_color_index = gce_transparent_index;
        else
            s->transparent_color_index = -1;
        s->gce_disposal = (gce_flags >> 2) & 0x7;

        av_dlog(s->avctx, "gce_flags=%x tcolor=%d disposal=%d\n",
                gce_flags,
                s->transparent_color_index, s->gce_disposal);

        if (s->gce_disposal > 3) {
            /* Fix: log the offending disposal value instead of ext_len,
             * which is always 4 at this point. Log before clamping so the
             * invalid value is still visible. */
            av_dlog(s->avctx, "invalid value in gce_disposal (%d). Using default value of 0.\n",
                    s->gce_disposal);
            s->gce_disposal = GCE_DISPOSAL_NONE;
        }

        ext_len = bytestream2_get_byteu(&s->gb);
        break;
    }

    /* NOTE: many extension blocks can come after */
discard_ext:
    while (ext_len) {
        /* There must be at least ext_len bytes and 1 for next block size byte. */
        if (bytestream2_get_bytes_left(&s->gb) < ext_len + 1)
            return AVERROR_INVALIDDATA;
        bytestream2_skipu(&s->gb, ext_len);
        ext_len = bytestream2_get_byteu(&s->gb);
        av_dlog(s->avctx, "ext_len1=%d\n", ext_len);
    }
    return 0;
}
示例8: tgq_decode_frame
/* Decode one EA TGQ frame (legacy context-owned AVFrame API).
 * Parses the 8-byte container header plus stream header, then decodes
 * the 16x16 macroblock grid into s->frame.
 * Returns the consumed packet size on success, negative on error. */
static int tgq_decode_frame(AVCodecContext *avctx,
void *data, int *got_frame,
AVPacket *avpkt){
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
TgqContext *s = avctx->priv_data;
int x,y;
int big_endian;
/* need the fixed header before reading any fields */
if (buf_size < 16) {
av_log(avctx, AV_LOG_WARNING, "truncated header\n");
return -1;
}
/* presumably an endianness heuristic: a sane little-endian value at
 * offset 4 stays below 0x00100000 — TODO confirm against the muxer */
big_endian = AV_RL32(&buf[4]) > 0x000FFFFF;
bytestream2_init(&s->gb, buf + 8, buf_size - 8);
if (big_endian) {
s->width = bytestream2_get_be16u(&s->gb);
s->height = bytestream2_get_be16u(&s->gb);
} else {
s->width = bytestream2_get_le16u(&s->gb);
s->height = bytestream2_get_le16u(&s->gb);
}
/* on a size change, update dimensions and drop the old frame buffer */
if (s->avctx->width!=s->width || s->avctx->height!=s->height) {
avcodec_set_dimensions(s->avctx, s->width, s->height);
if (s->frame.data[0])
avctx->release_buffer(avctx, &s->frame);
}
/* one quantizer byte drives the quant table; 3 bytes skipped */
tgq_calculate_qtable(s, bytestream2_get_byteu(&s->gb));
bytestream2_skip(&s->gb, 3);
/* (re)allocate the frame lazily; every TGQ frame is intra-only */
if (!s->frame.data[0]) {
s->frame.key_frame = 1;
s->frame.pict_type = AV_PICTURE_TYPE_I;
s->frame.buffer_hints = FF_BUFFER_HINTS_VALID;
if (ff_get_buffer(avctx, &s->frame)) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
}
/* decode the macroblock grid, rounded up to cover the whole frame */
for (y = 0; y < FFALIGN(avctx->height, 16) >> 4; y++)
for (x = 0; x < FFALIGN(avctx->width, 16) >> 4; x++)
if (tgq_decode_mb(s, y, x) < 0)
return AVERROR_INVALIDDATA;
*got_frame = 1;
*(AVFrame*)data = s->frame;
return avpkt->size;
}
示例9: get_coc
/** get coding parameters for a component in the whole image on a particular tile */
static int get_coc(J2kDecoderContext *s, J2kCodingStyle *c, uint8_t *properties)
{
    int compno, ret;

    if (bytestream2_get_bytes_left(&s->g) < 2)
        return AVERROR(EINVAL);

    compno = bytestream2_get_byteu(&s->g);
    /* Fix: validate the component index — an out-of-range value from the
     * bitstream would make the writes to c[compno] and properties[compno]
     * go out of bounds. */
    if (compno >= s->ncomponents) {
        av_log(s->avctx, AV_LOG_ERROR, "invalid component number %d\n", compno);
        return AVERROR_INVALIDDATA;
    }

    c += compno;
    c->csty = bytestream2_get_byte(&s->g);

    /* Fix: propagate failures from the shared COD/COC parser. */
    if ((ret = get_cox(s, c)) < 0)
        return ret;

    properties[compno] |= HAD_COC;
    return 0;
}
示例10: tgq_decode_frame
/* Decode one EA TGQ frame into a caller-provided AVFrame.
 * Returns the consumed packet size on success, negative on error. */
static int tgq_decode_frame(AVCodecContext *avctx,
                            void *data, int *got_frame,
                            AVPacket *avpkt)
{
    TgqContext *s = avctx->priv_data;
    AVFrame *frame = data;
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    int big_endian, mb_x, mb_y, ret;

    if (buf_size < 16) {
        av_log(avctx, AV_LOG_WARNING, "truncated header\n");
        return AVERROR_INVALIDDATA;
    }

    /* the 32-bit field at offset 4 decides the header's endianness */
    big_endian = AV_RL32(&buf[4]) > 0x000FFFFF;
    bytestream2_init(&s->gb, buf + 8, buf_size - 8);
    if (big_endian) {
        s->width  = bytestream2_get_be16u(&s->gb);
        s->height = bytestream2_get_be16u(&s->gb);
    } else {
        s->width  = bytestream2_get_le16u(&s->gb);
        s->height = bytestream2_get_le16u(&s->gb);
    }

    if ((ret = ff_set_dimensions(s->avctx, s->width, s->height)) < 0)
        return ret;

    /* one quantizer byte drives the quant table; 3 bytes are skipped */
    tgq_calculate_qtable(s, bytestream2_get_byteu(&s->gb));
    bytestream2_skip(&s->gb, 3);

    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
        return ret;
    frame->key_frame = 1;
    frame->pict_type = AV_PICTURE_TYPE_I;

    /* decode the 16x16 macroblock grid, rounded up to cover the frame */
    for (mb_y = 0; mb_y < FFALIGN(avctx->height, 16) >> 4; mb_y++)
        for (mb_x = 0; mb_x < FFALIGN(avctx->width, 16) >> 4; mb_x++)
            if (tgq_decode_mb(s, frame, mb_y, mb_x) < 0)
                return AVERROR_INVALIDDATA;

    *got_frame = 1;
    return avpkt->size;
}
示例11: get_qcx
/** get common part for QCD and QCC segments */
static int get_qcx(J2kDecoderContext *s, int n, J2kQuantStyle *q)
{
int i, x;
if (bytestream2_get_bytes_left(&s->g) < 1)
return AVERROR(EINVAL);
x = bytestream2_get_byteu(&s->g); // Sqcd
q->nguardbits = x >> 5;
q->quantsty = x & 0x1f;
if (q->quantsty == J2K_QSTY_NONE){
n -= 3;
if (bytestream2_get_bytes_left(&s->g) < n || 32*3 < n)
return AVERROR(EINVAL);
for (i = 0; i < n; i++)
q->expn[i] = bytestream2_get_byteu(&s->g) >> 3;
} else if (q->quantsty == J2K_QSTY_SI){
示例12: expand_rle_row8
/**
 * Expand an RLE row into a channel.
 * @param s the current image state
 * @param out_buf Points to one line after the output buffer.
 * @param len length of out_buf in bytes
 * @param pixelstride pixel stride of input buffer
 * @return size of output in bytes, -1 if buffer overflows
 */
static int expand_rle_row8(SgiState *s, uint8_t *out_buf,
                           int len, int pixelstride)
{
    unsigned char pixel, count;
    unsigned char *orig = out_buf;
    uint8_t *out_end = out_buf + len;

    while (out_buf < out_end) {
        if (bytestream2_get_bytes_left(&s->g) < 1)
            return AVERROR_INVALIDDATA;
        pixel = bytestream2_get_byteu(&s->g);
        if (!(count = (pixel & 0x7f))) {
            break;
        }
        /* Check for buffer overflow.
         * Fix: compare the run against the space actually remaining, not
         * the full row length — out_buf advances every iteration, so the
         * old `pixelstride * (count - 1) >= len` check let later runs
         * write past out_end. */
        if (out_end - out_buf <= pixelstride * (count - 1)) {
            av_log(s->avctx, AV_LOG_ERROR, "Invalid pixel count.\n");
            return AVERROR_INVALIDDATA;
        }
        if (pixel & 0x80) {
            /* literal run: copy count raw bytes from the stream */
            while (count--) {
                *out_buf = bytestream2_get_byte(&s->g);
                out_buf += pixelstride;
            }
        } else {
            /* replicate run: repeat the next byte count times */
            pixel = bytestream2_get_byte(&s->g);
            while (count--) {
                *out_buf = pixel;
                out_buf += pixelstride;
            }
        }
    }
    return (out_buf - orig) / pixelstride;
}
示例13: gif_read_image
static int gif_read_image(GifState *s, AVFrame *frame)
{
int left, top, width, height, bits_per_pixel, code_size, flags, pw;
int is_interleaved, has_local_palette, y, pass, y1, linesize, pal_size;
uint32_t *ptr, *pal, *px, *pr, *ptr1;
int ret;
uint8_t *idx;
/* At least 9 bytes of Image Descriptor. */
if (bytestream2_get_bytes_left(&s->gb) < 9)
return AVERROR_INVALIDDATA;
left = bytestream2_get_le16u(&s->gb);
top = bytestream2_get_le16u(&s->gb);
width = bytestream2_get_le16u(&s->gb);
height = bytestream2_get_le16u(&s->gb);
flags = bytestream2_get_byteu(&s->gb);
is_interleaved = flags & 0x40;
has_local_palette = flags & 0x80;
bits_per_pixel = (flags & 0x07) + 1;
av_dlog(s->avctx, "image x=%d y=%d w=%d h=%d\n", left, top, width, height);
if (has_local_palette) {
pal_size = 1 << bits_per_pixel;
if (bytestream2_get_bytes_left(&s->gb) < pal_size * 3)
return AVERROR_INVALIDDATA;
gif_read_palette(s, s->local_palette, pal_size);
pal = s->local_palette;
} else {
if (!s->has_global_palette) {
av_log(s->avctx, AV_LOG_ERROR, "picture doesn't have either global or local palette.\n");
return AVERROR_INVALIDDATA;
}
pal = s->global_palette;
}
if (s->keyframe) {
if (s->transparent_color_index == -1 && s->has_global_palette) {
/* transparency wasn't set before the first frame, fill with background color */
gif_fill(frame, s->bg_color);
} else {
/* otherwise fill with transparent color.
* this is necessary since by default picture filled with 0x80808080. */
gif_fill(frame, s->trans_color);
}
}
/* verify that all the image is inside the screen dimensions */
if (!width || width > s->screen_width || left >= s->screen_width) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid image width.\n");
return AVERROR_INVALIDDATA;
}
if (!height || height > s->screen_height || top >= s->screen_height) {
av_log(s->avctx, AV_LOG_ERROR, "Invalid image height.\n");
return AVERROR_INVALIDDATA;
}
if (left + width > s->screen_width) {
/* width must be kept around to avoid lzw vs line desync */
pw = s->screen_width - left;
av_log(s->avctx, AV_LOG_WARNING, "Image too wide by %d, truncating.\n",
left + width - s->screen_width);
} else {
pw = width;
}
if (top + height > s->screen_height) {
/* we don't care about the extra invisible lines */
av_log(s->avctx, AV_LOG_WARNING, "Image too high by %d, truncating.\n",
top + height - s->screen_height);
height = s->screen_height - top;
}
/* process disposal method */
if (s->gce_prev_disposal == GCE_DISPOSAL_BACKGROUND) {
gif_fill_rect(frame, s->stored_bg_color, s->gce_l, s->gce_t, s->gce_w, s->gce_h);
} else if (s->gce_prev_disposal == GCE_DISPOSAL_RESTORE) {
gif_copy_img_rect(s->stored_img, (uint32_t *)frame->data[0],
frame->linesize[0] / sizeof(uint32_t), s->gce_l, s->gce_t, s->gce_w, s->gce_h);
}
s->gce_prev_disposal = s->gce_disposal;
if (s->gce_disposal != GCE_DISPOSAL_NONE) {
s->gce_l = left; s->gce_t = top;
s->gce_w = pw; s->gce_h = height;
if (s->gce_disposal == GCE_DISPOSAL_BACKGROUND) {
if (s->transparent_color_index >= 0)
s->stored_bg_color = s->trans_color;
else
s->stored_bg_color = s->bg_color;
} else if (s->gce_disposal == GCE_DISPOSAL_RESTORE) {
av_fast_malloc(&s->stored_img, &s->stored_img_size, frame->linesize[0] * frame->height);
if (!s->stored_img)
return AVERROR(ENOMEM);
gif_copy_img_rect((uint32_t *)frame->data[0], s->stored_img,
//.........这里部分代码省略.........
示例14: decode_frame
/* Decode one Alias PIX picture: a big-endian header followed by
 * run-length packets (run byte + pixel value) filling rows top to
 * bottom. Returns the consumed packet size on success. */
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                        AVPacket *avpkt)
{
    AVFrame *f = data;
    GetByteContext gb;
    uint8_t *dst;
    uint8_t run;
    int w, h, depth, value, ret;
    int col, row;

    bytestream2_init(&gb, avpkt->data, avpkt->size);
    if (bytestream2_get_bytes_left(&gb) < ALIAS_HEADER_SIZE) {
        av_log(avctx, AV_LOG_ERROR, "Header too small %d.\n", avpkt->size);
        return AVERROR_INVALIDDATA;
    }

    w = bytestream2_get_be16u(&gb);
    h = bytestream2_get_be16u(&gb);
    bytestream2_skipu(&gb, 4); // obsolete X, Y offset
    depth = bytestream2_get_be16u(&gb);

    switch (depth) {
    case 24:
        avctx->pix_fmt = AV_PIX_FMT_BGR24;
        break;
    case 8:
        avctx->pix_fmt = AV_PIX_FMT_GRAY8;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Invalid pixel format.\n");
        return AVERROR_INVALIDDATA;
    }

    if ((ret = ff_set_dimensions(avctx, w, h)) < 0)
        return ret;
    if ((ret = ff_get_buffer(avctx, f, 0)) < 0)
        return ret;

    f->pict_type = AV_PICTURE_TYPE_I;
    f->key_frame = 1;

    col = 0;
    row = 1; /* counts the line currently being filled, 1-based */
    dst = f->data[0];
    while (bytestream2_get_bytes_left(&gb) > 0) {
        int i;

        /* set buffer at the right position at every new line */
        if (col == avctx->width) {
            col = 0;
            dst = f->data[0] + f->linesize[0] * row++;
            if (row > avctx->height) {
                av_log(avctx, AV_LOG_ERROR,
                       "Ended frame decoding with %d bytes left.\n",
                       bytestream2_get_bytes_left(&gb));
                return AVERROR_INVALIDDATA;
            }
        }

        /* read packet and copy data */
        run = bytestream2_get_byteu(&gb);
        if (!run || col + run > avctx->width) {
            av_log(avctx, AV_LOG_ERROR, "Invalid run length %d.\n", run);
            return AVERROR_INVALIDDATA;
        }

        if (avctx->pix_fmt == AV_PIX_FMT_BGR24) {
            value = bytestream2_get_be24(&gb);
            for (i = 0; i < run; i++) {
                AV_WB24(dst, value);
                dst += 3;
            }
        } else { // AV_PIX_FMT_GRAY8
            value = bytestream2_get_byte(&gb);
            for (i = 0; i < run; i++)
                *dst++ = value;
        }
        col += i;
    }

    if (col != w || row != h) {
        av_log(avctx, AV_LOG_ERROR, "Picture stopped at %d,%d.\n", col, row);
        return AVERROR_INVALIDDATA;
    }

    *got_frame = 1;
    return avpkt->size;
}
示例15: xwd_decode_frame
//.........这里部分代码省略.........
av_log(avctx, AV_LOG_ERROR, "input buffer too small\n");
return AVERROR_INVALIDDATA;
}
if (pixformat != XWD_Z_PIXMAP) {
avpriv_report_missing_feature(avctx, "Pixmap format %"PRIu32, pixformat);
return AVERROR_PATCHWELCOME;
}
avctx->pix_fmt = AV_PIX_FMT_NONE;
switch (vclass) {
case XWD_STATIC_GRAY:
case XWD_GRAY_SCALE:
if (bpp != 1 && bpp != 8)
return AVERROR_INVALIDDATA;
if (bpp == 1 && pixdepth == 1) {
avctx->pix_fmt = AV_PIX_FMT_MONOWHITE;
} else if (bpp == 8 && pixdepth == 8) {
avctx->pix_fmt = AV_PIX_FMT_GRAY8;
}
break;
case XWD_STATIC_COLOR:
case XWD_PSEUDO_COLOR:
if (bpp == 8)
avctx->pix_fmt = AV_PIX_FMT_PAL8;
break;
case XWD_TRUE_COLOR:
case XWD_DIRECT_COLOR:
if (bpp != 16 && bpp != 24 && bpp != 32)
return AVERROR_INVALIDDATA;
if (bpp == 16 && pixdepth == 15) {
if (rgb[0] == 0x7C00 && rgb[1] == 0x3E0 && rgb[2] == 0x1F)
avctx->pix_fmt = be ? AV_PIX_FMT_RGB555BE : AV_PIX_FMT_RGB555LE;
else if (rgb[0] == 0x1F && rgb[1] == 0x3E0 && rgb[2] == 0x7C00)
avctx->pix_fmt = be ? AV_PIX_FMT_BGR555BE : AV_PIX_FMT_BGR555LE;
} else if (bpp == 16 && pixdepth == 16) {
if (rgb[0] == 0xF800 && rgb[1] == 0x7E0 && rgb[2] == 0x1F)
avctx->pix_fmt = be ? AV_PIX_FMT_RGB565BE : AV_PIX_FMT_RGB565LE;
else if (rgb[0] == 0x1F && rgb[1] == 0x7E0 && rgb[2] == 0xF800)
avctx->pix_fmt = be ? AV_PIX_FMT_BGR565BE : AV_PIX_FMT_BGR565LE;
} else if (bpp == 24) {
if (rgb[0] == 0xFF0000 && rgb[1] == 0xFF00 && rgb[2] == 0xFF)
avctx->pix_fmt = be ? AV_PIX_FMT_RGB24 : AV_PIX_FMT_BGR24;
else if (rgb[0] == 0xFF && rgb[1] == 0xFF00 && rgb[2] == 0xFF0000)
avctx->pix_fmt = be ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_RGB24;
} else if (bpp == 32) {
if (rgb[0] == 0xFF0000 && rgb[1] == 0xFF00 && rgb[2] == 0xFF)
avctx->pix_fmt = be ? AV_PIX_FMT_ARGB : AV_PIX_FMT_BGRA;
else if (rgb[0] == 0xFF && rgb[1] == 0xFF00 && rgb[2] == 0xFF0000)
avctx->pix_fmt = be ? AV_PIX_FMT_ABGR : AV_PIX_FMT_RGBA;
}
bytestream2_skipu(&gb, ncolors * XWD_CMAP_SIZE);
break;
default:
av_log(avctx, AV_LOG_ERROR, "invalid visual class\n");
return AVERROR_INVALIDDATA;
}
if (avctx->pix_fmt == AV_PIX_FMT_NONE) {
avpriv_request_sample(avctx,
"Unknown file: bpp %"PRIu32", pixdepth %"PRIu32", vclass %"PRIu32"",
bpp, pixdepth, vclass);
return AVERROR_PATCHWELCOME;
}
if ((ret = ff_get_buffer(avctx, p, 0)) < 0)
return ret;
p->key_frame = 1;
p->pict_type = AV_PICTURE_TYPE_I;
if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
uint32_t *dst = (uint32_t *)p->data[1];
uint8_t red, green, blue;
for (i = 0; i < ncolors; i++) {
bytestream2_skipu(&gb, 4); // skip colormap entry number
red = bytestream2_get_byteu(&gb);
bytestream2_skipu(&gb, 1);
green = bytestream2_get_byteu(&gb);
bytestream2_skipu(&gb, 1);
blue = bytestream2_get_byteu(&gb);
bytestream2_skipu(&gb, 3); // skip bitmask flag and padding
dst[i] = 0xFFU << 24 | red << 16 | green << 8 | blue;
}
}
ptr = p->data[0];
for (i = 0; i < avctx->height; i++) {
bytestream2_get_bufferu(&gb, ptr, rsize);
bytestream2_skipu(&gb, lsize - rsize);
ptr += p->linesize[0];
}
*got_frame = 1;
return buf_size;
}