This article collects typical usage examples of the AV_RL16 function in C++. If you have been wondering what exactly AV_RL16 does, how to use it, or what real-world code that calls it looks like, the curated examples below may help.
Fifteen code examples of the AV_RL16 function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
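Before the examples, here is a minimal self-contained sketch of what AV_RL16 actually does: it is a macro from FFmpeg's libavutil/intreadwrite.h that reads two bytes at the given address as an unsigned 16-bit little-endian integer, regardless of host byte order or alignment (AV_RL32 and AV_RL64 are the 32- and 64-bit counterparts seen below). The snippet assumes the FFmpeg development headers are installed so the header can be included.
#include <stdio.h>
#include <stdint.h>
#include <libavutil/intreadwrite.h>

int main(void)
{
    /* A little-endian byte stream: 0x34 0x12 encodes the 16-bit value 0x1234. */
    const uint8_t buf[4] = { 0x34, 0x12, 0x78, 0x56 };

    unsigned first  = AV_RL16(buf);      /* reads 0x1234 */
    unsigned second = AV_RL16(buf + 2);  /* reads 0x5678 */

    printf("first=0x%04x second=0x%04x\n", first, second);
    return 0;
}
Since AV_RL16 is a macro, no library needs to be linked; a compile line such as cc demo.c $(pkg-config --cflags libavutil) should be enough, depending on how FFmpeg is installed.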
Example 1: skeleton_header
static int skeleton_header(AVFormatContext *s, int idx)
{
struct ogg *ogg = s->priv_data;
struct ogg_stream *os = ogg->streams + idx;
AVStream *st = s->streams[idx];
uint8_t *buf = os->buf + os->pstart;
int version_major, version_minor;
int64_t start_num, start_den;
uint64_t start_granule;
int target_idx, start_time;
strcpy(st->codec->codec_name, "skeleton");
st->codec->codec_type = AVMEDIA_TYPE_DATA;
if ((os->flags & OGG_FLAG_EOS) && os->psize == 0)
return 1;
if (os->psize < 8)
return -1;
if (!strncmp(buf, "fishead", 8)) {
if (os->psize < 64)
return -1;
version_major = AV_RL16(buf+8);
version_minor = AV_RL16(buf+10);
if (version_major != 3 && version_major != 4) {
av_log(s, AV_LOG_WARNING, "Unknown skeleton version %d.%d\n",
version_major, version_minor);
return -1;
}
// This is the overall start time. We use it for the start time of
// the skeleton stream, since if left unset lavf assumes 0,
// which we don't want since skeleton is timeless.
// FIXME: the real meaning of this field is "start playback at
// this time", which can be in the middle of a packet
start_num = AV_RL64(buf+12);
start_den = AV_RL64(buf+20);
if (start_den > 0 && start_num > 0) {
int base_den;
av_reduce(&start_time, &base_den, start_num, start_den, INT_MAX);
avpriv_set_pts_info(st, 64, 1, base_den);
os->lastpts =
st->start_time = start_time;
}
} else if (!strncmp(buf, "fisbone", 8)) {
if (os->psize < 52)
return -1;
target_idx = ogg_find_stream(ogg, AV_RL32(buf+12));
start_granule = AV_RL64(buf+36);
if (target_idx < 0) {
av_log(s, AV_LOG_WARNING, "Serial number in fisbone doesn't match any stream\n");
return 1;
}
os = ogg->streams + target_idx;
if (os->start_granule != OGG_NOGRANULE_VALUE) {
av_log(s, AV_LOG_WARNING, "Multiple fisbone for the same stream\n");
return 1;
}
if (start_granule != OGG_NOGRANULE_VALUE) {
os->start_granule = start_granule;
}
}
return 1;
}
Example 2: ipmovie_read_header
static int ipmovie_read_header(AVFormatContext *s)
{
IPMVEContext *ipmovie = s->priv_data;
AVIOContext *pb = s->pb;
AVPacket pkt;
AVStream *st;
unsigned char chunk_preamble[CHUNK_PREAMBLE_SIZE];
int chunk_type, i;
uint8_t signature_buffer[sizeof(signature)];
avio_read(pb, signature_buffer, sizeof(signature_buffer));
while (memcmp(signature_buffer, signature, sizeof(signature))) {
memmove(signature_buffer, signature_buffer + 1, sizeof(signature_buffer) - 1);
signature_buffer[sizeof(signature_buffer) - 1] = avio_r8(pb);
if (avio_feof(pb))
return AVERROR_EOF;
}
/* initialize private context members */
ipmovie->video_pts = ipmovie->audio_frame_count = 0;
ipmovie->audio_chunk_offset = ipmovie->video_chunk_offset =
ipmovie->decode_map_chunk_offset = 0;
/* on the first read, this will position the stream at the first chunk */
ipmovie->next_chunk_offset = avio_tell(pb) + 4;
for (i = 0; i < 256; i++)
ipmovie->palette[i] = 0xFFU << 24;
/* process the first chunk which should be CHUNK_INIT_VIDEO */
if (process_ipmovie_chunk(ipmovie, pb, &pkt) != CHUNK_INIT_VIDEO)
return AVERROR_INVALIDDATA;
/* peek ahead to the next chunk-- if it is an init audio chunk, process
* it; if it is the first video chunk, this is a silent file */
if (avio_read(pb, chunk_preamble, CHUNK_PREAMBLE_SIZE) !=
CHUNK_PREAMBLE_SIZE)
return AVERROR(EIO);
chunk_type = AV_RL16(&chunk_preamble[2]);
avio_seek(pb, -CHUNK_PREAMBLE_SIZE, SEEK_CUR);
if (chunk_type == CHUNK_VIDEO)
ipmovie->audio_type = AV_CODEC_ID_NONE; /* no audio */
else if (process_ipmovie_chunk(ipmovie, pb, &pkt) != CHUNK_INIT_AUDIO)
return AVERROR_INVALIDDATA;
/* initialize the stream decoders */
st = avformat_new_stream(s, NULL);
if (!st)
return AVERROR(ENOMEM);
avpriv_set_pts_info(st, 63, 1, 1000000);
ipmovie->video_stream_index = st->index;
st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
st->codec->codec_id = AV_CODEC_ID_INTERPLAY_VIDEO;
st->codec->codec_tag = 0; /* no fourcc */
st->codec->width = ipmovie->video_width;
st->codec->height = ipmovie->video_height;
st->codec->bits_per_coded_sample = ipmovie->video_bpp;
if (ipmovie->audio_type) {
st = avformat_new_stream(s, NULL);
if (!st)
return AVERROR(ENOMEM);
avpriv_set_pts_info(st, 32, 1, ipmovie->audio_sample_rate);
ipmovie->audio_stream_index = st->index;
st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
st->codec->codec_id = ipmovie->audio_type;
st->codec->codec_tag = 0; /* no tag */
st->codec->channels = ipmovie->audio_channels;
st->codec->channel_layout = st->codec->channels == 1 ? AV_CH_LAYOUT_MONO :
AV_CH_LAYOUT_STEREO;
st->codec->sample_rate = ipmovie->audio_sample_rate;
st->codec->bits_per_coded_sample = ipmovie->audio_bits;
st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
st->codec->bits_per_coded_sample;
if (st->codec->codec_id == AV_CODEC_ID_INTERPLAY_DPCM)
st->codec->bit_rate /= 2;
st->codec->block_align = st->codec->channels * st->codec->bits_per_coded_sample;
}
return 0;
}
Example 3: get_tcp_server_response
/** Read incoming MMST media, header or command packet. */
static MMSSCPacketType get_tcp_server_response(MMSTContext *mmst)
{
int read_result;
MMSSCPacketType packet_type= -1;
MMSContext *mms = &mmst->mms;
for(;;) {
read_result = url_read_complete(mms->mms_hd, mms->in_buffer, 8);
if (read_result != 8) {
if(read_result < 0) {
av_log(NULL, AV_LOG_ERROR,
"Error reading packet header: %d (%s)\n",
read_result, strerror(read_result));
packet_type = SC_PKT_CANCEL;
} else {
av_log(NULL, AV_LOG_ERROR,
"The server closed the connection\n");
packet_type = SC_PKT_NO_DATA;
}
return packet_type;
}
// handle command packet.
if(AV_RL32(mms->in_buffer + 4)==0xb00bface) {
int length_remaining, hr;
mmst->incoming_flags= mms->in_buffer[3];
read_result= url_read_complete(mms->mms_hd, mms->in_buffer+8, 4);
if(read_result != 4) {
av_log(NULL, AV_LOG_ERROR,
"Reading command packet length failed: %d (%s)\n",
read_result,
read_result < 0 ? strerror(read_result) :
"The server closed the connection");
return read_result < 0 ? read_result : AVERROR_IO;
}
length_remaining= AV_RL32(mms->in_buffer+8) + 4;
dprintf(NULL, "Length remaining is %d\n", length_remaining);
// read the rest of the packet.
if (length_remaining < 0
|| length_remaining > sizeof(mms->in_buffer) - 12) {
av_log(NULL, AV_LOG_ERROR,
"Incoming packet length %d exceeds bufsize %zu\n",
length_remaining, sizeof(mms->in_buffer) - 12);
return AVERROR_INVALIDDATA;
}
read_result = url_read_complete(mms->mms_hd, mms->in_buffer + 12,
length_remaining) ;
if (read_result != length_remaining) {
av_log(NULL, AV_LOG_ERROR,
"Reading pkt data (length=%d) failed: %d (%s)\n",
length_remaining, read_result,
read_result < 0 ? strerror(read_result) :
"The server closed the connection");
return read_result < 0 ? read_result : AVERROR_IO;
}
packet_type= AV_RL16(mms->in_buffer+36);
hr = AV_RL32(mms->in_buffer + 40);
if (hr) {
av_log(NULL, AV_LOG_ERROR,
"Server sent an error status code: 0x%08x\n", hr);
return AVERROR_UNKNOWN;
}
} else {
int length_remaining;
int packet_id_type;
int tmp;
// note we cache the first 8 bytes,
// then fill up the buffer with the others
tmp = AV_RL16(mms->in_buffer + 6);
length_remaining = (tmp - 8) & 0xffff;
mmst->incoming_packet_seq = AV_RL32(mms->in_buffer);
packet_id_type = mms->in_buffer[4];
mmst->incoming_flags = mms->in_buffer[5];
if (length_remaining < 0
|| length_remaining > sizeof(mms->in_buffer) - 8) {
av_log(NULL, AV_LOG_ERROR,
"Data length %d is invalid or too large (max=%zu)\n",
length_remaining, sizeof(mms->in_buffer));
return AVERROR_INVALIDDATA;
}
mms->remaining_in_len = length_remaining;
mms->read_in_ptr = mms->in_buffer;
read_result= url_read_complete(mms->mms_hd, mms->in_buffer, length_remaining);
if(read_result != length_remaining) {
av_log(NULL, AV_LOG_ERROR,
"Failed to read packet data of size %d: %d (%s)\n",
length_remaining, read_result,
read_result < 0 ? strerror(read_result) :
"The server closed the connection");
return read_result < 0 ? read_result : AVERROR_IO;
}
// if we successfully read everything.
if(packet_id_type == mmst->header_packet_id) {
packet_type = SC_PKT_ASF_HEADER;
// Store the asf header
//......... part of the code omitted here .........
Example 4: decode_frame
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
AVPacket *avpkt)
{
AVFrame *frame = data;
const uint8_t *buf = avpkt->data;
const uint8_t *buf_end = buf + avpkt->size;
KgvContext * const c = avctx->priv_data;
int offsets[8];
uint8_t *out, *prev;
int outcnt = 0, maxcnt;
int w, h, i, res;
if (avpkt->size < 2)
return AVERROR_INVALIDDATA;
w = (buf[0] + 1) * 8;
h = (buf[1] + 1) * 8;
buf += 2;
if (w != avctx->width || h != avctx->height) {
av_freep(&c->frame_buffer);
av_freep(&c->last_frame_buffer);
if ((res = ff_set_dimensions(avctx, w, h)) < 0)
return res;
}
if (!c->frame_buffer) {
c->frame_buffer = av_mallocz(avctx->width * avctx->height * 2);
c->last_frame_buffer = av_mallocz(avctx->width * avctx->height * 2);
if (!c->frame_buffer || !c->last_frame_buffer) {
decode_flush(avctx);
return AVERROR(ENOMEM);
}
}
maxcnt = w * h;
if ((res = ff_get_buffer(avctx, frame, 0)) < 0)
return res;
out = (uint8_t*)c->frame_buffer;
prev = (uint8_t*)c->last_frame_buffer;
for (i = 0; i < 8; i++)
offsets[i] = -1;
while (outcnt < maxcnt && buf_end - 2 >= buf) {
int code = AV_RL16(buf);
buf += 2;
if (!(code & 0x8000)) {
AV_WN16A(&out[2 * outcnt], code); // rgb555 pixel coded directly
outcnt++;
} else {
int count;
if ((code & 0x6000) == 0x6000) {
// copy from previous frame
int oidx = (code >> 10) & 7;
int start;
count = (code & 0x3FF) + 3;
if (offsets[oidx] < 0) {
if (buf_end - 3 < buf)
break;
offsets[oidx] = AV_RL24(buf);
buf += 3;
}
start = (outcnt + offsets[oidx]) % maxcnt;
if (maxcnt - start < count || maxcnt - outcnt < count)
break;
if (!prev) {
av_log(avctx, AV_LOG_ERROR,
"Frame reference does not exist\n");
break;
}
memcpy(out + 2 * outcnt, prev + 2 * start, 2 * count);
} else {
// copy from earlier in this frame
int offset = (code & 0x1FFF) + 1;
if (!(code & 0x6000)) {
count = 2;
} else if ((code & 0x6000) == 0x2000) {
count = 3;
} else {
if (buf_end - 1 < buf)
break;
count = 4 + *buf++;
}
if (outcnt < offset || maxcnt - outcnt < count)
break;
av_memcpy_backptr(out + 2 * outcnt, 2 * offset, 2 * count);
}
//......... part of the code omitted here .........
Example 5: truemotion1_decode_header
/* Returns the number of bytes consumed from the bytestream. Returns -1 if
* there was an error while decoding the header */
static int truemotion1_decode_header(TrueMotion1Context *s)
{
int i;
int width_shift = 0;
int new_pix_fmt;
struct frame_header header;
uint8_t header_buffer[128] = { 0 }; /* logical maximum size of the header */
const uint8_t *sel_vector_table;
header.header_size = ((s->buf[0] >> 5) | (s->buf[0] << 3)) & 0x7f;
if (s->buf[0] < 0x10 || header.header_size >= s->size)
{
av_log(s->avctx, AV_LOG_ERROR, "invalid header size (%d)\n", s->buf[0]);
return -1;
}
/* unscramble the header bytes with a XOR operation */
for (i = 1; i < header.header_size; i++)
header_buffer[i - 1] = s->buf[i] ^ s->buf[i + 1];
header.compression = header_buffer[0];
header.deltaset = header_buffer[1];
header.vectable = header_buffer[2];
header.ysize = AV_RL16(&header_buffer[3]);
header.xsize = AV_RL16(&header_buffer[5]);
header.checksum = AV_RL16(&header_buffer[7]);
header.version = header_buffer[9];
header.header_type = header_buffer[10];
header.flags = header_buffer[11];
header.control = header_buffer[12];
/* Version 2 */
if (header.version >= 2)
{
if (header.header_type > 3)
{
av_log(s->avctx, AV_LOG_ERROR, "invalid header type (%d)\n", header.header_type);
return -1;
} else if ((header.header_type == 2) || (header.header_type == 3)) {
s->flags = header.flags;
if (!(s->flags & FLAG_INTERFRAME))
s->flags |= FLAG_KEYFRAME;
} else
s->flags = FLAG_KEYFRAME;
} else /* Version 1 */
s->flags = FLAG_KEYFRAME;
if (s->flags & FLAG_SPRITE) {
av_log_ask_for_sample(s->avctx, "SPRITE frame found.\n");
/* FIXME header.width, height, xoffset and yoffset aren't initialized */
#if 0
s->w = header.width;
s->h = header.height;
s->x = header.xoffset;
s->y = header.yoffset;
#else
return -1;
#endif
} else {
s->w = header.xsize;
s->h = header.ysize;
if (header.header_type < 2) {
if ((s->w < 213) && (s->h >= 176))
{
s->flags |= FLAG_INTERPOLATED;
av_log_ask_for_sample(s->avctx, "INTERPOLATION selected.\n");
}
}
}
if (header.compression >= 17) {
av_log(s->avctx, AV_LOG_ERROR, "invalid compression type (%d)\n", header.compression);
return -1;
}
if ((header.deltaset != s->last_deltaset) ||
(header.vectable != s->last_vectable))
select_delta_tables(s, header.deltaset);
if ((header.compression & 1) && header.header_type)
sel_vector_table = pc_tbl2;
else {
if (header.vectable > 0 && header.vectable < 4)
sel_vector_table = tables[header.vectable - 1];
else {
av_log(s->avctx, AV_LOG_ERROR, "invalid vector table id (%d)\n", header.vectable);
return -1;
}
}
if (compression_types[header.compression].algorithm == ALGO_RGB24H) {
new_pix_fmt = PIX_FMT_RGB32;
width_shift = 1;
} else
new_pix_fmt = PIX_FMT_RGB555; // RGB565 is supported as well
s->w >>= width_shift;
if (av_image_check_size(s->w, s->h, 0, s->avctx) < 0)
//......... part of the code omitted here .........
Example 6: str_probe
static int str_probe(AVProbeData *p)
{
uint8_t *sector= p->buf;
uint8_t *end= sector + p->buf_size;
int aud=0, vid=0;
if (p->buf_size < RAW_CD_SECTOR_SIZE)
return 0;
if ((AV_RL32(&p->buf[0]) == RIFF_TAG) &&
(AV_RL32(&p->buf[8]) == CDXA_TAG)) {
/* RIFF header seen; skip 0x2C bytes */
sector += RIFF_HEADER_SIZE;
}
while (end - sector >= RAW_CD_SECTOR_SIZE) {
/* look for CD sync header (00, 0xFF x 10, 00) */
if (memcmp(sector,sync_header,sizeof(sync_header)))
return 0;
if (sector[0x11] >= 32)
return 0;
switch (sector[0x12] & CDXA_TYPE_MASK) {
case CDXA_TYPE_DATA:
case CDXA_TYPE_VIDEO: {
int current_sector = AV_RL16(&sector[0x1C]);
int sector_count = AV_RL16(&sector[0x1E]);
int frame_size = AV_RL32(&sector[0x24]);
if(!( frame_size>=0
&& current_sector < sector_count
&& sector_count*VIDEO_DATA_CHUNK_SIZE >=frame_size)){
return 0;
}
/*st->codec->width = AV_RL16(&sector[0x28]);
st->codec->height = AV_RL16(&sector[0x2A]);*/
// if (current_sector == sector_count-1) {
vid++;
// }
}
break;
case CDXA_TYPE_AUDIO:
if(sector[0x13]&0x2A)
return 0;
aud++;
break;
default:
if(sector[0x12] & CDXA_TYPE_MASK)
return 0;
}
sector += RAW_CD_SECTOR_SIZE;
}
/* MPEG files (like those ripped from VCDs) can also look like this;
* only return half certainty */
if(vid+aud > 3) return 50;
else if(vid+aud) return 1;
else return 0;
}
Example 7: get_ext_stream_properties
static int get_ext_stream_properties(char *buf, int buf_len, int stream_num, struct asf_priv* asf, int is_video)
{
int pos=0;
uint8_t *buffer = &buf[0];
uint64_t avg_ft;
unsigned bitrate;
while ((pos = find_asf_guid(buf, asf_ext_stream_header, pos, buf_len)) >= 0) {
int this_stream_num, stnamect, payct, i;
int buf_max_index=pos+50;
if (buf_max_index > buf_len) return 0;
buffer = &buf[pos];
// the following info is available
// some of it may be useful but we're skipping it for now
// starttime(8 bytes), endtime(8),
// leak-datarate(4), bucket-datasize(4), init-bucket-fullness(4),
// alt-leak-datarate(4), alt-bucket-datasize(4), alt-init-bucket-fullness(4),
// max-object-size(4),
// flags(4) (reliable,seekable,no_cleanpoints?,resend-live-cleanpoints, rest of bits reserved)
buffer += 8+8;
bitrate = AV_RL32(buffer);
buffer += 8*4;
this_stream_num=AV_RL16(buffer);buffer+=2;
if (this_stream_num == stream_num) {
buf_max_index+=14;
if (buf_max_index > buf_len) return 0;
buffer+=2; //skip stream-language-id-index
avg_ft = AV_RL64(buffer); // provided in 100ns units
buffer+=8;
asf->bps = bitrate / 8;
// after this are values for stream-name-count and
// payload-extension-system-count
// followed by associated info for each
stnamect = AV_RL16(buffer);buffer+=2;
payct = AV_RL16(buffer);buffer+=2;
// need to read stream names if present in order
// to get lengths - values are ignored for now
for (i=0; i<stnamect; i++) {
int stream_name_len;
buf_max_index+=4;
if (buf_max_index > buf_len) return 0;
buffer+=2; //language_id_index
stream_name_len = AV_RL16(buffer);buffer+=2;
buffer+=stream_name_len; //stream_name
buf_max_index+=stream_name_len;
if (buf_max_index > buf_len) return 0;
}
if (is_video) {
asf->vid_repdata_count = payct;
asf->vid_repdata_sizes = malloc(payct*sizeof(int));
} else {
asf->aud_repdata_count = payct;
asf->aud_repdata_sizes = malloc(payct*sizeof(int));
}
for (i=0; i<payct; i++) {
int payload_len;
buf_max_index+=22;
if (buf_max_index > buf_len) return 0;
// Each payload extension definition starts with a GUID.
// In dvr-ms files one of these indicates the presence of an
// extension that contains pts values, and this is always present
// in the video and audio streams.
// Another GUID indicates the presence of an extension
// that contains useful video frame demuxing information.
// Note that the extension data in each packet does not contain
// these GUIDs and that this header section defines the order the data
// will appear in.
if (memcmp(buffer, asf_dvr_ms_timing_rep_data, 16) == 0) {
if (is_video)
asf->vid_ext_timing_index = i;
else
asf->aud_ext_timing_index = i;
} else if (is_video && memcmp(buffer, asf_dvr_ms_vid_frame_rep_data, 16) == 0)
asf->vid_ext_frame_index = i;
buffer+=16;
payload_len = AV_RL16(buffer);buffer+=2;
if (is_video)
asf->vid_repdata_sizes[i] = payload_len;
else
asf->aud_repdata_sizes[i] = payload_len;
buffer+=4;//sys_len
}
return 1;
}
}
return 1;
}
Example 8: wsvqa_read_header
static int wsvqa_read_header(AVFormatContext *s,
AVFormatParameters *ap)
{
WsVqaDemuxContext *wsvqa = s->priv_data;
ByteIOContext *pb = s->pb;
AVStream *st;
unsigned char *header;
unsigned char scratch[VQA_PREAMBLE_SIZE];
unsigned int chunk_tag;
unsigned int chunk_size;
/* initialize the video decoder stream */
st = av_new_stream(s, 0);
if (!st)
return AVERROR(ENOMEM);
av_set_pts_info(st, 33, 1, VQA_FRAMERATE);
wsvqa->video_stream_index = st->index;
st->codec->codec_type = CODEC_TYPE_VIDEO;
st->codec->codec_id = CODEC_ID_WS_VQA;
st->codec->codec_tag = 0; /* no fourcc */
/* skip to the start of the VQA header */
url_fseek(pb, 20, SEEK_SET);
/* the VQA header needs to go to the decoder */
st->codec->extradata_size = VQA_HEADER_SIZE;
st->codec->extradata = av_mallocz(VQA_HEADER_SIZE + FF_INPUT_BUFFER_PADDING_SIZE);
header = (unsigned char *)st->codec->extradata;
if (get_buffer(pb, st->codec->extradata, VQA_HEADER_SIZE) !=
VQA_HEADER_SIZE) {
av_free(st->codec->extradata);
return AVERROR(EIO);
}
st->codec->width = AV_RL16(&header[6]);
st->codec->height = AV_RL16(&header[8]);
/* initialize the audio decoder stream for VQA v1 or nonzero samplerate */
if (AV_RL16(&header[24]) || (AV_RL16(&header[0]) == 1 && AV_RL16(&header[2]) == 1)) {
st = av_new_stream(s, 0);
if (!st)
return AVERROR(ENOMEM);
av_set_pts_info(st, 33, 1, VQA_FRAMERATE);
st->codec->codec_type = CODEC_TYPE_AUDIO;
if (AV_RL16(&header[0]) == 1)
st->codec->codec_id = CODEC_ID_WESTWOOD_SND1;
else
st->codec->codec_id = CODEC_ID_ADPCM_IMA_WS;
st->codec->codec_tag = 0; /* no tag */
st->codec->sample_rate = AV_RL16(&header[24]);
if (!st->codec->sample_rate)
st->codec->sample_rate = 22050;
st->codec->channels = header[26];
if (!st->codec->channels)
st->codec->channels = 1;
st->codec->bits_per_coded_sample = 16;
st->codec->bit_rate = st->codec->channels * st->codec->sample_rate *
st->codec->bits_per_coded_sample / 4;
st->codec->block_align = st->codec->channels * st->codec->bits_per_coded_sample;
wsvqa->audio_stream_index = st->index;
wsvqa->audio_samplerate = st->codec->sample_rate;
wsvqa->audio_channels = st->codec->channels;
wsvqa->audio_frame_counter = 0;
}
/* there are 0 or more chunks before the FINF chunk; iterate until
* FINF has been skipped and the file will be ready to be demuxed */
do {
if (get_buffer(pb, scratch, VQA_PREAMBLE_SIZE) != VQA_PREAMBLE_SIZE) {
av_free(st->codec->extradata);
return AVERROR(EIO);
}
chunk_tag = AV_RB32(&scratch[0]);
chunk_size = AV_RB32(&scratch[4]);
/* catch any unknown header tags, for curiosity */
switch (chunk_tag) {
case CINF_TAG:
case CINH_TAG:
case CIND_TAG:
case PINF_TAG:
case PINH_TAG:
case PIND_TAG:
case FINF_TAG:
case CMDS_TAG:
break;
default:
av_log (s, AV_LOG_ERROR, " note: unknown chunk seen (%c%c%c%c)\n",
scratch[0], scratch[1],
scratch[2], scratch[3]);
break;
}
url_fseek(pb, chunk_size, SEEK_CUR);
} while (chunk_tag != FINF_TAG);
wsvqa->video_pts = wsvqa->audio_frame_counter = 0;
return 0;
//......... part of the code omitted here .........
Example 9: decode_frame
static int decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
AVPacket *avpkt)
{
PicContext *s = avctx->priv_data;
int buf_size = avpkt->size;
const uint8_t *buf = avpkt->data;
const uint8_t *buf_end = avpkt->data + buf_size;
uint32_t *palette;
int bits_per_plane, bpp, etype, esize, npal;
int i, x, y, plane;
if (buf_size < 11)
return AVERROR_INVALIDDATA;
if (bytestream_get_le16(&buf) != 0x1234)
return AVERROR_INVALIDDATA;
s->width = bytestream_get_le16(&buf);
s->height = bytestream_get_le16(&buf);
buf += 4;
bits_per_plane = *buf & 0xF;
s->nb_planes = (*buf++ >> 4) + 1;
bpp = s->nb_planes ? bits_per_plane*s->nb_planes : bits_per_plane;
if (bits_per_plane > 8 || bpp < 1 || bpp > 32) {
av_log_ask_for_sample(avctx, "unsupported bit depth\n");
return AVERROR_INVALIDDATA;
}
if (*buf == 0xFF) {
buf += 2;
etype = bytestream_get_le16(&buf);
esize = bytestream_get_le16(&buf);
if (buf_end - buf < esize)
return AVERROR_INVALIDDATA;
} else {
etype = -1;
esize = 0;
}
avctx->pix_fmt = PIX_FMT_PAL8;
if (s->width != avctx->width && s->height != avctx->height) {
if (av_image_check_size(s->width, s->height, 0, avctx) < 0)
return -1;
avcodec_set_dimensions(avctx, s->width, s->height);
if (s->frame.data[0])
avctx->release_buffer(avctx, &s->frame);
}
if (avctx->get_buffer(avctx, &s->frame) < 0){
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
memset(s->frame.data[0], 0, s->height * s->frame.linesize[0]);
s->frame.pict_type = AV_PICTURE_TYPE_I;
s->frame.palette_has_changed = 1;
palette = (uint32_t*)s->frame.data[1];
if (etype == 1 && esize > 1 && *buf < 6) {
int idx = *buf;
npal = 4;
for (i = 0; i < npal; i++)
palette[i] = ff_cga_palette[ cga_mode45_index[idx][i] ];
} else if (etype == 2) {
npal = FFMIN(esize, 16);
for (i = 0; i < npal; i++)
palette[i] = ff_cga_palette[ FFMIN(buf[i], 16)];
} else if (etype == 3) {
npal = FFMIN(esize, 16);
for (i = 0; i < npal; i++)
palette[i] = ff_ega_palette[ FFMIN(buf[i], 63)];
} else if (etype == 4 || etype == 5) {
npal = FFMIN(esize / 3, 256);
for (i = 0; i < npal; i++)
palette[i] = AV_RB24(buf + i*3) << 2;
} else {
if (bpp == 1) {
npal = 2;
palette[0] = 0x000000;
palette[1] = 0xFFFFFF;
} else if (bpp == 2) {
npal = 4;
for (i = 0; i < npal; i++)
palette[i] = ff_cga_palette[ cga_mode45_index[0][i] ];
} else {
npal = 16;
memcpy(palette, ff_cga_palette, npal * 4);
}
}
// fill remaining palette entries
memset(palette + npal, 0, AVPALETTE_SIZE - npal * 4);
buf += esize;
x = 0;
y = s->height - 1;
plane = 0;
if (bytestream_get_le16(&buf)) {
while (buf_end - buf >= 6) {
const uint8_t *buf_pend = buf + FFMIN(AV_RL16(buf), buf_end - buf);
//......... part of the code omitted here .........
Example 10: ws_snd_decode_frame
static int ws_snd_decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
// WSSNDContext *c = avctx->priv_data;
int in_size, out_size;
int sample = 0;
int i;
short *samples = data;
if (!buf_size)
return 0;
out_size = AV_RL16(&buf[0]);
*data_size = out_size * 2;
in_size = AV_RL16(&buf[2]);
buf += 4;
if (out_size > *data_size) {
av_log(avctx, AV_LOG_ERROR, "Frame is too large to fit in buffer\n");
return -1;
}
if (in_size > buf_size) {
av_log(avctx, AV_LOG_ERROR, "Frame data is larger than input buffer\n");
return -1;
}
if (in_size == out_size) {
for (i = 0; i < out_size; i++)
*samples++ = (*buf++ - 0x80) << 8;
return buf_size;
}
while (out_size > 0) {
int code;
uint8_t count;
code = (*buf) >> 6;
count = (*buf) & 0x3F;
buf++;
switch(code) {
case 0: /* ADPCM 2-bit */
for (count++; count > 0; count--) {
code = *buf++;
sample += ws_adpcm_2bit[code & 0x3];
CLIP8(sample);
*samples++ = sample << 8;
sample += ws_adpcm_2bit[(code >> 2) & 0x3];
CLIP8(sample);
*samples++ = sample << 8;
sample += ws_adpcm_2bit[(code >> 4) & 0x3];
CLIP8(sample);
*samples++ = sample << 8;
sample += ws_adpcm_2bit[(code >> 6) & 0x3];
CLIP8(sample);
*samples++ = sample << 8;
out_size -= 4;
}
break;
case 1: /* ADPCM 4-bit */
for (count++; count > 0; count--) {
code = *buf++;
sample += ws_adpcm_4bit[code & 0xF];
CLIP8(sample);
*samples++ = sample << 8;
sample += ws_adpcm_4bit[code >> 4];
CLIP8(sample);
*samples++ = sample << 8;
out_size -= 2;
}
break;
case 2: /* no compression */
if (count & 0x20) { /* big delta */
int8_t t;
t = count;
t <<= 3;
sample += t >> 3;
*samples++ = sample << 8;
out_size--;
} else { /* copy */
for (count++; count > 0; count--) {
*samples++ = (*buf++ - 0x80) << 8;
out_size--;
}
sample = buf[-1] - 0x80;
}
break;
default: /* run */
for(count++; count > 0; count--) {
*samples++ = sample << 8;
out_size--;
}
}
Example 11: AV_RL16
vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
vst->codec->codec_id = vmd->is_indeo3 ? CODEC_ID_INDEO3 : CODEC_ID_VMDVIDEO;
vst->codec->codec_tag = 0; /* no fourcc */
vst->codec->width = AV_RL16(&vmd->vmd_header[12]);
vst->codec->height = AV_RL16(&vmd->vmd_header[14]);
if(vmd->is_indeo3 && vst->codec->width > 320)
{
vst->codec->width >>= 1;
vst->codec->height >>= 1;
}
vst->codec->extradata_size = VMD_HEADER_SIZE;
vst->codec->extradata = av_mallocz(VMD_HEADER_SIZE + FF_INPUT_BUFFER_PADDING_SIZE);
memcpy(vst->codec->extradata, vmd->vmd_header, VMD_HEADER_SIZE);
/* if sample rate is 0, assume no audio */
vmd->sample_rate = AV_RL16(&vmd->vmd_header[804]);
if (vmd->sample_rate)
{
st = av_new_stream(s, 0);
if (!st)
return AVERROR(ENOMEM);
vmd->audio_stream_index = st->index;
st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
st->codec->codec_id = CODEC_ID_VMDAUDIO;
st->codec->codec_tag = 0; /* no fourcc */
st->codec->channels = (vmd->vmd_header[811] & 0x80) ? 2 : 1;
st->codec->sample_rate = vmd->sample_rate;
st->codec->block_align = AV_RL16(&vmd->vmd_header[806]);
if (st->codec->block_align & 0x8000)
{
st->codec->bits_per_coded_sample = 16;
Example 12: flic_read_header
static int flic_read_header(AVFormatContext *s,
AVFormatParameters *ap)
{
FlicDemuxContext *flic = s->priv_data;
AVIOContext *pb = s->pb;
unsigned char header[FLIC_HEADER_SIZE];
AVStream *st, *ast;
int speed;
int magic_number;
unsigned char preamble[FLIC_PREAMBLE_SIZE];
flic->frame_number = 0;
/* load the whole header and pull out the width and height */
if (avio_read(pb, header, FLIC_HEADER_SIZE) != FLIC_HEADER_SIZE)
return AVERROR(EIO);
magic_number = AV_RL16(&header[4]);
speed = AV_RL32(&header[0x10]);
if (speed == 0)
speed = FLIC_DEFAULT_SPEED;
/* initialize the decoder streams */
st = av_new_stream(s, 0);
if (!st)
return AVERROR(ENOMEM);
flic->video_stream_index = st->index;
st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
st->codec->codec_id = CODEC_ID_FLIC;
st->codec->codec_tag = 0; /* no fourcc */
st->codec->width = AV_RL16(&header[0x08]);
st->codec->height = AV_RL16(&header[0x0A]);
if (!st->codec->width || !st->codec->height) {
/* Ugly hack needed for the following sample: */
/* http://samples.mplayerhq.hu/fli-flc/fli-bugs/specular.flc */
av_log(s, AV_LOG_WARNING,
"File with no specified width/height. Trying 640x480.\n");
st->codec->width = 640;
st->codec->height = 480;
}
/* send over the whole 128-byte FLIC header */
st->codec->extradata_size = FLIC_HEADER_SIZE;
st->codec->extradata = av_malloc(FLIC_HEADER_SIZE);
memcpy(st->codec->extradata, header, FLIC_HEADER_SIZE);
/* peek at the preamble to detect TFTD videos - they seem to always start with an audio chunk */
if (avio_read(pb, preamble, FLIC_PREAMBLE_SIZE) != FLIC_PREAMBLE_SIZE) {
av_log(s, AV_LOG_ERROR, "Failed to peek at preamble\n");
return AVERROR(EIO);
}
avio_seek(pb, -FLIC_PREAMBLE_SIZE, SEEK_CUR);
/* Time to figure out the framerate:
* If the first preamble's magic number is 0xAAAA then this file is from
* X-COM: Terror from the Deep. If on the other hand there is a FLIC chunk
* magic number at offset 0x10 assume this file is from Magic Carpet instead.
* If neither of the above is true then this is a normal FLIC file.
*/
if (AV_RL16(&preamble[4]) == FLIC_TFTD_CHUNK_AUDIO) {
/* TFTD videos have an extra 22050 Hz 8-bit mono audio stream */
ast = av_new_stream(s, 1);
if (!ast)
return AVERROR(ENOMEM);
flic->audio_stream_index = ast->index;
/* all audio frames are the same size, so use the size of the first chunk for block_align */
ast->codec->block_align = AV_RL32(&preamble[0]);
ast->codec->codec_type = AVMEDIA_TYPE_AUDIO;
ast->codec->codec_id = CODEC_ID_PCM_U8;
ast->codec->codec_tag = 0;
ast->codec->sample_rate = FLIC_TFTD_SAMPLE_RATE;
ast->codec->channels = 1;
ast->codec->sample_fmt = AV_SAMPLE_FMT_U8;
ast->codec->bit_rate = st->codec->sample_rate * 8;
ast->codec->bits_per_coded_sample = 8;
ast->codec->channel_layout = AV_CH_LAYOUT_MONO;
ast->codec->extradata_size = 0;
/* Since the header information is incorrect we have to figure out the
* framerate using block_align and the fact that the audio is 22050 Hz.
* We usually have two cases: 2205 -> 10 fps and 1470 -> 15 fps */
av_set_pts_info(st, 64, ast->codec->block_align, FLIC_TFTD_SAMPLE_RATE);
av_set_pts_info(ast, 64, 1, FLIC_TFTD_SAMPLE_RATE);
} else if (AV_RL16(&header[0x10]) == FLIC_CHUNK_MAGIC_1) {
av_set_pts_info(st, 64, FLIC_MC_SPEED, 70);
/* rewind the stream since the first chunk is at offset 12 */
avio_seek(pb, 12, SEEK_SET);
/* send over abbreviated FLIC header chunk */
av_free(st->codec->extradata);
st->codec->extradata_size = 12;
st->codec->extradata = av_malloc(12);
memcpy(st->codec->extradata, header, 12);
} else if (magic_number == FLIC_FILE_MAGIC_1) {
//......... part of the code omitted here .........
Example 13: flic_read_packet
static int flic_read_packet(AVFormatContext *s,
AVPacket *pkt)
{
FlicDemuxContext *flic = s->priv_data;
AVIOContext *pb = s->pb;
int packet_read = 0;
unsigned int size;
int magic;
int ret = 0;
unsigned char preamble[FLIC_PREAMBLE_SIZE];
while (!packet_read) {
if ((ret = avio_read(pb, preamble, FLIC_PREAMBLE_SIZE)) !=
FLIC_PREAMBLE_SIZE) {
ret = AVERROR(EIO);
break;
}
size = AV_RL32(&preamble[0]);
magic = AV_RL16(&preamble[4]);
if (((magic == FLIC_CHUNK_MAGIC_1) || (magic == FLIC_CHUNK_MAGIC_2)) && size > FLIC_PREAMBLE_SIZE) {
if (av_new_packet(pkt, size)) {
ret = AVERROR(EIO);
break;
}
pkt->stream_index = flic->video_stream_index;
pkt->pts = flic->frame_number++;
pkt->pos = avio_tell(pb);
memcpy(pkt->data, preamble, FLIC_PREAMBLE_SIZE);
ret = avio_read(pb, pkt->data + FLIC_PREAMBLE_SIZE,
size - FLIC_PREAMBLE_SIZE);
if (ret != size - FLIC_PREAMBLE_SIZE) {
av_free_packet(pkt);
ret = AVERROR(EIO);
}
packet_read = 1;
} else if (magic == FLIC_TFTD_CHUNK_AUDIO) {
if (av_new_packet(pkt, size)) {
ret = AVERROR(EIO);
break;
}
/* skip useless 10B sub-header (yes, it's not accounted for in the chunk header) */
avio_skip(pb, 10);
pkt->stream_index = flic->audio_stream_index;
pkt->pos = avio_tell(pb);
ret = avio_read(pb, pkt->data, size);
if (ret != size) {
av_free_packet(pkt);
ret = AVERROR(EIO);
}
packet_read = 1;
} else {
/* not interested in this chunk */
avio_skip(pb, size - 6);
}
}
return ret;
}
Example 14: avs_decode_frame
static int
avs_decode_frame(AVCodecContext * avctx,
void *data, int *data_size, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
const uint8_t *buf_end = avpkt->data + avpkt->size;
int buf_size = avpkt->size;
AvsContext *const avs = avctx->priv_data;
AVFrame *picture = data;
AVFrame *const p = (AVFrame *) & avs->picture;
const uint8_t *table, *vect;
uint8_t *out;
int i, j, x, y, stride, vect_w = 3, vect_h = 3;
AvsVideoSubType sub_type;
AvsBlockType type;
GetBitContext change_map;
if (avctx->reget_buffer(avctx, p)) {
av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
return -1;
}
p->reference = 1;
p->pict_type = AV_PICTURE_TYPE_P;
p->key_frame = 0;
out = avs->picture.data[0];
stride = avs->picture.linesize[0];
if (buf_end - buf < 4)
return AVERROR_INVALIDDATA;
sub_type = buf[0];
type = buf[1];
buf += 4;
if (type == AVS_PALETTE) {
int first, last;
uint32_t *pal = (uint32_t *) avs->picture.data[1];
first = AV_RL16(buf);
last = first + AV_RL16(buf + 2);
if (first >= 256 || last > 256 || buf_end - buf < 4 + 4 + 3 * (last - first))
return AVERROR_INVALIDDATA;
buf += 4;
for (i=first; i<last; i++, buf+=3)
pal[i] = (buf[0] << 18) | (buf[1] << 10) | (buf[2] << 2);
sub_type = buf[0];
type = buf[1];
buf += 4;
}
if (type != AVS_VIDEO)
return -1;
switch (sub_type) {
case AVS_I_FRAME:
p->pict_type = AV_PICTURE_TYPE_I;
p->key_frame = 1;
case AVS_P_FRAME_3X3:
vect_w = 3;
vect_h = 3;
break;
case AVS_P_FRAME_2X2:
vect_w = 2;
vect_h = 2;
break;
case AVS_P_FRAME_2X3:
vect_w = 2;
vect_h = 3;
break;
default:
return -1;
}
if (buf_end - buf < 256 * vect_w * vect_h)
return AVERROR_INVALIDDATA;
table = buf + (256 * vect_w * vect_h);
if (sub_type != AVS_I_FRAME) {
int map_size = ((318 / vect_w + 7) / 8) * (198 / vect_h);
if (buf_end - table < map_size)
return AVERROR_INVALIDDATA;
init_get_bits(&change_map, table, map_size * 8);
table += map_size;
}
for (y=0; y<198; y+=vect_h) {
for (x=0; x<318; x+=vect_w) {
if (sub_type == AVS_I_FRAME || get_bits1(&change_map)) {
if (buf_end - table < 1)
return AVERROR_INVALIDDATA;
vect = &buf[*table++ * (vect_w * vect_h)];
for (j=0; j<vect_w; j++) {
out[(y + 0) * stride + x + j] = vect[(0 * vect_w) + j];
out[(y + 1) * stride + x + j] = vect[(1 * vect_w) + j];
if (vect_h == 3)
out[(y + 2) * stride + x + j] =
vect[(2 * vect_w) + j];
//......... part of the code omitted here .........
Example 15: roq_read_packet
static int roq_read_packet(AVFormatContext *s,
AVPacket *pkt)
{
RoqDemuxContext *roq = s->priv_data;
AVIOContext *pb = s->pb;
int ret = 0;
unsigned int chunk_size;
unsigned int chunk_type;
unsigned int codebook_size;
unsigned char preamble[RoQ_CHUNK_PREAMBLE_SIZE];
int packet_read = 0;
int64_t codebook_offset;
while (!packet_read) {
if (avio_feof(s->pb))
return AVERROR(EIO);
/* get the next chunk preamble */
if ((ret = avio_read(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE)) !=
RoQ_CHUNK_PREAMBLE_SIZE)
return AVERROR(EIO);
chunk_type = AV_RL16(&preamble[0]);
chunk_size = AV_RL32(&preamble[2]);
if(chunk_size > INT_MAX)
return AVERROR_INVALIDDATA;
chunk_size = ffio_limit(pb, chunk_size);
switch (chunk_type) {
case RoQ_INFO:
if (roq->video_stream_index == -1) {
AVStream *st = avformat_new_stream(s, NULL);
if (!st)
return AVERROR(ENOMEM);
avpriv_set_pts_info(st, 63, 1, roq->frame_rate);
roq->video_stream_index = st->index;
st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
st->codecpar->codec_id = AV_CODEC_ID_ROQ;
st->codecpar->codec_tag = 0; /* no fourcc */
if (avio_read(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE) != RoQ_CHUNK_PREAMBLE_SIZE)
return AVERROR(EIO);
st->codecpar->width = roq->width = AV_RL16(preamble);
st->codecpar->height = roq->height = AV_RL16(preamble + 2);
break;
}
/* don't care about this chunk anymore */
avio_skip(pb, RoQ_CHUNK_PREAMBLE_SIZE);
break;
case RoQ_QUAD_CODEBOOK:
if (roq->video_stream_index < 0)
return AVERROR_INVALIDDATA;
/* packet needs to contain both this codebook and next VQ chunk */
codebook_offset = avio_tell(pb) - RoQ_CHUNK_PREAMBLE_SIZE;
codebook_size = chunk_size;
avio_skip(pb, codebook_size);
if (avio_read(pb, preamble, RoQ_CHUNK_PREAMBLE_SIZE) !=
RoQ_CHUNK_PREAMBLE_SIZE)
return AVERROR(EIO);
chunk_size = AV_RL32(&preamble[2]) + RoQ_CHUNK_PREAMBLE_SIZE * 2 +
codebook_size;
/* rewind */
avio_seek(pb, codebook_offset, SEEK_SET);
/* load up the packet */
ret= av_get_packet(pb, pkt, chunk_size);
if (ret != chunk_size)
return AVERROR(EIO);
pkt->stream_index = roq->video_stream_index;
pkt->pts = roq->video_pts++;
packet_read = 1;
break;
case RoQ_SOUND_MONO:
case RoQ_SOUND_STEREO:
if (roq->audio_stream_index == -1) {
AVStream *st = avformat_new_stream(s, NULL);
if (!st)
return AVERROR(ENOMEM);
avpriv_set_pts_info(st, 32, 1, RoQ_AUDIO_SAMPLE_RATE);
roq->audio_stream_index = st->index;
st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
st->codecpar->codec_id = AV_CODEC_ID_ROQ_DPCM;
st->codecpar->codec_tag = 0; /* no tag */
if (chunk_type == RoQ_SOUND_STEREO) {
st->codecpar->channels = 2;
st->codecpar->channel_layout = AV_CH_LAYOUT_STEREO;
} else {
st->codecpar->channels = 1;
st->codecpar->channel_layout = AV_CH_LAYOUT_MONO;
}
roq->audio_channels = st->codecpar->channels;
st->codecpar->sample_rate = RoQ_AUDIO_SAMPLE_RATE;
st->codecpar->bits_per_coded_sample = 16;
//......... part of the code omitted here .........