This article collects and summarizes typical usage examples of the C++ AVERROR function. If you have been wondering what AVERROR does, how to call it, or what real uses look like, the curated examples below should help.
The following shows 15 code examples of the AVERROR function, sorted by popularity by default.
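Before diving in, a quick refresher: AVERROR(e) maps a positive POSIX errno value to the negative error code that FFmpeg APIs return, and av_strerror() turns such a code back into a readable message. The snippet below is a minimal sketch of that round trip; it assumes a standard FFmpeg installation providing libavutil/error.h.

#include <errno.h>
#include <stdio.h>
#include <libavutil/error.h>

int main(void)
{
    int err = AVERROR(EIO);            /* negative on all supported platforms */
    char msg[AV_ERROR_MAX_STRING_SIZE];

    if (av_strerror(err, msg, sizeof(msg)) == 0)
        printf("AVERROR(EIO) = %d: %s\n", err, msg);

    /* AVUNERROR() recovers the original errno value. */
    printf("AVUNERROR -> %d (EIO = %d)\n", AVUNERROR(err), EIO);
    return 0;
}

Most functions in the examples below simply propagate such codes upward, which is the pattern to watch for.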
Example 1: http_open_cnx
/* return non-zero on error */
static int http_open_cnx(URLContext *h)
{
const char *path, *proxy_path;
char hostname[1024], hoststr[1024];
char auth[1024];
char path1[1024];
char buf[1024];
int port, use_proxy, err, location_changed = 0, redirects = 0;
HTTPAuthType cur_auth_type;
HTTPContext *s = h->priv_data;
URLContext *hd = NULL;
s->init = 1;
proxy_path = getenv("http_proxy");
use_proxy = (proxy_path != NULL) && !getenv("no_proxy") &&
av_strstart(proxy_path, "http://", NULL);
/* fill the dest addr */
redo:
/* needed in any case to build the host string */
ff_url_split(NULL, 0, auth, sizeof(auth), hostname, sizeof(hostname), &port,
path1, sizeof(path1), s->location);
ff_url_join(hoststr, sizeof(hoststr), NULL, NULL, hostname, port, NULL);
if (use_proxy) {
ff_url_split(NULL, 0, auth, sizeof(auth), hostname, sizeof(hostname), &port,
NULL, 0, proxy_path);
path = s->location;
} else {
if (path1[0] == '\0')
path = "/";
else
path = path1;
}
if (port < 0)
port = 80;
ff_url_join(buf, sizeof(buf), "tcp", NULL, hostname, port, NULL);
err = url_open(&hd, buf, URL_RDWR);
if (err < 0)
goto fail;
s->hd = hd;
cur_auth_type = s->auth_state.auth_type;
if (http_connect(h, path, hoststr, auth, &location_changed) < 0)
goto fail;
if (s->http_code == 401) {
if (cur_auth_type == HTTP_AUTH_NONE && s->auth_state.auth_type != HTTP_AUTH_NONE) {
url_close(hd);
goto redo;
} else
goto fail;
}
if ((s->http_code == 302 || s->http_code == 303) && location_changed == 1) {
/* url moved, get next */
url_close(hd);
if (redirects++ >= MAX_REDIRECTS)
return AVERROR(EIO);
location_changed = 0;
goto redo;
}
return 0;
fail:
if (hd)
url_close(hd);
s->hd = NULL;
return AVERROR(EIO);
}
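For context, here is how a caller would see the AVERROR value that http_open_cnx() produces, propagated through the legacy url_open()/url_close() layer that the example itself uses. A hedged sketch only: URL_RDONLY and the url_* names belong to the same old FFmpeg era as the example above, not the current API.

/* Hedged sketch: an AVERROR code from Example 1 surfacing to a caller. */
static int open_and_probe(const char *url)
{
    URLContext *uc = NULL;
    int err = url_open(&uc, url, URL_RDONLY);
    if (err < 0)
        return err;          /* e.g. AVERROR(EIO) bubbled up from http_open_cnx() */
    url_close(uc);
    return 0;
}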
Example 2: rm_read_audio_stream_info
//......... beginning of this function omitted .........
case AV_CODEC_ID_RA_288:
st->codec->extradata_size= 0;
ast->audio_framesize = st->codec->block_align;
st->codec->block_align = coded_framesize;
break;
case AV_CODEC_ID_COOK:
st->need_parsing = AVSTREAM_PARSE_HEADERS;
case AV_CODEC_ID_ATRAC3:
case AV_CODEC_ID_SIPR:
if (read_all) {
codecdata_length = 0;
} else {
avio_rb16(pb); avio_r8(pb);
if (version == 5)
avio_r8(pb);
codecdata_length = avio_rb32(pb);
if(codecdata_length + FF_INPUT_BUFFER_PADDING_SIZE <= (unsigned)codecdata_length){
av_log(s, AV_LOG_ERROR, "codecdata_length too large\n");
return -1;
}
}
ast->audio_framesize = st->codec->block_align;
if (st->codec->codec_id == AV_CODEC_ID_SIPR) {
if (flavor > 3) {
av_log(s, AV_LOG_ERROR, "bad SIPR file flavor %d\n",
flavor);
return -1;
}
st->codec->block_align = ff_sipr_subpk_size[flavor];
} else {
if(sub_packet_size <= 0){
av_log(s, AV_LOG_ERROR, "sub_packet_size is invalid\n");
return -1;
}
st->codec->block_align = ast->sub_packet_size;
}
if ((ret = rm_read_extradata(pb, st->codec, codecdata_length)) < 0)
return ret;
break;
case AV_CODEC_ID_AAC:
avio_rb16(pb); avio_r8(pb);
if (version == 5)
avio_r8(pb);
codecdata_length = avio_rb32(pb);
if(codecdata_length + FF_INPUT_BUFFER_PADDING_SIZE <= (unsigned)codecdata_length){
av_log(s, AV_LOG_ERROR, "codecdata_length too large\n");
return -1;
}
if (codecdata_length >= 1) {
avio_r8(pb);
if ((ret = rm_read_extradata(pb, st->codec, codecdata_length - 1)) < 0)
return ret;
}
break;
default:
av_strlcpy(st->codec->codec_name, buf, sizeof(st->codec->codec_name));
}
if (ast->deint_id == DEINT_ID_INT4 ||
ast->deint_id == DEINT_ID_GENR ||
ast->deint_id == DEINT_ID_SIPR) {
if (st->codec->block_align <= 0 ||
ast->audio_framesize * sub_packet_h > (unsigned)INT_MAX ||
ast->audio_framesize * sub_packet_h < st->codec->block_align)
return AVERROR_INVALIDDATA;
if (av_new_packet(&ast->pkt, ast->audio_framesize * sub_packet_h) < 0)
return AVERROR(ENOMEM);
}
switch (ast->deint_id) {
case DEINT_ID_INT4:
if (ast->coded_framesize > ast->audio_framesize ||
sub_packet_h <= 1 ||
ast->coded_framesize * sub_packet_h > (2 + (sub_packet_h & 1)) * ast->audio_framesize)
return AVERROR_INVALIDDATA;
break;
case DEINT_ID_GENR:
if (ast->sub_packet_size <= 0 ||
ast->sub_packet_size > ast->audio_framesize)
return AVERROR_INVALIDDATA;
break;
case DEINT_ID_SIPR:
case DEINT_ID_INT0:
case DEINT_ID_VBRS:
case DEINT_ID_VBRF:
break;
default:
av_log(s, AV_LOG_ERROR, "Unknown interleaver %X\n", ast->deint_id);
return AVERROR_INVALIDDATA;
}
if (read_all) {
avio_r8(pb);
avio_r8(pb);
avio_r8(pb);
rm_read_metadata(s, pb, 0);
}
}
return 0;
}
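The codecdata_length checks above use a compact unsigned-overflow guard: if adding the padding wraps around, the sum becomes smaller than the original operand. A small standalone illustration (the constant 32 merely stands in for FF_INPUT_BUFFER_PADDING_SIZE):

#include <stdio.h>

int main(void)
{
    unsigned int codecdata_length = 0xFFFFFFF0u; /* hostile value read from the file */
    unsigned int padding = 32;                   /* stand-in for FF_INPUT_BUFFER_PADDING_SIZE */

    /* Unsigned arithmetic wraps, so an overflowed sum compares <= the addend. */
    if (codecdata_length + padding <= codecdata_length)
        printf("codecdata_length too large\n");
    return 0;
}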
Example 3: rm_assemble_video_frame
static int rm_assemble_video_frame(AVFormatContext *s, AVIOContext *pb,
RMDemuxContext *rm, RMStream *vst,
AVPacket *pkt, int len, int *pseq,
int64_t *timestamp)
{
int hdr;
int seq = 0, pic_num = 0, len2 = 0, pos = 0; // init to silence compiler warning
int type;
int ret;
hdr = avio_r8(pb); len--;
type = hdr >> 6;
if(type != 3){ // not a frame sent as part of a packet
seq = avio_r8(pb); len--;
}
if(type != 1){ // not a whole frame
len2 = get_num(pb, &len);
pos = get_num(pb, &len);
pic_num = avio_r8(pb); len--;
}
if(len<0)
return -1;
rm->remaining_len = len;
if(type&1){ // frame, not slice
if(type == 3){ // a frame sent as part of a packet
len= len2;
*timestamp = pos;
}
if(rm->remaining_len < len)
return -1;
rm->remaining_len -= len;
if(av_new_packet(pkt, len + 9) < 0)
return AVERROR(EIO);
pkt->data[0] = 0;
AV_WL32(pkt->data + 1, 1);
AV_WL32(pkt->data + 5, 0);
if ((ret = avio_read(pb, pkt->data + 9, len)) != len) {
av_free_packet(pkt);
return ret < 0 ? ret : AVERROR(EIO);
}
return 0;
}
// now we have to deal with a single slice
*pseq = seq;
if((seq & 0x7F) == 1 || vst->curpic_num != pic_num){
if (len2 > ffio_limit(pb, len2)) {
av_log(s, AV_LOG_ERROR, "Impossibly sized packet\n");
return AVERROR_INVALIDDATA;
}
vst->slices = ((hdr & 0x3F) << 1) + 1;
vst->videobufsize = len2 + 8*vst->slices + 1;
av_free_packet(&vst->pkt); //FIXME this should be output.
if(av_new_packet(&vst->pkt, vst->videobufsize) < 0)
return AVERROR(ENOMEM);
memset(vst->pkt.data, 0, vst->pkt.size);
vst->videobufpos = 8*vst->slices + 1;
vst->cur_slice = 0;
vst->curpic_num = pic_num;
vst->pktpos = avio_tell(pb);
}
if(type == 2)
len = FFMIN(len, pos);
if(++vst->cur_slice > vst->slices)
return 1;
AV_WL32(vst->pkt.data - 7 + 8*vst->cur_slice, 1);
AV_WL32(vst->pkt.data - 3 + 8*vst->cur_slice, vst->videobufpos - 8*vst->slices - 1);
if(vst->videobufpos + len > vst->videobufsize)
return 1;
if (avio_read(pb, vst->pkt.data + vst->videobufpos, len) != len)
return AVERROR(EIO);
vst->videobufpos += len;
rm->remaining_len-= len;
if (type == 2 || vst->videobufpos == vst->videobufsize) {
vst->pkt.data[0] = vst->cur_slice-1;
*pkt= vst->pkt;
vst->pkt.data= NULL;
vst->pkt.size= 0;
vst->pkt.buf = NULL;
#if FF_API_DESTRUCT_PACKET
vst->pkt.destruct = NULL;
#endif
if(vst->slices != vst->cur_slice) //FIXME find out how to set slices correctly from the beginning
memmove(pkt->data + 1 + 8*vst->cur_slice, pkt->data + 1 + 8*vst->slices,
vst->videobufpos - 1 - 8*vst->slices);
pkt->size = vst->videobufpos + 8*(vst->cur_slice - vst->slices);
pkt->pts = AV_NOPTS_VALUE;
pkt->pos = vst->pktpos;
vst->slices = 0;
return 0;
}
return 1;
}
Example 4: qdm2_parse_packet
/** return 0 when a packet was output and none remain, 1 when a packet was output and more remain, or a negative AVERROR code when no packet could be produced (e.g. AVERROR(EAGAIN) while more data is needed) */
static int qdm2_parse_packet(AVFormatContext *s, PayloadContext *qdm,
AVStream *st, AVPacket *pkt,
uint32_t *timestamp,
const uint8_t *buf, int len, uint16_t seq,
int flags)
{
int res = AVERROR_INVALIDDATA, n;
const uint8_t *end = buf + len, *p = buf;
if (len > 0) {
if (len < 2)
return AVERROR_INVALIDDATA;
/* configuration block */
if (*p == 0xff) {
if (qdm->n_pkts > 0) {
av_log(s, AV_LOG_WARNING,
"Out of sequence config - dropping queue\n");
qdm->n_pkts = 0;
memset(qdm->len, 0, sizeof(qdm->len));
}
if ((res = qdm2_parse_config(qdm, st, ++p, end)) < 0)
return res;
p += res;
/* We set codec_id to AV_CODEC_ID_NONE initially to
* delay decoder initialization since extradata is
* carried within the RTP stream, not SDP. Here,
* by setting codec_id to AV_CODEC_ID_QDM2, we are signalling
* to the decoder that it is OK to initialize. */
st->codec->codec_id = AV_CODEC_ID_QDM2;
}
if (st->codec->codec_id == AV_CODEC_ID_NONE)
return AVERROR(EAGAIN);
/* subpackets */
while (end - p >= 4) {
if ((res = qdm2_parse_subpacket(qdm, st, p, end)) < 0)
return res;
p += res;
}
qdm->timestamp = *timestamp;
if (++qdm->n_pkts < qdm->subpkts_per_block)
return AVERROR(EAGAIN);
qdm->cache = 0;
for (n = 0; n < 0x80; n++)
if (qdm->len[n] > 0)
qdm->cache++;
}
/* output the subpackets into freshly created superblock structures */
if (!qdm->cache || (res = qdm2_restore_block(qdm, st, pkt)) < 0)
return res;
if (--qdm->cache == 0)
qdm->n_pkts = 0;
*timestamp = qdm->timestamp;
qdm->timestamp = RTP_NOTS_VALUE;
return (qdm->cache > 0) ? 1 : 0;
}
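A depacketizer with this return convention (0: packet produced, queue empty; 1: packet produced, more queued; AVERROR(EAGAIN): feed more input) is typically drained in a loop. The sketch below is illustrative only; handle_packet() is a hypothetical stand-in for the RTP demuxer's internal consumer, not an FFmpeg API.

/* Hypothetical drain loop for the 0 / 1 / AVERROR(EAGAIN) contract above. */
static int drain_qdm2(AVFormatContext *s, PayloadContext *qdm, AVStream *st,
                      AVPacket *pkt, uint32_t *ts, const uint8_t *buf, int len,
                      uint16_t seq, int flags)
{
    int ret;
    do {
        ret = qdm2_parse_packet(s, qdm, st, pkt, ts, buf, len, seq, flags);
        if (ret == AVERROR(EAGAIN))
            return 0;        /* no complete block yet; feed the next RTP packet */
        if (ret < 0)
            return ret;      /* hard error, e.g. AVERROR_INVALIDDATA */
        handle_packet(pkt);  /* hypothetical consumer */
        buf = NULL;          /* later iterations only flush the queued data */
        len = 0;
    } while (ret == 1);      /* 1 = more packets remain in the queue */
    return 0;
}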
Example 5: vid_read_packet
static int vid_read_packet(AVFormatContext *s,
AVPacket *pkt)
{
BVID_DemuxContext *vid = s->priv_data;
AVIOContext *pb = s->pb;
unsigned char block_type;
int audio_length;
int ret_value;
if(vid->is_finished || url_feof(pb))
return AVERROR(EIO);
block_type = avio_r8(pb);
switch(block_type){
case PALETTE_BLOCK:
if (vid->palette) {
av_log(s, AV_LOG_WARNING, "discarding unused palette\n");
av_freep(&vid->palette);
}
vid->palette = av_malloc(BVID_PALETTE_SIZE);
if (!vid->palette)
return AVERROR(ENOMEM);
if (avio_read(pb, vid->palette, BVID_PALETTE_SIZE) != BVID_PALETTE_SIZE) {
av_freep(&vid->palette);
return AVERROR(EIO);
}
return vid_read_packet(s, pkt);
case FIRST_AUDIO_BLOCK:
avio_rl16(pb);
// Sound Blaster DAC time constant is used to derive the sample rate, as on the specification page (link above)
vid->sample_rate = 1000000 / (256 - avio_r8(pb));
case AUDIO_BLOCK:
if (vid->audio_index < 0) {
AVStream *st = avformat_new_stream(s, NULL);
if (!st)
return AVERROR(ENOMEM);
vid->audio_index = st->index;
st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
st->codec->codec_id = CODEC_ID_PCM_U8;
st->codec->channels = 1;
st->codec->bits_per_coded_sample = 8;
st->codec->sample_rate = vid->sample_rate;
st->codec->bit_rate = 8 * st->codec->sample_rate;
st->start_time = 0;
avpriv_set_pts_info(st, 64, 1, vid->sample_rate);
}
audio_length = avio_rl16(pb);
if ((ret_value = av_get_packet(pb, pkt, audio_length)) != audio_length) {
if (ret_value < 0)
return ret_value;
av_log(s, AV_LOG_ERROR, "incomplete audio block\n");
return AVERROR(EIO);
}
pkt->stream_index = vid->audio_index;
pkt->duration = audio_length;
pkt->flags |= AV_PKT_FLAG_KEY;
return 0;
case VIDEO_P_FRAME:
case VIDEO_YOFF_P_FRAME:
case VIDEO_I_FRAME:
return read_frame(vid, pb, pkt, block_type, s);
case EOF_BLOCK:
if(vid->nframes != 0)
av_log(s, AV_LOG_VERBOSE, "reached terminating character but not all frames read.\n");
vid->is_finished = 1;
return AVERROR(EIO);
default:
av_log(s, AV_LOG_ERROR, "unknown block (character = %c, decimal = %d, hex = %x)!!!\n",
block_type, block_type, block_type);
return AVERROR_INVALIDDATA;
}
}
Example 6: read_header
static int read_header(AVFormatContext *s)
{
BinkDemuxContext *bink = s->priv_data;
AVIOContext *pb = s->pb;
uint32_t fps_num, fps_den;
AVStream *vst, *ast;
unsigned int i;
uint32_t pos, next_pos;
uint16_t flags;
int keyframe;
vst = avformat_new_stream(s, NULL);
if (!vst)
return AVERROR(ENOMEM);
vst->codec->codec_tag = avio_rl32(pb);
bink->file_size = avio_rl32(pb) + 8;
vst->duration = avio_rl32(pb);
if (vst->duration > 1000000) {
av_log(s, AV_LOG_ERROR, "invalid header: more than 1000000 frames\n");
return AVERROR(EIO);
}
if (avio_rl32(pb) > bink->file_size) {
av_log(s, AV_LOG_ERROR,
"invalid header: largest frame size greater than file size\n");
return AVERROR(EIO);
}
avio_skip(pb, 4);
vst->codec->width = avio_rl32(pb);
vst->codec->height = avio_rl32(pb);
fps_num = avio_rl32(pb);
fps_den = avio_rl32(pb);
if (fps_num == 0 || fps_den == 0) {
av_log(s, AV_LOG_ERROR, "invalid header: invalid fps (%d/%d)\n", fps_num, fps_den);
return AVERROR(EIO);
}
avpriv_set_pts_info(vst, 64, fps_den, fps_num);
vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
vst->codec->codec_id = CODEC_ID_BINKVIDEO;
vst->codec->extradata = av_mallocz(4 + FF_INPUT_BUFFER_PADDING_SIZE);
if (!vst->codec->extradata)
return AVERROR(ENOMEM);
vst->codec->extradata_size = 4;
avio_read(pb, vst->codec->extradata, 4);
bink->num_audio_tracks = avio_rl32(pb);
if (bink->num_audio_tracks > BINK_MAX_AUDIO_TRACKS) {
av_log(s, AV_LOG_ERROR,
"invalid header: more than "AV_STRINGIFY(BINK_MAX_AUDIO_TRACKS)" audio tracks (%d)\n",
bink->num_audio_tracks);
return AVERROR(EIO);
}
if (bink->num_audio_tracks) {
avio_skip(pb, 4 * bink->num_audio_tracks);
for (i = 0; i < bink->num_audio_tracks; i++) {
ast = avformat_new_stream(s, NULL);
if (!ast)
return AVERROR(ENOMEM);
ast->codec->codec_type = AVMEDIA_TYPE_AUDIO;
ast->codec->codec_tag = 0;
ast->codec->sample_rate = avio_rl16(pb);
avpriv_set_pts_info(ast, 64, 1, ast->codec->sample_rate);
flags = avio_rl16(pb);
ast->codec->codec_id = flags & BINK_AUD_USEDCT ?
CODEC_ID_BINKAUDIO_DCT : CODEC_ID_BINKAUDIO_RDFT;
ast->codec->channels = flags & BINK_AUD_STEREO ? 2 : 1;
ast->codec->extradata = av_mallocz(4 + FF_INPUT_BUFFER_PADDING_SIZE);
if (!ast->codec->extradata)
return AVERROR(ENOMEM);
ast->codec->extradata_size = 4;
AV_WL32(ast->codec->extradata, vst->codec->codec_tag);
}
for (i = 0; i < bink->num_audio_tracks; i++)
s->streams[i + 1]->id = avio_rl32(pb);
}
/* frame index table */
next_pos = avio_rl32(pb);
for (i = 0; i < vst->duration; i++) {
pos = next_pos;
if (i == vst->duration - 1) {
next_pos = bink->file_size;
keyframe = 0;
} else {
next_pos = avio_rl32(pb);
keyframe = pos & 1;
}
pos &= ~1;
next_pos &= ~1;
if (next_pos <= pos) {
//......... rest of this function omitted .........
Example 7: cdxl_decode_frame
static int cdxl_decode_frame(AVCodecContext *avctx, void *data,
int *data_size, AVPacket *pkt)
{
CDXLVideoContext *c = avctx->priv_data;
AVFrame * const p = &c->frame;
int ret, w, h, encoding, aligned_width, buf_size = pkt->size;
const uint8_t *buf = pkt->data;
if (buf_size < 32)
return AVERROR_INVALIDDATA;
encoding = buf[1] & 7;
c->format = buf[1] & 0xE0;
w = AV_RB16(&buf[14]);
h = AV_RB16(&buf[16]);
c->bpp = buf[19];
c->palette_size = AV_RB16(&buf[20]);
c->palette = buf + 32;
c->video = c->palette + c->palette_size;
c->video_size = buf_size - c->palette_size - 32;
if (c->palette_size > 512)
return AVERROR_INVALIDDATA;
if (buf_size < c->palette_size + 32)
return AVERROR_INVALIDDATA;
if (c->bpp < 1)
return AVERROR_INVALIDDATA;
if (c->format != BIT_PLANAR && c->format != BIT_LINE) {
av_log_ask_for_sample(avctx, "unsupported pixel format: 0x%x\n", c->format);
return AVERROR_PATCHWELCOME;
}
if ((ret = av_image_check_size(w, h, 0, avctx)) < 0)
return ret;
if (w != avctx->width || h != avctx->height)
avcodec_set_dimensions(avctx, w, h);
aligned_width = FFALIGN(c->avctx->width, 16);
c->padded_bits = aligned_width - c->avctx->width;
if (c->video_size < aligned_width * avctx->height * c->bpp / 8)
return AVERROR_INVALIDDATA;
if (!encoding && c->palette_size && c->bpp <= 8) {
avctx->pix_fmt = AV_PIX_FMT_PAL8;
} else if (encoding == 1 && (c->bpp == 6 || c->bpp == 8)) {
if (c->palette_size != (1 << (c->bpp - 1)))
return AVERROR_INVALIDDATA;
avctx->pix_fmt = AV_PIX_FMT_BGR24;
} else {
av_log_ask_for_sample(avctx, "unsupported encoding %d and bpp %d\n",
encoding, c->bpp);
return AVERROR_PATCHWELCOME;
}
if (p->data[0])
avctx->release_buffer(avctx, p);
p->reference = 0;
if ((ret = avctx->get_buffer(avctx, p)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
p->pict_type = AV_PICTURE_TYPE_I;
if (encoding) {
av_fast_padded_malloc(&c->new_video, &c->new_video_size,
h * w + FF_INPUT_BUFFER_PADDING_SIZE);
if (!c->new_video)
return AVERROR(ENOMEM);
if (c->bpp == 8)
cdxl_decode_ham8(c);
else
cdxl_decode_ham6(c);
} else {
cdxl_decode_rgb(c);
}
*data_size = sizeof(AVFrame);
*(AVFrame*)data = c->frame;
return buf_size;
}
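This decoder uses the old decode callback signature, where *data_size is set to sizeof(AVFrame) and the return value is the number of packet bytes consumed. A user would normally reach it through the public avcodec_decode_video2() API of the same era. A minimal hedged sketch, assuming a CDXL decoder has already been opened on avctx:

/* Minimal sketch of driving an old-style video decoder like the one above. */
static int decode_one(AVCodecContext *avctx, AVPacket *pkt)
{
    AVFrame frame;
    int got_frame = 0, used;

    avcodec_get_frame_defaults(&frame);
    used = avcodec_decode_video2(avctx, &frame, &got_frame, pkt);
    if (used < 0)
        return used;         /* an AVERROR code, e.g. AVERROR_INVALIDDATA */
    if (got_frame) {
        /* frame.data[] / frame.linesize[] now describe the decoded picture */
    }
    return used;             /* bytes of the packet that were consumed */
}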
Example 8: mxg_read_packet
static int mxg_read_packet(AVFormatContext *s, AVPacket *pkt)
{
int ret;
unsigned int size;
uint8_t *startmarker_ptr, *end, *search_end, marker;
MXGContext *mxg = s->priv_data;
while (!url_feof(s->pb) && !s->pb->error) {
if (mxg->cache_size <= OVERREAD_SIZE) {
/* update internal buffer */
ret = mxg_update_cache(s, DEFAULT_PACKET_SIZE + OVERREAD_SIZE);
if (ret < 0)
return ret;
}
end = mxg->buffer_ptr + mxg->cache_size;
/* find start marker - 0xff */
if (mxg->cache_size > OVERREAD_SIZE) {
search_end = end - OVERREAD_SIZE;
startmarker_ptr = mxg_find_startmarker(mxg->buffer_ptr, search_end);
} else {
search_end = end;
startmarker_ptr = mxg_find_startmarker(mxg->buffer_ptr, search_end);
if (startmarker_ptr >= search_end - 1 ||
*(startmarker_ptr + 1) != EOI) break;
}
if (startmarker_ptr != search_end) { /* start marker found */
marker = *(startmarker_ptr + 1);
mxg->buffer_ptr = startmarker_ptr + 2;
mxg->cache_size = end - mxg->buffer_ptr;
if (marker == SOI) {
mxg->soi_ptr = startmarker_ptr;
} else if (marker == EOI) {
if (!mxg->soi_ptr) {
av_log(s, AV_LOG_WARNING, "Found EOI before SOI, skipping\n");
continue;
}
pkt->pts = pkt->dts = mxg->dts;
pkt->stream_index = VIDEO_STREAM_INDEX;
pkt->destruct = NULL;
pkt->size = mxg->buffer_ptr - mxg->soi_ptr;
pkt->data = mxg->soi_ptr;
if (mxg->soi_ptr - mxg->buffer > mxg->cache_size) {
if (mxg->cache_size > 0) {
memcpy(mxg->buffer, mxg->buffer_ptr, mxg->cache_size);
}
mxg->buffer_ptr = mxg->buffer;
}
mxg->soi_ptr = 0;
return pkt->size;
} else if ( (SOF0 <= marker && marker <= SOF15) ||
(SOS <= marker && marker <= COM) ) {
/* all other markers that start a marker segment also contain a
length value (see the JPEG specification, Annex B.1) */
size = AV_RB16(mxg->buffer_ptr);
if (size < 2)
return AVERROR(EINVAL);
if (mxg->cache_size < size) {
ret = mxg_update_cache(s, size);
if (ret < 0)
return ret;
startmarker_ptr = mxg->buffer_ptr - 2;
mxg->cache_size = 0;
} else {
mxg->cache_size -= size;
}
mxg->buffer_ptr += size;
if (marker == APP13 && size >= 16) { /* audio data */
/* time (GMT) of first sample in usec since 1970, little-endian */
pkt->pts = pkt->dts = AV_RL64(startmarker_ptr + 8);
pkt->stream_index = AUDIO_STREAM_INDEX;
pkt->destruct = NULL;
pkt->size = size - 14;
pkt->data = startmarker_ptr + 16;
if (startmarker_ptr - mxg->buffer > mxg->cache_size) {
if (mxg->cache_size > 0) {
memcpy(mxg->buffer, mxg->buffer_ptr, mxg->cache_size);
}
mxg->buffer_ptr = mxg->buffer;
}
return pkt->size;
} else if (marker == COM && size >= 18 &&
!strncmp(startmarker_ptr + 4, "MXF", 3)) {
/* time (GMT) of video frame in usec since 1970, little-endian */
mxg->dts = AV_RL64(startmarker_ptr + 12);
}
}
} else {
/* start marker not found */
//......... rest of this function omitted .........
Example 9: main
int main (int argc, char **argv)
{
int ret = 0, got_frame;
if (argc != 4 && argc != 5) {
fprintf(stderr, "usage: %s [-refcount] input_file video_output_file audio_output_file\n"
"API example program to show how to read frames from an input file.\n"
"This program reads frames from a file, decodes them, and writes decoded\n"
"video frames to a rawvideo file named video_output_file, and decoded\n"
"audio frames to a rawaudio file named audio_output_file.\n\n"
"If the -refcount option is specified, the program use the\n"
"reference counting frame system which allows keeping a copy of\n"
"the data for longer than one decode call.\n"
"\n", argv[0]);
exit(1);
}
if (argc == 5 && !strcmp(argv[1], "-refcount")) {
refcount = 1;
argv++;
}
src_filename = argv[1];
video_dst_filename = argv[2];
audio_dst_filename = argv[3];
/* register all formats and codecs */
av_register_all();
/* open input file, and allocate format context */
if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
fprintf(stderr, "Could not open source file %s\n", src_filename);
exit(1);
}
/* retrieve stream information */
if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
fprintf(stderr, "Could not find stream information\n");
exit(1);
}
if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
video_stream = fmt_ctx->streams[video_stream_idx];
video_dec_ctx = video_stream->codec;
video_dst_file = fopen(video_dst_filename, "wb");
if (!video_dst_file) {
fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
ret = 1;
goto end;
}
/* allocate image where the decoded image will be put */
width = video_dec_ctx->width;
height = video_dec_ctx->height;
pix_fmt = video_dec_ctx->pix_fmt;
printf("width:%d height:%d pix_fmt:%d\n", width, height, pix_fmt);
ret = av_image_alloc(video_dst_data, video_dst_linesize,
width, height, pix_fmt, 1);
if (ret < 0) {
fprintf(stderr, "Could not allocate raw video buffer\n");
goto end;
}
video_dst_bufsize = ret;
/* create scaling context */
sws_ctx = sws_getContext(width, height, pix_fmt,
width*SCALE_MULTIPLE, height*SCALE_MULTIPLE, pix_fmt, SWS_BILINEAR, NULL, NULL, NULL);
if (!sws_ctx) {
fprintf(stderr, "Impossible to create scale context for the conversion "
"fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n",
av_get_pix_fmt_name(pix_fmt), width,height,
av_get_pix_fmt_name(pix_fmt), width,height);
ret = AVERROR(EINVAL);
goto end;
}
ret = av_image_alloc(scale_video_dst_data, scale_video_dst_linesize,
width*SCALE_MULTIPLE, height*SCALE_MULTIPLE, pix_fmt, 1);
if (ret < 0) {
fprintf(stderr, "Could not allocate raw video buffer\n");
goto end;
}
scale_video_dst_bufsize = ret;
AllocPic();
}
if (open_codec_context(&audio_stream_idx, fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0) {
audio_stream = fmt_ctx->streams[audio_stream_idx];
audio_dec_ctx = audio_stream->codec;
audio_dst_file = fopen(audio_dst_filename, "wb");
if (!audio_dst_file) {
fprintf(stderr, "Could not open destination file %s\n", audio_dst_filename);
ret = 1;
goto end;
}
}
/* dump input information to stderr */
av_dump_format(fmt_ctx, 0, src_filename, 0);
if (!audio_stream && !video_stream) {
//......... rest of this function omitted .........
Example 10: filter_frame
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
AVFilterContext *ctx = inlink->dst;
FlangerContext *s = ctx->priv;
AVFrame *out_frame;
int chan, i;
if (av_frame_is_writable(frame)) {
out_frame = frame;
} else {
out_frame = ff_get_audio_buffer(inlink, frame->nb_samples);
if (!out_frame)
return AVERROR(ENOMEM);
av_frame_copy_props(out_frame, frame);
}
for (i = 0; i < frame->nb_samples; i++) {
s->delay_buf_pos = (s->delay_buf_pos + s->max_samples - 1) % s->max_samples;
for (chan = 0; chan < inlink->channels; chan++) {
double *src = (double *)frame->extended_data[chan];
double *dst = (double *)out_frame->extended_data[chan];
double delayed_0, delayed_1;
double delayed;
double in, out;
int channel_phase = chan * s->lfo_length * s->channel_phase + .5;
double delay = s->lfo[(s->lfo_pos + channel_phase) % s->lfo_length];
int int_delay = (int)delay;
double frac_delay = modf(delay, &delay);
double *delay_buffer = (double *)s->delay_buffer[chan];
in = src[i];
delay_buffer[s->delay_buf_pos] = in + s->delay_last[chan] *
s->feedback_gain;
delayed_0 = delay_buffer[(s->delay_buf_pos + int_delay++) % s->max_samples];
delayed_1 = delay_buffer[(s->delay_buf_pos + int_delay++) % s->max_samples];
if (s->interpolation == INTERPOLATION_LINEAR) {
delayed = delayed_0 + (delayed_1 - delayed_0) * frac_delay;
} else {
double a, b;
double delayed_2 = delay_buffer[(s->delay_buf_pos + int_delay++) % s->max_samples];
delayed_2 -= delayed_0;
delayed_1 -= delayed_0;
a = delayed_2 * .5 - delayed_1;
b = delayed_1 * 2 - delayed_2 *.5;
delayed = delayed_0 + (a * frac_delay + b) * frac_delay;
}
s->delay_last[chan] = delayed;
out = in * s->in_gain + delayed * s->delay_gain;
dst[i] = out;
}
s->lfo_pos = (s->lfo_pos + 1) % s->lfo_length;
}
if (frame != out_frame)
av_frame_free(&frame);
return ff_filter_frame(ctx->outputs[0], out_frame);
}
Example 11: decode_frame
static int decode_frame(AVCodecContext *avctx,
void *data, int *got_frame,
AVPacket *avpkt)
{
ASV1Context * const a = avctx->priv_data;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
AVFrame *picture = data;
AVFrame * const p = &a->picture;
int mb_x, mb_y, ret;
if (p->data[0])
avctx->release_buffer(avctx, p);
p->reference = 0;
if ((ret = ff_get_buffer(avctx, p)) < 0) {
av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return ret;
}
p->pict_type = AV_PICTURE_TYPE_I;
p->key_frame = 1;
av_fast_padded_malloc(&a->bitstream_buffer, &a->bitstream_buffer_size,
buf_size);
if (!a->bitstream_buffer)
return AVERROR(ENOMEM);
if (avctx->codec_id == AV_CODEC_ID_ASV1)
a->dsp.bswap_buf((uint32_t*)a->bitstream_buffer, (const uint32_t*)buf, buf_size/4);
else {
int i;
for (i = 0; i < buf_size; i++)
a->bitstream_buffer[i] = ff_reverse[buf[i]];
}
init_get_bits(&a->gb, a->bitstream_buffer, buf_size*8);
for (mb_y = 0; mb_y < a->mb_height2; mb_y++) {
for (mb_x = 0; mb_x < a->mb_width2; mb_x++) {
if ((ret = decode_mb(a, a->block)) < 0)
return ret;
idct_put(a, mb_x, mb_y);
}
}
if (a->mb_width2 != a->mb_width) {
mb_x = a->mb_width2;
for (mb_y = 0; mb_y < a->mb_height2; mb_y++) {
if ((ret = decode_mb(a, a->block)) < 0)
return ret;
idct_put(a, mb_x, mb_y);
}
}
if (a->mb_height2 != a->mb_height) {
mb_y = a->mb_height2;
for (mb_x = 0; mb_x < a->mb_width; mb_x++) {
if ((ret = decode_mb(a, a->block)) < 0)
return ret;
idct_put(a, mb_x, mb_y);
}
}
*picture = a->picture;
*got_frame = 1;
emms_c();
return (get_bits_count(&a->gb) + 31) / 32 * 4;
}
Example 12: main
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <inttypes.h>
#include <fcntl.h>
#include <time.h>
#include <libavformat/avformat.h>

int main(int argc, char** argv) {
if (argc != 2)
fprintf(stderr, "usage: %s webm-file\n", argv[0]), exit(1);
char errmsg[512];
// Switch stdin to non-blocking IO to test out a non-blocking av_read_frame()
if ( fcntl(0, F_SETFL, fcntl(0, F_GETFL, NULL) | O_NONBLOCK) == -1 )
perror("fcntl"), exit(1);
av_register_all();
AVInputFormat* webm_fmt = av_find_input_format("webm");
AVFormatContext* demuxer = avformat_alloc_context();
demuxer->flags |= AVFMT_FLAG_NONBLOCK;
int error = avformat_open_input(&demuxer, argv[1], webm_fmt, NULL);
//int error = avformat_open_input(&demuxer, "pipe:0", webm_fmt, NULL);
if (error < 0)
fprintf(stderr, "avformat_open_input(): %s\n", av_make_error_string(errmsg, sizeof(errmsg), error)), exit(1);
printf("found %d streams:\n", demuxer->nb_streams);
for(size_t i = 0; i < demuxer->nb_streams; i++) {
AVStream* stream = demuxer->streams[i];
printf("%d: time base %d/%d, codec: %s, extradata: %p, %d bytes\n",
stream->index, stream->time_base.num, stream->time_base.den,
stream->codec->codec_name, stream->codec->extradata, stream->codec->extradata_size);
switch (stream->codec->codec_type) {
case AVMEDIA_TYPE_VIDEO:
printf(" video, w: %d, h: %d, sar: %d/%d, %dx%d\n",
stream->codec->width, stream->codec->height, stream->sample_aspect_ratio.num, stream->sample_aspect_ratio.den,
stream->codec->width * stream->sample_aspect_ratio.num / stream->sample_aspect_ratio.den, stream->codec->height);
break;
case AVMEDIA_TYPE_AUDIO:
printf(" audio, %d channels, sampel rate: %d, bits per sample: %d\n",
stream->codec->channels, stream->codec->sample_rate, stream->codec->bits_per_coded_sample);
break;
default:
break;
}
}
AVPacket packet;
int ret = 0;
while (true) {
ret = av_read_frame(demuxer, &packet);
if (ret == AVERROR(EAGAIN)) {
printf("sleep\n");
struct timespec duration = {0, 250 * 1000000};
nanosleep(&duration, NULL);
continue;
} else if (ret != 0) {
break;
}
if (packet.flags & AV_PKT_FLAG_KEY && packet.stream_index == 0)
printf("keyframe: stream %d, pts: %lu, dts: %lu, duration: %d, buf: %p\n", packet.stream_index, packet.pts, packet.dts, packet.duration, packet.buf);
av_free_packet(&packet);
}
avformat_close_input(&demuxer);
return 0;
}
Example 13: get_siz
/** get sizes and offsets of image, tiles; number of components */
static int get_siz(J2kDecoderContext *s)
{
int i, ret;
if (bytestream2_get_bytes_left(&s->g) < 36)
return AVERROR(EINVAL);
bytestream2_get_be16u(&s->g); // Rsiz (skipped)
s->width = bytestream2_get_be32u(&s->g); // width
s->height = bytestream2_get_be32u(&s->g); // height
s->image_offset_x = bytestream2_get_be32u(&s->g); // X0Siz
s->image_offset_y = bytestream2_get_be32u(&s->g); // Y0Siz
s->tile_width = bytestream2_get_be32u(&s->g); // XTSiz
s->tile_height = bytestream2_get_be32u(&s->g); // YTSiz
s->tile_offset_x = bytestream2_get_be32u(&s->g); // XT0Siz
s->tile_offset_y = bytestream2_get_be32u(&s->g); // YT0Siz
s->ncomponents = bytestream2_get_be16u(&s->g); // CSiz
if(s->tile_width<=0 || s->tile_height<=0)
return AVERROR(EINVAL);
if (bytestream2_get_bytes_left(&s->g) < 3 * s->ncomponents)
return AVERROR(EINVAL);
for (i = 0; i < s->ncomponents; i++){ // Ssiz_i XRsiz_i, YRsiz_i
uint8_t x = bytestream2_get_byteu(&s->g);
s->cbps[i] = (x & 0x7f) + 1;
s->precision = FFMAX(s->cbps[i], s->precision);
s->sgnd[i] = !!(x & 0x80);
s->cdx[i] = bytestream2_get_byteu(&s->g);
s->cdy[i] = bytestream2_get_byteu(&s->g);
}
s->numXtiles = ff_j2k_ceildiv(s->width - s->tile_offset_x, s->tile_width);
s->numYtiles = ff_j2k_ceildiv(s->height - s->tile_offset_y, s->tile_height);
if(s->numXtiles * (uint64_t)s->numYtiles > INT_MAX/sizeof(J2kTile))
return AVERROR(EINVAL);
s->tile = av_mallocz(s->numXtiles * s->numYtiles * sizeof(J2kTile));
if (!s->tile)
return AVERROR(ENOMEM);
for (i = 0; i < s->numXtiles * s->numYtiles; i++){
J2kTile *tile = s->tile + i;
tile->comp = av_mallocz(s->ncomponents * sizeof(J2kComponent));
if (!tile->comp)
return AVERROR(ENOMEM);
}
s->avctx->width = s->width - s->image_offset_x;
s->avctx->height = s->height - s->image_offset_y;
switch(s->ncomponents){
case 1:
if (s->precision > 8) {
s->avctx->pix_fmt = PIX_FMT_GRAY16;
} else {
s->avctx->pix_fmt = PIX_FMT_GRAY8;
}
break;
case 3:
if (s->precision > 8) {
s->avctx->pix_fmt = PIX_FMT_RGB48;
} else {
s->avctx->pix_fmt = PIX_FMT_RGB24;
}
break;
case 4:
s->avctx->pix_fmt = PIX_FMT_RGBA;
break;
}
if (s->picture.data[0])
s->avctx->release_buffer(s->avctx, &s->picture);
if ((ret = s->avctx->get_buffer(s->avctx, &s->picture)) < 0)
return ret;
s->picture.pict_type = AV_PICTURE_TYPE_I;
s->picture.key_frame = 1;
return 0;
}
Example 14: read_frame
static int read_frame(BVID_DemuxContext *vid, ByteIOContext *pb, AVPacket *pkt,
uint8_t block_type, AVFormatContext *s, int npixels)
{
uint8_t * vidbuf_start = NULL;
int vidbuf_nbytes = 0;
int code;
int bytes_copied = 0;
int position, delay;
unsigned int vidbuf_capacity;
vidbuf_start = av_malloc(vidbuf_capacity = BUFFER_PADDING_SIZE);
if(!vidbuf_start)
return AVERROR(ENOMEM);
// save the file position for the packet, include block type
position = url_ftell(pb) - 1;
vidbuf_start[vidbuf_nbytes++] = block_type;
// get the video delay (next int16), and set the presentation time
delay = get_le16(pb);
// set the y offset if it exists (decoder header data should be in data section)
if(block_type == VIDEO_YOFF_P_FRAME){
if(get_buffer(pb, &vidbuf_start[vidbuf_nbytes], 2) != 2)
goto fail;
vidbuf_nbytes += 2;
}
do{
uint8_t *tmp = av_fast_realloc(vidbuf_start, &vidbuf_capacity, vidbuf_nbytes + BUFFER_PADDING_SIZE);
if (!tmp) { // free the old buffer instead of leaking it when the realloc fails
av_free(vidbuf_start);
return AVERROR(ENOMEM);
}
vidbuf_start = tmp;
code = get_byte(pb);
vidbuf_start[vidbuf_nbytes++] = code;
if(code >= 0x80){ // rle sequence
if(block_type == VIDEO_I_FRAME)
vidbuf_start[vidbuf_nbytes++] = get_byte(pb);
} else if(code){ // plain sequence
if(get_buffer(pb, &vidbuf_start[vidbuf_nbytes], code) != code)
goto fail;
vidbuf_nbytes += code;
}
bytes_copied += code & 0x7F;
if(bytes_copied == npixels){ // sometimes no stop character is given, need to keep track of bytes copied
// may contain a 0 byte even if all pixels were read
if(get_byte(pb))
url_fseek(pb, -1, SEEK_CUR);
break;
}
if(bytes_copied > npixels)
goto fail;
} while(code);
// copy data into packet
if(av_new_packet(pkt, vidbuf_nbytes) < 0)
goto fail;
memcpy(pkt->data, vidbuf_start, vidbuf_nbytes);
av_free(vidbuf_start);
pkt->pos = position;
pkt->stream_index = 0; // use the video decoder, which was initialized as the first stream
pkt->pts = vid->video_pts;
vid->video_pts += vid->bethsoft_global_delay + delay;
vid->nframes--; // used to check if all the frames were read
return vidbuf_nbytes;
fail:
av_free(vidbuf_start);
return -1;
}
Example 15: ff_jni_init_jfields
int ff_jni_init_jfields(JNIEnv *env, void *jfields, const struct FFJniField *jfields_mapping, int global, void *log_ctx)
{
int i, ret = 0;
jclass last_clazz = NULL;
for (i = 0; jfields_mapping[i].name; i++) {
int mandatory = jfields_mapping[i].mandatory;
enum FFJniFieldType type = jfields_mapping[i].type;
if (type == FF_JNI_CLASS) {
jclass clazz;
last_clazz = NULL;
clazz = (*env)->FindClass(env, jfields_mapping[i].name);
if ((ret = ff_jni_exception_check(env, mandatory, log_ctx)) < 0 && mandatory) {
goto done;
}
last_clazz = *(jclass*)((uint8_t*)jfields + jfields_mapping[i].offset) =
global ? (*env)->NewGlobalRef(env, clazz) : clazz;
} else {
if (!last_clazz) {
ret = AVERROR_EXTERNAL;
break;
}
switch(type) {
case FF_JNI_FIELD: {
jfieldID field_id = (*env)->GetFieldID(env, last_clazz, jfields_mapping[i].method, jfields_mapping[i].signature);
if ((ret = ff_jni_exception_check(env, mandatory, log_ctx)) < 0 && mandatory) {
goto done;
}
*(jfieldID*)((uint8_t*)jfields + jfields_mapping[i].offset) = field_id;
break;
}
case FF_JNI_STATIC_FIELD: {
jfieldID field_id = (*env)->GetStaticFieldID(env, last_clazz, jfields_mapping[i].method, jfields_mapping[i].signature);
if ((ret = ff_jni_exception_check(env, mandatory, log_ctx)) < 0 && mandatory) {
goto done;
}
*(jfieldID*)((uint8_t*)jfields + jfields_mapping[i].offset) = field_id;
break;
}
case FF_JNI_METHOD: {
jmethodID method_id = (*env)->GetMethodID(env, last_clazz, jfields_mapping[i].method, jfields_mapping[i].signature);
if ((ret = ff_jni_exception_check(env, mandatory, log_ctx)) < 0 && mandatory) {
goto done;
}
*(jmethodID*)((uint8_t*)jfields + jfields_mapping[i].offset) = method_id;
break;
}
case FF_JNI_STATIC_METHOD: {
jmethodID method_id = (*env)->GetStaticMethodID(env, last_clazz, jfields_mapping[i].method, jfields_mapping[i].signature);
if ((ret = ff_jni_exception_check(env, mandatory, log_ctx)) < 0 && mandatory) {
goto done;
}
*(jmethodID*)((uint8_t*)jfields + jfields_mapping[i].offset) = method_id;
break;
}
default:
av_log(log_ctx, AV_LOG_ERROR, "Unknown JNI field type\n");
ret = AVERROR(EINVAL);
goto done;
}
}
}
done:
if (ret < 0) {
/* reset jfields in case of failure so it does not leak references */
ff_jni_reset_jfields(env, jfields, jfields_mapping, global, log_ctx);
}
return ret;
}
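A mapping table for this function pairs each Java class, field, or method with an offset into a caller-defined jfields struct, terminated by an entry with a NULL name. The sketch below is an assumption-laden illustration: the MyJFields layout and the designated-initializer style are hypothetical, and only the FFJniField member names actually used by the function above (name, method, signature, type, offset, mandatory) are relied on. It assumes <stddef.h> for offsetof plus libavcodec's internal ffjni.h and the JNI headers.

/* Hypothetical mapping for ff_jni_init_jfields(); names are illustrative. */
struct MyJFields {
    jclass clazz;
    jmethodID to_string;
};

static const struct FFJniField my_mapping[] = {
    { .name = "java/lang/Object", .type = FF_JNI_CLASS,
      .offset = offsetof(struct MyJFields, clazz), .mandatory = 1 },
    { .name = "java/lang/Object", .method = "toString",
      .signature = "()Ljava/lang/String;", .type = FF_JNI_METHOD,
      .offset = offsetof(struct MyJFields, to_string), .mandatory = 1 },
    { 0 }, /* NULL name terminates the loop in ff_jni_init_jfields() */
};

/* usage sketch: struct MyJFields jf;
 * ret = ff_jni_init_jfields(env, &jf, my_mapping, 1, log_ctx); */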