

C++ FFABS Function Code Examples

This article collects and summarizes typical usage examples of the C++ FFABS function. If you have been wondering what exactly FFABS does, how to call it, or what real-world uses look like, the hand-picked examples below may help.


The following presents 15 code examples of the FFABS function, ordered by popularity by default.
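
Before diving into the examples, a quick orientation: FFABS is FFmpeg/Libav's absolute-value macro, declared in libavutil/common.h. Because it is a plain macro rather than a typed function, it works on int, int64_t and float operands alike, which is why the call sites below mix all three. The short, self-contained sketch that follows shows the conventional definition together with the typical "smallest FFABS(error) wins" selection loop that Examples 2, 4 and 5 use to pick the table entry closest to a target value; the table contents and the target here are made up purely for illustration.

#include <limits.h>
#include <stdio.h>

/* FFABS as conventionally defined in FFmpeg's libavutil/common.h:
 * a plain macro, so it works for any signed arithmetic type. */
#define FFABS(a) ((a) >= 0 ? (a) : (-(a)))

int main(void)
{
    /* Illustrative data only: pick the table entry closest to a target,
     * the same "minimize FFABS(error)" pattern seen in Examples 2, 4 and 5. */
    static const int table[] = { 32, 40, 48, 56, 64, 80, 96, 112, 128 };
    const int target = 100;
    int best_idx = 0;
    int best_error = INT_MAX;
    int i;

    for (i = 0; i < (int)(sizeof(table) / sizeof(table[0])); i++) {
        int error = FFABS(table[i] - target);
        if (error < best_error) {
            best_error = error;
            best_idx   = i;
        }
    }
    printf("closest entry: %d (error %d)\n", table[best_idx], best_error);
    return 0;
}

Compiled on its own (e.g. cc ffabs_demo.c), this prints "closest entry: 96 (error 4)".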

Example 1: build_ordered_chapter_timeline

void build_ordered_chapter_timeline(struct MPContext *mpctx)
{
    struct MPOpts *opts = mpctx->opts;

    if (!opts->ordered_chapters) {
        mp_msg(MSGT_CPLAYER, MSGL_INFO, "File uses ordered chapters, but "
               "you have disabled support for them. Ignoring.\n");
        return;
    }

    mp_msg(MSGT_CPLAYER, MSGL_INFO, "File uses ordered chapters, will build "
           "edit timeline.\n");

    struct demuxer *demuxer = mpctx->demuxer;
    struct matroska_data *m = &demuxer->matroska_data;

    // +1 because sources/uid_map[0] is original file even if all chapters
    // actually use other sources and need separate entries
    struct demuxer **sources = talloc_array_ptrtype(NULL, sources,
                                                    m->num_ordered_chapters+1);
    sources[0] = mpctx->demuxer;
    unsigned char (*uid_map)[16] = talloc_array_ptrtype(NULL, uid_map,
                                                 m->num_ordered_chapters + 1);
    int num_sources = 1;
    memcpy(uid_map[0], m->segment_uid, 16);

    for (int i = 0; i < m->num_ordered_chapters; i++) {
        struct matroska_chapter *c = m->ordered_chapters + i;
        if (!c->has_segment_uid)
            memcpy(c->segment_uid, m->segment_uid, 16);

        for (int j = 0; j < num_sources; j++)
            if (!memcmp(c->segment_uid, uid_map[j], 16))
                goto found1;
        memcpy(uid_map[num_sources], c->segment_uid, 16);
        sources[num_sources] = NULL;
        num_sources++;
    found1:
        ;
    }

    num_sources = find_ordered_chapter_sources(mpctx, sources, num_sources,
                                               uid_map);


    // +1 for terminating chapter with start time marking end of last real one
    struct timeline_part *timeline = talloc_array_ptrtype(NULL, timeline,
                                                  m->num_ordered_chapters + 1);
    struct chapter *chapters = talloc_array_ptrtype(NULL, chapters,
                                                    m->num_ordered_chapters);
    uint64_t starttime = 0;
    uint64_t missing_time = 0;
    int part_count = 0;
    int num_chapters = 0;
    uint64_t prev_part_offset = 0;
    for (int i = 0; i < m->num_ordered_chapters; i++) {
        struct matroska_chapter *c = m->ordered_chapters + i;

        int j;
        for (j = 0; j < num_sources; j++) {
            if (!memcmp(c->segment_uid, uid_map[j], 16))
                goto found2;
        }
        missing_time += c->end - c->start;
        continue;
    found2:;
        /* Only add a separate part if the time or file actually changes.
         * Matroska files have chapter divisions that are redundant from
         * timeline point of view because the same chapter structure is used
         * both to specify the timeline and for normal chapter information.
         * Removing a missing inserted external chapter can also cause this.
         * We allow for a configurable fudge factor because of files which
         * specify chapter end times that are one frame too early;
         * we don't want to try seeking over a one frame gap. */
        int64_t join_diff = c->start - starttime - prev_part_offset;
        if (part_count == 0
            || FFABS(join_diff) > opts->chapter_merge_threshold * 1000000
            || sources[j] != timeline[part_count - 1].source) {
            timeline[part_count].source = sources[j];
            timeline[part_count].start = starttime / 1e9;
            timeline[part_count].source_start = c->start / 1e9;
            prev_part_offset = c->start - starttime;
            part_count++;
        } else if (part_count > 0 && join_diff) {
            /* Chapter was merged at an inexact boundary;
             * adjust timestamps to match. */
            mp_msg(MSGT_CPLAYER, MSGL_V, "Merging timeline part %d with "
                   "offset %g ms.\n", i, join_diff / 1e6);
            starttime += join_diff;
        }
        chapters[num_chapters].start = starttime / 1e9;
        chapters[num_chapters].name = talloc_strdup(chapters, c->name);
        starttime += c->end - c->start;
        num_chapters++;
    }
    timeline[part_count].start = starttime / 1e9;
    talloc_free(uid_map);

    if (!part_count) {
        // None of the parts come from the file itself???
//......... some code omitted here .........
Developer: ArcherSeven, Project: mpv, Lines: 101, Source: tl_matroska.c

Example 2: mpeg1_encode_sequence_header

/* put sequence header if needed */
static void mpeg1_encode_sequence_header(MpegEncContext *s)
{
        unsigned int vbv_buffer_size;
        unsigned int fps, v;
        int i;
        uint64_t time_code;
        float best_aspect_error= 1E10;
        float aspect_ratio= av_q2d(s->avctx->sample_aspect_ratio);
        int constraint_parameter_flag;

        if(aspect_ratio==0.0) aspect_ratio= 1.0; //pixel aspect 1:1 (VGA)

        if (s->current_picture.f.key_frame) {
            AVRational framerate = ff_mpeg12_frame_rate_tab[s->frame_rate_index];

            /* mpeg1 header repeated every gop */
            put_header(s, SEQ_START_CODE);

            put_sbits(&s->pb, 12, s->width  & 0xFFF);
            put_sbits(&s->pb, 12, s->height & 0xFFF);

            for(i=1; i<15; i++){
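                /* find the aspect_ratio_info index whose table value is closest
                 * (smallest FFABS error) to the coded sample aspect ratio */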
                float error= aspect_ratio;
                if(s->codec_id == AV_CODEC_ID_MPEG1VIDEO || i <=1)
                    error-= 1.0/ff_mpeg1_aspect[i];
                else
                    error-= av_q2d(ff_mpeg2_aspect[i])*s->height/s->width;

                error= FFABS(error);

                if(error < best_aspect_error){
                    best_aspect_error= error;
                    s->aspect_ratio_info= i;
                }
            }

            put_bits(&s->pb, 4, s->aspect_ratio_info);
            put_bits(&s->pb, 4, s->frame_rate_index);

            if(s->avctx->rc_max_rate){
                v = (s->avctx->rc_max_rate + 399) / 400;
                if (v > 0x3ffff && s->codec_id == AV_CODEC_ID_MPEG1VIDEO)
                    v = 0x3ffff;
            }else{
                v= 0x3FFFF;
            }

            if(s->avctx->rc_buffer_size)
                vbv_buffer_size = s->avctx->rc_buffer_size;
            else
                /* VBV calculation: Scaled so that a VCD has the proper VBV size of 40 kilobytes */
                vbv_buffer_size = (( 20 * s->bit_rate) / (1151929 / 2)) * 8 * 1024;
            vbv_buffer_size= (vbv_buffer_size + 16383) / 16384;

            put_sbits(&s->pb, 18, v);
            put_bits(&s->pb, 1, 1); /* marker */
            put_sbits(&s->pb, 10, vbv_buffer_size);

            constraint_parameter_flag=
                s->width <= 768 && s->height <= 576 &&
                s->mb_width * s->mb_height <= 396 &&
                s->mb_width * s->mb_height * framerate.num <= framerate.den*396*25 &&
                framerate.num <= framerate.den*30 &&
                s->avctx->me_range && s->avctx->me_range < 128 &&
                vbv_buffer_size <= 20 &&
                v <= 1856000/400 &&
                s->codec_id == AV_CODEC_ID_MPEG1VIDEO;

            put_bits(&s->pb, 1, constraint_parameter_flag);

            ff_write_quant_matrix(&s->pb, s->avctx->intra_matrix);
            ff_write_quant_matrix(&s->pb, s->avctx->inter_matrix);

            if(s->codec_id == AV_CODEC_ID_MPEG2VIDEO){
                put_header(s, EXT_START_CODE);
                put_bits(&s->pb, 4, 1); //seq ext

                put_bits(&s->pb, 1, s->avctx->profile == 0); //escx 1 for 4:2:2 profile

                put_bits(&s->pb, 3, s->avctx->profile); //profile
                put_bits(&s->pb, 4, s->avctx->level); //level

                put_bits(&s->pb, 1, s->progressive_sequence);
                put_bits(&s->pb, 2, s->chroma_format);
                put_bits(&s->pb, 2, s->width >>12);
                put_bits(&s->pb, 2, s->height>>12);
                put_bits(&s->pb, 12, v>>18); //bitrate ext
                put_bits(&s->pb, 1, 1); //marker
                put_bits(&s->pb, 8, vbv_buffer_size >>10); //vbv buffer ext
                put_bits(&s->pb, 1, s->low_delay);
                put_bits(&s->pb, 2, s->mpeg2_frame_rate_ext.num-1); // frame_rate_ext_n
                put_bits(&s->pb, 5, s->mpeg2_frame_rate_ext.den-1); // frame_rate_ext_d
            }
Developer: Armada651, Project: FFmpeg, Lines: 94, Source: mpeg12enc.c

Example 3: search_for_quantizers_faac


//......... some code omitted here .........
                    if (sce->ics.num_windows == 1 && maxval < t) {
                        maxval  = t;
                        peakpos = start+i;
                    }
                }
            }
            if (sce->ics.num_windows == 1) {
                start2 = FFMAX(peakpos - 2, start2);
                end2   = FFMIN(peakpos + 3, end2);
            } else {
                start2 -= start;
                end2   -= start;
            }
            start += size;
            thr = pow(thr / (avg_energy * (end2 - start2)), 0.3 + 0.1*(lastband - g) / lastband);
            t   = 1.0 - (1.0 * start2 / last);
            uplim[w*16+g] = distfact / (1.4 * thr + t*t*t + 0.075);
        }
    }
    memset(sce->sf_idx, 0, sizeof(sce->sf_idx));
    abs_pow34_v(s->scoefs, sce->coeffs, 1024);
    for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
        start = w*128;
        for (g = 0;  g < sce->ics.num_swb; g++) {
            const float *coefs  = sce->coeffs + start;
            const float *scaled = s->scoefs   + start;
            const int size      = sce->ics.swb_sizes[g];
            int scf, prev_scf, step;
            int min_scf = -1, max_scf = 256;
            float curdiff;
            if (maxq[w*16+g] < 21.544) {
                sce->zeroes[w*16+g] = 1;
                start += size;
                continue;
            }
            sce->zeroes[w*16+g] = 0;
            scf  = prev_scf = av_clip(SCALE_ONE_POS - SCALE_DIV_512 - log2f(1/maxq[w*16+g])*16/3, 60, 218);
            step = 16;
            for (;;) {
                float dist = 0.0f;
                int quant_max;

                for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) {
                    int b;
                    dist += quantize_band_cost(s, coefs + w2*128,
                                               scaled + w2*128,
                                               sce->ics.swb_sizes[g],
                                               scf,
                                               ESC_BT,
                                               lambda,
                                               INFINITY,
                                               &b);
                    dist -= b;
                }
                dist *= 1.0f / 512.0f / lambda;
                quant_max = quant(maxq[w*16+g], ff_aac_pow2sf_tab[POW_SF2_ZERO - scf + SCALE_ONE_POS - SCALE_DIV_512]);
                if (quant_max >= 8191) { // too much, return to the previous quantizer
                    sce->sf_idx[w*16+g] = prev_scf;
                    break;
                }
                prev_scf = scf;
                curdiff = fabsf(dist - uplim[w*16+g]);
                if (curdiff <= 1.0f)
                    step = 0;
                else
                    step = log2f(curdiff);
                if (dist > uplim[w*16+g])
                    step = -step;
                scf += step;
                scf = av_clip_uint8(scf);
                step = scf - prev_scf;
                if (FFABS(step) <= 1 || (step > 0 && scf >= max_scf) || (step < 0 && scf <= min_scf)) {
                    sce->sf_idx[w*16+g] = av_clip(scf, min_scf, max_scf);
                    break;
                }
                if (step > 0)
                    min_scf = prev_scf;
                else
                    max_scf = prev_scf;
            }
            start += size;
        }
    }
    minq = sce->sf_idx[0] ? sce->sf_idx[0] : INT_MAX;
    for (i = 1; i < 128; i++) {
        if (!sce->sf_idx[i])
            sce->sf_idx[i] = sce->sf_idx[i-1];
        else
            minq = FFMIN(minq, sce->sf_idx[i]);
    }
    if (minq == INT_MAX)
        minq = 0;
    minq = FFMIN(minq, SCALE_MAX_POS);
    maxsf = FFMIN(minq + SCALE_MAX_DIFF, SCALE_MAX_POS);
    for (i = 126; i >= 0; i--) {
        if (!sce->sf_idx[i])
            sce->sf_idx[i] = sce->sf_idx[i+1];
        sce->sf_idx[i] = av_clip(sce->sf_idx[i], minq, maxsf);
    }
}
Developer: Arcen, Project: FFmpeg, Lines: 101, Source: aaccoder.c

Example 4: mp3_write_xing

/*
 * Write an empty XING header and initialize respective data.
 */
static void mp3_write_xing(AVFormatContext *s)
{
    MP3Context       *mp3 = s->priv_data;
    AVCodecContext *codec = s->streams[mp3->audio_stream_idx]->codec;
    AVDictionaryEntry *enc = av_dict_get(s->streams[mp3->audio_stream_idx]->metadata, "encoder", NULL, 0);
    AVIOContext *dyn_ctx;
    int32_t        header;
    MPADecodeHeader  mpah;
    int srate_idx, i, channels;
    int bitrate_idx;
    int best_bitrate_idx;
    int best_bitrate_error = INT_MAX;
    int ret;
    int ver = 0;
    int lsf, bytes_needed;

    if (!s->pb->seekable || !mp3->write_xing)
        return;

    for (i = 0; i < FF_ARRAY_ELEMS(avpriv_mpa_freq_tab); i++) {
        const uint16_t base_freq = avpriv_mpa_freq_tab[i];

        if      (codec->sample_rate == base_freq)     ver = 0x3; // MPEG 1
        else if (codec->sample_rate == base_freq / 2) ver = 0x2; // MPEG 2
        else if (codec->sample_rate == base_freq / 4) ver = 0x0; // MPEG 2.5
        else continue;

        srate_idx = i;
        break;
    }
    if (i == FF_ARRAY_ELEMS(avpriv_mpa_freq_tab)) {
        av_log(s, AV_LOG_WARNING, "Unsupported sample rate, not writing Xing "
               "header.\n");
        return;
    }

    switch (codec->channels) {
    case 1:  channels = MPA_MONO;                                          break;
    case 2:  channels = MPA_STEREO;                                        break;
    default: av_log(s, AV_LOG_WARNING, "Unsupported number of channels, "
                    "not writing Xing header.\n");
             return;
    }

    /* dummy MPEG audio header */
    header  =  0xff                                  << 24; // sync
    header |= (0x7 << 5 | ver << 3 | 0x1 << 1 | 0x1) << 16; // sync/audio-version/layer 3/no crc
    header |= (srate_idx << 2) << 8;
    header |= channels << 6;

    lsf = !((header & (1 << 20) && header & (1 << 19)));

    mp3->xing_offset = xing_offtbl[ver != 3][channels == 1] + 4;
    bytes_needed     = mp3->xing_offset + XING_SIZE;

    for (bitrate_idx = 1; bitrate_idx < 15; bitrate_idx++) {
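        /* pick the coded bitrate index whose nominal bit rate is closest to the requested codec->bit_rate */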
        int bit_rate = 1000 * avpriv_mpa_bitrate_tab[lsf][3 - 1][bitrate_idx];
        int error    = FFABS(bit_rate - codec->bit_rate);

        if (error < best_bitrate_error){
            best_bitrate_error = error;
            best_bitrate_idx   = bitrate_idx;
        }
    }

    for (bitrate_idx = best_bitrate_idx; bitrate_idx < 15; bitrate_idx++) {
        int32_t mask = bitrate_idx << (4 + 8);
        header |= mask;

        avpriv_mpegaudio_decode_header(&mpah, header);

        if (bytes_needed <= mpah.frame_size)
            break;

        header &= ~mask;
    }

    ret = avio_open_dyn_buf(&dyn_ctx);
    if (ret < 0)
        return;

    avio_wb32(dyn_ctx, header);

    avpriv_mpegaudio_decode_header(&mpah, header);

    av_assert0(mpah.frame_size >= bytes_needed);

    ffio_fill(dyn_ctx, 0, mp3->xing_offset - 4);
    ffio_wfourcc(dyn_ctx, "Xing");
    avio_wb32(dyn_ctx, 0x01 | 0x02 | 0x04 | 0x08);  // frames / size / TOC / vbr scale

    mp3->size = mpah.frame_size;
    mp3->want = 1;

    avio_wb32(dyn_ctx, 0);  // frames
    avio_wb32(dyn_ctx, 0);  // size

//......... some code omitted here .........
Developer: founderznd, Project: libav, Lines: 101, Source: mp3enc.c

Example 5: ff_h263_encode_picture_header

void ff_h263_encode_picture_header(MpegEncContext * s, int picture_number)
{
    int format, coded_frame_rate, coded_frame_rate_base, i, temp_ref;
    int best_clock_code=1;
    int best_divisor=60;
    int best_error= INT_MAX;

    if(s->h263_plus){
        for(i=0; i<2; i++){
            int div, error;
            div= (s->avctx->time_base.num*1800000LL + 500LL*s->avctx->time_base.den) / ((1000LL+i)*s->avctx->time_base.den);
            div= av_clip(div, 1, 127);
            error= FFABS(s->avctx->time_base.num*1800000LL - (1000LL+i)*s->avctx->time_base.den*div);
            if(error < best_error){
                best_error= error;
                best_divisor= div;
                best_clock_code= i;
            }
        }
    }
    s->custom_pcf= best_clock_code!=1 || best_divisor!=60;
    coded_frame_rate= 1800000;
    coded_frame_rate_base= (1000+best_clock_code)*best_divisor;

    avpriv_align_put_bits(&s->pb);

    /* Update the pointer to last GOB */
    s->ptr_lastgob = put_bits_ptr(&s->pb);
    put_bits(&s->pb, 22, 0x20); /* PSC */
    temp_ref= s->picture_number * (int64_t)coded_frame_rate * s->avctx->time_base.num / //FIXME use timestamp
                         (coded_frame_rate_base * (int64_t)s->avctx->time_base.den);
    put_sbits(&s->pb, 8, temp_ref); /* TemporalReference */

    put_bits(&s->pb, 1, 1);     /* marker */
    put_bits(&s->pb, 1, 0);     /* h263 id */
    put_bits(&s->pb, 1, 0);     /* split screen off */
    put_bits(&s->pb, 1, 0);     /* camera  off */
    put_bits(&s->pb, 1, 0);     /* freeze picture release off */

    format = ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format), s->width, s->height);
    if (!s->h263_plus) {
        /* H.263v1 */
        put_bits(&s->pb, 3, format);
        put_bits(&s->pb, 1, (s->pict_type == AV_PICTURE_TYPE_P));
        /* By now UMV IS DISABLED ON H.263v1, since the restrictions
        of H.263v1 UMV implies to check the predicted MV after
        calculation of the current MB to see if we're on the limits */
        put_bits(&s->pb, 1, 0);         /* Unrestricted Motion Vector: off */
        put_bits(&s->pb, 1, 0);         /* SAC: off */
        put_bits(&s->pb, 1, s->obmc);   /* Advanced Prediction */
        put_bits(&s->pb, 1, 0);         /* only I/P frames, no PB frame */
        put_bits(&s->pb, 5, s->qscale);
        put_bits(&s->pb, 1, 0);         /* Continuous Presence Multipoint mode: off */
    } else {
        int ufep=1;
        /* H.263v2 */
        /* H.263 Plus PTYPE */

        put_bits(&s->pb, 3, 7);
        put_bits(&s->pb,3,ufep); /* Update Full Extended PTYPE */
        if (format == 8)
            put_bits(&s->pb,3,6); /* Custom Source Format */
        else
            put_bits(&s->pb, 3, format);

        put_bits(&s->pb,1, s->custom_pcf);
        put_bits(&s->pb,1, s->umvplus); /* Unrestricted Motion Vector */
        put_bits(&s->pb,1,0); /* SAC: off */
        put_bits(&s->pb,1,s->obmc); /* Advanced Prediction Mode */
        put_bits(&s->pb,1,s->h263_aic); /* Advanced Intra Coding */
        put_bits(&s->pb,1,s->loop_filter); /* Deblocking Filter */
        put_bits(&s->pb,1,s->h263_slice_structured); /* Slice Structured */
        put_bits(&s->pb,1,0); /* Reference Picture Selection: off */
        put_bits(&s->pb,1,0); /* Independent Segment Decoding: off */
        put_bits(&s->pb,1,s->alt_inter_vlc); /* Alternative Inter VLC */
        put_bits(&s->pb,1,s->modified_quant); /* Modified Quantization: */
        put_bits(&s->pb,1,1); /* "1" to prevent start code emulation */
        put_bits(&s->pb,3,0); /* Reserved */

        put_bits(&s->pb, 3, s->pict_type == AV_PICTURE_TYPE_P);

        put_bits(&s->pb,1,0); /* Reference Picture Resampling: off */
        put_bits(&s->pb,1,0); /* Reduced-Resolution Update: off */
        put_bits(&s->pb,1,s->no_rounding); /* Rounding Type */
        put_bits(&s->pb,2,0); /* Reserved */
        put_bits(&s->pb,1,1); /* "1" to prevent start code emulation */

        /* This should be here if PLUSPTYPE */
        put_bits(&s->pb, 1, 0); /* Continuous Presence Multipoint mode: off */

        if (format == 8) {
            /* Custom Picture Format (CPFMT) */
            s->aspect_ratio_info= ff_h263_aspect_to_info(s->avctx->sample_aspect_ratio);

            put_bits(&s->pb,4,s->aspect_ratio_info);
            put_bits(&s->pb,9,(s->width >> 2) - 1);
            put_bits(&s->pb,1,1); /* "1" to prevent start code emulation */
            put_bits(&s->pb,9,(s->height >> 2));
            if (s->aspect_ratio_info == FF_ASPECT_EXTENDED){
                put_bits(&s->pb, 8, s->avctx->sample_aspect_ratio.num);
//......... some code omitted here .........
Developer: xkfz007, Project: libav, Lines: 101, Source: ituh263enc.c

Example 6: decode_frame

static int decode_frame(AVCodecContext *avctx,
                            void *data, int *got_frame,
                            AVPacket *avpkt)
{
    AnsiContext *s = avctx->priv_data;
    uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    const uint8_t *buf_end   = buf+buf_size;
    int ret, i, count;

    ret = ff_reget_buffer(avctx, s->frame);
    if (ret < 0){
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }
    if (!avctx->frame_number) {
        memset(s->frame->data[0], 0, avctx->height * FFABS(s->frame->linesize[0]));
        memset(s->frame->data[1], 0, AVPALETTE_SIZE);
    }

    s->frame->pict_type           = AV_PICTURE_TYPE_I;
    s->frame->palette_has_changed = 1;
    memcpy(s->frame->data[1], ff_cga_palette, 16 * 4);

    while(buf < buf_end) {
        switch(s->state) {
        case STATE_NORMAL:
            switch (buf[0]) {
            case 0x00: //NUL
            case 0x07: //BEL
            case 0x1A: //SUB
                /* ignore */
                break;
            case 0x08: //BS
                s->x = FFMAX(s->x - 1, 0);
                break;
            case 0x09: //HT
                i = s->x / FONT_WIDTH;
                count = ((i + 8) & ~7) - i;
                for (i = 0; i < count; i++)
                    draw_char(avctx, ' ');
                break;
            case 0x0A: //LF
                hscroll(avctx);
            case 0x0D: //CR
                s->x = 0;
                break;
            case 0x0C: //FF
                erase_screen(avctx);
                break;
            case 0x1B: //ESC
                s->state = STATE_ESCAPE;
                break;
            default:
                draw_char(avctx, buf[0]);
            }
            break;
        case STATE_ESCAPE:
            if (buf[0] == '[') {
                s->state   = STATE_CODE;
                s->nb_args = 0;
                s->args[0] = 0;
            } else {
                s->state = STATE_NORMAL;
                draw_char(avctx, 0x1B);
                continue;
            }
            break;
        case STATE_CODE:
            switch(buf[0]) {
            case '0': case '1': case '2': case '3': case '4':
            case '5': case '6': case '7': case '8': case '9':
                if (s->nb_args < MAX_NB_ARGS)
                    s->args[s->nb_args] = s->args[s->nb_args] * 10 + buf[0] - '0';
                break;
            case ';':
                s->nb_args++;
                if (s->nb_args < MAX_NB_ARGS)
                    s->args[s->nb_args] = 0;
                break;
            case 'M':
                s->state = STATE_MUSIC_PREAMBLE;
                break;
            case '=': case '?':
                /* ignore */
                break;
            default:
                if (s->nb_args > MAX_NB_ARGS)
                    av_log(avctx, AV_LOG_WARNING, "args overflow (%i)\n", s->nb_args);
                if (s->nb_args < MAX_NB_ARGS && s->args[s->nb_args])
                    s->nb_args++;
                if ((ret = execute_code(avctx, buf[0])) < 0)
                    return ret;
                s->state = STATE_NORMAL;
            }
            break;
        case STATE_MUSIC_PREAMBLE:
            if (buf[0] == 0x0E || buf[0] == 0x1B)
                s->state = STATE_NORMAL;
            /* ignore music data */
//......... some code omitted here .........
Developer: AVLeo, Project: libav, Lines: 101, Source: ansi.c

Example 7: decode_frame

static int decode_frame(AVCodecContext *avctx, void *data,
                        int *got_frame, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    C93DecoderContext * const c93 = avctx->priv_data;
    AVFrame * const newpic = c93->pictures[c93->currentpic];
    AVFrame * const oldpic = c93->pictures[c93->currentpic^1];
    GetByteContext gb;
    uint8_t *out;
    int stride, ret, i, x, y, b, bt = 0;

    if ((ret = ff_set_dimensions(avctx, WIDTH, HEIGHT)) < 0)
        return ret;

    c93->currentpic ^= 1;

    if ((ret = ff_reget_buffer(avctx, newpic)) < 0)
        return ret;

    stride = newpic->linesize[0];

    bytestream2_init(&gb, buf, buf_size);
    b = bytestream2_get_byte(&gb);
    if (b & C93_FIRST_FRAME) {
        newpic->pict_type = AV_PICTURE_TYPE_I;
        newpic->key_frame = 1;
    } else {
        newpic->pict_type = AV_PICTURE_TYPE_P;
        newpic->key_frame = 0;
    }

    for (y = 0; y < HEIGHT; y += 8) {
        out = newpic->data[0] + y * stride;
        for (x = 0; x < WIDTH; x += 8) {
            uint8_t *copy_from = oldpic->data[0];
            unsigned int offset, j;
            uint8_t cols[4], grps[4];
            C93BlockType block_type;

            if (!bt)
                bt = bytestream2_get_byte(&gb);

            block_type= bt & 0x0F;
            switch (block_type) {
            case C93_8X8_FROM_PREV:
                offset = bytestream2_get_le16(&gb);
                if ((ret = copy_block(avctx, out, copy_from, offset, 8, stride)) < 0)
                    return ret;
                break;

            case C93_4X4_FROM_CURR:
                copy_from = newpic->data[0];
            case C93_4X4_FROM_PREV:
                for (j = 0; j < 8; j += 4) {
                    for (i = 0; i < 8; i += 4) {
                        int offset = bytestream2_get_le16(&gb);
                        int from_x = offset % WIDTH;
                        int from_y = offset / WIDTH;
                        if (block_type == C93_4X4_FROM_CURR && from_y == y+j &&
                            (FFABS(from_x - x-i) < 4 || FFABS(from_x - x-i) > WIDTH-4)) {
                            avpriv_request_sample(avctx, "block overlap %d %d %d %d\n", from_x, x+i, from_y, y+j);
                            return AVERROR_INVALIDDATA;
                        }
                        if ((ret = copy_block(avctx, &out[j*stride+i],
                                              copy_from, offset, 4, stride)) < 0)
                            return ret;
                    }
                }
                break;

            case C93_8X8_2COLOR:
                bytestream2_get_buffer(&gb, cols, 2);
                for (i = 0; i < 8; i++) {
                    draw_n_color(out + i*stride, stride, 8, 1, 1, cols,
                                     NULL, bytestream2_get_byte(&gb));
                }

                break;

            case C93_4X4_2COLOR:
            case C93_4X4_4COLOR:
            case C93_4X4_4COLOR_GRP:
                for (j = 0; j < 8; j += 4) {
                    for (i = 0; i < 8; i += 4) {
                        if (block_type == C93_4X4_2COLOR) {
                            bytestream2_get_buffer(&gb, cols, 2);
                            draw_n_color(out + i + j*stride, stride, 4, 4,
                                    1, cols, NULL, bytestream2_get_le16(&gb));
                        } else if (block_type == C93_4X4_4COLOR) {
                            bytestream2_get_buffer(&gb, cols, 4);
                            draw_n_color(out + i + j*stride, stride, 4, 4,
                                    2, cols, NULL, bytestream2_get_le32(&gb));
                        } else {
                            bytestream2_get_buffer(&gb, grps, 4);
                            draw_n_color(out + i + j*stride, stride, 4, 4,
                                    1, cols, grps, bytestream2_get_le16(&gb));
                        }
                    }
                }
//......... some code omitted here .........
Developer: markjreed, Project: vice-emu, Lines: 101, Source: c93.c

Example 8: filter_slice16

static int filter_slice16(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ATADenoiseContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in;
    AVFrame *out = td->out;
    const int size = s->size;
    const int mid = s->mid;
    int p, x, y, i, j;

    for (p = 0; p < s->nb_planes; p++) {
        const int h = s->planeheight[p];
        const int w = s->planewidth[p];
        const int slice_start = (h * jobnr) / nb_jobs;
        const int slice_end = (h * (jobnr+1)) / nb_jobs;
        const uint16_t *src = (uint16_t *)(in->data[p] + slice_start * in->linesize[p]);
        uint16_t *dst = (uint16_t *)(out->data[p] + slice_start * out->linesize[p]);
        const int thra = s->thra[p];
        const int thrb = s->thrb[p];
        const uint8_t **data = (const uint8_t **)s->data[p];
        const int *linesize = (const int *)s->linesize[p];
        const uint16_t *srcf[SIZE];

        if (!((1 << p) & s->planes)) {
            av_image_copy_plane((uint8_t *)dst, out->linesize[p], (uint8_t *)src, in->linesize[p],
                                w * 2, slice_end - slice_start);
            continue;
        }

        for (i = 0; i < s->size; i++)
            srcf[i] = (const uint16_t *)(data[i] + slice_start * linesize[i]);

        for (y = slice_start; y < slice_end; y++) {
            for (x = 0; x < w; x++) {
                const int srcx = src[x];
                unsigned lsumdiff = 0, rsumdiff = 0;
                unsigned ldiff, rdiff;
                unsigned sum = srcx;
                int l = 0, r = 0;
                int srcjx, srcix;

                for (j = mid - 1, i = mid + 1; j >= 0 && i < size; j--, i++) {
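                    /* grow the temporal averaging window backwards (j) and forwards (i);
                     * stop once a per-sample absolute difference exceeds thra or an
                     * accumulated one exceeds thrb */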
                    srcjx = srcf[j][x];

                    ldiff = FFABS(srcx - srcjx);
                    lsumdiff += ldiff;
                    if (ldiff > thra ||
                        lsumdiff > thrb)
                        break;
                    l++;
                    sum += srcjx;

                    srcix = srcf[i][x];

                    rdiff = FFABS(srcx - srcix);
                    rsumdiff += rdiff;
                    if (rdiff > thra ||
                        rsumdiff > thrb)
                        break;
                    r++;
                    sum += srcix;
                }

                dst[x] = sum / (r + l + 1);
            }

            dst += out->linesize[p] / 2;
            src += in->linesize[p] / 2;

            for (i = 0; i < size; i++)
                srcf[i] += linesize[i] / 2;
        }
    }

    return 0;
}
Developer: Diagonactic, Project: plex-new-transcoder, Lines: 76, Source: vf_atadenoise.c

Example 9: mpeg1_encode_sequence_header

/* put sequence header if needed */
static void mpeg1_encode_sequence_header(MpegEncContext *s)
{
        unsigned int vbv_buffer_size;
        unsigned int fps, v;
        int i;
        uint64_t time_code;
        float best_aspect_error= 1E10;
        float aspect_ratio= av_q2d(s->avctx->sample_aspect_ratio);
        int constraint_parameter_flag;

        if(aspect_ratio==0.0) aspect_ratio= 1.0; //pixel aspect 1:1 (VGA)

        if (s->current_picture.key_frame) {
            AVRational framerate= ff_frame_rate_tab[s->frame_rate_index];

            /* mpeg1 header repeated every gop */
            put_header(s, SEQ_START_CODE);

            put_bits(&s->pb, 12, s->width);
            put_bits(&s->pb, 12, s->height);
#if 0 //MEANX
            for(i=1; i<15; i++){
                float error= aspect_ratio;
                if(s->codec_id == CODEC_ID_MPEG1VIDEO || i <=1)
                    error-= 1.0/ff_mpeg1_aspect[i];
                else
                    error-= av_q2d(ff_mpeg2_aspect[i])*s->height/s->width;

                error= FFABS(error);

                if(error < best_aspect_error){
                    best_aspect_error= error;
                    s->aspect_ratio_info= i;
                }
            }
#endif // MEANX
            //MEANX put_bits(&s->pb, 4, s->aspect_ratio_info);
            //MEANX put_bits(&s->pb, 4, s->frame_rate_index);
 // MEANX 4:3
	     if(s->avctx->sample_aspect_ratio.num==16 && s->avctx->sample_aspect_ratio.den==9)
            {
                //printf("FFmpeg : Wide\n");
                put_bits(&s->pb,4,3); //16:9
            }
            else        //4:3
            {
              if(s->codec_id == CODEC_ID_MPEG2VIDEO)
                put_bits(&s->pb, 4, 2);
              else
                put_bits(&s->pb, 4, 12); // MPEG1
            }
// /MEANX
// //MEANX PULLDOWN            put_bits(&s->pb, 4, s->frame_rate_index);
if((s->flags2 & CODEC_FLAG2_32_PULLDOWN) && (s->codec_id == CODEC_ID_MPEG2VIDEO))
            {           
                put_bits(&s->pb, 4,4);
            }
            else
            {                                  
                put_bits(&s->pb, 4, s->frame_rate_index);
            } //MEANX pulldown

            if(s->avctx->rc_max_rate_header){ //MEANX we use header
                v = (s->avctx->rc_max_rate_header + 399) / 400;
                if (v > 0x3ffff && s->codec_id == CODEC_ID_MPEG1VIDEO)
                    v = 0x3ffff;
            }else{
                v= 0x3FFFF;
            }
// MEANX we use rc_buffer_size_header here to force
                // a correct rc_buffer_size


            if(s->avctx->rc_buffer_size_header)
                vbv_buffer_size = s->avctx->rc_buffer_size_header;
            else
                /* VBV calculation: Scaled so that a VCD has the proper VBV size of 40 kilobytes */
                vbv_buffer_size = (( 20 * s->bit_rate) / (1151929 / 2)) * 8 * 1024;
            vbv_buffer_size= (vbv_buffer_size + 16383) / 16384;

            put_bits(&s->pb, 18, v & 0x3FFFF);
            put_bits(&s->pb, 1, 1); /* marker */
            put_bits(&s->pb, 10, vbv_buffer_size & 0x3FF);

            constraint_parameter_flag=
                s->width <= 768 && s->height <= 576 &&
                s->mb_width * s->mb_height <= 396 &&
                s->mb_width * s->mb_height * framerate.num <= framerate.den*396*25 &&
                framerate.num <= framerate.den*30 &&
                s->avctx->me_range && s->avctx->me_range < 128 &&
                vbv_buffer_size <= 20 &&
                v <= 1856000/400 &&
                s->codec_id == CODEC_ID_MPEG1VIDEO;

            put_bits(&s->pb, 1, constraint_parameter_flag);

            ff_write_quant_matrix(&s->pb, s->avctx->intra_matrix);
            ff_write_quant_matrix(&s->pb, s->avctx->inter_matrix);

//......... some code omitted here .........
Developer: BackupTheBerlios, Project: avidemux-svn, Lines: 101, Source: mpeg12enc.c

Example 10: put_subframe

static void put_subframe(DCAContext *c,
                         int32_t subband_data[8 * SUBSUBFRAMES][MAX_CHANNELS][32],
                         int subframe)
{
    int i, sub, ss, ch, max_value;
    int32_t *lfe_data = c->lfe_data + 4 * SUBSUBFRAMES * subframe;

    /* Subsubframes count */
    put_bits(&c->pb, 2, SUBSUBFRAMES -1);

    /* Partial subsubframe sample count: dummy */
    put_bits(&c->pb, 3, 0);

    /* Prediction mode: no ADPCM, in each channel and subband */
    for (ch = 0; ch < c->prim_channels; ch++)
        for (sub = 0; sub < DCA_SUBBANDS; sub++)
            put_bits(&c->pb, 1, 0);

    /* Prediction VQ address: not transmitted */
    /* Bit allocation index */
    for (ch = 0; ch < c->prim_channels; ch++)
        for (sub = 0; sub < DCA_SUBBANDS; sub++)
            put_bits(&c->pb, 5, QUANTIZER_BITS+3);

    if (SUBSUBFRAMES > 1) {
        /* Transition mode: none for each channel and subband */
        for (ch = 0; ch < c->prim_channels; ch++)
            for (sub = 0; sub < DCA_SUBBANDS; sub++)
                put_bits(&c->pb, 1, 0); /* codebook A4 */
    }

    /* Determine scale_factor */
    for (ch = 0; ch < c->prim_channels; ch++)
        for (sub = 0; sub < DCA_SUBBANDS; sub++) {
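            /* the scale factor for each subband is derived from its peak absolute sample value */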
            max_value = 0;
            for (i = 0; i < 8 * SUBSUBFRAMES; i++)
                max_value = FFMAX(max_value, FFABS(subband_data[i][ch][sub]));
            c->scale_factor[ch][sub] = find_scale_factor7(max_value, QUANTIZER_BITS);
        }

    if (c->lfe_channel) {
        max_value = 0;
        for (i = 0; i < 4 * SUBSUBFRAMES; i++)
            max_value = FFMAX(max_value, FFABS(lfe_data[i]));
        c->lfe_scale_factor = find_scale_factor7(max_value, LFE_BITS);
    }

    /* Scale factors: the same for each channel and subband,
       encoded according to Table D.1.2 */
    for (ch = 0; ch < c->prim_channels; ch++)
        for (sub = 0; sub < DCA_SUBBANDS; sub++)
            put_bits(&c->pb, 7, c->scale_factor[ch][sub]);

    /* Joint subband scale factor codebook select: not transmitted */
    /* Scale factors for joint subband coding: not transmitted */
    /* Stereo down-mix coefficients: not transmitted */
    /* Dynamic range coefficient: not transmitted */
    /* Side information CRC check word: not transmitted */
    /* VQ encoded high frequency subbands: not transmitted */

    /* LFE data */
    if (c->lfe_channel) {
        for (i = 0; i < 4 * SUBSUBFRAMES; i++)
            put_sample7(c, lfe_data[i], LFE_BITS, c->lfe_scale_factor);
        put_bits(&c->pb, 8, c->lfe_scale_factor);
    }

    /* Audio data (subsubframes) */

    for (ss = 0; ss < SUBSUBFRAMES ; ss++)
        for (ch = 0; ch < c->prim_channels; ch++)
            for (sub = 0; sub < DCA_SUBBANDS; sub++)
                for (i = 0; i < 8; i++)
                    put_sample7(c, subband_data[ss * 8 + i][ch][sub], QUANTIZER_BITS, c->scale_factor[ch][sub]);

    /* DSYNC */
    put_bits(&c->pb, 16, 0xffff);
}
Developer: AdamCDunlap, Project: hmc-robot-drivers, Lines: 78, Source: dcaenc.c

Example 11: swap_channel_layouts_on_filter

static void swap_channel_layouts_on_filter(AVFilterContext *filter)
{
    AVFilterLink *link = NULL;
    int i, j, k;

    for (i = 0; i < filter->nb_inputs; i++) {
        link = filter->inputs[i];

        if (link->type == AVMEDIA_TYPE_AUDIO &&
            link->out_channel_layouts->nb_channel_layouts == 1)
            break;
    }
    if (i == filter->nb_inputs)
        return;

    for (i = 0; i < filter->nb_outputs; i++) {
        AVFilterLink *outlink = filter->outputs[i];
        int best_idx = -1, best_score = INT_MIN, best_count_diff = INT_MAX;

        if (outlink->type != AVMEDIA_TYPE_AUDIO ||
            outlink->in_channel_layouts->nb_channel_layouts < 2)
            continue;

        for (j = 0; j < outlink->in_channel_layouts->nb_channel_layouts; j++) {
            uint64_t  in_chlayout = link->out_channel_layouts->channel_layouts[0];
            uint64_t out_chlayout = outlink->in_channel_layouts->channel_layouts[j];
            int  in_channels      = av_get_channel_layout_nb_channels(in_chlayout);
            int out_channels      = av_get_channel_layout_nb_channels(out_chlayout);
            int count_diff        = out_channels - in_channels;
            int matched_channels, extra_channels;
            int score = 100000;

            if (FF_LAYOUT2COUNT(in_chlayout) || FF_LAYOUT2COUNT(out_chlayout)) {
                /* Compute score in case the input or output layout encodes
                   a channel count; in this case the score is not altered by
                   the computation afterwards, as in_chlayout and
                   out_chlayout have both been set to 0 */
                if (FF_LAYOUT2COUNT(in_chlayout))
                    in_channels = FF_LAYOUT2COUNT(in_chlayout);
                if (FF_LAYOUT2COUNT(out_chlayout))
                    out_channels = FF_LAYOUT2COUNT(out_chlayout);
                score -= 10000 + FFABS(out_channels - in_channels) +
                         (in_channels > out_channels ? 10000 : 0);
                in_chlayout = out_chlayout = 0;
                /* Let the remaining computation run, even if the score
                   value is not altered */
            }

            /* channel substitution */
            for (k = 0; k < FF_ARRAY_ELEMS(ch_subst); k++) {
                uint64_t cmp0 = ch_subst[k][0];
                uint64_t cmp1 = ch_subst[k][1];
                if (( in_chlayout & cmp0) && (!(out_chlayout & cmp0)) &&
                    (out_chlayout & cmp1) && (!( in_chlayout & cmp1))) {
                    in_chlayout  &= ~cmp0;
                    out_chlayout &= ~cmp1;
                    /* add score for channel match, minus a deduction for
                       having to do the substitution */
                    score += 10 * av_get_channel_layout_nb_channels(cmp1) - 2;
                }
            }

            /* no penalty for LFE channel mismatch */
            if ( (in_chlayout & AV_CH_LOW_FREQUENCY) &&
                (out_chlayout & AV_CH_LOW_FREQUENCY))
                score += 10;
            in_chlayout  &= ~AV_CH_LOW_FREQUENCY;
            out_chlayout &= ~AV_CH_LOW_FREQUENCY;

            matched_channels = av_get_channel_layout_nb_channels(in_chlayout &
                                                                 out_chlayout);
            extra_channels   = av_get_channel_layout_nb_channels(out_chlayout &
                                                                 (~in_chlayout));
            score += 10 * matched_channels - 5 * extra_channels;

            if (score > best_score ||
                (count_diff < best_count_diff && score == best_score)) {
                best_score = score;
                best_idx   = j;
                best_count_diff = count_diff;
            }
        }
        av_assert0(best_idx >= 0);
        FFSWAP(uint64_t, outlink->in_channel_layouts->channel_layouts[0],
               outlink->in_channel_layouts->channel_layouts[best_idx]);
    }

}
Developer: Ivnz, Project: iFrameExtracotrWithFFMPEG, Lines: 88, Source: avfiltergraph.c

Example 12: main

int main(void)
{
    AVRational a,b,r;
    for (a.num = -2; a.num <= 2; a.num++) {
        for (a.den = -2; a.den <= 2; a.den++) {
            for (b.num = -2; b.num <= 2; b.num++) {
                for (b.den = -2; b.den <= 2; b.den++) {
                    int c = av_cmp_q(a,b);
                    double d = av_q2d(a) == av_q2d(b) ?
                               0 : (av_q2d(a) - av_q2d(b));
                    if (d > 0)       d = 1;
                    else if (d < 0)  d = -1;
                    else if (d != d) d = INT_MIN;
                    if (c != d)
                        av_log(NULL, AV_LOG_ERROR, "%d/%d %d/%d, %d %f\n", a.num,
                               a.den, b.num, b.den, c,d);
                    r = av_sub_q(av_add_q(b,a), b);
                    if(b.den && (r.num*a.den != a.num*r.den || !r.num != !a.num || !r.den != !a.den))
                        av_log(NULL, AV_LOG_ERROR, "%d/%d ", r.num, r.den);
                }
            }
        }
    }

    for (a.num = 1; a.num <= 10; a.num++) {
        for (a.den = 1; a.den <= 10; a.den++) {
            if (av_gcd(a.num, a.den) > 1)
                continue;
            for (b.num = 1; b.num <= 10; b.num++) {
                for (b.den = 1; b.den <= 10; b.den++) {
                    int start;
                    if (av_gcd(b.num, b.den) > 1)
                        continue;
                    if (av_cmp_q(b, a) < 0)
                        continue;
                    for (start = 0; start < 10 ; start++) {
                        int acc= start;
                        int i;

                        for (i = 0; i<100; i++) {
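                            /* av_add_stable() must stay within 2 of the exact rescaled value */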
                            int exact = start + av_rescale_q(i+1, b, a);
                            acc = av_add_stable(a, acc, b, 1);
                            if (FFABS(acc - exact) > 2) {
                                av_log(NULL, AV_LOG_ERROR, "%d/%d %d/%d, %d %d\n", a.num,
                                       a.den, b.num, b.den, acc, exact);
                                return 1;
                            }
                        }
                    }
                }
            }
        }
    }

    for (a.den = 1; a.den < 0x100000000U/3; a.den*=3) {
        for (a.num = -1; a.num < (1<<27); a.num += 1 + a.num/100) {
            float f  = av_int2float(av_q2intfloat(a));
            float f2 = av_q2d(a);
            if (fabs(f - f2) > fabs(f)/5000000) {
                av_log(NULL, AV_LOG_ERROR, "%d/%d %f %f\n", a.num,
                       a.den, f, f2);
                return 1;
            }

        }
    }

    return 0;
}
Developer: 309746069, Project: FFmpeg, Lines: 69, Source: rational.c

Example 13: vectorscope8

static void vectorscope8(VectorscopeContext *s, AVFrame *in, AVFrame *out, int pd)
{
    const uint8_t * const *src = (const uint8_t * const *)in->data;
    const int slinesizex = in->linesize[s->x];
    const int slinesizey = in->linesize[s->y];
    const int slinesized = in->linesize[pd];
    const int dlinesize = out->linesize[0];
    const int intensity = s->intensity;
    const int px = s->x, py = s->y;
    const int h = s->planeheight[py];
    const int w = s->planewidth[px];
    const uint8_t *spx = src[px];
    const uint8_t *spy = src[py];
    const uint8_t *spd = src[pd];
    const int hsub = s->hsub;
    const int vsub = s->vsub;
    uint8_t **dst = out->data;
    uint8_t *dpx = dst[px];
    uint8_t *dpy = dst[py];
    uint8_t *dpd = dst[pd];
    const int tmin = s->tmin;
    const int tmax = s->tmax;
    int i, j, k;

    for (k = 0; k < 4 && dst[k]; k++)
        for (i = 0; i < out->height ; i++)
            memset(dst[k] + i * out->linesize[k],
                   (s->mode == COLOR || s->mode == COLOR5) && k == s->pd ? 0 : s->bg_color[k], out->width);

    switch (s->mode) {
    case COLOR5:
    case COLOR:
    case GRAY:
        if (s->is_yuv) {
            for (i = 0; i < h; i++) {
                const int iwx = i * slinesizex;
                const int iwy = i * slinesizey;
                const int iwd = i * slinesized;
                for (j = 0; j < w; j++) {
                    const int x = spx[iwx + j];
                    const int y = spy[iwy + j];
                    const int z = spd[iwd + j];
                    const int pos = y * dlinesize + x;

                    if (z < tmin || z > tmax)
                        continue;

                    dpd[pos] = FFMIN(dpd[pos] + intensity, 255);
                    if (dst[3])
                        dst[3][pos] = 255;
                }
            }
        } else {
            for (i = 0; i < h; i++) {
                const int iwx = i * slinesizex;
                const int iwy = i * slinesizey;
                const int iwd = i * slinesized;
                for (j = 0; j < w; j++) {
                    const int x = spx[iwx + j];
                    const int y = spy[iwy + j];
                    const int z = spd[iwd + j];
                    const int pos = y * dlinesize + x;

                    if (z < tmin || z > tmax)
                        continue;

                    dst[0][pos] = FFMIN(dst[0][pos] + intensity, 255);
                    dst[1][pos] = FFMIN(dst[1][pos] + intensity, 255);
                    dst[2][pos] = FFMIN(dst[2][pos] + intensity, 255);
                    if (dst[3])
                        dst[3][pos] = 255;
                }
            }
        }
        break;
    case COLOR2:
        if (s->is_yuv) {
            for (i = 0; i < h; i++) {
                const int iw1 = i * slinesizex;
                const int iw2 = i * slinesizey;
                const int iwd = i * slinesized;
                for (j = 0; j < w; j++) {
                    const int x = spx[iw1 + j];
                    const int y = spy[iw2 + j];
                    const int z = spd[iwd + j];
                    const int pos = y * dlinesize + x;

                    if (z < tmin || z > tmax)
                        continue;

                    if (!dpd[pos])
                        dpd[pos] = FFABS(128 - x) + FFABS(128 - y);
                    dpx[pos] = x;
                    dpy[pos] = y;
                    if (dst[3])
                        dst[3][pos] = 255;
                }
            }
        } else {
            for (i = 0; i < h; i++) {
//......... some code omitted here .........
Developer: SilverCrux, Project: FFmpeg, Lines: 101, Source: vf_vectorscope.c

Example 14: flv_read_packet

static int flv_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    FLVContext *flv = s->priv_data;
    int ret, i, type, size, flags;
    int stream_type=-1;
    int64_t next, pos;
    int64_t dts, pts = AV_NOPTS_VALUE;
    int av_uninit(channels);
    int av_uninit(sample_rate);
    AVStream *st = NULL;

    for(;; avio_skip(s->pb, 4)) { /* pkt size is repeated at end. skip it */
        pos = avio_tell(s->pb);
        type = avio_r8(s->pb);
        size = avio_rb24(s->pb);
        dts = avio_rb24(s->pb);
        dts |= avio_r8(s->pb) << 24;
        av_dlog(s, "type:%d, size:%d, dts:%"PRId64"\n", type, size, dts);
        if (url_feof(s->pb))
            return AVERROR_EOF;
        avio_skip(s->pb, 3); /* stream id, always 0 */
        flags = 0;

        if (flv->validate_next < flv->validate_count) {
            int64_t validate_pos = flv->validate_index[flv->validate_next].pos;
            if (pos == validate_pos) {
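                /* the index entry only counts as validated if its stored dts is
                 * within VALIDATE_INDEX_TS_THRESH of the dts just read */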
                if (FFABS(dts - flv->validate_index[flv->validate_next].dts) <=
                        VALIDATE_INDEX_TS_THRESH) {
                    flv->validate_next++;
                } else {
                    clear_index_entries(s, validate_pos);
                    flv->validate_count = 0;
                }
            } else if (pos > validate_pos) {
                clear_index_entries(s, validate_pos);
                flv->validate_count = 0;
            }
        }

        if(size == 0)
            continue;

        next= size + avio_tell(s->pb);

        if (type == FLV_TAG_TYPE_AUDIO) {
            stream_type=FLV_STREAM_TYPE_AUDIO;
            flags = avio_r8(s->pb);
            size--;
        } else if (type == FLV_TAG_TYPE_VIDEO) {
            stream_type=FLV_STREAM_TYPE_VIDEO;
            flags = avio_r8(s->pb);
            size--;
            if ((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_VIDEO_INFO_CMD)
                goto skip;
        } else if (type == FLV_TAG_TYPE_META) {
            if (size > 13+1+4 && dts == 0) { // Header-type metadata stuff
                flv_read_metabody(s, next);
                goto skip;
            } else if (dts != 0) { // Script-data "special" metadata frames - don't skip
                stream_type=FLV_STREAM_TYPE_DATA;
            } else {
                goto skip;
            }
        } else {
            av_log(s, AV_LOG_DEBUG, "skipping flv packet: type %d, size %d, flags %d\n", type, size, flags);
skip:
            avio_seek(s->pb, next, SEEK_SET);
            continue;
        }

        /* skip empty data packets */
        if (!size)
            continue;

        /* now find stream */
        for(i=0; i<s->nb_streams; i++) {
            st = s->streams[i];
            if (st->id == stream_type)
                break;
        }
        if(i == s->nb_streams) {
            av_log(s, AV_LOG_WARNING, "Stream discovered after head already parsed\n");
            st = create_stream(s, stream_type,
            (int[]) {
                AVMEDIA_TYPE_VIDEO, AVMEDIA_TYPE_AUDIO, AVMEDIA_TYPE_DATA
            }[stream_type]);
        }
        av_dlog(s, "%d %X %d \n", stream_type, flags, st->discard);
        if(  (st->discard >= AVDISCARD_NONKEY && !((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_KEY || (stream_type == FLV_STREAM_TYPE_AUDIO)))
                ||(st->discard >= AVDISCARD_BIDIR  &&  ((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_DISP_INTER && (stream_type == FLV_STREAM_TYPE_VIDEO)))
                || st->discard >= AVDISCARD_ALL
          ) {
            avio_seek(s->pb, next, SEEK_SET);
            continue;
        }
        if ((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_KEY)
            av_add_index_entry(st, pos, dts, size, 0, AVINDEX_KEYFRAME);
        break;
    }
Developer: zzilla, Project: ONVIF-Device-Manager, Lines: 99, Source: flvdec.c

Example 15: AAC_RENAME


//......... some code omitted here .........
                goto err;
        }
    } else
        memset(ps->iid_par, 0, sizeof(ps->iid_par));

    if (ps->enable_icc)
        for (e = 0; e < ps->num_env; e++) {
            int dt = get_bits1(gb);
            if (read_icc_data(avctx, gb, ps, ps->icc_par, dt ? huff_icc_dt : huff_icc_df, e, dt))
                goto err;
        }
    else
        memset(ps->icc_par, 0, sizeof(ps->icc_par));

    if (ps->enable_ext) {
        int cnt = get_bits(gb, 4);
        if (cnt == 15) {
            cnt += get_bits(gb, 8);
        }
        cnt *= 8;
        while (cnt > 7) {
            int ps_extension_id = get_bits(gb, 2);
            cnt -= 2 + ps_read_extension_data(gb, ps, ps_extension_id);
        }
        if (cnt < 0) {
            av_log(avctx, AV_LOG_ERROR, "ps extension overflow %d\n", cnt);
            goto err;
        }
        skip_bits(gb, cnt);
    }

    ps->enable_ipdopd &= !PS_BASELINE;

    //Fix up envelopes
    if (!ps->num_env || ps->border_position[ps->num_env] < numQMFSlots - 1) {
        //Create a fake envelope
        int source = ps->num_env ? ps->num_env - 1 : ps->num_env_old - 1;
        int b;
        if (source >= 0 && source != ps->num_env) {
            if (ps->enable_iid) {
                memcpy(ps->iid_par+ps->num_env, ps->iid_par+source, sizeof(ps->iid_par[0]));
            }
            if (ps->enable_icc) {
                memcpy(ps->icc_par+ps->num_env, ps->icc_par+source, sizeof(ps->icc_par[0]));
            }
            if (ps->enable_ipdopd) {
                memcpy(ps->ipd_par+ps->num_env, ps->ipd_par+source, sizeof(ps->ipd_par[0]));
                memcpy(ps->opd_par+ps->num_env, ps->opd_par+source, sizeof(ps->opd_par[0]));
            }
        }
        if (ps->enable_iid){
            for (b = 0; b < ps->nr_iid_par; b++) {
                if (FFABS(ps->iid_par[ps->num_env][b]) > 7 + 8 * ps->iid_quant) {
                    av_log(avctx, AV_LOG_ERROR, "iid_par invalid\n");
                    goto err;
                }
            }
        }
        if (ps->enable_icc){
            for (b = 0; b < ps->nr_iid_par; b++) {
                if (ps->icc_par[ps->num_env][b] > 7U) {
                    av_log(avctx, AV_LOG_ERROR, "icc_par invalid\n");
                    goto err;
                }
            }
        }
        ps->num_env++;
        ps->border_position[ps->num_env] = numQMFSlots - 1;
    }


    ps->is34bands_old = ps->is34bands;
    if (!PS_BASELINE && (ps->enable_iid || ps->enable_icc))
        ps->is34bands = (ps->enable_iid && ps->nr_iid_par == 34) ||
                        (ps->enable_icc && ps->nr_icc_par == 34);

    //Baseline
    if (!ps->enable_ipdopd) {
        memset(ps->ipd_par, 0, sizeof(ps->ipd_par));
        memset(ps->opd_par, 0, sizeof(ps->opd_par));
    }

    if (header)
        ps->start = 1;

    bits_consumed = get_bits_count(gb) - bit_count_start;
    if (bits_consumed <= bits_left) {
        skip_bits_long(gb_host, bits_consumed);
        return bits_consumed;
    }
    av_log(avctx, AV_LOG_ERROR, "Expected to read %d PS bits actually read %d.\n", bits_left, bits_consumed);
err:
    ps->start = 0;
    skip_bits_long(gb_host, bits_left);
    memset(ps->iid_par, 0, sizeof(ps->iid_par));
    memset(ps->icc_par, 0, sizeof(ps->icc_par));
    memset(ps->ipd_par, 0, sizeof(ps->ipd_par));
    memset(ps->opd_par, 0, sizeof(ps->opd_par));
    return bits_left;
}
Developer: clook, Project: FFmpeg, Lines: 101, Source: aacps.c


Note: the FFABS examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors, and redistribution or use should follow the license of the corresponding project. Do not reproduce without permission.