This page collects typical usage examples of FFMAX in C/C++. FFMAX is FFmpeg's larger-of-two-values comparison macro from libavutil. If you have been wondering what FFMAX does, how to call it, or what real-world uses look like, the curated code examples below should help.
The following presents 15 code examples that use FFMAX, all drawn from FFmpeg/Libav sources and ordered roughly by popularity.
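Before the excerpts, here is a minimal, self-contained sketch of what FFMAX does. The definitions below mirror the ones in FFmpeg's libavutil (macros.h in current trees) but are reproduced locally so the snippet compiles on its own; treat them as an illustration, not the authoritative header. Because FFMAX is a plain textual macro, the arguments are expanded textually and the selected argument is evaluated twice, so arguments with side effects (e.g. i++) should be avoided.

#include <stdio.h>

/* Local copies mirroring FFmpeg's libavutil definitions (assumption: macros.h). */
#define FFMAX(a, b) ((a) > (b) ? (a) : (b))
#define FFMIN(a, b) ((a) > (b) ? (b) : (a))

int main(void)
{
    int prec = 10;
    /* Pattern used in Example 1 below: clamp a shift amount so it never goes negative. */
    int shift = FFMAX(prec - 8, 0);            /* -> 2 */
    /* Combining FFMAX and FFMIN gives a simple clamp into [0, 255]. */
    int clipped = FFMIN(FFMAX(300, 0), 255);   /* -> 255 */
    printf("shift=%d clipped=%d\n", shift, clipped);
    return 0;
}

Example 1 below shows the same clamp-to-zero pattern applied to per-component bit depths in a JPEG 2000 decoder.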
Example 1: libopenjpeg_decode_frame
static int libopenjpeg_decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
LibOpenJPEGContext *ctx = avctx->priv_data;
AVFrame *picture = &ctx->image, *output = data;
opj_dinfo_t *dec;
opj_cio_t *stream;
opj_image_t *image;
int width, height, has_alpha = 0, ret = -1;
int x, y, index;
uint8_t *img_ptr;
int adjust[4];
*data_size = 0;
// Check if input is a raw jpeg2k codestream or in jp2 wrapping
if((AV_RB32(buf) == 12) &&
(AV_RB32(buf + 4) == JP2_SIG_TYPE) &&
(AV_RB32(buf + 8) == JP2_SIG_VALUE)) {
dec = opj_create_decompress(CODEC_JP2);
} else {
// If the AVPacket contains a jp2c box, then skip to
// the starting byte of the codestream.
if (AV_RB32(buf + 4) == AV_RB32("jp2c"))
buf += 8;
dec = opj_create_decompress(CODEC_J2K);
}
if(!dec) {
av_log(avctx, AV_LOG_ERROR, "Error initializing decoder.\n");
return -1;
}
opj_set_event_mgr((opj_common_ptr)dec, NULL, NULL);
ctx->dec_params.cp_reduce = avctx->lowres;
// Tie decoder with decoding parameters
opj_setup_decoder(dec, &ctx->dec_params);
stream = opj_cio_open((opj_common_ptr)dec, buf, buf_size);
if(!stream) {
av_log(avctx, AV_LOG_ERROR, "Codestream could not be opened for reading.\n");
opj_destroy_decompress(dec);
return -1;
}
// Decode the codestream
image = opj_decode_with_info(dec, stream, NULL);
opj_cio_close(stream);
if(!image) {
av_log(avctx, AV_LOG_ERROR, "Error decoding codestream.\n");
opj_destroy_decompress(dec);
return -1;
}
width = image->comps[0].w << avctx->lowres;
height = image->comps[0].h << avctx->lowres;
if(avcodec_check_dimensions(avctx, width, height) < 0) {
av_log(avctx, AV_LOG_ERROR, "%dx%d dimension invalid.\n", width, height);
goto done;
}
avcodec_set_dimensions(avctx, width, height);
switch(image->numcomps)
{
case 1:
avctx->pix_fmt = PIX_FMT_GRAY8;
break;
case 3:
if(check_image_attributes(image)) {
avctx->pix_fmt = PIX_FMT_RGB24;
} else {
avctx->pix_fmt = PIX_FMT_GRAY8;
av_log(avctx, AV_LOG_ERROR, "Only first component will be used.\n");
}
break;
case 4:
has_alpha = 1;
avctx->pix_fmt = PIX_FMT_RGBA;
break;
default:
av_log(avctx, AV_LOG_ERROR, "%d components unsupported.\n", image->numcomps);
goto done;
}
if(picture->data[0])
avctx->release_buffer(avctx, picture);
if(avctx->get_buffer(avctx, picture) < 0) {
av_log(avctx, AV_LOG_ERROR, "Couldn't allocate image buffer.\n");
return -1;
}
for(x = 0; x < image->numcomps; x++) {
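/* Per-component right-shift needed to reduce samples to 8 bits; FFMAX keeps it at 0 for components with 8-bit or lower precision. */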
adjust[x] = FFMAX(image->comps[x].prec - 8, 0);
}
for(y = 0; y < avctx->height; y++) {
index = y*avctx->width;
img_ptr = picture->data[0] + y*picture->linesize[0];
//......... part of the code omitted here .........
Example 2: libopenjpeg_encode_init
static av_cold int libopenjpeg_encode_init(AVCodecContext *avctx)
{
LibOpenJPEGContext *ctx = avctx->priv_data;
int err = AVERROR(ENOMEM);
opj_set_default_encoder_parameters(&ctx->enc_params);
ctx->enc_params.cp_rsiz = ctx->profile;
ctx->enc_params.mode = !!avctx->global_quality;
ctx->enc_params.cp_cinema = ctx->cinema_mode;
ctx->enc_params.prog_order = ctx->prog_order;
ctx->enc_params.numresolution = ctx->numresolution;
ctx->enc_params.cp_disto_alloc = ctx->disto_alloc;
ctx->enc_params.cp_fixed_alloc = ctx->fixed_alloc;
ctx->enc_params.cp_fixed_quality = ctx->fixed_quality;
ctx->enc_params.tcp_numlayers = ctx->numlayers;
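/* A negative (unset) compression_level is clamped to 0 by FFMAX before being scaled into the first layer's rate. */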
ctx->enc_params.tcp_rates[0] = FFMAX(avctx->compression_level, 0) * 2;
if (ctx->cinema_mode > 0) {
ctx->enc_params.irreversible = 1;
ctx->enc_params.tcp_mct = 1;
ctx->enc_params.tile_size_on = 0;
/* no subsampling */
ctx->enc_params.cp_tdx=1;
ctx->enc_params.cp_tdy=1;
ctx->enc_params.subsampling_dx = 1;
ctx->enc_params.subsampling_dy = 1;
/* Tile and Image shall be at (0,0) */
ctx->enc_params.cp_tx0 = 0;
ctx->enc_params.cp_ty0 = 0;
ctx->enc_params.image_offset_x0 = 0;
ctx->enc_params.image_offset_y0 = 0;
/* Codeblock size= 32*32 */
ctx->enc_params.cblockw_init = 32;
ctx->enc_params.cblockh_init = 32;
ctx->enc_params.csty |= 0x01;
/* No ROI */
ctx->enc_params.roi_compno = -1;
if (ctx->enc_params.prog_order != CPRL) {
av_log(avctx, AV_LOG_ERROR, "prog_order forced to CPRL\n");
ctx->enc_params.prog_order = CPRL;
}
ctx->enc_params.tp_flag = 'C';
ctx->enc_params.tp_on = 1;
}
ctx->compress = opj_create_compress(ctx->format);
if (!ctx->compress) {
av_log(avctx, AV_LOG_ERROR, "Error creating the compressor\n");
return AVERROR(ENOMEM);
}
ctx->image = mj2_create_image(avctx, &ctx->enc_params);
if (!ctx->image) {
av_log(avctx, AV_LOG_ERROR, "Error creating the mj2 image\n");
err = AVERROR(EINVAL);
goto fail;
}
opj_setup_encoder(ctx->compress, &ctx->enc_params, ctx->image);
ctx->stream = opj_cio_open((opj_common_ptr) ctx->compress, NULL, 0);
if (!ctx->stream) {
av_log(avctx, AV_LOG_ERROR, "Error creating the cio stream\n");
err = AVERROR(ENOMEM);
goto fail;
}
avctx->coded_frame = av_frame_alloc();
if (!avctx->coded_frame) {
av_log(avctx, AV_LOG_ERROR, "Error allocating coded frame\n");
goto fail;
}
memset(&ctx->event_mgr, 0, sizeof(opj_event_mgr_t));
ctx->event_mgr.info_handler = info_callback;
ctx->event_mgr.error_handler = error_callback;
ctx->event_mgr.warning_handler = warning_callback;
opj_set_event_mgr((opj_common_ptr) ctx->compress, &ctx->event_mgr, avctx);
return 0;
fail:
opj_cio_close(ctx->stream);
ctx->stream = NULL;
opj_destroy_compress(ctx->compress);
ctx->compress = NULL;
opj_image_destroy(ctx->image);
ctx->image = NULL;
av_freep(&avctx->coded_frame);
return err;
}
Example 3: vectorscope
//......... part of the code omitted here .........
dst[0][pos] = FFMIN(dst[0][pos] + intensity, 255);
dst[1][pos] = FFMIN(dst[1][pos] + intensity, 255);
dst[2][pos] = FFMIN(dst[2][pos] + intensity, 255);
if (dst[3])
dst[3][pos] = 255;
}
}
}
break;
case COLOR2:
if (s->is_yuv) {
for (i = 0; i < h; i++) {
const int iw1 = i * slinesizex;
const int iw2 = i * slinesizey;
for (j = 0; j < w; j++) {
const int x = spx[iw1 + j];
const int y = spy[iw2 + j];
const int pos = y * dlinesize + x;
if (!dpd[pos])
dpd[pos] = FFABS(128 - x) + FFABS(128 - y);
dpx[pos] = x;
dpy[pos] = y;
if (dst[3])
dst[3][pos] = 255;
}
}
} else {
for (i = 0; i < h; i++) {
const int iw1 = i * slinesizex;
const int iw2 = i * slinesizey;
for (j = 0; j < w; j++) {
const int x = spx[iw1 + j];
const int y = spy[iw2 + j];
const int pos = y * dlinesize + x;
if (!dpd[pos])
dpd[pos] = FFMIN(x + y, 255);
dpx[pos] = x;
dpy[pos] = y;
if (dst[3])
dst[3][pos] = 255;
}
}
}
break;
case COLOR3:
for (i = 0; i < h; i++) {
const int iw1 = i * slinesizex;
const int iw2 = i * slinesizey;
for (j = 0; j < w; j++) {
const int x = spx[iw1 + j];
const int y = spy[iw2 + j];
const int pos = y * dlinesize + x;
dpd[pos] = FFMIN(255, dpd[pos] + intensity);
dpx[pos] = x;
dpy[pos] = y;
if (dst[3])
dst[3][pos] = 255;
}
}
break;
case COLOR4:
for (i = 0; i < in->height; i++) {
const int iwx = (i >> vsub) * slinesizex;
const int iwy = (i >> vsub) * slinesizey;
const int iwd = i * slinesized;
for (j = 0; j < in->width; j++) {
const int x = spx[iwx + (j >> hsub)];
const int y = spy[iwy + (j >> hsub)];
const int pos = y * dlinesize + x;
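/* FFMAX keeps the maximum sample value seen at this vectorscope position. */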
dpd[pos] = FFMAX(spd[iwd + j], dpd[pos]);
dpx[pos] = x;
dpy[pos] = y;
if (dst[3])
dst[3][pos] = 255;
}
}
break;
default:
av_assert0(0);
}
envelope(s, out);
if (s->mode == COLOR) {
for (i = 0; i < out->height; i++) {
for (j = 0; j < out->width; j++) {
if (!dpd[i * out->linesize[pd] + j]) {
dpx[i * out->linesize[px] + j] = j;
dpy[i * out->linesize[py] + j] = i;
dpd[i * out->linesize[pd] + j] = 128;
}
}
}
}
}
Example 4: ff_adaptive_quantization
float ff_adaptive_quantization(MpegEncContext *s, double q){
int i;
const float lumi_masking= s->avctx->lumi_masking / (128.0*128.0);
const float dark_masking= s->avctx->dark_masking / (128.0*128.0);
const float temp_cplx_masking= s->avctx->temporal_cplx_masking;
const float spatial_cplx_masking = s->avctx->spatial_cplx_masking;
const float p_masking = s->avctx->p_masking;
const float border_masking = s->avctx->border_masking;
float bits_sum= 0.0;
float cplx_sum= 0.0;
#if __STDC_VERSION__ >= 199901L
float cplx_tab[s->mb_num];
float bits_tab[s->mb_num];
#else
float *cplx_tab=_alloca(sizeof(float)*s->mb_num);
float *bits_tab=_alloca(sizeof(float)*s->mb_num);
#endif
const int qmin= s->avctx->mb_lmin;
const int qmax= s->avctx->mb_lmax;
Picture * const pic= &s->current_picture;
const int mb_width = s->mb_width;
const int mb_height = s->mb_height;
float avg= 0.0;
for(i=0; i<s->mb_num; i++){
const int mb_xy= s->mb_index2xy[i];
float temp_cplx= sqrt(pic->mc_mb_var[mb_xy]); //FIXME merge in pow()
float spat_cplx= sqrt(pic->mb_var[mb_xy]);
const int lumi= pic->mb_mean[mb_xy];
float bits, cplx, factor;
int mb_x = mb_xy % s->mb_stride;
int mb_y = mb_xy / s->mb_stride;
int mb_distance;
float mb_factor = 0.0;
#if 0
if(spat_cplx < q/3) spat_cplx= q/3; //FIXME finetune
if(temp_cplx < q/3) temp_cplx= q/3; //FIXME finetune
#endif
if(spat_cplx < 4) spat_cplx= 4; //FIXME finetune
if(temp_cplx < 4) temp_cplx= 4; //FIXME finetune
if((s->mb_type[mb_xy]&CANDIDATE_MB_TYPE_INTRA)){//FIXME hq mode
cplx= spat_cplx;
factor= 1.0 + p_masking;
}else{
cplx= temp_cplx;
factor= pow(temp_cplx, - temp_cplx_masking);
}
factor*=pow(spat_cplx, - spatial_cplx_masking);
if(lumi>127)
factor*= (1.0 - (lumi-128)*(lumi-128)*lumi_masking);
else
factor*= (1.0 - (lumi-128)*(lumi-128)*dark_masking);
if(mb_x < mb_width/5){
mb_distance = mb_width/5 - mb_x;
mb_factor = (float)mb_distance / (float)(mb_width/5);
}else if(mb_x > 4*mb_width/5){
mb_distance = mb_x - 4*mb_width/5;
mb_factor = (float)mb_distance / (float)(mb_width/5);
}
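/* The vertical border factor below is combined with the horizontal one via FFMAX, so whichever border is closer dominates. */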
if(mb_y < mb_height/5){
mb_distance = mb_height/5 - mb_y;
mb_factor = FFMAX(mb_factor, (float)mb_distance / (float)(mb_height/5));
}else if(mb_y > 4*mb_height/5){
mb_distance = mb_y - 4*mb_height/5;
mb_factor = FFMAX(mb_factor, (float)mb_distance / (float)(mb_height/5));
}
factor*= 1.0 - border_masking*mb_factor;
if(factor<0.00001) factor= 0.00001;
bits= cplx*factor;
cplx_sum+= cplx;
bits_sum+= bits;
cplx_tab[i]= cplx;
bits_tab[i]= bits;
}
/* handle qmin/qmax clipping */
if(s->flags&CODEC_FLAG_NORMALIZE_AQP){
float factor= bits_sum/cplx_sum;
for(i=0; i<s->mb_num; i++){
float newq= q*cplx_tab[i]/bits_tab[i];
newq*= factor;
if (newq > qmax){
bits_sum -= bits_tab[i];
cplx_sum -= cplx_tab[i]*q/qmax;
}
else if(newq < qmin){
bits_sum -= bits_tab[i];
cplx_sum -= cplx_tab[i]*q/qmin;
}
}
if(bits_sum < 0.001) bits_sum= 0.001;
if(cplx_sum < 0.001) cplx_sum= 0.001;
}
//......... part of the code omitted here .........
Example 5: av_probe_input_buffer2
int av_probe_input_buffer2(AVIOContext *pb, AVInputFormat **fmt,
const char *filename, void *logctx,
unsigned int offset, unsigned int max_probe_size)
{
AVProbeData pd = { filename ? filename : "" };
uint8_t *buf = NULL;
int ret = 0, probe_size, buf_offset = 0;
int score = 0;
int ret2;
if (!max_probe_size)
max_probe_size = PROBE_BUF_MAX;
else if (max_probe_size < PROBE_BUF_MIN) {
av_log(logctx, AV_LOG_ERROR,
"Specified probe size value %u cannot be < %u\n", max_probe_size, PROBE_BUF_MIN);
return AVERROR(EINVAL);
}
if (offset >= max_probe_size)
return AVERROR(EINVAL);
if (pb->av_class) {
uint8_t *mime_type_opt = NULL;
av_opt_get(pb, "mime_type", AV_OPT_SEARCH_CHILDREN, &mime_type_opt);
pd.mime_type = (const char *)mime_type_opt;
}
#if 0
if (!*fmt && pb->av_class && av_opt_get(pb, "mime_type", AV_OPT_SEARCH_CHILDREN, &mime_type) >= 0 && mime_type) {
if (!av_strcasecmp(mime_type, "audio/aacp")) {
*fmt = av_find_input_format("aac");
}
av_freep(&mime_type);
}
#endif
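/* Double the probe size each iteration, capped at max_probe_size; the FFMAX(..., probe_size + 1) term guarantees forward progress so the loop always terminates. */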
for (probe_size = PROBE_BUF_MIN; probe_size <= max_probe_size && !*fmt;
probe_size = FFMIN(probe_size << 1,
FFMAX(max_probe_size, probe_size + 1))) {
score = probe_size < max_probe_size ? AVPROBE_SCORE_RETRY : 0;
/* Read probe data. */
if ((ret = av_reallocp(&buf, probe_size + AVPROBE_PADDING_SIZE)) < 0)
goto fail;
if ((ret = avio_read(pb, buf + buf_offset,
probe_size - buf_offset)) < 0) {
/* Fail if error was not end of file, otherwise, lower score. */
if (ret != AVERROR_EOF)
goto fail;
score = 0;
ret = 0; /* error was end of file, nothing read */
}
buf_offset += ret;
if (buf_offset < offset)
continue;
pd.buf_size = buf_offset - offset;
pd.buf = &buf[offset];
memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);
/* Guess file format. */
*fmt = av_probe_input_format2(&pd, 1, &score);
if (*fmt) {
/* This can only be true in the last iteration. */
if (score <= AVPROBE_SCORE_RETRY) {
av_log(logctx, AV_LOG_WARNING,
"Format %s detected only with low score of %d, "
"misdetection possible!\n", (*fmt)->name, score);
} else
av_log(logctx, AV_LOG_DEBUG,
"Format %s probed with size=%d and score=%d\n",
(*fmt)->name, probe_size, score);
#if 0
FILE *f = fopen("probestat.tmp", "ab");
fprintf(f, "probe_size:%d format:%s score:%d filename:%s\n", probe_size, (*fmt)->name, score, filename);
fclose(f);
#endif
}
}
if (!*fmt)
ret = AVERROR_INVALIDDATA;
fail:
/* Rewind. Reuse probe buffer to avoid seeking. */
ret2 = ffio_rewind_with_probe_data(pb, &buf, buf_offset);
if (ret >= 0)
ret = ret2;
av_freep(&pd.mime_type);
return ret < 0 ? ret : score;
}
Example 6: auto_matrix
//......... part of the code omitted here .........
}else{
matrix[ SIDE_LEFT][ BACK_LEFT]+= 1.0;
matrix[SIDE_RIGHT][BACK_RIGHT]+= 1.0;
}
}else if(s->out_ch_layout & AV_CH_FRONT_LEFT){
matrix[ FRONT_LEFT][ BACK_LEFT]+= s->slev;
matrix[FRONT_RIGHT][BACK_RIGHT]+= s->slev;
}else if(s->out_ch_layout & AV_CH_FRONT_CENTER){
matrix[ FRONT_CENTER][BACK_LEFT ]+= s->slev*M_SQRT1_2;
matrix[ FRONT_CENTER][BACK_RIGHT]+= s->slev*M_SQRT1_2;
}else
av_assert0(0);
}
if(unaccounted & AV_CH_SIDE_LEFT){
if(s->out_ch_layout & AV_CH_BACK_LEFT){
/* if back channels do not exist in the input, just copy side
channels to back channels, otherwise mix side into back */
if (s->in_ch_layout & AV_CH_BACK_LEFT) {
matrix[BACK_LEFT ][SIDE_LEFT ] += M_SQRT1_2;
matrix[BACK_RIGHT][SIDE_RIGHT] += M_SQRT1_2;
} else {
matrix[BACK_LEFT ][SIDE_LEFT ] += 1.0;
matrix[BACK_RIGHT][SIDE_RIGHT] += 1.0;
}
}else if(s->out_ch_layout & AV_CH_BACK_CENTER){
matrix[BACK_CENTER][ SIDE_LEFT]+= M_SQRT1_2;
matrix[BACK_CENTER][SIDE_RIGHT]+= M_SQRT1_2;
}else if(s->out_ch_layout & AV_CH_FRONT_LEFT){
matrix[ FRONT_LEFT][ SIDE_LEFT]+= s->slev;
matrix[FRONT_RIGHT][SIDE_RIGHT]+= s->slev;
}else if(s->out_ch_layout & AV_CH_FRONT_CENTER){
matrix[ FRONT_CENTER][SIDE_LEFT ]+= s->slev*M_SQRT1_2;
matrix[ FRONT_CENTER][SIDE_RIGHT]+= s->slev*M_SQRT1_2;
}else
av_assert0(0);
}
if(unaccounted & AV_CH_FRONT_LEFT_OF_CENTER){
if(s->out_ch_layout & AV_CH_FRONT_LEFT){
matrix[ FRONT_LEFT][ FRONT_LEFT_OF_CENTER]+= 1.0;
matrix[FRONT_RIGHT][FRONT_RIGHT_OF_CENTER]+= 1.0;
}else if(s->out_ch_layout & AV_CH_FRONT_CENTER){
matrix[ FRONT_CENTER][ FRONT_LEFT_OF_CENTER]+= M_SQRT1_2;
matrix[ FRONT_CENTER][FRONT_RIGHT_OF_CENTER]+= M_SQRT1_2;
}else
av_assert0(0);
}
/* mix LFE into front left/right or center */
if (unaccounted & AV_CH_LOW_FREQUENCY) {
if (s->out_ch_layout & AV_CH_FRONT_CENTER) {
matrix[FRONT_CENTER][LOW_FREQUENCY] += s->lfe_mix_level;
} else if (s->out_ch_layout & AV_CH_FRONT_LEFT) {
matrix[FRONT_LEFT ][LOW_FREQUENCY] += s->lfe_mix_level * M_SQRT1_2;
matrix[FRONT_RIGHT][LOW_FREQUENCY] += s->lfe_mix_level * M_SQRT1_2;
} else
av_assert0(0);
}
for(out_i=i=0; i<64; i++){
double sum=0;
int in_i=0;
for(j=0; j<64; j++){
s->matrix[out_i][in_i]= matrix[i][j];
if(matrix[i][j]){
sum += fabs(matrix[i][j]);
}
if(s->in_ch_layout & (1ULL<<j))
in_i++;
}
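/* Track the largest per-output-channel sum of absolute coefficients; it is used below to rescale the matrix when integer sample formats could otherwise clip. */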
maxcoef= FFMAX(maxcoef, sum);
if(s->out_ch_layout & (1ULL<<i))
out_i++;
}
if(s->rematrix_volume < 0)
maxcoef = -s->rematrix_volume;
if(( av_get_packed_sample_fmt(s->out_sample_fmt) < AV_SAMPLE_FMT_FLT
|| av_get_packed_sample_fmt(s->int_sample_fmt) < AV_SAMPLE_FMT_FLT) && maxcoef > 1.0){
for(i=0; i<SWR_CH_MAX; i++)
for(j=0; j<SWR_CH_MAX; j++){
s->matrix[i][j] /= maxcoef;
}
}
if(s->rematrix_volume > 0){
for(i=0; i<SWR_CH_MAX; i++)
for(j=0; j<SWR_CH_MAX; j++){
s->matrix[i][j] *= s->rematrix_volume;
}
}
for(i=0; i<av_get_channel_layout_nb_channels(s->out_ch_layout); i++){
for(j=0; j<av_get_channel_layout_nb_channels(s->in_ch_layout); j++){
av_log(NULL, AV_LOG_DEBUG, "%f ", s->matrix[i][j]);
}
av_log(NULL, AV_LOG_DEBUG, "\n");
}
return 0;
}
Example 7: apply_delogo
/**
* Apply a simple delogo algorithm to the image in src and put the
* result in dst.
*
* The algorithm is only applied to the region specified by the logo
* parameters.
*
* @param w width of the input image
* @param h height of the input image
* @param logo_x x coordinate of the top left corner of the logo region
* @param logo_y y coordinate of the top left corner of the logo region
* @param logo_w width of the logo
* @param logo_h height of the logo
* @param band the size of the band around the processed area
* @param show show a rectangle around the processed area, useful for
* parameters tweaking
* @param direct if non-zero perform in-place processing
*/
static void apply_delogo(uint8_t *dst, int dst_linesize,
uint8_t *src, int src_linesize,
int w, int h,
int logo_x, int logo_y, int logo_w, int logo_h,
int band, int show, int direct)
{
int x, y;
int interp, dist;
uint8_t *xdst, *xsrc;
uint8_t *topleft, *botleft, *topright;
int xclipl, xclipr, yclipt, yclipb;
int logo_x1, logo_x2, logo_y1, logo_y2;
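/* FFMAX clamps each clip amount to 0, so clipping only applies where the logo region sticks out past the frame. */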
xclipl = FFMAX(-logo_x, 0);
xclipr = FFMAX(logo_x+logo_w-w, 0);
yclipt = FFMAX(-logo_y, 0);
yclipb = FFMAX(logo_y+logo_h-h, 0);
logo_x1 = logo_x + xclipl;
logo_x2 = logo_x + logo_w - xclipr;
logo_y1 = logo_y + yclipt;
logo_y2 = logo_y + logo_h - yclipb;
topleft = src+logo_y1 * src_linesize+logo_x1;
topright = src+logo_y1 * src_linesize+logo_x2-1;
botleft = src+(logo_y2-1) * src_linesize+logo_x1;
dst += (logo_y1+1)*dst_linesize;
src += (logo_y1+1)*src_linesize;
if (!direct)
av_image_copy_plane(dst, dst_linesize, src, src_linesize, w, h);
for (y = logo_y1+1; y < logo_y2-1; y++) {
for (x = logo_x1+1,
xdst = dst+logo_x1+1,
xsrc = src+logo_x1+1; x < logo_x2-1; x++, xdst++, xsrc++) {
interp =
(topleft[src_linesize*(y-logo_y -yclipt)] +
topleft[src_linesize*(y-logo_y-1-yclipt)] +
topleft[src_linesize*(y-logo_y+1-yclipt)]) * (logo_w-(x-logo_x))/logo_w
+
(topright[src_linesize*(y-logo_y-yclipt)] +
topright[src_linesize*(y-logo_y-1-yclipt)] +
topright[src_linesize*(y-logo_y+1-yclipt)]) * (x-logo_x)/logo_w
+
(topleft[x-logo_x-xclipl] +
topleft[x-logo_x-1-xclipl] +
topleft[x-logo_x+1-xclipl]) * (logo_h-(y-logo_y))/logo_h
+
(botleft[x-logo_x-xclipl] +
botleft[x-logo_x-1-xclipl] +
botleft[x-logo_x+1-xclipl]) * (y-logo_y)/logo_h;
interp /= 6;
if (y >= logo_y+band && y < logo_y+logo_h-band &&
x >= logo_x+band && x < logo_x+logo_w-band) {
*xdst = interp;
} else {
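/* Inside the border band: dist is the distance into the band, accumulated with FFMAX over both axes, and used to blend the source pixel with the interpolated value. */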
dist = 0;
if (x < logo_x+band)
dist = FFMAX(dist, logo_x-x+band);
else if (x >= logo_x+logo_w-band)
dist = FFMAX(dist, x-(logo_x+logo_w-1-band));
if (y < logo_y+band)
dist = FFMAX(dist, logo_y-y+band);
else if (y >= logo_y+logo_h-band)
dist = FFMAX(dist, y-(logo_y+logo_h-1-band));
*xdst = (*xsrc*dist + interp*(band-dist))/band;
if (show && (dist == band-1))
*xdst = 0;
}
}
dst += dst_linesize;
src += src_linesize;
}
}
Example 8: mark_pns
static void mark_pns(AACEncContext *s, AVCodecContext *avctx, SingleChannelElement *sce)
{
FFPsyBand *band;
int w, g, w2;
int wlen = 1024 / sce->ics.num_windows;
int bandwidth, cutoff;
const float lambda = s->lambda;
const float freq_mult = avctx->sample_rate*0.5f/wlen;
const float spread_threshold = FFMIN(0.75f, NOISE_SPREAD_THRESHOLD*FFMAX(0.5f, lambda/100.f));
const float pns_transient_energy_r = FFMIN(0.7f, lambda / 140.f);
int refbits = avctx->bit_rate * 1024.0 / avctx->sample_rate
/ ((avctx->flags & CODEC_FLAG_QSCALE) ? 2.0f : avctx->channels)
* (lambda / 120.f);
/** Keep this in sync with twoloop's cutoff selection */
float rate_bandwidth_multiplier = 1.5f;
int frame_bit_rate = (avctx->flags & CODEC_FLAG_QSCALE)
? (refbits * rate_bandwidth_multiplier * avctx->sample_rate / 1024)
: (avctx->bit_rate / avctx->channels);
frame_bit_rate *= 1.15f;
if (avctx->cutoff > 0) {
bandwidth = avctx->cutoff;
} else {
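/* Derive the coding bandwidth from the frame bit rate, but never let it drop below 3 kHz. */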
bandwidth = FFMAX(3000, AAC_CUTOFF_FROM_BITRATE(frame_bit_rate, 1, avctx->sample_rate));
}
cutoff = bandwidth * 2 * wlen / avctx->sample_rate;
memcpy(sce->band_alt, sce->band_type, sizeof(sce->band_type));
for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
for (g = 0; g < sce->ics.num_swb; g++) {
float sfb_energy = 0.0f, threshold = 0.0f, spread = 2.0f;
float min_energy = -1.0f, max_energy = 0.0f;
const int start = sce->ics.swb_offset[g];
const float freq = start*freq_mult;
const float freq_boost = FFMAX(0.88f*freq/NOISE_LOW_LIMIT, 1.0f);
if (freq < NOISE_LOW_LIMIT || start >= cutoff) {
sce->can_pns[w*16+g] = 0;
continue;
}
for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) {
band = &s->psy.ch[s->cur_channel].psy_bands[(w+w2)*16+g];
sfb_energy += band->energy;
spread = FFMIN(spread, band->spread);
threshold += band->threshold;
if (!w2) {
min_energy = max_energy = band->energy;
} else {
min_energy = FFMIN(min_energy, band->energy);
max_energy = FFMAX(max_energy, band->energy);
}
}
/* PNS is acceptable when all of these are true:
* 1. high spread energy (noise-like band)
* 2. near-threshold energy (high PE means the random nature of PNS content will be noticed)
* 3. on short window groups, all windows have similar energy (variations in energy would be destroyed by PNS)
*/
sce->pns_ener[w*16+g] = sfb_energy;
if (sfb_energy < threshold*sqrtf(1.5f/freq_boost) || spread < spread_threshold || min_energy < pns_transient_energy_r * max_energy) {
sce->can_pns[w*16+g] = 0;
} else {
sce->can_pns[w*16+g] = 1;
}
}
}
}
Example 9: search_for_ms
static void search_for_ms(AACEncContext *s, ChannelElement *cpe)
{
int start = 0, i, w, w2, g, sid_sf_boost, prev_mid, prev_side;
uint8_t nextband0[128], nextband1[128];
float *M = s->scoefs + 128*0, *S = s->scoefs + 128*1;
float *L34 = s->scoefs + 128*2, *R34 = s->scoefs + 128*3;
float *M34 = s->scoefs + 128*4, *S34 = s->scoefs + 128*5;
const float lambda = s->lambda;
const float mslambda = FFMIN(1.0f, lambda / 120.f);
SingleChannelElement *sce0 = &cpe->ch[0];
SingleChannelElement *sce1 = &cpe->ch[1];
if (!cpe->common_window)
return;
/** Scout out next nonzero bands */
ff_init_nextband_map(sce0, nextband0);
ff_init_nextband_map(sce1, nextband1);
prev_mid = sce0->sf_idx[0];
prev_side = sce1->sf_idx[0];
for (w = 0; w < sce0->ics.num_windows; w += sce0->ics.group_len[w]) {
start = 0;
for (g = 0; g < sce0->ics.num_swb; g++) {
float bmax = bval2bmax(g * 17.0f / sce0->ics.num_swb) / 0.0045f;
if (!cpe->is_mask[w*16+g])
cpe->ms_mask[w*16+g] = 0;
if (!sce0->zeroes[w*16+g] && !sce1->zeroes[w*16+g] && !cpe->is_mask[w*16+g]) {
float Mmax = 0.0f, Smax = 0.0f;
/* Must compute mid/side SF and book for the whole window group */
for (w2 = 0; w2 < sce0->ics.group_len[w]; w2++) {
for (i = 0; i < sce0->ics.swb_sizes[g]; i++) {
M[i] = (sce0->coeffs[start+(w+w2)*128+i]
+ sce1->coeffs[start+(w+w2)*128+i]) * 0.5;
S[i] = M[i]
- sce1->coeffs[start+(w+w2)*128+i];
}
s->abs_pow34(M34, M, sce0->ics.swb_sizes[g]);
s->abs_pow34(S34, S, sce0->ics.swb_sizes[g]);
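/* Track the largest |coefficient|^(3/4) of the mid and side signals with FFMAX; used below to pick the smallest usable codebook. */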
for (i = 0; i < sce0->ics.swb_sizes[g]; i++ ) {
Mmax = FFMAX(Mmax, M34[i]);
Smax = FFMAX(Smax, S34[i]);
}
}
for (sid_sf_boost = 0; sid_sf_boost < 4; sid_sf_boost++) {
float dist1 = 0.0f, dist2 = 0.0f;
int B0 = 0, B1 = 0;
int minidx;
int mididx, sididx;
int midcb, sidcb;
minidx = FFMIN(sce0->sf_idx[w*16+g], sce1->sf_idx[w*16+g]);
mididx = av_clip(minidx, 0, SCALE_MAX_POS - SCALE_DIV_512);
sididx = av_clip(minidx - sid_sf_boost * 3, 0, SCALE_MAX_POS - SCALE_DIV_512);
if (sce0->band_type[w*16+g] != NOISE_BT && sce1->band_type[w*16+g] != NOISE_BT
&& ( !ff_sfdelta_can_replace(sce0, nextband0, prev_mid, mididx, w*16+g)
|| !ff_sfdelta_can_replace(sce1, nextband1, prev_side, sididx, w*16+g))) {
/* scalefactor range violation, bad stuff, will decrease quality unacceptably */
continue;
}
midcb = find_min_book(Mmax, mididx);
sidcb = find_min_book(Smax, sididx);
/* No CB can be zero */
midcb = FFMAX(1,midcb);
sidcb = FFMAX(1,sidcb);
for (w2 = 0; w2 < sce0->ics.group_len[w]; w2++) {
FFPsyBand *band0 = &s->psy.ch[s->cur_channel+0].psy_bands[(w+w2)*16+g];
FFPsyBand *band1 = &s->psy.ch[s->cur_channel+1].psy_bands[(w+w2)*16+g];
float minthr = FFMIN(band0->threshold, band1->threshold);
int b1,b2,b3,b4;
for (i = 0; i < sce0->ics.swb_sizes[g]; i++) {
M[i] = (sce0->coeffs[start+(w+w2)*128+i]
+ sce1->coeffs[start+(w+w2)*128+i]) * 0.5;
S[i] = M[i]
- sce1->coeffs[start+(w+w2)*128+i];
}
s->abs_pow34(L34, sce0->coeffs+start+(w+w2)*128, sce0->ics.swb_sizes[g]);
s->abs_pow34(R34, sce1->coeffs+start+(w+w2)*128, sce0->ics.swb_sizes[g]);
s->abs_pow34(M34, M, sce0->ics.swb_sizes[g]);
s->abs_pow34(S34, S, sce0->ics.swb_sizes[g]);
dist1 += quantize_band_cost(s, &sce0->coeffs[start + (w+w2)*128],
L34,
sce0->ics.swb_sizes[g],
sce0->sf_idx[w*16+g],
sce0->band_type[w*16+g],
lambda / band0->threshold, INFINITY, &b1, NULL, 0);
dist1 += quantize_band_cost(s, &sce1->coeffs[start + (w+w2)*128],
R34,
sce1->ics.swb_sizes[g],
sce1->sf_idx[w*16+g],
sce1->band_type[w*16+g],
lambda / band1->threshold, INFINITY, &b2, NULL, 0);
dist2 += quantize_band_cost(s, M,
M34,
sce0->ics.swb_sizes[g],
//......... part of the code omitted here .........
Example 10: search_for_quantizers_anmr
static void search_for_quantizers_anmr(AVCodecContext *avctx, AACEncContext *s,
SingleChannelElement *sce,
const float lambda)
{
int q, w, w2, g, start = 0;
int i, j;
int idx;
TrellisPath paths[TRELLIS_STAGES][TRELLIS_STATES];
int bandaddr[TRELLIS_STAGES];
int minq;
float mincost;
float q0f = FLT_MAX, q1f = 0.0f, qnrgf = 0.0f;
int q0, q1, qcnt = 0;
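/* One pass over all coefficients: q0f/q1f track the smallest and largest nonzero magnitudes (via FFMIN/FFMAX) and qnrgf the total energy; they bound the scalefactor search below. */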
for (i = 0; i < 1024; i++) {
float t = fabsf(sce->coeffs[i]);
if (t > 0.0f) {
q0f = FFMIN(q0f, t);
q1f = FFMAX(q1f, t);
qnrgf += t*t;
qcnt++;
}
}
if (!qcnt) {
memset(sce->sf_idx, 0, sizeof(sce->sf_idx));
memset(sce->zeroes, 1, sizeof(sce->zeroes));
return;
}
//minimum scalefactor index is when minimum nonzero coefficient after quantizing is not clipped
q0 = av_clip(coef2minsf(q0f), 0, SCALE_MAX_POS-1);
//maximum scalefactor index is when maximum coefficient after quantizing is still not zero
q1 = av_clip(coef2maxsf(q1f), 1, SCALE_MAX_POS);
if (q1 - q0 > 60) {
int q0low = q0;
int q1high = q1;
//minimum scalefactor index is when maximum nonzero coefficient after quantizing is not clipped
int qnrg = av_clip_uint8(log2f(sqrtf(qnrgf/qcnt))*4 - 31 + SCALE_ONE_POS - SCALE_DIV_512);
q1 = qnrg + 30;
q0 = qnrg - 30;
if (q0 < q0low) {
q1 += q0low - q0;
q0 = q0low;
} else if (q1 > q1high) {
q0 -= q1 - q1high;
q1 = q1high;
}
}
// q0 == q1 isn't really a legal situation
if (q0 == q1) {
// the following is indirect but guarantees q1 != q0 && q1 near q0
q1 = av_clip(q0+1, 1, SCALE_MAX_POS);
q0 = av_clip(q1-1, 0, SCALE_MAX_POS - 1);
}
for (i = 0; i < TRELLIS_STATES; i++) {
paths[0][i].cost = 0.0f;
paths[0][i].prev = -1;
}
for (j = 1; j < TRELLIS_STAGES; j++) {
for (i = 0; i < TRELLIS_STATES; i++) {
paths[j][i].cost = INFINITY;
paths[j][i].prev = -2;
}
}
idx = 1;
s->abs_pow34(s->scoefs, sce->coeffs, 1024);
for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
start = w*128;
for (g = 0; g < sce->ics.num_swb; g++) {
const float *coefs = &sce->coeffs[start];
float qmin, qmax;
int nz = 0;
bandaddr[idx] = w * 16 + g;
qmin = INT_MAX;
qmax = 0.0f;
for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) {
FFPsyBand *band = &s->psy.ch[s->cur_channel].psy_bands[(w+w2)*16+g];
if (band->energy <= band->threshold || band->threshold == 0.0f) {
sce->zeroes[(w+w2)*16+g] = 1;
continue;
}
sce->zeroes[(w+w2)*16+g] = 0;
nz = 1;
for (i = 0; i < sce->ics.swb_sizes[g]; i++) {
float t = fabsf(coefs[w2*128+i]);
if (t > 0.0f)
qmin = FFMIN(qmin, t);
qmax = FFMAX(qmax, t);
}
}
if (nz) {
int minscale, maxscale;
float minrd = INFINITY;
float maxval;
//minimum scalefactor index is when minimum nonzero coefficient after quantizing is not clipped
minscale = coef2minsf(qmin);
//maximum scalefactor index is when maximum coefficient after quantizing is still not zero
//......... part of the code omitted here .........
Example 11: search_for_pns
static void search_for_pns(AACEncContext *s, AVCodecContext *avctx, SingleChannelElement *sce)
{
FFPsyBand *band;
int w, g, w2, i;
int wlen = 1024 / sce->ics.num_windows;
int bandwidth, cutoff;
float *PNS = &s->scoefs[0*128], *PNS34 = &s->scoefs[1*128];
float *NOR34 = &s->scoefs[3*128];
uint8_t nextband[128];
const float lambda = s->lambda;
const float freq_mult = avctx->sample_rate*0.5f/wlen;
const float thr_mult = NOISE_LAMBDA_REPLACE*(100.0f/lambda);
const float spread_threshold = FFMIN(0.75f, NOISE_SPREAD_THRESHOLD*FFMAX(0.5f, lambda/100.f));
const float dist_bias = av_clipf(4.f * 120 / lambda, 0.25f, 4.0f);
const float pns_transient_energy_r = FFMIN(0.7f, lambda / 140.f);
int refbits = avctx->bit_rate * 1024.0 / avctx->sample_rate
/ ((avctx->flags & CODEC_FLAG_QSCALE) ? 2.0f : avctx->channels)
* (lambda / 120.f);
/** Keep this in sync with twoloop's cutoff selection */
float rate_bandwidth_multiplier = 1.5f;
int prev = -1000, prev_sf = -1;
int frame_bit_rate = (avctx->flags & CODEC_FLAG_QSCALE)
? (refbits * rate_bandwidth_multiplier * avctx->sample_rate / 1024)
: (avctx->bit_rate / avctx->channels);
frame_bit_rate *= 1.15f;
if (avctx->cutoff > 0) {
bandwidth = avctx->cutoff;
} else {
bandwidth = FFMAX(3000, AAC_CUTOFF_FROM_BITRATE(frame_bit_rate, 1, avctx->sample_rate));
}
cutoff = bandwidth * 2 * wlen / avctx->sample_rate;
memcpy(sce->band_alt, sce->band_type, sizeof(sce->band_type));
ff_init_nextband_map(sce, nextband);
for (w = 0; w < sce->ics.num_windows; w += sce->ics.group_len[w]) {
int wstart = w*128;
for (g = 0; g < sce->ics.num_swb; g++) {
int noise_sfi;
float dist1 = 0.0f, dist2 = 0.0f, noise_amp;
float pns_energy = 0.0f, pns_tgt_energy, energy_ratio, dist_thresh;
float sfb_energy = 0.0f, threshold = 0.0f, spread = 2.0f;
float min_energy = -1.0f, max_energy = 0.0f;
const int start = wstart+sce->ics.swb_offset[g];
const float freq = (start-wstart)*freq_mult;
const float freq_boost = FFMAX(0.88f*freq/NOISE_LOW_LIMIT, 1.0f);
if (freq < NOISE_LOW_LIMIT || (start-wstart) >= cutoff) {
if (!sce->zeroes[w*16+g])
prev_sf = sce->sf_idx[w*16+g];
continue;
}
for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) {
band = &s->psy.ch[s->cur_channel].psy_bands[(w+w2)*16+g];
sfb_energy += band->energy;
spread = FFMIN(spread, band->spread);
threshold += band->threshold;
if (!w2) {
min_energy = max_energy = band->energy;
} else {
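/* Track the per-window min/max energy across the group; large variation disqualifies the band for PNS. */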
min_energy = FFMIN(min_energy, band->energy);
max_energy = FFMAX(max_energy, band->energy);
}
}
/* Ramps down at ~8000Hz and loosens the dist threshold */
dist_thresh = av_clipf(2.5f*NOISE_LOW_LIMIT/freq, 0.5f, 2.5f) * dist_bias;
/* PNS is acceptable when all of these are true:
* 1. high spread energy (noise-like band)
* 2. near-threshold energy (high PE means the random nature of PNS content will be noticed)
* 3. on short window groups, all windows have similar energy (variations in energy would be destroyed by PNS)
*
* At this stage, point 2 is relaxed for zeroed bands near the noise threshold (hole avoidance is more important)
*/
if ((!sce->zeroes[w*16+g] && !ff_sfdelta_can_remove_band(sce, nextband, prev_sf, w*16+g)) ||
((sce->zeroes[w*16+g] || !sce->band_alt[w*16+g]) && sfb_energy < threshold*sqrtf(1.0f/freq_boost)) || spread < spread_threshold ||
(!sce->zeroes[w*16+g] && sce->band_alt[w*16+g] && sfb_energy > threshold*thr_mult*freq_boost) ||
min_energy < pns_transient_energy_r * max_energy ) {
sce->pns_ener[w*16+g] = sfb_energy;
if (!sce->zeroes[w*16+g])
prev_sf = sce->sf_idx[w*16+g];
continue;
}
pns_tgt_energy = sfb_energy*FFMIN(1.0f, spread*spread);
noise_sfi = av_clip(roundf(log2f(pns_tgt_energy)*2), -100, 155); /* Quantize */
noise_amp = -ff_aac_pow2sf_tab[noise_sfi + POW_SF2_ZERO]; /* Dequantize */
if (prev != -1000) {
int noise_sfdiff = noise_sfi - prev + SCALE_DIFF_ZERO;
if (noise_sfdiff < 0 || noise_sfdiff > 2*SCALE_MAX_DIFF) {
if (!sce->zeroes[w*16+g])
prev_sf = sce->sf_idx[w*16+g];
continue;
}
}
for (w2 = 0; w2 < sce->ics.group_len[w]; w2++) {
//......... part of the code omitted here .........
Example 12: x8_setup_spatial_compensation
/**
Collect statistics and prepare the edge pixels required by the other spatial compensation functions.
* @param src pointer to the beginning of the processed block
* @param dst pointer to emu_edge, edge pixels are stored the way other compensation routines do.
* @param linesize byte offset between 2 vertical pixels in the source image
* @param range pointer to the variable where the edge pixel range is to be stored (max-min values)
* @param psum pointer to the variable where the edge pixel sum is to be stored
* @param edges Informs this routine that the block is on an image border, so some of
* the missing edge pixels have to be interpolated; the flag has the following meaning:
1 - mb_x==0 - first block in the row, interpolate area #1,#2,#3;
2 - mb_y==0 - first row, interpolate area #3,#4,#5,#6;
note: 1|2 - mb_x==mb_y==0 - first block, use 0x80 value for all areas;
4 - mb_x>= (mb_width-1) last block in the row, interpolate area #5;
*/
static void x8_setup_spatial_compensation(uint8_t *src, uint8_t *dst, int linesize,
int * range, int * psum, int edges){
uint8_t * ptr;
int sum;
int i;
int min_pix,max_pix;
uint8_t c;
if((edges&3)==3){
*psum=0x80*(8+1+8+2);
*range=0;
memset(dst,0x80,16+1+16+8);
//this triggers flat_dc for sure.
//flat_dc avoids all (other) prediction modes, but requires dc_level decoding.
return;
}
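/* Initialise the running min/max outside the valid 0..255 pixel range so the first FFMIN/FFMAX always latches the first edge pixel. */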
min_pix=256;
max_pix=-1;
sum=0;
if(!(edges&1)){//(mb_x!=0)//there is previous block on this row
ptr=src-1;//left column, area 2
for(i=7;i>=0;i--){
c=*(ptr-1);//area1, same mb as area2, no need to check
dst[area1+i]=c;
c=*(ptr);
sum+=c;
min_pix=FFMIN(min_pix,c);
max_pix=FFMAX(max_pix,c);
dst[area2+i]=c;
ptr+=linesize;
}
}
if(!(edges&2)){ //(mb_y!=0)//there is row above
ptr=src-linesize;//top line
for(i=0;i<8;i++){
c=*(ptr+i);
sum+=c;
min_pix=FFMIN(min_pix, c);
max_pix=FFMAX(max_pix, c);
}
if(edges&4){//last block on the row?
memset(dst+area5,c,8);//set with the last pixel of the top line
memcpy(dst+area4, ptr, 8);
}else{
memcpy(dst+area4, ptr, 16);//both area4 and 5
}
memcpy(dst+area6, ptr-linesize, 8);//area6 always present in the above block
}
//now calculate the stuff we need
if(edges&3){//mb_x==0 || mb_y==0
int avg=(sum+4)>>3;
if(edges&1){ //(mb_x==0), implies mb_y!=0
memset(dst+area1,avg,8+8+1);//areas 1,2 and 3 are averaged
}else{//implies y==0 x!=0
memset(dst+area3,avg, 1+16+8);//areas 3, 4,5,6
}
sum+=avg*9;
}else{
//......... part of the code omitted here .........
Example 13: decode_frame
static int decode_frame(AVCodecContext *avctx,
void *data, int *got_frame,
AVPacket *avpkt)
{
AnsiContext *s = avctx->priv_data;
uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
const uint8_t *buf_end = buf+buf_size;
int ret, i, count;
if ((ret = ff_reget_buffer(avctx, s->frame)) < 0)
return ret;
if (!avctx->frame_number) {
for (i=0; i<avctx->height; i++)
memset(s->frame->data[0]+ i*s->frame->linesize[0], 0, avctx->width);
memset(s->frame->data[1], 0, AVPALETTE_SIZE);
}
s->frame->pict_type = AV_PICTURE_TYPE_I;
s->frame->palette_has_changed = 1;
set_palette((uint32_t *)s->frame->data[1]);
if (!s->first_frame) {
erase_screen(avctx);
s->first_frame = 1;
}
while(buf < buf_end) {
switch(s->state) {
case STATE_NORMAL:
switch (buf[0]) {
case 0x00: //NUL
case 0x07: //BEL
case 0x1A: //SUB
/* ignore */
break;
case 0x08: //BS
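/* Backspace: move the cursor one column left, clamped to 0 by FFMAX. */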
s->x = FFMAX(s->x - 1, 0);
break;
case 0x09: //HT
i = s->x / FONT_WIDTH;
count = ((i + 8) & ~7) - i;
for (i = 0; i < count; i++)
draw_char(avctx, ' ');
break;
case 0x0A: //LF
hscroll(avctx);
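/* fall through: a line feed also returns the cursor to column 0 (CR behaviour) */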
case 0x0D: //CR
s->x = 0;
break;
case 0x0C: //FF
erase_screen(avctx);
break;
case 0x1B: //ESC
s->state = STATE_ESCAPE;
break;
default:
draw_char(avctx, buf[0]);
}
break;
case STATE_ESCAPE:
if (buf[0] == '[') {
s->state = STATE_CODE;
s->nb_args = 0;
s->args[0] = -1;
} else {
s->state = STATE_NORMAL;
draw_char(avctx, 0x1B);
continue;
}
break;
case STATE_CODE:
switch(buf[0]) {
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9':
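/* Accumulate a decimal argument digit by digit; FFMAX treats the initial -1 (no argument yet) as 0. */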
if (s->nb_args < MAX_NB_ARGS)
s->args[s->nb_args] = FFMAX(s->args[s->nb_args], 0) * 10 + buf[0] - '0';
break;
case ';':
s->nb_args++;
if (s->nb_args < MAX_NB_ARGS)
s->args[s->nb_args] = 0;
break;
case 'M':
s->state = STATE_MUSIC_PREAMBLE;
break;
case '=': case '?':
/* ignore */
break;
default:
if (s->nb_args > MAX_NB_ARGS)
av_log(avctx, AV_LOG_WARNING, "args overflow (%i)\n", s->nb_args);
if (s->nb_args < MAX_NB_ARGS && s->args[s->nb_args] >= 0)
s->nb_args++;
if ((ret = execute_code(avctx, buf[0])) < 0)
return ret;
s->state = STATE_NORMAL;
}
break;
case STATE_MUSIC_PREAMBLE:
if (buf[0] == 0x0E || buf[0] == 0x1B)
//......... part of the code omitted here .........
Example 14: execute_code
/**
* Execute ANSI escape code
* @return 0 on success, negative on error
*/
static int execute_code(AVCodecContext * avctx, int c)
{
AnsiContext *s = avctx->priv_data;
int ret, i, width, height;
switch(c) {
case 'A': //Cursor Up
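/* Cursor movement: FFMAX/FFMIN clamp the new position so it stays inside the visible screen. */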
s->y = FFMAX(s->y - (s->nb_args > 0 ? s->args[0]*s->font_height : s->font_height), 0);
break;
case 'B': //Cursor Down
s->y = FFMIN(s->y + (s->nb_args > 0 ? s->args[0]*s->font_height : s->font_height), avctx->height - s->font_height);
break;
case 'C': //Cursor Right
s->x = FFMIN(s->x + (s->nb_args > 0 ? s->args[0]*FONT_WIDTH : FONT_WIDTH), avctx->width - FONT_WIDTH);
break;
case 'D': //Cursor Left
s->x = FFMAX(s->x - (s->nb_args > 0 ? s->args[0]*FONT_WIDTH : FONT_WIDTH), 0);
break;
case 'H': //Cursor Position
case 'f': //Horizontal and Vertical Position
s->y = s->nb_args > 0 ? av_clip((s->args[0] - 1)*s->font_height, 0, avctx->height - s->font_height) : 0;
s->x = s->nb_args > 1 ? av_clip((s->args[1] - 1)*FONT_WIDTH, 0, avctx->width - FONT_WIDTH) : 0;
break;
case 'h': //set screen mode
case 'l': //reset screen mode
if (s->nb_args < 2)
s->args[0] = DEFAULT_SCREEN_MODE;
width = avctx->width;
height = avctx->height;
switch(s->args[0]) {
case 0: case 1: case 4: case 5: case 13: case 19: //320x200 (25 rows)
s->font = avpriv_cga_font;
s->font_height = 8;
width = 40<<3;
height = 25<<3;
break;
case 2: case 3: //640x400 (25 rows)
s->font = avpriv_vga16_font;
s->font_height = 16;
width = 80<<3;
height = 25<<4;
break;
case 6: case 14: //640x200 (25 rows)
s->font = avpriv_cga_font;
s->font_height = 8;
width = 80<<3;
height = 25<<3;
break;
case 7: //set line wrapping
break;
case 15: case 16: //640x350 (43 rows)
s->font = avpriv_cga_font;
s->font_height = 8;
width = 80<<3;
height = 43<<3;
break;
case 17: case 18: //640x480 (60 rows)
s->font = avpriv_cga_font;
s->font_height = 8;
width = 80<<3;
height = 60<<4;
break;
default:
avpriv_request_sample(avctx, "Unsupported screen mode");
}
if (width != avctx->width || height != avctx->height) {
av_frame_unref(s->frame);
avcodec_set_dimensions(avctx, width, height);
if ((ret = ff_get_buffer(avctx, s->frame,
AV_GET_BUFFER_FLAG_REF)) < 0)
return ret;
s->frame->pict_type = AV_PICTURE_TYPE_I;
s->frame->palette_has_changed = 1;
set_palette((uint32_t *)s->frame->data[1]);
erase_screen(avctx);
} else if (c == 'l') {
erase_screen(avctx);
}
break;
case 'J': //Erase in Page
switch (s->args[0]) {
case 0:
erase_line(avctx, s->x, avctx->width - s->x);
if (s->y < avctx->height - s->font_height)
memset(s->frame->data[0] + (s->y + s->font_height)*s->frame->linesize[0],
DEFAULT_BG_COLOR, (avctx->height - s->y - s->font_height)*s->frame->linesize[0]);
break;
case 1:
erase_line(avctx, 0, s->x);
if (s->y > 0)
memset(s->frame->data[0], DEFAULT_BG_COLOR, s->y * s->frame->linesize[0]);
break;
case 2:
erase_screen(avctx);
}
break;
case 'K': //Erase in Line
//......... part of the code omitted here .........
Example 15: config_input
static int config_input(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
BoxBlurContext *boxblur = ctx->priv;
const AVPixFmtDescriptor *desc = &av_pix_fmt_descriptors[inlink->format];
int w = inlink->w, h = inlink->h;
int cw, ch;
double var_values[VARS_NB], res;
char *expr;
int ret;
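/* Each scratch buffer must hold either a full row or a full column during the blur passes, hence FFMAX(w, h) bytes. */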
if (!(boxblur->temp[0] = av_malloc(FFMAX(w, h))) ||
!(boxblur->temp[1] = av_malloc(FFMAX(w, h))))
return AVERROR(ENOMEM);
boxblur->hsub = desc->log2_chroma_w;
boxblur->vsub = desc->log2_chroma_h;
var_values[VAR_W] = inlink->w;
var_values[VAR_H] = inlink->h;
var_values[VAR_CW] = cw = w>>boxblur->hsub;
var_values[VAR_CH] = ch = h>>boxblur->vsub;
var_values[VAR_HSUB] = 1<<boxblur->hsub;
var_values[VAR_VSUB] = 1<<boxblur->vsub;
#define EVAL_RADIUS_EXPR(comp) \
expr = boxblur->comp##_radius_expr; \
ret = av_expr_parse_and_eval(&res, expr, var_names, var_values, \
NULL, NULL, NULL, NULL, NULL, 0, ctx); \
boxblur->comp##_param.radius = res; \
if (ret < 0) { \
av_log(NULL, AV_LOG_ERROR, \
"Error when evaluating " #comp " radius expression '%s'\n", expr); \
return ret; \
}
EVAL_RADIUS_EXPR(luma);
EVAL_RADIUS_EXPR(chroma);
EVAL_RADIUS_EXPR(alpha);
av_log(ctx, AV_LOG_INFO,
"luma_radius:%d luma_power:%d "
"chroma_radius:%d chroma_power:%d "
"alpha_radius:%d alpha_power:%d "
"w:%d chroma_w:%d h:%d chroma_h:%d\n",
boxblur->luma_param .radius, boxblur->luma_param .power,
boxblur->chroma_param.radius, boxblur->chroma_param.power,
boxblur->alpha_param .radius, boxblur->alpha_param .power,
w, cw, h, ch);
#define CHECK_RADIUS_VAL(w_, h_, comp) \
if (boxblur->comp##_param.radius < 0 || \
2*boxblur->comp##_param.radius > FFMIN(w_, h_)) { \
av_log(ctx, AV_LOG_ERROR, \
"Invalid " #comp " radius value %d, must be >= 0 and <= %d\n", \
boxblur->comp##_param.radius, FFMIN(w_, h_)/2); \
return AVERROR(EINVAL); \
}
CHECK_RADIUS_VAL(w, h, luma);
CHECK_RADIUS_VAL(cw, ch, chroma);
CHECK_RADIUS_VAL(w, h, alpha);
boxblur->radius[Y] = boxblur->luma_param.radius;
boxblur->radius[U] = boxblur->radius[V] = boxblur->chroma_param.radius;
boxblur->radius[A] = boxblur->alpha_param.radius;
boxblur->power[Y] = boxblur->luma_param.power;
boxblur->power[U] = boxblur->power[V] = boxblur->chroma_param.power;
boxblur->power[A] = boxblur->alpha_param.power;
return 0;
}