This article collects typical usage examples of the C++ function decoder_NewPicture. If you are unsure what decoder_NewPicture does or how to call it, the hand-picked examples below should help.
The following 15 code examples show decoder_NewPicture in real projects, sorted by popularity by default.
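All of the examples boil down to the same flow: publish the decoder's output format, request an empty picture from the video output with decoder_NewPicture, fill in the pixels and the date, then hand the picture on. The sketch below summarizes that flow. It is only a minimal illustration written against the VLC 3.0-era decoder API that example 9 uses (decoder_UpdateVideoFormat, decoder_QueueVideo, VLCDEC_SUCCESS); FillPlanes is a hypothetical helper standing in for the real pixel copy, not an actual VLC function.
#include <vlc_common.h>
#include <vlc_codec.h>

/* Hypothetical helper: copies the decoded pixels of p_block into p_pic's planes. */
static void FillPlanes( decoder_t *p_dec, block_t *p_block, picture_t *p_pic );

static int SketchDecode( decoder_t *p_dec, block_t *p_block )
{
    if( p_block == NULL )                  /* drain request: nothing buffered in this sketch */
        return VLCDEC_SUCCESS;

    /* Make sure the video output matches fmt_out before requesting a buffer */
    if( decoder_UpdateVideoFormat( p_dec ) )
    {
        block_Release( p_block );
        return VLCDEC_SUCCESS;
    }

    /* Ask the video output for an empty picture buffer */
    picture_t *p_pic = decoder_NewPicture( p_dec );
    if( p_pic == NULL )                    /* the vout may be unavailable or being rebuilt */
    {
        block_Release( p_block );
        return VLCDEC_SUCCESS;
    }

    FillPlanes( p_dec, p_block, p_pic );   /* hypothetical: copy the decoded pixels */
    p_pic->date = ( p_block->i_pts != VLC_TS_INVALID ) ? p_block->i_pts
                                                       : p_block->i_dts;

    block_Release( p_block );
    decoder_QueueVideo( p_dec, p_pic );    /* send the finished frame to the vout */
    return VLCDEC_SUCCESS;
}
The older examples below return the finished picture straight from their decode callback instead of queueing it, and the ffmpeg_NewPictBuf variants (examples 6, 7 and 15) use decoder_NewPicture to hand buffers to libavcodec for direct rendering.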
Example 1: gst_vlc_picture_plane_allocator_hold
bool gst_vlc_picture_plane_allocator_hold(
GstVlcPicturePlaneAllocator *p_allocator, GstBuffer *p_buffer )
{
picture_t* p_pic = NULL;
decoder_t* p_dec = p_allocator->p_dec;
GstVlcPicturePlane *p_mem;
int i_plane;
if( !decoder_UpdateVideoFormat( p_dec ) )
p_pic = decoder_NewPicture( p_dec );
if( !p_pic )
{
msg_Err( p_allocator->p_dec, "failed to acquire picture from vout" );
return false;
}
for( i_plane = 0; i_plane < p_pic->i_planes; i_plane++ )
{
p_mem = (GstVlcPicturePlane*) gst_buffer_peek_memory ( p_buffer,
i_plane );
p_mem->p_pic = p_pic;
p_mem->p_plane = &p_pic->p[ i_plane ];
}
return true;
}
Example 2: picture_Release
/****************************************************************************
* DecodeBlock: the whole thing
****************************************************************************
* This function must be fed with a complete compressed frame.
****************************************************************************/
static picture_t *DecodeBlock( decoder_t *p_dec, block_t **pp_block )
{
decoder_sys_t *p_sys = p_dec->p_sys;
block_t *p_block;
if( !pp_block || !*pp_block ) return NULL;
p_block = *pp_block;
// create new picture
if( p_sys->p_pic != NULL )
picture_Release( p_sys->p_pic );
p_sys->p_pic = decoder_NewPicture( p_dec );
/* decoder_NewPicture() can return NULL (e.g. while the vout is being rebuilt);
 * guard against it before touching the picture */
if( p_sys->p_pic == NULL )
{
block_Release( *pp_block ); *pp_block = NULL;
return NULL;
}
p_sys->p_pic->b_force = true;
p_sys->p_pic->p->i_pitch = p_dec->p_sys->i_pitch;
p_sys->p_pic->date = p_block->i_pts > 0 ? p_block->i_pts : p_block->i_dts;
// lock input and copy to picture
p_sys->p_pic->p->p_pixels = p_sys->pf_lock( p_dec->p_sys->p_data );
// unlock input
p_sys->pf_unlock( p_dec->p_sys->p_data );
block_Release( *pp_block ); *pp_block = NULL;
return p_sys->p_pic;
}
Example 3: theora_decode_packetin
/*****************************************************************************
* DecodePacket: decodes a Theora packet.
*****************************************************************************/
static picture_t *DecodePacket( decoder_t *p_dec, ogg_packet *p_oggpacket )
{
decoder_sys_t *p_sys = p_dec->p_sys;
picture_t *p_pic;
yuv_buffer yuv;
theora_decode_packetin( &p_sys->td, p_oggpacket );
/* Check for keyframe */
if( !(p_oggpacket->packet[0] & 0x80) /* data packet */ &&
!(p_oggpacket->packet[0] & 0x40) /* intra frame */ )
p_sys->b_decoded_first_keyframe = true;
/* If we haven't seen a single keyframe yet, don't let Theora decode
* anything, otherwise we'll get display artifacts. (This is impossible
* in the general case, but can happen if e.g. we play a network stream
* using a timed URL, such that the server doesn't start the video with a
* keyframe). */
if( p_sys->b_decoded_first_keyframe )
theora_decode_YUVout( &p_sys->td, &yuv );
else
return NULL;
/* Get a new picture */
p_pic = decoder_NewPicture( p_dec );
if( !p_pic ) return NULL;
theora_CopyPicture( p_pic, &yuv );
p_pic->date = p_sys->i_pts;
return p_pic;
}
Example 4: decoder_NewPicture
/****************************************************************************
* DecodeBlock: the whole thing
****************************************************************************/
static picture_t *DecodeBlock( decoder_t *p_dec, block_t **pp_block )
{
decoder_sys_t *p_sys = (decoder_sys_t*) p_dec->p_sys;
picture_t *p_pic;
if( pp_block == NULL || !*pp_block ) return NULL;
p_pic = decoder_NewPicture( p_dec );
if( p_pic == NULL )
{
msg_Err( p_dec, "cannot get picture" );
goto error;
}
if( p_sys->b_reload && (mdate() >= p_sys->i_next) )
{
var_TriggerCallback( p_dec, "fake-file" );
/* next period */
p_sys->i_next = (mtime_t)(p_sys->i_reload + mdate());
}
vlc_mutex_lock( &p_dec->p_sys->lock );
picture_Copy( p_pic, p_dec->p_sys->p_image );
vlc_mutex_unlock( &p_dec->p_sys->lock );
p_pic->date = (*pp_block)->i_pts;
error:
block_Release( *pp_block );
*pp_block = NULL;
return p_pic;
}
Example 5: decoder_NewPicture
/*****************************************************************************
* DecodeFrame: decodes a video frame.
*****************************************************************************/
static picture_t *DecodeFrame( decoder_t *p_dec, block_t *p_block )
{
decoder_sys_t *p_sys = p_dec->p_sys;
picture_t *p_pic;
/* Get a new picture */
p_pic = decoder_NewPicture( p_dec );
if( !p_pic )
{
block_Release( p_block );
return NULL;
}
FillPicture( p_dec, p_block, p_pic );
p_pic->date = date_Get( &p_sys->pts );
if( p_block->i_flags & BLOCK_FLAG_INTERLACED_MASK )
{
p_pic->b_progressive = false;
p_pic->i_nb_fields = 2;
if( p_block->i_flags & BLOCK_FLAG_TOP_FIELD_FIRST )
p_pic->b_top_field_first = true;
else
p_pic->b_top_field_first = false;
}
else
p_pic->b_progressive = true;
block_Release( p_block );
return p_pic;
}
Example 6: av_q2d
/* Returns a new picture buffer */
static inline picture_t *ffmpeg_NewPictBuf( decoder_t *p_dec,
AVCodecContext *p_context )
{
decoder_sys_t *p_sys = p_dec->p_sys;
p_dec->fmt_out.video.i_width = p_context->width;
p_dec->fmt_out.video.i_height = p_context->height;
if( !p_context->width || !p_context->height )
{
return NULL; /* invalid display size */
}
if( !p_sys->p_va && GetVlcChroma( &p_dec->fmt_out.video, p_context->pix_fmt ) )
{
/* we are doomed, but not really, because most codecs set their pix_fmt
* much later
* FIXME does it make sense here ? */
p_dec->fmt_out.video.i_chroma = VLC_CODEC_I420;
}
p_dec->fmt_out.i_codec = p_dec->fmt_out.video.i_chroma;
/* If an aspect-ratio was specified in the input format then force it */
if( p_dec->fmt_in.video.i_aspect )
{
p_dec->fmt_out.video.i_aspect = p_dec->fmt_in.video.i_aspect;
}
else
{
p_dec->fmt_out.video.i_aspect =
VOUT_ASPECT_FACTOR * ( av_q2d(p_context->sample_aspect_ratio) *
p_context->width / p_context->height );
p_dec->fmt_out.video.i_sar_num = p_context->sample_aspect_ratio.num;
p_dec->fmt_out.video.i_sar_den = p_context->sample_aspect_ratio.den;
if( p_dec->fmt_out.video.i_aspect == 0 )
{
p_dec->fmt_out.video.i_aspect =
VOUT_ASPECT_FACTOR * p_context->width / p_context->height;
}
}
if( p_dec->fmt_in.video.i_frame_rate > 0 &&
p_dec->fmt_in.video.i_frame_rate_base > 0 )
{
p_dec->fmt_out.video.i_frame_rate =
p_dec->fmt_in.video.i_frame_rate;
p_dec->fmt_out.video.i_frame_rate_base =
p_dec->fmt_in.video.i_frame_rate_base;
}
else if( p_context->time_base.num > 0 && p_context->time_base.den > 0 )
{
p_dec->fmt_out.video.i_frame_rate = p_context->time_base.den;
p_dec->fmt_out.video.i_frame_rate_base = p_context->time_base.num;
}
return decoder_NewPicture( p_dec );
}
Example 7: decoder_NewPicture
/* Returns a new picture buffer */
static inline picture_t *ffmpeg_NewPictBuf( decoder_t *p_dec,
AVCodecContext *p_context )
{
bool hwaccel = p_dec->p_sys->p_va != NULL;
if (lavc_UpdateVideoFormat(p_dec, p_context, hwaccel))
return NULL;
return decoder_NewPicture( p_dec );
}
Example 8: decoder_NewPicture
/*****************************************************************************
* GetNewPicture: Get a new picture from the vout and set the buf struct
*****************************************************************************/
static picture_t *GetNewPicture( decoder_t *p_dec )
{
decoder_sys_t *p_sys = p_dec->p_sys;
picture_t *p_pic;
p_dec->fmt_out.video.i_width = p_sys->p_info->sequence->width;
p_dec->fmt_out.video.i_visible_width =
p_sys->p_info->sequence->picture_width;
p_dec->fmt_out.video.i_height = p_sys->p_info->sequence->height;
p_dec->fmt_out.video.i_visible_height =
p_sys->p_info->sequence->picture_height;
p_dec->fmt_out.video.i_sar_num = p_sys->i_sar_num;
p_dec->fmt_out.video.i_sar_den = p_sys->i_sar_den;
if( p_sys->p_info->sequence->frame_period > 0 )
{
p_dec->fmt_out.video.i_frame_rate =
(uint32_t)( (uint64_t)1001000000 * 27 /
p_sys->p_info->sequence->frame_period );
p_dec->fmt_out.video.i_frame_rate_base = 1001;
}
p_dec->fmt_out.i_codec =
( p_sys->p_info->sequence->chroma_height <
p_sys->p_info->sequence->height ) ?
VLC_CODEC_I420 : VLC_CODEC_I422;
/* Get a new picture */
if( decoder_UpdateVideoFormat( p_dec ) )
return NULL;
p_pic = decoder_NewPicture( p_dec );
if( p_pic == NULL )
return NULL;
p_pic->b_progressive = p_sys->p_info->current_picture != NULL ?
p_sys->p_info->current_picture->flags & PIC_FLAG_PROGRESSIVE_FRAME : 1;
p_pic->b_top_field_first = p_sys->p_info->current_picture != NULL ?
p_sys->p_info->current_picture->flags & PIC_FLAG_TOP_FIELD_FIRST : 1;
p_pic->i_nb_fields = p_sys->p_info->current_picture != NULL ?
p_sys->p_info->current_picture->nb_fields : 2;
return p_pic;
}
Example 9: DecodeFrame
/*****************************************************************************
* DecodeFrame: decodes a video frame.
*****************************************************************************/
static int DecodeFrame( decoder_t *p_dec, block_t *p_block )
{
if( p_block == NULL ) /* No Drain */
return VLCDEC_SUCCESS;
p_block = DecodeBlock( p_dec, p_block );
if( p_block == NULL )
return VLCDEC_SUCCESS;
decoder_sys_t *p_sys = p_dec->p_sys;
/* Get a new picture */
picture_t *p_pic = NULL;
if( !decoder_UpdateVideoFormat( p_dec ) )
p_pic = decoder_NewPicture( p_dec );
if( p_pic == NULL )
{
block_Release( p_block );
return VLCDEC_SUCCESS;
}
FillPicture( p_dec, p_block, p_pic );
/* Date management: 1 frame per packet */
p_pic->date = date_Get( &p_dec->p_sys->pts );
date_Increment( &p_sys->pts, 1 );
if( p_block->i_flags & BLOCK_FLAG_INTERLACED_MASK )
{
p_pic->b_progressive = false;
p_pic->i_nb_fields = (p_block->i_flags & BLOCK_FLAG_SINGLE_FIELD) ? 1 : 2;
if( p_block->i_flags & BLOCK_FLAG_TOP_FIELD_FIRST )
p_pic->b_top_field_first = true;
else
p_pic->b_top_field_first = false;
}
else
p_pic->b_progressive = true;
block_Release( p_block );
decoder_QueueVideo( p_dec, p_pic );
return VLCDEC_SUCCESS;
}
Example 10: decoder_NewPicture
/*****************************************************************************
* DecodePacket: decodes a Theora packet.
*****************************************************************************/
static picture_t *DecodePacket( decoder_t *p_dec, ogg_packet *p_oggpacket )
{
decoder_sys_t *p_sys = p_dec->p_sys;
picture_t *p_pic;
th_ycbcr_buffer ycbcr;
/* TODO: Implement _granpos (3rd parameter here) and add the
* call to TH_DECCTL_SET_GRANPOS after seek */
/* TODO: If the return is TH_DUPFRAME, we don't need to display a new
* frame, but we do need to keep displaying the previous one. */
if (th_decode_packetin( p_sys->tcx, p_oggpacket, NULL ) < 0)
return NULL; /* bad packet */
/* Check for keyframe */
if( !(p_oggpacket->packet[0] & 0x80) /* data packet */ &&
!(p_oggpacket->packet[0] & 0x40) /* intra frame */ )
p_sys->b_decoded_first_keyframe = true;
/* If we haven't seen a single keyframe yet, don't let Theora decode
* anything, otherwise we'll get display artifacts. (This is impossible
* in the general case, but can happen if e.g. we play a network stream
* using a timed URL, such that the server doesn't start the video with a
* keyframe). */
if( !p_sys->b_decoded_first_keyframe )
return NULL; /* Wait until we've decoded the first keyframe */
if( th_decode_ycbcr_out( p_sys->tcx, ycbcr ) ) /* returns 0 on success */
return NULL;
/* Get a new picture */
if( decoder_UpdateVideoFormat( p_dec ) )
return NULL;
p_pic = decoder_NewPicture( p_dec );
if( !p_pic ) return NULL;
theora_CopyPicture( p_pic, ycbcr );
p_pic->date = p_sys->i_pts;
p_pic->b_progressive = true;
return p_pic;
}
Example 11: decoder_NewPicture
/*****************************************************************************
* DecodePacket: decodes a Daala packet.
*****************************************************************************/
static picture_t *DecodePacket( decoder_t *p_dec, ogg_packet *p_oggpacket )
{
decoder_sys_t *p_sys = p_dec->p_sys;
picture_t *p_pic;
od_img ycbcr;
if (daala_decode_packet_in( p_sys->dcx, &ycbcr, p_oggpacket ) < 0)
return NULL; /* bad packet */
/* Check for keyframe */
if( daala_packet_iskeyframe( p_oggpacket->packet, p_oggpacket->bytes ) )
p_sys->b_decoded_first_keyframe = true;
/* Get a new picture */
p_pic = decoder_NewPicture( p_dec );
if( !p_pic ) return NULL;
daala_CopyPicture( p_pic, &ycbcr );
p_pic->date = p_sys->i_pts;
return p_pic;
}
Example 12: ourCallback
static BC_STATUS ourCallback(void *shnd, uint32_t width, uint32_t height, uint32_t stride, void *pOut)
{
VLC_UNUSED(width); VLC_UNUSED(height); VLC_UNUSED(stride);
decoder_t *p_dec = (decoder_t *)shnd;
BC_DTS_PROC_OUT *proc_out = p_dec->p_sys->proc_out;
BC_DTS_PROC_OUT *proc_in = (BC_DTS_PROC_OUT*)pOut;
/* Direct Rendering */
/* Do not allocate for the second-field in the pair, in interlaced */
if( !(proc_in->PicInfo.flags & VDEC_FLAG_INTERLACED_SRC) ||
!(proc_in->PicInfo.flags & VDEC_FLAG_FIELDPAIR) )
p_dec->p_sys->p_pic = decoder_NewPicture( p_dec );
/* */
picture_t *p_pic = p_dec->p_sys->p_pic;
if( !p_pic )
return BC_STS_ERROR;
/* Interlacing */
p_pic->b_progressive = !(proc_in->PicInfo.flags & VDEC_FLAG_INTERLACED_SRC);
p_pic->b_top_field_first = !(proc_in->PicInfo.flags & VDEC_FLAG_BOTTOM_FIRST);
p_pic->i_nb_fields = p_pic->b_progressive? 1: 2;
/* Filling out the struct */
proc_out->Ybuff = !(proc_in->PicInfo.flags & VDEC_FLAG_FIELDPAIR) ?
&p_pic->p[0].p_pixels[0] :
&p_pic->p[0].p_pixels[p_pic->p[0].i_pitch];
proc_out->YbuffSz = 2 * p_pic->p[0].i_pitch;
proc_out->StrideSz = (proc_in->PicInfo.flags & VDEC_FLAG_INTERLACED_SRC)?
2 * (p_pic->p[0].i_pitch/2) - p_dec->fmt_out.video.i_width:
p_pic->p[0].i_pitch/2 - p_dec->fmt_out.video.i_width;
proc_out->PoutFlags |= BC_POUT_FLAGS_STRIDE; /* Trust Stride info */
return BC_STS_SUCCESS;
}
Example 13: block_Release
/****************************************************************************
* DecodeBlock: the whole thing
****************************************************************************
* This function must be fed with a complete compressed frame.
****************************************************************************/
static picture_t *DecodeBlock( decoder_t *p_dec, block_t **pp_block )
{
decoder_sys_t *p_sys = p_dec->p_sys;
block_t *p_block;
picture_t *p_pic = 0;
png_uint_32 i_width, i_height;
int i_color_type, i_interlace_type, i_compression_type, i_filter_type;
int i_bit_depth, i;
png_structp p_png;
png_infop p_info, p_end_info;
png_bytep *p_row_pointers = NULL;
if( !pp_block || !*pp_block ) return NULL;
p_block = *pp_block;
p_sys->b_error = false;
if( p_block->i_flags & BLOCK_FLAG_DISCONTINUITY )
{
block_Release( p_block ); *pp_block = NULL;
return NULL;
}
p_png = png_create_read_struct( PNG_LIBPNG_VER_STRING, 0, 0, 0 );
if( p_png == NULL )
{
block_Release( p_block ); *pp_block = NULL;
return NULL;
}
p_info = png_create_info_struct( p_png );
if( p_info == NULL )
{
png_destroy_read_struct( &p_png, NULL, NULL );
block_Release( p_block ); *pp_block = NULL;
return NULL;
}
p_end_info = png_create_info_struct( p_png );
if( p_end_info == NULL )
{
png_destroy_read_struct( &p_png, &p_info, NULL );
block_Release( p_block ); *pp_block = NULL;
return NULL;
}
/* libpng longjmp's there in case of error */
if( setjmp( png_jmpbuf( p_png ) ) )
goto error;
png_set_read_fn( p_png, (void *)p_block, user_read );
png_set_error_fn( p_png, (void *)p_dec, user_error, user_warning );
png_read_info( p_png, p_info );
if( p_sys->b_error ) goto error;
png_get_IHDR( p_png, p_info, &i_width, &i_height,
&i_bit_depth, &i_color_type, &i_interlace_type,
&i_compression_type, &i_filter_type);
if( p_sys->b_error ) goto error;
/* Set output properties */
p_dec->fmt_out.i_codec = VLC_CODEC_RGBA;
p_dec->fmt_out.video.i_visible_width = p_dec->fmt_out.video.i_width = i_width;
p_dec->fmt_out.video.i_visible_height = p_dec->fmt_out.video.i_height = i_height;
p_dec->fmt_out.video.i_sar_num = 1;
p_dec->fmt_out.video.i_sar_den = 1;
p_dec->fmt_out.video.i_rmask = 0x000000ff;
p_dec->fmt_out.video.i_gmask = 0x0000ff00;
p_dec->fmt_out.video.i_bmask = 0x00ff0000;
if( i_color_type == PNG_COLOR_TYPE_PALETTE )
png_set_palette_to_rgb( p_png );
if( i_color_type == PNG_COLOR_TYPE_GRAY ||
i_color_type == PNG_COLOR_TYPE_GRAY_ALPHA )
png_set_gray_to_rgb( p_png );
/* Strip to 8 bits per channel */
if( i_bit_depth == 16 ) png_set_strip_16( p_png );
if( png_get_valid( p_png, p_info, PNG_INFO_tRNS ) )
{
png_set_tRNS_to_alpha( p_png );
}
else if( !(i_color_type & PNG_COLOR_MASK_ALPHA) )
{
p_dec->fmt_out.i_codec = VLC_CODEC_RGB24;
}
/* Get a new picture */
p_pic = decoder_NewPicture( p_dec );
if( !p_pic ) goto error;
//......... remainder of this example omitted .........
Example 14: DecodeBlock
/****************************************************************************
* DecodeBlock: the whole thing
****************************************************************************
* This function must be fed with a complete image.
****************************************************************************/
static int DecodeBlock( decoder_t *p_dec, block_t *p_block )
{
decoder_sys_t *p_sys = (decoder_sys_t *) p_dec->p_sys;
picture_t *p_pic = NULL;
int32_t i_width, i_height;
RsvgHandle *rsvg = NULL;
cairo_surface_t *surface = NULL;
cairo_t *cr = NULL;
if( p_block == NULL ) /* No Drain */
return VLCDEC_SUCCESS;
if( p_block->i_flags & BLOCK_FLAG_CORRUPTED)
{
block_Release( p_block );
return VLCDEC_SUCCESS;
}
rsvg = rsvg_handle_new_from_data( p_block->p_buffer, p_block->i_buffer, NULL );
if( !rsvg )
goto done;
RsvgDimensionData dim;
rsvg_handle_get_dimensions( rsvg, &dim );
if( p_sys->f_scale > 0.0 )
{
i_width = (int32_t)(p_sys->f_scale * dim.width);
i_height = (int32_t)(p_sys->f_scale * dim.height);
}
else
{
/* Keep aspect */
if( p_sys->i_width < 0 && p_sys->i_height > 0 )
{
i_width = dim.width * p_sys->i_height / dim.height;
i_height = p_sys->i_height;
}
else if( p_sys->i_width > 0 && p_sys->i_height < 0 )
{
i_width = p_sys->i_width;
i_height = dim.height * p_sys->i_width / dim.width; /* scale height by the same ratio as width */
}
else if( p_sys->i_width > 0 && p_sys->i_height > 0 )
{
i_width = dim.width * p_sys->i_height / dim.height;
i_height = p_sys->i_height;
}
else
{
i_width = dim.width;
i_height = dim.height;
}
}
p_dec->fmt_out.i_codec =
p_dec->fmt_out.video.i_chroma = VLC_CODEC_BGRA;
p_dec->fmt_out.video.i_width = i_width;
p_dec->fmt_out.video.i_height = i_height;
p_dec->fmt_out.video.i_visible_width = i_width;
p_dec->fmt_out.video.i_visible_height = i_height;
p_dec->fmt_out.video.i_sar_num = 1;
p_dec->fmt_out.video.i_sar_den = 1;
p_dec->fmt_out.video.i_rmask = 0x80800000; /* Since librsvg v1.0 */
p_dec->fmt_out.video.i_gmask = 0x0000ff00;
p_dec->fmt_out.video.i_bmask = 0x000000ff;
video_format_FixRgb(&p_dec->fmt_out.video);
/* Get a new picture */
if( decoder_UpdateVideoFormat( p_dec ) )
goto done;
p_pic = decoder_NewPicture( p_dec );
if( !p_pic )
goto done;
/* NOTE: Do not use the stride calculation from cairo, because it is wrong:
* stride = cairo_format_stride_for_width(CAIRO_FORMAT_ARGB32, dim.width);
* Use VLC's own stride, picture_t::p[0].i_pitch, which is correct.
*/
memset(p_pic->p[0].p_pixels, 0, p_pic->p[0].i_pitch * p_pic->p[0].i_lines);
surface = cairo_image_surface_create_for_data( p_pic->p->p_pixels,
CAIRO_FORMAT_ARGB32,
i_width, i_height,
p_pic->p[0].i_pitch );
if( !surface )
{
picture_Release( p_pic );
p_pic = NULL;
goto done;
}
/* Decode picture */
cr = cairo_create( surface );
if( !cr )
//......... remainder of this example omitted .........
Example 15: avcodec_align_dimensions2
/* Returns a new picture buffer */
static inline picture_t *ffmpeg_NewPictBuf( decoder_t *p_dec,
AVCodecContext *p_context )
{
decoder_sys_t *p_sys = p_dec->p_sys;
int width = p_context->coded_width;
int height = p_context->coded_height;
if( p_sys->p_va == NULL )
{
int aligns[AV_NUM_DATA_POINTERS];
avcodec_align_dimensions2(p_context, &width, &height, aligns);
}
if( width == 0 || height == 0 || width > 8192 || height > 8192 )
{
msg_Err( p_dec, "Invalid frame size %dx%d.", width, height );
return NULL; /* invalid display size */
}
p_dec->fmt_out.video.i_width = width;
p_dec->fmt_out.video.i_height = height;
if( width != p_context->width || height != p_context->height )
{
p_dec->fmt_out.video.i_visible_width = p_context->width;
p_dec->fmt_out.video.i_visible_height = p_context->height;
}
else
{
p_dec->fmt_out.video.i_visible_width = width;
p_dec->fmt_out.video.i_visible_height = height;
}
if( !p_sys->p_va && GetVlcChroma( &p_dec->fmt_out.video, p_context->pix_fmt ) )
{
/* we are doomed, but not really, because most codecs set their pix_fmt
* much later
* FIXME does it make sense here ? */
p_dec->fmt_out.video.i_chroma = VLC_CODEC_I420;
}
p_dec->fmt_out.i_codec = p_dec->fmt_out.video.i_chroma;
/* If an aspect-ratio was specified in the input format then force it */
if( p_dec->fmt_in.video.i_sar_num > 0 && p_dec->fmt_in.video.i_sar_den > 0 )
{
p_dec->fmt_out.video.i_sar_num = p_dec->fmt_in.video.i_sar_num;
p_dec->fmt_out.video.i_sar_den = p_dec->fmt_in.video.i_sar_den;
}
else
{
p_dec->fmt_out.video.i_sar_num = p_context->sample_aspect_ratio.num;
p_dec->fmt_out.video.i_sar_den = p_context->sample_aspect_ratio.den;
if( !p_dec->fmt_out.video.i_sar_num || !p_dec->fmt_out.video.i_sar_den )
{
p_dec->fmt_out.video.i_sar_num = 1;
p_dec->fmt_out.video.i_sar_den = 1;
}
}
if( p_dec->fmt_in.video.i_frame_rate > 0 &&
p_dec->fmt_in.video.i_frame_rate_base > 0 )
{
p_dec->fmt_out.video.i_frame_rate =
p_dec->fmt_in.video.i_frame_rate;
p_dec->fmt_out.video.i_frame_rate_base =
p_dec->fmt_in.video.i_frame_rate_base;
}
else if( p_context->time_base.num > 0 && p_context->time_base.den > 0 )
{
p_dec->fmt_out.video.i_frame_rate = p_context->time_base.den;
p_dec->fmt_out.video.i_frame_rate_base = p_context->time_base.num * __MAX( p_context->ticks_per_frame, 1 );
}
return decoder_NewPicture( p_dec );
}