本文整理汇总了C++中Stream::GetCodecContext方法的典型用法代码示例。如果您正苦于以下问题:C++ Stream::GetCodecContext方法的具体用法?C++ Stream::GetCodecContext怎么用?C++ Stream::GetCodecContext使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Stream的用法示例。
在下文中一共展示了Stream::GetCodecContext方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: main
// Video append/remux tool: copies the video stream of one or more input files
// into a single output file, reusing the input codec parameters instead of
// re-encoding. Built on the legacy (pre-libav split) FFmpeg C API via thin
// C++ wrappers (InputFormatContext, OutputFormatContext, Stream, CodecContext).
// NOTE(review): the body is truncated in this view (see the "code omitted"
// marker at the end); comments below cover only the visible portion.
//
// Usage (inferred from argument handling): prog <input...> <output>
// - argv[optind..argc-2] are input files, argv[argc-1] is the output file.
int main(int argc, char *argv[])
{
// Parse the options (no option letters accepted; any flag prints usage).
int opt;
while ((opt = getopt(argc, argv, "")) != -1)
{
switch (opt)
{
default:
usage(argv[0]);
}
}
// Require at least one input and one output path after the options.
if (argc < optind+2) usage(argv[0]);
char* file_in = argv[optind];
char* file_out = argv[argc-1];
// Init all the crap — registers all FFmpeg muxers/demuxers/codecs
// (required before any other libavformat call in this API generation).
av_register_all();
// Open the input file and get the input format context
InputFormatContext inFormatCtx(file_in);
dump_format(inFormatCtx.get(), 0, file_in, 0); // Just for debugging
// Find the input video stream
Stream inStream = inFormatCtx.GetVideoStream();
// Get the input codec context
CodecContext inCodecCtx = inStream.GetCodecContext();
// Allocate the output format context: first try to guess the container
// format from the output file extension; if that throws, fall back to the
// input file's own format.
// NOTE(review): exception is caught by value — prefer
// `catch (const std::runtime_error& e)`.
OutputFormat outFormat;
try {
outFormat = OutputFormat(0, file_out);
} catch(std::runtime_error e) {
warn("Could not guess the format based on extension, trying the input format");
outFormat = OutputFormat(inFormatCtx.GetFormat().GetName(), file_in);
}
OutputFormatContext outFormatCtx(file_out, outFormat);
// Create the output video stream
Stream outStream = outFormatCtx.AddVideoStream();
outStream.SetFrameRate(inStream.GetFrameRate());
outStream.SetAspectRatio(inStream.GetAspectRatio());
// Set the output codec context: mirror the input's dimensions, codec id and
// codec tag so the stream can be copied without transcoding. The commented
// lines suggest bitrate/extradata copying was tried and disabled.
CodecContext outCodecCtx = outStream.GetCodecContext();
outCodecCtx.SetWidth(inCodecCtx.GetWidth());
outCodecCtx.SetHeight(inCodecCtx.GetHeight());
//outCodecCtx.SetBitrate(inCodecCtx.GetBitrate());
outCodecCtx.get()->codec_id = inCodecCtx.get()->codec_id;
//outCodecCtx.get()->codec_type = inCodecCtx.get()->codec_type;
outCodecCtx.get()->codec_tag = inCodecCtx.get()->codec_tag;
//outCodecCtx.get()->has_b_frames = inCodecCtx.get()->has_b_frames;
//outCodecCtx.get()->bit_rate = inCodecCtx.get()->bit_rate;
//outCodecCtx.get()->extradata = inCodecCtx.get()->extradata;
//outCodecCtx.get()->extradata_size = inCodecCtx.get()->extradata_size;
//if (codec) pOutCodec = avcodec_find_encoder_by_name(codec)->id;
// Open the output file, write the header.
// AVFMT_NOFILE formats manage their own I/O, so only open the file when the
// muxer does not set that flag (legacy url_fopen API).
if (!(outFormatCtx.get()->oformat->flags & AVFMT_NOFILE))
{
if (url_fopen(&outFormatCtx.get()->pb, outFormatCtx.get()->filename, URL_WRONLY) < 0)
fail("Could not open the output file");
}
// Parse the video
if (outFormatCtx.get()->oformat->flags & AVFMT_RAWPICTURE) fail("Raw dump not supported yet");
// Running timestamp offset applied across appended inputs — presumably so
// each appended file's frames continue after the previous one; TODO confirm
// in the omitted portion.
long long globaloffset = 0;
// Loop over every input file (all arguments except the last, which is the
// output). Inner locals shadow the outer file_in/inFormatCtx on purpose.
while (optind < argc-1)
{
char* file_in = argv[optind];
// NOTE(review): format string lacks a space — prints "Appending<name>";
// cannot be fixed in a comments-only pass.
fprintf(stderr, "Appending%s\n", file_in);
InputFormatContext inFormatCtx(file_in);
Stream inStream = inFormatCtx.GetVideoStream();
// localoffset: first timestamp seen in this input (-1 = not yet set);
// pts: frame presentation counter — TODO confirm against omitted code.
long long localoffset = -1;
long long pts = 0; // Frame presentation number (Presentation Time Stamp)
//......... part of the code is omitted here .........
示例2: main
// Feature-extraction tool: decodes a video, rescales frames to grayscale at a
// requested size, and runs a Harris-Laplace interest-point detector with SIFT
// descriptors (lava_ns library) over an image pyramid.
// NOTE(review): the body is truncated in this view (see the "code omitted"
// marker at the end); comments below cover only the visible portion.
//
// Options: -s <start> -e <end> -t <every> (Timestamps), -x <width> -y <height>.
int main(int argc, char *argv[])
{
// Parse the options
Timestamp frame_start;
Timestamp frame_end;
Timestamp frame_every;
int width = 0;   // 0 = derive from input (see below)
int height = 0;  // 0 = derive from input (see below)
int opt;
while ((opt = getopt(argc, argv, "s:e:t:x:y:")) != -1)
{
switch (opt)
{
case 's':
frame_start = Timestamp(optarg);
break;
case 'e':
frame_end = Timestamp(optarg);
break;
case 't':
frame_every = Timestamp(optarg);
break;
case 'x':
width = atoi(optarg);
break;
case 'y':
height = atoi(optarg);
break;
default:
usage(argv[0]);
}
}
// Require an input file; timestamps, when set, must be one of the supported
// types — presumably 'd' = duration, 'p' = pts/frame, 's' = seconds; TODO
// confirm against the Timestamp class.
if (argc < optind+1) usage(argv[0]);
if ( frame_end.set && frame_end.type != 'd' && frame_end.type != 'p' && frame_end.type != 's') usage(argv[0]);
if (frame_start.set && frame_start.type != 'd' && frame_start.type != 'p' && frame_start.type != 's') usage(argv[0]);
if (frame_every.set && frame_every.type != 'd' && frame_every.type != 'p' && frame_every.type != 's') usage(argv[0]);
char* file_in = argv[optind];
// Init all the crap — registers all FFmpeg muxers/demuxers/codecs.
av_register_all();
// Open the input file and get the input format context
InputFormatContext inFormatCtx(file_in);
dump_format(inFormatCtx.get(), 0, file_in, 0); // Just for debugging
// Find the input video stream
Stream inStream = inFormatCtx.GetVideoStream();
// Get the input codec context
CodecContext inCodecCtx = inStream.GetCodecContext();
if (width == 0 && height == 0) // Same width and height by default
{
width = inCodecCtx.GetWidth();
height = inCodecCtx.GetHeight();
} else { // Only one dimension given: derive the other, keeping aspect ratio
if (width == 0) width = (int)((float)inCodecCtx.GetWidth() * height/inCodecCtx.GetHeight());
else if (height == 0) height = (int)((float)inCodecCtx.GetHeight() * width/inCodecCtx.GetWidth());
}
// Set the resampling context: rescale decoded frames to width x height
// 8-bit grayscale. Skipped when the pixel format is unknown/invalid (< 0).
// NOTE(review): owning raw pointer with `new`; no delete visible in this
// view — confirm cleanup (or leak) in the omitted portion.
ScalerContext* pScalerCtx = 0;
if (inCodecCtx.GetPixelFormat() >= 0)
pScalerCtx = new ScalerContext(inCodecCtx, width, height, PIX_FMT_GRAY8);
// Set detector/descriptor stuff: scale-space pyramid (factor 1.2), SIFT
// descriptor over it, and a Harris-Laplace detector feeding detected points
// into a receiver that knows the frame geometry and stream time base.
Image img(width, height);
lava_ns::ImagePyramid<Image> pyramid;
pyramid.setScaleFactor(1.2);
lava_ns::SIFT<Image> descriptor;
descriptor.setImagePyramid(&pyramid);
// Magic numbers are SIFT layout parameters — TODO confirm meaning
// (patch size / rings / sectors / orientation bins?) against lava_ns docs.
descriptor.setMainParam(61,12,4,8);
FramePointReceiver receiver(width, height, av_q2d(inStream.GetTimeBase()), descriptor);
lava_ns::HarrisLaplace<Image> detector;
detector.setPosThreshold(300);
detector.setReceiver(&receiver);
detector.setImage(&pyramid);
// Parse the video: seek to the requested start position first.
// 's' seeks by time on the whole file, 'p' seeks by stream position;
// a 'd' start type is accepted above but not handled here — TODO confirm
// whether that is intentional or handled in the omitted portion.
if (frame_start.set)
{
if (frame_start.type == 's') inFormatCtx.Seek(frame_start.value);
if (frame_start.type == 'p') inFormatCtx.Seek(inStream, frame_start.value);
}
int haveoutput = 0;
//......... part of the code is omitted here .........