本文整理汇总了C++中VideoDecoder::GetFrame方法的典型用法代码示例。如果您正苦于以下问题:C++ VideoDecoder::GetFrame方法的具体用法?C++ VideoDecoder::GetFrame怎么用?C++ VideoDecoder::GetFrame使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类VideoDecoder
的用法示例。
在下文中一共展示了VideoDecoder::GetFrame方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: SendVideo
// Bridges RTMP video into RTP: pops queued RTMP frames (FLV1/Sorenson),
// decodes them, and re-encodes them with the session's negotiated RTP codec.
// Runs until sendingVideo is cleared; returns an error code if the peer does
// not support the configured RTP video codec.
// NOTE(review): the tail of this function is omitted in this listing.
int MediaBridgeSession::SendVideo()
{
// Decoder for the incoming RTMP stream (FLV1 is Sorenson H.263)
VideoDecoder *decoder = VideoCodecFactory::CreateDecoder(VideoCodec::SORENSON);
// Encoder for the outgoing RTP stream
VideoEncoder *encoder = VideoCodecFactory::CreateEncoder(rtpVideoCodec);
DWORD width = 0;
DWORD height = 0;
// Size in bytes of one decoded frame (width*height*3/2, i.e. I420); 0 until known
DWORD numpixels = 0;
// Timestamp of the previous frame, used to compute the inter-frame delta below
QWORD lastVideoTs = 0;
Log(">SendVideo\n");
// Negotiate the sending codec with the peer
if (!rtpVideo.SetSendingCodec(rtpVideoCodec))
// Peer rejected the codec: bail out with an error
return Error("Peer do not support [%d,%s]\n",rtpVideoCodec,VideoCodec::GetNameFor(rtpVideoCodec));
// Main send loop
while (sendingVideo)
{
// Wait for the next queued frame (argument 0 — presumably no/infinite timeout; TODO confirm)
if (!videoFrames.Wait(0))
// Nothing available yet: re-check the sendingVideo flag
continue;
// Pop the next queued video frame
RTMPVideoFrame* video = videoFrames.Pop();
// The queue may have been drained concurrently
if (!video)
// Try again
continue;
// Time difference from the previous frame (computed but unused in the visible code)
DWORD diff = 0;
// Timestamp of this frame
QWORD ts = video->GetTimestamp();
// Only compute a delta once we have a previous timestamp
if (lastVideoTs)
// Calculate it
diff = ts - lastVideoTs;
// Remember this frame's timestamp for the next iteration
lastVideoTs = ts;
// Only FLV1 (Sorenson) input matches the decoder created above
if (video->GetVideoCodec()!=RTMPVideoFrame::FLV1)
// Skip frames in any other codec
continue;
// Decode the RTMP payload into a raw picture
if (!decoder->Decode(video->GetMediaData(),video->GetMediaSize()))
{
Error("decode packet error");
// Skip this frame
continue;
}
// Reconfigure the encoder whenever the input dimensions change
if (decoder->GetWidth()!=width || decoder->GetHeight()!=height)
{
// Capture the new dimensions
width = decoder->GetWidth();
height = decoder->GetHeight();
// One I420 frame is 1.5 bytes per pixel
numpixels = width*height*3/2;
// Frame rate / bitrate parameters (25, 300, 500 — TODO confirm unit semantics)
encoder->SetFrameRate(25,300,500);
// Propagate the size to the encoder
encoder->SetSize(width,height);
}
// Nothing decoded yet: cannot encode
if (!numpixels)
{
Error("numpixels equals 0");
// Skip
continue;
}
// A Fast Picture Update (keyframe) was requested by the peer
if (sendFPU)
{
// Force the encoder to emit an intra frame
encoder->FastPictureUpdate();
// One-shot flag: reset it
sendFPU = false;
}
// Encode the decoded raw frame for RTP
VideoFrame *videoFrame = encoder->EncodeFrame(decoder->GetFrame(),numpixels);
// The encoder may skip or fail on a frame
if (!videoFrame)
{
Log("No video frame\n");
// Skip
continue;
}
//Set frame time
//......... remainder of the function omitted in this listing .........
示例2: RecVideo
/****************************************
* RecVideo
* Obtiene los packetes y los muestra
*****************************************/
/****************************************
* RecVideo
* Receives RTP video packets, decodes them, and re-encodes complete
* frames as FLV1 (Sorenson) RTMP video frames.
* NOTE(review): the tail of this function is omitted in this listing.
*****************************************/
int MediaBridgeSession::RecVideo()
{
// Coders: the decoder is created lazily once the RTP payload type is known
VideoDecoder* decoder = NULL;
VideoEncoder* encoder = VideoCodecFactory::CreateEncoder(VideoCodec::SORENSON);
// Reusable output frame with a fixed maximum media size
RTMPVideoFrame frame(0,262143);
// All output is FLV1
frame.SetVideoCodec(RTMPVideoFrame::FLV1);
int width=0;
int height=0;
// Size in bytes of one decoded frame (width*height*3/2, i.e. I420)
DWORD numpixels=0;
Log(">RecVideo\n");
// Receive loop: runs until receivingVideo is cleared
while(receivingVideo)
{
// Fetch the next RTP packet
RTPPacket* packet = rtpVideo.GetPacket();
// No packet available
if (!packet)
// Try again
continue;
// Payload codec of this packet
VideoCodec::Type type = (VideoCodec::Type)packet->GetCodec();
// (Re)create the decoder if the payload type changed mid-stream
if ((decoder==NULL) || (type!=decoder->type))
{
// Drop the previous decoder, if any
if (decoder!=NULL)
delete decoder;
// Create one matching the incoming payload type
decoder = VideoCodecFactory::CreateDecoder(type);
// Unsupported codec: drop the packet and keep going
if (!decoder)
{
delete(packet);
continue;
}
}
// Feed this packet into the decoder
if(!decoder->DecodePacket(packet->GetMediaData(),packet->GetMediaLength(),0,packet->GetMark()))
{
delete(packet);
continue;
}
// The RTP marker bit flags the last packet of a frame
bool mark = packet->GetMark();
// The packet is no longer needed
delete(packet);
// Wait for the rest of the frame before encoding
if(!mark)
continue;
// Reconfigure the encoder whenever the decoded dimensions change
if (decoder->GetWidth()!=width || decoder->GetHeight()!=height)
{
// Capture the new dimensions
width = decoder->GetWidth();
height = decoder->GetHeight();
// One I420 frame is 1.5 bytes per pixel
numpixels = width*height*3/2;
// Frame rate / bitrate parameters (25, 300, 500 — TODO confirm unit semantics)
encoder->SetFrameRate(25,300,500);
// Propagate the size to the encoder
encoder->SetSize(width,height);
}
// Encode the completed decoded frame as FLV1
VideoFrame *encoded = encoder->EncodeFrame(decoder->GetFrame(),numpixels);
// Encoder failure ends the loop
if (!encoded)
break;
// The encoded frame must fit in the fixed-size RTMP frame buffer
if (frame.GetMaxMediaSize()<encoded->GetLength())
// Not enough space: abort with an error
return Error("Not enought space to copy FLV encodec frame [frame:%d,encoded:%d",frame.GetMaxMediaSize(),encoded->GetLength());
// Copy the encoded payload into the RTMP frame
frame.SetVideoFrame(encoded->GetData(),encoded->GetLength());
//......... remainder of the function omitted in this listing .........
示例3: RecVideo
//......... beginning of the function omitted in this listing .........
// NOTE(review): this fragment starts mid-function; `buffer`, `size`, `red`,
// `type`, `ts`, `frameTime`, `lost`, `packet` are declared in the omitted part.
buffer = red->GetPrimaryPayloadData();
size = red->GetPrimaryPayloadSize();
}
// (Re)create the video decoder if the payload type changed mid-stream
if ((videoDecoder==NULL) || (type!=videoDecoder->type))
{
// Drop the previous decoder, if any
if (videoDecoder!=NULL)
// Delete it
delete videoDecoder;
// Create a decoder matching the incoming payload type
videoDecoder = VideoCodecFactory::CreateDecoder(type);
// Unsupported codec: drop the packet and continue
if (videoDecoder==NULL)
{
Error("Error creando nuevo decodificador de video [%d]\n",type);
// Drop the packet
delete(packet);
// Next iteration
continue;
}
}
// A newer timestamp means the marker packet of the previous frame was lost
if (ts>frameTime)
{
Debug("-lost mark packet ts:%u frameTime:%u\n",ts,frameTime);
// Flush whatever is buffered in the decoder for the incomplete frame
videoDecoder->DecodePacket(NULL,0,1,1);
// Fetch the (possibly partial) decoded picture
BYTE *frame = videoDecoder->GetFrame();
DWORD width = videoDecoder->GetWidth();
DWORD height = videoDecoder->GetHeight();
// Only display if we actually got a picture with valid dimensions
if (frame && width && height)
{
// Propagate the frame size to the video output
videoOutput->SetVideoSize(width,height);
// Respect the mute flag
if (!muted)
// Render the frame
videoOutput->NextFrame(frame);
}
}
// Track the timestamp of the frame currently being assembled
frameTime = ts;
// Decode this packet's payload
if(!videoDecoder->DecodePacket(buffer,size,lost,packet->GetMark()))
{
// On decode error, request a keyframe — rate-limited by minFPUPeriod
if (listener && getDifTime(&lastFPURequest)>minFPUPeriod)
{
// Debug
Log("-Requesting FPU decoder error\n");
// Reset the lost-packet counter
lostCount = 0;
// Ask the application listener for a Fast Picture Update
listener->onRequestFPU();
// Also request it in-band over RTP
rtp.RequestFPU();
//......... remainder of the function omitted in this listing .........
示例4: main
// Hand-posture detection driver: reads an input video, finds flesh regions
// and hand candidates in each frame, classifies the posture, and writes an
// annotated output video.
// Usage: <flesh classifier file> <hand classifier file> <input file> <output file>
// Returns 0 on success, 1 on any load/processing error.
// NOTE(review): the tail of this function is omitted in this listing.
int main(int argc, char* argv[])
{
int i, j, k;
int width, height;
int numFleshRegions, numHands, xScale, yScale;
int left, right, top, bottom;
Image* image;
Image outlineImage;
FleshDetector* fleshDetector;
vector<ConnectedRegion*>* fleshRegionVector;
vector<Hand*> hands;
Hand* hand;
vector<HandCandidate*> handCandidates;
HandCandidate* candidate;
// RGB colors used to annotate the output frames
unsigned char angledBoxColor[] = {255, 255, 0};
unsigned char longColor[] = {0, 255, 0};
unsigned char shortColor[] = {0, 0, 255};
unsigned char offsetColor[] = {0, 255, 255};
unsigned char pointColor[] = {255, 0, 0};
unsigned char farPointColor[] = {255, 0, 255};
int numLargeRegions;
string basename;
DoublePoint centroid, center, nearEdge, farEdge;
LineSegment shortLine, longLine, offsetLine;
Rect angledBox;
double edgeAngle, offsetAngle;
CompositeClassifier postureDetector;
string features;
Matrix input;
int classIndex;
SubImage handImage;
vector<Point> farPoints;
int numFarPoints;
string inputFilename, outputFilename;
VideoDecoder decoder;
VideoEncoder encoder;
// The encoder is opened lazily once the first frame's size is known
bool needInit = true;
if ( argc < 5 )
{
printf("Usage: %s <flesh classifier file> <hand classifier file> <input file> <output file>\n", argv[0]);
return 1;
}
// Either loads a real detector or gets a dummy detector if arg is "DUMMY"
fleshDetector = FleshDetector::Get(argv[1]);
if ( !fleshDetector )
{
fprintf(stderr, "Error loading flesh detector %s\n", argv[1]);
return 1;
}
if ( !postureDetector.Load(argv[2]) )
{
fprintf(stderr, "Error loading hand detector %s\n", argv[2]);
return 1;
}
// Feature string drives which measurements are fed to the posture classifier
features = postureDetector.GetFeatureString();
inputFilename = argv[3];
outputFilename = argv[4];
decoder.SetFilename(inputFilename);
if ( !decoder.Load() )
{
fprintf(stderr, "Error loading video %s\n", inputFilename.c_str());
return 1;
}
// Process every frame of the input video
while ( decoder.UpdateFrame() )
{
image = decoder.GetFrame();
if ( needInit )
{
// First frame: open the output encoder with the input's dimensions
needInit = false;
width = image->GetWidth();
height = image->GetHeight();
if ( !encoder.Open(outputFilename.c_str(), width, height, 10) )
{
fprintf(stderr, "Failed opening %s\n", outputFilename.c_str());
return 1;
}
}
hands.clear();
// Copy of the frame that annotations get drawn onto
outlineImage = *image;
// xScale/yScale are presumably output parameters filled in here — TODO confirm
fleshRegionVector = fleshDetector->GetFleshRegions(image, xScale, yScale);
if ( fleshRegionVector )
{
numFleshRegions = fleshRegionVector->size();
numLargeRegions = 0;
for (i = 0; i < numFleshRegions; i++)
{
if ( !(*fleshRegionVector)[i]->GetBounds(left, right, top, bottom) )
{
fprintf(stderr, "Error getting flesh block %d bounds\n", i);
return 1;
}
//......... remainder of the function omitted in this listing .........
示例5: main
int main(int argc, char *argv[])
{
#if 0
QCoreApplication a(argc, argv);
return a.exec();
#endif
VideoDecoder* videoDecoder = new VideoDecoder;
VideoEncoder* videoEncoder = 0;
AdaboostClassifier* openClassifier = new AdaboostClassifier;
AdaboostClassifier* closedClassifier = new AdaboostClassifier;
HandyTracker tracker;
if ( argc != 5 )
{
printf("Usage: %s <open classifier> <closed classifier> <input video> <output video>\n", argv[0]);
return 0;
}
if ( !openClassifier->Load(argv[1]) )
{
fprintf(stderr, "Failed loading open classifier\n", argv[1]);
return 1;
}
if ( !tracker.SetOpenClassifier(openClassifier) )
{
fprintf(stderr, "Failed setting open classifier\n");
return 1;
}
if ( !closedClassifier->Load(argv[2]) )
{
fprintf(stderr, "Failed loading closed classifier\n", argv[2]);
return 1;
}
if ( !tracker.SetClosedClassifier(closedClassifier) )
{
fprintf(stderr, "Failed setting closed classifier\n");
return 1;
}
videoDecoder->SetFilename(argv[3]);
if ( !videoDecoder->Load() )
{
fprintf(stderr, "Failed loading video <%s>\n", argv[3]);
return 1;
}
if ( !videoDecoder->UpdateFrame() )
{
fprintf(stderr, "Failed updating frame\n");
return 1;
}
int frameNumber = 0;
bool trackingInitialized = false;
Image* img = videoDecoder->GetFrame();
while ( img )
{
if ( !videoEncoder )
{
videoEncoder = new VideoEncoder;
if ( !videoEncoder->Open(argv[4], img->GetWidth(), img->GetHeight(), 25) )
{
fprintf(stderr, "Failed opening output video <%s>\n", argv[4]);
return 1;
}
}
ProcessFrame(img, &tracker, trackingInitialized, frameNumber);
if ( trackingInitialized )
DrawResults(img, &tracker, frameNumber);
videoEncoder->AddFrame(img);
if ( frameNumber > 1 )
tracker.PurgeRegion(frameNumber - 2);
frameNumber++;
videoDecoder->UpdateFrame();
img = videoDecoder->GetFrame();
}
videoEncoder->Close();
return 0;
}
示例6: main
// Flesh-detection driver: loads a flesh classifier, runs it on every frame of
// the input video, and saves the flesh mask, outline, and confidence images
// for each frame as PPM files in the output directory. Prints timing stats
// on exit.
// Usage: <classifier file> <video file> <output directory>
// Returns 0 on success, 1 on any load error.
int main(int argc, char* argv[])
{
  VideoDecoder decoder;
  FleshDetector fleshDetector;
  Image* inputImage;
  Image* fleshImage;
  Image* outlineImage;
  Image* confidenceImage;
  int frameNumber = 0;
  string vidFilename;
  char outputFilename[1024];
  if ( argc < 4 )
  {
    printf("Usage: %s <classifier file> <video file> <output directory>\n", argv[0]);
    return 1;
  }
  if ( !fleshDetector.Load(argv[1]) )
  {
    fprintf(stderr, "Error loading flesh detector %s\n", argv[1]);
    return 1;
  }
  vidFilename = argv[2];
  decoder.SetFilename(vidFilename);
  if ( !decoder.Load() )
  {
    fprintf(stderr, "Error loading video %s\n", argv[2]);
    return 1;
  }
  // Process every frame of the input video
  while ( decoder.UpdateFrame() )
  {
    inputImage = decoder.GetFrame();
    TimingAnalyzer_Start(0);
    if ( fleshDetector.Process(inputImage, &outlineImage, &fleshImage, &confidenceImage) )
    {
      TimingAnalyzer_Stop(0);
      // Bug fix: snprintf bounds the path to the buffer size; the original
      // sprintf could overflow outputFilename for a long output directory.
      snprintf(outputFilename, sizeof(outputFilename), "%s/flesh%05d.ppm", argv[3], frameNumber);
      fleshImage->Save(outputFilename);
      snprintf(outputFilename, sizeof(outputFilename), "%s/frame%05d.ppm", argv[3], frameNumber);
      outlineImage->Save(outputFilename);
      snprintf(outputFilename, sizeof(outputFilename), "%s/confidence%05d.ppm", argv[3], frameNumber);
      confidenceImage->Save(outputFilename);
    }
    // Frame counter advances even when Process() fails, matching the original
    frameNumber++;
  }
  // Report min/max/mean timings gathered by the TimingAnalyzer channels
  printf("FleshDetector Process Time Min: %d\tMax: %d\tMean: %d\n",
         TimingAnalyzer_Min(0), TimingAnalyzer_Max(0), TimingAnalyzer_Mean(0));
  printf("FleshDetector GetFleshImage Time Min: %d\tMax: %d\tMean: %d\n",
         TimingAnalyzer_Min(1), TimingAnalyzer_Max(1), TimingAnalyzer_Mean(1));
  printf("FleshDetector GetOutlineImage Time Min: %d\tMax: %d\tMean: %d\n",
         TimingAnalyzer_Min(2), TimingAnalyzer_Max(2), TimingAnalyzer_Mean(2));
  printf("FleshDetector GetFleshConfidenceImage Time Min: %d\tMax: %d\tMean: %d\n",
         TimingAnalyzer_Min(3), TimingAnalyzer_Max(3), TimingAnalyzer_Mean(3));
  printf("FleshDetector CalcConfidence Time Min: %d\tMax: %d\tMean: %d\n",
         TimingAnalyzer_Min(4), TimingAnalyzer_Max(4), TimingAnalyzer_Mean(4));
  return 0;
}