This page collects typical usage examples of the C++ method VideoFrame::GetLength. If you are wondering what C++ VideoFrame::GetLength does and how to use it, the hand-picked method examples below may help. You can also explore further usage examples of the containing class, VideoFrame.
The following shows 6 code examples of the VideoFrame::GetLength method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
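Before the full examples, here is a minimal sketch of the pattern that recurs throughout them: GetLength() reports how many valid bytes sit behind GetData(), so callers check it against the destination capacity before copying. This sketch is distilled from the examples below and assumes only the accessors that actually appear in them; it is not part of the library.
//A minimal sketch of the recurring GetLength pattern: validate the
//encoded size against the destination capacity before copying. Only
//accessors that appear in the examples below are assumed.
int CopyEncodedFrame(RTMPVideoFrame &dst, VideoFrame *encoded)
{
//GetLength() is the number of valid bytes behind GetData()
if (dst.GetMaxMediaSize()<encoded->GetLength())
//Destination buffer is too small for this encoded frame
return Error("Not enough space [max:%d,encoded:%d]\n",dst.GetMaxMediaSize(),encoded->GetLength());
//Copy the encoded payload into the RTMP frame
dst.SetVideoFrame(encoded->GetData(),encoded->GetLength());
return 1;
}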
Example 1: RecVideo
/****************************************
* RecVideo
* Receives the packets and displays them
*****************************************/
int MediaBridgeSession::RecVideo()
{
//Coders
VideoDecoder* decoder = NULL;
VideoEncoder* encoder = VideoCodecFactory::CreateEncoder(VideoCodec::SORENSON);
//Create new video frame
RTMPVideoFrame frame(0,262143);
//Set codec
frame.SetVideoCodec(RTMPVideoFrame::FLV1);
int width=0;
int height=0;
DWORD numpixels=0;
Log(">RecVideo\n");
//While we still need to capture
while(receivingVideo)
{
//Get the packet
RTPPacket* packet = rtpVideo.GetPacket();
//Check
if (!packet)
//Next
continue;
//Get type
VideoCodec::Type type = (VideoCodec::Type)packet->GetCodec();
if ((decoder==NULL) || (type!=decoder->type))
{
//If there was a previous one, delete it
if (decoder!=NULL)
delete decoder;
//Create one depending on the type
decoder = VideoCodecFactory::CreateDecoder(type);
//Check
if (!decoder)
{
delete(packet);
continue;
}
}
//Decode it
if(!decoder->DecodePacket(packet->GetMediaData(),packet->GetMediaLength(),0,packet->GetMark()))
{
delete(packet);
continue;
}
//Get mark
bool mark = packet->GetMark();
//Delete packet
delete(packet);
//Check if it is last one
if(!mark)
continue;
//Check size
if (decoder->GetWidth()!=width || decoder->GetHeight()!=height)
{
//Get dimension
width = decoder->GetWidth();
height = decoder->GetHeight();
//Set size
numpixels = width*height*3/2;
//Set also frame rate and bps
encoder->SetFrameRate(25,300,500);
//Set them in the encoder
encoder->SetSize(width,height);
}
//Encode next frame
VideoFrame *encoded = encoder->EncodeFrame(decoder->GetFrame(),numpixels);
//Check
if (!encoded)
break;
//Check size
if (frame.GetMaxMediaSize()<encoded->GetLength())
//Not enough space
return Error("Not enough space to copy FLV encoded frame [frame:%d,encoded:%d]\n",frame.GetMaxMediaSize(),encoded->GetLength());
//Get full frame
frame.SetVideoFrame(encoded->GetData(),encoded->GetLength());
//.........part of the code omitted.........
Example 2: EncodeVideo
int FLVEncoder::EncodeVideo()
{
timeval prev;
//Start
Log(">FLVEncoder encode video\n");
//Allocate media frame
RTMPVideoFrame frame(0,262143);
//Check codec
switch(videoCodec)
{
case VideoCodec::SORENSON:
//Set video codec
frame.SetVideoCodec(RTMPVideoFrame::FLV1);
break;
case VideoCodec::H264:
//Set video codec
frame.SetVideoCodec(RTMPVideoFrame::AVC);
//Set NAL type
frame.SetAVCType(RTMPVideoFrame::AVCNALU);
//No delay
frame.SetAVCTS(0);
break;
default:
return Error("-Wrong codec type %d\n",videoCodec);
}
//Create the encoder
VideoEncoder *encoder = VideoCodecFactory::CreateEncoder(videoCodec,videoProperties);
///Set frame rate
encoder->SetFrameRate(fps,bitrate,intra);
//Set dimensions
encoder->SetSize(width,height);
//Start capturing
videoInput->StartVideoCapture(width,height,fps);
//The time of the first one
gettimeofday(&prev,NULL);
//No wait for first
DWORD frameTime = 0;
Log(">FLVEncoder encode vide\n");
//While we still need to capture
while(encodingVideo)
{
//Grab the pointer before it gets swapped out
BYTE* pic=videoInput->GrabFrame(frameTime);
//Ensure we are still encoding
if (!encodingVideo)
break;
//Check pic
if (!pic)
continue;
//Check if we need to send intra
if (sendFPU)
{
//Set it
encoder->FastPictureUpdate();
//Do not send anymore
sendFPU = false;
}
//Encode next frame
VideoFrame *encoded = encoder->EncodeFrame(pic,videoInput->GetBufferSize());
//Check
if (!encoded)
break;
//Check size
if (frame.GetMaxMediaSize()<encoded->GetLength())
{
//Not enough space
Error("Not enough space to copy FLV encoded frame [frame:%d,encoded:%d]\n",frame.GetMaxMediaSize(),encoded->GetLength());
//Next
continue;
}
//Check
if (frameTime)
{
timespec ts;
//Lock
pthread_mutex_lock(&mutex);
//Calculate timeout
calcAbsTimeout(&ts,&prev,frameTime);
//Wait next or stopped
int canceled = !pthread_cond_timedwait(&cond,&mutex,&ts);
//Unlock
pthread_mutex_unlock(&mutex);
//.........part of the code omitted.........
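The pacing step above waits on a condition variable until the next frame is due or the encoder is signalled to stop; pthread_cond_timedwait returns 0 when the condition is signalled and ETIMEDOUT on a normal timeout, which is why its result is negated into canceled. The helper calcAbsTimeout is not shown in this excerpt; the following is a plausible sketch of such a helper — an assumption, not the library's actual implementation — converting a base timeval plus a millisecond offset into the absolute timespec that pthread_cond_timedwait expects.
//Hypothetical sketch of a calcAbsTimeout-style helper (assumed, not the
//library's actual code): absolute deadline = base time + offset in ms
void calcAbsTimeoutSketch(timespec *ts,const timeval *base,DWORD ms)
{
//Carry microseconds into seconds to keep both fields in range
long usec = base->tv_usec + (long)(ms%1000)*1000;
ts->tv_sec = base->tv_sec + ms/1000 + usec/1000000;
ts->tv_nsec = (usec%1000000)*1000L;
}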
Example 3: AddBroadcastReceiver
bool MultiConf::AddBroadcastReceiver(RTMPStream *receiver)
{
broadcast.AddReceiver(receiver);
Participants::iterator itBroadcaster = participants.find(m_CurrentBroadCaster);
if(itBroadcaster != participants.end())
{
RTPParticipant *broadCaster = (RTPParticipant*)itBroadcaster->second;
Log("Send idr packet to newly broadcast reciever\n");
IDRPacketSize idrPacketSize = broadCaster->GetIdrPacketSize();
IDRPacket idrPacket = broadCaster->GetIdrPacket();
DWORD currentTimeStamp = broadCaster->GetCurrentTimestamp();
size_t packetSize = idrPacket.size();
//Create desc frame
RTMPVideoFrame frameDesc(0,2048);
//Set timestamp
frameDesc.SetTimestamp(currentTimeStamp);
//Set type
frameDesc.SetVideoCodec(RTMPVideoFrame::AVC);
//Set type
frameDesc.SetFrameType(RTMPVideoFrame::INTRA);
//Set NALU type
frameDesc.SetAVCType(0);
//Set no delay
frameDesc.SetAVCTS(0);
//Create description
AVCDescriptor desc;
//Set values
desc.SetConfigurationVersion(1);
//desc.SetAVCProfileIndication(0x42);
//desc.SetProfileCompatibility(0x80);
//desc.SetAVCLevelIndication(0x14);
//desc.SetAVCProfileIndication(idrPacket[0][1]);
//desc.SetProfileCompatibility(idrPacket[0][2]);
//desc.SetAVCLevelIndication(idrPacket[0][3]);
desc.SetAVCProfileIndication(0x64);
desc.SetProfileCompatibility(0x00);
desc.SetAVCLevelIndication(0x28);
desc.SetNALUnitLength(3);
desc.AddSequenceParameterSet(idrPacket[0],idrPacketSize[0]);
desc.AddPictureParameterSet(idrPacket[1],idrPacketSize[1]);
//Serialize
DWORD len = desc.Serialize(frameDesc.GetMediaData(),frameDesc.GetMaxMediaSize());
//Set size
frameDesc.SetMediaSize(len);
//broadcast.OnPublishedFrame(0, &frameDesc);
receiver->PlayMediaFrame(&frameDesc);
frameDesc.Dump();
RTMPVideoFrame frame(0,65535);
//Set codec
frame.SetVideoCodec(RTMPVideoFrame::AVC);
//Set NALU type
frame.SetAVCType(1);
//Set no delay
frame.SetAVCTS(0);
frame.SetTimestamp(currentTimeStamp);
frame.SetFrameType(RTMPVideoFrame::INTRA);
VideoFrame *videoFrame = NULL;
RTPDepacketizer *depacketizer = RTPDepacketizer::Create( MediaFrame::Video, VideoCodec::H264);
for(int i = 0; i < packetSize; i++) {
BYTE *packet = idrPacket[i];
int packet_size = idrPacketSize[i];
videoFrame = (VideoFrame *)depacketizer->AddPayload(packet,packet_size);
}
//Only play the frame if the depacketizer produced a complete one
if (videoFrame)
{
frame.SetVideoFrame(videoFrame->GetData(), videoFrame->GetLength());
receiver->PlayMediaFrame(&frame);
frame.Dump();
}
delete depacketizer;
}
return true;
}
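Note the disabled lines in the descriptor setup above: instead of hard-coding profile 0x64 and level 0x28, the AVC profile, compatibility, and level bytes can be read directly from bytes 1-3 of the SPS NAL unit (idrPacket[0] in this example), which is exactly what the commented-out calls do. A guarded version of that alternative:
//Read profile/compatibility/level from the SPS itself (bytes 1-3 of the
//NAL unit), as the disabled lines suggest, instead of hard-coding them.
//Assumes idrPacket[0] holds a complete SPS, as in the example above.
if (idrPacketSize[0]>=4)
{
desc.SetAVCProfileIndication(idrPacket[0][1]);
desc.SetProfileCompatibility(idrPacket[0][2]);
desc.SetAVCLevelIndication(idrPacket[0][3]);
}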
Example 4: SendVideo
//.........part of the code omitted.........
QWORD sleep = frameTime;
//Remove extra sleep from prev
if (overslept<sleep)
//Remove it
sleep -= overslept;
else
//Do not overflow
sleep = 1;
//Calculate timeout
calcAbsTimeoutNS(&ts,&prev,sleep);
//Wait next or stopped
int canceled = !pthread_cond_timedwait(&cond,&mutex,&ts);
//Unlock
pthread_mutex_unlock(&mutex);
//Check if we have been canceled
if (canceled)
//Exit
break;
//Get difference
QWORD diff = getDifTime(&prev);
//If it is bigger
if (diff>frameTime)
//Get what we have slept more
overslept = diff-frameTime;
else
//No oversleep (shouldn't be possible)
overslept = 0;
}
//Increase frame counter
fpsAcu.Update(getTime()/1000,1);
//If first
if (!frameTime)
{
//Set frame time, slower
frameTime = 5*1000000/videoFPS;
//Restore bitrate
videoEncoder->SetFrameRate(videoFPS,current,videoIntraPeriod);
} else {
//Set frame time
frameTime = 1000000/videoFPS;
}
//Add frame size in bits to bitrate calculator
bitrateAcu.Update(getDifTime(&ini)/1000,videoFrame->GetLength()*8);
//Set frame timestamp
videoFrame->SetTimestamp(getDifTime(&ini)/1000);
//Check if we have mediaListener
if (mediaListener)
//Call it
mediaListener->onMediaFrame(*videoFrame);
//Set sending time of previous frame
getUpdDifTime(&prev);
//Calculate sending times based on bitrate
DWORD sendingTime = videoFrame->GetLength()*8/current;
//Adjust to maximum time
if (sendingTime>frameTime/1000)
//Cap it
sendingTime = frameTime/1000;
//If it was a I frame
if (videoFrame->IsIntra())
//Clean rtp rtx buffer
rtp.FlushRTXPackets();
//Send it smoothly
smoother.SendFrame(videoFrame,sendingTime);
//Dump statistics
if (num && ((num%(videoFPS*10))==0))
{
Debug("-Send bitrate target=%d current=%d avg=%llf rate=[%llf,%llf] fps=[%llf,%llf] limit=%d\n",target,current,bitrateAcu.GetInstantAvg()/1000,bitrateAcu.GetMinAvg()/1000,bitrateAcu.GetMaxAvg()/1000,fpsAcu.GetMinAvg(),fpsAcu.GetMaxAvg(),videoBitrateLimit);
bitrateAcu.ResetMinMax();
fpsAcu.ResetMinMax();
}
num++;
}
Log("-SendVideo out of loop\n");
//Stop capturing
videoInput->StopVideoCapture();
//Check
if (videoEncoder)
//Delete the encoder
delete videoEncoder;
//Exit
Log("<SendVideo [%d]\n",sendingVideo);
return 0;
}
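One unit subtlety in the loop above: sendingTime is GetLength()*8/current, i.e. the frame size in bits divided by the current bitrate. For the frameTime/1000 cap to be comparable (frameTime is in microseconds), current must be expressed in kbit/s, since bits divided by kbit/s yields milliseconds — an inference from the surrounding units, not something stated in the excerpt.
//Unit check for the smoothing above (assuming `current` is in kbit/s,
//inferred from the millisecond cap of frameTime/1000):
// GetLength() bytes * 8 -> bits
// bits / (kbit/s)       -> milliseconds
//e.g. a 6000-byte frame at 480 kbit/s: 6000*8/480 = 100 ms of sending time
DWORD sendingTimeMs = videoFrame->GetLength()*8/current;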
Example 5: SmoothFrame
int RTPMultiplexerSmoother::SmoothFrame(const MediaFrame* frame,DWORD duration)
{
//Check
if (!frame || !frame->HasRtpPacketizationInfo())
//Error
return Error("Frame do not has packetization info");
//Get info
const MediaFrame::RtpPacketizationInfo& info = frame->GetRtpPacketizationInfo();
DWORD codec = 0;
BYTE *frameData = NULL;
DWORD frameSize = 0;
//Depending on the type
switch(frame->GetType())
{
case MediaFrame::Audio:
{
//get audio frame
AudioFrame * audio = (AudioFrame*)frame;
//Get codec
codec = audio->GetCodec();
//Get data
frameData = audio->GetData();
//Get size
frameSize = audio->GetLength();
}
break;
case MediaFrame::Video:
{
//get Video frame
VideoFrame * video = (VideoFrame*)frame;
//Get codec
codec = video->GetCodec();
//Get data
frameData = video->GetData();
//Get size
frameSize = video->GetLength();
}
break;
default:
return Error("No smoother for frame");
}
DWORD frameLength = 0;
//Calculate total length
for (int i=0;i<info.size();i++)
//Get total length
frameLength += info[i]->GetTotalLength();
//Calculate bitrate for frame
DWORD current = 0;
//For each one
for (int i=0;i<info.size();i++)
{
//Get packet
MediaFrame::RtpPacketization* rtp = info[i];
//Create rtp packet
RTPPacketSched *packet = new RTPPacketSched(frame->GetType(),codec);
//Make sure the packetization fits in the packet
if (rtp->GetPrefixLen()+rtp->GetSize()>packet->GetMaxMediaLength())
{
//Does not fit; drop this packetization without leaking the packet
delete packet;
continue;
}
//Get pointer to media data
BYTE* out = packet->GetMediaData();
//Copy prefix
memcpy(out,rtp->GetPrefixData(),rtp->GetPrefixLen());
//Copy data
memcpy(out+rtp->GetPrefixLen(),frameData+rtp->GetPos(),rtp->GetSize());
//Set length
DWORD len = rtp->GetPrefixLen()+rtp->GetSize();
//Set length
packet->SetMediaLength(len);
switch(packet->GetMedia())
{
case MediaFrame::Video:
//Set timestamp
packet->SetTimestamp(frame->GetTimeStamp()*90);
break;
case MediaFrame::Audio:
//Set timestamp
packet->SetTimestamp(frame->GetTimeStamp()*8);
break;
default:
//Set timestamp
packet->SetTimestamp(frame->GetTimeStamp());
}
//Check
if (i+1==info.size())
//last
packet->SetMark(true);
else
//No last
packet->SetMark(false);
//Calculate partial length
//.........part of the code omitted.........
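The timestamp branches above convert the frame's millisecond timestamp to the RTP media clock: 90 kHz for video (hence the ×90) and 8 kHz for narrowband audio (hence the ×8). A hypothetical helper — not part of the library — that makes the clock rate explicit:
//Hypothetical helper (not in the library) for the RTP clock conversion
//above: milliseconds times ticks-per-millisecond of the media clock.
static DWORD ToRtpTimestamp(QWORD timestampMs,DWORD clockRateHz)
{
return timestampMs*(clockRateHz/1000);
}
//ToRtpTimestamp(ts,90000)==ts*90 (video); ToRtpTimestamp(ts,8000)==ts*8 (audio)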
Example 6: envir
void H264FrameSource::doGetNextFrame()
{
// Compute the wait time from the fps
double delay = 1000.0 / videoFPS ;
int to_delay = delay * 1000; // us
if(!m_videoInput)
return;
BYTE *pic = m_videoInput->GrabFrame();
//Check picture
if (!pic) {
fFrameSize = 0;
m_started = 0;
return;
}
//Check if we need to send intra
if (sendFPU)
{
videoEncoder->FastPictureUpdate();
}
//if (fPresentationTime.tv_sec == 0 && fPresentationTime.tv_usec == 0) {
// This is the first frame, so use the current time:
//} else {
// Increment by the play time of the previous data:
// unsigned uSeconds = fPresentationTime.tv_usec + fLastPlayTime;
// fPresentationTime.tv_sec += uSeconds/1000000;
// fPresentationTime.tv_usec = uSeconds%1000000;
//}
// Remember the play time of this data:
//fLastPlayTime = (fPlayTimePerFrame*fFrameSize)/fPreferredFrameSize;
//fDurationInMicroseconds = fLastPlayTime;
//fDurationInMicroseconds = 1000.0 / videoFPS;
VideoFrame *videoFrame = videoEncoder->EncodeFrame(pic,m_videoInput->GetBufferSize());
//If was failed
if (!videoFrame){
//Next
fFrameSize = 0;
m_started = 0;
Log("-----Error encoding video\n");
double delay = 1000.0 / videoFPS;
int to_delay = delay * 1000; // us
nextTask() = envir().taskScheduler().scheduleDelayedTask(to_delay,
(TaskFunc*)FramedSource::afterGetting, this);
return;
}
if(sendFPU)
sendFPU = false;
//Set frame timestamp
videoFrame->SetTimestamp(getDifTime(&first)/1000);
//Set sending time of previous frame
//getUpdDifTime(&prev);
//gettimeofday(&fPresentationTime, 0);
fFrameSize = videoFrame->GetLength();
//Truncate before copying so we never write past fTo's fMaxSize capacity
if (fFrameSize > fMaxSize) {
fNumTruncatedBytes = fFrameSize - fMaxSize;
fFrameSize = fMaxSize;
}
else {
fNumTruncatedBytes = 0;
}
memmove(fTo, videoFrame->GetData(), fFrameSize);
gettimeofday(&fPresentationTime, NULL);
//to_delay = ((1000 / videoFPS) * fFrameSize / RTPPAYLOADSIZE) * 1000; // us
nextTask() = envir().taskScheduler().scheduleDelayedTask(to_delay,
(TaskFunc*)FramedSource::afterGetting, this);
}
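A note on the live555 contract this example implements: FramedSource::doGetNextFrame must deliver at most fMaxSize bytes into fTo, report any overflow through fNumTruncatedBytes, set fPresentationTime, and eventually arrange for FramedSource::afterGetting to run — here deferred through the task scheduler so that delivery is paced to the configured frame rate rather than re-entering synchronously.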