本文整理汇总了C++中List::AppendArray方法的典型用法代码示例。如果您正苦于以下问题:C++ List::AppendArray方法的具体用法?C++ List::AppendArray怎么用?C++ List::AppendArray使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类List
的用法示例。
在下文中一共展示了List::AppendArray方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: InitSEIUserData
// Builds the "user data unregistered" SEI payload carrying a human-readable
// description of the QSV encoder settings, and queues it for insertion into
// the stream via AddSEIData().
//
// Payload layout (per the H.264 unregistered-user-data SEI convention):
//   [16-byte ISO-11578 UUID][NUL-terminated UTF-8 settings string]
void InitSEIUserData()
{
List<mfxU8> payload;
// ISO-11578 UUID identifying this producer's user data:
// 6d1a26a0-bddc-11e2-9024-0050c2490048
const mfxU8 UUID[] = { 0x6d, 0x1a, 0x26, 0xa0, 0xbd, 0xdc, 0x11, 0xe2,
0x90, 0x24, 0x00, 0x50, 0xc2, 0x49, 0x00, 0x48 };
// Use sizeof(UUID) rather than a hard-coded 16 so the length always
// tracks the array definition above.
payload.AppendArray(UUID, sizeof(UUID));
// Render the current encoder configuration as a display string.
// NOTE(review): values come from module-level state (bUseCBR, params,
// query, ver) configured elsewhere — assumed valid by the time this runs.
String str;
str << TEXT("QSV hardware encoder options:")
<< TEXT(" rate control: ") << (bUseCBR ? TEXT("cbr") : TEXT("vbr"))
<< TEXT("; target bitrate: ") << params.mfx.TargetKbps
<< TEXT("; max bitrate: ") << query.mfx.MaxKbps
<< TEXT("; buffersize: ") << query.mfx.BufferSizeInKB*8
<< TEXT("; API level: ") << ver.Major << TEXT(".") << ver.Minor;
// CreateUTF8String allocates; ownership is released via Free() below.
LPSTR info = str.CreateUTF8String();
// +1 keeps the terminating NUL in the payload.
payload.AppendArray((LPBYTE)info, (unsigned)strlen(info)+1);
Free(info);
AddSEIData(payload, SEI_USER_DATA_UNREGISTERED);
}
示例2: BeginPublishingInternal
// Sends the initial RTMP packets that begin a publishing session:
//   1. an @setDataFrame/onMetaData INFO packet describing the stream,
//   2. the audio codec sequence header,
//   3. the video codec sequence header.
// If any RTMP_SendPacket call fails, a stop message is posted to the app
// and the remaining packets are not sent.
void RTMPPublisher::BeginPublishingInternal()
{
RTMPPacket packet;

// Scratch buffer used to AMF-encode the metadata body; pend marks one
// past the end so the AMF_Encode* helpers can bounds-check.
char pbuf[2048], *pend = pbuf+sizeof(pbuf);

// ---- packet 1: stream metadata (AMF "onMetaData") -------------------
packet.m_nChannel = 0x03; // control channel (invoke)
packet.m_headerType = RTMP_PACKET_SIZE_LARGE;
packet.m_packetType = RTMP_PACKET_TYPE_INFO;
packet.m_nTimeStamp = 0;
packet.m_nInfoField2 = rtmp->m_stream_id;
packet.m_hasAbsTimestamp = TRUE;

// m_body points past RTMP_MAX_HEADER_SIZE so librtmp can prepend the
// RTMP header in place without copying the payload.
packet.m_body = pbuf + RTMP_MAX_HEADER_SIZE;

char *enc = packet.m_body;
enc = AMF_EncodeString(enc, pend, &av_setDataFrame);
enc = AMF_EncodeString(enc, pend, &av_onMetaData);
enc = App->EncMetaData(enc, pend);

packet.m_nBodySize = enc - packet.m_body;
if(!RTMP_SendPacket(rtmp, &packet, FALSE))
{
App->PostStopMessage();
return;
}

//----------------------------------------------

// packetPadding holds RTMP_MAX_HEADER_SIZE spare bytes followed by the
// header payload; mediaHeaders is reused for both audio and video.
List<BYTE> packetPadding;
DataPacket mediaHeaders;

//----------------------------------------------

// ---- packet 2: audio headers ----------------------------------------
// NOTE(review): m_headerType is still RTMP_PACKET_SIZE_LARGE from the
// metadata packet above; this packet relies on that carried-over value.
packet.m_nChannel = 0x05; // source channel
packet.m_packetType = RTMP_PACKET_TYPE_AUDIO;

App->GetAudioHeaders(mediaHeaders);

// SetSize(RTMP_MAX_HEADER_SIZE) reserves the in-place header space, then
// the actual payload is appended after it.
packetPadding.SetSize(RTMP_MAX_HEADER_SIZE);
packetPadding.AppendArray(mediaHeaders.lpPacket, mediaHeaders.size);

packet.m_body = (char*)packetPadding.Array()+RTMP_MAX_HEADER_SIZE;
packet.m_nBodySize = mediaHeaders.size;
if(!RTMP_SendPacket(rtmp, &packet, FALSE))
{
App->PostStopMessage();
return;
}

//----------------------------------------------

// ---- packet 3: video headers ----------------------------------------
packet.m_nChannel = 0x04; // source channel
packet.m_headerType = RTMP_PACKET_SIZE_LARGE;
packet.m_packetType = RTMP_PACKET_TYPE_VIDEO;

App->GetVideoHeaders(mediaHeaders);

// SetSize truncates packetPadding back to just the reserved header
// space before appending the video payload (capacity is reused).
packetPadding.SetSize(RTMP_MAX_HEADER_SIZE);
packetPadding.AppendArray(mediaHeaders.lpPacket, mediaHeaders.size);

packet.m_body = (char*)packetPadding.Array()+RTMP_MAX_HEADER_SIZE;
packet.m_nBodySize = mediaHeaders.size;
if(!RTMP_SendPacket(rtmp, &packet, FALSE))
{
App->PostStopMessage();
return;
}
}
示例3: GetNextBuffer
//.........这里部分代码省略.........
}
numAudioFrames = data.output_frames_gen;
}
//-----------------------------------------------------------------------------
// sort all audio frames into 10 millisecond increments (done because not all devices output in 10ms increments)
// NOTE: 0.457+ - instead of using the timestamps from windows, just compare and make sure it stays within a 100ms of their timestamps
float *newBuffer = (bResample) ? tempResampleBuffer.Array() : tempBuffer.Array();
if(storageBuffer.Num() == 0 && numAudioFrames == 441)
{
lastUsedTimestamp += 10;
if(!bBrokenTimestamp)
{
QWORD difVal = GetQWDif(newTimestamp, lastUsedTimestamp);
if(difVal > 70)
lastUsedTimestamp = newTimestamp;
}
if(lastUsedTimestamp > lastSentTimestamp)
{
QWORD adjustVal = (lastUsedTimestamp-lastSentTimestamp);
if(adjustVal < 10)
lastUsedTimestamp += 10-adjustVal;
AudioSegment &newSegment = *audioSegments.CreateNew();
newSegment.audioData.CopyArray(newBuffer, numAudioFrames*2);
newSegment.timestamp = lastUsedTimestamp;
MultiplyAudioBuffer(newSegment.audioData.Array(), numAudioFrames*2, curVolume);
lastSentTimestamp = lastUsedTimestamp;
}
}
else
{
UINT storedFrames = storageBuffer.Num();
storageBuffer.AppendArray(newBuffer, numAudioFrames*2);
if(storageBuffer.Num() >= (441*2))
{
lastUsedTimestamp += 10;
if(!bBrokenTimestamp)
{
QWORD difVal = GetQWDif(newTimestamp, lastUsedTimestamp);
if(difVal > 70)
lastUsedTimestamp = newTimestamp - (QWORD(storedFrames)/2*1000/44100);
}
//------------------------
// add new data
if(lastUsedTimestamp > lastSentTimestamp)
{
QWORD adjustVal = (lastUsedTimestamp-lastSentTimestamp);
if(adjustVal < 10)
lastUsedTimestamp += 10-adjustVal;
AudioSegment &newSegment = *audioSegments.CreateNew();
newSegment.audioData.CopyArray(storageBuffer.Array(), (441*2));
newSegment.timestamp = lastUsedTimestamp;
MultiplyAudioBuffer(newSegment.audioData.Array(), 441*2, curVolume);
storageBuffer.RemoveRange(0, (441*2));
}
//------------------------
// if still data pending (can happen)
while(storageBuffer.Num() >= (441*2))
{
lastUsedTimestamp += 10;
if(lastUsedTimestamp > lastSentTimestamp)
{
QWORD adjustVal = (lastUsedTimestamp-lastSentTimestamp);
if(adjustVal < 10)
lastUsedTimestamp += 10-adjustVal;
AudioSegment &newSegment = *audioSegments.CreateNew();
newSegment.audioData.CopyArray(storageBuffer.Array(), (441*2));
storageBuffer.RemoveRange(0, (441*2));
MultiplyAudioBuffer(newSegment.audioData.Array(), 441*2, curVolume);
newSegment.timestamp = lastUsedTimestamp;
lastSentTimestamp = lastUsedTimestamp;
}
}
}
}
//-----------------------------------------------------------------------------
return ContinueAudioRequest;
}
return NoAudioAvailable;
}