

C++ List::CopyArray Method Code Examples

This article collects typical usage examples of the C++ List::CopyArray method. If you are wondering what List::CopyArray does, how to call it, or what real-world uses look like, the curated code samples below may help. You can also explore further usage examples of the List class that this method belongs to.


Four code examples of the List::CopyArray method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
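
In all four examples, CopyArray replaces the contents of a List<T> with a copy of a raw array. Before the examples, here is a minimal, self-contained sketch of that behavior; the simplified List stand-in and the assumed signature void CopyArray(const T *vals, UINT count) are illustrative guesses, not the actual OBS utility container the examples use.

    // Sketch only: a stripped-down List<T> assuming CopyArray(const T*, UINT)
    // resizes the list and deep-copies the given elements.
    #include <cstring>
    #include <cstdio>

    typedef unsigned int  UINT;
    typedef unsigned char BYTE;

    template<typename T>
    class List
    {
        T    *array;
        UINT  num;

    public:
        List() : array(nullptr), num(0) {}
        ~List() { delete[] array; }

        // Replace the current contents with a copy of count elements from vals.
        void CopyArray(const T *vals, UINT count)
        {
            delete[] array;
            array = new T[count];
            num   = count;
            memcpy(array, vals, sizeof(T)*count);
        }

        inline T*   Array() const { return array; }
        inline UINT Num()   const { return num; }
    };

    int main()
    {
        BYTE header[] = {0xFF, 0xFB, 0x90, 0x64};

        List<BYTE> packetHeader;
        packetHeader.CopyArray(header, 4); // deep-copies the 4 header bytes

        printf("copied %u bytes, first byte 0x%02X\n",
               packetHeader.Num(), (unsigned)packetHeader.Array()[0]);
        return 0;
    }

This mirrors the pattern used in the examples below, e.g. stashing an encoder header or a sub-range of a packet into its own buffer.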

Example 1: Encode

    bool Encode(float *input, UINT numInputFrames, DataPacket &packet, QWORD &timestamp)
    {
        if(bFirstFrame)
        {
            curEncodeTimestamp = timestamp;
            bFirstFrame = false;
        }

        //------------------------------------------------

        UINT lastSampleSize = frameCounter;

        frameCounter += numInputFrames;
        if(frameCounter > outputFrameSize)
        {
            frameCounter -= outputFrameSize;

            bufferedTimestamps << curEncodeTimestamp;
            curEncodeTimestamp = timestamp + ((outputFrameSize-lastSampleSize)*1000/App->GetSampleRateHz());
        }

        int ret = lame_encode_buffer_interleaved_ieee_float(lgf, (float*)input, numInputFrames, MP3OutputBuffer.Array()+1, dwMP3MaxSize);

        if(ret < 0)
        {
            AppWarning(TEXT("MP3 encode failed"));
            return false;
        }

        if(ret > 0)
        {
            if(bFirstPacket)
            {
                header.CopyArray(MP3OutputBuffer.Array(), ret);
                bFirstPacket = false;
                ret = 0;
            }
            else
            {
                packet.lpPacket = MP3OutputBuffer.Array();
                packet.size     = ret+1;

                timestamp = bufferedTimestamps[0];
                bufferedTimestamps.Remove(0);
            }
        }

        return ret > 0;
    }
Developer ID: Alucard014, Project: OBS, Lines of code: 49, Source file: Encoder_MP3.cpp

Example 2: output

    ~MP4FileStream()
    {
        if(!bStreamOpened)
            return;

        App->EnableSceneSwitching(false);

        //---------------------------------------------------

        //HWND hwndProgressDialog = CreateDialog(hinstMain, MAKEINTRESOURCE(IDD_BUILDINGMP4), hwndMain, (DLGPROC)MP4ProgressDialogProc);
        //SendMessage(GetDlgItem(hwndProgressDialog, IDC_PROGRESS1), PBM_SETRANGE32, 0, 100);

        mdatStop = fileOut.GetPos();

        BufferOutputSerializer output(endBuffer);

        //set a reasonable initial buffer size
        endBuffer.SetSize((videoFrames.Num() + audioFrames.Num()) * 20 + 131072);

        UINT64 audioFrameSize = App->GetAudioEncoder()->GetFrameSize();

        DWORD macTime = fastHtonl(DWORD(GetMacTime()));
        UINT videoDuration = fastHtonl(lastVideoTimestamp + App->GetFrameTime());
        UINT audioDuration = fastHtonl(lastVideoTimestamp + DWORD(double(audioFrameSize)*1000.0/double(App->GetSampleRateHz())));
        UINT width, height;
        App->GetOutputSize(width, height);

        LPCSTR lpVideoTrack = "Video Media Handler";
        LPCSTR lpAudioTrack = "Sound Media Handler";

        const char videoCompressionName[31] = "AVC Coding";

        //-------------------------------------------
        // get video headers
        DataPacket videoHeaders;
        App->GetVideoHeaders(videoHeaders);
        List<BYTE> SPS, PPS;

        LPBYTE lpHeaderData = videoHeaders.lpPacket+11;
        SPS.CopyArray(lpHeaderData+2, fastHtons(*(WORD*)lpHeaderData));

        lpHeaderData += SPS.Num()+3;
        PPS.CopyArray(lpHeaderData+2, fastHtons(*(WORD*)lpHeaderData));

        //-------------------------------------------
        // get AAC headers if using AAC
        List<BYTE> AACHeader;
        if(!bMP3)
        {
            DataPacket data;
            App->GetAudioHeaders(data);
            AACHeader.CopyArray(data.lpPacket+2, data.size-2);
        }

        //-------------------------------------------

        EndChunkInfo(videoChunks, videoSampleToChunk, curVideoChunkOffset, numVideoSamples);
        EndChunkInfo(audioChunks, audioSampleToChunk, curAudioChunkOffset, numAudioSamples);

        if (numVideoSamples > 1)
            GetVideoDecodeTime(videoFrames.Last(), true);

        if (numAudioSamples > 1)
            GetAudioDecodeTime(audioFrames.Last(), true);

        UINT audioUnitDuration = fastHtonl(UINT(lastAudioTimeVal));

        //SendMessage(GetDlgItem(hwndProgressDialog, IDC_PROGRESS1), PBM_SETPOS, 25, 0);

        //-------------------------------------------
        // sound descriptor thingy.  this part made me die a little inside admittedly.
        UINT maxBitRate = fastHtonl(App->GetAudioEncoder()->GetBitRate()*1000);

        List<BYTE> esDecoderDescriptor;
        BufferOutputSerializer esDecoderOut(esDecoderDescriptor);
        esDecoderOut.OutputByte(bMP3 ? 107 : 64);
        esDecoderOut.OutputByte(0x15); //stream/type flags.  always 0x15 for my purposes.
        esDecoderOut.OutputByte(0); //buffer size, just set it to 1536 for both mp3 and aac
        esDecoderOut.OutputWord(WORD_BE(0x600)); 
        esDecoderOut.OutputDword(maxBitRate); //max bit rate (cue bill 'o reily meme for these two)
        esDecoderOut.OutputDword(maxBitRate); //avg bit rate

        if(!bMP3) //if AAC, put in headers
        {
            esDecoderOut.OutputByte(0x5);  //decoder specific descriptor type
            /*esDecoderOut.OutputByte(0x80); //some stuff that no one should probably care about
            esDecoderOut.OutputByte(0x80);
            esDecoderOut.OutputByte(0x80);*/
            esDecoderOut.OutputByte(AACHeader.Num());
            esDecoderOut.Serialize((LPVOID)AACHeader.Array(), AACHeader.Num());
        }


        List<BYTE> esDescriptor;
        BufferOutputSerializer esOut(esDescriptor);
        esOut.OutputWord(0); //es id
        esOut.OutputByte(0); //stream priority
        esOut.OutputByte(4); //descriptor type
        /*esOut.OutputByte(0x80); //some stuff that no one should probably care about
        esOut.OutputByte(0x80);
//......... remainder of code omitted .........
Developer ID: ArtBears, Project: OBS, Lines of code: 101, Source file: MP4FileStream.cpp

Example 3: LoadAnimations


//......... beginning of code omitted .........

                    Vect &pk   = keys.lpPosKeys[k];
                    Vect &pkp1 = keys.lpPosKeys[kp1];
                    Vect &pkm1 = keys.lpPosKeys[km1];

                    keys.lpPosTans[k] = pk.GetInterpolationTangent(pkm1, pkp1);
                }
            }
        }
    }

    //-----------------------------------------------

    int num;
    animData << num;
    BoneExtensions.SetSize(num);
    BoneExtensionNames.SetSize(num);

    for(int i=0; i<num; i++)
    {
        animData << BoneExtensionNames[i];
        animData << BoneExtensions[i];
    }

    //-----------------------------------------------

    AnimatedSections.SetSize(nSections);

    if(ver == 0x100) //remove
    {
        UINT *indices = (UINT*)IdxBuffer->GetData();

        List<UINT> adjustedIndices;
        adjustedIndices.CopyArray(indices, IdxBuffer->NumIndices());

        VBData *vbd = VertBuffer->GetData();
        List<Vect> &verts = vbd->VertList;

        //--------- 
        // get vert data
        List<VertAnimInfo> vertInfo;
        vertInfo.SetSize(nVerts);

        for(int i=0; i<nBones; i++)
        {
            Bone &bone = BoneList[i];
            for(int j=0; j<bone.Weights.Num(); j++)
            {
                VWeight &vertWeight = bone.Weights[j];

                if(!vertInfo[vertWeight.vert].bones.HasValue(i))
                {
                    vertInfo[vertWeight.vert].bones << i;
                    vertInfo[vertWeight.vert].weights << vertWeight.weight;
                }
            }
        }

        //--------- 
        // remove excess bone influence from verts (let just set the max to 3 for this)
        /*for(int i=0; i<vertInfo.Num(); i++)
        {
            VertAnimInfo &vert = vertInfo[i];

            while(vert.bones.Num() > 3)
            {
Developer ID: alanzw, Project: JimEngine, Lines of code: 67, Source file: Mesh.cpp

Example 4: MainAudioLoop

void OBS::MainAudioLoop()
{
    DWORD taskID = 0;
    HANDLE hTask = AvSetMmThreadCharacteristics(TEXT("Pro Audio"), &taskID);

    bPushToTalkOn = false;

    micMax = desktopMax = VOL_MIN;
    micPeak = desktopPeak = VOL_MIN;

    UINT audioFramesSinceMeterUpdate = 0;
    UINT audioFramesSinceMicMaxUpdate = 0;
    UINT audioFramesSinceDesktopMaxUpdate = 0;

    List<float> mixedLatestDesktopSamples;

    List<float> blank10msSample;
    blank10msSample.SetSize(882);

    QWORD lastAudioTime = 0;

    while(TRUE)
    {
        OSSleep(5); //screw it, just run it every 5ms

        if(!bRunning)
            break;

        //-----------------------------------------------

        float *desktopBuffer, *micBuffer;
        UINT desktopAudioFrames = 0, micAudioFrames = 0;
        UINT latestDesktopAudioFrames = 0, latestMicAudioFrames = 0;

        curDesktopVol = desktopVol * desktopBoost;

        if(bUsingPushToTalk)
            curMicVol = bPushToTalkOn ? micVol : 0.0f;
        else
            curMicVol = micVol;

        curMicVol *= micBoost;

        bool bDesktopMuted = (curDesktopVol < EPSILON);
        bool bMicEnabled   = (micAudio != NULL);

        QWORD timestamp;
        while(QueryNewAudio(timestamp))
        {
            if (!lastAudioTime)
                lastAudioTime = App->GetSceneTimestamp();

            if (lastAudioTime < timestamp) {
                while ((lastAudioTime+=10) < timestamp)
                    EncodeAudioSegment(blank10msSample.Array(), 441, lastAudioTime);
            }

            //----------------------------------------------------------------------------
            // get latest sample for calculating the volume levels

            float *latestDesktopBuffer = NULL, *latestMicBuffer = NULL;

            desktopAudio->GetBuffer(&desktopBuffer, &desktopAudioFrames, timestamp-10);
            desktopAudio->GetNewestFrame(&latestDesktopBuffer, &latestDesktopAudioFrames);

            UINT totalFloats = desktopAudioFrames*2;
            if(bDesktopMuted)
            {
                // Clearing the desktop audio buffer before mixing in the auxiliary audio sources.
                zero(desktopBuffer, sizeof(*desktopBuffer)*totalFloats);
            }

            if(micAudio != NULL)
            {
                micAudio->GetBuffer(&micBuffer, &micAudioFrames, timestamp-10);
                micAudio->GetNewestFrame(&latestMicBuffer, &latestMicAudioFrames);
            }

            //----------------------------------------------------------------------------
            // get latest aux volume level samples and mix

            OSEnterMutex(hAuxAudioMutex);

            mixedLatestDesktopSamples.CopyArray(latestDesktopBuffer, latestDesktopAudioFrames*2);
            for(UINT i=0; i<auxAudioSources.Num(); i++)
            {
                float *latestAuxBuffer;

                if(auxAudioSources[i]->GetNewestFrame(&latestAuxBuffer, &latestDesktopAudioFrames))
                    MixAudio(mixedLatestDesktopSamples.Array(), latestAuxBuffer, latestDesktopAudioFrames*2, false);
            }

            //----------------------------------------------------------------------------
            // mix output aux sound samples with the desktop

            for(UINT i=0; i<auxAudioSources.Num(); i++)
            {
                float *auxBuffer;

                if(auxAudioSources[i]->GetBuffer(&auxBuffer, &desktopAudioFrames, timestamp-10))
//......... remainder of code omitted .........
Developer ID: ascendedguard, Project: OBS, Lines of code: 101, Source file: OBSCapture.cpp


Note: The List::CopyArray method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.