当前位置: 首页>>代码示例>>C#>>正文


C# AudioClip.SetData方法代码示例

本文整理汇总了C#中UnityEngine.AudioClip.SetData方法的典型用法代码示例。如果您正苦于以下问题:C# AudioClip.SetData方法的具体用法?C# AudioClip.SetData怎么用?C# AudioClip.SetData使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在UnityEngine.AudioClip的用法示例。


在下文中一共展示了AudioClip.SetData方法的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。

示例1: StartImport

    /// <summary>
    /// Decodes an MP3 file via the native mpg123 bindings, loads the samples into
    /// an AudioClip and starts looped playback on this GameObject's AudioSource.
    /// </summary>
    /// <param name="mPath">Path of the MP3 file to import.</param>
    public void StartImport(string mPath)
    {
        //mPath = EditorUtility.OpenFilePanel ("Open MP3", "", "mp3");

        // Reuse an existing AudioSource, or add one. The generic AddComponent<T>
        // replaces the string overload, which was removed in modern Unity.
        audioSource = GetComponent<AudioSource>();
        if (audioSource == null)
            audioSource = gameObject.AddComponent<AudioSource>();

        MPGImport.mpg123_init ();
        handle_mpg = MPGImport.mpg123_new (null, errPtr);
        try
        {
            x = MPGImport.mpg123_open (handle_mpg, mPath);
            MPGImport.mpg123_getformat (handle_mpg, out rate, out channels, out encoding);
            intRate = rate.ToInt32 ();
            intChannels = channels.ToInt32 ();
            intEncoding = encoding.ToInt32 ();

            MPGImport.mpg123_id3 (handle_mpg, out id3v1, out id3v2);
            MPGImport.mpg123_format_none (handle_mpg);
            // 208 presumably selects 16-bit signed output — TODO confirm against the MPGImport bindings.
            MPGImport.mpg123_format (handle_mpg, intRate, intChannels, 208);

            FrameSize = MPGImport.mpg123_outblock (handle_mpg);
            byte[] Buffer = new byte[FrameSize];
            lengthSamples = MPGImport.mpg123_length (handle_mpg);

            myClip = AudioClip.Create ("myClip", lengthSamples, intChannels, intRate, false, false);

            int importIndex = 0;

            while (0 == MPGImport.mpg123_read(handle_mpg, Buffer, FrameSize, out done))
            {
                float[] fArray = ByteToFloat (Buffer);

                // Each decoded frame advances fArray.Length / 2 sample frames
                // (integer division, as in the original arithmetic).
                int offset = (importIndex * fArray.Length) / 2;
                if (offset > lengthSamples)
                {
                    // mpg123 under-reported the stream length; writing past the
                    // clip end would throw, so truncate what we have instead.
                    Debug.LogWarning("MP3 file " + mPath + " is of an unexpected length and was truncated.");
                    break;
                }

                myClip.SetData (fArray, offset);

                importIndex++;
            }
        }
        finally
        {
            // Always release the native decoder handle, even if decoding throws;
            // the original leaked it on any exception before mpg123_close.
            MPGImport.mpg123_close (handle_mpg);
        }

        audioSource.clip = myClip;
        audioSource.loop = true;
        audioSource.Play ();
    }
开发者ID:C-o-r-E,项目名称:FrequencyDomain,代码行数:43,代码来源:MP3Import.cs

示例2: Start

    // Use this for initialization
    void Start()
    {
        _pxcmSenseManager = PXCMSenseManager.CreateInstance();

        if (_pxcmSenseManager == null)
        {
            Debug.LogError("SenseManager Initialization Failed");
        }
        else
        {
            int nbuffers;
            int nsamples;
            AudioSource aud = GetComponent<AudioSource>();
            String name = "test";
            Int32 sid = 1;

            _pxcmSenseManager.session.CreateImpl<PXCMSpeechSynthesis>(out tts);
            PXCMSpeechSynthesis.ProfileInfo pinfo;
            tts.QueryProfile(0, out pinfo);
            pinfo.language = PXCMSpeechSynthesis.LanguageType.LANGUAGE_US_ENGLISH;
            tts.SetProfile(pinfo);
            tts.BuildSentence(1, "The year is 2098.  Interstellar space travel was made possible in 2045.  But space travel is dangerous.  Human life support systems for interdimensional transport are costly and high risk.  The majority of trade ships are unmanned and piloted remotely using hyper dimensional virtual reality.  Use the voice command initialize to begin, or click the play button.");

            nsamples = tts.QuerySampleNum(sid);

            if (nsamples != 0)
            {
                tts.QueryProfile(out pinfo);
                clip = AudioClip.Create(
                    name,
                    nsamples * pinfo.outputs.nchannels,
                    pinfo.outputs.nchannels,
                    pinfo.outputs.sampleRate,
                    false
                );

                nbuffers = tts.QueryBufferNum(sid);

                for (int i = 0, offset = 0; i < nbuffers; i++)
                {
                    PXCMAudio audio = tts.QueryBuffer(sid, i);
                    PXCMAudio.AudioData data;
                    pxcmStatus sts = audio.AcquireAccess(
                        PXCMAudio.Access.ACCESS_READ,
                        PXCMAudio.AudioFormat.AUDIO_FORMAT_IEEE_FLOAT,
                        out data
                    );

                    if (sts < pxcmStatus.PXCM_STATUS_NO_ERROR) break;

                    float[] samples = data.ToFloatArray();

                    clip.SetData(data.ToFloatArray(), offset);

                    offset += samples.Length;

                    audio.ReleaseAccess(data);
                }

                aud.clip = clip;
                aud.Play();
            }
        }

        tts.Dispose();
    }
开发者ID:e-ln1,项目名称:RealSenseUnityDemo_byLance,代码行数:67,代码来源:SpeechTesting.cs

示例3: CreateRecordAndStopButton

    /// <summary>
    /// Draws the Record/Stop button pair used to capture a calibration clip from
    /// the microphone helper object.
    /// </summary>
    /// <param name="clip">Receives a copy of the recorded audio when Stop is pressed.</param>
    /// <param name="clipName">Name given to the created AudioClip and used in log messages.</param>
    void CreateRecordAndStopButton(ref AudioClip clip, string clipName)
    {
        if (GUILayout.Button("Record", EditorStyles.miniButtonLeft)) {
            if (!micHelper_) {
                // Mic-driven lip sync and recording cannot share the microphone.
                if (lipSync.useMic) {
                    lipSync.useMic = false;
                    lipSync.Stop();
                }

                // Create mic helper and start recording
                micHelper_ = new GameObject();
                micHelper_.name = "Callibration Mic Helper";
                var mic = micHelper_.AddComponent<MicHandler>();
                mic.Initialize(lipSync.sampleNum);
                mic.Record();
                Debug.Log("=== START RECORDING " + clipName + " ====");
            } else {
                Debug.LogWarning("Already recording!");
            }
        }
        if (GUILayout.Button("Stop", EditorStyles.miniButtonRight)) {
            if (micHelper_) {
                // Fetch the recorder created by the Record button. The old code
                // called AddComponent here, which attached a second, empty
                // MicHandler — so the copied clip never held the recording.
                var mic = micHelper_.GetComponent<MicHandler>();

                // Copy mic.clip to new AudioClip
                var data = new float[mic.clip.samples];
                mic.clip.GetData(data, 0);
                clip = AudioClip.Create(clipName, mic.clip.samples, 1, mic.clip.frequency, true, false);
                clip.SetData(data, 0);

                // Destroy mic helper
                mic.Stop();
                Destroy(micHelper_);
                Debug.Log("=== STOP RECORDING " + clipName + " ====");
            } else {
                Debug.LogWarning("Not recording!");
            }
        }
    }
开发者ID:WondermSwift,项目名称:MMD4Mecanim-LipSync-Plugin,代码行数:38,代码来源:LipSyncCoreEditor.cs

示例4: initAudio

	/// <summary>
	/// Builds the looping B-note AudioClip from the precomputed sample buffer
	/// and attaches it to a freshly added AudioSource on this GameObject.
	/// </summary>
	public void initAudio(){
		// Mono clip at 44.1 kHz, sized exactly to the precomputed note samples.
		audioClip = AudioClip.Create("FPS-BNote", bNote.Length, 1, 44100, false, false);
		audioClip.SetData(bNote, 0);

		audioSource = gameObject.AddComponent<AudioSource>();
		audioSource.clip = audioClip;
		audioSource.loop = true;
	}
开发者ID:kbefans,项目名称:kbengine_unity3d_warring,代码行数:8,代码来源:FPSGraphC.cs

示例5: Update


//.........这里部分代码省略.........
        }

        if (Input.GetButtonDown("Interaction")) {
            if(IsSenseActive(SenseController.SenseType.Feeling)) {
                if (isCarrying) {
                    isCarrying = false;
                }
                RaycastHit hit;
                Transform sightCamera = transform.root.FindChild("SenseGroup").FindChild("SightCamera");
                //Debug.DrawRay(sightCamera.position, sightCamera.TransformDirection(Vector3.forward));
                if(carryingObject == null) {
                    //Debug.DrawRay(sightCamera.position, sightCamera.TransformDirection(Vector3.forward));
                    if(Physics.Raycast(sightCamera.transform.position, sightCamera.transform.TransformDirection(Vector3.forward), out hit, 2)) {
                        carryingObject = hit.transform.root.gameObject.GetComponentInChildren<InteractableObject>();
                        if (carryingObject != null) {
                            if(carryingObject.canCarry) {
                                if(carryingObject != null) {
                                    isCarrying = !isCarrying;
                                    carryingObject.transform.parent = PlayerController.Instance.transform;
                                    carryingObject.collider.enabled = false;
                                    carryingObject.rigidbody.useGravity = false;
                                    Debug.Log ("hurrdurr");
                                }
                            } else if(carryingObject.canActivate) {
                                carryingObject.activate();
                            }
                        }
                    }
                }

                else if (!isCarrying){
                    Debug.Log ("drop");
                    //carryingObject.rigidbody.useGravity = true;
                    if(carryingObject != null) {
                        carryingObject.collider.enabled = true;
                        carryingObject.rigidbody.useGravity = true;
                        carryingObject.transform.parent = null;
                        carryingObject.rigidbody.WakeUp();
                        carryingObject = null;
                    } else {
                        Debug.Log("YIURSD");
                    }
                }
            }
        }

        RaycastHit echoHit = new RaycastHit();

        if(Application.HasUserAuthorization(UserAuthorization.Microphone) || !Application.isWebPlayer) {
            if(Input.GetButtonDown("Shout")) {
                if(IsSenseActive(SenseController.SenseType.Hearing)) {
                    Transform sightCamera = transform.root.FindChild("SenseGroup").FindChild("SightCamera");

                    Physics.Raycast(sightCamera.position, sightCamera.TransformDirection(Vector3.forward), out echoHit, 100f);
                    echoDistance = echoHit.distance;

                    audio.clip = Microphone.Start("", false, 99, AudioSettings.outputSampleRate);
                }
            }

            if(Input.GetButtonUp("Shout")) {
                if(IsSenseActive(SenseController.SenseType.Hearing)) {
                    Microphone.End("");
                    Debug.Log(echoDistance);
                    voiceClip = AudioClip.Create("MyVoice", 44100, 1, 44100, true, false);
                    AudioEchoFilter echoFilter = GameObject.Find("Player").GetComponentInChildren<AudioEchoFilter>();

                    if(echoDistance >= 10f && echoDistance <= 15f) {
                        echoFilter.enabled = true;
                        echoFilter.wetMix = 0.1f;
                        echoFilter.decayRatio = 0.1f;
                    } else if(echoDistance > 15f && echoDistance <= 20f) {
                        echoFilter.enabled = true;
                        echoFilter.wetMix = 0.1f;
                        echoFilter.decayRatio = 0.4f;
                    } else if(echoDistance > 20f) {
                        echoFilter.enabled = true;
                        echoFilter.wetMix = 0.1f;
                        echoFilter.decayRatio = 0.75f;
                    } else {
                        echoFilter.enabled = false;
                    }

                    float[] samples = new float[44100];
                    audio.clip.GetData(samples, 0);

                    voiceClip.SetData(samples, 0);
                    audio.clip = voiceClip;

                    audio.PlayOneShot(voiceClip);

                    echoDistance = 0f;
                }
            }
        }

        if (!killed) {
            _checkDeath();
        }
    }
开发者ID:kieferyap,项目名称:ggj-2014-senses,代码行数:101,代码来源:PlayerController.cs

示例6: StopRecordingFile

	/// <summary>
	/// Stops the current microphone recording, extracts the recorded samples into
	/// currentClip, and writes them out as a WAV file for the speech recognizer.
	/// </summary>
	/// <returns>
	/// false when no device is selected or the recording is shorter than 0.5 s;
	/// true once the WAV write has been attempted and status updated.
	/// </returns>
	public bool StopRecordingFile()
	{
		if ( device == null || device == "none" )
			return false;


		if (UseLoopMode) {
			// Snapshot the circular loop buffer so we can slice out just the
			// portion recorded since startTime.
			AudioClip copyClip = AudioClip.Create("copyClip", loopClip.samples, loopClip.channels, loopClip.frequency,false,false);
			float[] loopSamples = new float[loopClip.samples];
			loopClip.GetData(loopSamples,0);
			copyClip.SetData(loopSamples,0);
//			Brain.GetInstance().QueueAudio(	copyClip,null);
			// get the data from loopClip
			isLoopRecording = false;
			float duration = Time.time-startTime;
			// NOTE(review): this early-out leaves the mic looping — presumably
			// intentional since the loop buffer keeps cycling. Confirm.
			if (duration < 0.5f) return false;

			int sampleCount = (int)(duration*loopClip.frequency);

			loopEndPosition = UnityEngine.Microphone.GetPosition(device);

			// Sanity-check where the mic says it stopped against where the
			// elapsed time says it should have (modulo the circular buffer).
			int expectedEnd = loopStartPosition + sampleCount;
			if (expectedEnd > loopClip.samples) expectedEnd -= loopClip.samples;

			int miscount = expectedEnd - loopEndPosition;
			if (Mathf.Abs(miscount) > 1000)
				Debug.LogWarning("Microphone samples off by "+miscount);

			float[] recordedSamples = new float[sampleCount];
			copyClip.GetData(recordedSamples,loopStartPosition);

			currentClip = AudioClip.Create(loopClip.name, sampleCount, loopClip.channels, loopClip.frequency,false,false);
			currentClip.SetData(recordedSamples,0);
		} 
		else 
		{
			UnityEngine.Microphone.End (device);
		}
//		UnityEngine.Debug.Log("Microphone Device End");
		
		// if length < 0.5 then don't do anything
		if ( (Time.time-startTime) < 0.5f )
			return false;
		
/*		don't play back the recording, unless the command is not recognized...
 * 		if ( goAudioSource == null )
		{
			Camera.main.audio.PlayOneShot(currentClip);
			//Brain.GetInstance().PlayAudio(currentClip);
		}
		else
		{
			UnityEngine.Debug.Log("Microphone Play Clip from AudioSource");
			goAudioSource.clip = currentClip;
			goAudioSource.Play();
		}
*/		
		// check data: count samples above the noise floor (diagnostic only).
		int goodData = 0;
		float[] samples = new float[currentClip.samples * currentClip.channels];
		currentClip.GetData(samples, 0);				
		foreach( float floatVal in samples )
		{
			if ( floatVal > 0.01f )
				goodData++;
		}
//		UnityEngine.Debug.Log("Microphone goodData = <"  + goodData + ">");

		// could just tack this last bit of code onto the memory stream returned by the StopRecordingStream method when called.
		// using a file because I couldnt find the way to marshall the stream into an object implementing IStream for the SAPI to use
		// could create a class that implements IStream and just pass the recording in memory...

		MemoryStream memStream = SaveWav.Save (currentClip);//SaveWav.TrimSilence (currentClip, 0.1f));		
		try
		{
			// using guarantees the FileStream is closed even if the write fails;
			// the old code leaked the handle on exception, which could keep the
			// file locked for the next recording attempt.
			using (FileStream file = new FileStream(filename, FileMode.Create, FileAccess.Write))
			{
				memStream.WriteTo(file);
			}
		}		
		catch
		{
			// SAPI may still have the file open (sharing violation); log and continue.
			Debug.LogError("Error opening spoken input file - sharing ?");
		}
		finally
		{
			memStream.Dispose();
		}

		status = ClipStatus ();

		return true;
	}
开发者ID:MedStarSiTEL,项目名称:UnityTrauma,代码行数:88,代码来源:Microphone.cs

示例7: StartImport

    /// <summary>
    /// Decodes the MP3 at mPath with the native mpg123 bindings and returns it
    /// as an AudioClip named after the file's ID3v1 title.
    /// </summary>
    /// <param name="mPath">Path of the MP3 file to decode.</param>
    /// <returns>The populated AudioClip (possibly truncated if mpg123 under-reported the length).</returns>
    public AudioClip StartImport(string mPath)
    {
        MPGImport.mpg123_init();
        handle_mpg = MPGImport.mpg123_new(null, errPtr);
        try
        {
            x = MPGImport.mpg123_open(handle_mpg, mPath);
            MPGImport.mpg123_getformat(handle_mpg, out rate, out channels, out encoding);
            intRate = rate.ToInt32();
            intChannels = channels.ToInt32();
            intEncoding = encoding.ToInt32();

            MPGImport.mpg123_id3(handle_mpg, out id3v1, out id3v2);
            MPGImport.mpg123_format_none(handle_mpg);
            // 208 presumably selects 16-bit signed output — TODO confirm against the MPGImport bindings.
            MPGImport.mpg123_format(handle_mpg, intRate, intChannels, 208);

            Debug.Log("Getting ID3 info");
            MPGImport.mpg123_id3v1 v1 = (MPGImport.mpg123_id3v1)Marshal.PtrToStructure(id3v1, typeof(MPGImport.mpg123_id3v1));

            FrameSize = MPGImport.mpg123_outblock(handle_mpg);
            byte[] Buffer = new byte[FrameSize];
            lengthSamples = MPGImport.mpg123_length(handle_mpg);

            myClip = AudioClip.Create(new String(v1.title), lengthSamples, intChannels, intRate, false);

            int importIndex = 0;

            while (0 == MPGImport.mpg123_read(handle_mpg, Buffer, FrameSize, out done))
            {
                float[] fArray = ByteToFloat(Buffer);
                // Integer division already happened in the original float version,
                // so an int offset is exactly equivalent and avoids the cast.
                int offset = (importIndex * fArray.Length) / 2;
                if (offset > lengthSamples)
                {
                    Debug.LogWarning("[STED] MP3 file " + mPath + " is of an unexpected length and was truncated.");
                    break; // File was reported as shorter than it is. Salvage what we have and return.
                }
                myClip.SetData(fArray, offset);
                importIndex++;
            }
        }
        catch
        {
            // Attempt to dump any used memory before continuing.
            // TODO: Still holds onto memory when repeatedy failing.
            // Guard: the clip is null if the failure happened before Create.
            if (myClip != null)
                myClip.UnloadAudioData();
            myClip = null;
            // Bare "throw;" preserves the original stack trace;
            // the old "throw ex;" reset it to this frame.
            throw;
        }
        finally
        {
            MPGImport.mpg123_close(handle_mpg);
        }
        return myClip;
    }
开发者ID:pizzaoverhead,项目名称:SoundtrackEditor,代码行数:55,代码来源:MP3Import.cs

示例8: StartImport

    /// <summary>
    /// Decodes the MP3 at mPath with the native mpg123 bindings and returns it
    /// as an AudioClip.
    /// </summary>
    /// <param name="mPath">Path of the MP3 file to decode.</param>
    /// <returns>The populated AudioClip.</returns>
    public AudioClip StartImport(string mPath)
    {
        MPGImport.mpg123_init();
        handle_mpg = MPGImport.mpg123_new(null, errPtr);
        try
        {
            x = MPGImport.mpg123_open(handle_mpg, mPath);
            MPGImport.mpg123_getformat(handle_mpg, out rate, out channels, out encoding);
            intRate = rate.ToInt32();
            intChannels = channels.ToInt32();
            intEncoding = encoding.ToInt32();

            MPGImport.mpg123_id3(handle_mpg, out id3v1, out id3v2);
            MPGImport.mpg123_format_none(handle_mpg);
            // 208 presumably selects 16-bit signed output — TODO confirm against the MPGImport bindings.
            MPGImport.mpg123_format(handle_mpg, intRate, intChannels, 208);

            FrameSize = MPGImport.mpg123_outblock(handle_mpg);
            byte[] Buffer = new byte[FrameSize];
            lengthSamples = MPGImport.mpg123_length(handle_mpg);

            myClip = AudioClip.Create("myClip", lengthSamples, intChannels, intRate, false, false);

            int importIndex = 0;

            while (0 == MPGImport.mpg123_read(handle_mpg, Buffer, FrameSize, out done))
            {
                float[] fArray = ByteToFloat(Buffer);

                // Each decoded frame advances fArray.Length / 2 sample frames.
                int offset = (importIndex * fArray.Length) / 2;
                if (offset > lengthSamples)
                {
                    // mpg123 under-reported the length; writing past the clip end
                    // would throw. Truncate instead — matches the sibling importer.
                    Debug.LogWarning("MP3 file " + mPath + " is of an unexpected length and was truncated.");
                    break;
                }

                myClip.SetData(fArray, offset);

                importIndex++;
            }
        }
        finally
        {
            // Always release the native decoder handle, even if decoding throws;
            // the original leaked it on any exception before mpg123_close.
            MPGImport.mpg123_close(handle_mpg);
        }
        return myClip;
    }


注:本文中的UnityEngine.AudioClip.SetData方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。