

Java AudioTrack.write Method Code Examples

This article collects typical usage examples of the Java android.media.AudioTrack.write method. If you are wondering what AudioTrack.write does, how to call it, or what it looks like in real code, the hand-picked examples below should help. You can also browse further usage examples of the enclosing class, android.media.AudioTrack.


A total of 12 code examples of the AudioTrack.write method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
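Before the individual examples, here is a minimal, self-contained sketch of the streaming pattern most of them follow: create an AudioTrack in MODE_STREAM, call play(), then push 16-bit PCM data with write(). This is not taken from any of the projects below; the class and method names (PcmPlayer.play) and the mono format are made up for illustration.

import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioTrack;

public final class PcmPlayer {
    /** Plays a buffer of 16-bit mono PCM samples at the given sample rate. */
    public static void play(short[] pcm, int sampleRate) {
        int minBuf = AudioTrack.getMinBufferSize(sampleRate,
                AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT);
        AudioTrack track = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRate,
                AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT,
                minBuf, AudioTrack.MODE_STREAM);
        track.play();
        track.write(pcm, 0, pcm.length);   // blocking write in MODE_STREAM
        track.stop();
        track.release();
    }
}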

Example 1: playSound

import android.media.AudioTrack; // import the class this method depends on
/**
 * This method plays the sound data in the specified buffer.
 *
 * @param buffer specifies the sound data buffer.
 */
public void playSound(short[] buffer)
{
    final String funcName = "playSound";

    if (debugEnabled)
    {
        dbgTrace.traceEnter(funcName, TrcDbgTrace.TraceLevel.API);
        dbgTrace.traceExit(funcName, TrcDbgTrace.TraceLevel.API);
    }

    audioTrack = new AudioTrack(
            AudioManager.STREAM_MUSIC,
            sampleRate,
            AudioFormat.CHANNEL_OUT_MONO,
            AudioFormat.ENCODING_PCM_16BIT,
            buffer.length*2,    //buffer length in bytes
            AudioTrack.MODE_STATIC);
    audioTrack.write(buffer, 0, buffer.length);
    audioTrack.setNotificationMarkerPosition(buffer.length);
    audioTrack.setPlaybackPositionUpdateListener(this);
    audioTrack.play();
    playing = true;
}
 
Developer: trc492, Project: Ftc2018RelicRecovery, Lines: 29, Source: FtcAndroidTone.java
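Example 1 registers the enclosing class as its own playback listener via setPlaybackPositionUpdateListener(this) and sets a marker at the end of the buffer, so the class is presumably an AudioTrack.OnPlaybackPositionUpdateListener. A sketch of the two callbacks such a class would implement follows; this is not the actual FtcAndroidTone.java code, and the bodies are only an assumption about how the playing flag might be cleared.

@Override
public void onMarkerReached(AudioTrack track)
{
    // The marker was set to buffer.length frames, so this fires once playback completes.
    playing = false;
    track.release();
}

@Override
public void onPeriodicNotification(AudioTrack track)
{
    // No periodic update rate was configured, so nothing to do here.
}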

Example 2: run

import android.media.AudioTrack; // import the class this method depends on
@Override
public void run() {
    super.run();
    isRunning = true;
    int buffsize = AudioTrack.getMinBufferSize(sr,
            AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT);
    // create an audiotrack object
    AudioTrack audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC,
            sr, AudioFormat.CHANNEL_OUT_MONO,
            AudioFormat.ENCODING_PCM_16BIT, buffsize,
            AudioTrack.MODE_STREAM);

    short samples[] = new short[buffsize];
    int amp = 10000;
    double twopi = 8.*Math.atan(1.);
    double ph = 0.0;

    // start audio
    audioTrack.play();

    // synthesis loop
    while(isRunning){
        double fr = tuneFreq;
        for(int i=0; i < buffsize; i++){
            samples[i] = (short) (amp*Math.sin(ph));
            ph += twopi*fr/sr;
        }
        audioTrack.write(samples, 0, buffsize);
    }
    audioTrack.stop();
    audioTrack.release();
}
 
Developer: karlotoy, Project: perfectTune, Lines: 33, Source: TuneThread.java

Example 3: generateTrack

import android.media.AudioTrack; // import the class this method depends on
public AudioTrack generateTrack(int sampleRate, short[] buf, int len) {
    int end = len;

    int c = 0;

    if (RawSamples.CHANNEL_CONFIG == AudioFormat.CHANNEL_IN_MONO)
        c = AudioFormat.CHANNEL_OUT_MONO;

    if (RawSamples.CHANNEL_CONFIG == AudioFormat.CHANNEL_IN_STEREO)
        c = AudioFormat.CHANNEL_OUT_STEREO;

    // old phones bug.
    // http://stackoverflow.com/questions/27602492
    //
    // with MODE_STATIC setNotificationMarkerPosition not called
    AudioTrack track = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRate,
            c, RawSamples.AUDIO_FORMAT,
            len * (Short.SIZE / 8), AudioTrack.MODE_STREAM);
    track.write(buf, 0, len);
    if (track.setNotificationMarkerPosition(end) != AudioTrack.SUCCESS)
        throw new RuntimeException("unable to set marker");
    return track;
}
 
Developer: NandagopalR, Project: Android-Audio-Recorder, Lines: 24, Source: Sound.java

Example 4: generateTone

import android.media.AudioTrack; // import the class this method depends on
static private AudioTrack generateTone(double freqHz, int durationMs) {
	int count = (int) (44100.0 * 2.0 * (durationMs / 1000.0)) & ~1;
	short[] samples = new short[count];
	int size = count * (Short.SIZE / 8);
	Log.d(TAG, freqHz + "Hz for " + durationMs + "ms = " + count + " samples at 44.1Khz 2ch = " + size + " bytes");
	for (int i = 0; i < count; i += 2) {
		short sample = (short) (Math.sin(2 * Math.PI * i / (44100.0 / freqHz)) * 0x7FFF * .75);
		samples[i + 0] = sample;
		samples[i + 1] = sample;
	}
	AudioTrack track = new AudioTrack(AudioManager.STREAM_MUSIC, 44100,
			AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT,
			size, AudioTrack.MODE_STATIC);
	track.setNotificationMarkerPosition(count / 2);
	track.write(samples, 0, count);
	return track;
}
 
Developer: tharvey, Project: BlocklyBot, Lines: 18, Source: Tone.java

Example 5: encodeMessage

import android.media.AudioTrack; // import the class this method depends on
private void encodeMessage(int value) {
	// audio initialization
	int AUDIO_BUFFER_SIZE = 4096;
	int minBufferSize = AudioTrack.getMinBufferSize(AUDIO_SAMPLE_FREQ,
			AudioFormat.CHANNEL_CONFIGURATION_MONO,
			AudioFormat.ENCODING_PCM_16BIT);
	if (AUDIO_BUFFER_SIZE < minBufferSize)
		AUDIO_BUFFER_SIZE = minBufferSize;
	AudioTrack aT = new AudioTrack(AudioManager.STREAM_MUSIC,
			AUDIO_SAMPLE_FREQ, AudioFormat.CHANNEL_CONFIGURATION_MONO,
			AudioFormat.ENCODING_PCM_16BIT, AUDIO_BUFFER_SIZE,
			AudioTrack.MODE_STREAM);
	aT.play();

	// error detection encoding
	Log.i("TAG", "encodeMessage() value=" + value);
	value = ErrorDetection.createMessage(value);
	Log.i("TAG", "encodeMessage() message=" + value);
	// sound encoding
	double[] sound = FSKModule.encode(value);

	// note: putInt writes 4 bytes per sample even though the track was opened
	// with ENCODING_PCM_16BIT, so every other 16-bit frame is zero/sign padding
	ByteBuffer buf = ByteBuffer.allocate(4 * sound.length);
	buf.order(ByteOrder.LITTLE_ENDIAN);
	for (int i = 0; i < sound.length; i++) {
		int yInt = (int) sound[i];
		buf.putInt(yInt);
	}
	byte[] tone = buf.array();
	// play message
	int nBytes = aT.write(tone, 0, tone.length);
	aT.stop();
	aT.release();
}
 
Developer: quake0day, Project: Jigglypuff, Lines: 34, Source: SenderActivity.java

Example 6: playSound

import android.media.AudioTrack; // import the class this method depends on
void playSound() {
    AudioTrack audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC,
            sampleRate, AudioFormat.CHANNEL_CONFIGURATION_MONO,
            AudioFormat.ENCODING_PCM_16BIT, numSamples,
            AudioTrack.MODE_STATIC);
    audioTrack.write(generatedSnd, 0, generatedSnd.length);
    audioTrack.play();
}
 
Developer: zh-h, Project: IoTApp, Lines: 9, Source: PlaySoundActivity.java

Example 7: send

import android.media.AudioTrack; // import the class this method depends on
public void send(byte[] bytes_pkg) {
    int bufsize = AudioTrack.getMinBufferSize(8000,
            AudioFormat.CHANNEL_OUT_MONO,
            AudioFormat.ENCODING_PCM_16BIT);

    AudioTrack trackplayer = new AudioTrack(AudioManager.STREAM_MUSIC,
            8000, AudioFormat.CHANNEL_OUT_MONO,
            AudioFormat.ENCODING_PCM_16BIT, bufsize,
            AudioTrack.MODE_STREAM);

    trackplayer.play();
    trackplayer.write(bytes_pkg, 0, bytes_pkg.length);
}
 
Developer: m-abboud, Project: hair-o-matic, Lines: 14, Source: AudioDeviceCom.java

Example 8: onCompleted

import android.media.AudioTrack; // import the class this method depends on
@Override
public void onCompleted() {
    // create a new AudioTrack to workaround audio routing issues.
    AudioTrack audioTrack = new AudioTrack.Builder()
            .setAudioFormat(mAudioOutputFormat)
            .setBufferSizeInBytes(mAudioOutputBufferSize)
            .setTransferMode(AudioTrack.MODE_STREAM)
            .build();
    if (mAudioOutputDevice != null) {
        audioTrack.setPreferredDevice(mAudioOutputDevice);
    }
    audioTrack.setVolume(AudioTrack.getMaxVolume() * mVolume / 100.0f);
    audioTrack.play();
    mConversationHandler.post(new Runnable() {
        @Override
        public void run() {
            mConversationCallback.onResponseStarted();
        }
    });
    for (ByteBuffer audioData : mAssistantResponses) {
        final ByteBuffer buf = audioData;
        mConversationHandler.post(new Runnable() {
            @Override
            public void run() {
                mConversationCallback.onAudioSample(buf);
            }
        });
        audioTrack.write(buf, buf.remaining(),
                AudioTrack.WRITE_BLOCKING);
    }
    mAssistantResponses.clear();
    audioTrack.stop();
    audioTrack.release();

    mConversationHandler.post(new Runnable() {
        @Override
        public void run() {
            mConversationCallback.onResponseFinished();
        }
    });
    if (mMicrophoneMode == MicrophoneMode.DIALOG_FOLLOW_ON) {
        // Automatically start a new request
        startConversation();
    } else {
        // The conversation is done
        mConversationHandler.post(new Runnable() {
            @Override
            public void run() {
                mConversationCallback.onConversationFinished();
            }
        });
    }
}
 
Developer: androidthings, Project: sample-googleassistant, Lines: 54, Source: EmbeddedAssistant.java
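The fields mAudioOutputFormat and mAudioOutputBufferSize are initialized elsewhere in EmbeddedAssistant.java and are not shown above. A sketch of how they could be set up with AudioFormat.Builder and getMinBufferSize follows; the 16 kHz mono PCM format is an assumption for illustration, not necessarily what the sample uses.

AudioFormat mAudioOutputFormat = new AudioFormat.Builder()
        .setChannelMask(AudioFormat.CHANNEL_OUT_MONO)
        .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
        .setSampleRate(16000)               // assumed sample rate
        .build();
int mAudioOutputBufferSize = AudioTrack.getMinBufferSize(
        mAudioOutputFormat.getSampleRate(),
        AudioFormat.CHANNEL_OUT_MONO,
        AudioFormat.ENCODING_PCM_16BIT);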

Example 9: writeOnLollipop

import android.media.AudioTrack; // import the class this method depends on
@TargetApi(21)
private int writeOnLollipop(AudioTrack audioTrack, ByteBuffer byteBuffer, int sizeInBytes) {
  return audioTrack.write(byteBuffer, sizeInBytes, AudioTrack.WRITE_BLOCKING);
}
 
Developer: Piasy, Project: AppRTC-Android, Lines: 5, Source: WebRtcAudioTrack.java

Example 10: writePreLollipop

import android.media.AudioTrack; // import the class this method depends on
private int writePreLollipop(AudioTrack audioTrack, ByteBuffer byteBuffer, int sizeInBytes) {
  return audioTrack.write(byteBuffer.array(), byteBuffer.arrayOffset(), sizeInBytes);
}
 
Developer: Piasy, Project: AppRTC-Android, Lines: 4, Source: WebRtcAudioTrack.java
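Examples 9 and 10 are the two halves of an API-level split in WebRtcAudioTrack: the ByteBuffer overload of write() with WRITE_BLOCKING requires API 21, while older devices must fall back to the byte-array overload. A sketch of how a caller might choose between them follows; the method name writeBytes and the exact dispatch are assumptions, not the project's actual code.

// android.os.Build is needed for the version check.
private int writeBytes(AudioTrack audioTrack, ByteBuffer byteBuffer, int sizeInBytes) {
  if (android.os.Build.VERSION.SDK_INT >= 21) {
    return writeOnLollipop(audioTrack, byteBuffer, sizeInBytes);
  } else {
    return writePreLollipop(audioTrack, byteBuffer, sizeInBytes);
  }
}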

Example 11: run

import android.media.AudioTrack; // import the class this method depends on
public void run()
{
	// from http://www.mail-archive.com/[email protected]/msg76498.html

	isRecording = true;

	android.os.Process.setThreadPriority(android.os.Process.THREAD_PRIORITY_URGENT_AUDIO);

	int buffersize = AudioRecord.getMinBufferSize(11025,
			AudioFormat.CHANNEL_CONFIGURATION_MONO,
			AudioFormat.ENCODING_PCM_16BIT);

	if (buffersize < 1)
	{
		// parameters not supported by hardware, probably
		isRecording = false;
		return;
	}

	AudioRecord arec = new AudioRecord(MediaRecorder.AudioSource.MIC,
			11025,
			AudioFormat.CHANNEL_CONFIGURATION_MONO,
			AudioFormat.ENCODING_PCM_16BIT,
			buffersize);

	AudioTrack atrack = new AudioTrack(AudioManager.STREAM_MUSIC,
			11025,
			AudioFormat.CHANNEL_CONFIGURATION_MONO,
			AudioFormat.ENCODING_PCM_16BIT,
			buffersize, //ba.size(),
			AudioTrack.MODE_STREAM);

	atrack.setPlaybackRate(11025);

	byte[] buffer = new byte[buffersize];

	arec.startRecording();
	atrack.play();

	while (isRecording) {
		arec.read(buffer, 0, buffersize);
		try {
			atrack.write(buffer, 0, buffer.length);
		} catch (Exception e) {
			e.printStackTrace();
		}
	}

	arec.stop();
	atrack.stop();
}
 
Developer: quake0day, Project: Jigglypuff, Lines: 53, Source: Loopback.java

Example 12: play

import android.media.AudioTrack; // import the class this method depends on
/**
 * Generates a tone at desired frequency and volume.
 *
 * @return  1 if successful, 0 if not
 */
public int play() {

    try {
        isRunning = true;
        t = new Thread() {
            public void run() {
                // set process priority
                setPriority(Thread.MAX_PRIORITY);
                int buffsize = AudioTrack.getMinBufferSize(sampleRate,
                        AudioFormat.CHANNEL_OUT_MONO,
                        AudioFormat.ENCODING_PCM_16BIT);
                // create an audiotrack object
                AudioTrack audioTrack = new AudioTrack(
                        AudioManager.STREAM_MUSIC, sampleRate,
                        AudioFormat.CHANNEL_OUT_MONO,
                        AudioFormat.ENCODING_PCM_16BIT, buffsize,
                        AudioTrack.MODE_STREAM);

                short samples[] = new short[buffsize];
                double twopi = 8. * Math.atan(1.);
                double ph = 0.0;
                // start audio
                audioTrack.play();

                while (isRunning) {
                    int amp = volume * 128;
                    double fr = frequency * 1.0;
                    for (int i = 0; i < buffsize; i++) {
                        samples[i] = (short) (amp * Math.sin(ph));
                        ph += twopi * fr / sampleRate;
                    }
                    audioTrack.write(samples, 0, buffsize);
                }
                audioTrack.stop();
                audioTrack.release();
            }
        };
        t.start();

    }

    // If error notify consumer
    catch (Exception ex) {
        return 0;
    }

    return 1;
}
 
Developer: sdesalas, Project: cordova-plugin-tonegenerator, Lines: 54, Source: ToneGenerator.java


Note: The android.media.AudioTrack.write examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright in the source code remains with the original authors. Please consult each project's license before distributing or using the code, and do not republish this article without permission.