本文整理匯總了Java中android.media.AudioTrack.stop方法的典型用法代碼示例。如果您正苦於以下問題:Java AudioTrack.stop方法的具體用法?Java AudioTrack.stop怎麽用?Java AudioTrack.stop使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類android.media.AudioTrack
的用法示例。
在下文中一共展示了AudioTrack.stop方法的5個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。
示例1: run
import android.media.AudioTrack; //導入方法依賴的package包/類
@Override
public void run() {
    super.run();
    isRunning = true;
    // Minimum buffer the hardware accepts for mono 16-bit PCM at rate sr.
    final int bufferSize = AudioTrack.getMinBufferSize(sr,
            AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT);
    // Streaming-mode track: we push synthesized buffers in a loop.
    final AudioTrack track = new AudioTrack(AudioManager.STREAM_MUSIC,
            sr, AudioFormat.CHANNEL_OUT_MONO,
            AudioFormat.ENCODING_PCM_16BIT, bufferSize,
            AudioTrack.MODE_STREAM);
    final short[] pcm = new short[bufferSize];
    final int amplitude = 10000;
    final double twoPi = 2.0 * Math.PI;
    double phase = 0.0;
    // Start playback, then keep the track fed with sine samples.
    track.play();
    while (isRunning) {
        // Re-read the target frequency each buffer so tuning changes take
        // effect on the next block.
        final double freq = tuneFreq;
        for (int i = 0; i < bufferSize; i++) {
            pcm[i] = (short) (amplitude * Math.sin(phase));
            phase += twoPi * freq / sr;
        }
        track.write(pcm, 0, bufferSize);
    }
    track.stop();
    track.release();
}
示例2: encodeMessage
import android.media.AudioTrack; //導入方法依賴的package包/類
/**
 * Encodes the given value with error detection, FSK-modulates it, and plays
 * the resulting tone through an {@link AudioTrack}.
 *
 * Bug fix: the track is configured for ENCODING_PCM_16BIT, i.e. 2 bytes per
 * sample, but the original code wrote 4 bytes per sample with putInt(),
 * which doubled the playback duration and garbled the waveform. Samples are
 * now written as little-endian 16-bit shorts.
 */
private void encodeMessage(int value) {
    // Audio initialization: honor the device's minimum buffer size.
    int audioBufferSize = 4096;
    final int minBufferSize = AudioTrack.getMinBufferSize(AUDIO_SAMPLE_FREQ,
            AudioFormat.CHANNEL_CONFIGURATION_MONO,
            AudioFormat.ENCODING_PCM_16BIT);
    if (audioBufferSize < minBufferSize) {
        audioBufferSize = minBufferSize;
    }
    AudioTrack aT = new AudioTrack(AudioManager.STREAM_MUSIC,
            AUDIO_SAMPLE_FREQ, AudioFormat.CHANNEL_CONFIGURATION_MONO,
            AudioFormat.ENCODING_PCM_16BIT, audioBufferSize,
            AudioTrack.MODE_STREAM);
    aT.play();
    // Error detection encoding.
    Log.i("TAG", "encodeMessage() value=" + value);
    value = ErrorDetection.createMessage(value);
    Log.i("TAG", "encodeMessage() message=" + value);
    // Sound encoding: FSKModule produces one double per PCM sample.
    double[] sound = FSKModule.encode(value);
    // 2 bytes per sample for 16-bit PCM, little-endian as Android expects.
    ByteBuffer buf = ByteBuffer.allocate(2 * sound.length);
    buf.order(ByteOrder.LITTLE_ENDIAN);
    for (int i = 0; i < sound.length; i++) {
        // NOTE(review): assumes FSKModule.encode() output fits the signed
        // 16-bit range [-32768, 32767] — TODO confirm; clamp if it can exceed.
        buf.putShort((short) sound[i]);
    }
    byte[] tone = buf.array();
    // Play message; write() blocks until the whole tone is queued.
    int nBytes = aT.write(tone, 0, tone.length);
    aT.stop();
    aT.release();
}
示例3: onCompleted
import android.media.AudioTrack; //導入方法依賴的package包/類
@Override
public void onCompleted() {
    // Build a fresh AudioTrack each time to work around audio routing issues.
    final AudioTrack track = new AudioTrack.Builder()
            .setAudioFormat(mAudioOutputFormat)
            .setBufferSizeInBytes(mAudioOutputBufferSize)
            .setTransferMode(AudioTrack.MODE_STREAM)
            .build();
    if (mAudioOutputDevice != null) {
        track.setPreferredDevice(mAudioOutputDevice);
    }
    track.setVolume(AudioTrack.getMaxVolume() * mVolume / 100.0f);
    track.play();
    // Tell the callback playback is starting, on the conversation thread.
    mConversationHandler.post(() -> mConversationCallback.onResponseStarted());
    // Drain the buffered assistant responses: notify the callback with each
    // sample, then block until the track has consumed it.
    for (final ByteBuffer audioData : mAssistantResponses) {
        mConversationHandler.post(() -> mConversationCallback.onAudioSample(audioData));
        track.write(audioData, audioData.remaining(),
                AudioTrack.WRITE_BLOCKING);
    }
    mAssistantResponses.clear();
    track.stop();
    track.release();
    mConversationHandler.post(() -> mConversationCallback.onResponseFinished());
    if (mMicrophoneMode == MicrophoneMode.DIALOG_FOLLOW_ON) {
        // Automatically start a new request.
        startConversation();
    } else {
        // The conversation is done.
        mConversationHandler.post(() -> mConversationCallback.onConversationFinished());
    }
}
示例4: run
import android.media.AudioTrack; //導入方法依賴的package包/類
/**
 * Mic-to-speaker loopback: records from the microphone and immediately plays
 * the captured audio back until {@code isRecording} is cleared.
 *
 * Fixes over the original:
 * - The return value of AudioRecord.read() is now honored; previously the
 *   full buffer was written even when read() returned fewer bytes or a
 *   negative error code, replaying stale/garbage data.
 * - AudioRecord and AudioTrack are now release()d; stop() alone leaks the
 *   underlying native audio resources.
 */
public void run()
{
    // from http://www.mail-archive.com/[email protected]/msg76498.html
    isRecording = true;
    android.os.Process.setThreadPriority(android.os.Process.THREAD_PRIORITY_URGENT_AUDIO);
    int buffersize = AudioRecord.getMinBufferSize(11025,
            AudioFormat.CHANNEL_CONFIGURATION_MONO,
            AudioFormat.ENCODING_PCM_16BIT);
    if (buffersize < 1)
    {
        // Parameters not supported by hardware, probably.
        isRecording = false;
        return;
    }
    AudioRecord arec = new AudioRecord(MediaRecorder.AudioSource.MIC,
            11025,
            AudioFormat.CHANNEL_CONFIGURATION_MONO,
            AudioFormat.ENCODING_PCM_16BIT,
            buffersize);
    AudioTrack atrack = new AudioTrack(AudioManager.STREAM_MUSIC,
            11025,
            AudioFormat.CHANNEL_CONFIGURATION_MONO,
            AudioFormat.ENCODING_PCM_16BIT,
            buffersize,
            AudioTrack.MODE_STREAM);
    atrack.setPlaybackRate(11025);
    byte[] buffer = new byte[buffersize];
    arec.startRecording();
    atrack.play();
    while (isRecording) {
        // read() may return fewer bytes than requested, or a negative
        // error code — only forward what was actually captured.
        int bytesRead = arec.read(buffer, 0, buffersize);
        if (bytesRead > 0) {
            try {
                atrack.write(buffer, 0, bytesRead);
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    }
    arec.stop();
    atrack.stop();
    // Free the native audio handles; without release() they leak.
    arec.release();
    atrack.release();
}
示例5: play
import android.media.AudioTrack; //導入方法依賴的package包/類
/**
* Generates a tone at desired frequency and volume.
*
* @return 1 if successful, 0 if not
*/
/**
 * Generates a tone at desired frequency and volume.
 *
 * @return 1 if successful, 0 if not
 */
public int play() {
    try {
        isRunning = true;
        t = new Thread(() -> {
            // Run synthesis at maximum thread priority to avoid underruns.
            Thread.currentThread().setPriority(Thread.MAX_PRIORITY);
            final int bufSize = AudioTrack.getMinBufferSize(sampleRate,
                    AudioFormat.CHANNEL_OUT_MONO,
                    AudioFormat.ENCODING_PCM_16BIT);
            // Streaming AudioTrack fed block-by-block from the loop below.
            final AudioTrack track = new AudioTrack(
                    AudioManager.STREAM_MUSIC, sampleRate,
                    AudioFormat.CHANNEL_OUT_MONO,
                    AudioFormat.ENCODING_PCM_16BIT, bufSize,
                    AudioTrack.MODE_STREAM);
            final short[] pcm = new short[bufSize];
            final double twoPi = 2.0 * Math.PI;
            double phase = 0.0;
            track.play();
            // Re-read volume/frequency every buffer so live changes are heard.
            while (isRunning) {
                final int amp = volume * 128;
                final double freq = frequency * 1.0;
                for (int i = 0; i < bufSize; i++) {
                    pcm[i] = (short) (amp * Math.sin(phase));
                    phase += twoPi * freq / sampleRate;
                }
                track.write(pcm, 0, bufSize);
            }
            track.stop();
            track.release();
        });
        t.start();
    }
    // If error notify consumer
    catch (Exception ex) {
        return 0;
    }
    return 1;
}