本文整理匯總了Java中android.media.AudioTrack.release方法的典型用法代碼示例。如果您正苦於以下問題:Java AudioTrack.release方法的具體用法?Java AudioTrack.release怎麽用?Java AudioTrack.release使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類android.media.AudioTrack
的用法示例。
在下文中一共展示了AudioTrack.release方法的6個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。
示例1: run
import android.media.AudioTrack; //導入方法依賴的package包/類
@Override
public void run() {
    // Synthesizes a continuous sine tone at tuneFreq (Hz) and streams it to an
    // AudioTrack until isRunning is cleared by another thread.
    super.run();
    isRunning = true;
    // NOTE: getMinBufferSize returns a size in BYTES; it is reused here as the
    // chunk length in shorts, which simply makes the chunk twice the minimum —
    // harmless for a blocking MODE_STREAM write.
    int buffsize = AudioTrack.getMinBufferSize(sr,
            AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT);
    // Streaming-mode track for continuous synthesis.
    AudioTrack audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC,
            sr, AudioFormat.CHANNEL_OUT_MONO,
            AudioFormat.ENCODING_PCM_16BIT, buffsize,
            AudioTrack.MODE_STREAM);
    short[] samples = new short[buffsize];
    int amp = 10000;            // peak amplitude within the 16-bit sample range
    double twopi = 2.0 * Math.PI;
    double ph = 0.0;            // running phase accumulator
    try {
        audioTrack.play();
        // Synthesis loop: fill one chunk of sine samples, then block on write().
        while (isRunning) {
            double fr = tuneFreq; // re-read each chunk so tuning changes take effect live
            for (int i = 0; i < buffsize; i++) {
                samples[i] = (short) (amp * Math.sin(ph));
                ph += twopi * fr / sr;
            }
            audioTrack.write(samples, 0, buffsize);
        }
        audioTrack.stop();
    } finally {
        // BUG FIX: release the native AudioTrack resources even when play() or
        // write() throws; the original leaked the track on any exception.
        audioTrack.release();
    }
}
示例2: createCodec
import android.media.AudioTrack; //導入方法依賴的package包/類
@Override
protected MediaCodec createCodec(final MediaExtractor media_extractor, final int track_index, final MediaFormat format)
    throws IOException, IllegalArgumentException {
    // Creates the audio decoder via the superclass, then prepares the
    // AudioTrack that will render the decoded PCM plus a temp byte buffer
    // sized to the codec's first output buffer.
    final MediaCodec codec = super.createCodec(media_extractor, track_index, format);
    if (codec != null) {
        final ByteBuffer[] buffers = codec.getOutputBuffers();
        int sz = buffers[0].capacity();
        if (sz <= 0)
            // Fall back to the input buffer size when the codec reports no capacity.
            sz = mAudioInputBufSize;
        if (DEBUG) Log.v(TAG, "AudioOutputBufSize:" + sz);
        mAudioOutTempBuf = new byte[sz];
        try {
            // 16-bit PCM streaming track; channel mask chosen from the stream's
            // channel count (mono vs. stereo).
            mAudioTrack = new AudioTrack(AudioManager.STREAM_MUSIC,
                    mAudioSampleRate,
                    (mAudioChannels == 1 ? AudioFormat.CHANNEL_OUT_MONO : AudioFormat.CHANNEL_OUT_STEREO),
                    AudioFormat.ENCODING_PCM_16BIT,
                    mAudioInputBufSize,
                    AudioTrack.MODE_STREAM);
            mAudioTrack.play();
        } catch (final Exception e) {
            // Playback could not start: release the half-initialized track so
            // native resources are not leaked, then propagate the failure.
            Log.e(TAG, "failed to start audio track playing", e);
            if (mAudioTrack != null) {
                mAudioTrack.release();
                mAudioTrack = null;
            }
            throw e;
        }
    }
    return codec;
}
示例3: encodeMessage
import android.media.AudioTrack; //導入方法依賴的package包/類
/**
 * Encodes {@code value} with error detection, FSK-modulates it, and plays the
 * resulting tone through a 16-bit PCM AudioTrack.
 *
 * @param value raw message value to transmit acoustically
 */
private void encodeMessage(int value) {
    // audio initialization — honor the device's minimum buffer size
    int AUDIO_BUFFER_SIZE = 4096;
    int minBufferSize = AudioTrack.getMinBufferSize(AUDIO_SAMPLE_FREQ,
            AudioFormat.CHANNEL_CONFIGURATION_MONO,
            AudioFormat.ENCODING_PCM_16BIT);
    if (AUDIO_BUFFER_SIZE < minBufferSize)
        AUDIO_BUFFER_SIZE = minBufferSize;
    AudioTrack aT = new AudioTrack(AudioManager.STREAM_MUSIC,
            AUDIO_SAMPLE_FREQ, AudioFormat.CHANNEL_CONFIGURATION_MONO,
            AudioFormat.ENCODING_PCM_16BIT, AUDIO_BUFFER_SIZE,
            AudioTrack.MODE_STREAM);
    try {
        aT.play();
        // error detection encoding
        Log.i("TAG", "encodeMessage() value=" + value);
        value = ErrorDetection.createMessage(value);
        Log.i("TAG", "encodeMessage() message=" + value);
        // sound encoding
        double[] sound = FSKModule.encode(value);
        // BUG FIX: the track is ENCODING_PCM_16BIT, i.e. 2 bytes per sample.
        // The original wrote each sample with putInt() into a 4*length buffer,
        // injecting two stray bytes per sample and corrupting the waveform.
        // Write little-endian 16-bit samples instead.
        ByteBuffer buf = ByteBuffer.allocate(2 * sound.length);
        buf.order(ByteOrder.LITTLE_ENDIAN);
        for (int i = 0; i < sound.length; i++) {
            buf.putShort((short) sound[i]);
        }
        byte[] tone = buf.array();
        // play message — write() blocks until the whole tone is queued
        aT.write(tone, 0, tone.length);
        aT.stop();
    } finally {
        // Release native resources even if play()/write() throws.
        aT.release();
    }
}
示例4: onCompleted
import android.media.AudioTrack; //導入方法依賴的package包/類
@Override
public void onCompleted() {
    // Streaming of the Assistant response is complete: play back every
    // buffered audio chunk, notify the conversation callback at each stage,
    // and optionally start a follow-on request.
    // create a new AudioTrack to workaround audio routing issues.
    AudioTrack audioTrack = new AudioTrack.Builder()
            .setAudioFormat(mAudioOutputFormat)
            .setBufferSizeInBytes(mAudioOutputBufferSize)
            .setTransferMode(AudioTrack.MODE_STREAM)
            .build();
    if (mAudioOutputDevice != null) {
        audioTrack.setPreferredDevice(mAudioOutputDevice);
    }
    // Scale track volume by the configured percentage (mVolume appears to be 0-100).
    audioTrack.setVolume(AudioTrack.getMaxVolume() * mVolume / 100.0f);
    audioTrack.play();
    mConversationHandler.post(new Runnable() {
        @Override
        public void run() {
            mConversationCallback.onResponseStarted();
        }
    });
    for (ByteBuffer audioData : mAssistantResponses) {
        final ByteBuffer buf = audioData;
        // Hand each chunk to the callback on the conversation thread before
        // playing it here.
        mConversationHandler.post(new Runnable() {
            @Override
            public void run() {
                mConversationCallback.onAudioSample(buf);
            }
        });
        // WRITE_BLOCKING: returns only after the whole buffer is queued.
        audioTrack.write(buf, buf.remaining(),
                AudioTrack.WRITE_BLOCKING);
    }
    mAssistantResponses.clear();
    audioTrack.stop();
    audioTrack.release();
    mConversationHandler.post(new Runnable() {
        @Override
        public void run() {
            mConversationCallback.onResponseFinished();
        }
    });
    if (mMicrophoneMode == MicrophoneMode.DIALOG_FOLLOW_ON) {
        // Automatically start a new request
        startConversation();
    } else {
        // The conversation is done
        mConversationHandler.post(new Runnable() {
            @Override
            public void run() {
                mConversationCallback.onConversationFinished();
            }
        });
    }
}
示例5: onMarkerReached
import android.media.AudioTrack; //導入方法依賴的package包/類
@Override
public void onMarkerReached(AudioTrack completedTrack) {
    // The playback-position marker fired: discard any still-queued data and
    // free the track's native resources.
    completedTrack.flush();
    completedTrack.release();
    Timber.d("Playback Complete");
}
示例6: play
import android.media.AudioTrack; //導入方法依賴的package包/類
/**
* Generates a tone at desired frequency and volume.
*
* @return 1 if successful, 0 if not
*/
public int play() {
    // Spawns a max-priority synthesis thread that streams a sine tone at
    // `frequency` Hz scaled by `volume` until isRunning is cleared.
    try {
        isRunning = true;
        t = new Thread() {
            @Override
            public void run() {
                // Highest priority to minimize the chance of audio underruns.
                setPriority(Thread.MAX_PRIORITY);
                int buffsize = AudioTrack.getMinBufferSize(sampleRate,
                        AudioFormat.CHANNEL_OUT_MONO,
                        AudioFormat.ENCODING_PCM_16BIT);
                // Streaming-mode track for continuous synthesis.
                AudioTrack audioTrack = new AudioTrack(
                        AudioManager.STREAM_MUSIC, sampleRate,
                        AudioFormat.CHANNEL_OUT_MONO,
                        AudioFormat.ENCODING_PCM_16BIT, buffsize,
                        AudioTrack.MODE_STREAM);
                short[] samples = new short[buffsize];
                double twopi = 2.0 * Math.PI;
                double ph = 0.0; // running phase accumulator
                try {
                    audioTrack.play();
                    while (isRunning) {
                        // Re-read volume and frequency every chunk so changes
                        // made from other threads take effect live.
                        int amp = volume * 128;
                        double fr = frequency;
                        for (int i = 0; i < buffsize; i++) {
                            samples[i] = (short) (amp * Math.sin(ph));
                            ph += twopi * fr / sampleRate;
                        }
                        audioTrack.write(samples, 0, buffsize);
                    }
                    audioTrack.stop();
                } finally {
                    // BUG FIX: release native resources even if play()/write()
                    // throws; the original leaked the track on any exception.
                    audioTrack.release();
                }
            }
        };
        t.start();
    }
    // If error notify consumer
    catch (Exception ex) {
        return 0;
    }
    return 1;
}