本文整理匯總了Java中android.media.AudioTrack.play方法的典型用法代碼示例。如果您正苦於以下問題:Java AudioTrack.play方法的具體用法?Java AudioTrack.play怎麽用?Java AudioTrack.play使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類android.media.AudioTrack
的用法示例。
在下文中一共展示了AudioTrack.play方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。
示例1: playSound
import android.media.AudioTrack; //導入方法依賴的package包/類
/**
 * Plays the sound data in the specified buffer. The clip is loaded into a
 * MODE_STATIC AudioTrack, a playback-complete marker is set at the end of
 * the buffer, and playback is started; {@code this} receives the
 * onMarkerReached callback when the clip finishes.
 *
 * @param buffer specifies the sound data buffer (16-bit PCM mono samples).
 */
public void playSound(short[] buffer)
{
final String funcName = "playSound";
if (debugEnabled)
{
// NOTE(review): traceEnter and traceExit are both emitted up front, before
// the method body runs — confirm this is the intended tracing style.
dbgTrace.traceEnter(funcName, TrcDbgTrace.TraceLevel.API);
dbgTrace.traceExit(funcName, TrcDbgTrace.TraceLevel.API);
}
audioTrack = new AudioTrack(
AudioManager.STREAM_MUSIC,
sampleRate,
AudioFormat.CHANNEL_OUT_MONO,
AudioFormat.ENCODING_PCM_16BIT,
buffer.length*2, //buffer length in bytes (2 bytes per 16-bit sample)
AudioTrack.MODE_STATIC);
audioTrack.write(buffer, 0, buffer.length);
// Marker position is in frames; for mono this equals the sample count,
// so the listener fires when the last sample has played.
audioTrack.setNotificationMarkerPosition(buffer.length);
audioTrack.setPlaybackPositionUpdateListener(this);
audioTrack.play();
playing = true;
}
示例2: onCreate
import android.media.AudioTrack; //導入方法依賴的package包/類
@Override
public void onCreate() {
    super.onCreate();
    mHandler = new Handler();
    fetchAccessToken();
    // BUGFIX: query the minimum buffer size with the same OUTPUT configuration
    // the track is created with. The original passed CHANNEL_IN_STEREO (an
    // input channel mask), which does not match the mono output track below.
    int outputBufferSize = AudioTrack.getMinBufferSize(16000,
            AudioFormat.CHANNEL_OUT_MONO,
            AudioFormat.ENCODING_PCM_16BIT);
    try {
        // BUGFIX: AudioManager.USE_DEFAULT_STREAM_TYPE is not a valid
        // streamType for the AudioTrack constructor — it throws
        // IllegalArgumentException, which the catch below silently swallowed,
        // leaving mAudioTrack null. Use STREAM_MUSIC explicitly.
        mAudioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, 16000,
                AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT,
                outputBufferSize, AudioTrack.MODE_STREAM);
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
            // setVolume(float) is only available from API 21 onward.
            mAudioTrack.setVolume(DEFAULT_VOLUME);
        }
        mAudioTrack.play();
    } catch (Exception e) {
        e.printStackTrace();
    }
}
示例3: run
import android.media.AudioTrack; //導入方法依賴的package包/類
@Override
public void run() {
    super.run();
    isRunning = true;

    // Size the chunk from the platform minimum for mono 16-bit PCM at sr Hz.
    final int chunkLen = AudioTrack.getMinBufferSize(sr,
            AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT);

    // Streaming track that we feed one synthesized chunk at a time.
    AudioTrack track = new AudioTrack(AudioManager.STREAM_MUSIC, sr,
            AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT,
            chunkLen, AudioTrack.MODE_STREAM);

    short[] chunk = new short[chunkLen];
    final int amplitude = 10000;
    final double twoPi = 8. * Math.atan(1.);
    double phase = 0.0;

    track.play();
    // Synthesis loop: re-read the target frequency every pass so the tone can
    // be retuned while running; keep writing until asked to stop.
    while (isRunning) {
        double freq = tuneFreq;
        for (int i = 0; i < chunkLen; i++) {
            chunk[i] = (short) (amplitude * Math.sin(phase));
            phase += twoPi * freq / sr;
        }
        track.write(chunk, 0, chunkLen);
    }
    track.stop();
    track.release();
}
示例4: PWave
import android.media.AudioTrack; //導入方法依賴的package包/類
public PWave(AppRunner appRunner) {
    super(appRunner);
    appRunner.whatIsRunning.add(this);

    // Ask the platform for the smallest workable buffer for mono 16-bit PCM
    // at our sample rate, and allocate the sample array to match.
    buffsize = AudioTrack.getMinBufferSize(mSampleRate,
            AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT);
    samples = new short[buffsize];

    // Streaming-mode track; audio is pushed into it after construction.
    audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, mSampleRate,
            AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT,
            buffsize, AudioTrack.MODE_STREAM);
    audioTrack.play();
}
示例5: start
import android.media.AudioTrack; //導入方法依賴的package包/類
/**
 * Starts playing a sine tone at the given frequency, replacing any tone
 * that is currently playing.
 *
 * @param rate the tone frequency in Hz; values &lt;= 0 are ignored.
 */
public void start(int rate) {
    stop();
    // Guard clause replaces the original trailing `else { return; }`.
    if (rate <= 0) {
        return;
    }
    Hz = rate;
    waveLen = RATE / Hz;   // samples per period
    length = waveLen * Hz; // total samples (~one second of audio)
    // CHANNEL_OUT_STEREO is the documented replacement for the deprecated
    // CHANNEL_CONFIGURATION_STEREO alias, so the @SuppressWarnings is no
    // longer needed.
    audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, RATE,
            AudioFormat.CHANNEL_OUT_STEREO,
            AudioFormat.ENCODING_PCM_8BIT, length, AudioTrack.MODE_STREAM);
    // Generate the sine wave table.
    wave = SinWave.sin(wave, waveLen, length);
    // The AudioTrack constructor never returns null, so the original
    // post-construction null check was dead code.
    audioTrack.play();
}
示例6: createAudioTrack
import android.media.AudioTrack; //導入方法依賴的package包/類
/**
 * Builds a streaming AudioTrack for 16-bit PCM at the given sample rate,
 * starts it playing, and returns it.
 */
private AudioTrack createAudioTrack(int sampleRate) {
    final int encoding = AudioFormat.ENCODING_PCM_16BIT;
    // Smallest buffer (in bytes) the platform will accept for this config.
    final int minBufferSize = getMinBufferSize(sampleRate, mChannelConfig, encoding);
    Log.d(TAG, "Decoder-AudioTrack-minBufferSize=" + minBufferSize);

    AudioTrack track = new AudioTrack(mStreamType, sampleRate, mChannelConfig,
            encoding, minBufferSize, AudioTrack.MODE_STREAM);
    track.play();
    return track;
}
示例7: AndroidAudioPlayer
import android.media.AudioTrack; //導入方法依賴的package包/類
/**
 * Constructs a new AndroidAudioPlayer from an audio format, buffer size and
 * stream type.
 *
 * @param audioFormat         the audio format of the stream this player will
 *                            process; must be exactly one channel, PCM 16 bit.
 * @param bufferSizeInSamples the requested buffer size, in samples.
 * @param streamType          the Android audio stream type the internal
 *                            AudioTrack should use, e.g.
 *                            {@link AudioManager#STREAM_MUSIC}.
 * @throws IllegalArgumentException if the format is not mono or the requested
 *                                  buffer size is below the platform minimum.
 * @see AudioTrack
 */
public AndroidAudioPlayer(TarsosDSPAudioFormat audioFormat, int bufferSizeInSamples, int streamType) {
    if (audioFormat.getChannels() != 1) {
        throw new IllegalArgumentException("TarsosDSP only supports mono audio channel count: " + audioFormat.getChannels());
    }
    int sampleRate = (int) audioFormat.getSampleRate();
    int bytesPerSample = audioFormat.getSampleSizeInBits() / 8;
    // Buffer size in bytes = samples * bytes per sample (2 for 16-bit PCM).
    int bufferSizeInBytes = bufferSizeInSamples * bytesPerSample;

    // In MODE_STREAM, AudioTrack fails to initialize when its buffer is
    // smaller than getMinBufferSize(), so reject undersized requests up front.
    int minBufferSizeInBytes = AudioTrack.getMinBufferSize(sampleRate, AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT);
    if (minBufferSizeInBytes > bufferSizeInBytes) {
        throw new IllegalArgumentException("The buffer size should be at least " + (minBufferSizeInBytes / bytesPerSample) + " (samples) according to AudioTrack.getMinBufferSize().");
    }

    // http://developer.android.com/reference/android/media/AudioTrack.html#AudioTrack(int, int, int, int, int, int)
    audioTrack = new AudioTrack(streamType, sampleRate, AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT, bufferSizeInBytes, AudioTrack.MODE_STREAM);
    audioTrack.play();
}
示例8: audioDecoderTest
import android.media.AudioTrack; //導入方法依賴的package包/類
/**
 * Prepares a decoder for the given file, sizes a stereo 16-bit streaming
 * AudioTrack from the decoded sample rate, and starts both.
 */
public void audioDecoderTest(String filePath) throws IOException {
    // The decoder must be prepared first so its sample rate is known.
    AudioDecoder decoder = new AudioDecoder(this, this);
    decoder.initExtractor(filePath);
    decoder.prepareAudio();

    final int minBuffer = AudioTrack.getMinBufferSize(decoder.getSampleRate(),
            AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT);
    audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, decoder.getSampleRate(),
            AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT,
            minBuffer, AudioTrack.MODE_STREAM);
    audioTrack.play();
    decoder.start();
}
示例9: init
import android.media.AudioTrack; //導入方法依賴的package包/類
@Override
public void init(int samples) {
    // NOTE(review): the 'samples' parameter is unused; the buffer is sized to
    // 2.5 seconds of audio at the configured sample rate instead — confirm
    // this is intentional.
    mAudioBuffer = new short[(5 * (int) mSampleRate) / 2];
    mAudioTrack = new AudioTrack(AudioManager.STREAM_MUSIC,
            (int) mSampleRate, AudioFormat.CHANNEL_OUT_MONO,
            AudioFormat.ENCODING_PCM_16BIT,
            mAudioBuffer.length * 2, // buffer length in bytes (2 per short)
            AudioTrack.MODE_STREAM);
    mAudioTrack.play();
}
示例10: createCodec
import android.media.AudioTrack; //導入方法依賴的package包/類
/**
 * Creates the audio decoder via the superclass, then sets up the AudioTrack
 * that will render its output. On any track-setup failure the track is
 * released before the exception is rethrown.
 */
@Override
protected MediaCodec createCodec(final MediaExtractor media_extractor, final int track_index, final MediaFormat format)
throws IOException, IllegalArgumentException {
final MediaCodec codec = super.createCodec(media_extractor, track_index, format);
if (codec != null) {
// Size the temp output buffer from the codec's first output buffer,
// falling back to mAudioInputBufSize when that capacity is unusable.
final ByteBuffer[] buffers = codec.getOutputBuffers();
int sz = buffers[0].capacity();
if (sz <= 0)
sz = mAudioInputBufSize;
if (DEBUG) Log.v(TAG, "AudioOutputBufSize:" + sz);
mAudioOutTempBuf = new byte[sz];
try {
// Streaming 16-bit PCM track matching the decoded stream's rate and
// channel count (mono vs. stereo).
mAudioTrack = new AudioTrack(AudioManager.STREAM_MUSIC,
mAudioSampleRate,
(mAudioChannels == 1 ? AudioFormat.CHANNEL_OUT_MONO : AudioFormat.CHANNEL_OUT_STEREO),
AudioFormat.ENCODING_PCM_16BIT,
mAudioInputBufSize,
AudioTrack.MODE_STREAM);
mAudioTrack.play();
} catch (final Exception e) {
// Release the half-initialized track before propagating the failure so
// no native audio resources leak.
Log.e(TAG, "failed to start audio track playing", e);
if (mAudioTrack != null) {
mAudioTrack.release();
mAudioTrack = null;
}
throw e;
}
}
return codec;
}
示例11: start
import android.media.AudioTrack; //導入方法依賴的package包/類
@Override
public void start() {
    minBufferSize = AudioTrack.getMinBufferSize(frameRate,
            AudioFormat.CHANNEL_OUT_STEREO,
            AudioFormat.ENCODING_PCM_16BIT);
    System.out.println("Audio minBufferSize = " + minBufferSize);

    // Use 1.5x the minimum, rounded down to a multiple of 4 bytes
    // (one stereo 16-bit frame).
    bufferSize = (3 * (minBufferSize / 2)) & ~3;
    System.out.println("Audio bufferSize = " + bufferSize);

    audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, frameRate,
            AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT,
            bufferSize, AudioTrack.MODE_STREAM);
    audioTrack.play();
}
示例12: encodeMessage
import android.media.AudioTrack; //導入方法依賴的package包/類
/**
 * Encodes {@code value} as an FSK audio message (after adding
 * error-detection bits) and plays it on the music stream.
 *
 * @param value the raw message value to encode and transmit.
 */
private void encodeMessage(int value) {
    // audio initialization; CHANNEL_OUT_MONO replaces the deprecated
    // CHANNEL_CONFIGURATION_MONO alias.
    int AUDIO_BUFFER_SIZE = 4096;
    int minBufferSize = AudioTrack.getMinBufferSize(AUDIO_SAMPLE_FREQ,
            AudioFormat.CHANNEL_OUT_MONO,
            AudioFormat.ENCODING_PCM_16BIT);
    if (AUDIO_BUFFER_SIZE < minBufferSize) {
        AUDIO_BUFFER_SIZE = minBufferSize;
    }
    AudioTrack aT = new AudioTrack(AudioManager.STREAM_MUSIC,
            AUDIO_SAMPLE_FREQ, AudioFormat.CHANNEL_OUT_MONO,
            AudioFormat.ENCODING_PCM_16BIT, AUDIO_BUFFER_SIZE,
            AudioTrack.MODE_STREAM);
    aT.play();
    // error detection encoding
    Log.i("TAG", "encodeMessage() value=" + value);
    value = ErrorDetection.createMessage(value);
    Log.i("TAG", "encodeMessage() message=" + value);
    // sound encoding
    double[] sound = FSKModule.encode(value);
    // BUGFIX: the track is ENCODING_PCM_16BIT, so each sample must be written
    // as one little-endian 16-bit short. The original allocated 4 bytes per
    // sample and wrote via putInt(), producing garbled audio of twice the
    // intended length.
    ByteBuffer buf = ByteBuffer.allocate(2 * sound.length);
    buf.order(ByteOrder.LITTLE_ENDIAN);
    for (int i = 0; i < sound.length; i++) {
        buf.putShort((short) (int) sound[i]);
    }
    byte[] tone = buf.array();
    // play message (write blocks until the data is queued in MODE_STREAM)
    aT.write(tone, 0, tone.length);
    aT.stop();
    aT.release();
}
示例13: playSound
import android.media.AudioTrack; //導入方法依賴的package包/類
/**
 * Plays the pre-generated tone stored in {@code generatedSnd} once, using a
 * static (fully preloaded) AudioTrack.
 */
void playSound() {
    // CHANNEL_OUT_MONO replaces the deprecated CHANNEL_CONFIGURATION_MONO alias.
    AudioTrack audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC,
            sampleRate, AudioFormat.CHANNEL_OUT_MONO,
            AudioFormat.ENCODING_PCM_16BIT, numSamples,
            AudioTrack.MODE_STATIC);
    // NOTE(review): in MODE_STATIC the buffer size argument is in BYTES —
    // confirm numSamples equals generatedSnd.length (2 bytes per 16-bit
    // sample), otherwise the clip will be truncated.
    audioTrack.write(generatedSnd, 0, generatedSnd.length);
    audioTrack.play();
}
示例14: initPlayer
import android.media.AudioTrack; //導入方法依賴的package包/類
/**
 * Initializes the AudioTrack using the platform-minimum buffer size for
 * mono 16-bit PCM at {@code sampleRate}, then starts playback.
 * Synchronized so concurrent callers cannot race on {@code audioTrack}.
 */
private void initPlayer() {
    synchronized (this) {
        int bs = AudioTrack.getMinBufferSize(sampleRate, AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT);
        audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRate, AudioFormat.CHANNEL_OUT_MONO,
                AudioFormat.ENCODING_PCM_16BIT, bs, AudioTrack.MODE_STREAM);
        // BUGFIX: the constructor never returns null, so the original null
        // check was dead code. The real failure mode is a track left in
        // STATE_UNINITIALIZED, on which play() throws IllegalStateException.
        if (audioTrack.getState() == AudioTrack.STATE_INITIALIZED)
            audioTrack.play();
    }
}
示例15: initPlayer
import android.media.AudioTrack; //導入方法依賴的package包/類
/**
 * Initializes the AudioTrack using the platform-minimum buffer size for
 * mono 16-bit PCM at {@code sampleRate}, then starts playback.
 * Synchronized so concurrent callers cannot race on {@code audioTrack}.
 */
private void initPlayer() {
    synchronized (this) {
        int bs = AudioTrack.getMinBufferSize(sampleRate, AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT);
        audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC,
                sampleRate,
                AudioFormat.CHANNEL_OUT_MONO,
                AudioFormat.ENCODING_PCM_16BIT,
                bs,
                AudioTrack.MODE_STREAM);
        // BUGFIX: the constructor never returns null, so the original null
        // check was dead code. The real failure mode is a track left in
        // STATE_UNINITIALIZED, on which play() throws IllegalStateException.
        if (audioTrack.getState() == AudioTrack.STATE_INITIALIZED)
            audioTrack.play();
    }
}