This article collects typical usage examples of the Java class android.media.AudioFormat. If you are unsure what AudioFormat is for or how to use it, the curated examples below should help.
The AudioFormat class belongs to the android.media package. Fifteen code examples are shown below, sorted by popularity.
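Before the examples, a quick orientation: on API 21+ an AudioFormat is usually assembled with AudioFormat.Builder rather than by passing the raw constants around. A minimal sketch (the 44.1 kHz mono 16-bit configuration is an arbitrary choice for illustration):

import android.media.AudioFormat;
import android.media.AudioTrack;

// Describe 16-bit mono PCM at 44.1 kHz.
AudioFormat format = new AudioFormat.Builder()
        .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
        .setSampleRate(44100)
        .setChannelMask(AudioFormat.CHANNEL_OUT_MONO)
        .build();

// On API 23+ the same format object can feed an AudioTrack.Builder.
AudioTrack track = new AudioTrack.Builder()
        .setAudioFormat(format)
        .setTransferMode(AudioTrack.MODE_STREAM)
        .setBufferSizeInBytes(AudioTrack.getMinBufferSize(44100,
                AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT))
        .build();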
Example 1: playSound
import android.media.AudioFormat; // import the required package/class
/**
 * This method plays the sound data in the specified buffer.
 *
 * @param buffer specifies the sound data buffer.
 */
public void playSound(short[] buffer)
{
    final String funcName = "playSound";

    if (debugEnabled)
    {
        dbgTrace.traceEnter(funcName, TrcDbgTrace.TraceLevel.API);
        dbgTrace.traceExit(funcName, TrcDbgTrace.TraceLevel.API);
    }

    audioTrack = new AudioTrack(
            AudioManager.STREAM_MUSIC,
            sampleRate,
            AudioFormat.CHANNEL_OUT_MONO,
            AudioFormat.ENCODING_PCM_16BIT,
            buffer.length * 2, // buffer length in bytes
            AudioTrack.MODE_STATIC);
    audioTrack.write(buffer, 0, buffer.length);
    audioTrack.setNotificationMarkerPosition(buffer.length);
    audioTrack.setPlaybackPositionUpdateListener(this);
    audioTrack.play();
    playing = true;
}
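Since the example places the notification marker at the end of the buffer and registers this as the position-update listener, the enclosing class presumably implements AudioTrack.OnPlaybackPositionUpdateListener. A sketch of what the companion callbacks might look like (the method signatures are from that interface; the bodies are assumptions):

@Override
public void onMarkerReached(AudioTrack track)
{
    // The marker sits at the last frame, so playback has finished.
    playing = false;
}

@Override
public void onPeriodicNotification(AudioTrack track)
{
    // Not used by this example.
}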
Example 2: onCreate
import android.media.AudioFormat; // import the required package/class
@Override
public void onCreate() {
    super.onCreate();
    mHandler = new Handler();
    fetchAccessToken();
    // Query the buffer size with the same output configuration the track uses below
    // (the original passed CHANNEL_IN_STEREO, an input mask, to an output-side query).
    int outputBufferSize = AudioTrack.getMinBufferSize(16000,
            AudioFormat.CHANNEL_OUT_MONO,
            AudioFormat.ENCODING_PCM_16BIT);
    try {
        // STREAM_MUSIC replaces USE_DEFAULT_STREAM_TYPE, which is not a valid
        // stream type for this constructor and raises IllegalArgumentException.
        mAudioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, 16000,
                AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT,
                outputBufferSize, AudioTrack.MODE_STREAM);
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
            mAudioTrack.setVolume(DEFAULT_VOLUME);
        }
        mAudioTrack.play();
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Example 3: onCreate
import android.media.AudioFormat; // import the required package/class
@Override
public void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    setContentView(R.layout.activity_main);
    bufferSize = AudioRecord.getMinBufferSize(16000,
            AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT);
    setButtonHandlers();
    enableButtons(false);
    inferenceInterface = new TensorFlowInferenceInterface();
    inferenceInterface.initializeTensorFlow(getAssets(), MODEL_FILE);
    // tensorFlowSample();
}
Example 4: AudioSink
import android.media.AudioFormat; // import the required package/class
/**
 * Constructor. Will create a new AudioSink.
 *
 * @param packetSize size of the incoming packets
 * @param sampleRate sample rate of the audio signal
 */
public AudioSink(int packetSize, int sampleRate) {
    this.packetSize = packetSize;
    this.sampleRate = sampleRate;

    // Create the queues and fill the output queue with empty sample packets:
    this.inputQueue = new ArrayBlockingQueue<SamplePacket>(QUEUE_SIZE);
    this.outputQueue = new ArrayBlockingQueue<SamplePacket>(QUEUE_SIZE);
    for (int i = 0; i < QUEUE_SIZE; i++)
        this.outputQueue.offer(new SamplePacket(packetSize));

    // Create an instance of the AudioTrack class:
    int bufferSize = AudioTrack.getMinBufferSize(sampleRate, AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT);
    this.audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRate, AudioFormat.CHANNEL_OUT_MONO,
            AudioFormat.ENCODING_PCM_16BIT, bufferSize, AudioTrack.MODE_STREAM);

    // Create the audio filters:
    this.audioFilter1 = FirFilter.createLowPass(2, 1, 1, 0.1f, 0.15f, 30);
    Log.d(LOGTAG, "constructor: created audio filter 1 with " + audioFilter1.getNumberOfTaps() + " taps.");
    this.audioFilter2 = FirFilter.createLowPass(4, 1, 1, 0.1f, 0.1f, 30);
    Log.d(LOGTAG, "constructor: created audio filter 2 with " + audioFilter2.getNumberOfTaps() + " taps.");

    this.tmpAudioSamples = new SamplePacket(packetSize);
}
Example 5: init_
import android.media.AudioFormat; // import the required package/class
private void init_(boolean eccEnabled) {
    mEccEncoder = EccInstanceProvider.getEncoder(eccEnabled);
    int minBufferSizeInBytes = AudioTrack.getMinBufferSize(
            RATE,
            AudioFormat.CHANNEL_OUT_MONO,
            AudioFormat.ENCODING_PCM_16BIT);
    // 44.1kHz mono 16bit
    mAudioTrack = new AudioTrack(
            AudioManager.STREAM_MUSIC,
            RATE,
            AudioFormat.CHANNEL_OUT_MONO,
            AudioFormat.ENCODING_PCM_16BIT,
            minBufferSizeInBytes,
            AudioTrack.MODE_STREAM);
    mExecutorService = Executors.newSingleThreadExecutor();
}
Example 6: initAudioTrack
import android.media.AudioFormat; // import the required package/class
private void initAudioTrack(int sampleRate, int channels) {
    if (sampleRate <= 0) {
        sampleRate = AUDIO_FORMAT_PCM8K;
    }
    if (channels <= 0) {
        channels = 1;
    }
    if (channels == 1) {
        mChannelConfig = AudioFormat.CHANNEL_OUT_MONO;
    } else if (channels == 2) {
        mChannelConfig = AudioFormat.CHANNEL_OUT_STEREO;
    }
    if (iCurrentQueueAudioFormat == sampleRate) {
        if (mAudioTrack == null) {
            mAudioTrack = createAudioTrack(iCurrentQueueAudioFormat);
        }
    } else {
        Log.d(TAG, "Decoder-initAudioTrack-sampleRate=" + sampleRate);
        Log.d(TAG, "Decoder-initAudioTrack-channels=" + channels);
        mAudioTrack = createAudioTrack(sampleRate);
        iCurrentQueueAudioFormat = sampleRate;
    }
}
Example 7: getMinBufferSize
import android.media.AudioFormat; // import the required package/class
private int getMinBufferSize(int sampleRate, int channelConfig, int audioFormat) {
    minBufferSize = AudioTrack.getMinBufferSize(sampleRate, channelConfig, audioFormat);
    // Work around "IllegalArgumentException: Invalid audio buffer size"
    int channelCount = 1;
    switch (channelConfig) {
        // AudioFormat.CHANNEL_CONFIGURATION_DEFAULT
        case AudioFormat.CHANNEL_OUT_DEFAULT:
        case AudioFormat.CHANNEL_OUT_MONO:
        case AudioFormat.CHANNEL_CONFIGURATION_MONO:
            channelCount = 1;
            break;
        case AudioFormat.CHANNEL_OUT_STEREO:
        case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
            channelCount = 2;
            break;
        default:
            channelCount = Integer.bitCount(channelConfig);
    }
    // Check that minBufferSize is a whole number of frames; if not, fall back to a default of 1152.
    int frameSizeInBytes = channelCount * (audioFormat == AudioFormat.ENCODING_PCM_8BIT ? 1 : 2);
    if ((minBufferSize % frameSizeInBytes != 0) || (minBufferSize < 1)) {
        minBufferSize = 1152;
    }
    return minBufferSize;
}
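For concreteness: with stereo 16-bit PCM the frame size is 2 channels × 2 bytes = 4 bytes, so any minBufferSize that is not a multiple of 4 (or a non-positive error code returned by getMinBufferSize) triggers the 1152-byte fallback.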
Example 8: PcmPlayer
import android.media.AudioFormat; // import the required package/class
public PcmPlayer(Context context, Handler handler) {
    this.mContext = context;
    this.audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRate, AudioFormat.CHANNEL_OUT_MONO,
            AudioFormat.ENCODING_PCM_16BIT, wBufferSize, AudioTrack.MODE_STREAM);
    this.handler = handler;
    audioTrack.setPlaybackPositionUpdateListener(this, handler);
    cacheDir = context.getExternalFilesDir(Environment.DIRECTORY_MUSIC);
}
Example 9: checkMediaCodecAudioEncoderSupport
import android.media.AudioFormat; // import the required package/class
@TargetApi(MIN_API_LEVEL_AUDIO)
public static int checkMediaCodecAudioEncoderSupport() {
    if (getApiLevel() < MIN_API_LEVEL_AUDIO) {
        Log.d(TAG, "checkMediaCodecAudioEncoderSupport: Min API is 16");
        return CODEC_REQ_API_NOT_SATISFIED;
    }
    final MediaFormat audioFormat = MediaFormat.createAudioFormat(MIME_TYPE_AUDIO, TEST_SAMPLE_RATE, 1);
    audioFormat.setInteger(MediaFormat.KEY_AAC_PROFILE, MediaCodecInfo.CodecProfileLevel.AACObjectLC);
    audioFormat.setInteger(MediaFormat.KEY_CHANNEL_MASK, AudioFormat.CHANNEL_IN_MONO);
    audioFormat.setInteger(MediaFormat.KEY_BIT_RATE, TEST_AUDIO_BIT_RATE);
    audioFormat.setInteger(MediaFormat.KEY_CHANNEL_COUNT, 1);
    MediaCodec mediaCodec;
    try {
        mediaCodec = MediaCodec.createEncoderByType(MIME_TYPE_AUDIO);
        mediaCodec.configure(audioFormat, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
        mediaCodec.start();
        mediaCodec.stop();
        mediaCodec.release();
        mediaCodec = null;
    } catch (Exception ex) {
        Log.e(TAG, "Failed on creation of codec #", ex);
        return CODEC_ERROR;
    }
    return CODEC_SUPPORTED;
}
Example 10: AudioEncoderCore
import android.media.AudioFormat; // import the required package/class
public AudioEncoderCore(MMediaMuxer MMediaMuxer) throws IOException {
    super(MMediaMuxer);
    final MediaFormat audioFormat = MediaFormat.createAudioFormat(MIME_TYPE, SAMPLE_RATE, 1);
    audioFormat.setInteger(MediaFormat.KEY_AAC_PROFILE, MediaCodecInfo.CodecProfileLevel.AACObjectLC);
    audioFormat.setInteger(MediaFormat.KEY_CHANNEL_MASK, AudioFormat.CHANNEL_IN_MONO);
    audioFormat.setInteger(MediaFormat.KEY_BIT_RATE, BIT_RATE);
    audioFormat.setInteger(MediaFormat.KEY_CHANNEL_COUNT, 1);
    // audioFormat.setLong(MediaFormat.KEY_MAX_INPUT_SIZE, inputFile.length());
    // audioFormat.setLong(MediaFormat.KEY_DURATION, (long) durationInMs);
    if (VERBOSE) Log.i(TAG, "format: " + audioFormat);
    mEncoder = MediaCodec.createEncoderByType(MIME_TYPE);
    mEncoder.configure(audioFormat, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
    mEncoder.start();
    if (mAudioThread == null) {
        mAudioThread = new AudioThread();
        mAudioThread.start();
        capturing = true;
        stopped = false;
    }
}
Example 11: findAudioRecord
import android.media.AudioFormat; // import the required package/class
public AudioRecord findAudioRecord() {
    for (int rate : mSampleRates) {
        for (short audioFormat : new short[]{ AudioFormat.ENCODING_PCM_16BIT }) {
            for (short channelConfig : new short[]{ AudioFormat.CHANNEL_IN_MONO }) {
                try {
                    Log.d("C.TAG", "Attempting rate " + rate + "Hz, bits: " + audioFormat + ", channel: "
                            + channelConfig);
                    // Use the loop variables so each candidate configuration is actually probed
                    // (the original hardcoded the channel/encoding here and DEFAULT_RATE below).
                    int bufferSize = AudioRecord.getMinBufferSize(rate, channelConfig, audioFormat);
                    if (bufferSize != AudioRecord.ERROR_BAD_VALUE) {
                        // check if we can instantiate and have a success
                        AudioRecord recorder = new AudioRecord(AudioSource.MIC, rate, channelConfig, audioFormat, bufferSize);
                        if (recorder.getState() == AudioRecord.STATE_INITIALIZED)
                            return recorder;
                    }
                } catch (Exception e) {
                    Log.e("C.TAG", rate + "Exception, keep trying.", e);
                }
            }
        }
    }
    return null;
}
Example 12: createAudioTrack
import android.media.AudioFormat; // import the required package/class
public AudioTrack createAudioTrack(int frameRate) {
    int minBufferSizeBytes = AudioTrack.getMinBufferSize(frameRate,
            AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_FLOAT);
    Log.i(TAG, "AudioTrack.minBufferSize = " + minBufferSizeBytes
            + " bytes = " + (minBufferSizeBytes / BYTES_PER_FRAME)
            + " frames");
    // Buffer scale factor; 8/8 leaves it at exactly the minimum size.
    int bufferSize = 8 * minBufferSizeBytes / 8;
    int outputBufferSizeFrames = bufferSize / BYTES_PER_FRAME;
    Log.i(TAG, "actual bufferSize = " + bufferSize + " bytes = "
            + outputBufferSizeFrames + " frames");

    // Use the frameRate parameter here; the original passed the mFrameRate field,
    // which silently ignores the argument whenever the two differ.
    AudioTrack player = new AudioTrack(AudioManager.STREAM_MUSIC,
            frameRate, AudioFormat.CHANNEL_OUT_STEREO,
            AudioFormat.ENCODING_PCM_FLOAT, bufferSize,
            AudioTrack.MODE_STREAM);
    Log.i(TAG, "created AudioTrack");
    return player;
}
Example 13: getInstanse
import android.media.AudioFormat; // import the required package/class
@SuppressWarnings("deprecation")
public static ExtAudioRecorder getInstanse(Boolean recordingCompressed) {
    ExtAudioRecorder result = null;
    if (recordingCompressed) {
        result = new ExtAudioRecorder(false, AudioSource.MIC,
                sampleRates[3], AudioFormat.CHANNEL_CONFIGURATION_MONO,
                AudioFormat.ENCODING_PCM_16BIT);
    } else {
        int i = 0;
        do {
            // Walk the candidate sample rates instead of retrying sampleRates[3]
            // on every pass, which is what the original loop did.
            result = new ExtAudioRecorder(true, AudioSource.MIC,
                    sampleRates[i], AudioFormat.CHANNEL_CONFIGURATION_MONO,
                    AudioFormat.ENCODING_PCM_16BIT);
        } while ((++i < sampleRates.length)
                && !(result.getState() == ExtAudioRecorder.State.INITIALIZING));
    }
    return result;
}
Example 14: run
import android.media.AudioFormat; // import the required package/class
@Override
public void run() {
    super.run();
    isRunning = true;
    int buffsize = AudioTrack.getMinBufferSize(sr,
            AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT);
    // create an AudioTrack object
    AudioTrack audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC,
            sr, AudioFormat.CHANNEL_OUT_MONO,
            AudioFormat.ENCODING_PCM_16BIT, buffsize,
            AudioTrack.MODE_STREAM);
    // buffsize is in bytes, so this array holds twice the minimum sample count;
    // that is harmless here, it just writes larger chunks per iteration
    short[] samples = new short[buffsize];
    int amp = 10000;
    double twopi = 8. * Math.atan(1.);
    double ph = 0.0;
    // start audio
    audioTrack.play();
    // synthesis loop
    while (isRunning) {
        double fr = tuneFreq;
        for (int i = 0; i < buffsize; i++) {
            samples[i] = (short) (amp * Math.sin(ph));
            ph += twopi * fr / sr;
        }
        audioTrack.write(samples, 0, buffsize);
    }
    audioTrack.stop();
    audioTrack.release();
}
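One detail worth flagging in that synthesis loop: ph grows without bound, and Math.sin gradually loses precision for very large arguments. A common refinement (an assumption, not part of the original) is to wrap the phase as it advances:

ph += twopi * fr / sr;
if (ph > twopi) {
    ph -= twopi; // keep the phase in [0, 2*pi) so Math.sin stays accurate
}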
Example 15: PWave
import android.media.AudioFormat; // import the required package/class
public PWave(AppRunner appRunner) {
    super(appRunner);
    appRunner.whatIsRunning.add(this);

    // set the buffer size
    buffsize = AudioTrack.getMinBufferSize(mSampleRate,
            AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT);
    samples = new short[buffsize];

    // create an audiotrack object
    audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC,
            mSampleRate, AudioFormat.CHANNEL_OUT_MONO,
            AudioFormat.ENCODING_PCM_16BIT, buffsize,
            AudioTrack.MODE_STREAM);

    // start audio
    audioTrack.play();
}