This article collects typical usage examples of the Java method android.media.AudioRecord.getMinBufferSize. If you are wondering what AudioRecord.getMinBufferSize does, how to call it, or where to find it used in practice, the curated code samples below may help. You can also read further about the enclosing class, android.media.AudioRecord.
The following 15 code examples of AudioRecord.getMinBufferSize are sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code samples.
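Before the individual examples, here is a minimal sketch of the canonical call pattern (the 44100 Hz mono PCM-16 configuration is an illustrative choice, not a requirement): getMinBufferSize returns the smallest AudioRecord buffer size in bytes for a given configuration, or a negative error constant if the configuration is unsupported.
import android.media.AudioFormat;
import android.media.AudioRecord;

// Minimal sketch: query the minimum buffer size and validate it before
// constructing an AudioRecord (44100 Hz is the one rate the platform
// documents as working on all devices).
int minSize = AudioRecord.getMinBufferSize(
        44100, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
if (minSize == AudioRecord.ERROR_BAD_VALUE || minSize == AudioRecord.ERROR) {
    // The device does not support this rate/channel/encoding combination.
    throw new IllegalStateException("Unsupported AudioRecord configuration");
}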
Example 1: createAudioRecord
import android.media.AudioRecord; //import the package/class the method depends on
/**
 * Creates a new {@link AudioRecord}.
 *
 * @return A newly created {@link AudioRecord}, or null if it cannot be created (missing
 *         permissions?).
 */
private AudioRecord createAudioRecord() {
    for (int sampleRate : SAMPLE_RATE_CANDIDATES) {
        final int sizeInBytes = AudioRecord.getMinBufferSize(sampleRate, CHANNEL, ENCODING);
        if (sizeInBytes == AudioRecord.ERROR_BAD_VALUE) {
            continue;
        }
        final AudioRecord audioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC,
                sampleRate, CHANNEL, ENCODING, sizeInBytes);
        if (audioRecord.getState() == AudioRecord.STATE_INITIALIZED) {
            mBuffer = new byte[sizeInBytes];
            return audioRecord;
        } else {
            audioRecord.release();
        }
    }
    return null;
}
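Callers of the method above still need to start and drain the recorder themselves. A minimal usage sketch, assuming the mBuffer field from the example and a hypothetical processAudio() consumer:
// Usage sketch; processAudio() is a hypothetical consumer of the PCM bytes.
AudioRecord record = createAudioRecord();
if (record != null) {
    record.startRecording();
    int read = record.read(mBuffer, 0, mBuffer.length); // blocking read
    if (read > 0) {
        processAudio(mBuffer, read);
    }
    record.stop();
    record.release();
}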
Example 2: onCreate
import android.media.AudioRecord; //import the package/class the method depends on
@Override
public void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    setContentView(R.layout.activity_main);
    bufferSize = AudioRecord.getMinBufferSize(16000,
            AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT);
    setButtonHandlers();
    enableButtons(false);
    inferenceInterface = new TensorFlowInferenceInterface();
    inferenceInterface.initializeTensorFlow(getAssets(), MODEL_FILE);
    // tensorFlowSample();
}
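Note that calling getMinBufferSize does not itself require any permission, but actually recording does: on Android 6.0 and later, RECORD_AUDIO must be granted at runtime before an AudioRecord can capture anything. A minimal sketch using the AndroidX compat helpers (the request-code constant is an illustrative choice):
import android.Manifest;
import android.content.pm.PackageManager;
import androidx.core.app.ActivityCompat;
import androidx.core.content.ContextCompat;

private static final int REQUEST_RECORD_AUDIO = 1; // any unique int works

private void ensureAudioPermission() {
    if (ContextCompat.checkSelfPermission(this, Manifest.permission.RECORD_AUDIO)
            != PackageManager.PERMISSION_GRANTED) {
        ActivityCompat.requestPermissions(this,
                new String[]{Manifest.permission.RECORD_AUDIO}, REQUEST_RECORD_AUDIO);
    }
}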
Example 3: fromDefaultMicrophone
import android.media.AudioRecord; //import the package/class the method depends on
/**
 * Create a new AudioDispatcher connected to the default microphone.
 *
 * @param sampleRate
 *            The requested sample rate.
 * @param audioBufferSize
 *            The size of the audio buffer (in samples).
 * @param bufferOverlap
 *            The size of the overlap (in samples).
 * @return A new AudioDispatcher
 */
public static AudioDispatcher fromDefaultMicrophone(final int sampleRate,
        final int audioBufferSize, final int bufferOverlap) {
    int minAudioBufferSize = AudioRecord.getMinBufferSize(sampleRate,
            android.media.AudioFormat.CHANNEL_IN_MONO,
            android.media.AudioFormat.ENCODING_PCM_16BIT);
    int minAudioBufferSizeInSamples = minAudioBufferSize / 2; // 16-bit PCM: two bytes per sample
    if (minAudioBufferSizeInSamples <= audioBufferSize) {
        AudioRecord audioInputStream = new AudioRecord(
                MediaRecorder.AudioSource.MIC, sampleRate,
                android.media.AudioFormat.CHANNEL_IN_MONO,
                android.media.AudioFormat.ENCODING_PCM_16BIT,
                audioBufferSize * 2);
        TarsosDSPAudioFormat format = new TarsosDSPAudioFormat(sampleRate, 16, 1, true, false);
        TarsosDSPAudioInputStream audioStream = new AndroidAudioInputStream(audioInputStream, format);
        // Start recording! Opens the stream.
        audioInputStream.startRecording();
        return new AudioDispatcher(audioStream, audioBufferSize, bufferOverlap);
    } else {
        throw new IllegalArgumentException("Buffer size too small; should be at least "
                + minAudioBufferSizeInSamples + " samples");
    }
}
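A typical way to consume the dispatcher, sketched under the assumption of TarsosDSP's be.tarsos.dsp.AudioProcessor interface (the sample rate and buffer sizes are illustrative):
AudioDispatcher dispatcher = fromDefaultMicrophone(22050, 1024, 0);
dispatcher.addAudioProcessor(new AudioProcessor() {
    @Override
    public boolean process(AudioEvent audioEvent) {
        float[] samples = audioEvent.getFloatBuffer(); // mono PCM as floats
        // ... analyze the block of samples here ...
        return true; // true keeps the processing chain running
    }

    @Override
    public void processingFinished() {
        // nothing to clean up in this sketch
    }
});
new Thread(dispatcher, "Audio Dispatcher").start(); // AudioDispatcher is a Runnable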
Example 4: getMinInputFrameSize
import android.media.AudioRecord; //import the package/class the method depends on
private static int getMinInputFrameSize(int sampleRateInHz, int numChannels) {
    final int bytesPerFrame = numChannels * (BITS_PER_SAMPLE / 8);
    final int channelConfig =
            (numChannels == 1 ? AudioFormat.CHANNEL_IN_MONO : AudioFormat.CHANNEL_IN_STEREO);
    return AudioRecord.getMinBufferSize(
            sampleRateInHz, channelConfig, AudioFormat.ENCODING_PCM_16BIT)
            / bytesPerFrame;
}
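A quick sanity check of the arithmetic: with BITS_PER_SAMPLE = 16 and stereo input, a frame is 2 channels × 2 bytes = 4 bytes, so the method converts a byte count into a frame count by dividing by 4. The numbers below are illustrative; real minimum buffer sizes vary by device:
// Illustrative only: if getMinBufferSize(48000, CHANNEL_IN_STEREO,
// ENCODING_PCM_16BIT) returned 7680 bytes, this would yield 7680 / 4 = 1920.
int frames = getMinInputFrameSize(48000, 2);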
Example 5: AudioRecordThread
import android.media.AudioRecord; //import the package/class the method depends on
public AudioRecordThread(LinkedBlockingDeque<byte[]> linkedBlockingDeque) {
    this.linkedBlockingDeque = linkedBlockingDeque;
    bufferSize = AudioRecord.getMinBufferSize(SAMPLE_RATE_HZ, AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT);
    audioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC, SAMPLE_RATE_HZ, AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT, bufferSize);
}
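The run() body of this thread is not shown. A plausible shape, assuming the fields above (the loop structure and the use of Arrays.copyOf are this sketch's choices, not the original author's):
import java.util.Arrays; // for copyOf

@Override
public void run() {
    byte[] buffer = new byte[bufferSize];
    audioRecord.startRecording();
    while (!Thread.currentThread().isInterrupted()) {
        int read = audioRecord.read(buffer, 0, buffer.length);
        if (read > 0) {
            // Copy so the consumer gets a stable snapshot of this chunk.
            linkedBlockingDeque.offer(Arrays.copyOf(buffer, read));
        }
    }
    audioRecord.stop();
    audioRecord.release();
}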
Example 6: getRecordBufferSize
import android.media.AudioRecord; //import the package/class the method depends on
public static int getRecordBufferSize() {
    int frequency = Options.getInstance().audio.frequency;
    int audioEncoding = Options.getInstance().audio.encoding;
    // Note: the CHANNEL_CONFIGURATION_* constants are deprecated;
    // CHANNEL_IN_MONO and CHANNEL_IN_STEREO are the modern equivalents.
    int channelConfiguration = AudioFormat.CHANNEL_CONFIGURATION_MONO;
    if (Options.getInstance().audio.channelCount == 2) {
        channelConfiguration = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
    }
    return AudioRecord.getMinBufferSize(frequency, channelConfiguration, audioEncoding);
}
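A variant of the same method using the non-deprecated constants might look like this sketch (same Options accessor as in the example):
public static int getRecordBufferSize() {
    int frequency = Options.getInstance().audio.frequency;
    int audioEncoding = Options.getInstance().audio.encoding;
    int channelConfig = (Options.getInstance().audio.channelCount == 2)
            ? AudioFormat.CHANNEL_IN_STEREO  // replaces CHANNEL_CONFIGURATION_STEREO
            : AudioFormat.CHANNEL_IN_MONO;   // replaces CHANNEL_CONFIGURATION_MONO
    return AudioRecord.getMinBufferSize(frequency, channelConfig, audioEncoding);
}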
Example 7: AudioCodec
import android.media.AudioRecord; //import the package/class the method depends on
public AudioCodec() throws IOException {
    this.bufferSize = AudioRecord.getMinBufferSize(SAMPLE_RATE, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
    this.audioRecord = createAudioRecord(this.bufferSize);
    this.mediaCodec = createMediaCodec(this.bufferSize);
    this.mediaCodec.start();
    try {
        audioRecord.startRecording();
    } catch (Exception e) {
        Log.w(TAG, e);
        mediaCodec.release();
        throw new IOException(e);
    }
}
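The createAudioRecord and createMediaCodec helpers are not shown. A plausible createMediaCodec for an AAC encoder sized to the record buffer might look like the following sketch (the MIME type, bit rate, and profile are assumptions, not the original code):
// Hypothetical helper: configure an AAC encoder sized to the record buffer.
private MediaCodec createMediaCodec(int bufferSize) throws IOException {
    MediaFormat format = MediaFormat.createAudioFormat("audio/mp4a-latm", SAMPLE_RATE, 1);
    format.setInteger(MediaFormat.KEY_AAC_PROFILE,
            MediaCodecInfo.CodecProfileLevel.AACObjectLC);
    format.setInteger(MediaFormat.KEY_BIT_RATE, 64000);       // illustrative bit rate
    format.setInteger(MediaFormat.KEY_MAX_INPUT_SIZE, bufferSize);
    MediaCodec codec = MediaCodec.createEncoderByType("audio/mp4a-latm");
    codec.configure(format, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
    return codec;
}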
Example 8: initRecorder
import android.media.AudioRecord; //import the package/class the method depends on
public void initRecorder() {
    SAMPLE_RATE = getValidSampleRates();
    int bufferSize = AudioRecord.getMinBufferSize(SAMPLE_RATE, AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT);
    mBuffer = new short[bufferSize];
    mRecorder = new AudioRecord(MediaRecorder.AudioSource.MIC, SAMPLE_RATE, AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT, bufferSize);
    new File(RECORD_WAV_PATH).mkdir();
}
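getValidSampleRates() is not shown. A common way to implement it is to probe candidate rates with getMinBufferSize and take the first one the device accepts; the sketch below assumes that approach (the candidate list is illustrative):
private int getValidSampleRates() {
    for (int rate : new int[]{44100, 22050, 16000, 11025, 8000}) {
        int size = AudioRecord.getMinBufferSize(rate,
                AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
        if (size > 0) {
            return rate; // first rate this device supports
        }
    }
    return 44100; // documented as the only rate guaranteed on all devices
}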
Example 9: startCapture
import android.media.AudioRecord; //import the package/class the method depends on
public boolean startCapture(int audioSource, int sampleRateInHz, int channelConfig, int audioFormat) {
    if (mIsCaptureStarted) {
        Log.e(TAG, "Capture already started!");
        return false;
    }
    int minBufferSize = AudioRecord.getMinBufferSize(sampleRateInHz, channelConfig, audioFormat);
    if (minBufferSize == AudioRecord.ERROR_BAD_VALUE) {
        Log.e(TAG, "Invalid parameter!");
        return false;
    }
    mAudioRecord = new AudioRecord(audioSource, sampleRateInHz, channelConfig, audioFormat, minBufferSize * 4);
    if (mAudioRecord.getState() == AudioRecord.STATE_UNINITIALIZED) {
        Log.e(TAG, "AudioRecord initialization failed!");
        return false;
    }
    mAudioRecord.startRecording();
    mIsLoopExit = false;
    mCaptureThread = new Thread(new AudioCaptureRunnable());
    mCaptureThread.start();
    mIsCaptureStarted = true;
    Log.d(TAG, "Audio capture started successfully.");
    return true;
}
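AudioCaptureRunnable is not shown. Given the mIsLoopExit flag above, its run() presumably loops reading PCM until the flag is set; the listener in this sketch is a hypothetical addition:
private class AudioCaptureRunnable implements Runnable {
    @Override
    public void run() {
        byte[] buffer = new byte[1024]; // illustrative chunk size
        while (!mIsLoopExit) {
            int ret = mAudioRecord.read(buffer, 0, buffer.length);
            if (ret > 0 && mOnAudioFrameCapturedListener != null) {
                // Hypothetical listener: deliver each captured chunk.
                mOnAudioFrameCapturedListener.onAudioFrameCaptured(buffer);
            }
        }
    }
}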
Example 10: createAudioRecord
import android.media.AudioRecord; //import the package/class the method depends on
private void createAudioRecord() throws InitializationException {
    // The AudioRecord configuration parameters used here are guaranteed
    // to be supported on all devices.
    // AudioFormat.CHANNEL_IN_MONO should be used in place of the deprecated
    // AudioFormat.CHANNEL_CONFIGURATION_MONO, but it is not available at
    // API level 3.
    // Unlike an AudioTrack buffer, an AudioRecord buffer can be larger than
    // the minimum without causing any problems, but the minimum works well.
    final int audioRecordBufferSizeInBytes = AudioRecord.getMinBufferSize(
            SpeechTrainerConfig.SAMPLE_RATE_HZ, AudioFormat.CHANNEL_CONFIGURATION_MONO,
            AudioFormat.ENCODING_PCM_16BIT);
    if (audioRecordBufferSizeInBytes <= 0) {
        throw new InitializationException("Failed to initialize recording.");
    }
    // CHANNEL_IN_MONO is guaranteed to work on all devices.
    // ENCODING_PCM_16BIT is guaranteed to work on all devices.
    audioRecord = new AudioRecord(AudioSource.MIC, SpeechTrainerConfig.SAMPLE_RATE_HZ,
            AudioFormat.CHANNEL_CONFIGURATION_MONO, AudioFormat.ENCODING_PCM_16BIT,
            audioRecordBufferSizeInBytes);
    if (audioRecord.getState() != AudioRecord.STATE_INITIALIZED) {
        audioRecord = null;
        throw new InitializationException("Failed to initialize recording.");
    }
}
Example 11: initAudioRecorder
import android.media.AudioRecord; //import the package/class the method depends on
/**
 * Initialize the audio recorder.
 */
private void initAudioRecorder() throws IOException {
    mBufferSize = AudioRecord.getMinBufferSize(DEFAULT_SAMPLING_RATE,
            DEFAULT_CHANNEL_CONFIG, DEFAULT_AUDIO_FORMAT.getAudioFormat());
    int bytesPerFrame = DEFAULT_AUDIO_FORMAT.getBytesPerFrame();
    /* Get the number of samples and round the buffer size up to a multiple
     * of the frame count, so that it divides evenly and the periodic
     * notifications below line up. */
    int frameSize = mBufferSize / bytesPerFrame;
    if (frameSize % FRAME_COUNT != 0) {
        frameSize += (FRAME_COUNT - frameSize % FRAME_COUNT);
        mBufferSize = frameSize * bytesPerFrame;
    }
    /* Set up the audio recorder */
    mAudioRecord = new AudioRecord(DEFAULT_AUDIO_SOURCE,
            DEFAULT_SAMPLING_RATE, DEFAULT_CHANNEL_CONFIG, DEFAULT_AUDIO_FORMAT.getAudioFormat(),
            mBufferSize);
    mPCMBuffer = new short[mBufferSize];
    /* Initialize the LAME buffer. The MP3 sampling rate is the same as the
     * recorded PCM sampling rate; the bit rate is 32 kbps. */
    Mp3NativeUtil.init(DEFAULT_SAMPLING_RATE, DEFAULT_LAME_IN_CHANNEL, DEFAULT_SAMPLING_RATE, DEFAULT_LAME_MP3_BIT_RATE, DEFAULT_LAME_MP3_QUALITY);
    // Create and start the thread used to encode data.
    mEncodeThread = new DataEncodeThread(mRecordFile, mBufferSize);
    mEncodeThread.start();
    mAudioRecord.setRecordPositionUpdateListener(mEncodeThread, mEncodeThread.getHandler());
    mAudioRecord.setPositionNotificationPeriod(FRAME_COUNT);
}
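DataEncodeThread is registered directly as the position-update listener, so it presumably implements AudioRecord.OnRecordPositionUpdateListener along these lines (processData() is a hypothetical name for its encode-and-flush step):
// Sketch only; the real DataEncodeThread is not shown in this article.
class DataEncodeThread extends Thread
        implements AudioRecord.OnRecordPositionUpdateListener {
    @Override
    public void onPeriodicNotification(AudioRecord recorder) {
        processData(); // encode pending PCM with LAME every FRAME_COUNT frames
    }

    @Override
    public void onMarkerReached(AudioRecord recorder) {
        // marker positions are not used by this recorder
    }
}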
Example 12: AudioRecordRunnable
import android.media.AudioRecord; //import the package/class the method depends on
private AudioRecordRunnable() {
    bufferSize = AudioRecord.getMinBufferSize(sampleRate,
            AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
    audioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC, sampleRate,
            AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, bufferSize);
    audioData = new short[bufferSize];
}
Example 13: start
import android.media.AudioRecord; //import the package/class the method depends on
public void start() {
    if (!isStarted) {
        stopFlag = false;
        mRecordBufferSize = AudioRecord.getMinBufferSize(mRecordSampleRate,
                mRecordChannelConfig, mRecordAudioFormat) * 2;
        mRecord = new AudioRecord(MediaRecorder.AudioSource.MIC, mRecordSampleRate, mRecordChannelConfig,
                mRecordAudioFormat, mRecordBufferSize);
        mRecord.startRecording();
        try {
            MediaFormat format = convertAudioConfigToFormat(mConfig.mAudio);
            mAudioEncoder = MediaCodec.createEncoderByType(format.getString(MediaFormat.KEY_MIME));
            mAudioEncoder.configure(format, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
            mAudioEncoder.start();
        } catch (IOException e) {
            e.printStackTrace();
        }
        Thread thread = new Thread(new Runnable() {
            @Override
            public void run() {
                while (!stopFlag && !audioEncodeStep(false)) { }
                audioEncodeStep(true);
                Log.e("wuwang", "audio stop");
                if (isStarted) {
                    mRecord.stop();
                    mRecord.release();
                    mRecord = null;
                }
                if (mAudioEncoder != null) {
                    mAudioEncoder.stop();
                    mAudioEncoder.release();
                    mAudioEncoder = null;
                }
                isStarted = false;
            }
        });
        thread.start();
        startTime = SystemClock.elapsedRealtimeNanos();
        isStarted = true;
    }
}
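audioEncodeStep(boolean) is not shown. A plausible shape, given the fields above, feeds one chunk of PCM from mRecord into the encoder and signals end-of-stream on the final call; this sketch omits draining the encoder's output and is an assumption, not the original method:
import java.nio.ByteBuffer; // for the encoder's input buffers

private boolean audioEncodeStep(boolean endOfStream) {
    int inIndex = mAudioEncoder.dequeueInputBuffer(1000); // 1 ms timeout, illustrative
    if (inIndex >= 0) {
        ByteBuffer in = mAudioEncoder.getInputBuffer(inIndex);
        in.clear();
        byte[] chunk = new byte[mRecordBufferSize];
        int read = mRecord.read(chunk, 0, chunk.length);
        if (read > 0) {
            in.put(chunk, 0, read);
        }
        long pts = (SystemClock.elapsedRealtimeNanos() - startTime) / 1000; // microseconds
        mAudioEncoder.queueInputBuffer(inIndex, 0, Math.max(read, 0), pts,
                endOfStream ? MediaCodec.BUFFER_FLAG_END_OF_STREAM : 0);
    }
    // ... dequeue and write out encoded output buffers here ...
    return endOfStream; // the loop above runs until stopFlag or end-of-stream
}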
Example 14: AudioRecordManager
import android.media.AudioRecord; //import the package/class the method depends on
public AudioRecordManager() {
    bufferSize = AudioRecord.getMinBufferSize(8000, AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT);
    mRecorder = new AudioRecord(MediaRecorder.AudioSource.MIC, 8000, AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT, bufferSize * 2);
}
Example 15: ExtAudioRecorder
import android.media.AudioRecord; //import the package/class the method depends on
/**
 * Default constructor.
 *
 * Instantiates a new recorder. In the case of compressed recording the
 * parameters can be left as 0. In case of errors, no exception is thrown;
 * instead the state is set to ERROR.
 */
@SuppressWarnings("deprecation")
public ExtAudioRecorder(boolean uncompressed, int audioSource,
        int sampleRate, int channelConfig, int audioFormat) {
    try {
        rUncompressed = uncompressed;
        if (rUncompressed) { // RECORDING_UNCOMPRESSED
            if (audioFormat == AudioFormat.ENCODING_PCM_16BIT) {
                bSamples = 16;
            } else {
                bSamples = 8;
            }
            if (channelConfig == AudioFormat.CHANNEL_CONFIGURATION_MONO) {
                nChannels = 1;
            } else {
                nChannels = 2;
            }
            aSource = audioSource;
            sRate = sampleRate;
            aFormat = audioFormat;
            framePeriod = sampleRate * TIMER_INTERVAL / 1000;
            bufferSize = framePeriod * 2 * bSamples * nChannels / 8;
            // Make sure the buffer size is not smaller than the smallest
            // allowed one.
            if (bufferSize < AudioRecord.getMinBufferSize(sampleRate,
                    channelConfig, audioFormat)) {
                bufferSize = AudioRecord.getMinBufferSize(sampleRate,
                        channelConfig, audioFormat);
                // Set the frame period and timer interval accordingly.
                framePeriod = bufferSize / (2 * bSamples * nChannels / 8);
                Log.w(ExtAudioRecorder.class.getName(),
                        "Increasing buffer size to " + Integer.toString(bufferSize));
            }
            audioRecorder = new AudioRecord(audioSource, sampleRate,
                    channelConfig, audioFormat, bufferSize);
            if (audioRecorder.getState() != AudioRecord.STATE_INITIALIZED)
                throw new Exception("AudioRecord initialization failed");
            audioRecorder.setRecordPositionUpdateListener(updateListener);
            audioRecorder.setPositionNotificationPeriod(framePeriod);
        } else { // RECORDING_COMPRESSED
            mediaRecorder = new MediaRecorder();
            mediaRecorder.setAudioSource(MediaRecorder.AudioSource.MIC);
            mediaRecorder.setOutputFormat(MediaRecorder.OutputFormat.THREE_GPP);
            mediaRecorder.setAudioEncoder(MediaRecorder.AudioEncoder.AMR_NB);
        }
        cAmplitude = 0;
        filePath = null;
        state = State.INITIALIZING;
    } catch (Exception e) {
        if (e.getMessage() != null) {
            Log.e(ExtAudioRecorder.class.getName(), e.getMessage());
        } else {
            Log.e(ExtAudioRecorder.class.getName(),
                    "Unknown error occurred while initializing recording");
        }
        state = State.ERROR;
    }
}