This article collects typical usage examples of the Java method android.media.AudioRecord.getState. If you are wondering what AudioRecord.getState does, how to call it, or what real-world usages look like, the curated snippets below should help. You can also look further into the documentation of the enclosing class, android.media.AudioRecord.
Below are 13 code examples of the AudioRecord.getState method, sorted by popularity by default.
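Before the project-specific examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the typical getState() pattern: create an AudioRecord, verify that the state is STATE_INITIALIZED before calling startRecording(), and release the instance otherwise. The 16 kHz mono PCM configuration is only an illustrative choice and is not guaranteed on every device.

import android.media.AudioFormat;
import android.media.AudioRecord;
import android.media.MediaRecorder;

public final class AudioRecordStateCheck {

    /** Returns a started recorder, or null if the device or permissions do not allow it. */
    public static AudioRecord openAndStart() {
        final int sampleRate = 16000; // illustrative values only
        final int channel = AudioFormat.CHANNEL_IN_MONO;
        final int encoding = AudioFormat.ENCODING_PCM_16BIT;

        int bufferSize = AudioRecord.getMinBufferSize(sampleRate, channel, encoding);
        if (bufferSize == AudioRecord.ERROR_BAD_VALUE || bufferSize == AudioRecord.ERROR) {
            return null; // unsupported configuration
        }

        AudioRecord record = new AudioRecord(
                MediaRecorder.AudioSource.MIC, sampleRate, channel, encoding, bufferSize);
        // getState() tells whether the constructor managed to acquire the native resources.
        if (record.getState() != AudioRecord.STATE_INITIALIZED) {
            record.release(); // always release a failed instance
            return null;
        }
        record.startRecording();
        return record;
    }
}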
Example 1: createAudioRecord

import android.media.AudioRecord; // import the package/class the method depends on
/**
 * Creates a new {@link AudioRecord}.
 *
 * @return A newly created {@link AudioRecord}, or null if it cannot be created (missing
 *         permissions?).
 */
private AudioRecord createAudioRecord() {
    for (int sampleRate : SAMPLE_RATE_CANDIDATES) {
        final int sizeInBytes = AudioRecord.getMinBufferSize(sampleRate, CHANNEL, ENCODING);
        if (sizeInBytes == AudioRecord.ERROR_BAD_VALUE) {
            continue;
        }
        final AudioRecord audioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC,
                sampleRate, CHANNEL, ENCODING, sizeInBytes);
        if (audioRecord.getState() == AudioRecord.STATE_INITIALIZED) {
            mBuffer = new byte[sizeInBytes];
            return audioRecord;
        } else {
            audioRecord.release();
        }
    }
    return null;
}
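The "missing permissions?" remark in the Javadoc above usually refers to the RECORD_AUDIO permission. The snippet below is not part of Example 1; it is a minimal sketch of the runtime check commonly performed before calling a method like createAudioRecord() on Android 6.0 and later, assuming the androidx.core compatibility library is available.

import android.Manifest;
import android.content.Context;
import android.content.pm.PackageManager;
import androidx.core.content.ContextCompat;

// Returns true only if RECORD_AUDIO has been granted; otherwise the caller should
// request the permission before trying to construct an AudioRecord.
static boolean canRecordAudio(Context context) {
    return ContextCompat.checkSelfPermission(context, Manifest.permission.RECORD_AUDIO)
            == PackageManager.PERMISSION_GRANTED;
}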
Example 2: startRecording

import android.media.AudioRecord; // import the package/class the method depends on
private void startRecording() {
    recorder = new AudioRecord(MediaRecorder.AudioSource.MIC,
            RECORDER_SAMPLERATE, RECORDER_CHANNELS, RECORDER_AUDIO_ENCODING, bufferSize);
    int i = recorder.getState();
    if (i == 1) // 1 == AudioRecord.STATE_INITIALIZED
        recorder.startRecording();
    isRecording = true;
    recordingThread = new Thread(new Runnable() {
        @Override
        public void run() {
            writeAudioDataToArrayList();
            reorganiseDataArray(flattenArray());
        }
    }, "AudioRecorder Thread");
    recordingThread.start();
}
Example 3: RecognizerThread

import android.media.AudioRecord; // import the package/class the method depends on
public RecognizerThread(int timeout) {
    if (timeout != NO_TIMEOUT) {
        this.timeoutSamples = timeout * sampleRate / 1000;
    } else {
        this.timeoutSamples = NO_TIMEOUT;
    }
    this.remainingSamples = this.timeoutSamples;
    // 6 = MediaRecorder.AudioSource.VOICE_RECOGNITION, 16 = AudioFormat.CHANNEL_IN_MONO,
    // 2 = AudioFormat.ENCODING_PCM_16BIT
    recorder = new AudioRecord(6, sampleRate, 16, 2, bufferSize * 2);
    if (recorder.getState() == AudioRecord.STATE_UNINITIALIZED) {
        recorder.release();
        try {
            throw new IOException(
                    "Failed to initialize recorder. Microphone might be already in use.");
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
Example 4: findAudioRecord

import android.media.AudioRecord; // import the package/class the method depends on
public AudioRecord findAudioRecord() {
    for (int rate : mSampleRates) {
        for (short audioFormat : new short[] { AudioFormat.ENCODING_PCM_16BIT }) {
            for (short channelConfig : new short[] { AudioFormat.CHANNEL_IN_MONO }) {
                try {
                    Log.d("C.TAG", "Attempting rate " + rate + "Hz, bits: " + audioFormat + ", channel: "
                            + channelConfig);
                    int bufferSize = AudioRecord.getMinBufferSize(rate, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
                    if (bufferSize != AudioRecord.ERROR_BAD_VALUE) {
                        // check if we can instantiate and have a success
                        // NOTE: the recorder is created with DEFAULT_RATE, not the candidate rate used above
                        AudioRecord recorder = new AudioRecord(AudioSource.MIC, DEFAULT_RATE, channelConfig, audioFormat, bufferSize);
                        if (recorder.getState() == AudioRecord.STATE_INITIALIZED)
                            return recorder;
                    }
                } catch (Exception e) {
                    Log.e("C.TAG", rate + "Exception, keep trying.", e);
                }
            }
        }
    }
    return null;
}
Example 5: startRecording

import android.media.AudioRecord; // import the package/class the method depends on
private void startRecording(final String filename) {
    recorder = new AudioRecord(MediaRecorder.AudioSource.MIC,
            RECORDER_SAMPLERATE,
            RECORDER_CHANNELS,
            RECORDER_AUDIO_ENCODING,
            bufferSize);
    int i = recorder.getState();
    if (i == 1) // 1 == AudioRecord.STATE_INITIALIZED
        recorder.startRecording();
    isRecording = true;
    recordingThread = new Thread(new Runnable() {
        @Override
        public void run() {
            writeAudioDataToFile(filename);
        }
    }, "AudioRecorder Thread");
    recordingThread.start();
}
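The helper writeAudioDataToFile(filename) is not shown in this snippet. As a rough sketch (assuming the same recorder, bufferSize, and isRecording fields as Example 5, and not taken from the original project), it typically loops over AudioRecord.read() and appends the raw PCM bytes to a file:

// Hypothetical implementation of the helper used above; field names are assumptions.
// Requires java.io.FileOutputStream, java.io.IOException, android.util.Log.
private void writeAudioDataToFile(String filename) {
    byte[] data = new byte[bufferSize];
    try (FileOutputStream out = new FileOutputStream(filename)) {
        while (isRecording) {
            int read = recorder.read(data, 0, data.length);
            if (read > 0) {
                out.write(data, 0, read); // raw PCM; a WAV header would have to be added separately
            }
        }
    } catch (IOException e) {
        Log.e("AudioRecorder", "Failed to write PCM data", e);
    }
}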
Example 6: run

import android.media.AudioRecord; // import the package/class the method depends on
@Override
public void run() {
    byte[] buffer = new byte[audioBufferSize];
    recorder = new AudioRecord(audioSource, sampleRate, channel, audioFormat, audioBufferSize);
    try {
        // getState() reflects whether construction succeeded and does not change afterwards,
        // so this loop either passes immediately or keeps waiting on a failed recorder.
        while (recorder.getState() != AudioRecord.STATE_INITIALIZED)
            Thread.sleep(100, 0);
    } catch (InterruptedException e) {
        recorder.release();
        return;
    }
    recorder.startRecording();
    for (; ; ) {
        int length = recorder.read(buffer, 0, buffer.length);
        if (length < 0)
            Log.e("Record", "error: " + Integer.toString(length));
        else {
            // note: the full buffer is sent even if read() returned fewer bytes
            for (WebSocket ws : wss)
                ws.sendBinary(buffer);
        }
        if (Thread.interrupted()) {
            recorder.stop();
            return;
        }
    }
}
Example 7: RawAudioRecorder

import android.media.AudioRecord; // import the package/class the method depends on
/**
 * <p>Instantiates a new recorder and sets the state to INITIALIZING.
 * In case of errors, no exception is thrown, but the state is set to ERROR.</p>
 *
 * <p>Android docs say: 44100Hz is currently the only rate that is guaranteed to work on all devices,
 * but other rates such as 22050, 16000, and 11025 may work on some devices.</p>
 *
 * @param audioSource Identifier of the audio source (e.g. microphone)
 * @param sampleRate  Sample rate (e.g. 16000)
 */
public RawAudioRecorder(int audioSource, int sampleRate) {
    mSampleRate = sampleRate;
    // E.g. 1 second of 16kHz 16-bit mono audio takes 32000 bytes.
    mOneSec = RESOLUTION_IN_BYTES * CHANNELS * mSampleRate;
    mRecording = new byte[mOneSec * MAX_RECORDING_TIME_IN_SECS];
    try {
        setBufferSizeAndFramePeriod();
        mRecorder = new AudioRecord(audioSource, mSampleRate, AudioFormat.CHANNEL_IN_MONO, RESOLUTION, mBufferSize);
        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN) {
            boolean agcAvailable = AutomaticGainControl.isAvailable();
            if (agcAvailable) {
                AutomaticGainControl.create(mRecorder.getAudioSessionId());
            }
        }
        if (mRecorder.getState() != AudioRecord.STATE_INITIALIZED) {
            throw new Exception("AudioRecord initialization failed");
        }
        mBuffer = new byte[mFramePeriod * RESOLUTION_IN_BYTES * CHANNELS];
        setState(State.READY);
    } catch (Exception e) {
        release();
        setState(State.ERROR);
        if (e.getMessage() == null) {
            Log.e(LOG_TAG, "Unknown error occurred while initializing recording");
        } else {
            Log.e(LOG_TAG, e.getMessage());
        }
    }
}
Example 8: startCapture

import android.media.AudioRecord; // import the package/class the method depends on
public boolean startCapture(int audioSource, int sampleRateInHz, int channelConfig, int audioFormat) {
    if (mIsCaptureStarted) {
        Log.e(TAG, "Capture already started !");
        return false;
    }
    int minBufferSize = AudioRecord.getMinBufferSize(sampleRateInHz, channelConfig, audioFormat);
    if (minBufferSize == AudioRecord.ERROR_BAD_VALUE) {
        Log.e(TAG, "Invalid parameter !");
        return false;
    }
    mAudioRecord = new AudioRecord(audioSource, sampleRateInHz, channelConfig, audioFormat, minBufferSize * 4);
    if (mAudioRecord.getState() == AudioRecord.STATE_UNINITIALIZED) {
        Log.e(TAG, "AudioRecord initialize fail !");
        return false;
    }
    mAudioRecord.startRecording();
    mIsLoopExit = false;
    mCaptureThread = new Thread(new AudioCaptureRunnable());
    mCaptureThread.start();
    mIsCaptureStarted = true;
    Log.d(TAG, "Start audio capture success !");
    return true;
}
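Example 8 only shows the start path. A matching stop method is not included in the snippet; a plausible sketch (not from the original project) reusing the same mIsLoopExit, mCaptureThread, mAudioRecord, and mIsCaptureStarted fields would signal the capture thread to exit, join it, and then stop and release the AudioRecord:

// Hypothetical counterpart to startCapture(); field names are assumptions based on Example 8.
public void stopCapture() {
    if (!mIsCaptureStarted) {
        return;
    }
    mIsLoopExit = true;            // ask AudioCaptureRunnable to leave its read loop
    try {
        mCaptureThread.interrupt();
        mCaptureThread.join(1000); // wait up to 1 s for the thread to finish
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }
    if (mAudioRecord.getState() == AudioRecord.STATE_INITIALIZED) {
        mAudioRecord.stop();
    }
    mAudioRecord.release();
    mIsCaptureStarted = false;
}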
Example 9: createAudioRecord

import android.media.AudioRecord; // import the package/class the method depends on
private void createAudioRecord() throws InitializationException {
    // The AudioRecord configuration parameters used here are guaranteed
    // to be supported on all devices.
    // AudioFormat.CHANNEL_IN_MONO should be used in place of the deprecated
    // AudioFormat.CHANNEL_CONFIGURATION_MONO, but it is not available for
    // API level 3.
    // Unlike the AudioTrack buffer, the AudioRecord buffer can be larger than
    // the minimum without causing any problems. But the minimum works well.
    final int audioRecordBufferSizeInBytes = AudioRecord.getMinBufferSize(
            SpeechTrainerConfig.SAMPLE_RATE_HZ, AudioFormat.CHANNEL_CONFIGURATION_MONO,
            AudioFormat.ENCODING_PCM_16BIT);
    if (audioRecordBufferSizeInBytes <= 0) {
        throw new InitializationException("Failed to initialize recording.");
    }
    // CHANNEL_IN_MONO is guaranteed to work on all devices.
    // ENCODING_PCM_16BIT is guaranteed to work on all devices.
    audioRecord = new AudioRecord(AudioSource.MIC, SpeechTrainerConfig.SAMPLE_RATE_HZ,
            AudioFormat.CHANNEL_CONFIGURATION_MONO, AudioFormat.ENCODING_PCM_16BIT,
            audioRecordBufferSizeInBytes);
    if (audioRecord.getState() != AudioRecord.STATE_INITIALIZED) {
        audioRecord = null;
        throw new InitializationException("Failed to initialize recording.");
    }
}
Example 10: findAudioRecord

import android.media.AudioRecord; // import the package/class the method depends on
/**
 * Finds a usable audio recorder.
 *
 * @return an initialized {@link AudioRecord}, or null if no configuration works
 */
private AudioRecord findAudioRecord() {
    int[] samplingRates = new int[]{44100, 22050, 11025, 8000};
    int[] audioFormats = new int[]{
            AudioFormat.ENCODING_PCM_16BIT,
            AudioFormat.ENCODING_PCM_8BIT};
    int[] channelConfigs = new int[]{
            AudioFormat.CHANNEL_IN_STEREO,
            AudioFormat.CHANNEL_IN_MONO};
    for (int rate : samplingRates) {
        for (int format : audioFormats) {
            for (int config : channelConfigs) {
                try {
                    int bufferSize = AudioRecord.getMinBufferSize(rate, config, format);
                    if (bufferSize != AudioRecord.ERROR_BAD_VALUE) {
                        for (int source : AUDIO_SOURCES) {
                            AudioRecord recorder = new AudioRecord(source, rate, config, format, bufferSize * 4);
                            if (recorder.getState() == AudioRecord.STATE_INITIALIZED) {
                                return recorder;
                            }
                        }
                    }
                } catch (Exception e) {
                    Log.e(TAG, "Init AudioRecord Error." + Log.getStackTraceString(e));
                }
            }
        }
    }
    return null;
}
Example 11: initAudioRecord

import android.media.AudioRecord; // import the package/class the method depends on
private boolean initAudioRecord(Context context, int audioSource, int sampleRate, int channelConfig, int chanelNumber, int audioFormat, int nbBitsPerSample, int bufferSize)
{
    witchRecorder = 1;
    isAudioRecordRunning = false;
    try
    {
        audioRecordSampleRate = sampleRate;
        int timerInterval = 120;
        audioRecordPeriodInFrames = sampleRate * timerInterval / 1000;
        bufferSize = audioRecordPeriodInFrames * 2 * chanelNumber * nbBitsPerSample / 8;
        if (bufferSize < AudioRecord.getMinBufferSize(sampleRate, channelConfig, audioFormat))
        {
            bufferSize = AudioRecord.getMinBufferSize(sampleRate, channelConfig, audioFormat);
            audioRecordPeriodInFrames = bufferSize / (2 * nbBitsPerSample * chanelNumber / 8);
        }
        audioRecord = new AudioRecord(audioSource, sampleRate, channelConfig, audioFormat, bufferSize);
        if (audioRecord.getState() != AudioRecord.STATE_INITIALIZED)
        {
            Log.w("RecordFileWriter", "initAudioRecord : " + context.getString(R.string.log_record_file_writer_error_audiorecord_init));
            databaseManager.insertLog(context, "" + context.getString(R.string.log_record_file_writer_error_audiorecord_init), new Date().getTime(), 2, false);
            return false;
        }
        audioRecord.setRecordPositionUpdateListener(onAudioRecordPositionUpdateListener);
        audioRecord.setPositionNotificationPeriod(audioRecordPeriodInFrames);
    }
    catch (Exception e)
    {
        Log.w("RecordFileWriter", "initAudioRecord : " + context.getString(R.string.log_record_file_writer_error_audiorecord_init) + " : " + e);
        databaseManager.insertLog(context, "" + context.getString(R.string.log_record_file_writer_error_audiorecord_init), new Date().getTime(), 2, false);
        return false;
    }
    return true;
}
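Example 11 registers onAudioRecordPositionUpdateListener, but the listener itself is not shown. For reference, a minimal sketch of such a listener (names and body here are illustrative, not from the original project) looks like the following; onPeriodicNotification is invoked every audioRecordPeriodInFrames frames, as configured by setPositionNotificationPeriod():

// Illustrative listener; the real onAudioRecordPositionUpdateListener in the project may differ.
private final AudioRecord.OnRecordPositionUpdateListener onAudioRecordPositionUpdateListener =
        new AudioRecord.OnRecordPositionUpdateListener() {
            @Override
            public void onPeriodicNotification(AudioRecord recorder) {
                // Called every positionNotificationPeriod frames: read the data that is ready.
                byte[] chunk = new byte[bufferSize];
                int read = recorder.read(chunk, 0, chunk.length);
                if (read > 0) {
                    // write the chunk to the output file or stream here
                }
            }

            @Override
            public void onMarkerReached(AudioRecord recorder) {
                // Unused here; only relevant when setNotificationMarkerPosition() is called.
            }
        };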
Example 12: ExtAudioRecorder

import android.media.AudioRecord; // import the package/class the method depends on
/**
 * Default constructor.
 *
 * Instantiates a new recorder; in case of compressed recording the
 * parameters can be left as 0. In case of errors, no exception is thrown,
 * but the state is set to ERROR.
 */
@SuppressWarnings("deprecation")
public ExtAudioRecorder(boolean uncompressed, int audioSource,
        int sampleRate, int channelConfig, int audioFormat) {
    try {
        rUncompressed = uncompressed;
        if (rUncompressed) { // RECORDING_UNCOMPRESSED
            if (audioFormat == AudioFormat.ENCODING_PCM_16BIT) {
                bSamples = 16;
            } else {
                bSamples = 8;
            }
            if (channelConfig == AudioFormat.CHANNEL_CONFIGURATION_MONO) {
                nChannels = 1;
            } else {
                nChannels = 2;
            }
            aSource = audioSource;
            sRate = sampleRate;
            aFormat = audioFormat;
            framePeriod = sampleRate * TIMER_INTERVAL / 1000;
            bufferSize = framePeriod * 2 * bSamples * nChannels / 8;
            if (bufferSize < AudioRecord.getMinBufferSize(sampleRate,
                    channelConfig, audioFormat)) {
                // Make sure the buffer size is not smaller than the smallest allowed one.
                bufferSize = AudioRecord.getMinBufferSize(sampleRate,
                        channelConfig, audioFormat);
                // Set frame period and timer interval accordingly.
                framePeriod = bufferSize / (2 * bSamples * nChannels / 8);
                Log.w(ExtAudioRecorder.class.getName(),
                        "Increasing buffer size to "
                                + Integer.toString(bufferSize));
            }
            audioRecorder = new AudioRecord(audioSource, sampleRate,
                    channelConfig, audioFormat, bufferSize);
            if (audioRecorder.getState() != AudioRecord.STATE_INITIALIZED)
                throw new Exception("AudioRecord initialization failed");
            audioRecorder.setRecordPositionUpdateListener(updateListener);
            audioRecorder.setPositionNotificationPeriod(framePeriod);
        } else { // RECORDING_COMPRESSED
            mediaRecorder = new MediaRecorder();
            mediaRecorder.setAudioSource(MediaRecorder.AudioSource.MIC);
            mediaRecorder.setOutputFormat(MediaRecorder.OutputFormat.THREE_GPP);
            mediaRecorder.setAudioEncoder(MediaRecorder.AudioEncoder.AMR_NB);
        }
        cAmplitude = 0;
        filePath = null;
        state = State.INITIALIZING;
    } catch (Exception e) {
        if (e.getMessage() != null) {
            Log.e(ExtAudioRecorder.class.getName(), e.getMessage());
        } else {
            Log.e(ExtAudioRecorder.class.getName(),
                    "Unknown error occurred while initializing recording");
        }
        state = State.ERROR;
    }
}
Example 13: startCapture

import android.media.AudioRecord; // import the package/class the method depends on
public boolean startCapture(int audioSource, int sampleRateInHz, int channelConfig, int audioFormat) {
    if (mIsCaptureStarted) {
        Log.e(TAG, "hujd Capture already started !");
        return false;
    }
    mMinBufferSize = AudioRecord.getMinBufferSize(sampleRateInHz, channelConfig, audioFormat);
    if (mMinBufferSize == AudioRecord.ERROR_BAD_VALUE) {
        Log.e(TAG, "hujd Invalid parameter !");
        return false;
    }
    Log.e(TAG, "hujd getMinBufferSize = " + mMinBufferSize + " bytes !");
    mAudioRecord = new AudioRecord(audioSource, sampleRateInHz, channelConfig, audioFormat, mMinBufferSize);
    if (mAudioRecord.getState() == AudioRecord.STATE_UNINITIALIZED) {
        Log.e(TAG, "hujd AudioRecord initialize fail !");
        return false;
    }
    mAudioRecord.startRecording();
    mIsLoopExit = false;
    mCaptureThread = new Thread(new AudioCaptureRunnable());
    mCaptureThread.start();
    mIsCaptureStarted = true;
    Log.e(TAG, "hujd Start audio capture success !");
    return true;
}