This article collects typical usage examples of the Java field android.media.AudioRecord.ERROR_BAD_VALUE. If you are wondering what AudioRecord.ERROR_BAD_VALUE is for and how to use it, the curated code samples below should help. You can also read more about the enclosing class, android.media.AudioRecord.
The following 14 code examples show AudioRecord.ERROR_BAD_VALUE in use, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java samples.
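Before the individual examples, here is a minimal sketch of the pattern they all share: AudioRecord.getMinBufferSize() returns AudioRecord.ERROR_BAD_VALUE when the requested sample rate, channel configuration, or encoding is not supported, so the return value is validated before an AudioRecord is constructed. The helper name checkedMinBufferSize and the 44.1 kHz / mono / 16-bit parameters in the commented call site are illustrative assumptions, not taken from any of the examples below.

import android.media.AudioRecord;

public final class MinBufferSizeCheck {

    /**
     * Returns a validated minimum buffer size in bytes, or -1 if the configuration is
     * unsupported or the hardware could not be queried.
     */
    public static int checkedMinBufferSize(int sampleRate, int channelConfig, int encoding) {
        int size = AudioRecord.getMinBufferSize(sampleRate, channelConfig, encoding);
        if (size == AudioRecord.ERROR_BAD_VALUE) {
            // The rate/channel/encoding combination is not supported on this device.
            return -1;
        }
        if (size == AudioRecord.ERROR) {
            // The hardware could not be queried; the caller must choose a fallback size.
            return -1;
        }
        return size;
    }

    private MinBufferSizeCheck() {
    }
}

// Illustrative call site: 44.1 kHz, mono, 16-bit PCM.
// int size = MinBufferSizeCheck.checkedMinBufferSize(
//         44100, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);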
Example 1: createAudioRecord
/**
 * Creates a new {@link AudioRecord}.
 *
 * @return A newly created {@link AudioRecord}, or null if it cannot be created (missing
 * permissions?).
 */
private AudioRecord createAudioRecord() {
    for (int sampleRate : SAMPLE_RATE_CANDIDATES) {
        final int sizeInBytes = AudioRecord.getMinBufferSize(sampleRate, CHANNEL, ENCODING);
        if (sizeInBytes == AudioRecord.ERROR_BAD_VALUE) {
            continue;
        }
        final AudioRecord audioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC,
                sampleRate, CHANNEL, ENCODING, sizeInBytes);
        if (audioRecord.getState() == AudioRecord.STATE_INITIALIZED) {
            mBuffer = new byte[sizeInBytes];
            return audioRecord;
        } else {
            audioRecord.release();
        }
    }
    return null;
}
Example 2: run
@Override
public void run() {
    try {
        android.os.Process.setThreadPriority(android.os.Process.THREAD_PRIORITY_URGENT_AUDIO);
        int bytesRecord;
        byte[] tempBuffer = new byte[bufferSize];
        mRecorder.startRecording();
        while (isStart) {
            if (mRecorder != null) {
                bytesRecord = mRecorder.read(tempBuffer, 0, bufferSize);
                if (bytesRecord == AudioRecord.ERROR_INVALID_OPERATION
                        || bytesRecord == AudioRecord.ERROR_BAD_VALUE) {
                    continue;
                }
                if (bytesRecord != 0 && bytesRecord != -1) { // -1 is AudioRecord.ERROR
                    dos.write(tempBuffer, 0, bytesRecord);
                } else {
                    break;
                }
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Example 3: run
@Override
public void run() {
    while (!mIsLoopExit) {
        byte[] buffer = new byte[mMinBufferSize];
        int ret = mAudioRecord.read(buffer, 0, mMinBufferSize);
        if (ret == AudioRecord.ERROR_INVALID_OPERATION) {
            Log.e(TAG, "Error ERROR_INVALID_OPERATION");
        } else if (ret == AudioRecord.ERROR_BAD_VALUE) {
            Log.e(TAG, "Error ERROR_BAD_VALUE");
        } else {
            if (mAudioFrameCapturedListener != null) {
                mAudioFrameCapturedListener.onAudioFrameCaptured(buffer);
            }
            Log.d(TAG, "OK, Captured " + ret + " bytes !");
        }
        SystemClock.sleep(10);
    }
}
Example 4: read
private int read(AudioRecord recorder) {
    int numberOfBytes = recorder.read(mBuffer, 0, mBuffer.length); // Fill buffer
    // Some error checking
    if (numberOfBytes == AudioRecord.ERROR_INVALID_OPERATION) {
        Log.e(LOG_TAG, "The AudioRecord object was not properly initialized");
        return -1;
    } else if (numberOfBytes == AudioRecord.ERROR_BAD_VALUE) {
        Log.e(LOG_TAG, "The parameters do not resolve to valid data and indexes.");
        return -2;
    } else if (numberOfBytes > mBuffer.length) {
        Log.e(LOG_TAG, "Read more bytes than is buffer length:" + numberOfBytes + ": " + mBuffer.length);
        return -3;
    } else if (numberOfBytes == 0) {
        Log.e(LOG_TAG, "Read zero bytes");
        return -4;
    }
    // Everything seems to be OK, adding the buffer to the recording.
    add(mBuffer);
    return 0;
}
Example 5: run
@Override
public void run() {
    while (!mIsLoopExit) {
        byte[] buffer = new byte[SAMPLES_PER_FRAME * 2];
        int ret = mAudioRecord.read(buffer, 0, buffer.length);
        if (ret == AudioRecord.ERROR_INVALID_OPERATION) {
            Log.e(TAG, "Error ERROR_INVALID_OPERATION");
        } else if (ret == AudioRecord.ERROR_BAD_VALUE) {
            Log.e(TAG, "Error ERROR_BAD_VALUE");
        } else {
            if (mOnAudioFrameCapturedListener != null) {
                mOnAudioFrameCapturedListener.onAudioFrameCaptured(buffer, System.nanoTime());
            }
        }
    }
}
Example 6: findAudioRecord
public AudioRecord findAudioRecord() {
    for (int rate : mSampleRates) {
        for (short audioFormat : new short[] { AudioFormat.ENCODING_PCM_16BIT }) {
            for (short channelConfig : new short[] { AudioFormat.CHANNEL_IN_MONO }) {
                try {
                    Log.d("C.TAG", "Attempting rate " + rate + "Hz, bits: " + audioFormat + ", channel: "
                            + channelConfig);
                    // Query and instantiate with the same rate/channel/format the loops select.
                    int bufferSize = AudioRecord.getMinBufferSize(rate, channelConfig, audioFormat);
                    if (bufferSize != AudioRecord.ERROR_BAD_VALUE) {
                        // check if we can instantiate and have a success
                        AudioRecord recorder = new AudioRecord(AudioSource.MIC, rate, channelConfig, audioFormat, bufferSize);
                        if (recorder.getState() == AudioRecord.STATE_INITIALIZED)
                            return recorder;
                        recorder.release(); // release recorders that failed to initialize
                    }
                } catch (Exception e) {
                    Log.e("C.TAG", rate + " Exception, keep trying.", e);
                }
            }
        }
    }
    return null;
}
Example 7: record
public void record() {
    final ByteBuffer bytebuffer = ByteBuffer.allocateDirect(SAMPLES_PER_FRAME);
    int bufferReadResult;
    while (isRecording) {
        bytebuffer.clear();
        bufferReadResult = mAudioRecord.read(bytebuffer, SAMPLES_PER_FRAME);
        if (bufferReadResult == AudioRecord.ERROR_INVALID_OPERATION || bufferReadResult == AudioRecord.ERROR_BAD_VALUE) {
            // Ignore read errors and keep looping.
        } else if (bufferReadResult >= 0) {
            //LogUtil.d(TAG, "bytes read "+bufferReadResult);
            // todo send this byte array to an audio encoder
            bytebuffer.position(bufferReadResult);
            bytebuffer.flip();
            byte[] bytes = new byte[bytebuffer.remaining()];
            bytebuffer.get(bytes);
            // Re-prepare the buffer so the encoder reads the same frame.
            bytebuffer.position(bufferReadResult);
            bytebuffer.flip();
            yixiaAudioEncoder.encode(bytebuffer, bufferReadResult, yixiaAudioEncoder.getPTSUs());
        }
    }
}
Example 8: setBufferSizeAndFramePeriod
private void setBufferSizeAndFramePeriod() {
    int minBufferSizeInBytes = AudioRecord.getMinBufferSize(mSampleRate, AudioFormat.CHANNEL_IN_DEFAULT, RESOLUTION);
    if (minBufferSizeInBytes == AudioRecord.ERROR_BAD_VALUE) {
        throw new IllegalArgumentException("AudioRecord.getMinBufferSize: parameters not supported by hardware");
    } else if (minBufferSizeInBytes == AudioRecord.ERROR) {
        Log.e(LOG_TAG, "AudioRecord.getMinBufferSize: unable to query hardware for output properties");
        // Fall back to a 120 ms buffer; divide last so integer division does not truncate 120 / 1000 to zero.
        minBufferSizeInBytes = mSampleRate * 120 / 1000 * RESOLUTION_IN_BYTES * CHANNELS;
    }
    mBufferSize = 2 * minBufferSizeInBytes;
    mFramePeriod = mBufferSize / (2 * RESOLUTION_IN_BYTES * CHANNELS);
    Log.i(LOG_TAG, "AudioRecord buffer size: " + mBufferSize + ", min size = " + minBufferSizeInBytes);
}
Example 9: startCapture
public boolean startCapture(int audioSource, int sampleRateInHz, int channelConfig, int audioFormat) {
    if (mIsCaptureStarted) {
        Log.e(TAG, "Capture already started !");
        return false;
    }
    int minBufferSize = AudioRecord.getMinBufferSize(sampleRateInHz, channelConfig, audioFormat);
    if (minBufferSize == AudioRecord.ERROR_BAD_VALUE) {
        Log.e(TAG, "Invalid parameter !");
        return false;
    }
    mAudioRecord = new AudioRecord(audioSource, sampleRateInHz, channelConfig, audioFormat, minBufferSize * 4);
    if (mAudioRecord.getState() == AudioRecord.STATE_UNINITIALIZED) {
        Log.e(TAG, "AudioRecord initialize fail !");
        return false;
    }
    mAudioRecord.startRecording();
    mIsLoopExit = false;
    mCaptureThread = new Thread(new AudioCaptureRunnable());
    mCaptureThread.start();
    mIsCaptureStarted = true;
    Log.d(TAG, "Start audio capture success !");
    return true;
}
Example 10: getBufferSize
private int getBufferSize() {
    // getMinBufferSize() returns the minimum buffer size necessary for audio recording.
    int minBufferSizeInBytes = AudioRecord.getMinBufferSize(mSampleRate, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
    if (minBufferSizeInBytes == AudioRecord.ERROR_BAD_VALUE) {
        throw new IllegalArgumentException("SpeechRecord.getMinBufferSize: parameters not supported by hardware");
    } else if (minBufferSizeInBytes == AudioRecord.ERROR) {
        // Log.e("SpeechRecord.getMinBufferSize: unable to query hardware for output properties");
        // Fall back to a 120 ms buffer; divide last so integer division does not truncate to zero.
        minBufferSizeInBytes = mSampleRate * 120 / 1000 * RESOLUTION_IN_BYTES * CHANNELS;
    }
    // The reason for BUFFER_SIZE_MUTLIPLIER = 4 is not documented.
    int bufferSize = BUFFER_SIZE_MUTLIPLIER * minBufferSizeInBytes;
    // Log.i("SpeechRecord buffer size: " + bufferSize + ", min size = " + minBufferSizeInBytes);
    return bufferSize;
}
Example 11: start
public void start() {
    mAudioRecord.startRecording();
    mMediaCodec.start();
    final long startWhen = System.nanoTime();
    final ByteBuffer[] inputBuffers = mMediaCodec.getInputBuffers();
    mThread = new Thread(new Runnable() {
        @Override
        public void run() {
            int len, bufferIndex;
            while (isStart && !Thread.interrupted()) {
                synchronized (mMediaCodec) {
                    if (!isStart) return;
                    bufferIndex = mMediaCodec.dequeueInputBuffer(10000);
                    if (bufferIndex >= 0) {
                        inputBuffers[bufferIndex].clear();
                        long presentationTimeNs = System.nanoTime();
                        len = mAudioRecord.read(inputBuffers[bufferIndex], bufferSize);
                        // Back-date the timestamp by the duration of the data just read.
                        // Assumes 16-bit mono PCM (2 bytes per sample); the original
                        // expression truncated to zero because of integer division.
                        if (len > 0) {
                            presentationTimeNs -= len * 1_000_000_000L / (2L * samplingRate);
                        }
                        Loggers.i(TAG, "Index: " + bufferIndex + " len: " + len + " buffer_capacity: " + inputBuffers[bufferIndex].capacity());
                        long presentationTimeUs = (presentationTimeNs - startWhen) / 1000;
                        if (len == AudioRecord.ERROR_INVALID_OPERATION || len == AudioRecord.ERROR_BAD_VALUE) {
                            Log.e(TAG, "An error occurred with the AudioRecord API !");
                        } else {
                            mMediaCodec.queueInputBuffer(bufferIndex, 0, len, presentationTimeUs, 0);
                            if (onDataComingCallback != null) {
                                onDataComingCallback.onComing();
                            }
                        }
                    }
                }
            }
        }
    });
    // Mark as started before launching the thread so its loop condition is already true.
    isStart = true;
    mThread.start();
}
Example 12: findAudioRecord
/**
 * Finds a usable audio recorder.
 *
 * @return an initialized {@link AudioRecord}, or null if none of the tried configurations works
 */
private AudioRecord findAudioRecord() {
    int[] samplingRates = new int[]{44100, 22050, 11025, 8000};
    int[] audioFormats = new int[]{
            AudioFormat.ENCODING_PCM_16BIT,
            AudioFormat.ENCODING_PCM_8BIT};
    int[] channelConfigs = new int[]{
            AudioFormat.CHANNEL_IN_STEREO,
            AudioFormat.CHANNEL_IN_MONO};
    for (int rate : samplingRates) {
        for (int format : audioFormats) {
            for (int config : channelConfigs) {
                try {
                    int bufferSize = AudioRecord.getMinBufferSize(rate, config, format);
                    if (bufferSize != AudioRecord.ERROR_BAD_VALUE) {
                        for (int source : AUDIO_SOURCES) {
                            AudioRecord recorder = new AudioRecord(source, rate, config, format, bufferSize * 4);
                            if (recorder.getState() == AudioRecord.STATE_INITIALIZED) {
                                return recorder;
                            }
                            recorder.release(); // release recorders that failed to initialize
                        }
                    }
                } catch (Exception e) {
                    Log.e(TAG, "Init AudioRecord Error." + Log.getStackTraceString(e));
                }
            }
        }
    }
    return null;
}
Example 13: calculateBufferSize
/**
 * Calculate the buffer size.
 *
 * @return the calculated buffer size
 */
private int calculateBufferSize() {
    framePeriod = sampleRateInHz * TIMER_INTERVAL / 1000;
    int bufferSize = framePeriod * 2 * bSamples * nChannels / 8;
    if (DEBUG) {
        MyLog.i(CLS_NAME, "bufferSize: " + bufferSize);
    }
    final int minBuff = AudioRecord.getMinBufferSize(sampleRateInHz, channelConfig, audioFormat);
    switch (minBuff) {
        case AudioRecord.ERROR:
        case AudioRecord.ERROR_BAD_VALUE:
            if (DEBUG) {
                MyLog.w(CLS_NAME, "AudioRecord.ERROR/ERROR_BAD_VALUE");
            }
            break;
        default:
            if (DEBUG) {
                MyLog.i(CLS_NAME, "minBuff: " + minBuff);
            }
            if (bufferSize < minBuff) {
                bufferSize = minBuff;
                // Unused for now
                framePeriod = bufferSize / (2 * bSamples * nChannels / 8);
            }
            break;
    }
    if (DEBUG) {
        MyLog.i(CLS_NAME, "bufferSize returning: " + bufferSize);
    }
    return bufferSize;
}
Example 14: startCapture
public boolean startCapture(int audioSource, int sampleRateInHz, int channelConfig, int audioFormat) {
    if (mIsCaptureStarted) {
        Log.e(TAG, "hujd Capture already started !");
        return false;
    }
    mMinBufferSize = AudioRecord.getMinBufferSize(sampleRateInHz, channelConfig, audioFormat);
    if (mMinBufferSize == AudioRecord.ERROR_BAD_VALUE) {
        Log.e(TAG, "hujd Invalid parameter !");
        return false;
    }
    Log.e(TAG, "hujd getMinBufferSize = " + mMinBufferSize + " bytes !");
    mAudioRecord = new AudioRecord(audioSource, sampleRateInHz, channelConfig, audioFormat, mMinBufferSize);
    if (mAudioRecord.getState() == AudioRecord.STATE_UNINITIALIZED) {
        Log.e(TAG, "hujd AudioRecord initialize fail !");
        return false;
    }
    mAudioRecord.startRecording();
    mIsLoopExit = false;
    mCaptureThread = new Thread(new AudioCaptureRunnable());
    mCaptureThread.start();
    mIsCaptureStarted = true;
    Log.e(TAG, "hujd Start audio capture success !");
    return true;
}