This article collects typical usage examples of the Java class android.media.AudioRecord. If you are wondering how the AudioRecord class is used in practice, how to call it, or what real-world examples look like, the curated code samples below may help.
The AudioRecord class belongs to the android.media package. Fifteen code examples of the class are shown below, sorted by popularity by default.
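Before the individual examples, here is a minimal sketch of the typical AudioRecord lifecycle (the sample rate and format values are illustrative assumptions, and the RECORD_AUDIO permission must already be granted):

import android.media.AudioFormat;
import android.media.AudioRecord;
import android.media.MediaRecorder;

int sampleRate = 16000; // assumed sample rate for illustration
int minBuf = AudioRecord.getMinBufferSize(sampleRate,
        AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
AudioRecord record = new AudioRecord(MediaRecorder.AudioSource.MIC, sampleRate,
        AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, minBuf);
if (record.getState() == AudioRecord.STATE_INITIALIZED) {
    record.startRecording();
    byte[] buffer = new byte[minBuf];
    int n = record.read(buffer, 0, buffer.length); // normally called in a loop on a worker thread
    record.stop();
    record.release();
}

Most of the examples below are variations on this pattern: compute a minimum buffer size, construct the recorder, check its state, then read on a background thread.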
Example 1: start
import android.media.AudioRecord; // import the required package/class
/**
 * <p>Starts the recording, and sets the state to RECORDING.</p>
 */
public void start() {
    if (mRecorder.getState() == AudioRecord.STATE_INITIALIZED) {
        mRecorder.startRecording();
        if (mRecorder.getRecordingState() == AudioRecord.RECORDSTATE_RECORDING) {
            setState(State.RECORDING);
            new Thread() {
                public void run() {
                    while (mRecorder != null && mRecorder.getRecordingState() == AudioRecord.RECORDSTATE_RECORDING) {
                        int status = read(mRecorder);
                        if (status < 0) {
                            break;
                        }
                    }
                }
            }.start();
        } else {
            Log.e(LOG_TAG, "startRecording() failed");
            setState(State.ERROR);
        }
    } else {
        Log.e(LOG_TAG, "start() called on illegal state");
        setState(State.ERROR);
    }
}
Example 2: createAudioRecord
import android.media.AudioRecord; // import the required package/class
/**
 * Creates a new {@link AudioRecord}.
 *
 * @return A newly created {@link AudioRecord}, or null if it cannot be created (missing
 * permissions?).
 */
private AudioRecord createAudioRecord() {
    for (int sampleRate : SAMPLE_RATE_CANDIDATES) {
        final int sizeInBytes = AudioRecord.getMinBufferSize(sampleRate, CHANNEL, ENCODING);
        if (sizeInBytes == AudioRecord.ERROR_BAD_VALUE) {
            continue;
        }
        final AudioRecord audioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC,
                sampleRate, CHANNEL, ENCODING, sizeInBytes);
        if (audioRecord.getState() == AudioRecord.STATE_INITIALIZED) {
            mBuffer = new byte[sizeInBytes];
            return audioRecord;
        } else {
            audioRecord.release();
        }
    }
    return null;
}
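The constants referenced above are defined outside the snippet; the following definitions are an assumption of what they might look like, not part of the original example:

// Assumed values, for illustration only:
private static final int[] SAMPLE_RATE_CANDIDATES = new int[]{16000, 11025, 22050, 44100};
private static final int CHANNEL = AudioFormat.CHANNEL_IN_MONO;
private static final int ENCODING = AudioFormat.ENCODING_PCM_16BIT;

Trying several sample rates and keeping the first AudioRecord that reaches STATE_INITIALIZED is a common way to cope with devices that only support a subset of rates.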
Example 3: run
import android.media.AudioRecord; // import the required package/class
@Override
public void run() {
    try {
        android.os.Process.setThreadPriority(android.os.Process.THREAD_PRIORITY_URGENT_AUDIO);
        int bytesRecord;
        byte[] tempBuffer = new byte[bufferSize];
        mRecorder.startRecording();
        while (isStart) {
            if (mRecorder != null) {
                bytesRecord = mRecorder.read(tempBuffer, 0, bufferSize);
                if (bytesRecord == AudioRecord.ERROR_INVALID_OPERATION
                        || bytesRecord == AudioRecord.ERROR_BAD_VALUE) {
                    continue;
                }
                if (bytesRecord != 0 && bytesRecord != -1) {
                    dos.write(tempBuffer, 0, bytesRecord);
                } else {
                    break;
                }
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Example 4: run
import android.media.AudioRecord; // import the required package/class
@Override
public void run() {
    ByteBuffer audioData = ByteBuffer.allocateDirect(SAMPLE_BLOCK_SIZE);
    if (mAudioInputDevice != null) {
        mAudioRecord.setPreferredDevice(mAudioInputDevice);
    }
    int result =
            mAudioRecord.read(audioData, audioData.capacity(), AudioRecord.READ_BLOCKING);
    if (result < 0) {
        Log.e(TAG, "error reading from audio stream:" + result);
        return;
    }
    Log.d(TAG, "streaming ConverseRequest: " + result);
    mAssistantRequestObserver.onNext(ConverseRequest.newBuilder()
            .setAudioIn(ByteString.copyFrom(audioData))
            .build());
    mAssistantHandler.post(mStreamAssistantRequest);
}
Example 5: run
import android.media.AudioRecord; // import the required package/class
@Override
public void run() {
    while (!mIsLoopExit) {
        byte[] buffer = new byte[mMinBufferSize];
        int ret = mAudioRecord.read(buffer, 0, mMinBufferSize);
        if (ret == AudioRecord.ERROR_INVALID_OPERATION) {
            Log.e(TAG, "Error ERROR_INVALID_OPERATION");
        } else if (ret == AudioRecord.ERROR_BAD_VALUE) {
            Log.e(TAG, "Error ERROR_BAD_VALUE");
        } else {
            if (mAudioFrameCapturedListener != null) {
                mAudioFrameCapturedListener.onAudioFrameCaptured(buffer);
            }
            Log.d(TAG, "OK, Captured " + ret + " bytes !");
        }
        SystemClock.sleep(10);
    }
}
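The fields used by this loop (mAudioRecord, mMinBufferSize, the captured-frame listener) are assumed to be set up elsewhere; one plausible sketch, with illustrative parameters and a hypothetical listener interface named after the call site above:

// Hypothetical setup, not from the original snippet:
mMinBufferSize = AudioRecord.getMinBufferSize(44100,
        AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
mAudioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC, 44100,
        AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, mMinBufferSize);
mAudioRecord.startRecording();

// A plausible shape for the callback interface (name is an assumption):
public interface OnAudioFrameCapturedListener {
    void onAudioFrameCaptured(byte[] audioData);
}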
Example 6: stopRecord
import android.media.AudioRecord; // import the required package/class
/**
 * stop record
 *
 * @throws IOException
 * @throws InterruptedException
 */
public void stopRecord() throws IOException, InterruptedException {
    // extra delay needed on some devices (OPPO, XIAOMI, MEIZU, HUAWEI and so on)
    Thread.sleep(250);
    destroyThread();
    if (mRecorder != null) {
        if (mRecorder.getState() == AudioRecord.STATE_INITIALIZED) {
            mRecorder.stop();
        }
        if (mRecorder != null) {
            mRecorder.release();
        }
    }
    if (dos != null) {
        dos.flush();
        dos.close();
    }
    length = file.length();
    deleteFile();
}
Example 7: onCreate
import android.media.AudioRecord; // import the required package/class
@Override
public void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    setContentView(R.layout.activity_main);
    bufferSize = AudioRecord.getMinBufferSize(16000,
            AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT);
    setButtonHandlers();
    enableButtons(false);
    inferenceInterface = new TensorFlowInferenceInterface();
    inferenceInterface.initializeTensorFlow(getAssets(), MODEL_FILE);
    // tensorFlowSample();
}
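This onCreate only computes bufferSize; a hedged sketch of how that buffer size might later feed an AudioRecord with matching parameters (16 kHz, mono, 16-bit PCM), where the recorder variable and the place this runs are assumptions:

// Illustrative only, not part of the original example:
AudioRecord recorder = new AudioRecord(MediaRecorder.AudioSource.MIC, 16000,
        AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, bufferSize);
short[] audioBuffer = new short[bufferSize / 2]; // 2 bytes per 16-bit sample
recorder.startRecording();
int read = recorder.read(audioBuffer, 0, audioBuffer.length); // samples would then be fed to the model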
Example 8: startRecording
import android.media.AudioRecord; // import the required package/class
private void startRecording() {
    recorder = new AudioRecord(MediaRecorder.AudioSource.MIC,
            RECORDER_SAMPLERATE, RECORDER_CHANNELS, RECORDER_AUDIO_ENCODING, bufferSize);
    if (recorder.getState() == AudioRecord.STATE_INITIALIZED) {
        recorder.startRecording();
    }
    isRecording = true;
    recordingThread = new Thread(new Runnable() {
        @Override
        public void run() {
            writeAudioDataToArrayList();
            reorganiseDataArray(flattenArray());
        }
    }, "AudioRecorder Thread");
    recordingThread.start();
}
Example 9: listen
import android.media.AudioRecord; // import the required package/class
public byte[] listen() {
    mReceivedBytes = new byte[]{};
    mFinished = false;
    if (mAudioRec.getRecordingState() != AudioRecord.RECORDSTATE_RECORDING) {
        mAudioRec.startRecording();
    }
    while (true) {
        if (mFinished) break;
        // short[] audioData = readAudioData();
        // window(audioData);
        //
        // int[] powerlist = new int[18];
        // for (int i = 0; i < powerlist.length; i++) {
        //     powerlist[i] = goertzel(CHAR_FREQ[i]);
        // }
        // int base = goertzel(BASELINE);
        //
        // updateState(powerlist, base);
        // signalToBits();
        // processByte();
    }
    return mReceivedBytes;
}
Example 10: read
import android.media.AudioRecord; // import the required package/class
private int read(AudioRecord recorder) {
    int numberOfBytes = recorder.read(mBuffer, 0, mBuffer.length); // Fill buffer
    // Some error checking
    if (numberOfBytes == AudioRecord.ERROR_INVALID_OPERATION) {
        Log.e(LOG_TAG, "The AudioRecord object was not properly initialized");
        return -1;
    } else if (numberOfBytes == AudioRecord.ERROR_BAD_VALUE) {
        Log.e(LOG_TAG, "The parameters do not resolve to valid data and indexes.");
        return -2;
    } else if (numberOfBytes > mBuffer.length) {
        Log.e(LOG_TAG, "Read more bytes than is buffer length:" + numberOfBytes + ": " + mBuffer.length);
        return -3;
    } else if (numberOfBytes == 0) {
        Log.e(LOG_TAG, "Read zero bytes");
        return -4;
    }
    // Everything seems to be OK, adding the buffer to the recording.
    add(mBuffer);
    return 0;
}
Example 11: stopAudioRecord
import android.media.AudioRecord; // import the required package/class
private boolean stopAudioRecord(Context context)
{
    try
    {
        if (audioRecord.getRecordingState() == AudioRecord.RECORDSTATE_RECORDING)
        {
            isAudioRecordRunning = false;
            audioRecord.setRecordPositionUpdateListener(null);
            audioRecord.stop();
            audioRecordRandomAccessFile.seek(4);
            audioRecordRandomAccessFile.writeInt(Integer.reverseBytes(36 + audioRecordPayloadSize)); // 04 - Size of the overall file
            audioRecordRandomAccessFile.seek(40);
            audioRecordRandomAccessFile.writeInt(Integer.reverseBytes(audioRecordPayloadSize)); // 40 - Size of the data section
            audioRecordRandomAccessFile.close();
        }
    }
    catch (Exception e)
    {
        Log.w("RecordFileWriter", "stopAudioRecord : " + context.getString(R.string.log_record_file_writer_error_stop_audiorecord) + " : " + e);
        databaseManager.insertLog(context, "" + context.getString(R.string.log_record_file_writer_error_stop_audiorecord), new Date().getTime(), 2, false);
        return false;
    }
    return true;
}
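The two seek offsets above only make sense against a canonical 44-byte PCM WAV header, where the RIFF chunk size sits at byte 4 (36 plus the payload size) and the data chunk size at byte 40. A sketch of writing such a header when recording starts, with both size fields left at zero to be patched later (the helper name and parameters are assumptions, not taken from this example):

// Hypothetical helper; WAV fields are little-endian, hence Integer/Short.reverseBytes:
private void writeWavHeader(RandomAccessFile raf, int sampleRate, int channels) throws IOException {
    raf.writeBytes("RIFF");
    raf.writeInt(0);                                               // offset 4: 36 + data size, patched on stop
    raf.writeBytes("WAVE");
    raf.writeBytes("fmt ");
    raf.writeInt(Integer.reverseBytes(16));                        // fmt chunk size for PCM
    raf.writeShort(Short.reverseBytes((short) 1));                 // audio format: PCM
    raf.writeShort(Short.reverseBytes((short) channels));
    raf.writeInt(Integer.reverseBytes(sampleRate));
    raf.writeInt(Integer.reverseBytes(sampleRate * channels * 2)); // byte rate for 16-bit samples
    raf.writeShort(Short.reverseBytes((short) (channels * 2)));    // block align
    raf.writeShort(Short.reverseBytes((short) 16));                // bits per sample
    raf.writeBytes("data");
    raf.writeInt(0);                                               // offset 40: data size, patched on stop
}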
Example 12: isRecodingStopped
import android.media.AudioRecord; // import the required package/class
private boolean isRecodingStopped() {
    if (mRecordState == RecordState.STOPPING) {
        // AudioRecord has been released, which means the recorder thread is stopped.
        if (mRecord == null) {
            return true;
        } else {
            // Check the recording state if the recorder thread is still running.
            // (getRecordingState() is the call that returns RECORDSTATE_* values.)
            if (mRecord.getRecordingState() != AudioRecord.RECORDSTATE_RECORDING) {
                return true;
            }
        }
    } else if (mRecordState == RecordState.STOPPED) {
        return true;
    }
    return false;
}
Example 13: startListening
import android.media.AudioRecord; // import the required package/class
public void startListening() {
    synchronized (API_LOCK) {
        if (this.disabled) {
            return;
        }
        if (!isListening) {
            recorder = new AudioRecord(MediaRecorder.AudioSource.MIC,
                    RECORDER_SAMPLERATE, RECORDER_CHANNELS,
                    RECORDER_AUDIO_ENCODING, ELEMENTS_TO_RECORD * BYTES_PER_ELEMENT);
            recorder.startRecording();
            isListening = true;
            if (!isRecording) {
                isRecording = true;
                recordingThread = new Thread(new Runnable() {
                    public void run() {
                        detectHotword();
                    }
                }, "AudioRecorder Thread");
                recordingThread.start();
            }
        }
    }
}
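RECORDER_SAMPLERATE, RECORDER_CHANNELS, RECORDER_AUDIO_ENCODING, ELEMENTS_TO_RECORD and BYTES_PER_ELEMENT are defined outside this snippet; one plausible set of definitions, assuming roughly one second of 16-bit mono capture at 16 kHz:

// Assumed constants, not from the original example:
private static final int RECORDER_SAMPLERATE = 16000;
private static final int RECORDER_CHANNELS = AudioFormat.CHANNEL_IN_MONO;
private static final int RECORDER_AUDIO_ENCODING = AudioFormat.ENCODING_PCM_16BIT;
private static final int ELEMENTS_TO_RECORD = 16000; // one second of samples at 16 kHz
private static final int BYTES_PER_ELEMENT = 2;      // 16-bit PCM = 2 bytes per sample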
Example 14: stopListening
import android.media.AudioRecord; // import the required package/class
public void stopListening() {
    synchronized (API_LOCK) {
        // stops the recording activity
        isListening = false;
        isRecording = false;
        if (recordingThread != null) {
            try {
                recordingThread.join();
            } catch (InterruptedException e) {
                Log.e(TAG, "Failed to join recordingThread", e);
            }
            recordingThread = null;
        }
        if (null != recorder) {
            if (recorder.getState() == AudioRecord.STATE_INITIALIZED) {
                recorder.stop();
            }
            recorder.release();
        }
    }
}
Example 15: run
import android.media.AudioRecord; // import the required package/class
@Override
public void run() {
    ByteBuffer audioData = ByteBuffer.allocateDirect(AUDIO_RECORD_BLOCK_SIZE);
    int result = mAudioRecord.read(audioData, audioData.capacity(),
            AudioRecord.READ_BLOCKING);
    if (result < 0) {
        return;
    }
    mRequestHandler.post(new Runnable() {
        @Override
        public void run() {
            mRequestCallback.onAudioRecording();
        }
    });
    mAssistantRequestObserver.onNext(ConverseRequest.newBuilder()
            .setAudioIn(ByteString.copyFrom(audioData))
            .build());
    mAssistantHandler.post(mStreamAssistantRequest);
}