This article collects typical usage examples of the Java method android.media.AudioRecord.startRecording. If you are wondering how to use AudioRecord.startRecording in Java, what it does, or want to see it in real code, the curated method examples below may help. You can also read further about usage of the enclosing class, android.media.AudioRecord.
A total of 11 code examples of the AudioRecord.startRecording method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
Example 1: startRecording
import android.media.AudioRecord; // import the package/class the method depends on

private void startRecording(final String filename) {
    recorder = new AudioRecord(MediaRecorder.AudioSource.MIC,
            RECORDER_SAMPLERATE,
            RECORDER_CHANNELS,
            RECORDER_AUDIO_ENCODING,
            bufferSize);

    int i = recorder.getState();
    if (i == 1)  // 1 == AudioRecord.STATE_INITIALIZED
        recorder.startRecording();

    isRecording = true;
    recordingThread = new Thread(new Runnable() {
        @Override
        public void run() {
            writeAudioDataToFile(filename);
        }
    }, "AudioRecorder Thread");
    recordingThread.start();
}
Example 2: startRecording
import android.media.AudioRecord; // import the package/class the method depends on

private void startRecording() {
    recorder = new AudioRecord(MediaRecorder.AudioSource.MIC,
            RECORDER_SAMPLERATE, RECORDER_CHANNELS, RECORDER_AUDIO_ENCODING, bufferSize);

    int i = recorder.getState();
    if (i == 1)  // 1 == AudioRecord.STATE_INITIALIZED
        recorder.startRecording();

    isRecording = true;
    recordingThread = new Thread(new Runnable() {
        @Override
        public void run() {
            writeAudioDataToArrayList();
            reorganiseDataArray(flattenArray());
        }
    }, "AudioRecorder Thread");
    recordingThread.start();
}
Example 3: startListening
import android.media.AudioRecord; // import the package/class the method depends on

public void startListening() {
    synchronized (API_LOCK) {
        if (this.disabled) {
            return;
        }
        if (!isListening) {
            recorder = new AudioRecord(MediaRecorder.AudioSource.MIC,
                    RECORDER_SAMPLERATE, RECORDER_CHANNELS,
                    RECORDER_AUDIO_ENCODING, ELEMENTS_TO_RECORD * BYTES_PER_ELEMENT);
            recorder.startRecording();
            isListening = true;
            if (!isRecording) {
                isRecording = true;
                recordingThread = new Thread(new Runnable() {
                    public void run() {
                        detectHotword();
                    }
                }, "AudioRecorder Thread");
                recordingThread.start();
            }
        }
    }
}
Example 4: fromDefaultMicrophone
import android.media.AudioRecord; // import the package/class the method depends on

/**
 * Create a new AudioDispatcher connected to the default microphone.
 *
 * @param sampleRate
 *            The requested sample rate.
 * @param audioBufferSize
 *            The size of the audio buffer (in samples).
 * @param bufferOverlap
 *            The size of the overlap (in samples).
 * @return A new AudioDispatcher
 */
public static AudioDispatcher fromDefaultMicrophone(final int sampleRate,
        final int audioBufferSize, final int bufferOverlap) {
    int minAudioBufferSize = AudioRecord.getMinBufferSize(sampleRate,
            android.media.AudioFormat.CHANNEL_IN_MONO,
            android.media.AudioFormat.ENCODING_PCM_16BIT);
    int minAudioBufferSizeInSamples = minAudioBufferSize / 2;
    if (minAudioBufferSizeInSamples <= audioBufferSize) {
        AudioRecord audioInputStream = new AudioRecord(
                MediaRecorder.AudioSource.MIC, sampleRate,
                android.media.AudioFormat.CHANNEL_IN_MONO,
                android.media.AudioFormat.ENCODING_PCM_16BIT,
                audioBufferSize * 2);
        TarsosDSPAudioFormat format = new TarsosDSPAudioFormat(sampleRate, 16, 1, true, false);
        TarsosDSPAudioInputStream audioStream = new AndroidAudioInputStream(audioInputStream, format);
        // Start recording! Opens the stream.
        audioInputStream.startRecording();
        return new AudioDispatcher(audioStream, audioBufferSize, bufferOverlap);
    } else {
        throw new IllegalArgumentException("Buffer size too small; should be at least " + (minAudioBufferSize * 2));
    }
}
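
The Javadoc above already documents the factory parameters; a minimal caller sketch is added here for context. It assumes the TarsosDSP AudioDispatcher/AudioProcessor classes referenced by the example are on the classpath; the method name startMicrophoneAnalysis, the 22050 Hz sample rate, and the 1024-sample buffer are illustrative values only, not taken from the original.

import be.tarsos.dsp.AudioDispatcher;
import be.tarsos.dsp.AudioEvent;
import be.tarsos.dsp.AudioProcessor;

// Sketch only: one way fromDefaultMicrophone might be driven; values are illustrative.
void startMicrophoneAnalysis() {
    // 22050 Hz, 1024-sample blocks, no overlap -- adjust to your use case.
    final AudioDispatcher dispatcher = fromDefaultMicrophone(22050, 1024, 0);
    dispatcher.addAudioProcessor(new AudioProcessor() {
        @Override
        public boolean process(AudioEvent audioEvent) {
            float[] block = audioEvent.getFloatBuffer(); // one block of PCM samples as floats
            // ... analyse the block here ...
            return true; // keep processing subsequent blocks
        }

        @Override
        public void processingFinished() {
            // called once after the dispatcher stops
        }
    });
    // AudioDispatcher implements Runnable; run it off the UI thread.
    new Thread(dispatcher, "Audio Dispatcher").start();
}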
Example 5: run
import android.media.AudioRecord; // import the package/class the method depends on

@Override
public void run() {
    byte[] buffer = new byte[audioBufferSize];
    recorder = new AudioRecord(audioSource, sampleRate, channel, audioFormat, audioBufferSize);
    try {
        // Wait until the recorder is fully initialized before starting.
        while (recorder.getState() != AudioRecord.STATE_INITIALIZED)
            Thread.sleep(100, 0);
    } catch (InterruptedException e) {
        recorder.release();
        return;
    }
    recorder.startRecording();
    for (;;) {
        int length = recorder.read(buffer, 0, buffer.length);
        if (length < 0)
            Log.e("Record", "error: " + Integer.toString(length));
        else {
            // Stream the raw PCM data to every connected WebSocket client.
            for (WebSocket ws : wss)
                ws.sendBinary(buffer);
        }
        if (Thread.interrupted()) {
            recorder.stop();
            return;
        }
    }
}
Example 6: StartRecord
import android.media.AudioRecord; // import the package/class the method depends on

public void StartRecord() {
    Log.i(TAG, "Start recording");
    isRecording = true;
    // Create the PCM output file
    File file = audioFile; // new File(Environment.getExternalStorageDirectory().getAbsolutePath() + "/reverseme.pcm");
    Log.i(TAG, "Creating file");
    // If the file already exists, delete it before recreating it
    if (file.exists())
        file.delete();
    Log.i(TAG, "Deleted file");
    try {
        file.createNewFile();
        Log.i(TAG, "Created file");
    } catch (IOException e) {
        Log.i(TAG, "Could not create file");
        throw new IllegalStateException("Could not create " + file.toString());
    }
    try {
        // Output streams
        OutputStream os = new FileOutputStream(file);
        BufferedOutputStream bos = new BufferedOutputStream(os);
        DataOutputStream dos = new DataOutputStream(bos);
        int bufferSize = AudioRecord.getMinBufferSize(frequency, channelConfiguration, audioEncoding);
        AudioRecord audioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC, frequency, channelConfiguration, audioEncoding, bufferSize);
        byte[] buffer = new byte[bufferSize];
        audioRecord.startRecording();
        Log.i(TAG, "Recording started");
        isRecording = true;
        while (isRecording) {
            int bufferReadResult = audioRecord.read(buffer, 0, bufferSize);
            dos.write(buffer, 0, bufferReadResult);
        }
        audioRecord.stop();
        dos.close();
    } catch (Throwable t) {
        Log.e(TAG, "Recording failed");
    }
}
Example 7: startRecord
import android.media.AudioRecord; // import the package/class the method depends on

public void startRecord() throws IOException {
    synchronized (REC_LOCK) {
        isRecordStarted = true;
        MediaFormat audioFormat = mConfig.getAudioFormat();
        mAudioEncoder = MediaCodec.createEncoderByType(audioFormat.getString(MediaFormat.KEY_MIME));
        mAudioEncoder.configure(audioFormat, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
        MediaFormat videoFormat = mConfig.getVideoFormat();
        mVideoEncoder = MediaCodec.createEncoderByType(videoFormat.getString(MediaFormat.KEY_MIME));
        // mOutputSurface must not be used here; configure() would fail
        mVideoEncoder.configure(videoFormat, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
        mEncodeSurface = mVideoEncoder.createInputSurface();
        mAudioEncoder.start();
        mVideoEncoder.start();
        mMuxer = new MediaMuxer(mOutputPath, MediaMuxer.OutputFormat.MUXER_OUTPUT_MPEG_4);
        mRecordBufferSize = AudioRecord.getMinBufferSize(mRecordSampleRate,
                mRecordChannelConfig, mRecordAudioFormat) * 2;
        // buffer = new byte[bufferSize];
        mAudioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC, mRecordSampleRate, mRecordChannelConfig,
                mRecordAudioFormat, mRecordBufferSize);
        mAudioThread = new Thread(new Runnable() {
            @Override
            public void run() {
                mAudioRecord.startRecording();
                while (!audioEncodeStep(isTryStopAudio)) { }
                mAudioRecord.stop();
            }
        });
        mAudioThread.start();
        isRecordAudioStarted = true;
    }
}
Example 8: startCapture
import android.media.AudioRecord; // import the package/class the method depends on

public boolean startCapture(int audioSource, int sampleRateInHz, int channelConfig, int audioFormat) {
    if (mIsCaptureStarted) {
        Log.e(TAG, "Capture already started !");
        return false;
    }
    int minBufferSize = AudioRecord.getMinBufferSize(sampleRateInHz, channelConfig, audioFormat);
    if (minBufferSize == AudioRecord.ERROR_BAD_VALUE) {
        Log.e(TAG, "Invalid parameter !");
        return false;
    }
    mAudioRecord = new AudioRecord(audioSource, sampleRateInHz, channelConfig, audioFormat, minBufferSize * 4);
    if (mAudioRecord.getState() == AudioRecord.STATE_UNINITIALIZED) {
        Log.e(TAG, "AudioRecord initialize fail !");
        return false;
    }
    mAudioRecord.startRecording();
    mIsLoopExit = false;
    mCaptureThread = new Thread(new AudioCaptureRunnable());
    mCaptureThread.start();
    mIsCaptureStarted = true;
    Log.d(TAG, "Start audio capture success !");
    return true;
}
Example 9: start
import android.media.AudioRecord; // import the package/class the method depends on

public void start() {
    if (!isStarted) {
        stopFlag = false;
        mRecordBufferSize = AudioRecord.getMinBufferSize(mRecordSampleRate,
                mRecordChannelConfig, mRecordAudioFormat) * 2;
        mRecord = new AudioRecord(MediaRecorder.AudioSource.MIC, mRecordSampleRate, mRecordChannelConfig,
                mRecordAudioFormat, mRecordBufferSize);
        mRecord.startRecording();
        try {
            MediaFormat format = convertAudioConfigToFormat(mConfig.mAudio);
            mAudioEncoder = MediaCodec.createEncoderByType(format.getString(MediaFormat.KEY_MIME));
            mAudioEncoder.configure(format, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
            mAudioEncoder.start();
        } catch (IOException e) {
            e.printStackTrace();
        }
        Thread thread = new Thread(new Runnable() {
            @Override
            public void run() {
                while (!stopFlag && !audioEncodeStep(false)) { }
                audioEncodeStep(true);
                Log.e("wuwang", "audio stop");
                if (isStarted) {
                    mRecord.stop();
                    mRecord.release();
                    mRecord = null;
                }
                if (mAudioEncoder != null) {
                    mAudioEncoder.stop();
                    mAudioEncoder.release();
                    mAudioEncoder = null;
                }
                isStarted = false;
            }
        });
        thread.start();
        startTime = SystemClock.elapsedRealtimeNanos();
        isStarted = true;
    }
}
Example 10: isCanUseAudio
import android.media.AudioRecord; // import the package/class the method depends on

/**
 * Check whether the app is allowed to record audio
 */
public static boolean isCanUseAudio(Activity activity) {
    /* if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) { // Android 6.0 and above
        if (ContextCompat.checkSelfPermission(activity, Manifest.permission.RECORD_AUDIO) != PackageManager.PERMISSION_GRANTED) {
            return false;
        } else {
            return true;
        }
    } else { // below Android 6.0 */
    // Audio source
    int audioSource = MediaRecorder.AudioSource.MIC;
    // Sample rate: 44100 Hz is the current standard, but some devices still support 22050, 16000 or 11025
    int sampleRateInHz = 44100;
    // Recording channels: CHANNEL_IN_STEREO is stereo, CHANNEL_CONFIGURATION_MONO is mono
    int channelConfig = AudioFormat.CHANNEL_IN_STEREO;
    // Audio data format: PCM 16 bit per sample is guaranteed to be supported; PCM 8 bit per sample may not be
    int audioFormat = AudioFormat.ENCODING_PCM_16BIT;
    // Buffer size in bytes
    int bufferSizeInBytes;
    bufferSizeInBytes = AudioRecord.getMinBufferSize(sampleRateInHz,
            channelConfig, audioFormat);
    AudioRecord audioRecord = new AudioRecord(audioSource, sampleRateInHz,
            channelConfig, audioFormat, bufferSizeInBytes);
    // Start recording
    try {
        // Wrapped in try/catch to avoid crashes on some phones, e.g. Lenovo
        audioRecord.startRecording();
    } catch (IllegalStateException e) {
        e.printStackTrace();
    }
    // Whether recording actually started tells us whether the permission was granted
    if (audioRecord.getRecordingState() != AudioRecord.RECORDSTATE_RECORDING) {
        // context.startActivity(new Intent(Settings.ACTION_MANAGE_APPLICATIONS_SETTINGS));
        return false;
    } else {
        audioRecord.stop();
        audioRecord.release();
        return true;
    }
    // }
}
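
The commented-out branch at the top of this method hints at the explicit runtime-permission check used on Android 6.0 and above. For comparison with the record-and-probe approach, here is a minimal sketch of that path; the helper name hasRecordAudioPermission is hypothetical, and it assumes the AndroidX ContextCompat/ActivityCompat helpers are available.

import android.Manifest;
import android.app.Activity;
import android.content.pm.PackageManager;
import android.os.Build;
import androidx.core.app.ActivityCompat;
import androidx.core.content.ContextCompat;

// Sketch only: explicit runtime-permission check on API 23+ (hypothetical helper).
public static boolean hasRecordAudioPermission(Activity activity) {
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
        // On Android 6.0+ RECORD_AUDIO must be granted at runtime.
        if (ContextCompat.checkSelfPermission(activity, Manifest.permission.RECORD_AUDIO)
                != PackageManager.PERMISSION_GRANTED) {
            // Optionally prompt the user; the request code 1 is arbitrary.
            ActivityCompat.requestPermissions(activity,
                    new String[]{Manifest.permission.RECORD_AUDIO}, 1);
            return false;
        }
        return true;
    }
    // Below API 23 the manifest permission granted at install time is sufficient.
    return true;
}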
Example 11: startCapture
import android.media.AudioRecord; // import the package/class the method depends on

public boolean startCapture(int audioSource, int sampleRateInHz, int channelConfig, int audioFormat) {
    if (mIsCaptureStarted) {
        Log.e(TAG, "hujd Capture already started !");
        return false;
    }
    mMinBufferSize = AudioRecord.getMinBufferSize(sampleRateInHz, channelConfig, audioFormat);
    if (mMinBufferSize == AudioRecord.ERROR_BAD_VALUE) {
        Log.e(TAG, "hujd Invalid parameter !");
        return false;
    }
    Log.e(TAG, "hujd getMinBufferSize = " + mMinBufferSize + " bytes !");
    mAudioRecord = new AudioRecord(audioSource, sampleRateInHz, channelConfig, audioFormat, mMinBufferSize);
    if (mAudioRecord.getState() == AudioRecord.STATE_UNINITIALIZED) {
        Log.e(TAG, "hujd AudioRecord initialize fail !");
        return false;
    }
    mAudioRecord.startRecording();
    mIsLoopExit = false;
    mCaptureThread = new Thread(new AudioCaptureRunnable());
    mCaptureThread.start();
    mIsCaptureStarted = true;
    Log.e(TAG, "hujd Start audio capture success !");
    return true;
}