

Java AudioRecord.getMinBufferSize Method Code Examples

This article collects typical usage examples of the Java method android.media.AudioRecord.getMinBufferSize. If you are wondering what AudioRecord.getMinBufferSize does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also browse further usage examples of the enclosing class, android.media.AudioRecord.


The sections below present 15 code examples of the AudioRecord.getMinBufferSize method, sorted by popularity by default.
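All of the examples share the same basic pattern, sketched below with illustrative names and constants (the class MinBufferSizeDemo and the 44.1 kHz mono PCM-16 configuration are placeholders, not taken from any of the projects that follow): query the minimum buffer size, check the return value against AudioRecord.ERROR and AudioRecord.ERROR_BAD_VALUE, construct the AudioRecord with a buffer at least that large, and verify that it reached STATE_INITIALIZED before recording.

import android.media.AudioFormat;
import android.media.AudioRecord;
import android.media.MediaRecorder;

public class MinBufferSizeDemo {
    // Illustrative configuration; any supported rate/channel/encoding combination works.
    private static final int SAMPLE_RATE = 44100;
    private static final int CHANNEL = AudioFormat.CHANNEL_IN_MONO;
    private static final int ENCODING = AudioFormat.ENCODING_PCM_16BIT;

    /** Returns an initialized AudioRecord, or null if the configuration is unsupported. */
    public static AudioRecord createRecorder() {
        // getMinBufferSize reports failures through its return value, not exceptions.
        int minSize = AudioRecord.getMinBufferSize(SAMPLE_RATE, CHANNEL, ENCODING);
        if (minSize == AudioRecord.ERROR || minSize == AudioRecord.ERROR_BAD_VALUE) {
            return null;
        }
        // The recorder needs a buffer at least this large; larger is allowed.
        AudioRecord record = new AudioRecord(MediaRecorder.AudioSource.MIC,
                SAMPLE_RATE, CHANNEL, ENCODING, minSize * 2);
        // Construction can still fail, e.g. if the RECORD_AUDIO permission is missing.
        if (record.getState() != AudioRecord.STATE_INITIALIZED) {
            record.release();
            return null;
        }
        return record;
    }
}

Several of the examples below allocate a multiple of the minimum (for instance minBufferSize * 2 or * 4) for extra headroom, which the API permits.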

Example 1: createAudioRecord

import android.media.AudioRecord; // Import the package/class this method depends on
/**
 * Creates a new {@link AudioRecord}.
 *
 * @return A newly created {@link AudioRecord}, or null if it cannot be created (missing
 * permissions?).
 */
private AudioRecord createAudioRecord() {
    for (int sampleRate : SAMPLE_RATE_CANDIDATES) {
        final int sizeInBytes = AudioRecord.getMinBufferSize(sampleRate, CHANNEL, ENCODING);
        if (sizeInBytes == AudioRecord.ERROR_BAD_VALUE) {
            continue;
        }
        final AudioRecord audioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC,
                sampleRate, CHANNEL, ENCODING, sizeInBytes);
        if (audioRecord.getState() == AudioRecord.STATE_INITIALIZED) {
            mBuffer = new byte[sizeInBytes];
            return audioRecord;
        } else {
            audioRecord.release();
        }
    }
    return null;
}
 
Developer: hsavaliya, Project: GoogleAssistantSDK, Lines of code: 24, Source file: VoiceRecorder_.java

Example 2: onCreate

import android.media.AudioRecord; // Import the package/class this method depends on
@Override
public void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    setContentView(R.layout.activity_main);

    bufferSize = AudioRecord.getMinBufferSize(16000,
            AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT);

    setButtonHandlers();
    enableButtons(false);

    inferenceInterface = new TensorFlowInferenceInterface();
    inferenceInterface.initializeTensorFlow(getAssets(), MODEL_FILE);
    // tensorFlowSample();
}
 
Developer: ranatrk, Project: AudioGenderIdentifier, Lines of code: 18, Source file: MainActivity.java

Example 3: fromDefaultMicrophone

import android.media.AudioRecord; // Import the package/class this method depends on
/**
 * Create a new AudioDispatcher connected to the default microphone.
 * 
 * @param sampleRate
 *            The requested sample rate.
 * @param audioBufferSize
 *            The size of the audio buffer (in samples).
 * 
 * @param bufferOverlap
 *            The size of the overlap (in samples).
 * @return A new AudioDispatcher
 */
public static AudioDispatcher fromDefaultMicrophone(final int sampleRate,
        final int audioBufferSize, final int bufferOverlap) {
    int minAudioBufferSize = AudioRecord.getMinBufferSize(sampleRate,
            android.media.AudioFormat.CHANNEL_IN_MONO,
            android.media.AudioFormat.ENCODING_PCM_16BIT);
    int minAudioBufferSizeInSamples = minAudioBufferSize / 2;
    if (minAudioBufferSizeInSamples <= audioBufferSize) {
        AudioRecord audioInputStream = new AudioRecord(
                MediaRecorder.AudioSource.MIC, sampleRate,
                android.media.AudioFormat.CHANNEL_IN_MONO,
                android.media.AudioFormat.ENCODING_PCM_16BIT,
                audioBufferSize * 2);

        TarsosDSPAudioFormat format = new TarsosDSPAudioFormat(sampleRate, 16, 1, true, false);

        TarsosDSPAudioInputStream audioStream = new AndroidAudioInputStream(audioInputStream, format);
        // Start recording! Opens the stream.
        audioInputStream.startRecording();
        return new AudioDispatcher(audioStream, audioBufferSize, bufferOverlap);
    } else {
        throw new IllegalArgumentException("Buffer size too small should be at least " + (minAudioBufferSize * 2));
    }
}
 
Developer: gstraube, Project: cythara, Lines of code: 36, Source file: AudioDispatcherFactory.java

Example 4: getMinInputFrameSize

import android.media.AudioRecord; // Import the package/class this method depends on
private static int getMinInputFrameSize(int sampleRateInHz, int numChannels) {
  final int bytesPerFrame = numChannels * (BITS_PER_SAMPLE / 8);
  final int channelConfig =
      (numChannels == 1 ? AudioFormat.CHANNEL_IN_MONO : AudioFormat.CHANNEL_IN_STEREO);
  return AudioRecord.getMinBufferSize(
             sampleRateInHz, channelConfig, AudioFormat.ENCODING_PCM_16BIT)
      / bytesPerFrame;
}
 
Developer: Piasy, Project: AppRTC-Android, Lines of code: 9, Source file: WebRtcAudioManager.java

Example 5: AudioRecordThread

import android.media.AudioRecord; // Import the package/class this method depends on
public AudioRecordThread(LinkedBlockingDeque<byte[]> linkedBlockingDeque) {
    this.linkedBlockingDeque = linkedBlockingDeque;
    bufferSize = AudioRecord.getMinBufferSize(SAMPLE_RATE_HZ, AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT);
    audioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC, SAMPLE_RATE_HZ, AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT, bufferSize);
}
 
Developer: dueros, Project: dcs-sdk-java, Lines of code: 8, Source file: AudioRecordThread.java

Example 6: getRecordBufferSize

import android.media.AudioRecord; // Import the package/class this method depends on
public static int getRecordBufferSize() {
    int frequency = Options.getInstance().audio.frequency;
    int audioEncoding = Options.getInstance().audio.encoding;
    int channelConfiguration = AudioFormat.CHANNEL_CONFIGURATION_MONO;
    if(Options.getInstance().audio.channelCount == 2) {
        channelConfiguration = AudioFormat.CHANNEL_CONFIGURATION_STEREO;
    }
    return AudioRecord.getMinBufferSize(frequency, channelConfiguration, audioEncoding);
}
 
Developer: wuyisheng, Project: libRtmp, Lines of code: 10, Source file: AndroidUntil.java

Example 7: AudioCodec

import android.media.AudioRecord; // Import the package/class this method depends on
public AudioCodec() throws IOException {
  this.bufferSize  = AudioRecord.getMinBufferSize(SAMPLE_RATE, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
  this.audioRecord = createAudioRecord(this.bufferSize);
  this.mediaCodec  = createMediaCodec(this.bufferSize);

  this.mediaCodec.start();

  try {
    audioRecord.startRecording();
  } catch (Exception e) {
    Log.w(TAG, e);
    mediaCodec.release();
    throw new IOException(e);
  }
}
 
Developer: XecureIT, Project: PeSanKita-android, Lines of code: 16, Source file: AudioCodec.java

Example 8: initRecorder

import android.media.AudioRecord; // Import the package/class this method depends on
public void initRecorder() {
    SAMPLE_RATE = getValidSampleRates();
    int bufferSize = AudioRecord.getMinBufferSize(SAMPLE_RATE, AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT);
    mBuffer = new short[bufferSize];
    mRecorder = new AudioRecord(MediaRecorder.AudioSource.MIC, SAMPLE_RATE, AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT, bufferSize);
    new File(RECORD_WAV_PATH).mkdir();
}
 
Developer: timstableford, Project: P-BrainAndroid, Lines of code: 10, Source file: RecordWavMaster.java

Example 9: startCapture

import android.media.AudioRecord; // Import the package/class this method depends on
public boolean startCapture(int audioSource, int sampleRateInHz, int channelConfig, int audioFormat) {
    if (mIsCaptureStarted) {
        Log.e(TAG, "Capture already started !");
        return false;
    }

    int minBufferSize = AudioRecord.getMinBufferSize(sampleRateInHz, channelConfig, audioFormat);
    if (minBufferSize == AudioRecord.ERROR_BAD_VALUE) {
        Log.e(TAG, "Invalid parameter !");
        return false;
    }

    mAudioRecord = new AudioRecord(audioSource, sampleRateInHz, channelConfig, audioFormat, minBufferSize * 4);
    if (mAudioRecord.getState() == AudioRecord.STATE_UNINITIALIZED) {
        Log.e(TAG, "AudioRecord initialize fail !");
        return false;
    }

    mAudioRecord.startRecording();

    mIsLoopExit = false;
    mCaptureThread = new Thread(new AudioCaptureRunnable());
    mCaptureThread.start();

    mIsCaptureStarted = true;

    Log.d(TAG, "Start audio capture success !");

    return true;
}
 
Developer: pili-engineering, Project: PLDroidRTCStreaming, Lines of code: 31, Source file: ExtAudioCapture.java

Example 10: createAudioRecord

import android.media.AudioRecord; // Import the package/class this method depends on
private void createAudioRecord() throws InitializationException {
    // The AudioRecord configurations parameters used here, are guaranteed
    // to be supported on all devices.

    // AudioFormat.CHANNEL_IN_MONO should be used in place of deprecated
    // AudioFormat.CHANNEL_CONFIGURATION_MONO, but it is not available for
    // API level 3.

    // Unlike AudioTrack buffer, AudioRecord buffer could be larger than
    // minimum without causing any problems. But minimum works well.
    final int audioRecordBufferSizeInBytes = AudioRecord.getMinBufferSize(
            SpeechTrainerConfig.SAMPLE_RATE_HZ, AudioFormat.CHANNEL_CONFIGURATION_MONO,
            AudioFormat.ENCODING_PCM_16BIT);
    if (audioRecordBufferSizeInBytes <= 0) {
        throw new InitializationException("Failed to initialize recording.");
    }

    // CHANNEL_IN_MONO is guaranteed to work on all devices.
    // ENCODING_PCM_16BIT is guaranteed to work on all devices.
    audioRecord = new AudioRecord(AudioSource.MIC, SpeechTrainerConfig.SAMPLE_RATE_HZ,
            AudioFormat.CHANNEL_CONFIGURATION_MONO, AudioFormat.ENCODING_PCM_16BIT,
            audioRecordBufferSizeInBytes);
    if (audioRecord.getState() != AudioRecord.STATE_INITIALIZED) {
        audioRecord = null;
        throw new InitializationException("Failed to initialize recording.");
    }
}
 
Developer: sdrausty, Project: buildAPKsApps, Lines of code: 28, Source file: ControllerFactory.java

Example 11: initAudioRecorder

import android.media.AudioRecord; // Import the package/class this method depends on
/**
   * Initialize audio recorder
   */
  private void initAudioRecorder() throws IOException {
      mBufferSize = AudioRecord.getMinBufferSize(DEFAULT_SAMPLING_RATE,
              DEFAULT_CHANNEL_CONFIG, DEFAULT_AUDIO_FORMAT.getAudioFormat());

      int bytesPerFrame = DEFAULT_AUDIO_FORMAT.getBytesPerFrame();
      /* Get the number of samples. Round the buffer size up to a multiple of
       * the frame size so it divides evenly, which simplifies the periodic
       * position notifications set up below. */
      int frameSize = mBufferSize / bytesPerFrame;
      if (frameSize % FRAME_COUNT != 0) {
          frameSize += (FRAME_COUNT - frameSize % FRAME_COUNT);
          mBufferSize = frameSize * bytesPerFrame;
      }

      /* Set up the audio recorder */
      mAudioRecord = new AudioRecord(DEFAULT_AUDIO_SOURCE,
              DEFAULT_SAMPLING_RATE, DEFAULT_CHANNEL_CONFIG, DEFAULT_AUDIO_FORMAT.getAudioFormat(),
              mBufferSize);

      mPCMBuffer = new short[mBufferSize];
      /* Initialize the LAME buffer. The MP3 sampling rate is the same as the
       * recorded PCM sampling rate, and the bit rate is 32 kbps. */
      Mp3NativeUtil.init(DEFAULT_SAMPLING_RATE, DEFAULT_LAME_IN_CHANNEL, DEFAULT_SAMPLING_RATE, DEFAULT_LAME_MP3_BIT_RATE, DEFAULT_LAME_MP3_QUALITY);
      // Create and run thread used to encode data
      // The thread will
      mEncodeThread = new DataEncodeThread(mRecordFile, mBufferSize);
      mEncodeThread.start();
      mAudioRecord.setRecordPositionUpdateListener(mEncodeThread, mEncodeThread.getHandler());
      mAudioRecord.setPositionNotificationPeriod(FRAME_COUNT);
  }
 
Developer: hushengjun, Project: FastAndroid, Lines of code: 39, Source file: MP3Recorder.java

Example 12: AudioRecordRunnable

import android.media.AudioRecord; // Import the package/class this method depends on
private AudioRecordRunnable() {
    bufferSize = AudioRecord.getMinBufferSize(sampleRate,
            AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
    audioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC, sampleRate,
            AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, bufferSize);
    audioData = new short[bufferSize];
}
 
Developer: feigxj, Project: VideoRecorder-master, Lines of code: 8, Source file: FFmpegRecorderActivity.java

Example 13: start

import android.media.AudioRecord; // Import the package/class this method depends on
public void start(){
    if(!isStarted){
        stopFlag=false;

        mRecordBufferSize = AudioRecord.getMinBufferSize(mRecordSampleRate,
                mRecordChannelConfig, mRecordAudioFormat)*2;
        mRecord=new AudioRecord(MediaRecorder.AudioSource.MIC,mRecordSampleRate,mRecordChannelConfig,
                mRecordAudioFormat,mRecordBufferSize);
        mRecord.startRecording();
        try {
            MediaFormat format=convertAudioConfigToFormat(mConfig.mAudio);
            mAudioEncoder=MediaCodec.createEncoderByType(format.getString(MediaFormat.KEY_MIME));
            mAudioEncoder.configure(format,null,null,MediaCodec.CONFIGURE_FLAG_ENCODE);
            mAudioEncoder.start();
        } catch (IOException e) {
            e.printStackTrace();
        }
        Thread thread=new Thread(new Runnable() {
            @Override
            public void run() {
                while (!stopFlag&&!audioEncodeStep(false)){};
                audioEncodeStep(true);
                Log.e("wuwang","audio stop");
                if(isStarted){
                    mRecord.stop();
                    mRecord.release();
                    mRecord=null;
                }
                if(mAudioEncoder!=null){
                    mAudioEncoder.stop();
                    mAudioEncoder.release();
                    mAudioEncoder=null;
                }
                isStarted=false;
            }
        });
        thread.start();
        startTime=SystemClock.elapsedRealtimeNanos();
        isStarted=true;
    }
}
 
Developer: aiyaapp, Project: AAVT, Lines of code: 42, Source file: SoundRecorder.java

Example 14: AudioRecordManager

import android.media.AudioRecord; // Import the package/class this method depends on
public AudioRecordManager() {
    bufferSize = AudioRecord.getMinBufferSize(8000, AudioFormat.CHANNEL_IN_MONO, AudioFormat
            .ENCODING_PCM_16BIT);
    mRecorder = new AudioRecord(MediaRecorder.AudioSource.MIC, 8000, AudioFormat.CHANNEL_IN_MONO,
            AudioFormat.ENCODING_PCM_16BIT, bufferSize * 2);
}
 
Developer: jokermonn, Project: permissions4m, Lines of code: 7, Source file: AudioRecordManager.java

Example 15: ExtAudioRecorder

import android.media.AudioRecord; // Import the package/class this method depends on
/**
 * Default constructor.
 *
 * Instantiates a new recorder. For compressed recording the parameters can
 * be left as 0. In case of errors, no exception is thrown; instead the
 * state is set to ERROR.
 */
@SuppressWarnings("deprecation")
public ExtAudioRecorder(boolean uncompressed, int audioSource,
		int sampleRate, int channelConfig, int audioFormat) {
	try {
		rUncompressed = uncompressed;
		if (rUncompressed) { // RECORDING_UNCOMPRESSED
			if (audioFormat == AudioFormat.ENCODING_PCM_16BIT) {
				bSamples = 16;
			} else {
				bSamples = 8;
			}

			if (channelConfig == AudioFormat.CHANNEL_CONFIGURATION_MONO) {
				nChannels = 1;
			} else {
				nChannels = 2;
			}

			aSource = audioSource;
			sRate = sampleRate;
			aFormat = audioFormat;

			framePeriod = sampleRate * TIMER_INTERVAL / 1000;
			bufferSize = framePeriod * 2 * bSamples * nChannels / 8;
			if (bufferSize < AudioRecord.getMinBufferSize(sampleRate,
					channelConfig, audioFormat)) { // Check to make sure
													// buffer size is not
													// smaller than the
													// smallest allowed one
				bufferSize = AudioRecord.getMinBufferSize(sampleRate,
						channelConfig, audioFormat);
				// Set frame period and timer interval accordingly
				framePeriod = bufferSize / (2 * bSamples * nChannels / 8);
				Log.w(ExtAudioRecorder.class.getName(),
						"Increasing buffer size to "
								+ Integer.toString(bufferSize));
			}

			audioRecorder = new AudioRecord(audioSource, sampleRate,
					channelConfig, audioFormat, bufferSize);

			if (audioRecorder.getState() != AudioRecord.STATE_INITIALIZED)
				throw new Exception("AudioRecord initialization failed");
			audioRecorder.setRecordPositionUpdateListener(updateListener);
			audioRecorder.setPositionNotificationPeriod(framePeriod);
		} else { // RECORDING_COMPRESSED
			mediaRecorder = new MediaRecorder();
			mediaRecorder.setAudioSource(MediaRecorder.AudioSource.MIC);
			mediaRecorder
					.setOutputFormat(MediaRecorder.OutputFormat.THREE_GPP);
			mediaRecorder
					.setAudioEncoder(MediaRecorder.AudioEncoder.AMR_NB);
		}
		cAmplitude = 0;
		filePath = null;
		state = State.INITIALIZING;
	} catch (Exception e) {
		if (e.getMessage() != null) {
			Log.e(ExtAudioRecorder.class.getName(), e.getMessage());
		} else {
			Log.e(ExtAudioRecorder.class.getName(),
					"Unknown error occured while initializing recording");
		}
		state = State.ERROR;
	}
}
 
Developer: fengdongfei, Project: CXJPadProject, Lines of code: 77, Source file: ExtAudioRecorder.java


Note: The android.media.AudioRecord.getMinBufferSize examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are drawn from open-source projects contributed by many developers, and copyright of the source code remains with the original authors; consult each project's license before distributing or using it. Please do not reproduce this article without permission.