本文整理匯總了Java中android.media.AudioFormat.CHANNEL_IN_STEREO屬性的典型用法代碼示例。如果您正苦於以下問題:Java AudioFormat.CHANNEL_IN_STEREO屬性的具體用法?Java AudioFormat.CHANNEL_IN_STEREO怎麽用?Java AudioFormat.CHANNEL_IN_STEREO使用的例子?那麽, 這裏精選的屬性代碼示例或許可以為您提供幫助。您也可以進一步了解該屬性所在類android.media.AudioFormat
的用法示例。
在下文中一共展示了AudioFormat.CHANNEL_IN_STEREO屬性的14個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。
示例1: generateTrack
/**
 * Builds a streaming AudioTrack pre-filled with the given 16-bit PCM samples
 * and a playback-completion marker set at the end of the data.
 *
 * @param sampleRate playback sample rate in Hz
 * @param buf        16-bit PCM samples to queue for playback
 * @param len        number of valid samples in {@code buf}
 * @return an AudioTrack ready for playback with its notification marker at {@code len}
 * @throws IllegalArgumentException if RawSamples.CHANNEL_CONFIG is neither mono nor stereo
 * @throws RuntimeException if the notification marker cannot be set
 */
public AudioTrack generateTrack(int sampleRate, short[] buf, int len) {
    int end = len;
    // Map the recording channel mask to the matching playback mask.
    int c;
    if (RawSamples.CHANNEL_CONFIG == AudioFormat.CHANNEL_IN_MONO)
        c = AudioFormat.CHANNEL_OUT_MONO;
    else if (RawSamples.CHANNEL_CONFIG == AudioFormat.CHANNEL_IN_STEREO)
        c = AudioFormat.CHANNEL_OUT_STEREO;
    else
        // BUG FIX: previously fell through with c == 0, an invalid channel mask
        // that would make the AudioTrack constructor fail obscurely downstream.
        throw new IllegalArgumentException("unsupported channel config: " + RawSamples.CHANNEL_CONFIG);
    // old phones bug.
    // http://stackoverflow.com/questions/27602492
    //
    // with MODE_STATIC setNotificationMarkerPosition not called
    AudioTrack track = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRate,
            c, RawSamples.AUDIO_FORMAT,
            len * (Short.SIZE / 8), AudioTrack.MODE_STREAM);
    track.write(buf, 0, len);
    if (track.setNotificationMarkerPosition(end) != AudioTrack.SUCCESS)
        throw new RuntimeException("unable to set marker");
    return track;
}
示例2: start
/**
 * Initializes the native audio pipeline and, when a microphone capture
 * session can be opened, begins recording and launches the AAC encoder.
 */
public void start() {
    // Aim for roughly one second of buffered audio, but never drop below
    // the platform-required minimum for this configuration.
    final int minBufferSize = AudioRecord.getMinBufferSize(
            mSampleRate, AudioFormat.CHANNEL_IN_STEREO, AudioFormat.ENCODING_PCM_16BIT);
    int bufferSize = mSampleRate * mChannels; // 1 seconds buffer size
    bufferSize = Math.max(bufferSize, minBufferSize);
    if (audioCapture == null) {
        try {
            audioCapture = new AudioRecord(MediaRecorder.AudioSource.MIC,
                    mSampleRate,
                    AudioFormat.CHANNEL_IN_STEREO,
                    AudioFormat.ENCODING_PCM_16BIT,
                    bufferSize);
        } catch (IllegalArgumentException e) {
            // Unsupported capture parameters on this device; continue without a mic.
            audioCapture = null;
        }
    }
    // The native side is initialized whether or not capture is available.
    LiveJniLib.native_audio_init(mSampleRate, mChannels);
    if (audioCapture != null) {
        audioCapture.startRecording();
        new AudioEncoder().start();
    }
}
示例3: AudioRecordTask
/**
 * Creates the recording task: opens a stereo 16-bit PCM AudioRecord using the
 * class-level source/rate/buffer constants and prepares (without starting)
 * the music-detection worker thread.
 *
 * @param musicIdStream Gracenote stream that will receive the captured audio
 */
public AudioRecordTask(GnMusicIdStream musicIdStream) {
    Log.d(TAG, "AudioRecordTask - BufferSize: " + MIN_RAW_BUFFER_SIZE);
    this.musicIdStream = musicIdStream;
    // Stereo, 16-bit PCM capture; buffer sized by MIN_RAW_BUFFER_SIZE.
    this.audioRecord = new AudioRecord(AUDIO_SOURCE, AUDIO_SAMPLE_RATE,
            AudioFormat.CHANNEL_IN_STEREO, AudioFormat.ENCODING_PCM_16BIT, MIN_RAW_BUFFER_SIZE);
    this.musicDetectThread = new Thread(new MusicDetectRunnable(), "MusicDetect");
}
示例4: AACEncoder
/**
 * Creates an AAC encoder fed by the device microphone.
 * Configures a MediaCodec encoder from the publisher parameters, wraps its
 * output in a MediaCodecInputStream, and opens a stereo 16-bit PCM
 * AudioRecord at the requested sampling rate.
 *
 * @param params stream parameters providing sample rate, buffer size,
 *               audio MIME type and the MediaFormat for the encoder
 * @throws IOException if the MediaCodec encoder cannot be created
 */
public AACEncoder(final StreamPublisher.StreamPublisherParam params) throws IOException {
    this.samplingRate = params.samplingRate;
    bufferSize = params.audioBufferSize;
    // The codec must be configured (CONFIGURE_FLAG_ENCODE) before its output
    // stream is wrapped below.
    mMediaCodec = MediaCodec.createEncoderByType(params.audioMIME);
    mMediaCodec.configure(params.createAudioMediaFormat(), null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
    mediaCodecInputStream = new MediaCodecInputStream(mMediaCodec, new MediaCodecInputStream.MediaFormatCallback() {
        @Override
        public void onChangeMediaFormat(MediaFormat mediaFormat) {
            // Propagate the codec's actual output format back to the publisher.
            params.setAudioOutputMediaFormat(mediaFormat);
        }
    });
    mAudioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC, samplingRate, AudioFormat.CHANNEL_IN_STEREO, AudioFormat.ENCODING_PCM_16BIT, bufferSize);
}
示例5: getMinInputFrameSize
/**
 * Computes the minimum AudioRecord buffer size for the given configuration,
 * expressed in frames rather than bytes (one frame = one sample per channel).
 *
 * @param sampleRateInHz capture sample rate in Hz
 * @param numChannels    1 for mono, otherwise stereo
 * @return minimum buffer size in frames
 */
private static int getMinInputFrameSize(int sampleRateInHz, int numChannels) {
    final int channelConfig = numChannels == 1
            ? AudioFormat.CHANNEL_IN_MONO
            : AudioFormat.CHANNEL_IN_STEREO;
    final int minBytes = AudioRecord.getMinBufferSize(
            sampleRateInHz, channelConfig, AudioFormat.ENCODING_PCM_16BIT);
    final int bytesPerFrame = numChannels * (BITS_PER_SAMPLE / 8);
    return minBytes / bytesPerFrame;
}
示例6: writeWavHeader
/**
 * Writes the proper 44-byte RIFF/WAVE header to/for the given stream.
 * Two size fields are left empty/null since we do not yet know the final stream size.
 *
 * @param out         the stream to write the header to
 * @param channelMask an AudioFormat.CHANNEL_* mask (mono or stereo input)
 * @param sampleRate  the sample rate in hertz
 * @param encoding    an AudioFormat.ENCODING_PCM_* value
 * @throws IOException if writing the header fails
 * @throws IllegalArgumentException for unsupported channel masks or encodings
 */
private void writeWavHeader(OutputStream out, int channelMask, int sampleRate, int encoding)
        throws IOException {
    final short channels;
    if (channelMask == AudioFormat.CHANNEL_IN_MONO) {
        channels = 1;
    } else if (channelMask == AudioFormat.CHANNEL_IN_STEREO) {
        channels = 2;
    } else {
        throw new IllegalArgumentException("Unacceptable channel mask");
    }
    final short bitDepth;
    if (encoding == AudioFormat.ENCODING_PCM_8BIT) {
        bitDepth = 8;
    } else if (encoding == AudioFormat.ENCODING_PCM_16BIT) {
        bitDepth = 16;
    } else if (encoding == AudioFormat.ENCODING_PCM_FLOAT) {
        bitDepth = 32;
    } else {
        throw new IllegalArgumentException("Unacceptable encoding");
    }
    // Delegate to the overload that writes the resolved channel count / bit depth.
    writeWavHeader(out, channels, sampleRate, bitDepth);
}
示例7: findAudioRecord
/**
 * Probes combinations of sampling rate, encoding, channel configuration and
 * audio source until one yields an initialized AudioRecord.
 *
 * @return a ready-to-use AudioRecord, or {@code null} if no combination works
 */
private AudioRecord findAudioRecord() {
    int[] samplingRates = new int[]{44100, 22050, 11025, 8000};
    int[] audioFormats = new int[]{
            AudioFormat.ENCODING_PCM_16BIT,
            AudioFormat.ENCODING_PCM_8BIT};
    int[] channelConfigs = new int[]{
            AudioFormat.CHANNEL_IN_STEREO,
            AudioFormat.CHANNEL_IN_MONO};
    for (int rate : samplingRates) {
        for (int format : audioFormats) {
            for (int config : channelConfigs) {
                try {
                    int bufferSize = AudioRecord.getMinBufferSize(rate, config, format);
                    if (bufferSize != AudioRecord.ERROR_BAD_VALUE) {
                        for (int source : AUDIO_SOURCES) {
                            // 4x the minimum buffer to reduce overrun risk.
                            AudioRecord recorder = new AudioRecord(source, rate, config, format, bufferSize * 4);
                            if (recorder.getState() == AudioRecord.STATE_INITIALIZED) {
                                return recorder;
                            }
                            // BUG FIX: release probes that failed to initialize.
                            // AudioRecord holds native resources even in the
                            // uninitialized state; previously these leaked.
                            recorder.release();
                        }
                    }
                } catch (Exception e) {
                    Log.e(TAG, "Init AudioRecord Error." + Log.getStackTraceString(e));
                }
            }
        }
    }
    return null;
}
示例8: setupAudioRecord
/**
 * Creates the AudioRecord used for capture: 16-bit PCM from the microphone,
 * mono or stereo depending on the {@code stereo} flag, with a buffer four
 * times the platform minimum.
 */
private void setupAudioRecord() {
    final int channelConfig;
    if (stereo) {
        channelConfig = AudioFormat.CHANNEL_IN_STEREO;
    } else {
        channelConfig = AudioFormat.CHANNEL_IN_MONO;
    }
    final int minBufferSize = AudioRecord.getMinBufferSize(
            sampleRate, channelConfig, AudioFormat.ENCODING_PCM_16BIT);
    // Four times the minimum to reduce the chance of buffer overruns.
    audioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC, sampleRate,
            channelConfig, AudioFormat.ENCODING_PCM_16BIT, 4 * minBufferSize);
}
示例9: doDemoAudio
/**
 * Demo routine: opens a stereo 16-bit 44.1 kHz AudioRecord, starts capturing,
 * then immediately stops and releases it on the UI thread.
 *
 * @throws Exception propagated from AudioRecord setup
 */
private void doDemoAudio() throws Exception {
    // Capture source: the device microphone.
    int audioSource = MediaRecorder.AudioSource.MIC;
    // 44100 Hz is the universally supported standard; some devices also
    // handle 22050, 16000 and 11025.
    int sampleRateInHz = 44100;
    // CHANNEL_IN_STEREO = two channels; CHANNEL_CONFIGURATION_MONO = one.
    int channelConfig = AudioFormat.CHANNEL_IN_STEREO;
    // 16-bit PCM is guaranteed on all devices; 8-bit PCM is not.
    int audioFormat = AudioFormat.ENCODING_PCM_16BIT;
    // Minimum buffer size in bytes for this configuration.
    int bufferSizeInBytes = AudioRecord.getMinBufferSize(
            sampleRateInHz, channelConfig, audioFormat);
    final AudioRecord ar = new AudioRecord(audioSource, sampleRateInHz,
            channelConfig, audioFormat, bufferSizeInBytes);
    ar.startRecording();
    Log.i(TAG, "doDemoAudio : AudioRecord____startRecording()");
    // Tear down on the UI thread; some devices throw from stop()/release().
    mActivity.runOnUiThread(new Runnable() {
        @Override
        public void run() {
            try {
                ar.stop();
                ar.release();
                Log.i(TAG, "doDemoAudio : AudioRecord____release()");
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
    });
}
示例10: isCanUseAudio
/**
 * Heuristically checks whether the app can record audio by actually opening
 * an AudioRecord and observing whether recording starts. On pre-M devices
 * there is no runtime-permission API, so probing is the only option; on M+
 * a {@code ContextCompat.checkSelfPermission(activity, RECORD_AUDIO)} check
 * would be the cleaner alternative.
 *
 * @param activity caller context (kept for interface compatibility; the probe
 *                 itself does not use it)
 * @return {@code true} if recording started, {@code false} otherwise
 */
public static boolean isCanUseAudio(Activity activity) {
    // Capture source: the device microphone.
    int audioSource = MediaRecorder.AudioSource.MIC;
    // 44100 Hz standard; some devices also support 22050, 16000, 11025.
    int sampleRateInHz = 44100;
    // Stereo input; 16-bit PCM is guaranteed on all devices, 8-bit is not.
    int channelConfig = AudioFormat.CHANNEL_IN_STEREO;
    int audioFormat = AudioFormat.ENCODING_PCM_16BIT;
    // Minimum buffer size in bytes for this configuration.
    int bufferSizeInBytes = AudioRecord.getMinBufferSize(sampleRateInHz,
            channelConfig, audioFormat);
    AudioRecord audioRecord = new AudioRecord(audioSource, sampleRateInHz,
            channelConfig, audioFormat, bufferSizeInBytes);
    try {
        // Some handsets (e.g. Lenovo) throw here instead of failing silently.
        audioRecord.startRecording();
    } catch (IllegalStateException e) {
        e.printStackTrace();
    }
    // If the state never reached RECORDING, recording permission is missing.
    if (audioRecord.getRecordingState() != AudioRecord.RECORDSTATE_RECORDING) {
        // BUG FIX: release the probe recorder on the failure path too;
        // previously it leaked the native AudioRecord resources.
        audioRecord.release();
        return false;
    }
    audioRecord.stop();
    audioRecord.release();
    return true;
}
示例11: channelCountToConfiguration
/**
 * Maps a channel count to the corresponding AudioFormat input channel mask.
 *
 * @param channels number of capture channels
 * @return CHANNEL_IN_MONO for one channel, CHANNEL_IN_STEREO otherwise
 */
private int channelCountToConfiguration(int channels) {
    if (channels == 1) {
        return AudioFormat.CHANNEL_IN_MONO;
    }
    return AudioFormat.CHANNEL_IN_STEREO;
}
示例12: start
/**
 * Starts a new WAV recording, or resumes a paused one.
 * Options: "channel" = "mono" selects single-channel capture (default stereo);
 * "quality" = "low" (8 kHz / 8-bit) or "high" (44.1 kHz / 16-bit), default
 * 22.05 kHz / 16-bit. Reports success or an error through {@code listener}.
 *
 * @param options  recording options map (keys may be absent)
 * @param listener receives {@code null} on success or an error payload
 */
public void start(HashMap<String, String> options, ModuleResultListener listener){
    // Channel selection. Null-safe: a missing "channel" key keeps the
    // stereo default instead of throwing NPE as before.
    int audioChannel = AudioFormat.CHANNEL_IN_STEREO;
    if ("mono".equals(options.get("channel"))) audioChannel = AudioFormat.CHANNEL_IN_MONO;
    // Quality presets; default is 22050 Hz / 16-bit. Null-safe guard added:
    // switching on a null "quality" previously threw NPE.
    int sampleRate = 22050;
    int audioBit = AudioFormat.ENCODING_PCM_16BIT;
    String quality = options.get("quality");
    if (quality != null) {
        switch (quality) {
            case "low":
                sampleRate = 8000;
                audioBit = AudioFormat.ENCODING_PCM_8BIT;
                break;
            case "high":
                sampleRate = 44100;
                audioBit = AudioFormat.ENCODING_PCM_16BIT;
                break;
        }
    }
    if (mIsRecording) {
        if (mIsPausing) {
            // Resume a paused session.
            if (mRecorder != null) mRecorder.resumeRecording();
            mIsPausing = false;
            listener.onResult(null);
        } else {
            // Already recording and not paused: refuse to start another session.
            listener.onResult(Util.getError(Constant.RECORDER_BUSY, Constant.RECORDER_BUSY_CODE));
        }
    } else {
        String time_str = new Date().getTime() + "";
        try {
            mFile = Util.getFile(time_str + ".wav");
        } catch (IOException e) {
            e.printStackTrace();
            listener.onResult(Util.getError(Constant.MEDIA_INTERNAL_ERROR, Constant.MEDIA_INTERNAL_ERROR_CODE));
            // BUG FIX: abort on file-creation failure; previously execution
            // fell through and started recording into a stale/null mFile
            // after having already reported an error.
            return;
        }
        mRecorder = OmRecorder.wav(
                new PullTransport.Default(getMic(audioBit, audioChannel, sampleRate), new PullTransport.OnAudioChunkPulledListener() {
                    @Override
                    public void onAudioChunkPulled(AudioChunk audioChunk) {
                        // Chunks are persisted by OmRecorder; nothing extra to do here.
                    }
                }), mFile);
        mRecorder.startRecording();
        mIsRecording = true;
        listener.onResult(null);
    }
}
示例13: createAudioRecord
/**
 * Creates an AudioRecord by trying a list of audio sources in order until one
 * initializes successfully. The first list entry is chosen from the
 * {@code source} argument; the remaining entries act as fallbacks.
 *
 * @param source        1 = MIC (auto), 2 = CAMCORDER (built-in mic), other = MIC
 * @param sampling_rate capture sample rate in Hz
 * @param channels      1 for mono, otherwise stereo
 * @param format        an AudioFormat.ENCODING_* value
 * @param buffer_size   capture buffer size in bytes
 * @return an initialized AudioRecord, or {@code null} if every source failed
 */
@SuppressLint("NewApi")
public static AudioRecord createAudioRecord(
    final int source, final int sampling_rate, final int channels, final int format, final int buffer_size) {
    final int[] AUDIO_SOURCES = new int[] {
        MediaRecorder.AudioSource.DEFAULT, // this first entry is replaced based on the source argument
        MediaRecorder.AudioSource.CAMCORDER, // CAMCORDER records from the built-in mic even when USB audio routing is active
        MediaRecorder.AudioSource.MIC,
        MediaRecorder.AudioSource.DEFAULT,
        MediaRecorder.AudioSource.VOICE_COMMUNICATION,
        MediaRecorder.AudioSource.VOICE_RECOGNITION,
    };
    switch (source) {
    case 1: AUDIO_SOURCES[0] = MediaRecorder.AudioSource.MIC; break; // auto
    case 2: AUDIO_SOURCES[0] = MediaRecorder.AudioSource.CAMCORDER; break; // built-in mic
    default:AUDIO_SOURCES[0] = MediaRecorder.AudioSource.MIC; break; // auto (e.g. when opening the UAC device failed)
    }
    AudioRecord audioRecord = null;
    for (final int src: AUDIO_SOURCES) {
        try {
            if (BuildCheck.isAndroid6()) {
                // Android 6+: use the Builder API with an explicit AudioFormat.
                audioRecord = new AudioRecord.Builder()
                    .setAudioSource(src)
                    .setAudioFormat(new AudioFormat.Builder()
                        .setEncoding(format)
                        .setSampleRate(sampling_rate)
                        .setChannelMask((channels == 1
                            ? AudioFormat.CHANNEL_IN_MONO : AudioFormat.CHANNEL_IN_STEREO))
                        .build())
                    .setBufferSizeInBytes(buffer_size)
                    .build();
            } else {
                // Older platforms: legacy constructor form.
                audioRecord = new AudioRecord(src, sampling_rate,
                    (channels == 1 ? AudioFormat.CHANNEL_IN_MONO : AudioFormat.CHANNEL_IN_STEREO),
                    format, buffer_size);
            }
            // Discard sources that produced an uninitialized recorder.
            if (audioRecord.getState() != AudioRecord.STATE_INITIALIZED) {
                audioRecord.release();
                audioRecord = null;
            }
        } catch (final Exception e) {
            // This source is unusable; fall through to the next candidate.
            audioRecord = null;
        }
        if (audioRecord != null)
            break;
    }
    return audioRecord;
}
示例14: getInfo
/**
 * Describes the current raw-capture configuration for the encoder.
 *
 * @return an EncoderInfo carrying channel count, sample rate and bits per sample
 */
EncoderInfo getInfo() {
    int channels = 1;
    if (RawSamples.CHANNEL_CONFIG == AudioFormat.CHANNEL_IN_STEREO) {
        channels = 2;
    }
    int bps = 8;
    if (RawSamples.AUDIO_FORMAT == AudioFormat.ENCODING_PCM_16BIT) {
        bps = 16;
    }
    return new EncoderInfo(channels, sampleRate, bps);
}