This article collects typical usage examples of the Java class com.google.assistant.embedded.v1alpha1.AudioOutConfig. If you are unsure what AudioOutConfig does or how to use it, the examples below should help.
The AudioOutConfig class belongs to the com.google.assistant.embedded.v1alpha1 package. Three code examples using the class are presented below.
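Before the examples, here is a minimal, self-contained sketch of constructing an AudioOutConfig on its own. The sample rate and volume values are illustrative; the setters are the same ones used in the examples below.

import com.google.assistant.embedded.v1alpha1.AudioOutConfig;

AudioOutConfig audioOutConfig = AudioOutConfig.newBuilder()
        .setEncoding(AudioOutConfig.Encoding.LINEAR16) // 16-bit linear PCM output
        .setSampleRateHertz(16000)                     // sample rate of the returned audio
        .setVolumePercentage(100)                      // playback volume, as a percentage
        .build();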
Example 1: run
import com.google.assistant.embedded.v1alpha1.AudioOutConfig; // import the required package/class
@Override
public void run() {
    Log.i(TAG, "starting assistant request");
    mAudioRecord.startRecording();
    mAssistantRequestObserver = mAssistantService.converse(mAssistantResponseObserver);
    ConverseConfig.Builder converseConfigBuilder = ConverseConfig.newBuilder()
            .setAudioInConfig(AudioInConfig.newBuilder()
                    .setEncoding(ENCODING_INPUT)
                    .setSampleRateHertz(SAMPLE_RATE)
                    .build())
            .setAudioOutConfig(AudioOutConfig.newBuilder()
                    .setEncoding(ENCODING_OUTPUT)
                    .setSampleRateHertz(SAMPLE_RATE)
                    .setVolumePercentage(mVolumePercentage)
                    .build());
    if (mConversationState != null) {
        // Resume an in-progress conversation by passing back the opaque state.
        converseConfigBuilder.setConverseState(ConverseState.newBuilder()
                .setConversationState(mConversationState)
                .build());
    }
    // The first request of the stream carries only the configuration.
    mAssistantRequestObserver.onNext(
            ConverseRequest.newBuilder()
                    .setConfig(converseConfigBuilder.build())
                    .build());
    mAssistantHandler.post(mStreamAssistantRequest);
}
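The mStreamAssistantRequest runnable posted at the end of run() is not shown in this example. Below is a minimal sketch of what such a follow-up task could look like; the SAMPLE_BLOCK_SIZE constant is an assumption, and the code relies on java.nio.ByteBuffer and com.google.protobuf.ByteString.

// Hypothetical sketch: repeatedly read microphone data and stream it as audio_in.
private final Runnable mStreamAssistantRequest = new Runnable() {
    @Override
    public void run() {
        ByteBuffer audioData = ByteBuffer.allocateDirect(SAMPLE_BLOCK_SIZE); // assumed constant
        int result = mAudioRecord.read(audioData, audioData.capacity(),
                AudioRecord.READ_BLOCKING);
        if (result < 0) {
            Log.e(TAG, "error reading from audio stream: " + result);
            return;
        }
        mAssistantRequestObserver.onNext(ConverseRequest.newBuilder()
                .setAudioIn(ByteString.copyFrom(audioData))
                .build());
        // Re-post until the conversation ends and the handler stops this task.
        mAssistantHandler.post(mStreamAssistantRequest);
    }
};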
Example 2: startRecognizing
import com.google.assistant.embedded.v1alpha1.AudioOutConfig; // import the required package/class
public void startRecognizing(int sampleRate) {
    if (mApi == null) {
        Log.w(TAG, "API not ready. Ignoring the request.");
        return;
    }
    for (Listener listener : mListeners) {
        listener.onRequestStart();
    }
    // Configure the API
    mRequestObserver = mApi.converse(mResponseObserver);
    ConverseConfig.Builder converseConfigBuilder = ConverseConfig.newBuilder()
            .setAudioInConfig(AudioInConfig.newBuilder()
                    .setEncoding(AudioInConfig.Encoding.LINEAR16)
                    .setSampleRateHertz(sampleRate)
                    .build())
            .setAudioOutConfig(AudioOutConfig.newBuilder()
                    .setEncoding(AudioOutConfig.Encoding.LINEAR16)
                    .setSampleRateHertz(sampleRate)
                    .setVolumePercentage(DEFAULT_VOLUME)
                    .build());
    if (vConversationState != null) {
        // Continue the previous dialog turn if state was saved.
        converseConfigBuilder.setConverseState(
                ConverseState.newBuilder()
                        .setConversationState(vConversationState)
                        .build());
    }
    mRequestObserver.onNext(ConverseRequest.newBuilder()
            .setConfig(converseConfigBuilder.build())
            .build());
}
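The mResponseObserver passed to mApi.converse() is not part of this example. The sketch below shows one plausible shape for it, assuming a StreamObserver&lt;ConverseResponse&gt; (io.grpc.stub.StreamObserver) and hypothetical listener callbacks onAudio() and onConversationFinished(). The key point is that vConversationState is refreshed from each result so the next startRecognizing() call can resume the dialog.

// Hypothetical sketch of the paired response observer.
private final StreamObserver<ConverseResponse> mResponseObserver =
        new StreamObserver<ConverseResponse>() {
            @Override
            public void onNext(ConverseResponse response) {
                if (response.hasResult()
                        && !response.getResult().getConversationState().isEmpty()) {
                    // Save the opaque state so the next request continues this dialog.
                    vConversationState = response.getResult().getConversationState();
                }
                if (response.hasAudioOut()) {
                    // Audio arrives in the format requested via AudioOutConfig.
                    for (Listener listener : mListeners) {
                        listener.onAudio(response.getAudioOut().getAudioData()); // assumed callback
                    }
                }
            }

            @Override
            public void onError(Throwable t) {
                Log.e(TAG, "converse error", t);
            }

            @Override
            public void onCompleted() {
                for (Listener listener : mListeners) {
                    listener.onConversationFinished(); // assumed callback
                }
            }
        };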
Example 3: build
import com.google.assistant.embedded.v1alpha1.AudioOutConfig; // import the required package/class
/**
 * Returns an EmbeddedAssistant if all required parameters have been supplied.
 *
 * @return An inactive EmbeddedAssistant. Call {@link EmbeddedAssistant#connect()} to start
 * it.
 */
public EmbeddedAssistant build() {
    if (mEmbeddedAssistant.mRequestCallback == null) {
        throw new NullPointerException("There must be a defined RequestCallback");
    }
    if (mEmbeddedAssistant.mConversationCallback == null) {
        throw new NullPointerException("There must be a defined ConversationCallback");
    }
    if (mEmbeddedAssistant.mUserCredentials == null) {
        throw new NullPointerException("There must be provided credentials");
    }
    if (mSampleRate == 0) {
        throw new NullPointerException("There must be a defined sample rate");
    }
    final int audioEncoding = AudioFormat.ENCODING_PCM_16BIT;

    // Construct audio configurations for the Converse request.
    mEmbeddedAssistant.mAudioInConfig = AudioInConfig.newBuilder()
            .setEncoding(AudioInConfig.Encoding.LINEAR16)
            .setSampleRateHertz(mSampleRate)
            .build();
    mEmbeddedAssistant.mAudioOutConfig = AudioOutConfig.newBuilder()
            .setEncoding(AudioOutConfig.Encoding.LINEAR16)
            .setSampleRateHertz(mSampleRate)
            .setVolumePercentage(mEmbeddedAssistant.mVolume)
            .build();

    // Initialize Android audio framework parameters.
    mEmbeddedAssistant.mAudioInputFormat = new AudioFormat.Builder()
            .setChannelMask(AudioFormat.CHANNEL_IN_MONO)
            .setEncoding(audioEncoding)
            .setSampleRate(mSampleRate)
            .build();
    mEmbeddedAssistant.mAudioInputBufferSize = AudioRecord.getMinBufferSize(
            mEmbeddedAssistant.mAudioInputFormat.getSampleRate(),
            mEmbeddedAssistant.mAudioInputFormat.getChannelMask(),
            mEmbeddedAssistant.mAudioInputFormat.getEncoding());
    mEmbeddedAssistant.mAudioOutputFormat = new AudioFormat.Builder()
            .setChannelMask(AudioFormat.CHANNEL_OUT_MONO)
            .setEncoding(audioEncoding)
            .setSampleRate(mSampleRate)
            .build();
    mEmbeddedAssistant.mAudioOutputBufferSize = AudioTrack.getMinBufferSize(
            mEmbeddedAssistant.mAudioOutputFormat.getSampleRate(),
            mEmbeddedAssistant.mAudioOutputFormat.getChannelMask(),
            mEmbeddedAssistant.mAudioOutputFormat.getEncoding());

    // Create a new AudioRecord to work around audio routing issues.
    mEmbeddedAssistant.mAudioRecord = new AudioRecord.Builder()
            .setAudioSource(AudioSource.VOICE_RECOGNITION)
            .setAudioFormat(mEmbeddedAssistant.mAudioInputFormat)
            .setBufferSizeInBytes(mEmbeddedAssistant.mAudioInputBufferSize)
            .build();
    if (mEmbeddedAssistant.mAudioInputDevice != null) {
        boolean result = mEmbeddedAssistant.mAudioRecord.setPreferredDevice(
                mEmbeddedAssistant.mAudioInputDevice);
        if (!result) {
            Log.e(TAG, "failed to set preferred input device");
        }
    }
    return mEmbeddedAssistant;
}
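A hypothetical usage sketch of this builder follows. The setter names are assumptions inferred from the fields that build() validates, not confirmed API.

EmbeddedAssistant assistant = new EmbeddedAssistant.Builder()
        .setRequestCallback(requestCallback)            // assumed setter for mRequestCallback
        .setConversationCallback(conversationCallback)  // assumed setter for mConversationCallback
        .setCredentials(userCredentials)                // assumed setter for mUserCredentials
        .setSampleRate(16000)                           // assumed setter for mSampleRate
        .build();
assistant.connect(); // start the assistant, per the Javadoc on build()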