This page collects typical usage examples of the Java class com.google.assistant.embedded.v1alpha1.ConverseResponse. If you are wondering what ConverseResponse is for and how to use it, the curated code examples below should help.
ConverseResponse belongs to the com.google.assistant.embedded.v1alpha1 package. Five code examples of the class are shown below, ordered by popularity by default.
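All of the onNext handlers below are registered on the bidirectional Converse stream of the generated EmbeddedAssistantGrpc stub. The following is only a minimal sketch of that wiring, assuming an already-authenticated gRPC ManagedChannel; the channel parameter and the method name startConversation are illustrative and not taken from the examples themselves.
import com.google.assistant.embedded.v1alpha1.ConverseRequest;
import com.google.assistant.embedded.v1alpha1.ConverseResponse;
import com.google.assistant.embedded.v1alpha1.EmbeddedAssistantGrpc;
import io.grpc.ManagedChannel;
import io.grpc.stub.StreamObserver;

// Sketch only: channel is assumed to be an authenticated ManagedChannel.
void startConversation(ManagedChannel channel) {
    EmbeddedAssistantGrpc.EmbeddedAssistantStub assistantStub =
            EmbeddedAssistantGrpc.newStub(channel);
    // converse() takes the response observer (whose onNext() looks like the
    // examples below) and returns the stream used to send ConverseRequests.
    StreamObserver<ConverseRequest> requestObserver =
            assistantStub.converse(new StreamObserver<ConverseResponse>() {
                @Override
                public void onNext(ConverseResponse value) {
                    // Handle EVENT_TYPE / RESULT / AUDIO_OUT / ERROR as in the examples.
                }

                @Override
                public void onError(Throwable t) {
                    // Stream failed; surface the error to the caller.
                }

                @Override
                public void onCompleted() {
                    // The conversation turn finished.
                }
            });
    // The first ConverseRequest on requestObserver carries the ConverseConfig;
    // subsequent requests carry recorded audio_in bytes.
}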
Example 1: onNext
import com.google.assistant.embedded.v1alpha1.ConverseResponse; // import the required package/class
@Override
public void onNext(ConverseResponse value) {
    switch (value.getConverseResponseCase()) {
        case EVENT_TYPE:
            Log.d(TAG, "converse response event: " + value.getEventType());
            break;
        case RESULT:
            // Show the recognized request text in the UI.
            final String spokenRequestText = value.getResult().getSpokenRequestText();
            if (!spokenRequestText.isEmpty()) {
                Log.i(TAG, "assistant request text: " + spokenRequestText);
                mMainHandler.post(() -> mAssistantRequestsAdapter.add(spokenRequestText));
            }
            break;
        case AUDIO_OUT:
            // Buffer the assistant's audio reply and toggle the LED as visual feedback.
            final ByteBuffer audioData =
                    ByteBuffer.wrap(value.getAudioOut().getAudioData().toByteArray());
            Log.d(TAG, "converse audio size: " + audioData.remaining());
            mAssistantResponses.add(audioData);
            if (mLed != null) {
                try {
                    mLed.setValue(!mLed.getValue());
                } catch (IOException e) {
                    Log.w(TAG, "error toggling LED:", e);
                }
            }
            break;
        case ERROR:
            Log.e(TAG, "converse response error: " + value.getError());
            break;
    }
}
Example 2: onNext
import com.google.assistant.embedded.v1alpha1.ConverseResponse; // import the required package/class
@Override
public void onNext(ConverseResponse value) {
    switch (value.getConverseResponseCase()) {
        case EVENT_TYPE:
            Log.d(TAG, "converse response event: " + value.getEventType());
            break;
        case RESULT:
            final String spokenRequestText = value.getResult().getSpokenRequestText();
            // Apply a volume change requested through the Assistant (e.g. "set volume to 50%").
            if (value.getResult().getVolumePercentage() != 0) {
                mVolumePercentage = value.getResult().getVolumePercentage();
                Log.i(TAG, "assistant volume changed: " + mVolumePercentage);
                mAudioTrack.setVolume(AudioTrack.getMaxVolume() *
                        mVolumePercentage / 100.0f);
            }
            if (!spokenRequestText.isEmpty()) {
                Log.i(TAG, "assistant request text: " + spokenRequestText);
                mMainHandler.post(() -> mAssistantRequestsAdapter.add(spokenRequestText));
            }
            break;
        case AUDIO_OUT:
            final ByteBuffer audioData =
                    ByteBuffer.wrap(value.getAudioOut().getAudioData().toByteArray());
            Log.d(TAG, "converse audio size: " + audioData.remaining());
            mAssistantResponses.add(audioData);
            if (mLed != null) {
                try {
                    mLed.setValue(!mLed.getValue());
                } catch (IOException e) {
                    Log.w(TAG, "error toggling LED:", e);
                }
            }
            break;
        case ERROR:
            Log.e(TAG, "converse response error: " + value.getError());
            break;
    }
}
Example 3: onNext
import com.google.assistant.embedded.v1alpha1.ConverseResponse; // import the required package/class
@Override
public void onNext(ConverseResponse value) {
    switch (value.getConverseResponseCase()) {
        case EVENT_TYPE:
            Log.d(TAG, "converse response event: " + value.getEventType());
            break;
        case RESULT:
            final String spokenRequestText = value.getResult().getSpokenRequestText();
            if (value.getResult().getVolumePercentage() != 0) {
                mVolumePercentage = value.getResult().getVolumePercentage();
                Log.i(TAG, "assistant volume changed: " + mVolumePercentage);
                mAudioTrack.setVolume(AudioTrack.getMaxVolume() *
                        mVolumePercentage / 100.0f);
            }
            // Keep the opaque conversation state so the next request can continue this conversation.
            mConversationState = value.getResult().getConversationState();
            if (!spokenRequestText.isEmpty()) {
                Log.i(TAG, "assistant request text: " + spokenRequestText);
                mMainHandler.post(() -> mAssistantRequestsAdapter.add(spokenRequestText));
            }
            break;
        case AUDIO_OUT:
            final ByteBuffer audioData =
                    ByteBuffer.wrap(value.getAudioOut().getAudioData().toByteArray());
            Log.d(TAG, "converse audio size: " + audioData.remaining());
            mAssistantResponses.add(audioData);
            if (mLed != null) {
                try {
                    mLed.setValue(!mLed.getValue());
                } catch (IOException e) {
                    Log.w(TAG, "error toggling LED:", e);
                }
            }
            break;
        case ERROR:
            Log.e(TAG, "converse response error: " + value.getError());
            break;
    }
}
Example 4: onNext
import com.google.assistant.embedded.v1alpha1.ConverseResponse; // import the required package/class
@Override
public void onNext(final ConverseResponse value) {
    switch (value.getConverseResponseCase()) {
        case EVENT_TYPE:
            mConversationHandler.post(new Runnable() {
                @Override
                public void run() {
                    mConversationCallback.onConversationEvent(value.getEventType());
                }
            });
            break;
        case RESULT:
            // Update state.
            mConversationState = value.getResult().getConversationState();
            // Update volume.
            if (value.getResult().getVolumePercentage() != 0) {
                final int volumePercentage = value.getResult().getVolumePercentage();
                mVolume = volumePercentage;
                mConversationHandler.post(new Runnable() {
                    @Override
                    public void run() {
                        mConversationCallback.onVolumeChanged(volumePercentage);
                    }
                });
            }
            if (value.getResult().getSpokenRequestText() != null &&
                    !value.getResult().getSpokenRequestText().isEmpty()) {
                mRequestHandler.post(new Runnable() {
                    @Override
                    public void run() {
                        mRequestCallback.onSpeechRecognition(value.getResult()
                                .getSpokenRequestText());
                    }
                });
            }
            // Update microphone mode.
            mMicrophoneMode = value.getResult().getMicrophoneMode();
            break;
        case AUDIO_OUT:
            if (mAudioOutSize <= value.getAudioOut().getSerializedSize()) {
                mAudioOutSize = value.getAudioOut().getSerializedSize();
            } else {
                mAudioOutSize = 0;
                onCompleted();
            }
            final ByteBuffer audioData =
                    ByteBuffer.wrap(value.getAudioOut().getAudioData().toByteArray());
            mAssistantResponses.add(audioData);
            break;
        case ERROR:
            mConversationHandler.post(new Runnable() {
                @Override
                public void run() {
                    mConversationCallback.onConversationError(value.getError());
                }
            });
            break;
    }
}
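Examples 3 and 4 store the result's conversation state bytes. That opaque blob is meant to be echoed back in the ConverseConfig of the next request so the Assistant treats the new turn as a follow-up. The snippet below is only a sketch of that round trip, built from the v1alpha1 message builders; the 16 kHz LINEAR16 audio settings and the mConversationState / mVolume fields are assumptions carried over from the example above, not part of it.
import com.google.assistant.embedded.v1alpha1.AudioInConfig;
import com.google.assistant.embedded.v1alpha1.AudioOutConfig;
import com.google.assistant.embedded.v1alpha1.ConverseConfig;
import com.google.assistant.embedded.v1alpha1.ConverseRequest;
import com.google.assistant.embedded.v1alpha1.ConverseState;

// Sketch only: audio settings are assumptions; mConversationState and mVolume
// are the fields maintained by the observer in Example 4.
ConverseConfig.Builder configBuilder = ConverseConfig.newBuilder()
        .setAudioInConfig(AudioInConfig.newBuilder()
                .setEncoding(AudioInConfig.Encoding.LINEAR16)
                .setSampleRateHertz(16000)
                .build())
        .setAudioOutConfig(AudioOutConfig.newBuilder()
                .setEncoding(AudioOutConfig.Encoding.LINEAR16)
                .setSampleRateHertz(16000)
                .setVolumePercentage(mVolume)
                .build());
if (mConversationState != null && !mConversationState.isEmpty()) {
    // Carrying the opaque state ties this turn to the previous one (follow-up questions).
    configBuilder.setConverseState(ConverseState.newBuilder()
            .setConversationState(mConversationState)
            .build());
}
// This config request is sent as the first message of the next Converse stream.
ConverseRequest configRequest = ConverseRequest.newBuilder()
        .setConfig(configBuilder.build())
        .build();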
Example 5: onNext
import com.google.assistant.embedded.v1alpha1.ConverseResponse; // import the required package/class
@Override
public void onNext(ConverseResponse value) {
    switch (value.getConverseResponseCase()) {
        case EVENT_TYPE:
            // Log.d(TAG, "converse response event: " + value.getEventType());
            //playAudioSong=false;
            break;
        case RESULT:
            final String spokenRequestText = value.getResult().getSpokenRequestText();
            final String spokenResponseText = value.getResult().getSpokenResponseText();
            vConversationState = value.getResult().getConversationState();
            if (!spokenRequestText.isEmpty()) {
                Log.i(TAG, "assistant request text: " + spokenRequestText);
                for (Listener listener : mListeners) {
                    listener.onSpeechRecognized(spokenRequestText, true);
                }
            }
            if (value.getResult().getVolumePercentage() != 0) {
                mVolumePercentage = value.getResult().getVolumePercentage();
                Log.i(TAG, "assistant volume changed: " + mVolumePercentage);
                float newVolume = AudioTrack.getMaxVolume() * mVolumePercentage / 100.0f;
                if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
                    mAudioTrack.setVolume(newVolume);
                }
            }
            if (!spokenResponseText.isEmpty()) {
                Log.i(TAG, "assistant response text: " + spokenResponseText);
                /*for (Listener listener : mListeners) {
                    listener.onSpeechResponsed(spokenResponseText, false);
                }*/
            }
            break;
        case AUDIO_OUT:
            // Play the assistant's reply directly instead of buffering it.
            byte[] data = value.getAudioOut().getAudioData().toByteArray();
            final ByteBuffer audioData = ByteBuffer.wrap(data);
            //Log.d(TAG, "converse audio size: " + audioData.remaining());
            if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
                mAudioTrack.write(audioData, audioData.remaining(), AudioTrack.WRITE_BLOCKING);
            } else {
                mAudioTrack.write(data, 0, data.length);
            }
            break;
        case ERROR:
            Log.e(TAG, "converse response error: " + value.getError());
            break;
        case CONVERSERESPONSE_NOT_SET:
            // Log.d(TAG, "CONVERSERESPONSE_NOT_SET" + value.getEventType());
            break;
    }
}
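Example 5 writes the AUDIO_OUT bytes straight into an AudioTrack rather than queuing them. As a rough sketch of how such a track might be prepared: the 16-bit, 16 kHz mono format here is an assumption and must match whatever AudioOutConfig was actually requested, and the classic constructor is used only so the pre-Lollipop write(byte[], int, int) path in the example still applies.
import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioTrack;

// Sketch only: 16000 Hz mono PCM is an assumption; keep it in sync with AudioOutConfig.
int bufferSize = AudioTrack.getMinBufferSize(16000,
        AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT);
AudioTrack audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, 16000,
        AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT,
        bufferSize, AudioTrack.MODE_STREAM);
audioTrack.play(); // start playback; onNext() can then write() each AUDIO_OUT chunk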