本文整理汇总了Python中naoqi.ALProxy.setAudioExpression方法的典型用法代码示例。如果您正苦于以下问题:Python ALProxy.setAudioExpression方法的具体用法?Python ALProxy.setAudioExpression怎么用?Python ALProxy.setAudioExpression使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类naoqi.ALProxy
的用法示例。
在下文中一共展示了ALProxy.setAudioExpression方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: NAOVoiceRec
# 需要导入模块: from naoqi import ALProxy [as 别名]
# 或者: from naoqi.ALProxy import setAudioExpression [as 别名]
class NAOVoiceRec(ALModule):
    """NAOqi module that forwards recognized words to a user callback.

    Wires ALSpeechRecognition to ALMemory's "WordRecognized" event and
    invokes the supplied callback with each event payload.
    """

    def __init__(self, id, ip, port, wordList, callBack, wordSpotting=True, visualExpression=True, audioExpression=False):
        super(NAOVoiceRec, self).__init__(id)
        self.id = id
        self.wordCallBack = callBack
        # Configure the speech-recognition engine on the robot.
        recognizer = ALProxy("ALSpeechRecognition", ip, port)
        recognizer.setLanguage("English")
        recognizer.setVocabulary(wordList, wordSpotting)
        recognizer.subscribe(id)
        # Eye-LED / beep feedback while recognising.
        recognizer.setVisualExpression(visualExpression)
        recognizer.setAudioExpression(audioExpression)
        self.speechRec = recognizer
        # Route "WordRecognized" memory events to our wordRecognized method.
        self.memProx = ALProxy("ALMemory")
        self.memProx.subscribeToEvent("WordRecognized", self.id, "wordRecognized")

    def __del__(self):
        # Best-effort cleanup when the module is garbage-collected.
        self.speechRec.unsubscribe(self.id)
        self.memProx.unsubscribeToEvent("WordRecognized", self.id)

    def wordRecognized(self, event, words, id):
        # NAOqi event callback: hand the payload straight to the user callback.
        self.wordCallBack(words)
示例2: __init__
# 需要导入模块: from naoqi import ALProxy [as 别名]
# 或者: from naoqi.ALProxy import setAudioExpression [as 别名]
def __init__(self, name):
    """Register this module and configure NAO speech recognition.

    Creates TTS and ASR proxies on the robot, subscribes the global
    ALMemory proxy to "WordRecognized", then loads a small vocabulary.
    """
    ALModule.__init__(self, name)
    robot_ip, robot_port = "138.110.234.37", 9559
    self.tts = ALProxy("ALTextToSpeech", robot_ip, robot_port)
    recognizer = ALProxy("ALSpeechRecognition", robot_ip, robot_port)
    # The memory proxy lives in a module-level global so the event
    # subscription stays reachable for NAOqi callbacks.
    global memory
    memory = ALProxy("ALMemory")
    memory.subscribeToEvent("WordRecognized", "Test", "onWordRecognized")
    # Recognition is paused while the vocabulary is replaced.
    recognizer.pause(True)
    recognizer.setVocabulary(["cabbage", "hello", "punch"], True)
    recognizer.setAudioExpression(True)
    recognizer.setVisualExpression(True)
    recognizer.pause(False)
示例3: __init__
# 需要导入模块: from naoqi import ALProxy [as 别名]
# 或者: from naoqi.ALProxy import setAudioExpression [as 别名]
def __init__(self, name):
    """Register the module and listen for the single word "point".

    Proxies take no address here: the Python broker is already
    connected to the NAOqi broker.
    """
    ALModule.__init__(self, name)
    self.tts = ALProxy("ALTextToSpeech")
    recognizer = ALProxy("ALSpeechRecognition")
    # Recognition is paused while the vocabulary is replaced.
    recognizer.pause(True)
    recognizer.setVocabulary(["point"], True)
    recognizer.setAudioExpression(True)
    recognizer.setVisualExpression(True)
    recognizer.pause(False)
    # Keep the memory proxy in a global so the subscription survives.
    global memory
    memory = ALProxy("ALMemory")
    memory.subscribeToEvent("WordRecognized", "Test", "onWordRecognized")
示例4: ALProxy
# 需要导入模块: from naoqi import ALProxy [as 别名]
# 或者: from naoqi.ALProxy import setAudioExpression [as 别名]
# -*- encoding: UTF-8 -*-
import sys
import time
import random
import motion
from naoqi import ALProxy

# Proxies to the robot "nao.local": text-to-speech, speech recognition,
# shared memory, and motion control.
# NOTE(review): the 'motion' proxy below shadows the imported 'motion'
# module -- the module is unusable after this point.
tts = ALProxy("ALTextToSpeech","nao.local",9559)
sr = ALProxy("ALSpeechRecognition","nao.local",9559)
memProxy = ALProxy("ALMemory","nao.local",9559)
motion = ALProxy("ALMotion","nao.local",9559)
# French vocabulary: oui/non plus the numbers used by the quiz below.
sr.setVocabulary(["oui","non","un","deux","trois","quatre","cinq","six","sept","huit","neuf","dix","douze","quatorze","quinze","seize","dix-huit","vingt","vingt-et-un","vingt-quatre","vingt-cinq","vingt-sept","vingt-huit","trente","trente-deux","trente-cinq","trente-six","quarante","quarante-deux","quarante-cinq","quarante-huit","quarante-neuf","cinquante","cinquante-quatre","cinquante-six","soixante","soixante-trois","soixante-quatre","soixante-dix","soixante-douze","quatre-vingts","quatre-vingt-un","quatre-vingt-dix","cent"],True)
# Beep and eye-LED feedback while recognising.
sr.setAudioExpression(True)
sr.setVisualExpression(True)
fini = False  # set when the game is over
x = "WordRecognized"  # ALMemory key watched by the callback below
cpt = 3
def onWordRecognized(x,total):
"""Ici on regarde le mot reconnu si l'indice de fiabilité est supérieur à 0.5 """
retVal = memProxy.getData("WordRecognized")
print x, retVal[0], retVal[1]
print total
if(retVal[0] != "" and retVal[1]>0.5):
if(retVal[0] == "un" and total == 1):
return True
elif retVal[0] == "deux" and total == 2:
示例5: __init__
# 需要导入模块: from naoqi import ALProxy [as 别名]
# 或者: from naoqi.ALProxy import setAudioExpression [as 别名]
#.........这里部分代码省略.........
def headmove(self, angles, speed):
    """Move HeadYaw/HeadPitch to *angles* at *speed*, guarded by a lock.

    Skips the request entirely when another head move is in flight.
    """
    if self.headlock:
        return
    # BUG FIX: the original used '==' (a no-op comparison) instead of '='
    # here and at the end, so the lock was never actually taken/released.
    self.headlock = True
    try:
        task_id = self.motionProxy.post.setAngles(["HeadYaw", "HeadPitch"], angles, speed)
        # Block until the asynchronous motion task finishes.
        self.motionProxy.wait(task_id, 0)
    finally:
        # Always release the lock, even if the motion call raises.
        self.headlock = False
def search(self):
    """Sweep the head left, then right, then return to the start pose.

    Each stage is skipped once self.stop has been raised; the flag is
    cleared when the sweep ends.
    """
    origin = self.headOdom
    # Look left first.
    self.headmove([-0.8, self.headOdom[1]], 0.1)
    time.sleep(2)
    if not self.stop:
        # Then sweep to the right.
        self.headmove([0.8, self.headOdom[1]], 0.1)
        time.sleep(2)
    if not self.stop:
        # Return to the original pose.
        self.headmove(origin, 0.1)
    self.stop = False
def audio_callback(self,msg):
    """Sound-event callback: lazily start ASR, then poll for a word.

    On the first call, configures and subscribes the speech recognizer.
    Afterwards polls ALMemory every 0.8 s until either a word is heard
    with sufficient confidence (publishes True on self.speechPub) or
    more than 10 silent polls accumulate (pauses ASR and gives up).
    """
    if self.speechini == False :
        # One-time ASR setup: pause, load vocabulary, enable audio and
        # visual feedback, subscribe, resume.
        self.speechini = True
        self.asr.pause(True)
        self.vocabulary = ["yes", "no", "please", "hello","the","be","to","of","and","in","that","have","it","robot"]
        self.asr.setVocabulary(self.vocabulary, True)
        self.asr.setVisualExpression(True)
        self.asr.setAudioExpression(True)
        self.asr.subscribe("ASR")
        self.asr.pause(False)
    nodetectionCount = 0
    # print msg.azimuth.data
    while True:
        time.sleep(0.8)
        # Latest recognition result and voice-activity flag from ALMemory.
        # NOTE(review): speech[1] is presumably the confidence of the best
        # hypothesis -- confirm against the ALSpeechRecognition docs.
        speech = self.memoryProxy.getData(self.memValue, 0)
        voice = self.memoryProxy.getData('SpeechDetected', 0)
        if (voice ==1 ):
            if speech[1] > 0.1:
                # Confident enough: report a detection and stop polling.
                nodetectionCount =0
                self.speechPub.publish(True)
                return
        else:
            nodetectionCount +=1
            if nodetectionCount >10:
                # ~8 s without voice activity: pause ASR and give up.
                self.asr.pause(True)
                return
def navigate(self, x):
    """Drive straight ahead by *x* (wakes the robot first)."""
    self.checkawake()
    self.motionProxy.moveTo(x, 0, 0)
def rotate(self, z):
    """Turn in place by *z* (wakes the robot first)."""
    self.checkawake()
    self.motionProxy.moveTo(0, 0, z)
示例6: SpellerModule
# 需要导入模块: from naoqi import ALProxy [as 别名]
# 或者: from naoqi.ALProxy import setAudioExpression [as 别名]
class SpellerModule(ALModule):
    """NAOqi module that lets a user spell a word letter by letter.

    Listens for letters/digits (optionally prefixed with "capital"),
    correction commands ("delete"/"back") and stop commands via the
    "WordRecognized" event, accumulating the result in self.word.
    """

    def __init__(self, name):
        ALModule.__init__(self, name)
        self.name = name
        self.tts = ALProxy("ALTextToSpeech")
        self.asr = ALProxy("ALSpeechRecognition")
        self.memory = ALProxy("ALMemory")
        # self.asr.setLanguage("English")
        self.word = ""
        # Recognisable symbols: lowercase letters and digits.
        self.letters = list(string.ascii_lowercase) + list(string.digits)
        self.correctCommands = ["delete", "back"]
        self.stopCommands = ["stop"]
        self.capitalTag = "capital"
        # "capital a", "capital b", ...  list() keeps this a list on
        # Python 3, where map() returns an iterator.
        self.capitalLetters = list(map(lambda l: self.capitalTag + " " + l, self.letters))
        self.capitalMode = False

    def beginSpelling(self, callback=None, additionalStopCommand=None):
        """Start a spelling session; *callback* receives the final word."""
        self.word = ""
        self.capitalMode = False
        self.asr.setAudioExpression(False)
        if additionalStopCommand and additionalStopCommand not in self.stopCommands:
            self.stopCommands.append(additionalStopCommand)
        vocabulary = self.letters + \
            self.correctCommands + \
            self.stopCommands + \
            self.capitalLetters
        self.asr.setWordListAsVocabulary(vocabulary)
        self.memory.subscribeToEvent("WordRecognized", self.name, "onWordRecognized")
        self.callback = callback

    def endSpelling(self):
        """Stop listening and hand the collected word to the callback."""
        self.memory.unsubscribeToEvent("WordRecognized", self.name)
        if self.callback:
            self.callback(self.word)

    def sayLetter(self, letter):
        """Speak *letter*, announcing "capital" before uppercase letters."""
        if self.isUppercase(letter):
            self.tts.say("capital")
            self.tts.say(letter)
        else:
            self.tts.say(letter)

    def saySpelling(self, word):
        """Read *word* back one letter at a time."""
        for letter in word:
            self.sayLetter(letter)
            time.sleep(0.1)

    def onWordRecognized(self, eventName, value, subscriberIdentifier):
        """Handle one recognition result from ALMemory.

        value is [word, confidence, ...]; when a second candidate is
        present (len > 3) its confidence is used to require a margin
        between the best and second-best hypotheses.
        """
        recognizedWord = value[0]
        confidence = value[1]
        difference = 9  # large default when there is no second candidate
        # print() form works on both Python 2 and Python 3.
        print("word: %s confidence: %f" % (recognizedWord, confidence))
        if len(value) > 3:
            difference = confidence - value[3]
            print("difference: %f %f" % (difference, difference - 0.01))
        if confidence < 0.5 or difference < 0.03:
            return
        if recognizedWord in self.correctCommands:
            self.capitalMode = False
            # BUG FIX: guard against deleting from an empty word, which
            # previously raised IndexError on self.word[-1].
            if self.word:
                deletedLetter = self.word[-1]
                self.word = self.word[:-1]
                self.tts.say("deleted " + deletedLetter)
        elif recognizedWord in self.stopCommands:
            self.capitalMode = False
            self.endSpelling()
        elif recognizedWord == self.capitalTag:
            self.capitalMode = True
        else:
            if self.capitalMode:
                # str.upper() replaces the Python-2-only string.upper().
                recognizedWord = recognizedWord.upper()
            self.word += recognizedWord
            self.capitalMode = False
            # Unsubscribe while speaking so our own voice is not recognised.
            self.memory.unsubscribeToEvent("WordRecognized", self.name)
            self.sayLetter(recognizedWord)
            self.memory.subscribeToEvent("WordRecognized", self.name, "onWordRecognized")

    def isUppercase(self, word):
        """True when *word* appears in ascii_letters (substring test) and
        equals its own uppercase form -- i.e. an uppercase ASCII letter."""
        if word not in string.ascii_letters:
            return False
        else:
            return word.upper() == word

    def stop(self):
        """Emergency shutdown: announce termination and unsubscribe."""
        self.tts.say("You terminated me!")
        self.memory.unsubscribeToEvent("WordRecognized", self.name)
示例7: SpeechRecognitionWrapper
# 需要导入模块: from naoqi import ALProxy [as 别名]
# 或者: from naoqi.ALProxy import setAudioExpression [as 别名]
class SpeechRecognitionWrapper(ALModule):
    """ROS wrapper for Naoqi speech recognition"""

    def __init__(self, ip, port, publisher, config):
        """Connect to the parent NAOqi broker and configure recognition.

        ip/port locate the parent NAOqi broker; publisher is the ROS
        publisher used to forward recognised words; config is the dict
        consumed by reconfigure().
        """
        # Get a (unique) name for naoqi module which is based on the node name
        # and is a valid Python identifier (will be useful later)
        self.naoqi_name = Util.to_naoqi_name( rospy.get_name() )
        # Start ALBroker (needed by ALModule)
        self.broker = ALBroker(self.naoqi_name + "_broker",
            "0.0.0.0",  # listen to anyone
            0,          # find a free port and use it
            ip,         # parent broker IP
            port )      # parent broker port
        # Init superclass ALModule
        ALModule.__init__( self, self.naoqi_name )
        # Start naoqi proxies
        self.memory = ALProxy("ALMemory")
        self.proxy = ALProxy("ALSpeechRecognition")
        # Keep publisher to send word recognized
        self.pub = publisher
        # Install global variables needed by Naoqi
        self.install_naoqi_globals()
        # Check no one else is subscribed to this event
        subscribers = self.memory.getSubscribers(Constants.EVENT)
        if subscribers:
            rospy.logwarn("Speech recognition already in use by another node")
            for module in subscribers:
                self.stop(module)
        # Configure this instance
        self.reconfigure(config)
        # And subscribe to the event raised by speech recognition
        rospy.loginfo("Subscribing '{}' to NAO speech recognition".format(
            self.naoqi_name) )
        self.memory.subscribeToEvent(
            Constants.EVENT,
            self.naoqi_name,
            self.on_word_recognised.func_name )

    # Install global variables needed for Naoqi callbacks to work:
    # NAOqi resolves callbacks by name in the module globals, so both the
    # wrapper instance and the memory proxy must be visible there.
    def install_naoqi_globals(self):
        globals()[self.naoqi_name] = self
        globals()["memory"] = self.memory

    def reconfigure(self, config):
        """Apply language/expression/vocabulary settings from *config*."""
        self.proxy.setLanguage( config["language"] )
        self.proxy.setAudioExpression( config["audio_expression"] )
        self.proxy.setVisualExpression( config["visual_expression"] )
        self.proxy.setVocabulary(
            Util.parse_vocabulary( config["vocabulary"] ),
            config["word_spotting"] )

    def stop(self, module = None):
        """Unsubscribe *module* (default: this wrapper) from the event."""
        if module is None:
            module = self.naoqi_name
        rospy.loginfo("Unsubscribing '{}' from NAO speech recognition".format(
            module))
        try:
            self.memory.unsubscribeToEvent( Constants.EVENT, module )
        except RuntimeError:
            # Already unsubscribed (or never was) -- log and carry on.
            rospy.logwarn("Could not unsubscribe from NAO speech recognition")

    def on_word_recognised(self, key, value, subscriber_id ):
        """Publish the words recognized by NAO via ROS """
        # Create dictionary, by grouping into tuples the list in value
        # (value alternates word, confidence, word, confidence, ...).
        temp_dict = dict( value[i:i+2] for i in range(0, len(value), 2) )
        # Delete empty string from dictionary
        if '' in temp_dict:
            del(temp_dict[''])
        self.pub.publish(WordRecognized( temp_dict.keys(), temp_dict.values() ))
示例8: PhotographerModule
# 需要导入模块: from naoqi import ALProxy [as 别名]
# 或者: from naoqi.ALProxy import setAudioExpression [as 别名]
class PhotographerModule(ALModule):
def __init__(self, name):
    """Register the module and create all proxies and session state."""
    ALModule.__init__(self, name)
    self.name = name
    # Robot service proxies.
    self.tts = ALProxy("ALTextToSpeech")
    self.asr = ALProxy("ALSpeechRecognition")
    self.memory = ALProxy("ALMemory")
    self.posture = ALProxy("ALRobotPosture")
    self.motion = ALProxy("ALMotion")
    # Words accepted as a head-count answer.
    self.minimumPeople = 1
    self.numbers = [str(n) for n in range(1, 10)]
    self.many = ["many", "lot", "all", "lots", "everybody"]
    # Pending word-recognition dispatch.
    self.wordRecognizedCallback = None
    self.wordRecognizedMinimumConfidence = 0.0
    # Photo-session flags.
    self.lastPhoto = False
    self.pickedUp = False
    self.placed = False
    self.framing = ""
def start(self):
    """Stand up and begin listening for a photo request."""
    self.posture.goToPosture("StandInit", 0.5)
    self.waitForPhotoWord()
def stop(self):
    """Shut down: stop listening, say goodbye, sit down and relax motors."""
    try:
        self.memory.unsubscribeToEvent("WordRecognized", self.name)
    except RuntimeError:
        # Not subscribed -- nothing to undo.
        pass
    self.tts.say("You terminated me!")
    self.posture.goToPosture("Sit", 0.5)
    self.motion.stiffnessInterpolation("Body", 0.0, 1.0)
def waitForWords(self, triggerWords, callback, minimumConfidence=0.5):
    """Arm word recognition for *triggerWords* and remember *callback*."""
    # Drop any stale subscription first; ignore failure if none exists.
    try:
        self.memory.unsubscribeToEvent("WordRecognized", self.name)
    except:
        pass
    self.asr.setAudioExpression(False)
    self.asr.setVocabulary(triggerWords, True)
    # Stash the dispatch target and threshold for onWordRecognized.
    self.wordRecognizedCallback = callback
    self.wordRecognizedMinimumConfidence = minimumConfidence
    self.memory.subscribeToEvent("WordRecognized", self.name, "onWordRecognized")
def onWordRecognized(self, eventName, value, subscriberIdentifier):
    """Dispatch one recognition result to the registered callback.

    value is [word, confidence, ...].  Results below the configured
    confidence threshold are ignored; otherwise the subscription is
    dropped and the stored callback receives the word.
    """
    recognizedWord = value[0]
    confidence = value[1]
    # print() form is valid on both Python 2 and Python 3.
    print("onWordRecognized: '%s' with confidence %f" % (recognizedWord, confidence))
    # BUG FIX: honour the threshold stored by waitForWords() instead of a
    # hard-coded 0.5 (waitForPhotoWord asks for 0.3, which was ignored).
    if confidence < self.wordRecognizedMinimumConfidence:
        return
    self.memory.unsubscribeToEvent("WordRecognized", self.name)
    self.wordRecognizedCallback(recognizedWord)
def waitForPhotoWord(self):
    """Listen for any word that means "take a picture"."""
    photoWords = ["photo", "photograph", "picture", "shot", "portrait"]
    self.waitForWords(photoWords, self.onPhotoWordRecognized, 0.3)
def onPhotoWordRecognized(self, recognizedWord):
    """Acknowledge a photo request and start the head-scan sequence."""
    print("onPhotoWordRecognized")
    self.tts.say("Of course!")
    # askPeopleNumber runs once the head move completes.
    HeadMove.start(self.askPeopleNumber)
def askPeopleNumber(self):
    """Ask for the head-count and listen for a number or a "many" word."""
    print("askPeopleNumber")
    self.tts.say("How many people will be on the photo?")
    expected = self.numbers + self.many
    self.waitForWords(expected, self.onPeopleNumberWordRecognized, 0.5)
def onPeopleNumberWordRecognized(self, recognizedWord):
    """Record the requested group size and react with a themed comment.

    A "many"-type word maps to a minimum of 2 people; any other word is
    parsed as the exact number.  Ends by asking where to shoot and
    moving to the next photo step.
    """
    # BUG FIX: the debug label previously said "onPhotoWordRecognized".
    print("onPeopleNumberWordRecognized")
    if recognizedWord in self.many:
        self.minimumPeople = 2
    else:
        self.minimumPeople = int(recognizedWord)
    # Pick a reaction matching the group size (group shots for 4+).
    if self.minimumPeople == 1:
        reactions = ["It is just you and me then!",
                     "I love taking photos of you!",
                     "Let's make a very personal photo."]
    elif self.minimumPeople == 2:
        reactions = ["You make such a cute couple!",
                     "Let's capture this moment."]
    elif self.minimumPeople == 3:
        reactions = ["It is going to be an incredible trio!",
                     "Go team!"]
    else:
        reactions = ["Great! I love group shots!",
                     "That's one big family!",
                     "Let's see if we can fit that many!"]
    self.tts.say(random.choice(reactions))
    questions = ["Where do you want to take the picture?",
                 "Would you like to do it here?"]
    self.tts.say(random.choice(questions))
    self.decideOnNextPhoto()
#.........这里部分代码省略.........
示例9: AudioRecognitionModule
# 需要导入模块: from naoqi import ALProxy [as 别名]
# 或者: from naoqi.ALProxy import setAudioExpression [as 别名]
class AudioRecognitionModule(ALModule):
    """NAOqi module recognising French voice commands and handing the
    recognised word to StateManager."""
    tts = None            # ALTextToSpeech proxy, created in __init__
    mot = "Rien"          # last recognised word ("Rien" = none yet)
    cs = 0
    asr = None            # ALSpeechRecognition proxy while connected
    Redballactif = False  # presumably: red-ball tracking active -- TODO confirm
def __init__(self, name):
    """Register this module with NAOqi and create the TTS proxy."""
    global Redball
    ALModule.__init__(self, name)
    self.tts = ALProxy("ALTextToSpeech")
def disconnect(self, *_args):
    """Tear down speech recognition and release the proxies."""
    # NOTE(review): 'global asr' is declared but only self.asr is used
    # below; the global declaration appears to be vestigial.
    global asr
    global memory
    if (self.asr != None):
        self.asr.unsubscribe("ALSpeech")
        self.asr = None
    # Stop receiving word events, then drop the global memory proxy.
    memory.unsubscribeToEvent("WordRecognized","AudioRecognition")
    memory = None
    self.tts = None
def connect(self, *_args):
    """(Re)start French speech recognition and subscribe to word events.

    Any existing session is torn down first.  The robot announces the
    list of recognisable commands before listening begins.
    """
    if self.asr != None:
        self.disconnect(self)
    self.tts.setLanguage("French")
    self.tts.say("Bon, Que la fête commence")
    # Connecting to the speech recognition module on the robot.
    self.asr = ALProxy("ALSpeechRecognition",NAO_IP,NAO_PORT)
    # Set the language of recognition to French.
    self.asr.setLanguage("French")
    # A beep is played at the beginning and at the end of each
    # recognition attempt.
    self.asr.setAudioExpression(True)
    # The commands that the robot must recognise.
    wordList = ["On ne joue plus","Suis la balle","Attrape","Dis bonjour Naomie"]
    # Update the vocabulary (word spotting disabled).
    # Warning: crashes if the ASR engine is still running.
    self.asr.setVocabulary(wordList,False)
    # Announce the available commands.
    self.tts.say("Les actions pouvant etre reconnus sont")
    # Idiomatic iteration replaces the old range(len(wordList)) loop.
    for word in wordList:
        self.tts.say(word)
    self.asr.subscribe("ALSpeech")
    # Route "WordRecognized" memory events to onWordRecognised.
    global memory
    memory = ALProxy("ALMemory")
    memory.subscribeToEvent("WordRecognized",
                            "AudioRecognition",
                            "onWordRecognised")
def onWordRecognised(self, *_args):
""" This will be called each time a word is recognised.
"""
# Unsubscribe to the event when talking,
# to avoid repetitions
memory.unsubscribeToEvent("WordRecognized","AudioRecognition")
# We access to the word recognised in the memory
word = memory.getData("WordRecognized")
# Debug : Print the word recognised
print("Mot :")
print(word[0])
print("Indice de confiance :")
print(word[1])
print
# We acknoledge a word if the trust is high enough
if (word[1] > 0.28):
self.mot = word[0]
#self.tts.say("Le mot reconnu est :"+self.mot)
StateManager(self)
# Subscribe again to the event
memory.subscribeToEvent("WordRecognized",
"AudioRecognition",
#.........这里部分代码省略.........