This page collects typical usage examples of the naoqi.ALProxy.subscribe method in Python. If you are wondering what ALProxy.subscribe does, how to call it, or would like to see it used in real code, the curated examples below should help. You can also browse further usage examples of the naoqi.ALProxy class that this method belongs to.
Fifteen code examples of ALProxy.subscribe are shown below, sorted by popularity by default.
Example 1: init
# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import subscribe [as alias]
def init(IP, PORT):
    global motionProxy
    global tts
    global post
    global sonarProxy
    global memoryProxy
    global cameraProxy
    global videoClient
    post = ALProxy("ALRobotPosture", IP, PORT)
    tts = ALProxy("ALTextToSpeech", IP, PORT)
    motionProxy = ALProxy("ALMotion", IP, PORT)
    cameraProxy = ALProxy("ALVideoDevice", IP, PORT)
    # init video
    resolution = 0    # 0: QQVGA, 1: QVGA, 2: VGA
    colorSpace = 11   # RGB
    camNum = 0        # 0: top camera, 1: bottom camera
    fps = 1           # frames per second
    cameraProxy.setParam(18, camNum)
    try:
        videoClient = cameraProxy.subscribe("python_client",
                                            resolution, colorSpace, fps)
    except:
        cameraProxy.unsubscribe("python_client")
        videoClient = cameraProxy.subscribe("python_client",
                                            resolution, colorSpace, fps)
    print "Start videoClient: ", videoClient
    sonarProxy = ALProxy("ALSonar", IP, PORT)
    sonarProxy.subscribe("myApplication")
    memoryProxy = ALProxy("ALMemory", IP, PORT)
    post.goToPosture("Crouch", 1.0)
    time.sleep(2)
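A minimal sketch of how the function above might be used to grab a single camera frame; the getImageRemote/releaseImage calls and the NAO_IP/NAO_PORT values are additions for illustration, not part of the original example.

# Hypothetical usage of init(): fetch one frame from the subscribed video client, then clean up.
NAO_IP = "192.168.1.10"   # placeholder, replace with your robot's address
NAO_PORT = 9559

init(NAO_IP, NAO_PORT)
frame = cameraProxy.getImageRemote(videoClient)   # [width, height, ..., raw bytes, ...]
if frame is not None:
    print "got a %dx%d frame" % (frame[0], frame[1])
    cameraProxy.releaseImage(videoClient)         # free the image buffer on the robot side
cameraProxy.unsubscribe(videoClient)
sonarProxy.unsubscribe("myApplication")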
Example 2: SpeechDetectionModule
# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import subscribe [as alias]
class SpeechDetectionModule(ALModule):
    """ A module that handles NAO recognition app commands. """

    def __init__(self, name):
        ALModule.__init__(self, name)
        self.name = name
        self.memory = ALProxy("ALMemory")
        self.asr = ALProxy("ALSpeechRecognition")
        self.asr.setLanguage("English")
        vocabulary = ["color", "text", "gesture", "phone"]
        self.asr.setVocabulary(vocabulary, False)
        self.asr.subscribe(self.getName())
        self.memory.subscribeToEvent("WordRecognized", self.getName(), "onWordRecognized")

    def onWordRecognized(self, key, value, message):
        """ A method that handles command recognition. """
        global NaoWorkingMode
        if len(value) > 1 and value[1] >= 0.5:
            print 'recognized the word:', value[0]
            NaoWorkingMode = value[0]
        else:
            print 'insufficient confidence threshold'
            NaoWorkingMode = None

    def disconnect(self):
        try:
            self.memory.unsubscribeToEvent("WordRecognized", self.getName())
            self.asr.unsubscribe(self.getName())
        except BaseException, err:
            print "Error while disconnecting from speech module: " + str(err)
Example 3: naoSensors
# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import subscribe [as alias]
class naoSensors():
    motionproxy = None
    headnames = ['HeadYaw', 'HeadPitch']
    armnames = ['RShoulderRoll', 'RShoulderPitch', 'RElbowYaw', 'RElbowRoll']
    redballproxy = None
    memproxy = None
    openHand = True
    nao = None

    def connect(self, nao):
        self.nao = nao
        self.motionproxy = nao.motionproxy
        # The exception handler is needed to be able to use this code with Webots,
        # which does not support red ball detection.
        # TODO: add a class for our own ball detection, to be loaded when the naoqi
        # .. red ball detection fails; use OpenCV, see the old localization module.
        # IMPORTANT: the methods of the second implementation MUST match those of
        # .. the red ball detection, so no changes are needed elsewhere!
        try:
            self.redballproxy = ALProxy("ALRedBallDetection", nao.ip, nao.port)
            self.memproxy = ALProxy("ALMemory", nao.ip, nao.port)
            # This might not be the best way to start the naoqi red ball detection:
            # .. after subscribing, the head processor searches for and recalculates the
            # .. ball position over and over again, and very likely will get hot.
            self.redballproxy.subscribe("detector")
        except RuntimeError, e:
            print e
Example 4: VideoModule
# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import subscribe [as alias]
class VideoModule():
    def __init__(self, resolution=2, colorSpace=11, fps=5):
        self.vd = ALProxy('ALVideoDevice')
        modules.append(self)    # 'modules' is a list defined elsewhere in the source file
        self.vd.subscribe('videoModule', resolution, colorSpace, fps)

    def getImage(self):
        results = self.vd.getImageRemote('videoModule')
        image = NaoImage()      # NaoImage is defined elsewhere in the source file
        image.width = results[0]
        image.height = results[1]
        image.layersNumber = results[2]
        image.colorSpace = results[3]
        image.timestamp = results[4]
        image.microtimestamp = results[5]
        # requires numpy: import numpy as np
        image.pixels = np.frombuffer(
            results[6], dtype=np.uint8).reshape((image.height, image.width, 3))
        image.cameraID = results[7]
        image.leftAngle = results[8]
        image.rightAngle = results[9]
        image.topAngle = results[10]
        image.bottomAngle = results[11]
        return image

    def startRecord(self, filename):
        self.vd.recordVideo('videoModule', 100, 1)

    def stopRecord(self, filename):
        self.vd.stopVideo('videoModule')

    def close(self):
        self.vd.unsubscribe('videoModule')
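A possible way to exercise VideoModule, assuming a broker is already connected so the one-argument ALProxy('ALVideoDevice') call can resolve the robot. 'modules' and NaoImage come from the original project, so minimal stand-ins are sketched here; the robot address is a placeholder.

import numpy as np
from naoqi import ALBroker, ALProxy

modules = []                      # stand-in for the original module registry

class NaoImage(object):           # stand-in container for the fields getImage() fills in
    pass

broker = ALBroker("videoBroker", "0.0.0.0", 0, "192.168.1.10", 9559)  # placeholder IP
video = VideoModule(resolution=1, colorSpace=11, fps=5)   # QVGA, RGB
img = video.getImage()
print "frame %dx%d from camera %d" % (img.width, img.height, img.cameraID)
video.close()
broker.shutdown()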
Example 5: getImage
# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import subscribe [as alias]
def getImage(self):
    """Main method: wait until a QR code is found."""
    period = 1000
    qrCodeProxy = ALProxy("ALBarcodeReader", self.NAO_IP, self.NAO_PORT)
    qrCodeProxy.subscribe("Testh_qr", period, 0.0)
    detected = False
    i = 0
    while not detected:
        time.sleep(0.5)
        val = self.memory.getData("BarcodeReader/BarcodeDetected")
        print val
        if val is not None:
            if len(val) >= 1:
                detected = True
                todo = val[0][0]
                ac = todo.split(" ", 1)
                if len(ac) > 1:
                    action = self.nao.getAction().get(str(ac[0]))
                    action(str(ac[1]))
                    self.memory.insertData("BarcodeReader/BarcodeDetected", "")
                else:
                    action = self.nao.getAction().get(todo)
                    action()
                    self.memory.insertData("BarcodeReader/BarcodeDetected", "")
        i += 1
        if i == 30:
            detected = True
Example 6: main
# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import subscribe [as alias]
def main(robotIp, robotPort):
    sonarProxy = ALProxy("ALSonar", robotIp, robotPort)
    sonarProxy.subscribe("Sensores")
    memoryProxy = ALProxy("ALMemory", robotIp, robotPort)
    while True:
        left = memoryProxy.getData("Device/SubDeviceList/US/Left/Sensor/Value")
        right = memoryProxy.getData("Device/SubDeviceList/US/Right/Sensor/Value")
        val = [left, right]
        print val
        if left < 0.3 and right < 0.3:
            if left < right:
                talker("ObsL")
            else:
                talker("ObsR")
            time.sleep(7)
            continue
        if left < 0.3:
            talker("ObsL")
            time.sleep(7)
            continue
        if right < 0.3:
            talker("ObsR")
            time.sleep(7)
            continue
    sonarProxy.unsubscribe("Sensores")
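The talker() helper is not part of this snippet. A hypothetical stand-in is sketched below, assuming it simply speaks a short warning through ALTextToSpeech; the phrases and the robot address are placeholders.

from naoqi import ALProxy

ROBOT_IP = "192.168.1.10"
ROBOT_PORT = 9559

def talker(code):
    # speak a human-readable warning for the obstacle code
    phrases = {"ObsL": "Obstacle on the left", "ObsR": "Obstacle on the right"}
    tts = ALProxy("ALTextToSpeech", ROBOT_IP, ROBOT_PORT)
    tts.say(phrases.get(code, code))

main(ROBOT_IP, ROBOT_PORT)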
Example 7: NAOVoiceRec
# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import subscribe [as alias]
class NAOVoiceRec(ALModule):
    def __init__(self, id, ip, port, wordList, callBack,
                 wordSpotting=True, visualExpression=True, audioExpression=False):
        super(NAOVoiceRec, self).__init__(id)
        self.id = id
        self.wordCallBack = callBack
        # create the speech recognition proxy
        self.speechRec = ALProxy("ALSpeechRecognition", ip, port)
        # set the language
        self.speechRec.setLanguage("English")
        # load the vocabulary
        self.speechRec.setVocabulary(wordList, wordSpotting)
        self.speechRec.subscribe(id)
        # configure expressions
        self.speechRec.setVisualExpression(visualExpression)
        self.speechRec.setAudioExpression(audioExpression)
        # get the ALMemory proxy and subscribe to the events
        self.memProx = ALProxy("ALMemory")
        self.memProx.subscribeToEvent("WordRecognized", self.id, "wordRecognized")

    def __del__(self):
        self.speechRec.unsubscribe(self.id)
        self.memProx.unsubscribeToEvent("WordRecognized", self.id)

    def wordRecognized(self, event, words, id):
        self.wordCallBack(words)
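A hedged usage sketch for NAOVoiceRec: a broker, a small word list and a print callback. The module name passed as 'id' must match the global variable name; the IP address and vocabulary are placeholders.

import time
from naoqi import ALBroker

def on_words(words):
    print "heard:", words

broker = ALBroker("voiceBroker", "0.0.0.0", 0, "192.168.1.10", 9559)
VoiceRec = NAOVoiceRec("VoiceRec", "192.168.1.10", 9559,
                       ["hello", "stop"], on_words, wordSpotting=True)
try:
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    broker.shutdown()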
Example 8: main
# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import subscribe [as alias]
def main():
    """ Main entry point """
    parser = OptionParser()
    parser.add_option("--pip",
                      help="Parent broker IP. The IP address of your robot",
                      dest="pip")
    parser.add_option("--pport",
                      help="Parent broker port. The port NAOqi is listening to",
                      dest="pport",
                      type="int")
    parser.set_defaults(
        pip="10.0.1.3",
        pport=9559)
    (opts, args_) = parser.parse_args()
    pip = opts.pip
    pport = opts.pport

    # We need this broker to be able to construct
    # NAOqi modules and subscribe to other modules.
    # The broker must stay alive until the program exits.
    myBroker = ALBroker("myBroker",
                        "0.0.0.0",   # listen to anyone
                        0,           # find a free port and use it
                        pip,         # parent broker IP
                        pport)       # parent broker port

    asr = ALProxy("ALSpeechRecognition", pip, pport)
    asr.setLanguage("English")
    # Example: adds "yes", "no" and "please" to the vocabulary (without word spotting)
    vocabulary = ["yes", "no", "please"]
    asr.setVocabulary(vocabulary, False)
    # Start the speech recognition engine with user Test_ASR
    asr.subscribe("Test_ASR")

    # Warning: SpeechDetector must be a global variable.
    # The name given to the constructor must be the name of the variable.
    global SpeechDetector
    SpeechDetector = SpeechDetectorModule("SpeechDetector")

    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        print
        print "Interrupted by user, shutting down"
        asr.unsubscribe("Test_ASR")
        myBroker.shutdown()
        sys.exit(0)
Example 9: main
# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import subscribe [as alias]
def main(robot_ip, robot_port, topf_path):
    # create the speech recognition proxy and a csv file
    createSpeechRecogProxy(robot_ip, robot_port)
    createFile(csvf)
    # create dialog and posture proxies
    dialog_p = ALProxy('ALDialog', robot_ip, robot_port)
    postureProxy = ALProxy("ALRobotPosture", robot_ip, robot_port)
    dialog_p.setLanguage("English")
    postureProxy.goToPosture("StandInit", 0.5)  # brings the robot to a standing position

    # Load the topic - an absolute path is required.
    # THE TOPIC MUST BE ON THE ROBOT.
    topic = dialog_p.loadTopic(topf_path)
    # Start dialog
    dialog_p.subscribe('myModule')
    # Activate dialog
    dialog_p.activateTopic(topic)

    # create broker
    myBroker = ALBroker("myBroker", "0.0.0.0",
                        0, robot_ip, robot_port)
    # creates a module called "Move"
    global Move
    Move = TestModule("Move")

    # pressing Enter stops speech recognition
    raw_input(u"Press Enter to exit.")
    asr.unsubscribe("Test_ASR")

    # until interrupted, keep the broker running
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        print
        print "Interrupted by user, shutting down"
        myBroker.shutdown()
        # Deactivate the topic
        dialog_p.deactivateTopic(topic)
        # Unload the topic
        dialog_p.unloadTopic(topic)
        # Stop dialog
        dialog_p.unsubscribe('myModule')
        # close file
        f.close()
        # exit
        sys.exit(0)
Example 10: Reward
# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import subscribe [as alias]
class Reward(ALModule):
    value = 0
    event_received = False

    def __init__(self, _name):
        self.name = _name
        ALModule.__init__(self, _name)
        self.memory = ALProxy("ALMemory")
        self.speechRecognizer = ALProxy("ALSpeechRecognition")
        for subscriber in self.speechRecognizer.getSubscribersInfo():
            self.speechRecognizer.unsubscribe(subscriber[0])
        vocabulary = ["bravo"]
        self.speechRecognizer.setVocabulary(vocabulary, False)

    def subscribe_to_events(self):
        self.memory.subscribeToEvent("FrontTactilTouched", self.name, "onFrontTactilTouched")
        self.memory.subscribeToEvent("RearTactilTouched", self.name, "onRearTactilTouched")
        self.speechRecognizer.subscribe("success_event")

    def unsubscribe_to_events(self):
        self.memory.unsubscribeToEvent("FrontTactilTouched", self.name)
        self.memory.unsubscribeToEvent("RearTactilTouched", self.name)
        self.speechRecognizer.unsubscribe("success_event")

    def reset(self):
        self.value = 0
        self.event_received = False
        self.unsubscribe_to_events()

    def positiveReward(self):
        self.value = 1
        self.event_received = True

    def negativeReward(self):
        self.value = -1
        self.event_received = True

    def successReward(self):
        self.value = 10
        self.event_received = True

    def onFrontTactilTouched(self, *_args):
        """ Callback method for the FrontTactilTouched event """
        if not self.event_received:
            self.positiveReward()

    def onRearTactilTouched(self, *_args):
        """ Callback method for the RearTactilTouched event """
        if not self.event_received:
            self.negativeReward()
Example 11: start
# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import subscribe [as alias]
def start(self):
    audio = ALProxy("ALAudioDevice", self.naoIP, self.naoPort)
    # channel flag: all = 0, left = 1, right = 2, front = 3, rear = 4
    deinterleave = 0
    sampleRate = 16000
    if self.chanFlag == 0:
        sampleRate = 48000   # the interleaved "all channels" stream is only available at 48 kHz
    audio.setClientPreferences(self.getName(), sampleRate, self.chanFlag, deinterleave)
    audio.subscribe(self.getName())
    print "INF: AudioReceiver: started!"
Example 12: ConversationModule
# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import subscribe [as alias]
class ConversationModule(ALModule):
    def __init__(self, name):
        ALModule.__init__(self, name)
        self.tts = ALProxy("ALTextToSpeech")
        self.ears = ALProxy("ALSpeechRecognition")
        self.memory = ALProxy("ALMemory")
        self.log = ALProxy("ALLogger")
        self.ears.subscribe("Conversation")
        self.memory.subscribeToEvent("WordRecognized", "Conversation", "onWordRecognized")

    def onWordRecognized(self, key, value, message):
        """
        Callback for the WordRecognized event
        :param key: memory key
        :param value: memory value
        :param message: message of the event
        :return:
        """
        self.ears.unsubscribe("Conversation")   # pause recognition while speaking
        self.tts.say("I recognized %s" % value)
        self.ears.subscribe("Conversation")

    def setMood(self, value):
        """
        Sets the current mood felt by the robot
        :param value: Mood value 1=good, 0=neutral, -1=bad
        :return:
        """
        self.__previousMood = self.__mood
        self.__mood = Mood(value)
        self.memory.raiseEvent("Brain/Mood/Text", self.__mood.text)
        self.memory.raiseEvent("Brain/Mood/Value", self.__mood.value)

    def getMood(self):
        """
        Gets the current mood
        :return:
        """
        return self.__mood

    def getPreviousMood(self):
        """
        Gets the previous mood
        :return:
        """
        return self.__previousMood
Example 13: createSpeechRecogProxy
# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import subscribe [as alias]
def createSpeechRecogProxy(robot_ip, robot_port):
    # create the speech recognition proxy
    global asr
    asr = ALProxy("ALSpeechRecognition", robot_ip, robot_port)
    asr.setLanguage("English")
    asr.pause(True)
    # add the vocabulary, set it, and enable word spotting
    vocabulary = ["what is the slope of the line from"]
    asr.setVocabulary(vocabulary, True)
    asr.pause(False)
    asr.subscribe("Test_ASR")
Example 14: SoundReceiverModule
# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import subscribe [as alias]
class SoundReceiverModule(ALModule):
    """
    A module to record speech while a person is talking
    """

    def __init__(self, name):
        ALModule.__init__(self, name)
        self.audio = ALProxy("ALAudioDevice")
        # one independent buffer per channel ([[]] * 4 would alias a single list four times)
        self.data = [[] for _ in range(4)]
        self.count = 999999
        self.check = False

    def start_processing(self):
        """
        Subscribe to the audio stream and begin processing
        """
        channel_flag = 3   # ALL: 0, AL::LEFTCHANNEL: 1, AL::RIGHTCHANNEL: 2, AL::FRONTCHANNEL: 3, AL::REARCHANNEL: 4
        deinterleave = 0
        sample_rate = 16000
        self.data = [[] for _ in range(4)]
        self.audio.setClientPreferences(self.getName(), sample_rate, channel_flag, deinterleave)
        self.audio.subscribe(self.getName())

    def stop_processing(self):
        """
        Unsubscribe from the audio stream and end processing.
        Returns the raw audio data.
        """
        self.audio.unsubscribe(self.getName())
        return self.data[0]

    def processRemote(self, num_channels, num_samples, timestamp, buffer):
        """
        This is THE method that receives all the sound buffers from the "ALAudioDevice" module
        """
        data = np.reshape(np.fromstring(str(buffer), dtype=np.int16),
                          (num_channels, num_samples), 'F')
        for i in range(num_channels):
            self.data[i].append(data[i].tolist())
        # peak of the most recent buffer drives the silence countdown below
        peak_value = np.max(data)
        if peak_value > 7500:
            self.count = 30
        self.count -= 1
        if self.count == 0:
            self.check = True
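A hedged usage sketch for the module above: record a few seconds from the front microphone through an ALBroker and write the captured samples to a WAV file. The broker address and output filename are placeholders.

import time
import wave
import numpy as np
from naoqi import ALBroker

broker = ALBroker("audioBroker", "0.0.0.0", 0, "192.168.1.10", 9559)  # placeholder robot IP
SoundReceiver = SoundReceiverModule("SoundReceiver")   # global name must match the module name
SoundReceiver.start_processing()
time.sleep(5)                                          # record for roughly five seconds
chunks = SoundReceiver.stop_processing()
broker.shutdown()

if chunks:
    samples = np.concatenate([np.asarray(c, dtype=np.int16) for c in chunks])
    out = wave.open("front_channel.wav", "wb")         # placeholder output filename
    out.setnchannels(1)
    out.setsampwidth(2)        # 16-bit samples
    out.setframerate(16000)    # matches sample_rate in start_processing()
    out.writeframes(samples.tostring())
    out.close()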
Example 15: getFaceData
# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import subscribe [as alias]
def getFaceData(IP, PORT):
    # make a proxy to face detection
    faceProxy = ALProxy("ALFaceDetection", IP, PORT)
    # make a proxy to memory
    memoryProxy = ALProxy("ALMemory", IP, PORT)
    faceProxy.subscribe("GetFaceData")
    faceData = memoryProxy.getData("FaceDetected")
    faceProxy.unsubscribe("GetFaceData")
    return faceData
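A small hedged sketch of calling getFaceData(): the "FaceDetected" key is often empty right after subscribing, so it may take a few polls before anything useful comes back. The robot address is a placeholder, and the indexing assumes the documented FaceDetected layout [TimeStamp, [FaceInfo..., Time_Filtered_Reco_Info]].

import time

ROBOT_IP, ROBOT_PORT = "192.168.1.10", 9559
for _ in range(10):
    faces = getFaceData(ROBOT_IP, ROBOT_PORT)
    if faces:
        # faces[1] holds the FaceInfo entries plus one trailing recognition-info element
        print "detected %d face(s)" % (len(faces[1]) - 1)
        break
    time.sleep(1)
else:
    print "no face detected"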