This article collects and summarizes typical usage examples of the naoqi.ALProxy class in Python. If you are wondering what ALProxy is for, how it is used, or what working ALProxy code looks like, the hand-picked class examples below should help.
The section below shows 15 code examples of the ALProxy class, sorted by popularity by default.
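Before diving into the examples: ALProxy is the NAOqi client-side handle to a named module running on the robot (or on a local broker). You construct it with the module name plus, optionally, the robot's IP address and port, and then call the remote module's methods as if they were local. A minimal sketch of that pattern (the IP address below is a placeholder, not taken from any of the examples):

from naoqi import ALProxy

ROBOT_IP = "192.168.1.10"   # placeholder; use your robot's address
PORT = 9559                 # default NAOqi port

# Proxy to the text-to-speech module; method calls are forwarded to the robot.
tts = ALProxy("ALTextToSpeech", ROBOT_IP, PORT)
tts.say("Hello")

# Every proxy also exposes a .post attribute for non-blocking calls:
# the call returns a task id immediately, which wait() can join on.
task_id = tts.post.say("This call does not block")
tts.wait(task_id, 0)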
Example 1: TactileHeadModule
class TactileHeadModule(ALModule):
    AudioModule = None

    def __init__(self, name, audiomodule):
        ALModule.__init__(self, name)
        self.AudioModule = audiomodule
        # Create a proxy to ALTextToSpeech for later use
        self.tts = ALProxy("ALTextToSpeech")
        # Subscribe to the MiddleTactilTouched event:
        global memory
        memory = ALProxy("ALMemory")
        memory.subscribeToEvent("MiddleTactilTouched",
                                "ReactToTouch",
                                "onTouched")

    def onTouched(self, strVarName, value):
        """ This will be called each time a touch
        is detected.
        """
        # Unsubscribe from the event while talking,
        # to avoid repetitions
        memory.unsubscribeToEvent("MiddleTactilTouched",
                                  "ReactToTouch")
        self.tts.say("D'accord, on arrête de jouer")  # "Okay, let's stop playing"
        self.AudioModule.cs = 0
        # Subscribe again to the event
        memory.subscribeToEvent("MiddleTactilTouched",
                                "ReactToTouch",
                                "onTouched")
Example 2: start_sound_track
def start_sound_track(self, msg):
    self.__proxyTTS = ALProxy("ALAnimatedSpeech", self.__ip, self.__port)
    # set the local configuration
    sayconfig = {"bodyLanguageMode": "contextual"}
    self.__proxyTTS.say("Can you help me find you by clapping your hand?", sayconfig)
    self.__proxyMotion = ALProxy("ALMotion", self.__ip, self.__port)
    # initialise microphone
    #self.__audioProxy = ALProxy("ALAudioDevice", self.__ip, self.__port)
    # initialise sound source localisation
    self.__sslProxy = ALProxy("ALSoundLocalization", self.__ip, self.__port)
    # initialise almemory
    self.__memoryProxy = ALProxy("ALMemory", self.__ip, self.__port)
    # debugging purposes
    #self.__audioProxy.setClientPreferences(self.getName(), 16000, 3, 0)
    #self.__audioProxy.subscribe(self.getName())
    # configure sound detection
    self.__sslProxy.setParameter("Sensitivity", 0.1)
    # callback from memory
    try:
        self.__memoryProxy.unsubscribeToEvent("ALSoundLocalization/SoundLocated", "soundtracking")
    except:
        pass
    self.__sslProxy.subscribe("sound_source_locator")
    self.__memoryProxy.subscribeToMicroEvent(
        "ALSoundLocalization/SoundLocated",
        self.getName(),
        "AnotherUserDataToIdentifyEvent",
        "sound_callback")
Example 3: main
def main(robotIP, PORT=9559):
    motionProxy = ALProxy("ALMotion", robotIP, PORT)
    # Example showing how to get the robot config.
    # getRobotConfig returns two parallel lists: config keys and their values.
    robotConfig = motionProxy.getRobotConfig()
    for i in range(len(robotConfig[0])):
        print robotConfig[0][i], ": ", robotConfig[1][i]
Example 4: showNaoImage
def showNaoImage(IP, PORT):
    camProxy = ALProxy("ALVideoDevice", IP, PORT)
    resolution = 2    # VGA
    colorSpace = 11   # RGB
    videoClient = camProxy.subscribe("python_client", resolution, colorSpace, 5)
    # Get a camera image.
    # image[6] contains the image data passed as an array of ASCII chars.
    naoImage = camProxy.getImageRemote(videoClient)
    camProxy.unsubscribe(videoClient)
    # Now we work with the returned image and save it as a JPEG using the
    # PIL Image module.
    # Get the image size and pixel array.
    imageWidth = naoImage[0]
    imageHeight = naoImage[1]
    array = naoImage[6]
    # Create a PIL Image from our pixel array.
    # (Image.fromstring is the old PIL API; newer Pillow uses Image.frombytes.)
    im = Image.fromstring("RGB", (imageWidth, imageHeight), array)
    # Save the image.
    im.save("../public/imgNao/live.jpeg", "JPEG")
Example 5: getImage
def getImage(self):
    """main method, wait until a QR code is found."""
    period = 1000
    qrCodeProxy = ALProxy("ALBarcodeReader", self.NAO_IP, self.NAO_PORT)
    qrCodeProxy.subscribe("Testh_qr", period, 0.0)
    detected = False
    i = 0
    while not detected:
        time.sleep(0.5)
        val = self.memory.getData("BarcodeReader/BarcodeDetected")
        print val
        if val is not None:
            if len(val) >= 1:
                detected = True
                todo = val[0][0]
                ac = todo.split(" ", 1)
                if len(ac) > 1:
                    action = self.nao.getAction().get(str(ac[0]))
                    action(str(ac[1]))
                    self.memory.insertData("BarcodeReader/BarcodeDetected", "")
                else:
                    action = self.nao.getAction().get(todo)
                    action()
                    self.memory.insertData("BarcodeReader/BarcodeDetected", "")
        i += 1
        if i == 30:   # give up after roughly 15 seconds
            detected = True
Example 6: powerOff
def powerOff(self):
    tts = ALProxy("ALTextToSpeech", 'nao.local', 9559)
    tts.say("即将执行关机操作!")   # "About to shut down!"
    command1 = 'sudo shutdown -h now'
    os.system(command1)
    command2 = 'root\r'   # the default password of the root user
    os.system(command2)   # note: this runs 'root' as a shell command; it does not answer a sudo password prompt
Example 7: main
def main():
    """ Parse command line arguments,
    run recordData and write the results
    into a csv file
    """
    if len(sys.argv) < 2:
        nao_ip = ROBOT_IP
    else:
        nao_ip = sys.argv[1]
    motion = ALProxy("ALMotion", nao_ip, 9559)
    # Set stiffness on for Head motors
    motion.setStiffnesses("Head", 1.0)
    # Will go to 1.0 then 0 radians
    # in two seconds
    motion.post.angleInterpolation(
        ["HeadYaw"],
        [1.0, 0.0],
        [1, 2],
        False
    )
    data = recordData(nao_ip)
    # Gently set stiffness off for Head motors
    motion.setStiffnesses("Head", 0.0)
    output = os.path.abspath("record.csv")
    with open(output, "w") as fp:
        for line in data:
            fp.write("; ".join(str(x) for x in line))
            fp.write("\n")
    print "Results written to", output
Example 8: update_battery
def update_battery(self):
    if self.ip.get() == "Disconnected":
        self.battery_status.set("0 %")
    else:
        try:
            ## import the naoqi library and create all the proxies that will be used
            from naoqi import ALProxy
            self.memory = ALProxy("ALMemory", self.ip.get(), 9559)
            self.memory.ping()
            self.battery = ALProxy("ALBattery", self.ip.get(), 9559)
            self.tts = ALProxy("ALTextToSpeech", self.ip.get(), 9559)
            self.motion = ALProxy("ALMotion", self.ip.get(), 9559)
            self.posture = ALProxy("ALRobotPosture", self.ip.get(), 9559)
            # Set the battery status label to the current charge value
            self.battery_status.set(str(self.battery.getBatteryCharge()) + " %")
            # Timer thread that refreshes the battery status after a fixed period
            #threading.Timer(5.0, self.update_battery).start()
            threading.Timer(15.0, self.update_battery).start()
        except BaseException:
            self.ip.set("Disconnected")
    return
Example 9: __init__
def __init__(self, name):  # constructor; self refers to the class instance, name is a string
    ALModule.__init__(self, name)  # call the constructor of ALModule
    self.tts = ALProxy("ALTextToSpeech", ip, 9559)       # proxy to the tts module
    self.asr = ALProxy("ALSpeechRecognition", ip, 9559)  # proxy to the asr module
    self.memory = ALProxy("ALMemory", ip, 9559)          # proxy to the memory module
    self.num1 = random.randint(1, 10)  # two integers randomly selected from 1 to 10
    self.num2 = random.randint(1, 10)
    self.operator = random.choice(["-", "+"])  # randomly chosen operator applied to the equation
    self.tts.setLanguage("English")  # set the language NAO uses for talking
    if self.operator == "-":  # NAO was programmed to create equations with a positive result
        if self.num1 > self.num2:  # compare the numbers to ensure the larger one comes first
            self.result = str(eval(str(self.num1) + self.operator + str(self.num2)))  # evaluate the result and turn it into a string so NAO can say it
            self.operator = " minus "  # and do the same for the operator
            self.question = "What is the result of " + str(self.num1) + self.operator + str(self.num2) + "?"  # build the question
        else:
            self.result = str(eval(str(self.num2) + self.operator + str(self.num1)))
            self.operator = " minus "
            self.question = "What is the result of " + str(self.num2) + self.operator + str(self.num1) + "?"
    else:
        self.result = str(eval(str(self.num1) + self.operator + str(self.num2)))
        self.operator = " plus "
        self.question = "What is the result of " + str(self.num1) + self.operator + str(self.num2) + "?"
    print self.question  # print the question to the terminal
    print self.result    # print the result to the terminal
    self.tts.say(self.question)  # NAO says the question
    self.speech_recognition()    # call the speech_recognition method
Example 10: getColour
def getColour(IP, PORT):
    """
    First get an image from Nao, then show the detection result on screen with OpenCV.
    :param IP:
    :param PORT:
    """
    myBroker = ALBroker("myBroker",
                        "0.0.0.0",  # listen to anyone
                        0,          # find a free port and use it
                        IP,         # parent broker IP
                        PORT)       # parent broker port
    camProxy = ALProxy("ALVideoDevice", IP, PORT)
    resolution = 2    # VGA
    colorSpace = 11   # RGB
    videoClient = camProxy.subscribe("python_client", resolution, colorSpace, 5)
    t0 = time.time()
    # Get a camera image.
    # image[6] contains the image data passed as an array of ASCII chars.
    naoImage = camProxy.getImageRemote(videoClient)
    t1 = time.time()
    # Time the image transfer.
    #print "Runde: ", b
    camProxy.unsubscribe(videoClient)
    # Now we work with the returned image: detect the red shape and
    # display the annotated frame with OpenCV.
    # Get the image size and pixel array.
    imageWidth = naoImage[0]
    imageHeight = naoImage[1]
    array = naoImage[6]
    # Create a PIL Image instance from our pixel array.
    img0 = Image.frombytes("RGB", (imageWidth, imageHeight), array)
    #frame = np.asarray(convert2pil(img0)[:, :])
    #object_rect2 = detectColor(img0, RED_MIN, RED_MAX)
    frame = detectShape(img0, RED_MIN, RED_MAX)
    #frame = selectDetected(object_rect1, frame)
    #frame = selectDetected(object_rect2, frame)
    #currentImage = path + "/camImage1cm.jpg"
    #cv2.imwrite(currentImage, frame)
    cv2.imshow('contour', frame)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example 11: FaceDetectionModule
class FaceDetectionModule(ALModule):
    # Method declaration.
    def __init__(self, name):
        ALModule.__init__(self, name)
        print "[INFO ] FaceDetectionModule initialization"
        # Instantiate a tts proxy to the ALTextToSpeech module.
        self.tts = ALProxy("ALTextToSpeech")
        self.tts.setLanguage("french")
        # Instantiate an fd proxy to the ALFaceDetection module.
        self.fd = ALProxy("ALFaceDetection")
        # Module-level variable.
        global memory
        # Instantiate a memory proxy to the ALMemory module.
        memory = ALProxy("ALMemory")
        # Call subscribeToEvent...
        memory.subscribeToEvent("FaceDetected",   # on this event...
                                "FaceDetection",  # ...of this instance...
                                "onDetection")    # ...trigger a call
                                                  # ...to this method.
        print "[INFO ] FaceDetectionModule initialized"

    # Method called on the event.
    def onDetection(self, *_args):
        print "[INFO ] FaceDetection: Face detected"
        global face_nb
        print "[INFO ] FaceDetection initialize face detection process"
        learnFaceProcess(self, face_nb)
Example 12: getMarkXYZ
def getMarkXYZ(IP, portNumber, markData, landmarkSize):
    print "0"
    currentCamera = "CameraTop"
    print "1"
    # Retrieve landmark angular size in radians.
    angularSize = markData[1][0][0][3]
    print "2"
    # Compute distance to landmark.
    distanceFromCameraToLandmark = landmarkSize / (2 * math.tan(angularSize / 2))
    print "3"
    motionProxy = ALProxy("ALMotion", IP, portNumber)
    print "4"
    # Retrieve landmark center position in radians.
    wzCamera = markData[1][0][0][1]
    print "5"
    wyCamera = markData[1][0][0][2]
    print "6"
    # Get current camera position in NAO space.
    transform = motionProxy.getTransform(currentCamera, 2, True)
    print "7"
    transformList = almath.vectorFloat(transform)
    robotToCamera = almath.Transform(transformList)
    # Compute the rotation to point towards the landmark.
    cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(0, wyCamera, wzCamera)
    # Compute the translation to reach the landmark.
    cameraToLandmarkTranslationTransform = almath.Transform(distanceFromCameraToLandmark, 0, 0)
    # Combine all transformations to get the landmark position in NAO space.
    robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform * cameraToLandmarkTranslationTransform
    return robotToLandmark.r1_c4, robotToLandmark.r2_c4, robotToLandmark.r3_c4
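For orientation, the markData argument above follows the layout of the ALMemory key "LandmarkDetected" written by the ALLandMarkDetection module. A hedged usage sketch, assuming IP and portNumber are already defined as in the function above, and taking the 500 ms period and the 0.06 m printed NAOmark size as illustrative values rather than values from the original example:

landmarkProxy = ALProxy("ALLandMarkDetection", IP, portNumber)
landmarkProxy.subscribe("landmark_client", 500, 0.0)   # name, period in ms, precision
memoryProxy = ALProxy("ALMemory", IP, portNumber)
time.sleep(1.0)                                        # give the detector time to publish a result
markData = memoryProxy.getData("LandmarkDetected")
if markData:
    x, y, z = getMarkXYZ(IP, portNumber, markData, 0.06)
    print "Landmark position in robot frame:", x, y, z
landmarkProxy.unsubscribe("landmark_client")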
Example 13: start
def start(self):
    if self.al.connected():
        self.tts.say("You are already connected")
    else:
        self.networks = self.al.list_networks()
        self.tts.say("Here are the Wi Fi networks")
        for num, network in enumerate(self.networks, 1):
            self.tts.say(network)
            self.tts.say("is number %d" % (num,))
            time.sleep(0.2)
        if len(self.networks) == 0:
            self.tts.say("Sorry you are in a wifi free zone")
        else:
            self.tts.say("Which number Wi Fi network shall I connect to?")
            try:
                self.memory.unsubscribeToEvent("WordRecognized")
            except Exception:
                pass
            speech_recognition = ALProxy("ALSpeechRecognition", NAO_IP, 9559)
            speech_recognition.setLanguage("English")
            try:
                # Vocabulary covers every announced network number (1..len).
                speech_recognition.setWordListAsVocabulary([str(i) for i in range(1, len(self.networks) + 1)])
            except Exception:
                self.tts.say("Could not set vocabulary")
            try:
                result = self.memory.subscribeToEvent("WordRecognized", self.module_name, "on_word_recognised")
                print "Subscribed to event WordRecognized with package ", self.module_name, " and result ", result
            except Exception as e:
                print "Failed to subscribe ", e
Example 14: NaoTTS
class NaoTTS(object):
    """
    Nao text-to-speech service
    """
    def __init__(self):
        """
        Sets up members
        """
        self._tts = None
        # Authorization to speak
        self._can_speak = threading.Event()
        self._can_speak.set()
        self.__speaking_lock = threading.Lock()

    @Validate
    def validate(self, context):
        """
        Component validated
        """
        # Set up the TTS proxy
        self._tts = ALProxy("ALTextToSpeech")

    @Invalidate
    def invalidate(self, context):
        """
        Component invalidated
        """
        # Stop using the proxy
        self._tts = None
        # Unlock everything
        self._can_speak.set()

    def say(self, sentence):
        """
        Says the given sentence

        :param sentence: Text to say
        """
        with self.__speaking_lock:
            # Wait to be authorized to speak
            self._can_speak.wait()
            # Say what we have to
            self._tts.say(sentence)

    def resume(self):
        """
        Allows Nao to speak
        """
        self._can_speak.set()

    def pause(self):
        """
        Forbids Nao to speak
        """
        if self._can_speak.is_set():
            with self.__speaking_lock:
                self._can_speak.clear()
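The class above guards ALTextToSpeech behind a pause/resume gate. A minimal usage sketch outside an iPOPO container, assuming a NAOqi broker is already running so ALProxy("ALTextToSpeech") resolves without an explicit address, and assuming the @Validate/@Invalidate decorators leave the methods directly callable:

tts_service = NaoTTS()
tts_service.validate(None)          # context is unused here; None is only a placeholder
tts_service.say("Battery is low")   # speaks immediately while speech is allowed
tts_service.pause()                 # later say() calls will wait on the internal event
tts_service.resume()                # speech is allowed again
tts_service.invalidate(None)        # drops the proxy and releases any waiting caller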
Example 15: getNaoImage
def getNaoImage(IP, PORT):
    camProxy = ALProxy("ALVideoDevice", IP, PORT)
    resolution = 2   # 640*480px http://doc.aldebaran.com/2-1/family/robots/video_robot.html#cameraresolution-mt9m114
    colorSpace = 11  # RGB colorspace http://doc.aldebaran.com/2-1/family/robots/video_robot.html#cameracolorspace-mt9m114
    fps = 5          # can be 0-30 fps
    videoClient = camProxy.subscribe("python_client", resolution, colorSpace, fps)
    t0 = time.time()
    naoImage = camProxy.getImageRemote(videoClient)
    t1 = time.time()
    camProxy.unsubscribe(videoClient)
    # Get the image size and pixel array.
    imageWidth = naoImage[0]
    imageHeight = naoImage[1]
    array = naoImage[6]
    # Create a PIL Image from our pixel array.
    # (Image.fromstring is the old PIL API; newer Pillow uses Image.frombytes.)
    im = Image.fromstring("RGB", (imageWidth, imageHeight), array)
    # Grab the image from PIL and convert it to an OpenCV (BGR) image.
    img = np.array(im)
    img = img[:, :, ::-1].copy()
    #im.save(name, "PNG")
    print "acquisition delay ", t1 - t0
    return img
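A possible call site for the helper above, displaying the returned BGR frame with OpenCV; the IP address and window name are illustrative only:

frame = getNaoImage("192.168.1.10", 9559)
cv2.imshow("nao_camera", frame)
cv2.waitKey(0)
cv2.destroyAllWindows()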