当前位置: 首页>>代码示例>>Python>>正文


Python ALProxy.on方法代码示例

本文整理汇总了Python中naoqi.ALProxy.on方法的典型用法代码示例。如果您正苦于以下问题:Python ALProxy.on方法的具体用法?Python ALProxy.on怎么用?Python ALProxy.on使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在naoqi.ALProxy的用法示例。


在下文中一共展示了ALProxy.on方法的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: stt

# 需要导入模块: from naoqi import ALProxy [as 别名]
# 或者: from naoqi.ALProxy import on [as 别名]
def stt():
	"""Record ~5 s of audio on the NAO's front microphone and return the
	transcript from the Google Cloud Speech API.

	Returns the top transcript as a str, or "" when recognition fails
	(ValueError from sync_recognize, e.g. no speech detected).
	"""
	speech_client = speech.Client.from_service_account_json('/home/nao/ourcodes/MyFirstProject-47fa9e048ac2.json')

	tts = ALProxy("ALTextToSpeech", "localhost", 9559)
	tts.resetSpeed()

	# Channel selector [left, right, front, rear]: record the front mic only.
	channels = [0, 0, 1, 0]

	rec = ALProxy("ALAudioRecorder", "localhost", 9559)
	leds = ALProxy("ALLeds", "localhost", 9559)

	rec.startMicrophonesRecording("/home/nao/ourcodes/test.wav", "wav", 16000, channels)

	# rotateEyes blocks for 5 s, which doubles as the recording duration.
	leds.rotateEyes(0x000000FF, 1, 5)

	rec.stopMicrophonesRecording()
	leds.on("FaceLeds")

	with open("/home/nao/ourcodes/test.wav", 'rb') as audio_file:
		content = audio_file.read()
		audio_sample = speech_client.sample(
			content=content,
			source_uri=None,
			encoding='LINEAR16',
			sample_rate=16000)
		try:
			alternatives = speech_client.speech_api.sync_recognize(audio_sample, language_code='en-IN')
			return str(alternatives[0].transcript)
		except ValueError:
			# BUGFIX: original line mixed leading spaces and tabs, which is a
			# TabError under Python 3 and ambiguous under Python 2.
			return ""
开发者ID:sanketh26,项目名称:github-demo,代码行数:37,代码来源:naogooglestt.py

示例2: Robot

# 需要导入模块: from naoqi import ALProxy [as 别名]
# 或者: from naoqi.ALProxy import on [as 别名]
class Robot(ALModule):
	def __init__( self, strName, address = "bobby.local", port = 9559):
		"""
		Register this ALModule under strName and create proxies to every
		NAOqi service the class uses (audio, ASR, segmentation, TTS,
		memory, motion, face tracking, gaze, camera, LEDs, sound
		detection), then stiffen the body and go to a Crouch posture.
		"""
		ALModule.__init__( self, strName )
		# Recording bookkeeping; presumably written by methods outside this
		# view -- confirm before removing.
		self.outfile = None
		self.outfiles = [None]*(3)
		self.count = 99999999
		self.check = False

		# --- audio ---
		self.audio = ALProxy("ALAudioDevice", address, port)
		# 48 kHz on all four microphones; trailing flags per the
		# ALAudioDevice.setClientPreferences API (confirm meanings).
		self.audio.setClientPreferences(self.getName(), 48000, [1,1,1,1], 0, 0)

		# --- speech recognition ---
		# self.sr = ALProxy("SoundReceiver", address, port)
		self.asr = ALProxy("ALSpeechRecognition", address, port)
		self.asr.setLanguage("English")

		# Synonyms accepted as yes/no answers.
		self.yes_no_vocab = {
			"yes": ["yes", "ya", "sure", "definitely"],
			"no": ["no", "nope", "nah"]
		}

		# TODO: add unknown object names into this somehow
		# also add other names for objects dynamically??????
		# Spoken names recognized for each known object id.
		self.object_vocab = {
			"digital_clock": ["digital clock", "blue clock", "black alarm clock"],
			"analog_clock": ["analog clock", "black clock", "black alarm clock"],
			"red_soccer_ball": [u"red soccer ball", "red ball"],
			"basketball": ["basketball", "orange ball"],
			"football": ["football"],
			"yellow_book": ["yellow book"],
			"yellow_flashlight": ["yellow flashlight"],
			"blue_soccer_ball": ["blue soccer ball", "blue ball"],
			"apple": ["apple"],
			"black_mug": ["black mug"],
			"blue_book": ["blue book"],
			"blue_flashlight": [u"blue flashlight"],
			"cardboard_box": ["cardboard box"],
			"pepper": ["pepper", "jalapeno"],
			"green_mug": ["green mug"],
			"polka_dot_box": ["polka dot box", "polka dotted box", "spotted box", "brown and white box"],
			"scissors": ["scissors"]
		}

		# Only the yes/no synonyms are loaded into the ASR vocabulary here
		# (object_vocab is not); word spotting disabled (False).
		self.asr.setVocabulary([j for i in self.yes_no_vocab.values() for j in i], False)

		# Custom segmentation module (project-local NAOqi module).
		self.segmentation = ALProxy("Segmentation", address, port)

		# --- text to speech ---
		self.tts = ALProxy("ALTextToSpeech", address, port)

		# --- memory ---
		self.mem = ALProxy("ALMemory", address, port)

		# --- robot movement ---
		self.motion = ALProxy("ALMotion", address, port)
		self.pose = ALProxy("ALRobotPosture", address, port)

		# Stiffen the whole body over 1 s, then crouch slowly (speed 0.2).
		self.motion.stiffnessInterpolation("Body", 1.0, 1.0)
		self.pose.goToPosture("Crouch", 0.2)

		# --- face tracking ---
		self.track = ALProxy("ALFaceTracker", address, port)

		# Track faces with head motion only, not whole-body motion.
		self.track.setWholeBodyOn(False)

		# --- gaze analysis ---
		self.gaze = ALProxy("ALGazeAnalysis", address, port)

		# --- camera ---
		self.cam = ALProxy("ALVideoDevice", address, port)

		# --- leds ---
		self.leds = ALProxy("ALLeds", address, port)

		# Eye LED colors as 0x00RRGGBB values.
		self.colors = {
			"pink": 0x00FF00A2,
			"red": 0x00FF0000,
			"orange": 0x00FF7300,
			"yellow": 0x00FFFB00,
			"green": 0x000DFF00,
			"blue": 0x000D00FF,
			"purple": 0x009D00FF
		}

		# --- sound detection ---
		self.sound = ALProxy("ALSoundDetection", address, port)

		# 0.99 = near-maximum sensitivity to sounds.
		self.sound.setParameter("Sensibility", 0.99)

	def __del__(self):
		# Destructor: only logs that the wrapper is going away; proxies are
		# left for NAOqi/GC to clean up.
		print "End Robot Class"


	def say(self, text, block = True):
		"""
		Uses ALTextToSpeech to vocalize the given string.
		If "block" argument is False, makes call asynchronous.
		"""
#.........这里部分代码省略.........
开发者ID:noelledavis,项目名称:nao_gaze_tracking,代码行数:103,代码来源:robot.py

示例3: Nao

# 需要导入模块: from naoqi import ALProxy [as 别名]
# 或者: from naoqi.ALProxy import on [as 别名]
class Nao(object):
    """
    Controls a nao
    """

    TO_RAD = math.pi / 180.0 
    ## Minimum and maximum ranges of joints
    __joint_range = {}

    def __init__(self, robot_ip, port=9559, nobody=False):
        """
        Connect to the NAOqi services on the given robot.

        robot_ip -- robot hostname or IP
        port     -- NAOqi port (default 9559)
        nobody   -- if True, stop() performs no body motion
        """
        self.logger = logging.getLogger('Borg.Brain.BodyController.Nao')
        self.__robot_ip = robot_ip
        self.__port = port
        # Robot login credentials; not used in the code visible here --
        # presumably consumed elsewhere in the class (confirm).
        self.__username = 'nao'
        self.__password = 'nao'
        self.__FM = ALProxy("ALFrameManager", robot_ip, int(port))
        try:
            # Local NaoQi does not have TTS, real robot does
            self.__TTS = ALProxy("ALTextToSpeech", robot_ip, int(port))
            self.__VisionTB = ALProxy("ALVisionToolbox", robot_ip, int(port))
            self.__Sonar = ALProxy("ALSonar", robot_ip, int(port))
            # Subscribe args per ALSonar.subscribe (period 200, precision
            # 0.02 -- confirm against the NAOqi docs for this version).
            self.__Sonar.subscribe("Sonar",200,0.02)
            self.__BallTracker = ALProxy("ALRedBallTracker", robot_ip, int(port))
            self.__Navigation = ALProxy("ALNavigation", robot_ip, int(port))
            self.simulation = False
        except:
            # NOTE(review): bare except -- any failure above (including typos)
            # is interpreted as "running in simulation"; the robot-only
            # proxies become None and callers must check for that.
            self.__TTS = None
            self.__VisionTB = None
            self.__Sonar = None
            self.__BallTracker = None
            self.__Navigation = None
            self.simulation = True
        self.__Motion = ALProxy("ALMotion", robot_ip, int(port))
        self.__Memory = ALProxy("ALMemory", robot_ip, int(port))
        self.__Video = ALProxy("ALVideoDevice", robot_ip, int(port))
        self.__Leds = ALProxy("ALLeds", robot_ip, int(port))
        # Map of behavior name -> id, filled by behavior-related methods.
        self.__behaviorIDs = {}
        self.__stop_crouch = True
        self.__nobody = nobody

        # Enable TTS notifications, just in case (so we can determine if the nao is currently speaking or not):
        if not self.__TTS == None:
            try:
                self.__TTS.enableNotifications()
            except Exception as e:
                #TODO: Verify that the generated exception is catched only because notifications is already enabled.
                print e
        
        #Create LED Groups for NAO eyes or ears
        self.setLedsGroup()


    def __del__(self):
        """Relax the robot and unsubscribe the sonar on destruction."""
        self.logger.info("NAO controller stopping, de-enslaving NAO")
        # Drop whole-body stiffness to 0 over 0.25 s (method name as defined
        # elsewhere in this class, including its typo).
        self.set_stifness(['Body'], [0], [0.25])
        if self.__Sonar:
            self.__Sonar.unsubscribe("Sonar")

    def stop(self):
        """Bring the robot to a safe resting pose and release stiffness.

        Does nothing when constructed with nobody=True. Otherwise either
        crouches directly (default) or runs the 'sitdown' behavior,
        depending on set_crouch_on_stop().
        """
        if self.__nobody:
            return

        if self.__stop_crouch:
            self.sit_down()
        else:
            self.start_behavior('sitdown', True)
            # Fixed wait for the behavior to finish -- no completion signal
            # is checked here.
            time.sleep(10)
        print "De-enslaving Nao"
        self.set_stifness(['Body'], [0], [0.25])
    def setLedsGroup(self, names = None):
        """Create the LED group "MyGroup".

        With no argument (or an empty/None one), the group is built from
        the eight red face-LED actuators (both eyes, 90-degree steps).
        When a non-empty names list is supplied, nothing is done -- the
        caller-provided group is assumed to exist already.
        """
        if names:
            return
        # Build the default actuator paths: left eye first, then right,
        # each at 0/90/180/270 degrees.
        actuator_paths = []
        for side in ("Left", "Right"):
            for degrees in (0, 90, 180, 270):
                actuator_paths.append(
                    "Face/Led/Red/%s/%dDeg/Actuator/Value" % (side, degrees))
        self.__Leds.createGroup("MyGroup", actuator_paths)
            
    def set_crouch_on_stop(self, crouch=True):
        # Choose the shutdown pose used by stop(): True -> direct crouch,
        # False -> run the 'sitdown' behavior instead.
        self.__stop_crouch = crouch

    def move(self, Joint, Angle, Speed):
        # Thin wrapper around ALMotion.setAngles (non-blocking joint command).
        self.__Motion.setAngles(Joint, Angle, Speed)

    def walk(self, X=0, Y=0, Teta=0):
        # Blocking walk to the relative target (X, Y) with final rotation
        # Teta (ALMotion.walkTo: metres / radians).
        self.__Motion.walkTo(X, Y, Teta)

    def walkNav(self, X=0, Y=0, Teta=0, distance = 0.4):
        # Walk with obstacle avoidance; 'distance' is ALNavigation's security
        # margin. NOTE(review): __Navigation is None in simulation mode, so
        # this raises there.
        self.__Navigation.setSecurityDistance(distance)
        self.__Navigation.moveTo(X, Y, Teta)

    def isWalking(self):
        # True while a walk command is currently being executed.
        return self.__Motion.walkIsActive()
        
    def stopwalk(self):
#.........这里部分代码省略.........
开发者ID:maxseidler,项目名称:PAS,代码行数:103,代码来源:nao.py

示例4: followTheLineModule

# 需要导入模块: from naoqi import ALProxy [as 别名]
# 或者: from naoqi.ALProxy import on [as 别名]
class followTheLineModule(ALModule, Thread):
    """docstring for followTheLineModule"""
    def __init__(self, name):
        """Initialize the line-following module.

        Creates the posture/LED/TTS proxies, prepares the StandInit
        posture, instantiates the three worker threads (as module-level
        singletons) and starts the camera-refresh thread, then announces
        readiness out loud.

        name -- module name forwarded to ALModule.__init__.
        """
        Thread.__init__(self)
        ALModule.__init__(self, name)

        # Create new object display a colored message on computer's terminal.
        self.logs = logs()
        self.logs.display("Initializing line following module...")

        # Create new proxies.
        self.posture = ALProxy("ALRobotPosture")
        self.logs.display("Subscribed to an ALRobotPosture proxy",
                          "Good")

        self.leds = ALProxy("ALLeds")
        self.logs.display("Subscribed to an ALLeds proxy",
                          "Good")

        # BUGFIX: self.tts was used at the end of this method (and possibly
        # elsewhere) but never created, raising AttributeError at runtime.
        self.tts = ALProxy("ALTextToSpeech")
        self.logs.display("Subscribed to an ALTextToSpeech proxy",
                          "Good")

        # Prepare Nao.
        self.posture.goToPosture("StandInit", 1.0)
        self.logs.display("Nao is going to posture StandInit")

        # Create the worker threads once; they are module-level singletons.
        global getDirectionFromLineThread
        if (getDirectionFromLineThread is None):
            getDirectionFromLineThread = getDirectionFromLine("getDirectionFromLine")
            self.logs.display("New thread 'getDirectionFromLine' created",
                              "Good")

        global NaoWalksThread
        if (NaoWalksThread is None):
            NaoWalksThread = NaoWalks("NaoWalks")
            self.logs.display("New thread 'NaoWalks' created",
                              "Good")

        global RefreshCamThread
        if (RefreshCamThread is None):
            RefreshCamThread = RefreshCam("RefreshCam")
            self.logs.display("New thread 'RefreshCam' created",
                              "Good")

        # Start the camera thread unless it is already running.
        if not RefreshCamThread.isAlive():
            RefreshCamThread.start()
            self.logs.display("Thread 'RefreshCam' started",
                              "Good")

        # Ready!
        self.logs.display("Module ready", "Good")
        self.tts.say("Je suis prêt")


    # Shut down Nao by clicking on his front head button.
    def onTouched(self, *_args):
        """Event callback for "FrontTactilTouched": stop the module.

        Unsubscribes the event first so the teardown cannot re-trigger,
        then deletes every worker thread and finally the module itself.
        """
        global memory
        memory.unsubscribeToEvent("FrontTactilTouched",
                                  "followTheLine")

        # Properly close threads.
        global getDirectionFromLineThread
        getDirectionFromLineThread.delete()
        global NaoWalksThread
        NaoWalksThread.delete()
        global RefreshCamThread
        RefreshCamThread.delete()
        global followTheLine
        followTheLine.delete()


    # Method called by the Thread.start() method.
    def run(self):
        global memory       
        memory = ALProxy("ALMemory")
        memory.subscribeToEvent("FrontTactilTouched",
                                "followTheLine",
                                "onTouched")

        # Start a new threads.
        global getDirectionFromLineThread
        getDirectionFromLineThread.start()

        while True:
            if (getDirectionFromLine is not None 
                and getDirectionFromLineThread.isAlive()):
                pass
            else:
                # Start a new thread.
                getDirectionFromLineThread = getDirectionFromLine("getDirectionFromLine")
                getDirectionFromLineThread.start()

            global NaoWalksThread
            global NaoShouldWalk
            if (NaoShouldWalk is True):
                self.leds.randomEyes(0.5)
                self.leds.on("FaceLeds")
                if (NaoWalksThread.isAlive() is not True):
                    # Start a new thread.
#.........这里部分代码省略.........
开发者ID:NSenaud,项目名称:NaoChallengeProject,代码行数:103,代码来源:followTheLine.py

示例5: int

# 需要导入模块: from naoqi import ALProxy [as 别名]
# 或者: from naoqi.ALProxy import on [as 别名]
        moments = cv.Moments(cv.GetMat(hsvThresh), 0)
        area = cv.GetCentralMoment(moments, 0, 0)

        xPrev = xTracker
        yPrev = yTracker

        ##################################################
        ####################### LED EYES #################
        ##################################################

        if(area > 1):
            xTracker = int(cv.GetSpatialMoment(moments, 1, 0) / area)/40
            yTracker = int(cv.GetSpatialMoment(moments, 0, 1) / area)/40
            if((xTracker >= 7) & (xTracker <= 9) & (yPrev != yTracker)):
                led.on('lGroup')
                led.on('rGroup')
            elif(xPrev > xTracker):
                led.off('rGroup')
                led.on('lGroup')
            elif(xPrev < xTracker):
                led.off('lGroup')
                led.on('rGroup')
            else:
                led.off('lGroup')
                led.off('rGroup')
        else:
                led.off('lGroup')
                led.off('rGroup')

        #convert from BGR to RGB
开发者ID:brandonstrong,项目名称:school,代码行数:32,代码来源:Naoqi+Penalty+Kick.py

示例6: PictureTakerModule

# 需要导入模块: from naoqi import ALProxy [as 别名]
# 或者: from naoqi.ALProxy import on [as 别名]
class PictureTakerModule(ALModule):
    """NAOqi module that strikes a picture-taking pose, waits for a loud
    sound as the shutter cue, then takes a photo and shares it."""

    # Arm/hand joint targets for the "holding a camera" pose (angles in
    # radians; the *Hand values are open-ratios in [0, 1]).
    take_picture_pose_data = {'RElbowRoll': 1.5539841651916504,
                              'LShoulderRoll': 0.2607381343841553,
                              'LElbowRoll': -1.5615700483322144,
                              'LWristYaw': 0.04444408416748047,
                              'RShoulderPitch': 0.21480202674865723,
                              'RElbowYaw': 1.1719341278076172,
                              'RShoulderRoll': -0.28996801376342773,
                              'RHand': 0.9692000150680542,
                              'LShoulderPitch': 0.34050607681274414,
                              'LElbowYaw': -1.199629783630371,
                              'LHand': 0.9991999864578247,
                              'RWristYaw': -0.0353238582611084}

    def __init__(self, name):
        """Register the module and create the NAOqi proxies it needs.

        name -- module name forwarded to ALModule.__init__.
        """
        ALModule.__init__(self, name)
        self.name = name
        self.tts = ALProxy("ALTextToSpeech")
        self.soundDetection = ALProxy("ALSoundDetection")
        # 0.8 = fairly sensitive; loud claps/voices trigger "SoundDetected".
        self.soundDetection.setParameter("Sensibility", 0.8)
        self.soundPlayer = ALProxy("ALAudioPlayer")
        self.memory = ALProxy("ALMemory")
        self.motion = ALProxy("ALMotion")
        self.leds = ALProxy("ALLeds")
        # Invoked once the picture has been taken (set by start()).
        self.onCompleteCallback = None

    def start(self, onCompleteCallback=None):
        """Strike the camera pose, prompt the subject, and arm the
        SoundDetected event that triggers the actual picture."""
        self.onCompleteCallback = onCompleteCallback
        self.moveToPose(self.take_picture_pose_data)
        chosen_prompt = random.choice(["Say Cheese!",
                                       "Cheese!",
                                       "Say Fromage!",
                                       "Say Robot!"])
        self.tts.say(chosen_prompt)
        self.memory.subscribeToEvent("SoundDetected", "PictureTaker", "onSoundDetected")

    def moveToPose(self, poseData):
        """Stiffen the arms and drive every joint in poseData to its
        target angle, switching the face LEDs off while posing."""
        joint_names = poseData.keys()
        joint_angles = poseData.values()
        self.motion.setStiffnesses("Arms", 0.5)
        self.motion.setAngles(joint_names, joint_angles, 0.3)
        self.leds.off("FaceLeds")

    def onSoundDetected(self, eventName, soundInfo, _):
        """called when nao detects a loud sound"""
        print "Got sound %r" % (soundInfo,)
        for sound in soundInfo:
            print "sound ", sound
        # One-shot: stop listening before taking the picture so the shutter
        # sound itself cannot re-trigger this callback.
        self.memory.unsubscribeToEvent("SoundDetected", "PictureTaker")
        time.sleep(0.5)
        # post. makes the shutter sound asynchronous so it overlaps capture.
        self.soundPlayer.post.playFile(SHUTTER_SOUND_FILE)
        img_bin = self.takePicture()
        self.emailPicture(img_bin)
        self.sendPictureToFlickr(img_bin)
 
    def takePicture(self):
        """Capture a jpg with ALPhotoCapture and return its bytes.

        Side effects: deletes the temp file, restores face LEDs, says a
        reaction, relaxes the arms, and fires onCompleteCallback if set.
        """
        absoluteFilename = tempfile.mktemp()
        directoryName = "/" + absoluteFilename.split('/')[1]
        # NOTE(review): this takes the LAST CHARACTER of the temp path as
        # the picture name; probably meant .split('/')[-1]. It still works
        # because takePicture() returns the real resulting filename.
        filename = absoluteFilename[-1]
        photoCapture = ALProxy("ALPhotoCapture")
        photoCapture.setResolution(3)
        photoCapture.setPictureFormat('jpg')
        theFilename = photoCapture.takePicture(directoryName, filename)
        img_bin = ''
        with open(theFilename[0], "rb") as f:
            img_bin = f.read()
        # Remove the on-robot file once its bytes are in memory.
        os.unlink(theFilename[0])
        reactions = ["That's a fantastic picture!",
                     "Great photo!",
                     "Nice one!"]
        self.leds.on("FaceLeds")
        self.tts.say(random.choice(reactions))
        self.motion.setStiffnesses("Arms", 0.0)
        if self.onCompleteCallback:
            self.onCompleteCallback()
        return img_bin

    def sendPictureToFlickr(self, img_bin):
        flickr = flickrapi.FlickrAPI(FLICKR_API_KEY, FLICKR_SECRET)
        def auth(frob, perms):
            print "Please give permissions %s for frob %s" % (perms, frob)
            encoded = flickr.encode_and_sign({ 
                      "api_key": flickr.api_key, 
                      "frob": frob, 
                      "perms": perms})
            print "URL is: https://api.flickr.com/services/auth?%s" % (encoded,) 
        token, frob = flickr.get_token_part_one(perms="write", auth_callback=auth)
        if not token: raw_input("Press ENTER after you authorized this program")
        flickr.get_token_part_two((token, frob))
        self.flickr_tempfilename = tempfile.mktemp()
        f = open(self.flickr_tempfilename, "wb")
        f.write(img_bin)
        f.close()
        flickr.upload(self.flickr_tempfilename,
            callback=self.flickrCallback,
            title="Taken by Xico at " + time.strftime('%Y-%M-%d %H:%m'),
            tags='nao',
            is_public='0',
            is_family='0',
            is_friend='0',
            content_type='1')
#.........这里部分代码省略.........
开发者ID:zaheerm,项目名称:naophotographer,代码行数:103,代码来源:take_picture.py

示例7: abs

# 需要导入模块: from naoqi import ALProxy [as 别名]
# 或者: from naoqi.ALProxy import on [as 别名]
    # initial head pitch
    r = rospy.Rate(5) 
    goingLeft = True
    while not rospy.is_shutdown():
#     	pitch = motionProxy.getAngles("HeadPitch",True)[0]
#     	if abs(pitch - -.4) > .1:
# 	        moveJoint ("HeadPitch",motionProxy,-.4, .5)
        # Obtengo los del ultimo seg
        now = rospy.Time.now()
        markers = markersCache.query(now - rospy.Duration(3), now)
        # Los cuento
        numMarkers = len(markers)
        # Si hay alguno
        if (numMarkers >= 1):
            ledProxy.on("FaceLeds")
#             print "Vi hace un segundo por lo menos"
            # Posicion del ultimo -> pos
            lastM = markers[numMarkers - 1]
            x = lastM.pose.position.x
            px = x / .40
            anglex = -math.copysign(.1 * abs(px), px)
            velx = 1#abs(px) * 0.1
            moveJoint ("HeadYaw", motionProxy, anglex, velx)
        else :
            ledProxy.off("FaceLeds")
#             print "No veo!"
#             yaw = motionProxy.getAngles("HeadYaw", True)[0]
# #             print yaw
# #             print goingLeft
#             if (goingLeft):
开发者ID:mllofriu,项目名称:morris_simulator,代码行数:32,代码来源:headMarkerFollower.py

示例8: __init__

# 需要导入模块: from naoqi import ALProxy [as 别名]
# 或者: from naoqi.ALProxy import on [as 别名]
class LedController:

    def __init__(self, use_nao):
        self.use_nao = use_nao

        if use_nao:
            self.leds = ALProxy("ALLeds","10.0.1.3",9559)
            red_names = [
                "Face/Led/Red/Left/0Deg/Actuator/Value",
                "Face/Led/Red/Left/90Deg/Actuator/Value",
                "Face/Led/Red/Left/180Deg/Actuator/Value",
                "Face/Led/Red/Left/270Deg/Actuator/Value",
                "Face/Led/Red/Left/45Deg/Actuator/Value",
                "Face/Led/Red/Left/135Deg/Actuator/Value",
                "Face/Led/Red/Left/225Deg/Actuator/Value",
                "Face/Led/Red/Left/315Deg/Actuator/Value",
                "Face/Led/Red/Right/0Deg/Actuator/Value",
                "Face/Led/Red/Right/90Deg/Actuator/Value",
                "Face/Led/Red/Right/180Deg/Actuator/Value",
                "Face/Led/Red/Right/270Deg/Actuator/Value",
                "Face/Led/Red/Right/45Deg/Actuator/Value",
                "Face/Led/Red/Right/135Deg/Actuator/Value",
                "Face/Led/Red/Right/225Deg/Actuator/Value",
                "Face/Led/Red/Right/315Deg/Actuator/Value"]
            self.leds.createGroup("RedGroup",red_names)

            blue_names = [
                "Face/Led/Blue/Left/0Deg/Actuator/Value",
                "Face/Led/Blue/Left/90Deg/Actuator/Value",
                "Face/Led/Blue/Left/180Deg/Actuator/Value",
                "Face/Led/Blue/Left/270Deg/Actuator/Value",
                "Face/Led/Blue/Left/45Deg/Actuator/Value",
                "Face/Led/Blue/Left/135Deg/Actuator/Value",
                "Face/Led/Blue/Left/225Deg/Actuator/Value",
                "Face/Led/Blue/Left/315Deg/Actuator/Value",
                "Face/Led/Blue/Right/0Deg/Actuator/Value",
                "Face/Led/Blue/Right/90Deg/Actuator/Value",
                "Face/Led/Blue/Right/180Deg/Actuator/Value",
                "Face/Led/Blue/Right/270Deg/Actuator/Value",
                "Face/Led/Blue/Right/45Deg/Actuator/Value",
                "Face/Led/Blue/Right/135Deg/Actuator/Value",
                "Face/Led/Blue/Right/225Deg/Actuator/Value",
                "Face/Led/Blue/Right/315Deg/Actuator/Value"]
            self.leds.createGroup("BlueGroup",blue_names)

            green_names = [
                "Face/Led/Green/Left/0Deg/Actuator/Value",
                "Face/Led/Green/Left/90Deg/Actuator/Value",
                "Face/Led/Green/Left/180Deg/Actuator/Value",
                "Face/Led/Green/Left/270Deg/Actuator/Value",
                "Face/Led/Green/Left/45Deg/Actuator/Value",
                "Face/Led/Green/Left/135Deg/Actuator/Value",
                "Face/Led/Green/Left/225Deg/Actuator/Value",
                "Face/Led/Green/Left/315Deg/Actuator/Value",
                "Face/Led/Green/Right/0Deg/Actuator/Value",
                "Face/Led/Green/Right/90Deg/Actuator/Value",
                "Face/Led/Green/Right/180Deg/Actuator/Value",
                "Face/Led/Green/Right/270Deg/Actuator/Value",
                "Face/Led/Green/Right/45Deg/Actuator/Value",
                "Face/Led/Green/Right/135Deg/Actuator/Value",
                "Face/Led/Green/Right/225Deg/Actuator/Value",
                "Face/Led/Green/Right/315Deg/Actuator/Value"]
            self.leds.createGroup("GreenGroup",green_names)


            # Switch the new groups on
            self.leds.on("RedGroup")
            self.leds.on("BlueGroup")
            self.leds.on("GreenGroup")
    
    def set_eyes_to_green(self, use_nao):
        if use_nao:
            self.leds.setIntensity("RedGroup", 0)
            self.leds.setIntensity("BlueGroup", 0)
            self.leds.setIntensity("GreenGroup", 1)
        
    def set_eyes_to_blue(self, use_nao):
        if use_nao:
            self.leds.setIntensity("RedGroup", 0)
            self.leds.setIntensity("BlueGroup", 1)
            self.leds.setIntensity("GreenGroup", 0)
        
    def set_eyes_to_red(self, use_nao):
        if use_nao:
            self.leds.setIntensity("RedGroup", 1)
            self.leds.setIntensity("BlueGroup", 0)
            self.leds.setIntensity("GreenGroup", 0)
        
    def set_eyes_to_yellow(self, use_nao):
        if use_nao:
            self.leds.setIntensity("RedGroup", 1)
            self.leds.setIntensity("BlueGroup", 0)
            self.leds.setIntensity("GreenGroup", 1)
        
    def set_eyes_to_white(self, use_nao):
        if use_nao:
            self.leds.setIntensity("RedGroup", 1)
            self.leds.setIntensity("BlueGroup", 1)
            self.leds.setIntensity("GreenGroup", 1)
        
#.........这里部分代码省略.........
开发者ID:pieterwolfert,项目名称:HRI,代码行数:103,代码来源:eye_leds.py

示例9: Robot

# 需要导入模块: from naoqi import ALProxy [as 别名]
# 或者: from naoqi.ALProxy import on [as 别名]
class Robot(ALModule):
	def __init__( self, strName, address = "bobby.local", port = 9559):
		"""
		Register this ALModule and create proxies to the NAOqi services
		used for speech, motion, face tracking, gaze analysis, camera,
		LEDs and sound detection.
		"""
		ALModule.__init__( self, strName )

		# text to speech
		self.tts = ALProxy("ALTextToSpeech", address, port)

		# memory
		self.mem = ALProxy("ALMemory", address, port)

		# robot movement
		self.motion = ALProxy("ALMotion", address, port)
		self.pose = ALProxy("ALRobotPosture", address, port)

		# face tracking
		self.track = ALProxy("ALFaceTracker", address, port)
		# initialize face-tracking to following with head movement only
		self.track.setWholeBodyOn(False)

		# gaze analysis
		self.gaze = ALProxy("ALGazeAnalysis", address, port)
		# set highest tolerance for determining if people are looking at the robot because those people's IDs are the only ones stored
		self.gaze.setTolerance(1)
		# start writing gaze data to robot memory
		self.gaze.subscribe("_")

		# camera
		self.cam = ALProxy("ALVideoDevice", address, port)

		# leds
		self.leds = ALProxy("ALLeds", address, port)

		# Eye LED colors as 0x00RRGGBB values.
		self.colors = {
			"pink": 0x00FF00A2,
			"red": 0x00FF0000,
			"orange": 0x00FF7300,
			"yellow": 0x00FFFB00,
			"green": 0x000DFF00,
			"blue": 0x000D00FF,
			"purple": 0x009D00FF
		}

		# sound detection
		self.sound = ALProxy("ALSoundDetection", address, port)
		# initialize sound sensitivity (0.95 = very sensitive)
		self.sound.setParameter("Sensibility", 0.95)

	def __del__(self):
		# Destructor: only logs; proxies are left for NAOqi/GC to clean up.
		print "End Robot Class"


	def say(self, text, block = True):
		"""
		Vocalize the given string with ALTextToSpeech.
		With block=False the call goes through post (asynchronous), so the
		method returns immediately instead of waiting for speech to end.
		"""
		if not block:
			self.tts.post.say(text)
		else:
			self.tts.say(text)

	def wake(self, stiffness = 1.0):
		"""
		Turns stiffnesses on, goes to Crouch position, and lifts head.

		stiffness -- target whole-body stiffness in [0, 1] (default 1.0).
		"""

		# Ramp whole-body stiffness over 1 s, crouch slowly, then tilt the
		# head up by 10 degrees (negative pitch = up).
		self.motion.stiffnessInterpolation("Body", stiffness, 1.0)
		self.pose.goToPosture("Crouch", 0.2)
		self.turnHead(pitch = math.radians(-10))

	def rest(self):
		"""
		Goes to a resting position and turns robot stiffnesses off
		(delegates to ALMotion.rest).
		"""

		self.motion.rest()

	def sit(self):
		"""
		Goes to Crouch position at the default posture speed.
		"""

		self.pose.goToPosture("Crouch")

	def stand(self):
		"""
		Robot stands (posture "Stand" at speed 0.4).
		"""

		self.pose.goToPosture("Stand", 0.4)

	def turnHead(self, yaw = None, pitch = None, speed = 0.2):
		"""
		Turns robot head to the specified yaw and/or pitch in degrees at the given speed.
		Yaw can range from 119.5 deg (left) to -119.5 deg (right) and pitch can range from 38.5 deg (down) to -29.5 deg (up).
		"""

#.........这里部分代码省略.........
开发者ID:HiLT-ISpy,项目名称:robot-control,代码行数:103,代码来源:robot.py

示例10: Robot

# 需要导入模块: from naoqi import ALProxy [as 别名]
# 或者: from naoqi.ALProxy import on [as 别名]
class Robot(ALModule):
	def __init__( self, strName, address = "bobby.local", port = 9559):
		"""
		Register this ALModule and create proxies to every NAOqi service
		the class uses (audio, ASR, sound receiver, segmentation, TTS,
		memory, motion, face tracking, gaze analysis, camera, LEDs, sound
		detection), then stiffen the body and go to a Crouch posture.
		"""
		ALModule.__init__( self, strName )

		# Are these used for anything?
		# self.outfile = None
		# self.outfiles = [None]*(3)
		# self.count = 99999999
		# self.check = False

		# --- audio ---
		self.audio = ALProxy("ALAudioDevice", address, port)
		# 48 kHz on all four microphones; trailing flags per the
		# ALAudioDevice.setClientPreferences API (confirm meanings).
		self.audio.setClientPreferences(self.getName(), 48000, [1,1,1,1], 0, 0)

		# --- speech recognition ---
		# "SoundReceiver" is a project-local NAOqi module, not a stock one.
		self.sr = ALProxy("SoundReceiver", address, port)
		self.asr = ALProxy("ALSpeechRecognition", address, port)
		self.asr.setLanguage("English")

		# Synonyms accepted as yes/no answers, keyed by the boolean meaning.
		self.yes_no_vocab = {
			True: ["yes", "ya", "sure", "definitely"],
			False: ["no", "nope", "nah"]
		}

		# TODO: add unknown object names into this somehow
		# also add other names for objects dynamically??????
		# Spoken names recognized for each known object id.
		self.object_vocab = {
			"digital_clock": ["digital clock", "blue clock", "black alarm clock"],
			"analog_clock": ["analog clock", "black clock", "black alarm clock"],
			"red_soccer_ball": [u"red soccer ball", "red ball"],
			"basketball": ["basketball", "orange ball"],
			"football": ["football"],
			"yellow_book": ["yellow book"],
			"yellow_flashlight": ["yellow flashlight"],
			"blue_soccer_ball": ["blue soccer ball", "blue ball"],
			"apple": ["apple"],
			"black_mug": ["black mug"],
			"blue_book": ["blue book"],
			"blue_flashlight": [u"blue flashlight"],
			"cardboard_box": ["cardboard box"],
			"pepper": ["pepper", "jalapeno"],
			"green_mug": ["green mug"],
			"polka_dot_box": ["polka dot box", "polka dotted box", "spotted box", "brown and white box"],
			"scissors": ["scissors"]
		}

		# Only the yes/no synonyms are loaded into the ASR vocabulary here
		# (object_vocab is not); word spotting disabled (False).
		self.asr.setVocabulary([j for i in self.yes_no_vocab.values() for j in i], False)

		# Custom segmentation module (project-local NAOqi module).
		self.segmentation = ALProxy("Segmentation", address, port)

		# --- text to speech ---
		self.tts = ALProxy("ALTextToSpeech", address, port)

		# --- memory ---
		self.mem = ALProxy("ALMemory", address, port)

		# --- robot movement ---
		self.motion = ALProxy("ALMotion", address, port)
		self.pose = ALProxy("ALRobotPosture", address, port)

		# Stiffen the whole body over 1 s, then crouch slowly (speed 0.2).
		self.motion.stiffnessInterpolation("Body", 1.0, 1.0)
		self.pose.goToPosture("Crouch", 0.2)

		# --- face tracking ---
		self.track = ALProxy("ALFaceTracker", address, port)

		# Track faces with head motion only, not whole-body motion.
		self.track.setWholeBodyOn(False)

		# --- gaze analysis ---
		self.gaze = ALProxy("ALGazeAnalysis", address, port)
		# set highest tolerance for determining if people are looking at the robot because those people's IDs are the only ones stored
		self.gaze.setTolerance(1)
		# start writing gaze data to robot memory
		self.gaze.subscribe("_")

		# --- camera ---
		self.cam = ALProxy("ALVideoDevice", address, port)

		# --- leds ---
		self.leds = ALProxy("ALLeds", address, port)

		# Eye LED colors as 0x00RRGGBB values.
		self.colors = {
			"pink": 0x00FF00A2,
			"red": 0x00FF0000,
			"orange": 0x00FF7300,
			"yellow": 0x00FFFB00,
			"green": 0x000DFF00,
			"blue": 0x000D00FF,
			"purple": 0x009D00FF
		}

		# --- sound detection ---
		self.sound = ALProxy("ALSoundDetection", address, port)

		# 0.95 = very sensitive sound detection.
		self.sound.setParameter("Sensibility", 0.95)

	def __del__(self):
		# Destructor: only logs; proxies are left for NAOqi/GC to clean up.
		print "End Robot Class"

#.........这里部分代码省略.........
开发者ID:HiLT-ISpy,项目名称:ispy,代码行数:103,代码来源:robot.py


注:本文中的naoqi.ALProxy.on方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。