

Python ALProxy.setParameter Method Code Examples

This article collects and summarizes typical usage examples of the naoqi.ALProxy.setParameter method in Python. If you have been struggling with questions like: what exactly does Python's ALProxy.setParameter do? How is it used? Where can I find examples of it? Then the curated code examples below may help. You can also explore further usage examples of naoqi.ALProxy, the class this method belongs to.


The following presents 15 code examples of the ALProxy.setParameter method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
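
Before the examples, the general pattern: create an ALProxy bound to a named NAOqi module, then call setParameter on it. A minimal sketch (the IP address is a placeholder and the parameter value is only illustrative):

from naoqi import ALProxy

NAO_IP = "192.168.1.10"  # placeholder robot address
NAO_PORT = 9559          # default NAOqi port

# Bind a proxy to one NAOqi module, then tune one of its named parameters.
tts = ALProxy("ALTextToSpeech", NAO_IP, NAO_PORT)
tts.setParameter("pitchShift", 1.1)  # slight pitch shift; 0 disables the effect
tts.say("hello")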

Example 1: __init__

# Required import: from naoqi import ALProxy [as alias]
# Alternatively: from naoqi.ALProxy import setParameter [as alias]
class Nao:
    def __init__(self, robotIP, port):
        # proxies
        self.memoryProxy = ALProxy("ALMemory", robotIP, port)
        self.managerProxy = ALProxy("ALBehaviorManager", robotIP, port)
        self.motionProxy = ALProxy("ALMotion", robotIP, port)
        self.animatedSpeechProxy = ALProxy("ALAnimatedSpeech", robotIP, port)
        self.navigationProxy = ALProxy("ALNavigation", robotIP, port)
        self.postureProxy = ALProxy("ALRobotPosture", robotIP, port)
        self.trackerProxy = ALProxy("ALTracker", robotIP, port)
        self.peopleProxy = ALProxy("ALPeoplePerception", robotIP, port)
        self.autonomousMovesProxy = ALProxy("ALAutonomousMoves", robotIP, port)
        self.autonomousMovesProxy.setExpressiveListeningEnabled(False)
        self.dialog_p = ALProxy('ALDialog', robotIP, port)
        self.tts = ALProxy("ALTextToSpeech", robotIP, port)
        self.tts.setParameter("speed", 90)
        self.basic_awareness = ALProxy("ALBasicAwareness", robotIP, port)
        self.peopleLocation = []
        self.trackerTarget = -1
        self.People_list = []
        self.People_undetected = []
        self.People_detected = []
        self.visible_detection = 5
        self.People_angles = []
        self.undetected_person_count = 50
        self.same_person_threshold = 0.2

        # publishers
        self.trackingPub = rospy.Publisher('/nao_behavior/tracking', Bool, queue_size=1)
        self.visualPub = rospy.Publisher('visual', MarkerArray, queue_size=5)
        rospy.Subscriber("/nao_behavior/enable_Diag", String, self.startDialog)
        rospy.Subscriber("/nao_behavior/reset_Diag", String, self.resetDialog)
        rospy.Subscriber("/nao_behavior/add/blocking", String, self.blocking_callback)
        rospy.Subscriber("/nao_behavior/add/nonblocking", String, self.nonblocking_callback)

        self.dialogLoaded = False

        Thread(target=self.start_PeopleDetectection).start()
        self.breath()

    def start_awareness(self):
        self.basic_awareness.setTrackingMode("Head")
        self.basic_awareness.setEngagementMode("FullyEngaged")
        self.basic_awareness.setStimulusDetectionEnabled("Sound", True)
        self.basic_awareness.setStimulusDetectionEnabled("Movement", True)
        self.basic_awareness.setStimulusDetectionEnabled("People", True)
        self.basic_awareness.setStimulusDetectionEnabled("Touch", False)
        self.basic_awareness.setParameter("MaxHumanSearchTime", float(2))
        self.basic_awareness.startAwareness()
        time.sleep(0.5)
        while not rospy.is_shutdown():
            # get current target being tracked
            try:
                self.trackerTarget = self.memoryProxy.getData('ALBasicAwareness/HumanTracked', 0)
                self.trackingPub.publish(not self.trackerProxy.isTargetLost())
            except Exception, e:
                print e
Developer: vainmouse, Project: Nao, Lines: 59, Source file: Nao2.py
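
A usage sketch for the class above, assuming a running ROS master and a reachable robot (the IP is a placeholder; the class itself registers its publishers and subscribers):

import rospy

rospy.init_node("nao_behavior")  # must happen before the class creates publishers/subscribers
nao = Nao("192.168.1.10", 9559)  # placeholder IP; 9559 is the default NAOqi port
rospy.spin()                     # keep the node alive for the callbacks and detection thread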

Example 2: setParameterLoudly

# Required import: from naoqi import ALProxy [as alias]
# Alternatively: from naoqi.ALProxy import setParameter [as alias]
def setParameterLoudly( nNumParam, value ):
    """
    change each parameter, loudly, so that we're sure it has been set
    """
    print( "INF: setParameterLoudly( %d, %s )" % ( nNumParam, str( value ) ) );
    avd = ALProxy( "ALVideoDevice",  strNaoIp, 9559 );
    for i in range( 5 ):
        bRet = avd.setParameter( 1, nNumParam, value );
        print( "bRet: %s" % str(bRet) );
        if( bRet ):
            break;
        time.sleep( 1.0 );
Developer: cnuNao, Project: naoOcculusDev, Lines: 14, Source file: nao_camera_viewer.py
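
A possible call site for this helper, assuming strNaoIp is defined as in the source file and that the parameter IDs come from naoqi's vision_definitions module (the values are illustrative). Note that the helper hardcodes camera index 1, the bottom camera, in its avd.setParameter call:

import vision_definitions

strNaoIp = "192.168.1.10"  # placeholder robot address

# Turn auto-exposure off, then pin a short fixed exposure, retrying until confirmed.
setParameterLoudly(vision_definitions.kCameraAutoExpositionID, 0)
setParameterLoudly(vision_definitions.kCameraExposureID, 10)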

Example 3: main

# Required import: from naoqi import ALProxy [as alias]
# Alternatively: from naoqi.ALProxy import setParameter [as alias]
def main(robot_IP, robot_PORT = 9559):
    global tts, audio, record, aup
    # ----------> Connect to robot <----------
    sd = ALProxy("ALSoundDetection", robot_IP, robot_PORT)
    tts = ALProxy("ALTextToSpeech", robot_IP, robot_PORT)
    audio = ALProxy("ALAudioDevice", robot_IP, robot_PORT)
    record = ALProxy("ALAudioRecorder", robot_IP, robot_PORT)
    aup = ALProxy("ALAudioPlayer", robot_IP, robot_PORT)
    mem = ALProxy('ALMemory', robot_IP, robot_PORT)
    print(mem.getDataListName())
    # ----------> recording <----------
    print 'start recording...'

    sd.setParameter("Sensibility", 0.9)
    audio.openAudioInputs()
    record_path = '/home/nao/audio/wista.wav'

    # noise_output = wave.open('sample.wav', 'w')
    # noise_output.setparams((1, 4, 16000, 0, 'NONE', 'not compressed'))
    #(nchannels, sampwidth, framerate, nframes, comptype, compname)
    # i.e. channels, sample width, frame rate, number of frames

    # noise_output = wave.open('file.wav', 'w')
    record.startMicrophonesRecording(record_path, 'wav', 16000, (1, 0, 0, 0))

    print("start!!!")
    time.sleep(35)
    print("stop!!!")
    record.stopMicrophonesRecording()

    # record_to_read = aup.playFile('/home/nao/audio/wista.wav', 0.1, 0)
    #
    r = sr.Recognizer()
    with sr.AudioFile('audio/wista.wav') as source:  # could also be sr.Microphone()
        try:
            audio_data = r.record(source)  # read the entire file; avoid shadowing the ALAudioDevice proxy
            print("You said " + r.recognize_sphinx(audio_data))
        except sr.UnknownValueError:
            tts.say("sorry")  # "I don't understand you, sorry!"
Developer: KatarzynaStudzinska, Project: ADlipiec, Lines: 41, Source file: raz.py
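
One caveat in the example above: ALAudioRecorder writes wista.wav on the robot's filesystem, while sr.AudioFile('audio/wista.wav') reads a local path, so the recording has to be copied over first. A hedged sketch of that transfer using paramiko's SFTP client (the default nao/nao credentials are an assumption; robot_IP is the value passed to main):

import paramiko

ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(robot_IP, username="nao", password="nao")  # assumed default credentials

# Copy the recording from the robot to the local 'audio' directory.
sftp = ssh.open_sftp()
sftp.get("/home/nao/audio/wista.wav", "audio/wista.wav")
sftp.close()
ssh.close()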

Example 4: ALProxy

# Required import: from naoqi import ALProxy [as alias]
# Alternatively: from naoqi.ALProxy import setParameter [as alias]
import time
from naoqi import ALProxy
import config
tts = ALProxy("ALTextToSpeech", config.ipnao, 9559)

tts.setParameter("pitchShift",0)
tts.setParameter("doubleVoice",0)
#tts.setParameter("doubleVoiceLevel",4)
tts.say("je test")
time.sleep(1)

tts.setParameter("pitchShift",0)
tts.say("je test")
time.sleep(1)


#Applies a pitch shifting to the voice
tts.setParameter("pitchShift", 1.5)
#Deactivates double voice
tts.setParameter("doubleVoice", 0.0)

tts.say("Pitch shift and double voice changed")
Developer: Cydev2306, Project: NaoWebApp, Lines: 24, Source file: testVoice.py
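
Since these TTS parameter changes persist across subsequent say calls on the robot, a script like this usually restores the defaults before exiting. A short sketch (per the ALTextToSpeech documentation, a value of 0 disables both pitchShift and doubleVoice):

# Restore the voice to its default state before the script exits.
tts.setParameter("pitchShift", 0)   # 0 disables pitch shifting
tts.setParameter("doubleVoice", 0)  # 0 disables the double voice
tts.say("voice restored")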

Example 5: emotional_demo_module

# Required import: from naoqi import ALProxy [as alias]
# Alternatively: from naoqi.ALProxy import setParameter [as alias]
class emotional_demo_module(ALModule):
    """ A simple module to change the eye LEDs colour to represent emotions.

    """
    def __init__(self, name):
        ALModule.__init__(self, name)

        # Create proxies for the instance.
        global memory
        memory = ALProxy("ALMemory")

        self.tts = ALProxy("ALTextToSpeech")
        self.leds = ALProxy("ALLeds")
        self.motion = ALProxy("ALMotion")

        # Write empty valence and arousal values to memory.
        valence = 0
        arousal = 0
        param1 = 'null'
        current_emotion = [(valence, arousal), ("valence_mood", "arousal_mood"), ("personality"), (param1, "param2")]
        memory.insertData("Emotion/Current", current_emotion)

        # Disable ALAutonomousLife to better demonstrate emotional actions.
        self.autonomous_life = ALProxy("ALAutonomousLife")
        if (self.autonomous_life.getState() != "disabled"):
            self.autonomous_life.setState("disabled")
        time.sleep(1.0)
        self.motion.wakeUp()

        # Run behaviour when a tactile touched.
        memory.subscribeToEvent("VAChanged", self.getName(), "express_current_emotion")

    def express_current_emotion(self, *_args):
        """ Expresses the current emotion from the current valence and arousal values in ALMemory.
                        
        """

        # SETUP
        # Events.
        memory.unsubscribeToEvent("VAChanged", self.getName())

        # Motion.
        motion_names = list()
        motion_times = list()
        motion_keys = list()
        
        # Eyes.
        eye_colour_lookup_table = [[(0xF82C35),(0xF82C35),(0xD55528),(0xD55528),(0xFF622B),(0xFF622B),(0xFFB047),(0xFFB047),(0xFFB047),(0xFFB047),(0xFFB047)],
                                [(0xF82C35),(0xF82C35),(0xD5542A),(0xD5542A),(0xE96A37),(0xFF8232),(0xFF8232),(0xFEB340),(0xFEB340),(0xFEB340),(0xFFFF00)],
                                [(0xF62D35),(0xF62D35),(0xF62D35),(0xE96A37),(0xE96A37),(0xFF984D),(0xFF8232),(0xFDC147),(0xFFB144),(0xFFFF00),(0xFFFF00)],
                                [(0xF72C32),(0xF72C32),(0xFF4048),(0xFE5761),(0xED8659),(0xFEB278),(0xFECE6A),(0xFECE6A),(0xFEE566),(0xFFFF00),(0xFFFF00)],
                                [(0xF6255C),(0xF6255C),(0xF9386F),(0xFD585E),(0xF78C84),(0xFFB379),(0xFEDEA1),(0xFEE67C),(0xFFE564),(0xFFFF00),(0xFFFF00)],
                                [(0xF6255C),(0xF93871),(0xF93871),(0xFE9EB9),(0xFE9EB9),(0xFFFFFF),(0xD0E7B3),(0xA5D277),(0x85B957),(0x6EAB34),(0x6EAB34)],
                                [(0xA82C72),(0xA82C72),(0xC03381),(0xDB5CA1),(0xE8A1C3),(0xD1E5F0),(0xCFDADE),(0x73B8B3),(0x87B958),(0x6EAB34),(0x6EAB34)],
                                [(0xA82C72),(0xA82C72),(0xC03381),(0x9C3F74),(0xB36893),(0xD1E4F2),(0x91C3E6),(0x91C3E6),(0x219A95),(0x00948E),(0x6BAC34)],
                                [(0xA82C72),(0xA82C72),(0x86305D),(0x86305D),(0x94C8D6),(0x93C8D8),(0x92C2E6),(0x3196CE),(0x009591),(0x009591),(0x009591)],
                                [(0xA62D72),(0x692850),(0x692850),(0x692850),(0x2D9DB1),(0x2C9FB2),(0x2F96CE),(0x0085BE),(0x00968D),(0x00968D),(0x00968D)],
                                [(0x692850),(0x692850),(0x692850),(0x692850),(0x037F9B),(0x037F9B),(0x0085BE),(0x0085BE),(0x0085BE),(0x0085BE),(0x0085BE)]
                                ]

        # Speech.
        # Speech parameter lookup table. Format (pitch modifier, volume modifier)
        speech_parameter_lookup_table = [((1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00)),
                                        ((1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00)),
                                        ((1.00,0.75),(0.81,0.75),(0.00,0.00),(0.00,0.00),(-0.25,0.00),(0.50,1.00),(0.62,0.50),(0.75,),(0.75,),(0.75,0.75),(1.00,0.75)),
                                        ((1.00,0.50),(0.63,0.50),(-0.20,-0.50),(-1.00,-1.00),(-0.25,-0.50),(0.25,0.50),(0.25,0.50),(0.50,),(0.50,0.50),(0.50,0.50),(0.00,0.50)),
                                        ((1.00,0.25),(0.44,0.25),(0.40,-0.50),(0.30,-0.50),(0.25,-0.50),(0.25,0.00),(0.25,0.00),(0.25,0.25),(0.25,0.25),(0.25,0.25),(0.00,0.25)),
                                        ((1.00,0.00),(0.25,0.00),(0.10,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.10,0.00),(0.10,0.00),(0.10,0.00),(0.00,0.00)),
                                        ((0.25,-0.25),(0.06,-0.25),(-0.10,-0.25),(-0.20,0.00),(-0.20,0.00),(-0.10,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00)),
                                        ((-0.25,-0.50),(-0.13,-0.50),(-0.35,-0.50),(-0.20,-0.25),(-0.10,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00)),
                                        ((-0.25,-0.75),(-0.31,-0.75),(-0.35,-0.75),(-0.10,-0.50),(-0.10,-0.25),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00)),
                                        ((-0.50,-1.00),(-0.50,-1.00),(-0.40,-1.00),(-0.20,-0.75),(-0.10,-0.50),(0.00,-0.25),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00)),
                                        ((-0.50,-1.00),(-0.50,-1.00),(-0.50,-1.00),(-0.25,-0.75),(0.00,-0.50),(0.00,-0.25),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00))]
                      
        # CALCULATIONS
        # Get current emotional values and generic calcs.
        current_emotion = memory.getData("Emotion/Current")
        print "current_emotion (module): ", current_emotion
        valence = current_emotion[0][0]
        arousal = current_emotion[0][1]
        emotion_name = current_emotion[3][0]
        # Valence and arousal are normalised between -1 and 1, with an axis intersection at (0, 0). Convert axis intersection
        # to index.
        valence_index = (int(valence * 5) + 5)
        arousal_index = 10 - (int(arousal * 5) + 5)

        # Speech.
        # The pitch and volume modifiers need to be scaled (final scaling value to be determined); e.g. a value of 4 divides the modifier by 4, giving +/- 25% of the default value.
        speech_parameter_scaling_value = 4
        string_to_say = "I am feeling " + emotion_name
        scaled_pitch_modifier = 1 + (speech_parameter_lookup_table[arousal_index][valence_index][0] / speech_parameter_scaling_value)
        # NAO can only increase pitch! So need to check if a pitch reduction required and negate it. Range 1.0 - 4.0.
        if scaled_pitch_modifier < 1.0:
            scaled_pitch_modifier = 1.0
        # NAO volume (gain) range 0.0 - 1.0.
        scaled_volume_modifier = 0.5 + (speech_parameter_lookup_table[arousal_index][valence_index][1] / speech_parameter_scaling_value)
        self.tts.setParameter("pitchShift", scaled_pitch_modifier)
        self.tts.setVolume(scaled_volume_modifier)
        
        # Eyes.        
#......... remainder of the code omitted .........
Developer: Sandy4321, Project: nao-emotional-framework, Lines: 103, Source file: standalone_emotional_demo.py
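
The index arithmetic above maps valence and arousal, both normalized to [-1, 1], onto the 11x11 lookup tables. A small standalone check of that mapping, copied from the example's calculation:

def va_to_indices(valence, arousal):
    # valence and arousal in [-1, 1]; the tables are 11x11 with (0, 0) at the centre cell
    valence_index = int(valence * 5) + 5
    arousal_index = 10 - (int(arousal * 5) + 5)
    return arousal_index, valence_index

print va_to_indices(0.0, 0.0)    # (5, 5): neutral centre cell
print va_to_indices(1.0, 1.0)    # (0, 10): high arousal, positive valence
print va_to_indices(-1.0, -1.0)  # (10, 0): low arousal, negative valence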

Example 6: ALProxy

# Required import: from naoqi import ALProxy [as alias]
# Alternatively: from naoqi.ALProxy import setParameter [as alias]
import naoqi
import sys

from naoqi import ALProxy
IP   = "10.0.1.3"
PORT = 9559
loggerProxy = ALProxy("ALTextToSpeech", IP, PORT)
loggerProxy.setParameter("speed", 20)
loggerProxy.setParameter("pitchShift", 1.0)
print loggerProxy.getAvailableVoices()
#loggerProxy.setVoice("Kenny22Enhanced")
def say(speech):
    loggerProxy.say(speech)
Developer: pieterwolfert, Project: HRI, Lines: 15, Source file: naoqi_speech.py

Example 7: range

# Required import: from naoqi import ALProxy [as alias]
# Alternatively: from naoqi.ALProxy import setParameter [as alias]
if( bDrawInfo ):
    nThickness = 1;
    font = cv.InitFont( cv.CV_FONT_HERSHEY_SIMPLEX, 0.5, 0.5, 0, nThickness, 8 );   
    
if( bMoveEyes ):
    import abcdk.config
    abcdk.config.bInChoregraphe = False; # access to the right proxy
    abcdk.config.strDefaultIP = strNaoIp;

    import abcdk.motiontools
    import abcdk.romeo
    abcdk.motiontools.eyesMover.setStiffness();
    abcdk.romeo.setCameraGroup( 1, bUseFuse = False );
    bEyesFlipFlop = True;
    for nNumCamera in range(2):
        avd.setParameter( nNumCamera, kCameraAutoExpositionID, 0 );
        avd.setParameter( nNumCamera, kCameraExposureID, 10 );
        avd.setParameter( nNumCamera, kCameraGainID, 511 );

    

while( not bKeyPressed ):
    if( False ):
        print( "camera current params:" );
        for i in range( 34 ):
            try:
                retVal = avd.getParam( i );
                print( "param %d: %s" % ( i, str( retVal ) ) );
            except BaseException, err:
                print( "DBG: get param(%d) => error: %s" % ( i, str( err ) ) );            
    #~ try:
Developer: cnuNao, Project: naoOcculusDev, Lines: 33, Source file: nao_camera_viewer.py
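
This fragment leans on names defined earlier in its source file (avd, strNaoIp, the boolean flags, and the kCamera...ID constants). A plausible reconstruction of that missing setup, assuming the constants are the ones shipped in naoqi's vision_definitions module:

from naoqi import ALProxy
from vision_definitions import kCameraAutoExpositionID, kCameraExposureID, kCameraGainID

strNaoIp = "192.168.1.10"  # placeholder robot address
bDrawInfo = True           # assumed flags from the omitted part of the script
bMoveEyes = False
bKeyPressed = False

# ALVideoDevice proxy behind the avd.setParameter/getParam calls above.
avd = ALProxy("ALVideoDevice", strNaoIp, 9559)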

Example 8: EmailRecognitionModule

# Required import: from naoqi import ALProxy [as alias]
# Alternatively: from naoqi.ALProxy import setParameter [as alias]
class EmailRecognitionModule(ALModule):
	""" A simple module able to react to
	sound detection events
	"""
	
	# Constructor of EmailRecognitionModule
	def __init__(self,name):
		ALModule.__init__(self,name)
		
		print "[Core agent server] - Server initialization"
		
		# Initialization of ROS node
		rospy.init_node('acore_email_server')
		self.moduleName = name
		
		# Initialization of Naoqi modules and ROS services
		self.initALModule()
		
		self.setVariables()
		self.openServices()
		
		print "[Core agent server] - Waits for clients ..."
		
		
	
	# Initialization of Naoqi modules
	def initALModule(self):
		print "[Core agent server] - Initialization of Naoqi modules"
		
		print "[Core agent server] - ALMemory proxy initialization"		
		global prox_memory
		prox_memory = ALProxy("ALMemory")
		if prox_memory is None:
			rospy.logerr("[Core agent server] - Could not get a proxy to ALMemory")
			exit(1)
			
		print "[Core agent server] - ALTextToSpeech proxy initialization"
		self.prox_tts = ALProxy("ALTextToSpeech")
		if self.prox_tts is None:
			rospy.logerr("[Core agent server] - Could not get a proxy to ALTextToSpeech")
			exit(1)
			
		print "[Core agent server] - ALSoundDetection proxy initialization"
		self.prox_sd = ALProxy("ALSoundDetection")
		if self.prox_sd is None:
			rospy.logerr("[Core agent server] - Could not get a proxy to ALSoundDetection")
			exit(1)
			
		print "[Core agent server] - ALSpeechRecognition proxy initialization"
		self.prox_sprec = ALProxy("ALSpeechRecognition")
		if self.prox_sprec is None:
			rospy.logerr("[Core agent server] - Could not get a proxy to ALSpeechRecognition")
			exit(1)
		
		print "[Core agent server] - ALAudioRecorder proxy initialization"
		self.prox_ar = ALProxy("ALAudioRecorder")
		if self.prox_ar is None:
			rospy.logerr("[Core agent server]- Could not get a proxy to ALAudioRecorder")
			exit(1)
	
	
	# Setting variables
	def setVariables(self):
		print "[Core agent server] - Setting variables"
		self.isEmailFound = False
		self.stopListening = False
		self.email_address = "[email protected]"
		self.prox_sd.setParameter("Sensitivity",0.7)
		self.period=500
		self.memValue = "LastWordRecognized"
		# Temporary database of recognized words
		self.database = ["John", "Max", "Rapp", "Nao", "Alarm", "Exit"]
		self.prox_sprec.pause(True)
		self.prox_sprec.setVocabulary(self.database, False)
		self.prox_sprec.pause(False)
		
		
		
		self.recordedExtention="ogg"
		# Sample rate of recorded audio (in Hz)
		self.sampleRate = 16000
		# Creating channel
		self.channels = []
		#Channels setup
		self.channels.append(1)
		self.channels.append(1)
		self.channels.append(1)
		self.channels.append(1)
		
		self.recordingTime = 3.0
		
	
	def setWordDatabase(self, database):
		print "[Core agent server] - Setting word database to recognize a spoken word"
		self.prox_sprec.pause(True)
		self.prox_sprec.setVocabulary(database, False)
		self.prox_sprec.pause(False)
		
	# Initialization of ROS services
	def openServices(self):
#......... remainder of the code omitted .........
Developer: rapp-project, Project: rapp-robot-nao, Lines: 103, Source file: acore_email_server.py
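
ALModule subclasses such as this one are normally instantiated behind an ALBroker so that NAOqi can route event callbacks back to them by name. A minimal bootstrap sketch following that standard naoqi pattern (the IP is a placeholder; by convention the global variable name matches the string passed to the constructor):

from naoqi import ALBroker

# Connect this Python process to the robot's NAOqi so callbacks can reach the module.
myBroker = ALBroker("pythonBroker", "0.0.0.0", 0, "192.168.1.10", 9559)

EmailRecognition = EmailRecognitionModule("EmailRecognition")
rospy.spin()  # the module also runs a ROS node, so keep the process alive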

Example 9:

# Required import: from naoqi import ALProxy [as alias]
# Alternatively: from naoqi.ALProxy import setParameter [as alias]
			# todo: replace time data with parametric values, or see ipnb for other ideas.
			namesMotion = []
			timesMotion = []
			keysMotion = []
			movementList = mtmd.leftArmMovementList[self.currentStateActionLeftArm] + mtmd.rightArmMovementList[self.currentStateActionRightArm]
			for n, t, k in movementList:
				namesMotion.append(n)
				timesMotion.append(t)
				keysMotion.append(k)
			# Build walk.
			walk1 = self.walkDictionary[self.currentStateWalk]
			# Creating a reverse move list to return NAO to start position'ish!
			walk2 = [i * -1 for i in walk1]

		# Say and do.
		speechProxy.setParameter("pitchShift", laughPitch)
		speechProxy.setParameter("doubleVoiceLevel", doubleVoiceLaugh)
		if motionEnabled:
			""" Only does this when the target tickle area is tickled."""
			try:
				x = walk1[0]
				y = walk1[1]
				theta = walk1[2]
				robotMotionProxy.moveTo(x, y, theta)
				robotMotionProxy.waitUntilMoveIsFinished()
			except Exception, e:
				print "robotMotionProxy error: ", e
			try:
				robotMotionProxy.post.angleInterpolation(namesMotion, keysMotion, timesMotion, True)
			except Exception, e:
				print "robotMotionProxy error: ", e
Developer: mikemcfarlane, Project: TickleMeNAO, Lines: 33, Source file: Markov_tickles.py
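
Like Example 7, this fragment depends on names bound earlier in its source (mtmd, speechProxy, robotMotionProxy, laughPitch, doubleVoiceLaugh, motionEnabled). A hedged sketch of the setup it implies; the mtmd module name is hypothetical, inferred from the source file Markov_tickles.py, and the parameter values are illustrative:

from naoqi import ALProxy
import Markov_tickles_motion_data as mtmd  # hypothetical module holding the movement lists

NAO_IP = "192.168.1.10"  # placeholder robot address
speechProxy = ALProxy("ALTextToSpeech", NAO_IP, 9559)
robotMotionProxy = ALProxy("ALMotion", NAO_IP, 9559)

laughPitch = 1.5         # illustrative values for the voice parameters used above
doubleVoiceLaugh = 1.0
motionEnabled = True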

Example 10: __init__

# Required import: from naoqi import ALProxy [as alias]
# Alternatively: from naoqi.ALProxy import setParameter [as alias]
class NaoBehavior:
    def __init__(self, robotIP ,port):

        # behavior lists
        self.l_type = []
        self.l_behaviors = []

        # auto
        self.breathEnabled = False
        self.idletime = 8  # waits for half a second so 10  = 5 seconds
        self.idlecount = 0

        # activation
        self.active = False
        self.resting = False
        self.speechini = False
        self.headOdom = [0,0]
        self.stop = False
        self.headlock =False
        self.enableAutoRotate = True



        # proxies
        self.memoryProxy = ALProxy("ALMemory", robotIP, port)

        self.managerProxy = ALProxy("ALBehaviorManager", robotIP, port)
        self.motionProxy = ALProxy("ALMotion", robotIP, port)
        self.animatedSpeechProxy = ALProxy("ALAnimatedSpeech", robotIP, port)
        self.navigationProxy = ALProxy("ALNavigation", robotIP, port)
        self.postureProxy = ALProxy("ALRobotPosture", robotIP, port)
        self.asr = ALProxy("ALSpeechRecognition", robotIP, port)

        self.autonomousMovesProxy = ALProxy("ALAutonomousMoves", robotIP, port)
        self.autonomousMovesProxy.setExpressiveListeningEnabled(False)
        self.memValue = "WordRecognized"
        self.tts = ALProxy("ALTextToSpeech", robotIP, port)
        self.tts.setParameter("speed", 100)

        #topics
        rospy.Subscriber("/nao_behavior/add", String, self.run_callback)
        self.statesub = rospy.Subscriber("/joint_states", JointState, self.jointstateC)

        rospy.Subscriber("/nao_audio/audio_source_localization", AudioSourceLocalization, self.audio_callback)
        self.speechPub = rospy.Publisher('/nao_behavior/speech_detection', Bool, queue_size=5)

        #speech ini
        rospy.Subscriber("/nao_behavior/head", Float32MultiArray, self.move)
        self.breath(True)

    def move(self, data):
        if self.headlock == True:
            return
        self.headlock = True

        # tilt the body instead if the position is too far to the edge
        if self.enableAutoRotate:
            if data.data[0] > 2:
                self.rotate(0.1)
            elif data.data[0] < -2:
                self.rotate(-0.1)
            self.headlock = False
            return

        Id = self.motionProxy.post.setAngles(["HeadYaw", "HeadPitch"], [data.data[0], data.data[1]], data.data[2])
        self.motionProxy.wait(Id, 0)
        self.headlock = False


    def headmove(self, angles, speed):
        if self.headlock == True:
            return
        self.headlock = True
        Id = self.motionProxy.post.setAngles(["HeadYaw", "HeadPitch"], angles, speed)
        self.motionProxy.wait(Id, 0)
        self.headlock = False


    def search(self):
        prevodom = self.headOdom
        self.headmove([-0.8,self.headOdom[1]],0.1)
        time.sleep(2)

        if self.stop == False :
            self.headmove([0.8, self.headOdom[1]],0.1)
            time.sleep(2)
        #return to original
        if self.stop == False:
            self.headmove(prevodom,0.1)

        self.stop = False


    def audio_callback(self,msg):

        if self.speechini == False :
            self.speechini = True
            self.asr.pause(True)
            self.vocabulary = ["yes", "no", "please", "hello","the","be","to","of","and","in","that","have","it","robot"]

#......... remainder of the code omitted .........
Developer: vainmouse, Project: Nao, Lines: 103, Source file: behaviors3.py

Example 11: while

# Required import: from naoqi import ALProxy [as alias]
# Alternatively: from naoqi.ALProxy import setParameter [as alias]
while(True):
    foundFace = False
    faceData = FacialRecognitionModule.getFaceData(IP, PORT)

    while(not foundFace):
        #Get data until face is found
        while (faceData is None or len(faceData) == 0):
            print ("looking for face")
            faceData = FacialRecognitionModule.getFaceData(IP, PORT)

        if(FacialRecognitionModule.getFaceConfidince(faceData) > 0.4):
            foundFace = True
        else:
            print ("conf found")
            faceData = FacialRecognitionModule.getFaceData(IP, PORT)


    faceName = FacialRecognitionModule.getFaceName(faceData)


    #Make proxy to speech
    speechProxy = ALProxy("ALTextToSpeech", IP, PORT)
    if(faceName == "Josh"):
        speechProxy.setParameter("pitchShift", 1.5)
        speechProxy.say("NO! NO! HELP! Please don't hurt me anymore Josh")
    else:
        speechProxy.setParameter("pitchShift", 1.0)
        speechProxy.say("Hello " + str(faceName) + " I really like you.")

    time.sleep(1)
Developer: afranka69, Project: ShowRobbie, Lines: 32, Source file: RecognizeFace.py

Example 12: SoundTrackerModule

# Required import: from naoqi import ALProxy [as alias]
# Alternatively: from naoqi.ALProxy import setParameter [as alias]
class SoundTrackerModule(ALModule) :
    def __init__(self):
        ALModule.__init__(self, "SoundTracker")
        self.modnom = "SoundTracker"
        self.tts = ALProxy("ALTextToSpeech", "lain.local", 9559)
        self.robot = ALProxy("ALMotion", "lain.local", 9559)
        self.locator = ALProxy("ALAudioSourceLocalization", "lain.local", 9559)
        self.mem = ALProxy("ALMemory", "lain.local", 9559)
        self.locmut = threading.Semaphore(0)
        self.locator.setParameter("EnergyComputation", 1)
        self.locator.setParameter("Sensibility", .95)
        self.video = ALProxy("ALVideoDevice", "lain.local", 9559)
        self.resolution = 2
        # self.colorSpace = 11
        # self.resolution = vision_definitions.kQVGA
        self.colorSpace = vision_definitions.kYUVColorSpace
        self.server = socket.socket()
        self.listener = ConnectionListener(self)
        self.nameID = self.video.subscribe("SoundTracker", self.resolution, self.colorSpace, 30)
    
    def connect(self):
        self.listener.start()
    
    def disconnect(self):
        self.listener.disconnect()
    
    def subscribe(self):
        self.send('subscribing to onSoundDetected')
        self.tts.say('subscribing to on sound detected in')
        self.tts.post.say('three')
        time.sleep(1);
        self.tts.post.say('two')
        time.sleep(1);
        self.tts.post.say('one')
        time.sleep(1);
        self.mem.subscribeToEvent("SoundDetected",
            self.modnom,
            "onSoundDetected")
        self.send('onSoundDetected subscribed')
    
    def unsubscribe(self):
        self.mem.unsubscribeToEvent("SoundDetected",
            self.modnom)
        self.send('onSoundDetected unsubscribed')
    
    def onSoundDetected(self, *_args):
        self.mem.unsubscribeToEvent("SoundDetected",
            self.modnom)
        self.send('onSoundDetected start')
        # self.tts.say('sound detected')
        
        self.send(str(_args))
        
        self.send('\tsubscribing to onSoundLocated')
        self.mem.subscribeToEvent("ALAudioSourceLocalization/SoundLocated",
            self.modnom,
            "onSoundLocated")
        self.send('\tonSoundDetected sleep')
        self.tcount = 0;
        self.loccount = 0;
        self.wakecount = 5;
        self.robot.wakeUp()
        self.locmut.release()
        while self.wakecount > 0 :
            # self.tts.say(str(self.wakecount))
            time.sleep(1)
            self.tcount = self.tcount + 1
            self.locmut.acquire()
            print("\tonSoundLocated check: (wc,lc,tc) = " + str(self.wakecount) + "," + str(self.loccount) + "," + str(self.tcount))
            if self.loccount > 0 :
                self.loccount = 0
                self.wakecount = 5
            elif self.tcount > 8 :
                self.wakecount = 0
            else :
                self.wakecount = self.wakecount - 1
            self.locmut.release()
        self.locmut.acquire()
        self.robot.rest()
        self.send('\tonSoundDetected awake')
        self.mem.unsubscribeToEvent("ALAudioSourceLocalization/SoundLocated",
            self.modnom)
        self.send('\tonSoundLocated unsubscribed')
        
        self.send('onSoundDetected end')
        self.mem.subscribeToEvent("SoundDetected",
            self.modnom,
            "onSoundDetected")
    
    def onSoundLocated(self, *_args):
        if _args[1][1][2] > .9 :
            self.locmut.acquire()
            self.mem.unsubscribeToEvent("ALAudioSourceLocalization/SoundLocated",
                self.modnom)
            self.send('\tonSoundLocated start')
            self.tts.post.say('sound located')
            self.loccount = self.loccount + 1
            
            self.send(str(_args))
            
#......... remainder of the code omitted .........
Developer: StetsonG, Project: NAOLearn, Lines: 103, Source file: rtserver.py

Example 13: VideoRobot

# Required import: from naoqi import ALProxy [as alias]
# Alternatively: from naoqi.ALProxy import setParameter [as alias]
class VideoRobot(object):

    FPS = 5
    RESOLUTION = vision_definitions.kVGA
    COLOR_SPACE = vision_definitions.kRGBColorSpace

    def __init__(self):
        self.video_proxy = None
        self.cameras = [None, None] #two cameras on Nao, top and bottom

    def connect(self, host, port):
        print 'Video - Connecting to robot on {0}:{1}...'.format(host, port)
        try:
            self.video_proxy = ALProxy("ALVideoDevice", host, port)
        except Exception as exception: # pylint: disable=broad-except
            raise Exception('Could not create proxy: {0}'.format(exception))
        self._subscribe_to_cameras()

    def _subscribe_to_cameras(self):
        for camera_index in range(len(self.cameras)):
            camera_name = 'nc_camera_{0}'.format(camera_index)
            camera = self.video_proxy.subscribeCamera(camera_name, camera_index, self.RESOLUTION, self.COLOR_SPACE, self.FPS)
            self.cameras[camera_index] = camera
            if camera is None:
                print 'Failed to subscribe to camera: {0}'.format(camera_index)

    def get_remote_image(self, use_bottom_camera=False):
        camera_index = vision_definitions.kBottomCamera if use_bottom_camera else vision_definitions.kTopCamera
        camera = self.cameras[camera_index]
        capture = self.video_proxy.getImageRemote(camera)
        if capture is None:
            print 'Failed to retrieve image remotely from camera: {0}'.format(camera_index)
        image = None
        if capture is not None:
            width = capture[0]
            height = capture[1]
            data = capture[6]
            try:    
                image = self._convert_capture_to_image(width, height, 'RGB', data)
                #timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H-%M-%S')
                #image.save(timestamp + '.png', "PNG")
                #image.show()
            except IOError as error: 
                print 'Error saving image: {0}'.format(error)
            finally:
                self.video_proxy.releaseImage(camera)
        return image

    @staticmethod
    def _convert_capture_to_image(width, height, mode, data):
        image = None
        try:
            image = Image.frombytes(mode, (width, height), data)
        except ValueError as error:
            raise ValueError('Invalid parameter passed to image creation: {0}'.format(error))
        except TypeError as error:
            raise TypeError('Invalid type passed to image creation: {0}'.format(error))
        return image

    def set_auto_exposure(self, target_exposure, set_bottom_camera=False):
        if target_exposure < 0 or target_exposure > 3:
            raise ValueError('target_exposure must be between 0 and 3')

        camera_index = vision_definitions.kBottomCamera if set_bottom_camera else vision_definitions.kTopCamera
        success = self.video_proxy.setParameter(camera_index, vision_definitions.kCameraExposureAlgorithmID, target_exposure)

        if success:
            print 'Successfully changed camera {0} exposure to {1}'.format(camera_index, target_exposure)
        else:
            print 'Failed to change camera {0} exposure to {1}'.format(camera_index, target_exposure)
        return success

    def __del__(self):
        print 'Unsubscribing from cameras...'
        for camera_index in range(len(self.cameras)):
            camera = self.cameras[camera_index]
            if camera is not None:
                unsubscribe_success = self.video_proxy.unsubscribe(camera)
                if not unsubscribe_success:
                    print 'Failed to unsubscribe to camera: {0}'.format(camera_index)
Developer: lushdog, Project: NaoController, Lines: 82, Source file: video_robot.py
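
A usage sketch for the class above (placeholder IP; get_remote_image returns a PIL Image, or None on failure):

robot = VideoRobot()
robot.connect("192.168.1.10", 9559)  # placeholder IP, default NAOqi port

# Select auto-exposure algorithm 1 on the top camera, then grab one frame.
robot.set_auto_exposure(1)
image = robot.get_remote_image()
if image is not None:
    image.save("top_camera.png", "PNG")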

Example 14: ALProxy

# Required import: from naoqi import ALProxy [as alias]
# Alternatively: from naoqi.ALProxy import setParameter [as alias]
#NAO Calculator
from Tkinter import * #from the Tkinter module is everything imported
from naoqi import ALProxy #from naoqi module ALProxy is imported

# ip variable
ip = "192.168.0.106" #ip address od robot
port = 9559 #port number

tts = ALProxy("ALTextToSpeech", ip, port) #proxy creation on the tts module
tts.setParameter("pitchShift", 1.0) #sets the pitch shift of Nao's voice
tts.setVolume(1.0) # sets the volume of speech


def frame(root, side): #this is a method used for placing buttons into a frame
    frame = Frame(root) #Frame is a widget used for grouping and organizing other widgets in a convenient way. It works like a container, responsible for arranging the position of other widgets.
    frame.pack(side=side, expand=YES, fill=BOTH) #pack is a geometry manager which organizes widgets in blocks before placing them in the parent widget.
    #side determines which side of the parent widget to pack against
    #When expand is set to true, widget expands to fill any space not otherwise used in widget's parent.
    #Fill is used for filling space in X or Y or both directions
    return frame #returns a frame widget


def button(root, side, text, command=None): #method used for creating buttons
    frame = Button(root, text=text, command=command) #The Button widget is used to add buttons in a Python application. These buttons can display text or images that convey the purpose of the buttons. You can attach a function or a method to a button which is called automatically when you click the button.
    frame.pack(side=side, expand=YES, fill=BOTH) #The same as in frame method
    return frame #returns a button widget


class Calculator(Frame): #this class inherits from the Frame container widget
    def __init__(self): #constructor of the Calculator class. It has a self attribute referring to the instance of the class
        Frame.__init__(self) #calling of the Frame contructor
Developer: peterhalachan, Project: nao, Lines: 33, Source file: NAO+Calculator.py

Example 15: Robot

# Required import: from naoqi import ALProxy [as alias]
# Alternatively: from naoqi.ALProxy import setParameter [as alias]
class Robot(ALModule):
	def __init__( self, strName, address = "bobby.local", port = 9559):
		ALModule.__init__( self, strName )

		# text to speech
		self.tts = ALProxy("ALTextToSpeech", address, port)

		# memory
		self.mem = ALProxy("ALMemory", address, port)

		# robot movement
		self.motion = ALProxy("ALMotion", address, port)
		self.pose = ALProxy("ALRobotPosture", address, port)

		# face tracking
		self.track = ALProxy("ALFaceTracker", address, port)
		# initialize face-tracking to following with head movement only
		self.track.setWholeBodyOn(False)

		# gaze analysis
		self.gaze = ALProxy("ALGazeAnalysis", address, port)
		# set highest tolerance for determining if people are looking at the robot because those people's IDs are the only ones stored
		self.gaze.setTolerance(1)
		# start writing gaze data to robot memory
		self.gaze.subscribe("_")

		# camera
		self.cam = ALProxy("ALVideoDevice", address, port)

		# leds
		self.leds = ALProxy("ALLeds", address, port)

		self.colors = {
			"pink": 0x00FF00A2,
			"red": 0x00FF0000,
			"orange": 0x00FF7300,
			"yellow": 0x00FFFB00,
			"green": 0x000DFF00,
			"blue": 0x000D00FF,
			"purple": 0x009D00FF
		}

		# sound detection
		self.sound = ALProxy("ALSoundDetection", address, port)
		# initialize sound sensitivity
		self.sound.setParameter("Sensibility", 0.95)

	def __del__(self):
		print "End Robot Class"


	def say(self, text, block = True):
		"""
		Uses ALTextToSpeech to vocalize the given string.
		If "block" argument is False, makes call asynchronous,
		i.e. does not wait until it finishes talking to move on to next part of the code.
		"""

		if block:
			self.tts.say(text)

		else:
			self.tts.post.say(text)

	def wake(self, stiffness = 1.0):
		"""
		Turns stiffnesses on, goes to Crouch position, and lifts head
		"""

		self.motion.stiffnessInterpolation("Body", stiffness, 1.0)
		self.pose.goToPosture("Crouch", 0.2)
		self.turnHead(pitch = math.radians(-10))

	def rest(self):
		"""
		Goes to Crouch position and turns robot stiffnesses off
		"""

		self.motion.rest()

	def sit(self):
		"""
		Goes to Crouch position
		"""

		self.pose.goToPosture("Crouch")

	def stand(self):
		"""
		Robot stands
		"""

		self.pose.goToPosture("Stand", 0.4)

	def turnHead(self, yaw = None, pitch = None, speed = 0.2):
		"""
		Turns robot head to the specified yaw and/or pitch in degrees at the given speed.
		Yaw can range from 119.5 deg (left) to -119.5 deg (right) and pitch can range from 38.5 deg (down) to -29.5 deg (up).
		"""

#......... remainder of the code omitted .........
Developer: HiLT-ISpy, Project: robot-control, Lines: 103, Source file: robot.py


Note: the naoqi.ALProxy.setParameter method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by many developers, and copyright of the source code belongs to the original authors. For distribution and use, please refer to the license of the corresponding project; do not reproduce without permission.