

Python ALProxy.setVolume Method Code Examples

This article collects and summarizes typical usage examples of the naoqi.ALProxy.setVolume method in Python. If you are wondering what ALProxy.setVolume does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of naoqi.ALProxy, the class this method belongs to.


The following shows 14 code examples of the ALProxy.setVolume method, sorted by popularity by default.
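
Before diving into the examples, here is a minimal sketch of the pattern they all share: create an ALTextToSpeech proxy with ALProxy and pass setVolume a float in the range 0.0 to 1.0. The IP address below is a placeholder for illustration; replace it with your robot's actual address.

# Minimal sketch (assumed robot address; NAOqi listens on port 9559 by default)
from naoqi import ALProxy

NAO_IP = "192.168.1.10"   # placeholder IP address
NAO_PORT = 9559

tts = ALProxy("ALTextToSpeech", NAO_IP, NAO_PORT)
tts.setVolume(0.5)        # speech gain, 0.0 (mute) to 1.0 (maximum)
tts.say("The volume is now at fifty percent.")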

Example 1: exe

# Required module import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import setVolume [as alias]
 def exe(self, args=None, addr=None):
     
     # get proxies
     tts = ALProxy("ALTextToSpeech", Settings.naoHostName, Settings.naoPort)
     
     # set system volume
     if len(args) > 0:
         tts.setVolume( float(args[0]) )
Developer: Adriencerbelaud, Project: NAO-Communication-server, Lines of code: 10, Source: cmdSetSpeechVolume.py

Example 2: setSpeechVol

# Required module import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import setVolume [as alias]
def setSpeechVol( vol ):
	
	vol = float(vol / 100.0)
	
	tts = ALProxy( "ALTextToSpeech", config.naoIP, config.naoPort )
	print "speech volume set to " + str( int(vol * 100) ) + "%"
	try: tts.setVolume( vol )
	except: return False
	
	return True
Developer: hanneseilers, Project: NAOStory, Lines of code: 12, Source: command.py

Example 3: TouchMeToSpeakModule

# Required module import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import setVolume [as alias]
class TouchMeToSpeakModule(ALModule):
    # Method declaration.
    def __init__(self, name):
        ALModule.__init__(self, name)

        # Instantiate a tts object of class ALTextToSpeech.
        self.tts = ALProxy("ALTextToSpeech")

        # Instance variable.
        global memory
        # Instantiate a memory object of class ALMemory.
        memory = ALProxy("ALMemory")
        # Call the subscribeToEvent method...
        memory.subscribeToEvent("FrontTactilTouched",  # On this event...
                                "TouchMeToSpeak",      # ...of this instance...
                                "onTouched")           # ...trigger a call
                                                       # ...to this method.

    # Method called on the event.
    def onTouched(self, *_args):
        # Unsubscribe from the event...
        # ...to avoid a conflict?
        memory.unsubscribeToEvent("FrontTactilTouched",
                                  "TouchMeToSpeak")

        # *** Main action of the method ***
        # Here Nao says the string passed to the say method.
        global etat
        if etat == 1:
            self.tts.setLanguage("french")
            self.tts.setVolume(0.2)
            self.tts.say("Je suis dans le premier état")
            etat = 2
        elif etat == 2:
            self.tts.setLanguage("french")
            self.tts.setVolume(0.2)
            self.tts.say("Je suis dans le second état")
            etat = 1

        # ***************************************

        # Re-subscribe to the event.
        memory.subscribeToEvent("FrontTactilTouched",
                                "TouchMeToSpeak",
                                "onTouched")
Developer: NSenaud, Project: Reco-Follow, Lines of code: 47, Source: main.py

Example 4: NaoSpeak

# Required module import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import setVolume [as alias]
class NaoSpeak(ALModule):
    """docstring for NaoSpeak"""
    def __init__(self, name, message):
    
        ALModule.__init__(self, name)

        self.message = message

        self.logs = logs.logs()
        self.tts = ALProxy("ALTextToSpeech")
        self.logs.display("Subscribed to an ALTextToSpeech proxy",
                          "Good")

        

    def say(self, message, volume=0.3, language='french'):
        self.tts.setLanguage(language)
        self.tts.setVolume(volume)
        self.tts.say(message)
Developer: NSenaud, Project: NaoChallengeProject, Lines of code: 21, Source: ihm.py

Example 5: __init__

# Required module import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import setVolume [as alias]
class wsNaoAudio:
  _robotip=""
  _audio=None
  _speech=None
  def __init__(self,ip="192.168.143.101"):
    self._robotip=ip
    self._audio=ALProxy("ALAudioPlayer",self._robotip,9559)
    self._speech=ALProxy("ALTextToSpeech",self._robotip, 9559)
    pass
  def say(self,s):
    self._speech.post.say(s)
  def setVolume(self,n):
    if n>1.0:n=1
    if n<0.0:n=0
    self._speech.setVolume(n)
  def setLanguage(self,l):
    pass
  def playMusic(self,s):
    self._audio.post.playFile(s)
  def stopMusic(self):
    self._audio.stopAll()
Developer: winxos, Project: nao, Lines of code: 23, Source: wsNaoAudio.py

Example 6: int

# Required module import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import setVolume [as alias]
robotPORT = int(_port_)

##############################################################################
## Creating some Python proxies

try:
    from naoqi import ALProxy
except Exception, e:
    cprint("Error when importing naoqi Python module", "white", "on_red")
    print str(e)
    os._exit(1)

try:
    TTS = ALProxy("ALTextToSpeech", robotIP, robotPORT)
    TTS.setLanguage(NAOQI_LANG)
    TTS.setVolume(NAOQI_VOLUME)
except Exception, e:
    cprint("Error when creating ALTextToSpeech proxy", "white", "on_red")
    print str(e)
    os._exit(1)

try:
    MOTION = ALProxy("ALMotion", robotIP, robotPORT)
    MOTION.stiffnessInterpolation("Body", 1.0, 1.0)
except Exception, e:
    cprint("Error when creating ALMotion proxy", "white", "on_red")
    print str(e)
    os._exit(1)

try:
    POSTURE = ALProxy("ALRobotPosture", robotIP, robotPORT)
Developer: crbothe, Project: naoyadtk, Lines of code: 33, Source: naoqi.py

Example 7: walk_to_position

# Required module import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import setVolume [as alias]
        if postureProxy.getPosture() != "Stand":
            postureProxy.goToPosture("Stand", 1.0)

        walk_to_position(-1.0, 0.0, 0.0, 1)

    elif 'walk' in '{}'.format(string):
        if postureProxy.getPosture() != "Stand":
            postureProxy.goToPosture("Stand", 1.0)
        walk_to_position(0.2, 0.3)
    elif 'row' in '{}'.format(string):
        if env.global_rowing == False:
            env.global_rowing = True
            rowing = True
        else:
            volume = tts.getVolume()
            tts.setVolume(1)
            tts.say("Fight the Power")
            tts.setVolume(volume)



    if not rowing:
        env.global_rowing = False

    # else:
    #     from pygoogle import pygoogle
    #     g = pygoogle('{}'.format(string))
    #     g.pages = 1
    #     print '*Found %s results*' % (g.get_result_count())
    #     import pdb; pdb.set_trace()
    #     g.get_urls()
Developer: damintu, Project: NaoqiRecognition, Lines of code: 33, Source: actions.py

Example 8: emotional_speech_module

# Required module import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import setVolume [as alias]
class emotional_speech_module(ALModule):
    """ A simple module to change speech parameters dependent on emotions.

    """
    def __init__(self, name):
        ALModule.__init__(self, name)

        # Create proxies for the instance.
        self.tts = ALProxy("ALTextToSpeech")
        

        # Run behaviour when a tactile touched.
        global memory
        memory = ALProxy("ALMemory")
        memory.subscribeToEvent("TouchChanged", self.getName(), "emotive_speech")

    def emotive_speech(self, *_args):
        """ Change speech parameters dependent on the emotion.
            Change volume, speed, pitch & insert spaces.
                        
        """

        memory.unsubscribeToEvent("TouchChanged", self.getName())
        
        # Speech parameter lookup table. Format (pitch modifier, volume modifier)
        speech_parameter_lookup_table = [((1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00)),
                                        ((1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00)),
                                        ((1.00,0.75),(0.81,0.75),(0.00,0.00),(0.00,0.00),(-0.25,0.00),(0.50,1.00),(0.62,0.50),(0.75,),(0.75,),(0.75,0.75),(1.00,0.75)),
                                        ((1.00,0.50),(0.63,0.50),(-0.20,-0.50),(-1.00,-1.00),(-0.25,-0.50),(0.25,0.50),(0.25,0.50),(0.50,),(0.50,0.50),(0.50,0.50),(0.00,0.50)),
                                        ((1.00,0.25),(0.44,0.25),(0.40,-0.50),(0.30,-0.50),(0.25,-0.50),(0.25,0.00),(0.25,0.00),(0.25,0.25),(0.25,0.25),(0.25,0.25),(0.00,0.25)),
                                        ((1.00,0.00),(0.25,0.00),(0.10,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.10,0.00),(0.10,0.00),(0.10,0.00),(0.00,0.00)),
                                        ((0.25,-0.25),(0.06,-0.25),(-0.10,-0.25),(-0.20,0.00),(-0.20,0.00),(-0.10,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00)),
                                        ((-0.25,-0.50),(-0.13,-0.50),(-0.35,-0.50),(-0.20,-0.25),(-0.10,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00)),
                                        ((-0.25,-0.75),(-0.31,-0.75),(-0.35,-0.75),(-0.10,-0.50),(-0.10,-0.25),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00)),
                                        ((-0.50,-1.00),(-0.50,-1.00),(-0.40,-1.00),(-0.20,-0.75),(-0.10,-0.50),(0.00,-0.25),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00)),
                                        ((-0.50,-1.00),(-0.50,-1.00),(-0.50,-1.00),(-0.25,-0.75),(0.00,-0.50),(0.00,-0.25),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00))]
        
        # The pitch and volume modifier values need to be scaled (final scale value to be determined); e.g. a value of 4 divides the parameter by 4, giving +/- 25% of the default value.
        speech_parameter_scaling_value = 4
        
        current_emotion = memory.getData("Emotion/Current")
        valence = current_emotion[0][0]
        arousal = current_emotion[0][1]
        valence_index = (int(valence * 5) + 5)
        arousal_index = 10 - (int(arousal * 5) + 5)
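        # Worked example with hypothetical values: valence = 0.6 and arousal = -0.4 give
        # valence_index = int(0.6 * 5) + 5 = 8 and arousal_index = 10 - (int(-0.4 * 5) + 5) = 7,
        # i.e. column 8, row 7 (0-based) of the 11x11 lookup table above.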
        scaled_pitch_modifier = 1 + (speech_parameter_lookup_table[arousal_index][valence_index][0] / speech_parameter_scaling_value)
        # NAO can only increase pitch! So need to check if a pitch reduction required and negate it. Range 1.0 - 4.0.
        if scaled_pitch_modifier < 1.0:
            scaled_pitch_modifier = 1.0
        # NAO volume (gain) range 0.0 - 1.0.
        scaled_volume_modifier = 0.5 + (speech_parameter_lookup_table[arousal_index][valence_index][1] / speech_parameter_scaling_value)
       
        print speech_parameter_lookup_table[arousal_index][valence_index][0]
        print speech_parameter_lookup_table[arousal_index][valence_index][1]
        print scaled_pitch_modifier
        print scaled_volume_modifier
        
        self.tts.setParameter("pitchShift", scaled_pitch_modifier)
        self.tts.setVolume(scaled_volume_modifier)
        self.tts.say("What is going on, I can't feel my feet!")

        # Reset all values to default.
        self.tts.setParameter("pitchShift", 0)
        self.tts.setVolume(0.5)

        time.sleep(1.0)


        memory.subscribeToEvent("TouchChanged", self.getName(), "emotive_speech")
Developer: Sandy4321, Project: nao-emotional-framework, Lines of code: 71, Source: emotional_speech_prototype.py

Example 9: ALProxy

# Required module import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import setVolume [as alias]
import rps_main as rm
import img_orient as io
import time
from naoqi import ALProxy

IP = "192.168.10.151"
PORT = 9559

tts = ALProxy("ALTextToSpeech", IP, PORT)
# set the Nao IP address in img_orient
tts.setVolume(1)

while True:
    str = rm.rps_classify()

    # nothing was found, don't run the rest of the app
    if str == 'nothing':
        continue

    # we have found something! say it
    # we could just do tts.say(str), but the Nao is set to Dutch,
    # so we translate here, since the strings are in English
    if str == rm.directories[0]:
        tts.say("steen")
    elif str == rm.directories[1]:
        tts.say("papier")
    elif str == rm.directories[2]:
        tts.say("schaar")
    #elif str == rm.directories[3]:
    #    tts.say("Groen")
Developer: shootout, Project: dsrc, Lines of code: 32, Source: nao_app.py

Example 10: emotional_demo_module

# Required module import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import setVolume [as alias]
class emotional_demo_module(ALModule):
    """ A simple module to change the eye LEDs colour to represent emotions.

    """
    def __init__(self, name):
        ALModule.__init__(self, name)

        # Create proxies for the instance.
        global memory
        memory = ALProxy("ALMemory")

        self.tts = ALProxy("ALTextToSpeech")
        self.leds = ALProxy("ALLeds")
        self.motion = ALProxy("ALMotion")

        # Write empty valence and arousal values to memory.
        valence = 0
        arousal = 0
        param1 = 'null'
        current_emotion = [(valence, arousal), ("valence_mood", "arousal_mood"), ("personality"), (param1, "param2")]
        memory.insertData("Emotion/Current", current_emotion)

        # Disable ALAutonomousLife to better demonstrate emotional actions.
        self.autonomous_life = ALProxy("ALAutonomousLife")
        if (self.autonomous_life.getState() != "disabled"):
            self.autonomous_life.setState("disabled")
        time.sleep(1.0)
        self.motion.wakeUp()

        # Run behaviour when a tactile touched.
        memory.subscribeToEvent("VAChanged", self.getName(), "express_current_emotion")

    def express_current_emotion(self, *_args):
        """ Expresses the current emotion from the current valence and arousal values in ALMemory.
                        
        """

        # SETUP
        # Events.
        memory.unsubscribeToEvent("VAChanged", self.getName())

        # Motion.
        motion_names = list()
        motion_times = list()
        motion_keys = list()
        
        # Eyes.
        eye_colour_lookup_table = [[(0xF82C35),(0xF82C35),(0xD55528),(0xD55528),(0xFF622B),(0xFF622B),(0xFFB047),(0xFFB047),(0xFFB047),(0xFFB047),(0xFFB047)],
                                [(0xF82C35),(0xF82C35),(0xD5542A),(0xD5542A),(0xE96A37),(0xFF8232),(0xFF8232),(0xFEB340),(0xFEB340),(0xFEB340),(0xFFFF00)],
                                [(0xF62D35),(0xF62D35),(0xF62D35),(0xE96A37),(0xE96A37),(0xFF984D),(0xFF8232),(0xFDC147),(0xFFB144),(0xFFFF00),(0xFFFF00)],
                                [(0xF72C32),(0xF72C32),(0xFF4048),(0xFE5761),(0xED8659),(0xFEB278),(0xFECE6A),(0xFECE6A),(0xFEE566),(0xFFFF00),(0xFFFF00)],
                                [(0xF6255C),(0xF6255C),(0xF9386F),(0xFD585E),(0xF78C84),(0xFFB379),(0xFEDEA1),(0xFEE67C),(0xFFE564),(0xFFFF00),(0xFFFF00)],
                                [(0xF6255C),(0xF93871),(0xF93871),(0xFE9EB9),(0xFE9EB9),(0xFFFFFF),(0xD0E7B3),(0xA5D277),(0x85B957),(0x6EAB34),(0x6EAB34)],
                                [(0xA82C72),(0xA82C72),(0xC03381),(0xDB5CA1),(0xE8A1C3),(0xD1E5F0),(0xCFDADE),(0x73B8B3),(0x87B958),(0x6EAB34),(0x6EAB34)],
                                [(0xA82C72),(0xA82C72),(0xC03381),(0x9C3F74),(0xB36893),(0xD1E4F2),(0x91C3E6),(0x91C3E6),(0x219A95),(0x00948E),(0x6BAC34)],
                                [(0xA82C72),(0xA82C72),(0x86305D),(0x86305D),(0x94C8D6),(0x93C8D8),(0x92C2E6),(0x3196CE),(0x009591),(0x009591),(0x009591)],
                                [(0xA62D72),(0x692850),(0x692850),(0x692850),(0x2D9DB1),(0x2C9FB2),(0x2F96CE),(0x0085BE),(0x00968D),(0x00968D),(0x00968D)],
                                [(0x692850),(0x692850),(0x692850),(0x692850),(0x037F9B),(0x037F9B),(0x0085BE),(0x0085BE),(0x0085BE),(0x0085BE),(0x0085BE)]
                                ]

        # Speech.
        # Speech parameter lookup table. Format (pitch modifier, volume modifier)
        speech_parameter_lookup_table = [((1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00)),
                                        ((1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00),(1.00,1.00)),
                                        ((1.00,0.75),(0.81,0.75),(0.00,0.00),(0.00,0.00),(-0.25,0.00),(0.50,1.00),(0.62,0.50),(0.75,),(0.75,),(0.75,0.75),(1.00,0.75)),
                                        ((1.00,0.50),(0.63,0.50),(-0.20,-0.50),(-1.00,-1.00),(-0.25,-0.50),(0.25,0.50),(0.25,0.50),(0.50,),(0.50,0.50),(0.50,0.50),(0.00,0.50)),
                                        ((1.00,0.25),(0.44,0.25),(0.40,-0.50),(0.30,-0.50),(0.25,-0.50),(0.25,0.00),(0.25,0.00),(0.25,0.25),(0.25,0.25),(0.25,0.25),(0.00,0.25)),
                                        ((1.00,0.00),(0.25,0.00),(0.10,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.10,0.00),(0.10,0.00),(0.10,0.00),(0.00,0.00)),
                                        ((0.25,-0.25),(0.06,-0.25),(-0.10,-0.25),(-0.20,0.00),(-0.20,0.00),(-0.10,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00)),
                                        ((-0.25,-0.50),(-0.13,-0.50),(-0.35,-0.50),(-0.20,-0.25),(-0.10,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00)),
                                        ((-0.25,-0.75),(-0.31,-0.75),(-0.35,-0.75),(-0.10,-0.50),(-0.10,-0.25),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00)),
                                        ((-0.50,-1.00),(-0.50,-1.00),(-0.40,-1.00),(-0.20,-0.75),(-0.10,-0.50),(0.00,-0.25),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00)),
                                        ((-0.50,-1.00),(-0.50,-1.00),(-0.50,-1.00),(-0.25,-0.75),(0.00,-0.50),(0.00,-0.25),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00),(0.00,0.00))]
                      
        # CALCULATIONS
        # Get current emotional values and generic calcs.
        current_emotion = memory.getData("Emotion/Current")
        print "current_emotion (module): ", current_emotion
        valence = current_emotion[0][0]
        arousal = current_emotion[0][1]
        emotion_name = current_emotion[3][0]
        # Valence and arousal are normalised between -1 and 1, with an axis intersection at (0, 0). Convert axis intersection
        # to index.
        valence_index = (int(valence * 5) + 5)
        arousal_index = 10 - (int(arousal * 5) + 5)

        # Speech.
        # The pitch and volume modifier values need to be scaled (final scale value to be determined); e.g. a value of 4 divides the parameter by 4, giving +/- 25% of the default value.
        speech_parameter_scaling_value = 4
        string_to_say = "I am feeling " + emotion_name
        scaled_pitch_modifier = 1 + (speech_parameter_lookup_table[arousal_index][valence_index][0] / speech_parameter_scaling_value)
        # NAO can only increase pitch! So need to check if a pitch reduction required and negate it. Range 1.0 - 4.0.
        if scaled_pitch_modifier < 1.0:
            scaled_pitch_modifier = 1.0
        # NAO volume (gain) range 0.0 - 1.0.
        scaled_volume_modifier = 0.5 + (speech_parameter_lookup_table[arousal_index][valence_index][1] / speech_parameter_scaling_value)
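        # With a hypothetical lookup entry of (-0.35, -0.50) and a scaling value of 4, this yields
        # scaled_pitch_modifier = 1 + (-0.35 / 4) = 0.9125, which the check above raises to 1.0,
        # and scaled_volume_modifier = 0.5 + (-0.50 / 4) = 0.375.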
        self.tts.setParameter("pitchShift", scaled_pitch_modifier)
        self.tts.setVolume(scaled_volume_modifier)
        
        # Eyes.        
#......... the rest of the code is omitted here .........
Developer: Sandy4321, Project: nao-emotional-framework, Lines of code: 103, Source: standalone_emotional_demo.py

Example 11: ALProxy

# Required module import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import setVolume [as alias]
				motionProxy.moveTo(0.0, 0.0, 10*almath.TO_RAD)
			elif key == 101:
				motionProxy.moveTo(0.0, 0.0, -10*almath.TO_RAD)			
			else:
				key = 0
	
		
if __name__ == "__main__":
	IP = "172.20.12.26"
	PORT = 9559
	tts = ALProxy("ALTextToSpeech", IP, PORT)
	camProxy = ALProxy("ALVideoDevice", IP, PORT)
	motionProxy = ALProxy("ALMotion", IP, PORT)
	post = ALProxy("ALRobotPosture", IP, PORT)
	tts.setLanguage("English")
	tts.setVolume(1.0)
	
	post.goToPosture("Stand", 1.0)
	time.sleep(1.0)
	tts.say("S C V Ready !")
	tracker = Tracker()

	while True:
		command = raw_input()
		if command=='sp':
			liveSpeak()
		if command=='im':
			showNaoImage()
		if command=='oh':
			itsyou()
		if command=='dep':
Developer: benefije, Project: DHE, Lines of code: 33, Source: nao_live.py

Example 12:

# Required module import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import setVolume [as alias]
led.createGroup('rGroup', right)
led.off('FaceLeds')

##################################################
############## FINAL INITIALIZATION ##############
##################################################

#### Video Init
# initialize cnt
init_img = camProxy.getImageRemote(videoClient)
init_im = Image.fromstring("RGB", (init_img[0], init_img[1]), init_img[6])
init_frame = np.array(init_im)
cnt = init_frame[0]

#### speech and posture init
tts.setVolume(.2)
tts.say("Initiating")
postureProxy.goToPosture("Crouch", 0.8)
postureProxy.goToPosture("StandInit", 0.8)
motionProxy.setAngles(["HeadYaw", "HeadPitch"],[-0.1, 0.5], 0.2)

#### Boolean Init
stillWorking = True
commandGiven = True
sidekick = True
kicking = False
t = True
##################################################
############## PROGRAM ###########################
##################################################
i = 0
Developer: brandonstrong, Project: school, Lines of code: 33, Source: Naoqi+Penalty+Kick.py

Example 13: speech

# Required module import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import setVolume [as alias]
                
                    #lookAndAskForFeedback("How about now?");
        
### --------------------------------------------------------------- MAIN
firstStrokeOfMessage = True;
shapeFinished = False;
if __name__ == "__main__":
    #listen for user-drawn shapes
    shape_subscriber = rospy.Subscriber(USER_DRAWN_SHAPES_TOPIC, Path, read_traj_msg); 
    
    if(naoConnected):
        from naoqi import ALBroker, ALProxy
        #start speech (ROS isn't working..)
        port = 9559;
        myBroker = ALBroker("myBroker", #I'm not sure that pyrobots doesn't already have one of these open called NAOqi?
            "0.0.0.0",   # listen to anyone
            0,           # find a free port and use it
            NAO_IP,      # parent broker IP
            port)        # parent broker port
        textToSpeech = ALProxy("ALTextToSpeech", NAO_IP, port)   
        textToSpeech.setLanguage('English')
        textToSpeech.setVolume(0.2);
        if(naoWriting):
            nao.setpose("StandInit");
            [temp,joints_standInit] = nao.execute([naoqi_request("motion","getAngles",["RArm",True])]);
            nao.execute([naoqi_request("motion","wbEnableEffectorControl",[effector,True])])
        
    print('Waiting for message to write');

    rospy.spin();
Developer: dhood, Project: nao_ros_cowriter, Lines of code: 32, Source: message_echoer_nao.py

Example 14: ALProxy

# Required module import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import setVolume [as alias]
#NAO Calculator
from Tkinter import * # everything is imported from the Tkinter module
from naoqi import ALProxy # ALProxy is imported from the naoqi module

# ip variable
ip = "192.168.0.106" #ip address od robot
port = 9559 #port number

tts = ALProxy("ALTextToSpeech", ip, port) #proxy creation on the tts module
tts.setParameter("pitchShift", 1.0) #sets the pitch shift of Nao's voice
tts.setVolume(1.0) # sets the volume of speech


def frame(root, side): # helper function for placing buttons into a frame
    frame = Frame(root) # Frame is a container widget used for grouping other widgets and arranging their positions.
    frame.pack(side=side, expand=YES, fill=BOTH) # pack is a geometry manager which organizes widgets in blocks before placing them in the parent widget.
    # side determines which side of the parent widget to pack against.
    # When expand is set to true, the widget expands to fill any space not otherwise used in its parent.
    # fill is used for filling space in the X or Y direction, or both.
    return frame # returns the frame widget


def button(root, side, text, command=None): # helper function for creating buttons
    frame = Button(root, text=text, command=command) # The Button widget adds buttons to a Python application; they can display text or images, and the attached function or method is called automatically when the button is clicked.
    frame.pack(side=side, expand=YES, fill=BOTH) # same as in the frame function
    return frame # returns the button widget


class Calculator(Frame): # this class inherits from the Frame container widget
    def __init__(self): # constructor of the Calculator class; self refers to the instance of the class
        Frame.__init__(self) # call the Frame constructor
Developer: peterhalachan, Project: nao, Lines of code: 33, Source: NAO+Calculator.py


Note: The naoqi.ALProxy.setVolume examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. Please refer to the corresponding project's License when distributing or using the code; do not reproduce without permission.