This article collects typical usage examples of the Python method naoqi.ALProxy.stopMicrophonesRecording. If you have been wondering what ALProxy.stopMicrophonesRecording does, how to call it, or what real-world uses look like, the curated code examples below should help. You can also explore further usages of the containing class, naoqi.ALProxy.
The following shows 7 code examples of the ALProxy.stopMicrophonesRecording method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
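Before the individual examples, here is a minimal sketch of the pattern they all share: ALAudioRecorder.startMicrophonesRecording begins writing microphone data to a file on the robot, and stopMicrophonesRecording ends the recording. The robot address, output path and five-second duration below are placeholders, not values taken from any of the examples.

# Minimal sketch of the start/stop pattern (address, path and duration are placeholders).
import time
from naoqi import ALProxy

recorder = ALProxy("ALAudioRecorder", "nao.local", 9559)
channels = [0, 0, 1, 0]   # left, right, front, rear microphones; here: front only
recorder.startMicrophonesRecording("/home/nao/recordings/demo.wav", "wav", 16000, channels)
time.sleep(5)                          # keep recording for five seconds
recorder.stopMicrophonesRecording()    # the file is now complete on the robot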
Example 1: stt
# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import stopMicrophonesRecording [as alias]
def stt():
    # Google Cloud Speech client (this snippet uses the pre-0.27 google-cloud-speech API)
    speech_client = speech.Client.from_service_account_json('/home/nao/ourcodes/MyFirstProject-47fa9e048ac2.json')
    tts = ALProxy("ALTextToSpeech", "localhost", 9559)
    tts.resetSpeed()
    # record from the front microphone only: [left, right, front, rear]
    channels = [0, 0, 1, 0]
    rec = ALProxy("ALAudioRecorder", "localhost", 9559)
    leds = ALProxy("ALLeds", "localhost", 9559)
    rec.startMicrophonesRecording("/home/nao/ourcodes/test.wav", "wav", 16000, channels)
    leds.rotateEyes(0x000000FF, 1, 5)   # blue eye rotation while listening (blocks ~5 s)
    rec.stopMicrophonesRecording()
    leds.on("FaceLeds")
    with open("/home/nao/ourcodes/test.wav", 'rb') as audio_file:
        content = audio_file.read()
        audio_sample = speech_client.sample(
            content=content,
            source_uri=None,
            encoding='LINEAR16',
            sample_rate=16000)
    try:
        alternatives = speech_client.speech_api.sync_recognize(audio_sample, language_code='en-IN')
        return str(alternatives[0].transcript)
    except ValueError:
        return ""
Example 2: main
# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import stopMicrophonesRecording [as alias]
def main(robotIP, PORT = 9559):
    recorderProxy = ALProxy("ALAudioRecorder", robotIP, PORT)
    # stop any recording that might still be running from a previous session
    recorderProxy.stopMicrophonesRecording()
    print("recorded")
    # configure channels: left, right, front, rear microphones
    channels = (0, 0, 1, 0)  # Python tuple; the C++ code uses AL::ALValue
    # note: the path is resolved on the robot, so "~" may not expand as expected
    recorderProxy.startMicrophonesRecording("~/ICRL/nao-setup/test.wav", "wav", 16000, channels)
    # continue recording for 10 seconds
    time.sleep(10)
    # stop recording
    recorderProxy.stopMicrophonesRecording()
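Example 2 hard-codes the 10-second wait between start and stop. A small reusable wrapper (my own naming, not part of the example) makes the duration a parameter and guarantees that the stop call runs even if the sleep is interrupted; it also expects an absolute path, since the filename is resolved on the robot and "~" is unlikely to be expanded there.

# Sketch: parameterised record-then-stop helper built on the calls used in Example 2.
import time
from naoqi import ALProxy

def record_for(recorder, path, seconds, channels=(0, 0, 1, 0)):
    """Record `seconds` of audio to `path` (an absolute path on the robot)."""
    recorder.startMicrophonesRecording(path, "wav", 16000, channels)
    try:
        time.sleep(seconds)
    finally:
        recorder.stopMicrophonesRecording()   # always stop, even on error/interrupt

# usage:
# recorder = ALProxy("ALAudioRecorder", "nao.local", 9559)
# record_for(recorder, "/home/nao/test.wav", 10)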
Example 3: main
# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import stopMicrophonesRecording [as alias]
def main(robot_IP, robot_PORT=9559):
    global tts, audio, record, aup
    # ----------> Connect to robot <----------
    tts = ALProxy("ALTextToSpeech", robot_IP, robot_PORT)
    audio = ALProxy("ALAudioDevice", robot_IP, robot_PORT)
    record = ALProxy("ALAudioRecorder", robot_IP, robot_PORT)
    aup = ALProxy("ALAudioPlayer", robot_IP, robot_PORT)
    # ----------> recording <----------
    print 'start recording...'
    record_path = '/home/nao/record.wav'
    # front microphone only, 16 kHz wav, for 10 seconds
    record.startMicrophonesRecording(record_path, 'wav', 16000, (0, 0, 1, 0))
    time.sleep(10)
    record.stopMicrophonesRecording()
    print 'record over'
    # ----------> playing back the recorded file <----------
    fileID = aup.playFile(record_path, 0.7, 0)   # volume 0.7, centered (pan 0)
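The file recorded in Example 3 lives on the robot's filesystem, not on the workstation running the script. If you need it locally (for offline analysis or a cloud recognizer), one option is to pull it over SFTP. The sketch below uses paramiko with the robot's default nao user; the hostname, password and local destination are placeholder assumptions, not part of the example.

# Sketch: copy the recording from the robot to the local machine over SFTP.
import paramiko

ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect("nao.local", username="nao", password="nao")
sftp = ssh.open_sftp()
sftp.get('/home/nao/record.wav', 'record_local.wav')   # remote path from Example 3
sftp.close()
ssh.close()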
Example 4: main
# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import stopMicrophonesRecording [as alias]
def main(robot_IP, robot_PORT = 9559):
    global tts, audio, record, aup
    # ----------> Connect to robot <----------
    sd = ALProxy("ALSoundDetection", robot_IP, robot_PORT)
    tts = ALProxy("ALTextToSpeech", robot_IP, robot_PORT)
    audio = ALProxy("ALAudioDevice", robot_IP, robot_PORT)
    record = ALProxy("ALAudioRecorder", robot_IP, robot_PORT)
    aup = ALProxy("ALAudioPlayer", robot_IP, robot_PORT)
    mem = ALProxy('ALMemory', robot_IP, robot_PORT)
    print(mem.getDataListName())
    # ----------> recording <----------
    print 'start recording...'
    sd.setParameter("Sensibility", 0.9)
    audio.openAudioInputs()
    record_path = '/home/nao/audio/wista.wav'
    # noise_output = wave.open('sample.wav', 'w')
    # noise_output.setparams((1, 4, 16000, 0, 'NONE', 'not compressed'))
    # (nchannels, sampwidth, framerate, nframes, comptype, compname)
    # i.e. channels, sample width, frame rate, number of frames
    # noise_output = wave.open('file.wav', 'w')
    # note: the recording is started with a relative filename, not record_path
    record.startMicrophonesRecording('wista.wav', 'wav', 16000, (1, 0, 0, 0))
    print("start!!!")
    time.sleep(35)
    print("stop!!!")
    record.stopMicrophonesRecording()
    # record_to_read = aup.playFile('/home/nao/audio/wista.wav', 0.1, 0)
    # ----------> offline speech recognition <----------
    r = sr.Recognizer()
    with sr.AudioFile('audio/wista.wav') as source:  # or sr.Microphone(); this path is read on the machine running the script
        try:
            audio = r.record(source)   # read the entire audio file
            print("You said " + r.recognize_sphinx(audio))
        except sr.UnknownValueError:
            tts.say("sorry")   # "I don't understand you, sorry!"
Example 5: ALProxy
# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import stopMicrophonesRecording [as alias]
elif 'sit' in '{}'.format(string) and postureProxy is not None:
    postureProxy.goToPosture("Sit", 1.0)
elif 'face' in '{}'.format(string):
    print "test1"
    tts = ALProxy("ALFaceDetection", env.nao_ip, env.nao_port)
    tts.enableTracking(True)
    tts.learnFace("damien")
    ALProxy("ALPhotoCaptureProxy", env.nao_ip, env.nao_port).takePicture()
    print "test"
elif 'record' in '{}'.format(string) or 'recall' in '{}'.format(string):
    print "yolo bitch!"
    audioProxy = ALProxy("ALAudioRecorder", env.nao_ip, env.nao_port)
    # note: startMicrophonesRecording also needs the channel selection;
    # (0, 0, 1, 0) (front microphone) is added here so the call is complete
    audioProxy.startMicrophonesRecording("/home/nao/test.wav", "wav", 16000, (0, 0, 1, 0))
    time.sleep(15)
    audioProxy.stopMicrophonesRecording()
    print "done bitch!"
elif 'moonwalk' in '{}'.format(string):
    if postureProxy.getPosture() != "Stand":
        postureProxy.goToPosture("Stand", 1.0)
    walk_to_position(-1.0, 0.0, 0.0, 1)
elif 'walk' in '{}'.format(string):
    if postureProxy.getPosture() != "Stand":
        postureProxy.goToPosture("Stand", 1.0)
    walk_to_position(0.2, 0.3)
elif 'row' in '{}'.format(string):
    if env.global_rowing == False:
        env.global_rowing = True
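walk_to_position and env are helpers defined elsewhere in that project, so this snippet is not self-contained. For reference, a minimal stand-in for walk_to_position could be built on ALMotion.moveTo, which walks the robot to a pose given in metres and radians relative to its current frame; the proxy address and the decision to ignore any extra arguments are my assumptions, not the project's actual implementation.

# Hypothetical stand-in for the project's walk_to_position helper.
from naoqi import ALProxy

def walk_to_position(x, y, theta=0.0, *_ignored):
    # x: forward (m), y: lateral (m), theta: final rotation (rad), relative to the robot
    motion = ALProxy("ALMotion", "nao.local", 9559)
    motion.wakeUp()                 # ensure the motors are stiff before walking
    motion.moveTo(x, y, theta)      # blocking walk to the target pose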
Example 6: MainWindow
# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import stopMicrophonesRecording [as alias]
#......... part of the code is omitted here .........
self.sonarProxy = ALProxy("ALSonar", self.ip.get(), self.port.get())
self.isConnected = True
self.label.config(text='Ready')
# connecting with robot failed
except:
self.isConnected = False
self.label.config(text='Not connected')
def switch_camera(self):
""" Change the recording device on the robot """
# switch camera if connected
if self.isConnected and not self.isRecordingVideo:
self.videoRecorderProxy.setCameraID(1 - self.videoRecorderProxy.getCameraID())
self.camera_label.config(text=self.camera_dict[self.videoRecorderProxy.getCameraID()])
def switch_audio(self):
""" Change the format of audio recording
.ogg is a single channel recording from the front microphone
.wav (default) is a 4-channel recording from all microphones
"""
if not self.isRecordingAudio:
self.audio_id = 1 - self.audio_id
self.audio_label.config(text=self.audio_dict[self.audio_id])
def start(self):
""" Start recording if connected """
if not self.isConnected:
self.label.config(text='Not connected')
return
# use timestamped filenames
filename = time.strftime("%Y%m%d_%H%M%S")+"_"+self.video_label.get()
filename_audio = filename+self.audio_dict[self.audio_id]
# start recording
self.videoRecorderProxy.startRecording("/home/nao/recordings/", filename)
self.isRecordingVideo = True
self.audioRecorderProxy.startMicrophonesRecording("/home/nao/recordings/"+filename_audio)
self.isRecordingAudio = True
self.label.config(text='Recording')
if self.recordSonar.get():
self.log_sonar = open('./sensor_readings/'+filename+'_sonar.txt', 'w')
self.isRecordingSonar = True
self.sonarProxy.subscribe("myApp")
if self.recordTactile.get():
self.isRecordingTactile = True
self.log_tactile = open('./sensor_readings/'+filename+'_tactile.txt', 'w')
self.time_start = time.time()
def stop(self):
""" Stop recording if connected and already recording """
if not self.isConnected:
self.label.config(text='Not connected')
return
if self.isRecordingVideo:
self.videoRecorderProxy.stopRecording()
self.isRecordingVideo = False
if self.isRecordingAudio:
self.audioRecorderProxy.stopMicrophonesRecording()
self.isRecordingAudio = False
if not self.isRecordingAudio and not self.isRecordingVideo:
self.label.config(text='Recording stopped')
if self.isRecordingSonar:
self.isRecordingSonar = False
self.log_sonar.close()
self.sonarProxy.unsubscribe("myApp")
if self.isRecordingTactile:
self.isRecordingTactile = False
self.log_tactile.close()
def close(self):
""" Stop recording and close the program """
if self.isRecordingVideo:
self.videoRecorderProxy.stopRecording()
if self.isRecordingAudio:
self.audioRecorderProxy.stopMicrophonesRecording()
self.master.destroy()
def to_do(self):
""" Do stuff while in the TK main loop """
time_stamp = time.time()-self.time_start
if self.isRecordingSonar:
val_left = self.memoryProxy.getData("Device/SubDeviceList/US/Left/Sensor/Value")
val_right = self.memoryProxy.getData("Device/SubDeviceList/US/Right/Sensor/Value")
self.log_sonar.write(str(time_stamp)+','+str(val_right)+','+str(val_left)+'\n')
if self.isRecordingTactile:
val_left1 = str(self.memoryProxy.getData("HandRightLeftTouched"))
val_left2 = str(self.memoryProxy.getData("HandRightBackTouched"))
val_left3 = str(self.memoryProxy.getData("HandRightRightTouched"))
val_right1 = str(self.memoryProxy.getData("HandLeftLeftTouched"))
val_right2 = str(self.memoryProxy.getData("HandLeftBackTouched"))
val_right3 = str(self.memoryProxy.getData("HandLeftRightTouched"))
val_head1 = str(self.memoryProxy.getData("FrontTactilTouched"))
val_head2 = str(self.memoryProxy.getData("MiddleTactilTouched"))
val_head3 = str(self.memoryProxy.getData("RearTactilTouched"))
self.log_tactile.write(str(time_stamp)+',' +
val_left1 + ',' + val_left2 + ','+val_left3 + ',' +
val_right1 + ',' + val_right2 + ',' + val_right3 + ',' +
val_head1 + ',' + val_head2 + ',' + val_head3 + '\n')
self.master.after(10, self.to_do)
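Example 6 starts the audio and video recorders back to back from a Tkinter GUI. If you want the same behaviour outside a GUI, the pair of helpers below shows one way to bundle the two proxies; the robot address, output folder and timestamped base name are placeholders, and the explicit four-argument form of startMicrophonesRecording is used here instead of the single-argument call seen in the example.

# Sketch: start and stop audio + video recording together (names and paths are assumptions).
import time
from naoqi import ALProxy

def start_session(ip, folder="/home/nao/recordings/", port=9559):
    video = ALProxy("ALVideoRecorder", ip, port)
    audio = ALProxy("ALAudioRecorder", ip, port)
    base = time.strftime("%Y%m%d_%H%M%S")            # timestamped name, as in Example 6
    video.startRecording(folder, base)
    audio.startMicrophonesRecording(folder + base + ".wav", "wav", 16000, (1, 1, 1, 1))
    return video, audio

def stop_session(video, audio):
    video.stopRecording()                             # finish writing the video file
    audio.stopMicrophonesRecording()                  # finish writing the audio file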
Example 7: ALProxy
# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import stopMicrophonesRecording [as alias]
import sys
import time
from naoqi import ALProxy

IP = "nao.local"
PORT = 9559

if (len(sys.argv) < 2):
    print "Usage: 'python RecordAudio.py name'"
    sys.exit(1)

fileName = "/home/nao/" + sys.argv[1] + ".wav"
aur = ALProxy("ALAudioRecorder", IP, PORT)
channels = [0, 0, 1, 0]
# 16 kHz sample rate (the original snippet had 160000, which appears to be a typo)
aur.startMicrophonesRecording(fileName, "wav", 16000, channels)
c = raw_input("Finished?")
aur.stopMicrophonesRecording()
c = raw_input("Play?")
aup = ALProxy("ALAudioPlayer", IP, PORT)
# Launches the playing of a file (volume 0.5, panned to the left speaker)
aup.playFile(fileName, 0.5, -1.0)
c = raw_input("Done?")
# Launches the playing of a file
# aup.playFile("/usr/share/naoqi/wav/random.wav")
# Launches the playing of a file on the left speaker at 50% volume
# aup.playFile("/usr/share/naoqi/wav/random.wav", 0.5, -1.0)