本文整理汇总了Python中naoqi.ALProxy.setParam方法的典型用法代码示例。如果您正苦于以下问题:Python ALProxy.setParam方法的具体用法?Python ALProxy.setParam怎么用?Python ALProxy.setParam使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类naoqi.ALProxy
的用法示例。
在下文中一共展示了ALProxy.setParam方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: init
# 需要导入模块: from naoqi import ALProxy [as 别名]
# 或者: from naoqi.ALProxy import setParam [as 别名]
def init(IP,PORT):
global motionProxy
global tts
global post
global sonarProxy
global memoryProxy
global cameraProxy
global videoClient
post = ALProxy("ALRobotPosture", IP, PORT)
tts = ALProxy("ALTextToSpeech", IP, PORT)
motionProxy = ALProxy("ALMotion", IP, PORT)
cameraProxy = ALProxy("ALVideoDevice", IP, PORT)
# init video
resolution = 0 # 0 : QQVGA, 1 : QVGA, 2 : VGA
colorSpace = 11 # RGB
camNum = 0 # 0:top cam, 1: bottom cam
fps = 1; # frame Per Second
cameraProxy.setParam(18, camNum)
try:
videoClient = cameraProxy.subscribe("python_client",
resolution, colorSpace, fps)
except:
cameraProxy.unsubscribe("python_client")
videoClient = cameraProxy.subscribe("python_client",
resolution, colorSpace, fps)
print "Start videoClient: ",videoClient
sonarProxy = ALProxy("ALSonar", IP, PORT)
sonarProxy.subscribe("myApplication")
memoryProxy = ALProxy("ALMemory", IP, PORT)
post.goToPosture("Crouch", 1.0)
time.sleep(2)
示例2: video
# 需要导入模块: from naoqi import ALProxy [as 别名]
# 或者: from naoqi.ALProxy import setParam [as 别名]
class video(ALModule):
    """Grab video frames from a NAO robot via ALVideoDevice."""

    def __init__(self, IP="nao.local", PORT=9559, CameraID=0, parent=None):
        """Store the connection settings and register the video client.

        IP/PORT locate the NAOqi broker; CameraID selects the camera
        (0 = top, 1 = bottom).  `parent` is accepted for signature symmetry
        with the Qt-widget variants of this class but is unused here.
        """
        self._IP = IP
        self._PORT = PORT
        self._CameraID = CameraID
        self.counter = 0
        # Proxy to ALVideoDevice.
        self._videoProxy = None
        # Our video module (subscriber) name.
        self._imgClient = ""
        # This will contain the alImage we get from Nao.
        self._alImage = None
        self._image = []
        self._registerImageClient(self._IP, self._PORT)

    def _registerImageClient(self, IP, PORT):
        """Register our video module to the robot."""
        self._videoProxy = ALProxy("ALVideoDevice", self._IP, self._PORT)
        resolution = vision_definitions.kQVGA  # 320 * 240
        colorSpace = vision_definitions.kRGBColorSpace
        self._imgClient = self._videoProxy.subscribe("_client", resolution, colorSpace, 30)
        # Select camera.
        self._videoProxy.setParam(vision_definitions.kCameraSelectID,
                                  self._CameraID)

    def _unregisterImageClient(self):
        """Unregister our naoqi video module."""
        if self._imgClient != "":
            self._videoProxy.unsubscribe(self._imgClient)

    def _updateImage(self):
        """Retrieve a new image from Nao and return it as a PIL image."""
        self._alImage = self._videoProxy.getImageRemote(self._imgClient)
        # alImage layout: [0] width, [1] height, [6] raw pixel buffer.
        imageWidth = self._alImage[0]
        imageHeight = self._alImage[1]
        array = self._alImage[6]
        # Create a PIL Image from our pixel array.
        # NOTE(review): Image.fromstring is the legacy PIL name; Pillow
        # renamed it to Image.frombytes.
        im = Image.fromstring("RGB", (imageWidth, imageHeight), array)
        return im

    def timerEvent(self, event):
        """Timer callback (was: "evenement lance par startTimer"): grab a frame."""
        self._updateImage()

    def __del__(self):
        """Destructor (was French): shut down the video subscription."""
        self._unregisterImageClient()
示例3: NaoVision
# 需要导入模块: from naoqi import ALProxy [as 别名]
# 或者: from naoqi.ALProxy import setParam [as 别名]
class NaoVision():
    """ROS node that republishes the NAO camera stream on "nao_camera"."""

    def __init__(self):
        """Connect to the NAOqi video device and set up the ROS publisher.

        Reads /naoqi/host and /naoqi/port (required) and ~resolution /
        ~camera (optional) from the parameter server; exits the process on
        any missing parameter or failed proxy creation.
        """
        rospy.init_node("nao_vision")
        # attempt to get the host and port
        try:
            host = rospy.get_param("/naoqi/host")
        except KeyError:
            rospy.logerr("Unable to find parameter /naoqi/host")
            exit(1)
        try:
            port = rospy.get_param("/naoqi/port")
        except KeyError:
            rospy.logerr("Unable to find parameter /naoqi/port")
            exit(1)
        # connect to the Nao
        try:
            self.nao_cam = ALProxy("ALVideoDevice", host, port)
        except Exception:
            rospy.logerr("Unable to create vision proxy.")
            exit(1)
        # get any optional parameters
        resolution = rospy.get_param("~resolution", 1)
        cam = rospy.get_param("~camera", 0)
        # setup the camera stream (RGB at 30 fps) and select the camera
        self.proxy_name = self.nao_cam.subscribe("nao_image", resolution, kRGBColorSpace, 30)
        self.nao_cam_pub = rospy.Publisher("nao_camera", Image)
        self.nao_cam.setParam(kCameraSelectID, cam)
        rospy.loginfo("NAO Vision Node Initialized")

    def publish_image(self):
        """Grab one frame from the NAO and publish it as a ROS Image."""
        # get the image from the Nao
        img = self.nao_cam.getImageRemote(self.proxy_name)
        # copy the data into the ROS Image
        ros_img = Image()
        ros_img.width = img[0]
        ros_img.height = img[1]
        ros_img.step = img[2] * img[0]  # bytes per row = layers * width
        ros_img.header.stamp.secs = img[5]
        ros_img.is_bigendian = False
        ros_img.encoding = "rgb8"
        # Fix: the original assigned ros_img.data = img[6] twice; once suffices.
        ros_img.data = img[6]
        # publish the image
        self.nao_cam_pub.publish(ros_img)

    def disconnect(self):
        """Unsubscribe our video client from ALVideoDevice."""
        self.nao_cam.unsubscribe(self.proxy_name)
示例4: NaoImage
# 需要导入模块: from naoqi import ALProxy [as 别名]
# 或者: from naoqi.ALProxy import setParam [as 别名]
class NaoImage(QWidget):
    """Qt widget showing the NAO camera feed at VGA (640x480), keeping a
    BGR OpenCV copy of each frame in self._array."""

    def __init__(self, IP, PORT, CameraID, parent = None):
        """Connect to the robot, subscribe a VGA stream and start a timer."""
        QWidget.__init__(self, parent)
        self._image = QImage()
        self.setWindowTitle("Nao")
        self._imgWidth = 640   # other options: 320, 1280
        self._imgHeight = 480  # other options: 240, 960
        self._cameraID = CameraID
        self.resize(self._imgWidth, self._imgHeight)
        # Scratch buffer for the OpenCV view; change when imgWidth/imgHeight changes.
        self._array = np.zeros((640, 480))
        self._videoProxy = None  # proxy to ALVideoDevice
        self._imgClient = ""     # our subscriber name
        self._alImage = None     # last raw alImage from the robot
        self._registerImageClient(IP, PORT)
        self.startTimer(100)     # refresh period, milliseconds

    def _registerImageClient(self, IP, PORT):
        """Subscribe a VGA RGB stream and select the requested camera."""
        self._videoProxy = ALProxy("ALVideoDevice", IP, PORT)
        resolution = vision_definitions.kVGA  # other options: kQVGA (320*240), k4VGA (1280*960)
        colorSpace = vision_definitions.kRGBColorSpace  # may be useful for color conversions? HSY and BGR are options
        self._imgClient = self._videoProxy.subscribe("_client", resolution, colorSpace, 5)
        self._videoProxy.setParam(vision_definitions.kCameraSelectID, self._cameraID)

    def _unregisterImageClient(self):
        """Unsubscribe our video client from ALVideoDevice."""
        if self._imgClient != "":
            self._videoProxy.unsubscribe(self._imgClient)

    def paintEvent(self, event):
        """Draw the current frame scaled to the widget viewport."""
        painter = QPainter(self)
        painter.drawImage(painter.viewport(), self._image)

    def convertToCV(self):
        """Convert the current QImage into a BGR numpy array for OpenCV."""
        self._array = qim.rgb_view(self._image, True)
        self._array = cv2.cvtColor(self._array, cv2.cv.CV_RGB2BGR)
        # convertToQImage if desired

    def _updateImage(self):
        """Fetch a frame from the robot and refresh both the Qt and CV copies."""
        self._alImage = self._videoProxy.getImageRemote(self._imgClient)
        # alImage layout: [0] width, [1] height, [6] pixel buffer.
        self._image = QImage(self._alImage[6], self._alImage[0], self._alImage[1], QImage.Format_RGB888)  # there may be a way to avoid using QImage but idk yet
        self._image = QImage.convertToFormat(self._image, QImage.Format_RGB32)
        self.convertToCV()

    def timerEvent(self, event):
        """Periodic refresh: grab a frame and repaint."""
        self._updateImage()
        self.update()

    def __del__(self):
        """Unregister the video client when the widget is deleted."""
        self._unregisterImageClient()
示例5: ALProxy
# 需要导入模块: from naoqi import ALProxy [as 别名]
# 或者: from naoqi.ALProxy import setParam [as 别名]
# NOTE(review): fragment — IP, sys and the code using these proxies appear
# earlier/later in the original script.
PORT = 9559
##################################################
############## CREATE PROXYS #####################
##################################################
#### Create camera proxy
camProxy = ALProxy("ALVideoDevice", IP, PORT)
#### Create text to speech proxy
tts = ALProxy("ALTextToSpeech", IP, 9559)
#### Create LED proxy
led = ALProxy("ALLeds", IP, 9559)
#### Get video feed from robot (resolution 2 = VGA, color space 11 = RGB, 5 fps)
videoClient = camProxy.subscribe("python_client", 2, 11, 5)
#### Set parameters for video feed: 18 == kCameraSelectID, 1 == bottom camera
camProxy.setParam(18, 1)
#### Create motion proxy for robot
try:
    motionProxy = ALProxy("ALMotion", IP, PORT)
except Exception, e:
    print "Could not create proxy to ALMotion"
    print "Error was: ", e
    sys.exit(1)
#### Create posture proxy for robot
try:
    postureProxy = ALProxy("ALRobotPosture", IP, PORT)
except Exception, e:
    print "Could not create proxy to ALRobotPosture"
    print "Error was: ", e
示例6: animate
# 需要导入模块: from naoqi import ALProxy [as 别名]
# 或者: from naoqi.ALProxy import setParam [as 别名]
# NOTE(review): fragment — tts, camProxy, resolution, np, Image, time and
# vision_definitions are set up earlier in the original script.
colorSpace = 11  # RGB
tts.say("Hello")
# posture.goToPosture("Crouch", 1.0)
# motion.rest()
time.sleep(5)
tts.say("Sampling")
# Pre-allocate sample buffers: 10000 samples of 10 joint angles, plus 20x20
# grayscale (400 values) and 20x20 RGB (1200 values) flattened images.
dataSize = 10000
JointArray = np.zeros((10000, 10))
ImageArrayGray = np.zeros((10000, 400))
ImageArrayRGB = np.zeros((10000, 1200))
jointdata = np.zeros((1, 10))
videoClient = camProxy.subscribe("python_client77", resolution, colorSpace, 30)
# Pin brightness and disable auto white balance / auto exposure so that the
# collected samples are photometrically comparable.
camProxy.setParam(vision_definitions.kCameraBrightnessID, 40)
camProxy.setParam(vision_definitions.kCameraAutoWhiteBalanceID, 0)
camProxy.setParam(vision_definitions.kCameraAutoExpositionID, 0)

def animate():
    """Grab one frame and downsample it to 20x20 RGB and grayscale PIL images."""
    naoImage = camProxy.getImageRemote(videoClient)
    imageWidth = naoImage[0]
    imageHeight = naoImage[1]
    array = naoImage[6]  # raw pixel buffer
    im = Image.fromstring("RGB", (imageWidth, imageHeight), array)
    imrgb = im.resize((20, 20), Image.ANTIALIAS)
    imgray = imrgb.convert("L")
示例7: Nao
# 需要导入模块: from naoqi import ALProxy [as 别名]
# 或者: from naoqi.ALProxy import setParam [as 别名]
#.........这里部分代码省略.........
return
# Nao walks close to ball
def walkToBall(self):
    """Walk towards the tracked red ball, restarting whenever the head yaw
    drifts past ~10 degrees (i.e. the ball moved sideways).

    NOTE(review): the recursion has no depth limit, and with the distance
    check below commented out the loop's only exit is the yaw threshold.
    """
    ballPosition = self.tracker.getPosition()
    headYawTreshold = ((math.pi*10)/180)  # 10 degrees in radians
    # Target half the forward distance plus a 5 cm offset.
    x = ballPosition[0]/2 + 0.05
    self.motion.stiffnessInterpolation("Body", 1.0, 0.1)
    self.motion.walkInit()
    self.turnToBall()
    self.motion.post.walkTo(x,0,0)  # non-blocking walk
    while True:
        headYawAngle = self.motion.getAngles("HeadYaw", False)
        if headYawAngle[0] >= headYawTreshold or headYawAngle[0] <= -headYawTreshold:
            # Ball drifted out of line: stop, re-aim, start over.
            self.motion.stopWalk()
            self.turnToBall()
            self.walkToBall()
            break
        #dist = self.getDistance()
        # if dist < 0.1:
        #     self.motion.stopWalk()
        #     self.turnToBall()
        #     self.safePosition()
        #     time.sleep(1)
        #     self.tts.say("You have 5 seconds to move the ball")
        #     time.sleep(5)
        #     # naoWalkToPosition()
        #     self.walkToPosition()
        #     self.setTopCamera()
        #     break
    return
# nao turns to ball
def turnToBall(self):
    """Rotate in place so the robot faces the tracked red ball.

    Returns False when no ball is in sight; otherwise caches the ball
    position on self.ballPosition and turns by atan(lateral/forward).
    """
    if not self.hasBall():
        return False
    self.ballPosition = self.tracker.getPosition()
    # Heading to the ball from its forward/lateral offsets.
    heading = math.atan(self.ballPosition[1] / self.ballPosition[0])
    self.motion.stiffnessInterpolation("Body", 1.0, 0.1)
    self.motion.walkInit()
    self.motion.walkTo(0, 0, heading)
# checks ball
def checkForBall(self):
    """Poll the tracker: refresh the cached position and report whether new
    tracking data arrived.

    Mirrors the original's quirk: if isNewData() yields something that is
    neither True nor False, nothing is refreshed and None is returned.
    """
    fresh = self.tracker.isNewData()
    if fresh == True or fresh == False:
        self.tracker.getPosition()
        return fresh
# has to be called after walkToBall()
def walkToPosition(self):
    """Cover half the remaining forward distance to the saved target.

    Must be called after the target position has been cached (see
    walkToBall / safePosition).
    """
    halfway = self.targetPosition[0] / 2
    self.motion.walkTo(halfway, 0, 0)
# Determine safe position
def safePosition(self):
    """Cache the current ball position as the walk target.

    Returns False when no ball is visible; otherwise stores the position
    on self.targetPosition (and returns None).
    """
    if not self.hasBall():
        return False
    self.targetPosition = self.tracker.getPosition()
# gets the distance from ball
def getDistance(self):
    """Euclidean ground distance to the ball, or None when no ball is seen."""
    if not self.hasBall():
        return None
    pos = self.tracker.getPosition()
    return math.sqrt(math.pow(pos[0], 2) + math.pow(pos[1], 2))
# setting up top camera
def setTopCamera(self):
    """Switch ALVideoDevice to the top camera (param 18 == kCameraSelectID)."""
    self.vision.setParam(18,0)
    self.currentCamera = 0
# setting up bottom camera
def setBottomCamera(self):
    """Switch ALVideoDevice to the bottom camera (param 18 == kCameraSelectID)."""
    self.vision.setParam(18,1)
    self.currentCamera = 1
# protection off to move free
def protectionOff(self):
    """Disable external collision protection so the robot can move freely."""
    self.motion.setExternalCollisionProtectionEnabled( "All", False )
    print "Protection Off"
# protection on
def protectionOn(self):
    """Re-enable external collision protection for all body parts."""
    self.motion.setExternalCollisionProtectionEnabled( "All", True )
    print "Protection On"
示例8: ImageWidget
# 需要导入模块: from naoqi import ALProxy [as 别名]
# 或者: from naoqi.ALProxy import setParam [as 别名]
class ImageWidget(QWidget):
    """
    Tiny widget to display camera images from Naoqi.
    """

    def __init__(self, IP, PORT, CameraID, parent=None):
        """
        Initialization: connect to ALVideoDevice at IP/PORT, subscribe a
        320x240 RGB stream from camera CameraID, and start a 100 ms timer
        that refreshes the widget with the latest frame.
        """
        QWidget.__init__(self, parent)
        self._image = QImage()
        self.setWindowTitle('Nao')
        self._imgWidth = 320
        self._imgHeight = 240
        self._cameraID = CameraID
        self.resize(self._imgWidth, self._imgHeight)
        # Proxy to ALVideoDevice.
        self._videoProxy = None
        # Our video module name.
        self._imgClient = ""
        # This will contain the alImage we get from Nao.
        self._alImage = None
        self._registerImageClient(IP, PORT)
        # Trigger 'timerEvent' every 100 ms.
        self.startTimer(100)

    def _registerImageClient(self, IP, PORT):
        """
        Register our video module to the robot.
        """
        self._videoProxy = ALProxy("ALVideoDevice", IP, PORT)
        resolution = vision_definitions.kQVGA  # 320 * 240
        colorSpace = vision_definitions.kRGBColorSpace
        self._imgClient = self._videoProxy.subscribe("_client", resolution, colorSpace, 5)
        # Select camera.
        self._videoProxy.setParam(vision_definitions.kCameraSelectID,
                                  self._cameraID)

    def _unregisterImageClient(self):
        """
        Unregister our naoqi video module.
        """
        if self._imgClient != "":
            self._videoProxy.unsubscribe(self._imgClient)

    def paintEvent(self, event):
        """
        Draw the QImage on screen.
        """
        painter = QPainter(self)
        painter.drawImage(painter.viewport(), self._image)

    def _updateImage(self):
        """
        Retrieve a new image from Nao.
        """
        self._alImage = self._videoProxy.getImageRemote(self._imgClient)
        self._image = QImage(self._alImage[6],  # Pixel array.
                             self._alImage[0],  # Width.
                             self._alImage[1],  # Height.
                             QImage.Format_RGB888)

    def timerEvent(self, event):
        """
        Called periodically. Retrieve a nao image, and update the widget.
        """
        self._updateImage()
        self.update()

    def __del__(self):
        """
        When the widget is deleted, we unregister our naoqi video module.
        """
        self._unregisterImageClient()
示例9: ImageWidget
# 需要导入模块: from naoqi import ALProxy [as 别名]
# 或者: from naoqi.ALProxy import setParam [as 别名]
class ImageWidget(QWidget):
"""
Tiny widget to display camera images from Naoqi.
"""
def __init__(self, IP, PORT, CameraID, parent=None):
"""
Initialization.
"""
QWidget.__init__(self, parent)
self._image = QImage()
self.setWindowTitle('Nao')
self._imgWidth = 320
self._imgHeight = 240
self._cameraID = CameraID
self.resize(self._imgWidth, self._imgHeight)
# Proxy to ALVideoDevice.
self._videoProxy = None
# Our video module name.
self._imgClient = ""
# This will contain this alImage we get from Nao.
self._alImage = None
self._registerImageClient(IP, PORT)
# Trigget 'timerEvent' every 100 ms.
self.startTimer(000.0001)#(100)
def _registerImageClient(self, IP, PORT):
"""
Register our video module to the robot.
"""
self._videoProxy = ALProxy("ALVideoDevice", IP, PORT)
# this set the video quality, higher quality consumes more bandwidth
#resolution = vision_definitions.k4VGA#1280*960
#resolution = vision_definitions.kVGA
resolution = vision_definitions.kQVGA # 320 * 240 Default [WiFi: Bad, is delay] [*TEST_ Seems ok with newer ['router':'tp-link ac1200',PC':'Eth','NAO':'WiFi'] ]
#resolution = vision_definitions.kQQVGA #__USE_THIS_SETTING__ [WiFi: Good] [Ethernet: Ok @30fps]
#resolution = vision_definitions.kQQQVGA # [WiFi: Good*, no delay @30fps, Fastest,Poor_resol]
#resolution = vision_definitions.kQQQQVGA #low
colorSpace = vision_definitions.kRGBColorSpace
#self._imgClient = self._videoProxy.subscribe("_client", resolution, colorSpace, FPS)
#self._imgClient = self._videoProxy.subscribe("_client", resolution, colorSpace, 25)
self._imgClient = self._videoProxy.subscribe("_client", resolution, colorSpace, int(GLOBAL_FPS))
#::TEST
GLOBAL_VID_RESOLUTION = int(resolution)
#print("::resolution___Printout__::vision_definitions.k4VGA::"),vision_definitions.k4VGA
#print("::resolution___Printout__::vision_definitions.kQQVGA::"),vision_definitions.kQQVGA
print ("::resolution VAR::"), resolution
# Select camera.
self._videoProxy.setParam(vision_definitions.kCameraSelectID,
self._cameraID)
def _unregisterImageClient(self):
"""
Unregister our naoqi video module.
"""
if self._imgClient != "":
self._videoProxy.unsubscribe(self._imgClient)
def paintEvent(self, event):
"""
Draw the QImage on screen.
"""
painter = QPainter(self)
painter.drawImage(painter.viewport(), self._image)
def _updateImage(self):
"""
Retrieve a new image from Nao.
"""
self._alImage = self._videoProxy.getImageRemote(self._imgClient)
self._image = QImage(self._alImage[6], # Pixel array.
self._alImage[0], # Width.
self._alImage[1], # Height.
QImage.Format_RGB888)
#self._image = QImage(self._alImage[6],200,200,QImage.Format_RGB888)#,100,100,QImage.Format_RGB888)
def timerEvent(self, event):
"""
Called periodically. Retrieve a nao image, and update the widget.
"""
self._updateImage()
self.update()
def __del__(self):
"""
When the widget is deleted, we unregister our naoqi video module.
"""
#.........这里部分代码省略.........
示例10: ALProxy
# 需要导入模块: from naoqi import ALProxy [as 别名]
# 或者: from naoqi.ALProxy import setParam [as 别名]
# NOTE(review): fragment — the naoqi/vision_definitions imports and the
# "connection" socket used below come from elsewhere in the original script.
import binascii
import time
import threading

ip = "nao.local"
port = 9559
camera = 0   # NOTE(review): this name is rebound by "def camera()" below
fps = 15.0   # NOTE(review): defined but the subscribe() below asks for 5 fps
tts = ALProxy("ALTextToSpeech", "nao.local", port)
video = ALProxy("ALVideoDevice", ip, port)
motion = ALProxy("ALMotion", ip, port)
resolution = vision_definitions.kQVGA  # 320 * 240
colorSpace = vision_definitions.kRGBColorSpace
video.setParam(vision_definitions.kCameraSelectID, camera)
cameraThread = None

def camera():
    """Stream raw frame buffers over `connection` until the peer sends a zero byte."""
    imgClient = video.subscribe("_client", resolution, colorSpace, 5)
    while ord(connection.recv(1)) > 0:
        image = video.getImageRemote(imgClient)
        pixels = image[6]  # raw pixel buffer
        connection.send(pixels)
    video.unsubscribe(imgClient)

def startCamera():
    """Create the camera streaming thread.

    NOTE(review): as written this binds a *local* cameraThread (the
    module-level one stays None) and never calls .start(); the snippet is
    truncated here, so confirm against the original before relying on it.
    """
    cameraThread = threading.Thread(target=camera)
示例11: ImageWidget
# 需要导入模块: from naoqi import ALProxy [as 别名]
# 或者: from naoqi.ALProxy import setParam [as 别名]
class ImageWidget(QWidget):
"""
Tiny widget to display camera images from Naoqi.
"""
def __init__(self, IP, PORT, CameraID, parent=None):
"""
Initialization.
"""
QWidget.__init__(self, parent)
self._image = QImage()
self.setWindowTitle('Nao')
self._imgWidth = 640
self._imgHeight = 480
self._cameraID = CameraID
self.resize(self._imgWidth, self._imgHeight)
self.count = 0
self._array = np.zeros((640,480))
self._videoProxy = None
self._imgClient = ""
self._alImage = None
self._registerImageClient(IP, PORT)
self.startTimer(100)
self.faces = []
def _registerImageClient(self, IP, PORT):
"""
Register our video module to the robot.
"""
self._videoProxy = ALProxy("ALVideoDevice", IP, PORT)
resolution = vision_definitions.kVGA
colorSpace = vision_definitions.kRGBColorSpace
self._imgClient = self._videoProxy.subscribe("_client", resolution, colorSpace, 5)
self._videoProxy.setParam(vision_definitions.kCameraSelectID, self._cameraID)
def _unregisterImageClient(self):
"""
Unregister our naoqi video module.
"""
if self._imgClient != "":
self._videoProxy.unsubscribe(self._imgClient)
def paintEvent(self, event):
"""
Draw the QImage on screen.
"""
painter = QPainter(self)
painter.drawImage(painter.viewport(), self._image)
def convertToCV(self):
"""
Convert a QImage to an array usable by OpenCV.
"""
self._array = qim.rgb_view(self._image, True)
self._array = cv2.cvtColor(self._array, cv2.COLOR_RGB2BGR)
def face(self):
"""
Detects a face in the given image.
"""
while True:
self.convertToCV()
grayscale = cv2.cvtColor(self._array, cv2.COLOR_BGR2GRAY)
self.faces = faceCascade.detectMultiScale(grayscale, scaleFactor = 1.3, minNeighbors = 4, minSize = (15,15), flags = cv2.cv.CV_HAAR_SCALE_IMAGE)
self.faces = sorted(self.faces, key=my_cool_sort)
#print self.faces
if len(self.faces) > 0:
r = self.faces[0][1]
h = self.faces[0][3]
c = self.faces[0][0]
w = self.faces[0][2]
self.track_window = (c,r,w,h)
roi = self._array[r:r+h, c:c+w]
hsv_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv_roi, np.array((0.,60.,32.)), np.array((180.,255.,255.)))
self.roi_hist = cv2.calcHist([hsv_roi], [0], mask, [180], [0,180])
cv2.normalize(self.roi_hist, self.roi_hist, 0, 255, cv2.NORM_MINMAX)
self.term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
break
else:
self._updateImage()
break
def cShift(self):
"""
Uses CamShift to track motion.
"""
hsv = cv2.cvtColor(self._array, cv2.COLOR_BGR2HSV)
dst = cv2.calcBackProject([hsv], [0], self.roi_hist, [0,180], 1)
ret, self.track_window = cv2.CamShift(dst, self.track_window, self.term_crit)
#.........这里部分代码省略.........
示例12: ALProxy
# 需要导入模块: from naoqi import ALProxy [as 别名]
# 或者: from naoqi.ALProxy import setParam [as 别名]
'''
Created on Oct 16, 2012

@author: ludo

Restore saved camera settings (from params.cfg) to both NAO cameras.
'''
from naoqi import ALProxy
import vision_definitions
import ConfigParser

IP = "127.0.0.1"
PORT = 9559
kHsyColorSpace = 6  # NOTE(review): defined but unused in this snippet

if __name__ == '__main__':
    # First, get a proxy on the video input module if you haven't already done it.
    cameraProxy = ALProxy("ALVideoDevice",IP,PORT)
    # Camera parameter IDs to restore: the resolution constant plus raw
    # numeric parameter ids 11, 12, 17, 6, 33, 0, 1, 2 (see vision_definitions).
    params = [vision_definitions.kCameraResolutionID, 11, 12, 17, 6, 33, 0, 1, 2]
    config = ConfigParser.RawConfigParser()
    config.read('params.cfg')
    # Apply the saved values to both cameras (0 = top, 1 = bottom); the
    # config sections are named "Camera0" / "Camera1" with the parameter id
    # as the option key.
    for i in [0,1]:
        cameraProxy.setParam(vision_definitions.kCameraSelectID, i)
        section = "Camera" + str(i)
        for p in params:
            cameraProxy.setParam(p, config.getint(section, str(p)))
示例13: float
# 需要导入模块: from naoqi import ALProxy [as 别名]
# 或者: from naoqi.ALProxy import setParam [as 别名]
# NOTE(review): fragment — ledsProxy, R, G, B, IP, PORT, StiffnessOn and
# temps are defined earlier in the original script.
ledsProxy.setIntensity('AllLedsRed', float(R) / 255.0)
ledsProxy.setIntensity('AllLedsGreen', float(G) / 255.0)
ledsProxy.setIntensity('AllLedsBlue', float(B) / 255.0)
# Motion
motionProxy = ALProxy("ALMotion", IP, PORT)
postureProxy = ALProxy("ALRobotPosture", IP, PORT)
StiffnessOn(motionProxy, True)
postureProxy.goToPosture("StandInit", 1.0)
motionProxy.moveInit()
motionProxy.setWalkArmsEnabled(False, False)
motionProxy.setMotionConfig([["ENABLE_FOOT_CONTACT_PROTECTION", True]])
# Camera
camProxy = ALProxy("ALVideoDevice", IP, PORT)
camProxy.setParam(18, 0)  # "kCameraSelectID", 0 : camera top, 1 : camera bottom
resolution = 0   # 0 : QQVGA, 1 : QVGA, 2 : VGA
colorSpace = 11  # RGB
videoClient = camProxy.subscribe("python_client", resolution, colorSpace, 5)
from_video = camProxy.getImageRemote(videoClient)
imageWidth, imageHeight = from_video[0], from_video[1]
# NOTE(review): under Python 2, imageWidth / 2 is integer division, so these
# "middle" coordinates are truncated before the float() conversion.
middle_x, middle_y = float(imageWidth / 2), float(imageHeight / 2)
# Speaker
c3po = ALProxy("ALTextToSpeech", IP, PORT)
for i in range(0, temps):
    # Grab the image (it is not a BGR, but a RGB)
    from_video = camProxy.getImageRemote(videoClient)
    img_nao = from_video[6]
示例14: ALProxy
# 需要导入模块: from naoqi import ALProxy [as 别名]
# 或者: from naoqi.ALProxy import setParam [as 别名]
# NOTE(review): fragment — the opening "try:" and the constants used below
# (IP, PORT, cResol, cColor, cFps and the kCamera*ID / c* values from
# cameraConfig.py) appear earlier in the original script; this span is not
# syntactically complete on its own.
camProxy = ALProxy("ALVideoDevice", IP, PORT)
print "Creating ALAudioPlayer proxy ..."
audioProxy = ALProxy("ALAudioPlayer", IP, PORT)
except Exception,e:
print "Error when creating proxy:"
print str(e)
exit(1)
#Register a Generic Video Module (G.V.M.) to the V.I.M.
camProxy.startFrameGrabber()
nameId = camProxy.subscribe("digicame3py_GVM", cResol, cColor, cFps)
#Set cameraConfs (cameraConfig.py)
print "Setting to camera configuration ..."
print "----------"
# Push every saved camera setting to the video device, one parameter id at
# a time (brightness, contrast, saturation, white balance, gain, exposure,
# the auto-* toggles, flips and lens corrections).
camProxy.setParam(kCameraBrightnessID, cBrightness)
camProxy.setParam(kCameraContrastID, cContrast)
camProxy.setParam(kCameraSaturationID, cSaturation)
camProxy.setParam(kCameraRedChromaID, cWhiteRed)
camProxy.setParam(kCameraBlueChromaID, cWhiteBlue)
camProxy.setParam(kCameraGainID, cGain)
camProxy.setParam(kCameraExposureID, cExposure)
camProxy.setParam(kCameraAutoGainID, cAutoGain)
camProxy.setParam(kCameraAutoExpositionID, cAutoExposition)
camProxy.setParam(kCameraAutoWhiteBalanceID, cAutoWhiteBalance)
camProxy.setParam(kCameraHFlipID, cHFlip)
camProxy.setParam(kCameraVFlipID, cVFlip)
camProxy.setParam(kCameraLensXID, cLensX)
camProxy.setParam(kCameraLensYID, cLensY)
示例15: NaoWalk
# 需要导入模块: from naoqi import ALProxy [as 别名]
# 或者: from naoqi.ALProxy import setParam [as 别名]
#.........这里部分代码省略.........
# after calling, NAO either has a redball in vision or there is none in
# his range of sight.
# Maybe include MCC-CALL after failure
def retrieveBall(self):
    """Search for the red ball by turning in ~100-degree steps while
    sweeping the head; turn towards the ball as soon as it is seen.

    After this returns, NAO either has a red ball in vision or none was
    found in a full turn.  (Original note: maybe include an MCC call
    after failure.)
    """
    self.motion.stiffnessInterpolation("Body", 1.0, 0.1)
    self.motion.walkInit()
    # Four headings: the current one, then three pi/1.8 (100 deg) turns.
    for turnAngle in [0,math.pi/1.8,math.pi/1.8,math.pi/1.8]:
        if turnAngle > 0:
            self.motion.walkTo(0,0,turnAngle)
        if self.hasBall():
            self.__turnToBall()
            return
        # Head sweep: two pitch levels x five yaw positions.
        for headPitchAngle in [((math.pi*29)/180),((math.pi*12)/180)]:
            self.motion.angleInterpolation("HeadPitch", headPitchAngle,0.3,True)
            for headYawAngle in [-((math.pi*40)/180),-((math.pi*20)/180),0,((math.pi*20)/180),((math.pi*40)/180)]:
                self.motion.angleInterpolation("HeadYaw",headYawAngle,0.3,True)
                time.sleep(0.3)
                if self.hasBall():
                    self.__turnToBall()
                    return
# lets the nao walk close up to the ball
# Redball in vision is mandatory to call this method!
# !! NEED TO INCLUDE MCC-CALL TO MOVE NXT TO NEXT POSITION IN LINE 85 !!
def walkUpToBall(self):
    """Walk up to the tracked red ball (a ball must be in vision).

    Walks half the remaining distance at a time; if the head yaw drifts
    past ~10 degrees the walk stops, the robot re-aims and recurses.
    Within 0.65 m it stops, caches the target position, warns the user,
    then finishes with walkToPosition() and switches to the top camera.
    (Original notes: the MCC should trigger the NXT move and
    walkToPosition() instead of doing it here.)
    """
    ballPosi = self.tracker.getPosition()
    headYawTreshold = ((math.pi*10)/180)  # 10 degrees in radians
    x = ballPosi[0]/2 + 0.05  # half the forward distance plus 5 cm
    self.motion.stiffnessInterpolation("Body", 1.0, 0.1)
    self.motion.walkInit()
    self.__turnToBall()
    self.motion.post.walkTo(x,0,0)  # non-blocking walk
    while True:
        headYawAngle = self.motion.getAngles("HeadYaw", False)
        if headYawAngle[0] >= headYawTreshold or headYawAngle[0] <= -headYawTreshold:
            # Ball drifted sideways: stop, re-aim, start over.
            self.motion.stopWalk()
            self.__turnToBall()
            self.walkUpToBall()
            break
        dist = self.__getDistance()
        if dist < 0.65:
            self.motion.stopWalk()
            self.__turnToBall()
            self.__safePosition()
            # !! (original) notify the MCC here so the NXT moves on
            time.sleep(1)
            self.tts.say("You have 5 seconds to get this Lego out of my way")
            time.sleep(5)
            # (original) walkToPosition() should be triggered by the MCC
            # and removed from here
            self.walkToPosition()
            self.__setTopCamera()
            break
    return
# has to be called after walkUpToBall() and the nxt`s gone
# to the next position to make the last few steps
def walkToPosition(self):
    """Cover half the remaining forward distance to the saved target.

    Has to be called after walkUpToBall() cached self.targetPosition and
    the NXT has moved to its next position.
    """
    halfway = self.targetPosition[0] / 2
    self.motion.walkTo(halfway, 0, 0)
def __checkForBall(self):
    """Poll the tracker: refresh the cached position and report whether new
    tracking data arrived.

    Mirrors the original's quirk: a value that is neither True nor False
    skips the refresh and yields None.
    """
    fresh = self.tracker.isNewData()
    if fresh == True or fresh == False:
        self.tracker.getPosition()
        return fresh
def __safePosition(self):
    """Cache the current ball position as the walk target.

    Returns False when no ball is visible; otherwise stores the position
    on self.targetPosition (and returns None).
    """
    if not self.hasBall():
        return False
    self.targetPosition = self.tracker.getPosition()
def __setTopCamera(self):
    """Switch ALVideoDevice to the top camera (param 18 == kCameraSelectID)."""
    self.vision.setParam(18,0)
    self.currentCam = 0
def __setBottomCamera(self):
    """Switch ALVideoDevice to the bottom camera (param 18 == kCameraSelectID)."""
    self.vision.setParam(18,1)
    self.currentCam = 1
def __getDistance(self):
    """Euclidean ground distance to the ball, or None when no ball is seen."""
    if not self.hasBall():
        return None
    pos = self.tracker.getPosition()
    return math.sqrt(math.pow(pos[0], 2) + math.pow(pos[1], 2))
def __turnToBall(self):
    """Rotate in place so the robot faces the tracked red ball.

    Returns False when no ball is in sight (original note, translated from
    German: possibly call the MCC here to trigger retrieveBall).
    """
    if not self.hasBall():
        return False
    self.ballPosition = self.tracker.getPosition()
    # Heading to the ball from its forward/lateral offsets.
    heading = math.atan(self.ballPosition[1] / self.ballPosition[0])
    self.motion.stiffnessInterpolation("Body", 1.0, 0.1)
    self.motion.walkInit()
    self.motion.walkTo(0, 0, heading)