

Python ALProxy.getTransform Method Code Examples

This article collects typical usage examples of the Python method naoqi.ALProxy.getTransform. If you have been wondering exactly what ALProxy.getTransform does in Python, or how to use it, the curated examples below should help. You can also explore further usage examples of naoqi.ALProxy, the class this method belongs to.


The following presents 15 code examples of the ALProxy.getTransform method, sorted by popularity by default. You can upvote the examples you find useful; your votes help the site recommend better Python code examples.
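Before diving into the examples, here is a minimal sketch of the basic call pattern. It assumes a reachable NAO robot (the IP address below is a placeholder) and the motion module that ships with the NAOqi SDK:

# -*- coding: utf-8 -*-
# Minimal sketch: the IP below is a placeholder, adjust for your robot.
from naoqi import ALProxy
import motion

motionProxy = ALProxy("ALMotion", "127.0.0.1", 9559)

# getTransform(name, frame, useSensorValues) returns a flat list of
# 16 floats: a 4x4 homogeneous transform matrix, row by row.
tf = motionProxy.getTransform("Head", motion.FRAME_TORSO, True)
print tf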

Example 1: main

# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import getTransform [as alias]
# This example also assumes: import motion, import almath
def main(robotIP, PORT=9559):
    ''' Example showing a path of two positions
    Warning: Needs a PoseInit before executing
    '''

    motionProxy  = ALProxy("ALMotion", robotIP, PORT)
    postureProxy = ALProxy("ALRobotPosture", robotIP, PORT)

    # Wake up robot
    motionProxy.wakeUp()

    # Send robot to Stand Init
    postureProxy.goToPosture("StandInit", 0.5)

    effector   = "LArm"
    frame      = motion.FRAME_TORSO
    axisMask   = almath.AXIS_MASK_VEL # just control position
    useSensorValues = False

    path = []
    currentTf = motionProxy.getTransform(effector, frame, useSensorValues)
    targetTf  = almath.Transform(currentTf)
    targetTf.r1_c4 += 0.03 # x
    targetTf.r2_c4 += 0.03 # y

    path.append(list(targetTf.toVector()))
    path.append(currentTf)

    # Go to the target and back again
    times      = [2.0, 4.0] # seconds

    motionProxy.transformInterpolations(effector, frame, path, axisMask, times)

    # Go to rest position
    motionProxy.rest()
Author: mikemcfarlane, Project: nao-emotional-framework, Source: almotion_cartesianArm1.py

Example 2: main

# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import getTransform [as alias]
# This example also assumes: import motion, import almath
def main(robotIP, PORT=9559):
    ''' Example showing a hand ellipsoid
    Warning: Needs a PoseInit before executing
    '''

    motionProxy  = ALProxy("ALMotion", robotIP, PORT)
    postureProxy = ALProxy("ALRobotPosture", robotIP, PORT)

    # Wake up robot
    motionProxy.wakeUp()

    # Send robot to Stand Init
    postureProxy.goToPosture("StandInit", 0.5)

    # Use the same robot as the motion proxy rather than a hardcoded IP.
    tts = ALProxy("ALTextToSpeech", robotIP, PORT)
    tts.say("Hello, I'm going to move my left arm.")
    tts.say("Then I will go back to the rest position!")
    
    effector   = "LArm"
    frame      = motion.FRAME_TORSO
    axisMask   = almath.AXIS_MASK_VEL    # just control position
    useSensorValues = False

    path = []
    currentTf = motionProxy.getTransform(effector, frame, useSensorValues)
    # point 1
    targetTf  = almath.Transform(currentTf)
    targetTf.r2_c4 -= 0.05 # y
    path.append(list(targetTf.toVector()))

    # point 2
    targetTf  = almath.Transform(currentTf)
    targetTf.r3_c4 += 0.04 # z
    path.append(list(targetTf.toVector()))

    # point 3
    targetTf  = almath.Transform(currentTf)
    targetTf.r2_c4 += 0.04 # y
    path.append(list(targetTf.toVector()))

    # point 4
    targetTf  = almath.Transform(currentTf)
    targetTf.r3_c4 -= 0.02 # z
    path.append(list(targetTf.toVector()))

    # point 5
    targetTf  = almath.Transform(currentTf)
    targetTf.r2_c4 -= 0.05 # y
    path.append(list(targetTf.toVector()))

    # point 6
    path.append(currentTf)

    times = [0.5, 1.0, 2.0, 3.0, 4.0, 4.5] # seconds

    motionProxy.transformInterpolations(effector, frame, path, axisMask, times)

    # Go to rest position
    motionProxy.rest()
Author: AlbertoGarcia, Project: JsObjects, Source: arms03.py

Example 3: getMarkXYZ

# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import getTransform [as alias]
# This example also assumes: import math, import almath
def getMarkXYZ(IP, portNumber, markData, landmarkSize):
    print "0"
    currentCamera = "CameraTop"
    print "1"
    # Retrieve landmark angular size in radians.
    angularSize = markData[1][0][0][3]
    print "2"
    # Compute distance to landmark.
    distanceFromCameraToLandmark = landmarkSize / ( 2 * math.tan( angularSize / 2))
    print "3"
    motionProxy = ALProxy("ALMotion", IP, portNumber)
    print "4"
    # Retrieve landmark center position in radians.
    wzCamera = markData[1][0][0][1]
    print "5"
    wyCamera = markData[1][0][0][2]
    print "6"
    # Get current camera position in NAO space.
    transform = motionProxy.getTransform(currentCamera, 2, True)
    print "7"
    transformList = almath.vectorFloat(transform)
    robotToCamera = almath.Transform(transformList)

    # Compute the rotation to point towards the landmark.
    cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(0, wyCamera, wzCamera)

    # Compute the translation to reach the landmark.
    cameraToLandmarkTranslationTransform = almath.Transform(distanceFromCameraToLandmark, 0, 0)

    # Combine all transformations to get the landmark position in NAO space.
    robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform * cameraToLandmarkTranslationTransform

    return robotToLandmark.r1_c4, robotToLandmark.r2_c4, robotToLandmark.r3_c4
Author: afranka69, Project: ShowRobbie, Source: NaoMarkModule.py
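The distance computation in this example is plain trigonometry: a landmark of physical size s that subtends an angle α on the camera satisfies s = 2·d·tan(α/2), hence d = s / (2·tan(α/2)). A quick standalone check with illustrative numbers (a 0.1 m landmark seen under 0.05 rad):

import math

# d = s / (2 * tan(alpha / 2)); the values below are made up for the check.
landmarkSize = 0.1  # meters
angularSize = 0.05  # radians
distance = landmarkSize / (2 * math.tan(angularSize / 2))
print distance      # ~2.0 meters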

Example 4: main

# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import getTransform [as alias]
# This example also assumes: import motion, import almath
def main(robotIP, PORT=9559):
    ''' Example of a cartesian foot trajectory
    Warning: Needs a PoseInit before executing
    '''

    motionProxy  = ALProxy("ALMotion", robotIP, PORT)
    postureProxy = ALProxy("ALRobotPosture", robotIP, PORT)

    # Wake up robot
    motionProxy.wakeUp()

    # Send robot to Stand Init
    postureProxy.goToPosture("StandInit", 0.5)

    frame      = motion.FRAME_WORLD
    axisMask   = almath.AXIS_MASK_ALL   # full control
    useSensorValues = False

    # Lower the Torso and move to the side
    effector = "Torso"
    initTf   = almath.Transform(
        motionProxy.getTransform(effector, frame, useSensorValues))
    deltaTf  = almath.Transform(0.0, -0.06, -0.03) # x, y, z
    targetTf = initTf*deltaTf
    path     = list(targetTf.toVector())
    times    = 2.0 # seconds
    motionProxy.transformInterpolations(effector, frame, path, axisMask, times)

    # LLeg motion
    effector = "LLeg"
    initTf = almath.Transform()

    try:
        initTf = almath.Transform(motionProxy.getTransform(effector, frame, useSensorValues))
    except Exception, errorMsg:
        print str(errorMsg)
        print "This example is not allowed on this robot."
        exit()
Author: mikemcfarlane, Project: nao-emotional-framework, Source: almotion_cartesianFoot.py

Example 5: main

# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import getTransform [as alias]
# This example also assumes: import motion, import almath
def main(robotIP, PORT=9559):
    '''
         Example showing a Hula Hoop Motion
         with the NAO cartesian control of torso
    '''

    motionProxy  = ALProxy("ALMotion", robotIP, PORT)
    postureProxy = ALProxy("ALRobotPosture", robotIP, PORT)

    # end initialize proxy, begin go to Stand Init

    # Wake up robot
    motionProxy.wakeUp()

    # Send robot to Stand Init
    postureProxy.goToPosture("StandInit", 0.5)

    # end go to Stand Init, begin define control point
    effector        = "Torso"
    frame           =  motion.FRAME_ROBOT
    axisMask        = almath.AXIS_MASK_ALL
    isAbsolute      = True
    useSensorValues = False

    currentTf = almath.Transform(motionProxy.getTransform(effector, frame, useSensorValues))

    # end define control point, begin define target

    # Define the changes relative to the current position
    dx         = 0.03                    # translation axis X (meter)
    dy         = 0.03                    # translation axis Y (meter)
    dwx        = 8.0*almath.TO_RAD       # rotation axis X (rad)
    dwy        = 8.0*almath.TO_RAD       # rotation axis Y (rad)

    # point 01 : forward  / bend backward
    target1Tf = almath.Transform(currentTf.r1_c4, currentTf.r2_c4, currentTf.r3_c4)
    target1Tf *= almath.Transform(dx, 0.0, 0.0)
    target1Tf *= almath.Transform().fromRotY(-dwy)

    # point 02 : right    / bend left
    target2Tf = almath.Transform(currentTf.r1_c4, currentTf.r2_c4, currentTf.r3_c4)
    target2Tf *= almath.Transform(0.0, -dy, 0.0)
    target2Tf *= almath.Transform().fromRotX(-dwx)

    # point 03 : backward / bend forward
    target3Tf = almath.Transform(currentTf.r1_c4, currentTf.r2_c4, currentTf.r3_c4)
    target3Tf *= almath.Transform(-dx, 0.0, 0.0)
    target3Tf *= almath.Transform().fromRotY(dwy)

    # point 04 : left     / bend right
    target4Tf = almath.Transform(currentTf.r1_c4, currentTf.r2_c4, currentTf.r3_c4)
    target4Tf *= almath.Transform(0.0, dy, 0.0)
    target4Tf *= almath.Transform().fromRotX(dwx)

    path = []
    path.append(list(target1Tf.toVector()))
    path.append(list(target2Tf.toVector()))
    path.append(list(target3Tf.toVector()))
    path.append(list(target4Tf.toVector()))

    path.append(list(target1Tf.toVector()))
    path.append(list(target2Tf.toVector()))
    path.append(list(target3Tf.toVector()))
    path.append(list(target4Tf.toVector()))

    path.append(list(target1Tf.toVector()))
    path.append(list(currentTf.toVector()))

    timeOneMove  = 0.5 #seconds
    times = []
    for i in range(len(path)):
        times.append((i+1)*timeOneMove)

    # end define target, begin call motion api

    # call the cartesian control API

    motionProxy.transformInterpolations(effector, frame, path, axisMask, times)

    # Go to rest position
    motionProxy.rest()
Author: mikemcfarlane, Project: nao-emotional-framework, Source: almotion_hulaHoop.py

Example 6: ALProxy

# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import getTransform [as alias]
#-*- coding: utf-8 -*-
from naoqi import ALProxy
import motion
import argparse

# ----------> Connect to robot <----------
robot_ip = "192.168.1.100"
robot_port = 9559			# default port : 9559

motionProxy = ALProxy("ALMotion", robot_ip, robot_port)

# ----------> get Transform and print <----------

# Example showing how to get the end of the right arm as a transform
# represented in torso space. The result is a 4 by 4 matrix composed
# of a 3*3 rotation matrix and a column vector of positions.

name  = 'RArm'
frame  = motion.FRAME_TORSO
useSensorValues  = True
result = motionProxy.getTransform(name, frame, useSensorValues)
for i in range(0, 4):
    for j in range(0, 4):
        print result[4*i + j],
    print ''

#R R R x
#R R R y
#R R R z
#0 0 0 1
Author: axmtec0eric, Project: Nao-Robot, Source: get_transform.py
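Since getTransform returns the matrix as a flat list, a small helper to pull out the translation column can be handy. A sketch (the helper name is ours, not part of the NAOqi API):

def transform_position(flat_tf):
    ''' Extract the (x, y, z) translation from a flat 16-float transform.
    The translation is the fourth column of the 4x4 matrix, i.e.
    indices 3, 7 and 11 of the row-major list.
    '''
    return flat_tf[3], flat_tf[7], flat_tf[11]

# Hypothetical usage with the result above:
# x, y, z = transform_position(result)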

Example 7: NaoMotionController

# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import getTransform [as alias]
# This class snippet also assumes: import sys, import traceback, import almath,
# and a listify helper defined elsewhere in the project

#......... part of the code omitted here .........
        except:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            traceback.print_exception(exc_type, exc_value, exc_traceback)
            sys.exit(1)

    def _init_effector(self):
        """Initialize the position and orientation of all effectors."""
        for x in self._effectorNames:
            self._effector[x] = 0.0

    def _init_joint(self):
        """Initialize the position and orientation of all joints."""
        for x in self._jointNames:
            self._joint[x] = 0.0

    def _init_camera(self):
        """Initialize the position and orientation of all cameras."""
        for x in self._cameraNames:
            self._camera[x] = 0.0

    def _update_effector(self):
        """Update the position and orientation of all effectors."""
        for x in self._effectorNames:
            self._effector[x] = self._motion_proxy.getPosition(x, self._default_frame, self._use_sensor_values)

    def _update_joint(self):
        """Update the position and orientation of all joints."""
        for x in self._jointNames:
            self._joint[x] = self._motion_proxy.getPosition(x, self._default_frame, self._use_sensor_values)

    def _update_camera(self):
        """Update the position and orientation of all cameras."""
        for x in self._cameraNames:
            self._camera[x] = self._motion_proxy.getPosition(x, self._default_frame, self._use_sensor_values)

    def _calculate_target_tf(self, effector, delta_tf, frame):
        """Calculate the target transform.

        Calculate the target transform from the current position of the effector
        and the relative position change.

        Parameters
        ----------
        effector : str
            String of effector name.
        delta_tf : ndarray[float]
            The relative position change.
        frame : {FRAME_TORSO, FRAME_WORLD, FRAME_ROBOT}
            The task frame.

        """
        init_tf = almath.Transform(self._motion_proxy.getTransform(effector, frame, self._use_sensor_values))

        delta_tf = listify(delta_tf)
        d = delta_tf.pop(0)

        target_tf = init_tf * almath.Transform().fromPosition(*d)
        path = list(target_tf.toVector())

        if delta_tf:
            path = [path]
            for d in delta_tf:
                target_tf *= almath.Transform().fromPosition(*d)
                path.append(list(target_tf.toVector()))
        return path

    def _calculate_target_pos(self, effector, delta_pos, frame):
        """Calculate the target position.

        Calculate the target position from the current position of the effector
        and the relative position change.

        Parameters
        ----------
        effector : str
            String of effector name.
        delta_pos : ndarray[float]
            The relative position change.
        frame : {FRAME_TORSO, FRAME_WORLD, FRAME_ROBOT}
            The task frame.

        """
        init_pos = self._motion_proxy.getPosition(effector, frame, self._use_sensor_values)

        delta_pos = listify(delta_pos)
        d = delta_pos.pop(0)

        target_pos = almath.Position6D(init_pos)
        target_pos += almath.Position6D(d)
        path = list(target_pos.toVector())

        if delta_pos:
            path = [path]
            for d in delta_pos:
                target_pos += almath.Position6D(d)
                path.append(list(target_pos.toVector()))
        return path

    def _generate_id(self, pip, pport):
        return "%s.%s:%s:%i:%i" % (self.__class__.__module__, self.__class__.__name__, pip, pport, next(self._ids))
Author: evenmarbles, Project: naobot, Source: kinematics.py
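_calculate_target_tf above chains relative deltas into a waypoint path by repeated matrix multiplication. A standalone sketch of the same idea, assuming almath is available and init_tf is a flat 16-float list as returned by getTransform (the function name is ours):

import almath

def chain_deltas(init_tf, deltas):
    ''' Chain relative (dx, dy, dz) translations into a waypoint path.
    Each delta is applied on top of the previous target, mirroring the
    accumulation done in _calculate_target_tf.
    '''
    path = []
    target_tf = almath.Transform(init_tf)
    for dx, dy, dz in deltas:
        target_tf *= almath.Transform(dx, dy, dz)
        path.append(list(target_tf.toVector()))
    return path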

Example 8: main

# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import getTransform [as alias]
# This example also assumes: import motion, import almath
def main(robotIP, PORT=9559):
    ''' Simultaneously control three effectors:
    the Torso, the Left Arm and the Right Arm
    Warning: Needs a PoseInit before executing
    '''

    motionProxy  = ALProxy("ALMotion", robotIP, PORT)
    postureProxy = ALProxy("ALRobotPosture", robotIP, PORT)

    # Wake up robot
    motionProxy.wakeUp()

    # Send robot to Stand Init
    postureProxy.goToPosture("StandInit", 0.5)

    frame      = motion.FRAME_WORLD
    coef       = 0.5                   # motion speed
    times      = [coef, 2.0*coef, 3.0*coef, 4.0*coef]
    useSensorValues = False

    # Relative movement between current and desired positions
    dy         = +0.03                 # translation axis Y (meters)
    dz         = -0.03                 # translation axis Z (meters)
    dwx        = +8.0*almath.TO_RAD   # rotation axis X (radians)

    # Motion of Torso with post process
    effector   = "Torso"

    path = []
    initTf = almath.Transform(motionProxy.getTransform(effector, frame, useSensorValues))
    # point 1
    deltaTf  = almath.Transform(0.0, -dy, dz)*almath.Transform().fromRotX(-dwx)
    targetTf = initTf*deltaTf
    path.append(list(targetTf.toVector()))

    # point 2
    path.append(list(initTf.toVector()))

    # point 3
    deltaTf  = almath.Transform(0.0, dy, dz)*almath.Transform().fromRotX(dwx)
    targetTf = initTf*deltaTf
    path.append(list(targetTf.toVector()))

    # point 4
    path.append(list(initTf.toVector()))

    axisMask   = almath.AXIS_MASK_ALL  # control all the effector axes
    motionProxy.post.transformInterpolations(effector, frame, path,
                                           axisMask, times)

    # Motion of Arms with block process
    frame     = motion.FRAME_TORSO
    axisMask  = almath.AXIS_MASK_VEL  # control just the position
    times     = [1.0*coef, 2.0*coef]  # seconds

    # Motion of Right Arm during the first half of the Torso motion
    effector  = "RArm"

    path = []
    currentTf = motionProxy.getTransform(effector, frame, useSensorValues)
    targetTf  = almath.Transform(currentTf)
    targetTf.r2_c4 -= 0.04 # y
    path.append(list(targetTf.toVector()))
    path.append(currentTf)

    motionProxy.transformInterpolations(effector, frame, path, axisMask, times)

    # Motion of Left Arm during the last half of the Torso motion
    effector   = "LArm"

    path = []
    currentTf = motionProxy.getTransform(effector, frame, useSensorValues)
    targetTf  = almath.Transform(currentTf)
    targetTf.r2_c4 += 0.04 # y
    path.append(list(targetTf.toVector()))
    path.append(currentTf)

    motionProxy.transformInterpolations(effector, frame, path, axisMask, times)

    # Go to rest position
    motionProxy.rest()
Author: davidlavy88, Project: nao_and_opencv-python-, Source: almotion_cartesianTorsoArm1.py
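The torso motion above is launched through the proxy's post object, which starts the call in the background and returns a task id immediately, while the arm motions use the plain blocking form; the arms therefore move while the torso interpolation is still running. A minimal sketch of that pattern (placeholder IP):

from naoqi import ALProxy

motionProxy = ALProxy("ALMotion", "127.0.0.1", 9559)
motionProxy.wakeUp()

# Blocking form: returns only once the motion has finished.
motionProxy.moveTo(0.1, 0.0, 0.0)

# Non-blocking form: .post starts the motion in the background and
# returns a task id right away, so other commands can run in parallel.
taskId = motionProxy.post.moveTo(0.1, 0.0, 0.0)

# ... issue other commands here ...

# Block until the background task finishes.
motionProxy.wait(taskId, 0)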

Example 9: main

# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import getTransform [as alias]
# This example also assumes: import motion, import almath
def main(robotIP, PORT=9559):
    '''
        Example of a whole body multiple effectors control "LArm", "RArm" and "Torso"
        Warning: Needs a PoseInit before executing
                 Whole body balancer must be deactivated at the end of the script
    '''

    motionProxy  = ALProxy("ALMotion", robotIP, PORT)
    postureProxy = ALProxy("ALRobotPosture", robotIP, PORT)

    # end initialize proxy, begin go to Stand Init

    # Wake up robot
    motionProxy.wakeUp()

    # Send robot to Stand
    id = postureProxy.post.goToPosture("Stand", 0.5)
    postureProxy.wait(id, 0)

    # end go to Stand Init, begin initialize whole body

    # Enable Whole Body Balancer
    isEnabled  = True
    motionProxy.wbEnable(isEnabled)

    # Legs are constrained fixed
    stateName  = "Fixed"
    supportLeg = "Legs"
    motionProxy.wbFootState(stateName, supportLeg)

    # Constraint Balance Motion
    isEnable   = True
    supportLeg = "Legs"
    motionProxy.wbEnableBalanceConstraint(isEnable, supportLeg)

    # end initialize whole body, define arms motions

    useSensorValues = False

    # ***************************************************************

    # effectorList = ["LArm"]
    # arm = "LArm"
    # axisDirection = 1    # +1 for LArm, -1 for RArm
    # frame = motion.FRAME_WORLD
    # currentTf = motionProxy.getTransform(arm, frame, useSensorValues)

    for i in range(5):
        print i

        # Arms motion
        effectorList = ["LArm"]
        arm = "LArm"
        axisDirection = 1    # +1 for LArm, -1 for RArm

        frame = motion.FRAME_WORLD
        pathArm = []
        
        currentTf = motionProxy.getTransform(arm, frame, useSensorValues)

        # 1 - arm ready out front
        target1Tf = almath.Transform(currentTf)
        target1Tf.r1_c4 += 0.05 # x
        target1Tf.r2_c4 += 0.00 * axisDirection # y
        target1Tf.r3_c4 += 0.00 # z

        # 2 - arm back
        target2Tf = almath.Transform(currentTf)
        target2Tf.r1_c4 += 0.00
        target2Tf.r2_c4 += 0.15
        target2Tf.r3_c4 += 0.15

        # 3 - arm to ball using ball.y
        target3Tf = almath.Transform(currentTf)
        target3Tf.r1_c4 += 0.05
        target3Tf.r2_c4 += 0.00 * axisDirection
        target3Tf.r3_c4 += 0.10

        pathArm.append(list(target1Tf.toVector()))
        pathArm.append(list(target2Tf.toVector()))
        pathArm.append(list(target3Tf.toVector()))

        pathList = [pathArm]

        axisMaskList = [almath.AXIS_MASK_VEL]

        coef = 1.5
        # One list of times per effector, matching pathList's structure.
        timesList = [[coef * (j + 1) for j in range(len(pathArm))]]

        # And move!
        id = motionProxy.post.transformInterpolations(effectorList, frame, pathList, axisMaskList, timesList)
        motionProxy.wait(id, 0)

        # It is necessary to return the robot to the start position so the next target
        # positions are not added to the last move position.
        # id = postureProxy.post.goToPosture("Stand", 0.75)
        # postureProxy.wait(id, 0)
#......... part of the code omitted here .........
Author: mikemcfarlane, Project: Code_sprints, Source: almotion_wbMultipleEffectors.py

Example 10: main

# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import getTransform [as alias]
# This example also assumes: import motion, import almath
def main(robotIP, PORT=9559):
    ''' Move the torso and keep arms fixed in nao space
    Warning: Needs a PoseInit before executing
    '''

    motionProxy  = ALProxy("ALMotion", robotIP, PORT)
    postureProxy = ALProxy("ALRobotPosture", robotIP, PORT)

    # Wake up robot
    motionProxy.wakeUp()

    # Send robot to Stand Init
    postureProxy.goToPosture("StandInit", 0.5)

    frame      = motion.FRAME_ROBOT
    useSensorValues = False

    effectorList = ["LArm", "RArm"]

    # Motion of Arms with block process
    axisMaskList = [almath.AXIS_MASK_VEL, almath.AXIS_MASK_VEL]

    timesList    = [[1.0], [1.0]] # seconds

    # LArm path
    pathLArm = []
    targetTf  = almath.Transform(motionProxy.getTransform("LArm", frame, useSensorValues))
    targetTf.r2_c4 -= 0.04 # y
    pathLArm.append(list(targetTf.toVector()))

    # RArm path
    pathRArm = []
    targetTf  = almath.Transform(motionProxy.getTransform("RArm", frame, useSensorValues))
    targetTf.r2_c4 += 0.04 # y
    pathRArm.append(list(targetTf.toVector()))

    pathList = []
    pathList.append(pathLArm)
    pathList.append(pathRArm)

    motionProxy.transformInterpolations(effectorList, frame, pathList,
                                       axisMaskList, timesList)

    effectorList = ["LArm", "RArm", "Torso"]

    # Motion of Arms and Torso with block process
    axisMaskList = [almath.AXIS_MASK_VEL,
                    almath.AXIS_MASK_VEL,
                    almath.AXIS_MASK_ALL]

    timesList    = [[4.0],                  # LArm  in seconds
                    [4.0],                  # RArm  in seconds
                    [1.0, 2.0, 3.0, 4.0]]   # Torso in seconds

    # LArm path
    pathLArm = []
    pathLArm.append(motionProxy.getTransform("LArm", frame, useSensorValues))

    # RArm path
    pathRArm = []
    pathRArm.append(motionProxy.getTransform("RArm", frame, useSensorValues))

    # Torso path
    pathTorso = []
    currentTf = motionProxy.getTransform("Torso", frame, useSensorValues)

    # 1
    targetTf  = almath.Transform(currentTf)
    targetTf.r2_c4 += 0.04 # y
    pathTorso.append(list(targetTf.toVector()))

    # 2
    targetTf  = almath.Transform(currentTf)
    targetTf.r1_c4 -= 0.03 # x
    pathTorso.append(list(targetTf.toVector()))

    # 3
    targetTf  = almath.Transform(currentTf)
    targetTf.r2_c4 -= 0.04 # y
    pathTorso.append(list(targetTf.toVector()))

    # 4
    pathTorso.append(currentTf)

    pathList = []
    pathList.append(pathLArm)
    pathList.append(pathRArm)
    pathList.append(pathTorso)

    motionProxy.transformInterpolations(effectorList, frame, pathList,
                                       axisMaskList, timesList)

    # Go to rest position
    motionProxy.rest()
Author: mikemcfarlane, Project: nao-emotional-framework, Source: almotion_cartesianTorsoArm2.py

Example 11: planner_move

# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import getTransform [as alias]
# This class snippet also assumes: import math, import almath, a module-level
# mark list, and motionProxy / memoryProxy attributes set up elsewhere

#......... part of the code omitted here .........
            return
        
    def compass(self, deviation):
        if deviation[0][0] > .3 or deviation[0][0] < -.3:
            self.motionProxy.moveToward(0, 0, 0)
            self.motionProxy.moveTo(0, 0, -deviation[0][0])
            self.motionProxy.moveToward(.5, .02, -.03)
        if deviation[0][1] > .3 or deviation[0][1] < -.3:
            self.motionProxy.moveToward(0, 0, 0)
            self.motionProxy.moveTo(0, 0, -deviation[0][1])
            self.motionProxy.moveToward(.5, .02, -.03)
        return
    
    def landmark(self, detection):
        if detection == []:
            return []
        elif detection[1][0][0][3] > .14 and detection[1][0][0][4] > .14:  # both size fields above threshold
            if int(str(detection[1][0][1]).strip('[]')) == mark[0]:
                #return ["event", detection[1][0][1]]
                return [[0, []], [1, [int(str(detection[1][0][1]).strip('[]'))]], [1, [int(str(detection[1][0][1]).strip('[]'))]]]
            elif int(str(detection[1][0][1]).strip('[]')) == mark[1]:
                return []
            elif int(str(detection[1][0][1]).strip('[]')) in mark[2]:
                return [[0, []], [1, [int(str(detection[1][0][1]).strip('[]'))]], [1, [int(str(detection[1][0][1]).strip('[]'))]]]
        else:
            self.landmarkYmovement(detection)
            return []
    
    def landmarkYmovement(self, detection):
        zCamera = detection[1][0][0][1]
        yCamera = detection[1][0][0][2]
        angularSize = detection[1][0][0][3]
        distance = .06 / ( 2 * math.tan( angularSize / 2))
        transform = self.motionProxy.getTransform("CameraTop", 2, True)
        transformList = almath.vectorFloat(transform)
        robotToCamera = almath.Transform(transformList)
        cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(0, yCamera, zCamera)
        cameraToLandmarkTranslationTransform = almath.Transform(distance, 0, 0)
        robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform * cameraToLandmarkTranslationTransform
        if math.fabs(robotToLandmark.r2_c4) > .2 and math.fabs(robotToLandmark.r2_c4) < .45:
            self.motionProxy.moveToward(0, 0, 0)
            self.motionProxy.moveTo(0, robotToLandmark.r2_c4, 0)
            firstDetection = robotToLandmark.r2_c4
            secondDetection = self.memoryProxy.getData("LandmarkDetected", 0)
            if not secondDetection == []:
                zCamera = secondDetection[1][0][0][1]
                yCamera = secondDetection[1][0][0][2]
                angularSize = secondDetection[1][0][0][3]
                distance = .06 / ( 2 * math.tan( angularSize / 2))
                transform = self.motionProxy.getTransform("CameraTop", 2, True)
                transformList = almath.vectorFloat(transform)
                robotToCamera = almath.Transform(transformList)
                cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(0, yCamera, zCamera)
                cameraToLandmarkTranslationTransform = almath.Transform(distance, 0, 0)
                robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform * cameraToLandmarkTranslationTransform
                if not math.fabs(robotToLandmark.r2_c4) < .2:  # lateral offset still above 0.2 m
                    if robotToLandmark.r2_c4 < 0:
                        self.motionProxy.moveTo(0, 0, ((robotToLandmark.r2_c4 / 1.06) * 5))
                    else:
                        self.motionProxy.moveTo(0, 0, -((robotToLandmark.r2_c4 / 1.06) * 5))
            else:
                if firstDetection < 0:
                    self.motionProxy.moveTo(0, 0, ((firstDetection / 1.06) * 5))
                else:
                    self.motionProxy.moveTo(0, 0, -((firstDetection / 1.06) * 5))
            self.motionProxy.moveToward(.5, .02, -.03)
Author: reesmanp, Project: Nao, Source: planner_move.py

Example 12: ALProxy

# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import getTransform [as alias]
# This snippet also assumes: import almath, plus robot_ip and robot_port
# defined as in Example 6
tts = ALProxy("ALTextToSpeech", robot_ip, robot_port)
motion = ALProxy("ALMotion", robot_ip, robot_port)
posture = ALProxy("ALRobotPosture", robot_ip, robot_port)

# ----------> set a posture <----------
motion.wakeUp()
posture.goToPosture("StandInit", 0.5)

# ----------> Retrieve a transform matrix using ALMotion <----------
chainName = "RArm"
frame = 1 # FRAME_WORLD
useSensors = True

# Retrieve current transform from ALMotion.
# Convert it to a transform matrix for ALMath.
origTransform = almath.Transform(motion.getTransform(chainName, frame, useSensors))
# std::vector<float> ALMotionProxy::getTransform(
# 			const std::string& name, const int& frame, const bool& useSensorValues);
# Gets a homogeneous transform relative to the given frame.
# Axis definition: the x axis points toward the robot's front, the y axis from right to left, and the z axis is vertical.
# 	
#	name - Name of the item. Could be: any joint or chain or sensor
# 	frame -  Task frame {FRAME_TORSO = 0, FRAME_WORLD = 1, FRAME_ROBOT = 2}.
# 	useSensorValues - If true, the sensor values will be used to determine the position.
#
# Returns:	
#	Vector of 16 floats corresponding to the values of the matrix, line by line.

print "Original transform"
print origTransform
Author: axmtec0eric, Project: Nao-Robot, Source: transform.py

Example 13: ALProxy

# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import getTransform [as alias]
# This snippet also assumes: import math, import almath, plus memoryProxy,
# ip, currentCamera and landmarkTheoreticalSize defined beforehand
markData = memoryProxy.getData("LandmarkDetected")

# Retrieve landmark center position in radians.
wzCamera = markData[1][0][0][1]
wyCamera = markData[1][0][0][2]

# Retrieve landmark angular size in radians.
angularSize = markData[1][0][0][3]

# Compute distance to landmark.
distanceFromCameraToLandmark = landmarkTheoreticalSize / ( 2 * math.tan( angularSize / 2))

motionProxy = ALProxy("ALMotion", ip, 9559)

# Get current camera position in NAO space.
transform = motionProxy.getTransform(currentCamera, 2, True)
transformList = almath.vectorFloat(transform)
robotToCamera = almath.Transform(transformList)

# Compute the rotation to point towards the landmark.
cameraToLandmarkRotationTransform = almath.Transform_from3DRotation(0, wyCamera, wzCamera)

# Compute the translation to reach the landmark.
cameraToLandmarkTranslationTransform = almath.Transform(distanceFromCameraToLandmark, 0, 0)

# Combine all transformations to get the landmark position in NAO space.
robotToLandmark = robotToCamera * cameraToLandmarkRotationTransform * cameraToLandmarkTranslationTransform

print "x " + str(robotToLandmark.r1_c4) + " (in meters)"
print "y " + str(robotToLandmark.r2_c4) + " (in meters)"
print "z " + str(robotToLandmark.r3_c4) + " (in meters)"
Author: AndresGalaviz, Project: NAO, Source: vision_localization.py

Example 14: ALProxy

# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import getTransform [as alias]
# This snippet also assumes: import motion, import numpy as np, a
# transformations module providing euler_matrix, and helpers space_to_str,
# transform_to_str, np_mat_to_str, cam_rotation_matrix defined elsewhere
 ip = "ra.local"
 port = 9559
 proxy = ALProxy("ALMotion", ip, port)
 print "motionproxy ready"
 space = motion.SPACE_TORSO
 chainName = "Head"
 currentSensor = proxy.getPosition(chainName, space, True)
 current = proxy.getPosition(chainName, space, False)
 print "Position of %s in space %s:"%(chainName, space_to_str(space))
 print currentSensor
 print current
 rpy = currentSensor[3:]
 print rpy
 DCM = apply(transformations.euler_matrix,rpy)
 print DCM
 tTHSensor = proxy.getTransform(chainName, space, True)
 tTH = proxy.getTransform(chainName, space, False)
 print "Transform %s to %s:"%(space_to_str(space),chainName)
 print transform_to_str(tTHSensor)
 #print transform_to_str(tTH)
 chainName = "CameraTop"
 tTCSensor = proxy.getTransform(chainName, space, True)
 tTC = proxy.getTransform(chainName, space, False)
 print "Transform %s to %s:"%(space_to_str(space),chainName)
 print transform_to_str(tTCSensor)
 print
 T = np.matrix([tTCSensor[0:4],tTCSensor[4:8],tTCSensor[8:12],tTCSensor[12:]])
 #print cam_rotation_matrix()
 DCM = T*cam_rotation_matrix()
 print "Transform %s to %s (with rotated coordinate frame):"%(space_to_str(space),chainName)
 print np_mat_to_str(DCM)
Author: Aharobot, Project: nao_robot, Source: test_cam_transform.py
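The cam_rotation_matrix() helper is not shown in this snippet. A plausible reconstruction (our assumption) is the same axis permutation that Example 15 below builds explicitly as T_alCV, mapping NAO's x-forward / y-left / z-up frame onto the camera's optical z-forward convention:

import numpy as np

def cam_rotation_matrix():
    ''' Assumed reconstruction of the missing helper.
    Maps NAO's frame (x forward, y left, z up) onto the camera's
    optical frame; matches the T_alCV matrix in Example 15.
    '''
    R = np.zeros((4, 4), dtype=np.float64)
    R[0, 2] = 1.0   # Z' = X
    R[1, 0] = -1.0  # X' = -Y
    R[2, 1] = -1.0  # Y' = -Z
    R[3, 3] = 1.0   # homogeneous coordinates
    return np.matrix(R)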

Example 15: NaoXO

# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import getTransform [as alias]
# This class snippet also assumes: import numpy as np, import cv2, and
# instance attributes (self.img, self.intersections, self.objPoints,
# self.camMatrix, self.motion, self.imgproc, ...) initialized elsewhere

#......... part of the code omitted here .........
            return False
        if not len(self.intersections)==4:
            #print("Wrong number of intersections %s" % len(self.intersections))
            return False
        image_intersections = self.draw_intersections(self.img.copy(), self.intersections)
        cv2.imshow("Intersections", image_intersections)
        cv2.waitKey(1)
        ## otherwise, calculate position by using solvePnP from OpenCV
        ## create image points from intersections
        imgPoints = np.zeros((4,2), dtype=np.float64)
        for i in range(4):
            imgPoints[i,0] = self.intersections[i][0]
            imgPoints[i,1] = self.intersections[i][1]
        ## object points are created by calling setFieldSize method and can be used for solving PnP
        _, self.rvec, self.tvec = cv2.solvePnP(self.objPoints, imgPoints, self.camMatrix, distCoeffs = None)
        ## rotation matrix
        R = cv2.cv.CreateMat(3,3,cv2.cv.CV_64FC1)
        ## convert rvec to R
        cv2.cv.Rodrigues2(cv2.cv.fromarray(self.rvec), R)
        
        ## compose homogeneous transformation matrix        
        Rt = np.zeros((3,4), dtype=np.float64)
        T = np.zeros((4,4), dtype = np.float64)
        T[3,3]=1.0
        for i in range(3):
            for j in range(3):
                T[i,j]=R[i,j]
                Rt[i,j]=R[i,j]
            T[i,3]=self.tvec[i]
            Rt[i,3]=self.tvec[i]
            
                
        ## obtain transformation matrix of the camera in FRAME_ROBOT
        T_camAL = self.motion.getTransform("CameraBottom",2,True)
        ## convert to numpy format, needed for matrix multiplication
        T_camAL = np.asarray(T_camAL)
        T_camAL = np.reshape(T_camAL, (4,4))
        
        ## compose transformation from ALMotion coordinate frame to OpenCV coordinate frame
        T_alCV = np.zeros((4,4), dtype = np.float64)
        T_alCV[0,2] =  1    # this means Z'=X
        T_alCV[1,0] = -1    # this means X'=-Y
        T_alCV[2,1] = -1    # this means Y'=-Z
        T_alCV[3,3] =  1    # homogeneous coordinates!
        
        ## transformation of the OpenCV camera frame in FRAME_ROBOT
        T_camCV = np.dot(T_camAL, T_alCV)
        
        ## transformation of the playing field in FRAME_ROBOT
        self.T_field = np.dot(T_camCV, T)
        
        ## compose projection matrix
        self.P = np.dot(self.camMatrix, Rt)
        
        ## calculate bounds of the playing field
        self.imgproc.getBounds(self.P, self.cornerPoints)        
        
        return True
    
    def cleanup(self):
        '''
        Cleaning up when exiting
        '''
        
        ## unsubscribe from video device module
        self.videoProxy.unsubscribe(self.video)
Author: larics, Project: nao-xo, Source: nao_xo.py


Note: The naoqi.ALProxy.getTransform examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright remains with the original authors. Refer to each project's license before redistributing or using the code; do not repost without permission.