Python ALProxy.subscribeCamera Method Code Examples

This article collects typical usage examples of the Python method naoqi.ALProxy.subscribeCamera. If you have been wondering what ALProxy.subscribeCamera does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also explore further usage examples of naoqi.ALProxy itself.


Below are 15 code examples of ALProxy.subscribeCamera, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the site recommend better Python code samples.
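
All of the examples follow the same basic cycle: create an ALVideoDevice proxy, call subscribeCamera(name, cameraIndex, resolution, colorSpace, fps), fetch frames with getImageRemote, and unsubscribe when done. As a quick orientation, here is a minimal sketch of that cycle (ROBOT_IP and ROBOT_PORT are placeholders; adjust resolution, color space and frame rate to your needs):

# -*- coding: utf-8 -*-
# Minimal subscribe / capture / unsubscribe sketch (ROBOT_IP / ROBOT_PORT are placeholders).
from naoqi import ALProxy
import vision_definitions

ROBOT_IP = "127.0.0.1"   # replace with your robot's address
ROBOT_PORT = 9559

video = ALProxy("ALVideoDevice", ROBOT_IP, ROBOT_PORT)

# subscribeCamera(name, cameraIndex, resolution, colorSpace, fps)
# cameraIndex: 0 = top camera, 1 = bottom camera
subscriber_id = video.subscribeCamera("example_client",
                                      0,
                                      vision_definitions.kQVGA,
                                      vision_definitions.kRGBColorSpace,
                                      15)
try:
    image = video.getImageRemote(subscriber_id)
    if image is not None:
        # image[0] = width, image[1] = height, image[6] = raw pixel data
        width, height, data = image[0], image[1], image[6]
        print "got a %dx%d image (%d bytes)" % (width, height, len(data))
finally:
    video.unsubscribe(subscriber_id)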

Example 1: main

# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import subscribeCamera [as alias]
def main():
    global broker
    broker = ALBroker("myBroker", "0.0.0.0", 0, PEPPER_IP, PEPPER_PORT)

    # init camera
    video = ALProxy('ALVideoDevice')
    subscribers = video.getSubscribers()

    # init depth camera
    exists_camera_name = "{0}_0".format(DEPTH_CAMERA_NAME)
    if exists_camera_name in subscribers:
        depth_subscribe_id = exists_camera_name
    else:
        depth_subscribe_id = video.subscribeCamera(DEPTH_CAMERA_NAME, DEPTH_CAMERA_ID, RESOLUTION, DEPTH_COLOR_SPACE, FRAMERATE)
    print u"subscribe_id: {0}".format(depth_subscribe_id)

    # init rgb camera
    exists_camera_name = "{0}_0".format(RGB_CAMERA_NAME)
    if exists_camera_name in subscribers:
        rgb_subscribe_id = exists_camera_name
    else:
        rgb_subscribe_id = video.subscribeCamera(RGB_CAMERA_NAME, RGB_CAMERA_ID, RESOLUTION, RGB_COLOR_SPACE, FRAMERATE)
    print u"rgb_subscribe_id: {0}".format(rgb_subscribe_id)

    # get image
    for i in range(0, TIMES):
        print u"try: {0} {1}".format(i, datetime.now().strftime("%Y/%m/%d %H:%M:%S"))

        depth_ary = video.getImageRemote(depth_subscribe_id)
        rgb_ary = video.getImageRemote(rgb_subscribe_id)

        depth = QiImage(depth_ary)
        depth_binary = [struct.unpack('B', x)[0] for x in depth.binary]
        depth_save_path = DEPTH_FILE_PREFIX + "_" + str(i) + "_" + datetime.now().strftime("%Y%m%d_%H%M%S") + '.bmp'
        with Image.new("L", (depth.width, depth.height)) as im:
            im.putdata(depth_binary[::2])
            im.save(depth_save_path, format='bmp')
        print u"depth image file was created: {0}".format(depth_save_path)

        rgb = QiImage(rgb_ary)
        rgb_save_path = RGB_FILE_PREFIX + "_" + str(i) + "_" + datetime.now().strftime("%Y%m%d_%H%M%S") + '.bmp'
        rgb_binary = [struct.unpack('B', x)[0] for x in rgb.binary]
        with Image.new("RGB", (rgb.width, rgb.height)) as im:
            im.putdata(zip(*[iter(rgb_binary)] * 3))
            im.save(rgb_save_path, format='bmp')
        print u"rgb image file was created: {0}".format(rgb_save_path)

        both_save_path = BOTH_FILE_PREFIX + "_" + str(i) + "_" + datetime.now().strftime("%Y%m%d_%H%M%S") + '.bmp'
        with Image.new("RGB", (rgb.width, rgb.height * 2)) as im:
            im.putdata(zip(*[iter(rgb_binary)] * 3) + [(x, x, x) for x in depth_binary[::2]])
            im.save(both_save_path, format='bmp')
            im.show()
        print u"both image file was created: {0}".format(both_save_path)

        time.sleep(WAIT)

    video.unsubscribe(depth_subscribe_id)
    video.unsubscribe(rgb_subscribe_id)
Author: swkoubou, Project: peppermill-test, Lines: 60, Source: capture_depth_and_rgb.py

Example 2: VideoController

# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import subscribeCamera [as alias]
class VideoController(object):
    """
    Represents a virtual controller for NAO's video devices
    """
    def __init__(self, robot_ip=None, robot_port=None):
        """
        :param robot_ip: the ip address of the robot
        :param robot_port: the port of the robot
        Connect to the robot camera proxy
        """
        if robot_ip is None:
            robot_ip = nao.IP
        if robot_port is None:
            robot_port = nao.PORT
        self.ip = robot_ip
        self.port = robot_port
        self.video_device = ALProxy("ALVideoDevice", robot_ip, robot_port)
        self.disconnectFromCamera()
        self.subscriber_ids = [SUBSCRIBER_ID + "_CAM_0", SUBSCRIBER_ID + "_CAM_1"]
        self.cam_connected = False
        self.cam_matrix = nao.CAM_MATRIX
        self.cam_distorsion = nao.CAM_DISTORSION

    def connectToCamera(self, res=1, fps=11, camera_num=0, color_space=13, subscriber_id="C4N"):
        """
        Subscribe to the robot camera
        :param res: Type of resolution (see NAO documentation)
        :type res: int
        :param fps: Frame per second (max fps set by NAO documentation)
        :type fps: int
        :param camera_num: The camera to subscribe to (0 = Top, 1 = Bottom)
        :type camera_num: int
        :param color_space: The color space that the camera will use (see NAO documentation)
        :type color_space: int
        :param subscriber_id: The subscriber identifier
        :type subscriber_id: str
        :return: -1 in case of error, 0 else
        :rtype: int
        """
        try:
            self.subscriber_ids[camera_num] = subscriber_id + "_CAM_" + str(camera_num)
            self.disconnectFromCamera(camera_num)
            self.cam_connected = True
            self.subscriber_ids[camera_num] = self.video_device.subscribeCamera(subscriber_id, camera_num, res, color_space, fps)
            self.video_device.setAllCameraParametersToDefault(self.subscriber_ids[camera_num])
            self.video_device.setCameraParameter(self.subscriber_ids[camera_num], 2, 255)
            self.video_device.setCameraParameter(self.subscriber_ids[camera_num], 3, -180)
            self.video_device.setCameraParameter(self.subscriber_ids[camera_num], 6, 255)
            self.video_device.setCameraParameter(self.subscriber_ids[camera_num], 12, 1)
            self.video_device.setCameraParameter(self.subscriber_ids[camera_num], 24, 7)
            self.video_device.setCameraParameter(self.subscriber_ids[camera_num], 1, 64)
        except BaseException, err:
            print "ERR: cannot connect to camera : %s" % err
            return -1
        return 0
Author: Angeall, Project: pyConnect4NAO, Lines: 57, Source: video.py

Example 3: main

# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import subscribeCamera [as alias]
def main(args):
	# reference
	# https://github.com/ros-aldebaran/romeo_robot/blob/master/romeo_sensors/nodes/camera_depth.py
	# https://github.com/ros-perception/image_pipeline/blob/indigo/depth_image_proc/src/nodelets/point_cloud_xyz.cpp

	video = ALProxy('ALVideoDevice', args.ip, args.port)
	client = video.subscribeCamera(NAME, CAMERA_ID, RESOLUTION, COLOR_SPACE, FRAMERATE)

	try:
		image = video.getImageRemote(client)

		if image is None:
			print 'Cannot obtain depth image.'
			exit()

		width = image[0]
		height = image[1]
		array = image[6]

		cloud = []
		for v in range(height):
			for u in range(width):
				offset = (v * width + u) * 2
				depth = ord(array[offset]) + ord(array[offset+1]) * 256 

				x = (u - CX) * depth * UNIT_SCALING / FX
				y = (v - CY) * depth * UNIT_SCALING / FY
				z = depth * UNIT_SCALING

				cloud.append((x, y, z))
	finally:
		video.unsubscribe(client)

	header = '''# .PCD v0.7 - Point Cloud Data file format
VERSION 0.7
FIELDS x y z
SIZE 4 4 4
TYPE F F F
COUNT 1 1 1
WIDTH %d
HEIGHT 1
VIEWPOINT 0 0 0 1 0 0 0
POINTS %d
DATA ascii'''

	f = open(args.output, 'w')
	num = len(cloud)
	f.write(header % (num, num))
	f.write("\n")
	for c in cloud:
		f.write('%f %f %f' % (c[0], c[1], c[2]))
		f.write("\n")
	f.close()
Author: thorikawa, Project: pcl-pepper, Lines: 55, Source: get_pcd.py

Example 4: BasicVideoProcessing

# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import subscribeCamera [as alias]
class BasicVideoProcessing(object):
	""" A class that handles some basic operations with NAO's camera. """
	
	def __init__(self):
		self.camProxy = ALProxy("ALVideoDevice")		
        
	def connectToCamera(self):
		try:
			clientName = "nao-app-client"
			cameraNum = 0
			resolutionOptNum = 1
			colorspaceOptNum = 13
			fpsNum = 20
			self.clientId = self.camProxy.subscribeCamera(clientName, cameraNum, resolutionOptNum, colorspaceOptNum, fpsNum)
		except BaseException, err:
			print "Error while connecting camera: " + str(err)
Author: knawrot, Project: nao-objects-recognition, Lines: 18, Source: app.py

Example 5: BasicVideoProcessing

# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import subscribeCamera [as alias]
class BasicVideoProcessing(object):
	""" A basic module to test camera """
	
	def __init__(self):
		self.camProxy = ALProxy("ALVideoDevice", NAO_IP, NAO_PORT)		
        
	def connectToCamera(self):
		try:
			clientName = "red-ball-client"
			cameraNum = 0
			resolutionOptNum = 1
			colorspaceOptNum = 13
			fpsNum = 20
			self.clientId = self.camProxy.subscribeCamera(clientName, cameraNum, resolutionOptNum, colorspaceOptNum, fpsNum)
		except BaseException, err:
			print "Error while connecting camera: " + str(err)
Author: knawrot, Project: nao-objects-recognition, Lines: 18, Source: hsvPickerScript.py

Example 6: ImageManager

# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import subscribeCamera [as alias]
class ImageManager():
    def __init__(self,ip,port,img_name):
        self.img_name=img_name
        self.videoDevice = ALProxy("ALVideoDevice", ip,port)
        self.sub_name = "hackathon2016TopCam"
        self.clean_subscribers(self.sub_name)        
        
    def subscribe(self):
        self.sub_name =  self.videoDevice.subscribeCamera(self.sub_name,1,kQVGA,kRGBColorSpace,5)

    def unsubscribe(self):
        self.videoDevice.unsubscribe(self.sub_name)
    def clean_subscribers(self, sub_name):
        # Unsubscribe any stale subscribers left over from previous runs.
        for i in range(0, 8):
            try:
                self.videoDevice.unsubscribe(sub_name + "_%d" % i)
            except:
                pass
    
    def getAndSaveImage(self,path):
        img_array = self.videoDevice.getImageRemote(self.sub_name)
        width = img_array[0]
        height = img_array[1]
        
        frame=np.asarray(bytearray(img_array[6]), dtype=np.uint8)
        frame=frame.reshape((height,width,3))
        frame = frame[height/5:height-height/5,width/4:width-width/4] 
        im = Image.fromarray(frame)
        im.save(path)
                    
    def recognizeObject(self):
        print "getting image"
        self.getAndSaveImage(self.img_name)
        
        # classify image
        print "getting classification"
        res = classifier.run_inference_on_image(self.img_name)[0]
        print res
        
        if type(res) == dict:
            #get label with highest probability
            label = res["label_name"].split(",")[0]
            prob = str(int(res["score"]*100))
            return [label,prob]            
        else:
            print "result type is not a dict"
            return None
Author: arotyramel, Project: NaoQiObjectReco, Lines: 49, Source: main.py

Example 7: RefreshCam

# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import subscribeCamera [as alias]
class RefreshCam(ALModule, Thread):
    """docstring for NaoWalks"""
    def __init__(self, name, event, pict):
        Thread.__init__(self)
        ALModule.__init__(self, name)

        self.event = event
        self.pict  = pict

        # Create new object to get logs on computer's terminal.
        self.logs = logs.logs()

        # Create an ALVideoDevice proxy.
        self.camProxy = ALProxy("ALVideoDevice")
        self.logs.display("Subscribed to an ALVideoDevice proxy", "Good")

        # Register a video module.
        colorSpace = vision_definitions.kBGRColorSpace
        fps = 15
        camera = 1      # 1 = mouth camera / 0 = front camera.
        self.followTheLineCam = self.camProxy.subscribeCamera("LowCam",
                                                              camera,
                                                              definition,
                                                              colorSpace,
                                                              fps)
        self.logs.display("Subscribed to Camera 1", "Good")


    # Method called by the Thread.start() method.
    def run(self):
        while True:
            self.event.wait()
            
            # Get the image.
            pict = self.camProxy.getImageRemote(self.followTheLineCam)
            self.camProxy.releaseImage(self.followTheLineCam)
            self.logs.display("Received a picture from Camera 1")
            
            self.event.clear()

        self.camProxy.unsubscribe("LowCam")
        self.logs.display("Camera unsubscribed", "Good")


# ############################### END OF CLASS ############################## #
Author: NSenaud, Project: NaoChallengeProject, Lines: 47, Source: camLow.py

Example 8: BasicVideoProcessing

# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import subscribeCamera [as alias]
class BasicVideoProcessing(object):
	""" A basic module to test camera """
	
	def __init__(self):
		self.camProxy = ALProxy("ALVideoDevice", NAO_IP, NAO_PORT)		
		#self.vc = cv2.VideoCapture(0)
		self.frameCenterBox = self.calculateWindowCenterBox(320, 240) # for given resolutionOptNum
        
	def connectToCamera(self):
		try:
			clientName = "red-ball-client"
			cameraNum = 0
			resolutionOptNum = 1
			colorspaceOptNum = 13
			fpsNum = 20
			self.clientId = self.camProxy.subscribeCamera(clientName, cameraNum, resolutionOptNum, colorspaceOptNum, fpsNum)
		except BaseException, err:
			print "Error while connecting camera: " + str(err)
Author: knawrot, Project: nao-objects-recognition, Lines: 20, Source: detectionScript.py

Example 9: showNaoImage

# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import subscribeCamera [as alias]
def showNaoImage(IP, PORT):
  """
  First get an image from Nao, then show it on the screen with PIL.
  """

  camProxy = ALProxy("ALVideoDevice", IP, PORT)
  cameraIndex = 0
  resolution = 2    # VGA
  colorSpace = 11   # RGB

  videoClient = camProxy.subscribeCamera("python_client", cameraIndex, resolution, colorSpace, 5)

  t0 = time.time()

  # Get a camera image.
  # image[6] contains the image data passed as an array of ASCII chars.
  naoImage = camProxy.getImageRemote(videoClient)

  t1 = time.time()

  # Time the image transfer.
  print "acquisition delay ", t1 - t0

  camProxy.unsubscribe(videoClient)


  # Now we work with the image returned and save it as a PNG  using ImageDraw
  # package.

  # Get the image size and pixel array.
  imageWidth = naoImage[0]
  imageHeight = naoImage[1]
  array = naoImage[6]

  # Create a PIL Image from our pixel array.
  im = Image.fromstring("RGB", (imageWidth, imageHeight), array)

  # Save the image.
  im.save("camImage.png", "PNG")

  im.show()
Author: axmtec0eric, Project: Nao-Robot, Lines: 43, Source: vision_getandsaveimage.py

Example 10: __init__

# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import subscribeCamera [as alias]
class SimpleVisions:
    def __init__(self):
        self.visionProxy = ALProxy("ALVideoDevice", Config.ROBOT_IP, Config.PORT)
        self.motionProxy = ALProxy("ALMotion", Config.ROBOT_IP, Config.PORT)

        pass

    def takePicture(self, theName):
        #motionObj.moveHeadPitch(0.3, 0.4)
        #time.sleep(2)
        videoClient = self.visionProxy.subscribeCamera("python_client", 0, resolution, colorSpace, 5)
        self.visionProxy.setCameraParameter(videoClient, 18, 0)
        picture = self.visionProxy.getImageRemote(videoClient)
        #picture2 = self.visionProxy.getImageLocal(videoClient)
        self.visionProxy.unsubscribe(videoClient)
        picWidth = picture[0]
        picHeight = picture[1]
        array = picture[6]
        realPicture = Image.fromstring("RGB", (picWidth, picHeight), array)
        realPicture.save(theName, "PNG")
        #realPicture.save("analyzeThis.png", "JPG")
        realPicture.show()
Author: redsphinx, Project: Simple-NAO-Controller, Lines: 24, Source: SimpleVisions.py

Example 11: VideoStream

# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import subscribeCamera [as alias]
class VideoStream():
  def __init__(self,ip,port=9559,camId=0):
    """Load the video proxy on robot
       
      Keyword arguments:
      ip -- ip of the robot
      port -- port where naoqi listen
      camId -- top cam is 0, bottom cam is 1
    """
    self.strId = "0"     # No idea
    resolution = 2  # VGA
    colorSpace = 11 # RGB
    
    self.video = ALProxy("ALVideoDevice",ip,port)
    self.status = self.video.subscribeCamera(self.strId,camId,resolution,colorSpace,30)
    if self.status == "":
        raise Exception("subscribtion to camera failed")
    else:
        self.strId = self.status
    
    nUsedResolution = self.video.getResolution( self.strId );
    self.nWidth, self.nHeight = self.video.resolutionToSizes( nUsedResolution );
    
  
  def getFrame(self):
    """Retrieve a frame from the video proxy"""
    # getImageRemote returns None on failure, so check before indexing.
    resultCamera = self.video.getImageRemote(self.strId)
    if resultCamera is None:
      raise Exception("Cannot read a frame from the video feed")
    else:
      image = resultCamera[6]
      img = np.fromstring(image, dtype=np.uint8)
      img = np.reshape(img, (self.nHeight, self.nWidth, 3))

    return img
  
  def __del__(self):
    """Uninstanciate the video proxy"""
    self.video.unsubscribe(self.status)
Author: marc-moreaux, Project: cm_perso, Lines: 41, Source: stream.py

Example 12: main

# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import subscribeCamera [as alias]
def main():
    global broker
    broker = ALBroker("myBroker", "0.0.0.0", 0, PEPPER_IP, PEPPER_PORT)

    # init camera
    video = ALProxy('ALVideoDevice')
    subscribers = video.getSubscribers()

    # init rgb camera
    exists_camera_name = "{0}_0".format(RGB_CAMERA_NAME)
    if exists_camera_name in subscribers:
        rgb_subscribe_id = exists_camera_name
    else:
        rgb_subscribe_id = video.subscribeCamera(RGB_CAMERA_NAME, RGB_CAMERA_ID, RESOLUTION, RGB_COLOR_SPACE, FRAMERATE)
    print u"rgb_subscribe_id: {0}".format(rgb_subscribe_id)

    # get image
    for i in range(0, TIMES):
        print u"try: {0} {1}".format(i, datetime.now().strftime("%Y/%m/%d %H:%M:%S"))

        rgb_ary = video.getDirectRawImageRemote(rgb_subscribe_id)
        video.releaseImage(rgb_subscribe_id)
        rgb = QiImage(rgb_ary)

        savepath = RGB_FILE_PREFIX + "_" + str(i) + "_" + datetime.now().strftime("%Y%m%d_%H%M%S") + ".bmp"
        print len(rgb.binary)
        ary = zip(*[iter(list(bytearray(rgb.binary)))] * 4)
        ary2 = ImageConverter.yuv422ToRGB(ary)

        with Image.new("RGB", (rgb.width, rgb.height)) as im:
            im.putdata(ary2)
            im.save(savepath, format='bmp')

        time.sleep(WAIT)

    video.unsubscribe(rgb_subscribe_id)
Author: swkoubou, Project: peppermill-test, Lines: 38, Source: capture_raw_image.py

Example 13: main

# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import subscribeCamera [as alias]

#......... part of the code is omitted here .........
            _al_tts_proxy = ALProxy("ALTextToSpeech", NAO_IP, int(NAO_PORT))
            _al_landmark_proxy = ALProxy("ALLandMarkDetection", NAO_IP, int(NAO_PORT))
            _al_face_proxy = ALProxy("ALFaceDetection", NAO_IP, int(NAO_PORT))
            _al_memory_proxy = ALProxy("ALMemory", NAO_IP, int(NAO_PORT))
            _al_video_proxy = ALProxy("ALVideoDevice", NAO_IP, int(NAO_PORT))
            if(RECORD_VIDEO == True): _al_video_rec_proxy = ALProxy("ALVideoRecorder", NAO_IP, int(NAO_PORT))
            if(RECORD_VIDEO == True): _al_video_rec_proxy.stopRecording() #reset a dangling connection
            #_al_speechrecognition_proxy = ALProxy("ALSpeechRecognition", NAO_IP, int(NAO_PORT))

            # Subscribe to the proxies services
            print("[STATE " + str(STATE) + "] " + "ALProxy Landmark init" + "\n")
            period = 500 #in msec
            #_al_face_proxy.subscribe("Test_Face", period, 0.0 )
            _al_face_proxy.subscribe("Test_Face")
            #_al_landmark_proxy.subscribe("Test_LandMark", period, 0.0 )
            _al_landmark_proxy.subscribe("Test_LandMark")
            try:
                #"Test_Video", CameraIndex=1, Resolution=1, ColorSpace=0, Fps=5
                #CameraIndex= 0(Top), 1(Bottom)
                #Resolution= 0(160*120), 1(320*240), VGA=2(640*480), 3(1280*960)
                #ColorSpace= AL::kYuvColorSpace (index=0, channels=1), AL::kYUV422ColorSpace (index=9,channels=3),
                #AL::kRGBColorSpace RGB (index=11, channels=3), AL::kBGRColorSpace BGR (to use in OpenCV) (index=13, channels=3)
                #Fps= OV7670 VGA camera can only run at 30, 15, 10 and 5fps. The MT9M114 HD camera run from 1 to 30fps.
                #Settings for resolution 1 (320x240)
                resolution_type = 1
                fps=15
                cam_w = 320
                cam_h = 240
                #Settings for resolution 2 (640x480)
                #resolution_type = 2
                #fps = 15
                #cam_w = 640
                #cam_h = 480
                camera_name_id = _al_video_proxy.subscribeCamera("Test_Video", 0, resolution_type, 13, fps)
                print("[STATE " + str(STATE) + "] " + "Connected to the camera with resolution: " + str(cam_w) + "x" + str(cam_h) + "\n")
            except BaseException, err:
                print("[ERROR] connectToCamera: catching error " + str(err))
                return
            #Adding to the speech recognition proxy a vocabulary
            #_al_speechrecognition_proxy.setLanguage("English")
            #vocabulary = ["good", "bad", "nao"]
            #_al_speechrecognition_proxy.setVocabulary(vocabulary, False)
            #_al_speechrecognition_proxy.setVocabulary(vocabulary, False) #If you want to enable word spotting
            #_al_speechrecognition_proxy.subscribe("Test_ASR")

            #Initialise the OpenCV video recorder
            if(RECORD_VIDEO == True):
                print("[STATE " + str(STATE) + "] " + "Starting the video recorder..." + "\n")
                fourcc = cv2.cv.CV_FOURCC(*'XVID')
                video_out = cv2.VideoWriter("./output.avi", fourcc, 20.0, (cam_w,cam_h))
                #Record also the NAO session
                _al_video_rec_proxy.setResolution(2) #Resolution VGA  640*480
                _al_video_rec_proxy.setFrameRate(30)
                #_al_video_rec_proxy.setVideoFormat("MJPG")
                #self._video_proxy.startVideoRecord(complete_path)
                _al_video_rec_proxy.startRecording("/home/nao/recordings/cameras", "last_session", True) #It worked saving in this path!

            #Save all the sentences inside the NAO memory
            #it is usefull if you want a clean audio
            #to present in a video.
            if(RECORD_VOICE == True):
                print("[STATE " + str(STATE) + "] " + "Saving the voice in '/home/nao/recordings/microphones'" + "\n")
                _al_tts_proxy.sayToFile("Hello world!", "/home/nao/recordings/microphones/hello_wolrd.wav")
                _al_tts_proxy.sayToFile("I see only one object! It's a " , "/home/nao/recordings/microphones/i_see_only_one.wav")
                _al_tts_proxy.sayToFile("One object here! It's a " , "/home/nao/recordings/microphones/one_object.wav")
                _al_tts_proxy.sayToFile("There is one object! It's a " , "/home/nao/recordings/microphones/there_is_one.wav")
Author: mpatacchiola, Project: pyERA, Lines: 70, Source: ex_nao_head_imitation.py

Example 14: main

# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import subscribeCamera [as alias]
def main(port,ip):
    IP=ip
    PORT=port
    proxy=ALProxy('ALVideoDevice','192.168.1.112',9559)
    camera=0 #top camera=0 bottom camera=1
    resolution=vision_definitions.kVGA 
    colorSpace=vision_definitions.kRGBColorSpace
    fps=10
    client=proxy.subscribeCamera("detector", camera, resolution,colorSpace,fps)
    width=640
    height=480
    #image = np.zeros((height,width,3),np.uint8)
    image=None
    result = proxy.getImageRemote(client)
    if result is None:
        print ("cannot get image")
    elif result[6] is None:
        print ("no image data string.")
    else:
        image=np.fromstring(result[6],np.uint8).reshape(480,640,3)
        proxy.releaseImage(client)
        proxy.unsubscribe(client)
        cv2.imwrite('C:\Users\Joanna SanDretto\Downloads\calibration.png',cv2.cvtColor(image,cv2.COLOR_RGB2BGR))
    
    plt.imshow(image,)
    plt.show()
    im=cv2.cvtColor(image,cv2.COLOR_RGB2HSV)
    GREEN_LOW=[60,70,70]
    GREEN_HIGH=[70,255,255]
    PINK_LOW=[163,100,120]
    PINK_HIGH=[167,255,255]
    RED_LOW=[175,150,150]
    RED_HIGH=[175,255,255]
    PURPLE_LOW=[120,50,50]
    PURPLE_HIGH=[122,255,255]
    YELLOW_LOW=[15,166,50]
    YELLOW_HIGH=[25,255,255]
    boundaries=[(GREEN_LOW,GREEN_HIGH),(PINK_LOW,PINK_HIGH),(PURPLE_LOW,PURPLE_HIGH),(YELLOW_LOW,YELLOW_HIGH)]
    kernel = np.ones((5,5),np.uint8)

    for(lower,upper) in boundaries:
        lower=np.array(lower,dtype="uint8")
        upper=np.array(upper,dtype="uint8")
        mask=cv2.inRange(im,lower,upper)
        mask=cv2.dilate(mask,kernel,iterations=1)
       
        plt.imshow(mask,)
        plt.show()
        
        imgray=mask
        ret,thresh=cv2.threshold(imgray,127,255,0)
        im2,contours,hierarchy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
        areas=[cv2.contourArea(c) for c in contours]
        max_index=np.argmax(areas)
        cnt=contours[max_index]
                
        x,y,w,h=cv2.boundingRect(cnt)
        cv2.rectangle(image,(x,y),(x+w,y+h),(0,255,0),2)
       # img=cv2.drawContours(image,contours,-1,(0,255,0),3)

        plt.imshow(image,)
        plt.show()
Author: happinessie, Project: NaoSimonSays, Lines: 64, Source: COLOR_FILTER_WITH_CONTOURS_V1.py

Example 15: ALProxy

# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import subscribeCamera [as alias]
weekdays = ["Lundi", "Mardi", "Mercredi", "Jeudi", "Vendredi", "Samedi", "Dimanche"]
oldAngle = 0 # Global to keep in mind


camProxy = ALProxy("ALVideoDevice", IP, Port)
TTSProxy = ALProxy("ALTextToSpeech", IP, Port)

# Register a video module.
colorSpace = vision_definitions.kBGRColorSpace    # OpenCV sadly does not support YUV.
definition = vision_definitions.k4VGA             # Resolution Max.
fps = 3         # We can't really expect more than 2 treatment per second.
camera = 0      # 1 = mouth camera / 0 = front camera.

readCalendarCam = camProxy.subscribeCamera("CalendarCam",
                                            camera,
                                            definition,
                                            colorSpace,
                                            fps)


# Thanks to http://hetland.org/coding/python/levenshtein.py
def levenshtein(a,b):
    "Calculates the Levenshtein distance between a and b."
    n, m = len(a), len(b)
    if n > m:
        # Make sure n <= m, to use O(min(n,m)) space
        a,b = b,a
        n,m = m,n

    current = range(n+1)
    for i in range(1,m+1):
Author: NSenaud, Project: NaoChallengeProject, Lines: 33, Source: getCalendarDay.py


Note: The naoqi.ALProxy.subscribeCamera method examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce this article without permission.