This article collects and summarizes typical usage examples of the naoqi.ALProxy.releaseImage method in Python. If you have been wondering what exactly ALProxy.releaseImage does, how to call it, or where to find working examples, the curated samples below should help. You can also explore further usage examples of its containing class, naoqi.ALProxy.
The following presents 8 code examples of the ALProxy.releaseImage method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
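Before diving into the examples, here is a minimal sketch of the workflow releaseImage belongs to: subscribe to a camera, grab a frame, release its buffer, unsubscribe. The robot address, subscriber name, and camera settings below are placeholder assumptions, not values taken from any example.

from naoqi import ALProxy
import vision_definitions

ROBOT_IP = "127.0.0.1"  # placeholder; use your robot's address
ROBOT_PORT = 9559

video = ALProxy("ALVideoDevice", ROBOT_IP, ROBOT_PORT)
# Subscribe: top camera (0), QVGA resolution, BGR color space, 10 fps.
client = video.subscribeCamera("example_client", 0,
                               vision_definitions.kQVGA,
                               vision_definitions.kBGRColorSpace, 10)
try:
    frame = video.getImageRemote(client)  # [width, height, ..., pixel bytes at index 6, ...]
    video.releaseImage(client)            # free the buffer backing the frame just grabbed
finally:
    video.unsubscribe(client)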
Example 1: RefreshCam
# Required import: from naoqi import ALProxy [as alias]
# Or alternatively: from naoqi.ALProxy import releaseImage [as alias]
class RefreshCam(ALModule, Thread):
"""docstring for NaoWalks"""
def __init__(self, name, event, pict):
Thread.__init__(self)
ALModule.__init__(self, name)
self.event = event
self.pict = pict
# Create new object to get logs on computer's terminal.
self.logs = logs.logs()
# Create an ALVideoDevice proxy.
self.camProxy = ALProxy("ALVideoDevice")
self.logs.display("Subscribed to an ALVideoDevice proxy", "Good")
        # Register a video module.
        colorSpace = vision_definitions.kBGRColorSpace
        fps = 15
        camera = 1 # 1 = bottom ("mouth") camera / 0 = top (forehead) camera.
        # The excerpt never defines 'definition'; a resolution such as
        # vision_definitions.kQVGA is assumed here so the snippet runs.
        definition = vision_definitions.kQVGA
        self.followTheLineCam = self.camProxy.subscribeCamera("LowCam",
                                                              camera,
                                                              definition,
                                                              colorSpace,
                                                              fps)
self.logs.display("Subscribed to Camera 1", "Good")
# Method called by the Thread.start() method.
def run(self):
while True:
self.event.wait()
            # Get the image and store it on the instance so callers can read it
            # (the original assigned to a local 'pict' that was never used).
            self.pict = self.camProxy.getImageRemote(self.followTheLineCam)
            self.camProxy.releaseImage(self.followTheLineCam)
self.logs.display("Received a picture from Camera 1")
self.event.clear()
self.camProxy.unsubscribe("LowCam")
self.logs.display("Camera unsubscribed", "Good")
# ############################### END OF CLASS ############################## #
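A minimal sketch of how RefreshCam might be driven; the broker address and event wiring are assumptions, since the excerpt does not show them. Note that naoqi expects an ALModule instance to be stored in a global variable whose name matches the module name.

# Hypothetical driver for the class above.
from threading import Event
from naoqi import ALBroker

broker = ALBroker("camBroker", "0.0.0.0", 0, "127.0.0.1", 9559)  # placeholder robot address
picture_event = Event()
RefreshCamModule = RefreshCam("RefreshCamModule", picture_event, None)
RefreshCamModule.start()
picture_event.set()  # request one frame; run() grabs it, releases the buffer, then clears the event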
Example 2: main
# Required import: from naoqi import ALProxy [as alias]
# Or alternatively: from naoqi.ALProxy import releaseImage [as alias]
def main():
global broker
broker = ALBroker("myBroker", "0.0.0.0", 0, PEPPER_IP, PEPPER_PORT)
# init camera
video = ALProxy('ALVideoDevice')
subscribers = video.getSubscribers()
# init rgb camera
exists_camera_name = "{0}_0".format(RGB_CAMERA_NAME)
if exists_camera_name in subscribers:
rgb_subscribe_id = exists_camera_name
else:
rgb_subscribe_id = video.subscribeCamera(RGB_CAMERA_NAME, RGB_CAMERA_ID, RESOLUTION, RGB_COLOR_SPACE, FRAMERATE)
print u"rgb_subscribe_id: {0}".format(rgb_subscribe_id)
# get image
for i in range(0, TIMES):
print u"try: {0} {1}".format(i, datetime.now().strftime("%Y/%m/%d %H:%M:%S"))
rgb_ary = video.getDirectRawImageRemote(rgb_subscribe_id)
video.releaseImage(rgb_subscribe_id)
rgb = QiImage(rgb_ary)
savepath = RGB_FILE_PREFIX + "_" + str(i) + "_" + datetime.now().strftime("%Y%m%d_%H%M%S") + ".bmp"
print len(rgb.binary)
        # Group the raw byte string into 4-byte YUYV chunks (each encodes two pixels).
        ary = zip(*[iter(list(bytearray(rgb.binary)))] * 4)
ary2 = ImageConverter.yuv422ToRGB(ary)
with Image.new("RGB", (rgb.width, rgb.height)) as im:
im.putdata(ary2)
im.save(savepath, format='bmp')
time.sleep(WAIT)
video.unsubscribe(rgb_subscribe_id)
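Example 2 depends on project-specific helpers (QiImage, ImageConverter.yuv422ToRGB) that are not shown. For reference, a standalone sketch of the same YUV422 (YUYV) to RGB conversion could look like the following; the integer BT.601 coefficients are standard, but this is an illustration, not the project's actual implementation.

def yuyv_to_rgb(data):
    """Convert a YUYV (kYUV422) byte string into a flat list of (R, G, B) tuples."""
    pixels = []
    raw = bytearray(data)
    for i in range(0, len(raw), 4):
        y0, u, y1, v = raw[i], raw[i + 1], raw[i + 2], raw[i + 3]
        for y in (y0, y1):  # each 4-byte group encodes two pixels sharing one U and one V
            c, d, e = y - 16, u - 128, v - 128
            r = max(0, min(255, (298 * c + 409 * e + 128) >> 8))
            g = max(0, min(255, (298 * c - 100 * d - 208 * e + 128) >> 8))
            b = max(0, min(255, (298 * c + 516 * d + 128) >> 8))
            pixels.append((r, g, b))
    return pixels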
Example 3: range
# Required import: from naoqi import ALProxy [as alias]
# Or alternatively: from naoqi.ALProxy import releaseImage [as alias]
# NOTE: this excerpt starts mid-script; camProxy, resolution, colorSpace and fps
# are assumed to have been created earlier, e.g.:
#   camProxy = ALProxy("ALVideoDevice", IP, PORT)
#   resolution = 1   # kQVGA
#   colorSpace = 11  # kRGB
#   fps = 15
# register/unRegister below are the legacy GVM subscription API.
nameId = camProxy.register("python_GVM", resolution, colorSpace, fps)
print nameId
print 'getting direct raw images in local'
for i in range(0, 50):
camProxy.getDirectRawImageLocal(nameId)
camProxy.releaseDirectRawImage(nameId)
print 'getting direct raw images in remote'
for i in range(0, 50):
camProxy.getDirectRawImageRemote(nameId)
resolution = 2 #kVGA
camProxy.setResolution(nameId, resolution)
print 'getting images in local'
for i in range(0, 50):
camProxy.getImageLocal(nameId)
camProxy.releaseImage(nameId)
print 'getting images in remote'
for i in range(0, 50):
camProxy.getImageRemote(nameId)
camProxy.unRegister(nameId)
print 'end of gvm_getImageLocal python script'
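The local/remote split in this script is its main point: the *Local methods return a reference into ALVideoDevice's own buffers and only work for modules running on the robot itself, while the *Remote methods copy each frame over the network. A small timing helper, reusing the camProxy and nameId assumed above, can make the difference visible; this is a sketch, not part of the original script.

import time

def average_grab_time(grab, release, subscriber_id, n=50):
    # Average seconds per grab/release cycle for one ALVideoDevice access method.
    start = time.time()
    for _ in range(n):
        grab(subscriber_id)
        release(subscriber_id)
    return (time.time() - start) / n

# e.g. average_grab_time(camProxy.getImageLocal, camProxy.releaseImage, nameId)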
Example 4: main
# Required import: from naoqi import ALProxy [as alias]
# Or alternatively: from naoqi.ALProxy import releaseImage [as alias]
def main(port,ip):
    #IP address and port number of NAO robot
    IP=ip
    PORT=port
    #establish connection to NAO, using the arguments rather than a hard-coded address
    proxy=ALProxy('ALVideoDevice',IP,PORT)
#set-up configuration for images
camera=0 #top camera=0 bottom camera=1
resolution=vision_definitions.kVGA
colorSpace=vision_definitions.kBGRColorSpace
fps=10
#subscribe to NAO's camera with given camera configuration
client=proxy.subscribeCamera("detector", camera, resolution,colorSpace,fps)
#width and height of the images
width=640
height=480
    #image = np.zeros((height,width,3),np.uint8)
    img=None
#get an image from NAO
result = proxy.getImageRemote(client)
    #make sure an image was received and that it carries pixel data;
    #bail out early so img is never used uninitialized below
    if result is None:
        print ("cannot get image")
        proxy.unsubscribe(client)
        return
    elif result[6] is None:
        print ("no image data string.")
        proxy.unsubscribe(client)
        return
    else:
        #reshape the image data into a (height, width, 3) array
        img=np.fromstring(result[6],np.uint8).reshape(height,width,3)
        #release the image and unsubscribe from NAO's camera since we won't use it again
        proxy.releaseImage(client)
        proxy.unsubscribe(client)
#create a window with 6 trackbars: H low, H high, S low, S high, V low, and V high
cv2.namedWindow('result')
cv2.resizeWindow('result',500,500)
cv2.createTrackbar('h_low','result',0,179,nothing)
cv2.createTrackbar('s_low','result',0,255,nothing)
cv2.createTrackbar('v_low','result',0,255,nothing)
cv2.createTrackbar('h_high','result',179,179,nothing)
cv2.createTrackbar('s_high','result',255,255,nothing)
cv2.createTrackbar('v_high','result',255,255,nothing)
h,s,v=100,100,100
    #loop: filter the image using the trackbar values as HSV boundaries and display the result
    while True:
h_low=cv2.getTrackbarPos('h_low','result')
s_low=cv2.getTrackbarPos('s_low','result')
v_low=cv2.getTrackbarPos('v_low','result')
h_high=cv2.getTrackbarPos('h_high','result')
s_high=cv2.getTrackbarPos('s_high','result')
v_high=cv2.getTrackbarPos('v_high','result')
#filter the image and draw a rectangle around the largest contour
lower=np.array([h_low,s_low,v_low])
upper=np.array([h_high,s_high,v_high])
hsv=cv2.cvtColor(img,cv2.COLOR_BGR2HSV)
mask=cv2.inRange(hsv,lower,upper)
kernel = np.ones((5,5),np.uint8)
mask=cv2.erode(mask,kernel,iterations=1)
mask=cv2.dilate(mask,kernel,iterations=1)
result=cv2.bitwise_and(img,img,mask=mask)
        im2,contours,hierarchy=cv2.findContours(mask,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
        areas=[cv2.contourArea(c) for c in contours]
        if areas:  # np.argmax would raise on an empty list when nothing matches the range
            max_index=np.argmax(areas)
            cnt=contours[max_index]
            x,y,w,h=cv2.boundingRect(cnt)
            cv2.rectangle(result,(x,y),(x+w,y+h),(0,255,0),2)
cv2.imshow('result',result)
k=cv2.waitKey(5) & 0xFF
if k==27:
break
cv2.destroyAllWindows()
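This example (like Example 7 below) hard-codes the 640x480 shape when reshaping the byte string, even though the value returned by getImageRemote already carries its own dimensions: width at index 0, height at index 1, pixel data at index 6. A small helper along these lines avoids the hard-coding; it is a sketch, not part of the original example.

import numpy as np

def frame_to_array(result, channels=3):
    # Turn an ALVideoDevice getImageRemote result into a (height, width, channels) array.
    if result is None or result[6] is None:
        return None
    width, height, data = result[0], result[1], result[6]
    return np.frombuffer(data, np.uint8).reshape(height, width, channels)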
Example 5: VideoRobot
# Required import: from naoqi import ALProxy [as alias]
# Or alternatively: from naoqi.ALProxy import releaseImage [as alias]
class VideoRobot(object):
FPS = 5
RESOLUTION = vision_definitions.kVGA
COLOR_SPACE = vision_definitions.kRGBColorSpace
def __init__(self):
self.video_proxy = None
self.cameras = [None, None] #two cameras on Nao, top and bottom
def connect(self, host, port):
print 'Video - Connecting to robot on {0}:{1}...'.format(host, port)
try:
self.video_proxy = ALProxy("ALVideoDevice", host, port)
except Exception as exception: # pylint: disable=broad-except
            raise Exception('Could not create proxy: {0}'.format(exception))
self._subscribe_to_cameras()
def _subscribe_to_cameras(self):
for camera_index in range(len(self.cameras)):
camera_name = 'nc_camera_{0}'.format(camera_index)
camera = self.video_proxy.subscribeCamera(camera_name, camera_index, self.RESOLUTION, self.COLOR_SPACE, self.FPS)
self.cameras[camera_index] = camera
if camera is None:
print 'Failed to subscribe to camera: {0}'.format(camera_index)
def get_remote_image(self, use_bottom_camera=False):
camera_index = vision_definitions.kBottomCamera if use_bottom_camera else vision_definitions.kTopCamera
camera = self.cameras[camera_index]
capture = self.video_proxy.getImageRemote(camera)
if capture is None:
print 'Failed to retrieve image remotely from camera: {0}'.format(camera_index)
image = None
if capture is not None:
width = capture[0]
height = capture[1]
data = capture[6]
try:
image = self._convert_capture_to_image(width, height, 'RGB', data)
#timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H-%M-%S')
#image.save(timestamp + '.png', "PNG")
#image.show()
except IOError as error:
print 'Error saving image: {0}'.format(error)
finally:
self.video_proxy.releaseImage(camera)
return image
@staticmethod
def _convert_capture_to_image(width, height, mode, data):
image = None
try:
image = Image.frombytes(mode, (width, height), data)
except ValueError as error:
raise ValueError('Invalid parameter passed to image creation: {0}'.format(error))
except TypeError as error:
raise TypeError('Invalid type passed to image creation: {0}'.format(error))
return image
def set_auto_exposure(self, target_exposure, set_bottom_camera=False):
if target_exposure < 0 or target_exposure > 3:
raise ValueError('target_exposure must be between 0 and 3')
camera_index = vision_definitions.kBottomCamera if set_bottom_camera else vision_definitions.kTopCamera
success = self.video_proxy.setParameter(camera_index, vision_definitions.kCameraExposureAlgorithmID, target_exposure)
if success:
print 'Successfully changed camera {0} exposure to {1}'.format(camera_index, target_exposure)
else:
print 'Failed to change camera {0} exposure to {1}'.format(camera_index, target_exposure)
return success
def __del__(self):
print 'Unsubscribing from cameras...'
for camera_index in range(len(self.cameras)):
camera = self.cameras[camera_index]
            if camera is not None:
                unsubscribe_success = self.video_proxy.unsubscribe(camera)
                if not unsubscribe_success:
                    print 'Failed to unsubscribe from camera: {0}'.format(camera_index)
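A short sketch of how VideoRobot might be used; the address is a placeholder and the save step is illustrative.

robot = VideoRobot()
robot.connect('192.168.1.100', 9559)  # placeholder host and port
frame = robot.get_remote_image(use_bottom_camera=True)  # PIL image, or None on failure
if frame is not None:
    frame.save('bottom_camera.png', 'PNG')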
Example 6: __init__
# Required import: from naoqi import ALProxy [as alias]
# Or alternatively: from naoqi.ALProxy import releaseImage [as alias]
#......... (part of the code is omitted here) .........
def enableArmCollisionProtection(self):
chainName = "Arms"
enable = True
isSuccess = self.motionProxy.setCollisionProtectionEnabled(chainName,
enable)
pprint("Anticollision activation on arms: " + str(isSuccess))
def disableArmCollisionProtection(self):
chainName = "Arms"
enable = False
isSuccess = self.motionProxy.setCollisionProtectionEnabled(chainName,
enable)
pprint("Anticollision deactivation on arms: " + str(isSuccess))
def enableExternalCollisionProtection(self):
self.motionProxy.setExternalCollisionProtectionEnabled("All", True)
def disableExternalCollisionProtection(self):
self.motionProxy.setExternalCollisionProtectionEnabled("All", False)
def headRotateAbsolute(self, degrees):
        # Single target angle for one joint: interpolate the head yaw to the
        # requested angle, scaling the duration to roughly 15 degrees per second.
names = "HeadYaw"
angleLists = degrees*almath.TO_RAD
timeLists = 1.0 if (degrees == 0.0) else abs(15.0/degrees)
isAbsolute = True
self.motionProxy.angleInterpolation(names, angleLists, timeLists,
isAbsolute)
def headPitchAbsolute(self, degrees):
        # Single target angle for one joint: interpolate the head pitch to the
        # requested angle, scaling the duration to roughly 15 degrees per second.
names = "HeadPitch"
angleLists = degrees*almath.TO_RAD
timeLists = 1.0 if (degrees == 0.0) else abs(15.0/degrees)
isAbsolute = True
self.motionProxy.angleInterpolation(names, angleLists, timeLists,
isAbsolute)
def bodyRotateAbsolute(self, degrees):
self.motionProxy.moveTo(0.0, 0.0, degrees*almath.TO_RAD)
def bodyWalkForward(self, distance):
self.motionProxy.moveTo(distance, 0.0, 0.0)
def takePicture(self, camera):
naoImage = self.camProxy.getImageRemote(camera)
# Get the image size and pixel array.
imageWidth = naoImage[0]
imageHeight = naoImage[1]
timestampUS = naoImage[5]
array = naoImage[6]
cameraID = naoImage[7]
leftAngle = naoImage[8]
topAngle = naoImage[9]
rightAngle = naoImage[10]
bottomAngle = naoImage[11]
array = np.fromstring(array, dtype=np.uint8)
# Note: This returns images as BGR
return {"imageWidth": imageWidth,
"imageHeight": imageHeight,
"image": np.reshape(array, (imageHeight, imageWidth, 3)),
"leftAngle": leftAngle,
"topAngle": topAngle,
"rightAngle": rightAngle,
"bottomAngle": bottomAngle,
"cameraID": cameraID,
"timestampUS": timestampUS}
def releasePicture(self, camera):
self.camProxy.releaseImage(camera)
def lookAtPixelInImage(self, image_dict, x, y):
        lookAtX, lookAtY = self.camProxy.getAngularPositionFromImagePosition(
            image_dict["cameraID"],
            # float() avoids Python 2 integer division silently yielding 0
            [float(x)/image_dict["imageWidth"], float(y)/image_dict["imageHeight"]])
self.motionProxy.angleInterpolation("HeadYaw", lookAtX,
1.0 if (lookAtX == 0.0) else abs((30.0*almath.TO_RAD)/lookAtX),
True)
self.motionProxy.angleInterpolation("HeadPitch", lookAtY,
1.0 if (lookAtY == 0.0) else abs((30.0*almath.TO_RAD)/lookAtY),
True)
def rotateBodyToObjectInImage(self, image_dict, x, y):
        lookAtX, _ = self.camProxy.getAngularPositionFromImagePosition(
            image_dict["cameraID"],
            [float(x)/image_dict["imageWidth"], float(y)/image_dict["imageHeight"]])
self.motionProxy.moveTo(0.0, 0.0, lookAtX)
def __del__(self):
self.postureProxy.goToPosture("Sit", 1.0)
self.disableExternalCollisionProtection()
self.motionProxy.rest()
self.camProxy.unsubscribe(self.topCamera)
self.camProxy.unsubscribe(self.bottomCamera)
self.broker.shutdown()
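A sketch of how the methods above fit together, assuming the omitted __init__ created self.camProxy and subscribed self.topCamera (both names appear in the visible code); the instance name and pixel coordinates are arbitrary.

# Hypothetical flow -- 'robot' is an instance of the (unnamed) class above.
picture = robot.takePicture(robot.topCamera)     # grab a frame dict from the top camera
robot.lookAtPixelInImage(picture, 160.0, 120.0)  # turn the head toward an arbitrary pixel
robot.releasePicture(robot.topCamera)            # frees the buffer via releaseImage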
Example 7: main
# Required import: from naoqi import ALProxy [as alias]
# Or alternatively: from naoqi.ALProxy import releaseImage [as alias]
def main(port,ip):
    IP=ip
    PORT=port
    #use the arguments rather than a hard-coded address
    proxy=ALProxy('ALVideoDevice',IP,PORT)
camera=0 #top camera=0 bottom camera=1
resolution=vision_definitions.kVGA
colorSpace=vision_definitions.kRGBColorSpace
fps=10
client=proxy.subscribeCamera("detector", camera, resolution,colorSpace,fps)
width=640
height=480
#image = np.zeros((height,width,3),np.uint8)
image=None
result = proxy.getImageRemote(client)
    #bail out early so image is never used uninitialized below
    if result is None:
        print ("cannot get image")
        proxy.unsubscribe(client)
        return
    elif result[6] is None:
        print ("no image data string.")
        proxy.unsubscribe(client)
        return
    else:
        image=np.fromstring(result[6],np.uint8).reshape(height,width,3)
        proxy.releaseImage(client)
        proxy.unsubscribe(client)
    cv2.imwrite(r'C:\Users\Joanna SanDretto\Downloads\calibration.png',cv2.cvtColor(image,cv2.COLOR_RGB2BGR))
plt.imshow(image,)
plt.show()
im=cv2.cvtColor(image,cv2.COLOR_RGB2HSV)
GREEN_LOW=[60,70,70]
GREEN_HIGH=[70,255,255]
PINK_LOW=[163,100,120]
PINK_HIGH=[167,255,255]
RED_LOW=[175,150,150]
RED_HIGH=[175,255,255]
PURPLE_LOW=[120,50,50]
PURPLE_HIGH=[122,255,255]
YELLOW_LOW=[15,166,50]
YELLOW_HIGH=[25,255,255]
boundaries=[(GREEN_LOW,GREEN_HIGH),(PINK_LOW,PINK_HIGH),(PURPLE_LOW,PURPLE_HIGH),(YELLOW_LOW,YELLOW_HIGH)]
kernel = np.ones((5,5),np.uint8)
for(lower,upper) in boundaries:
lower=np.array(lower,dtype="uint8")
upper=np.array(upper,dtype="uint8")
mask=cv2.inRange(im,lower,upper)
mask=cv2.dilate(mask,kernel,iterations=1)
plt.imshow(mask,)
plt.show()
imgray=mask
ret,thresh=cv2.threshold(imgray,127,255,0)
im2,contours,hierarchy=cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
        areas=[cv2.contourArea(c) for c in contours]
        if areas:  # skip this color range if the mask matched nothing
            max_index=np.argmax(areas)
            cnt=contours[max_index]
            x,y,w,h=cv2.boundingRect(cnt)
            cv2.rectangle(image,(x,y),(x+w,y+h),(0,255,0),2)
# img=cv2.drawContours(image,contours,-1,(0,255,0),3)
plt.imshow(image,)
plt.show()
Example 8: NaoShowMeGame
# Required import: from naoqi import ALProxy [as alias]
# Or alternatively: from naoqi.ALProxy import releaseImage [as alias]
class NaoShowMeGame(object):
def __init__(self,ip):
self._IP=ip
#Get access to Nao's camera
self._video=ALProxy('ALVideoDevice',self._IP,9559)
#Get access to Nao's text to speech
self._tts=ALProxy("ALTextToSpeech",self._IP,9559)
#set which camera to use: top camera=0 bottom camera=1
camera=0
#set which resolution to get pictures in: kVGA=640x480 pixels
resolution=vision_definitions.kVGA
#set which colorSpace to get images: kRGB is RGB values
colorSpace=vision_definitions.kRGBColorSpace
#set framerate: 1-30
fps=10
#subscribe to the camera
self._cameraClient=self._video.subscribeCamera("detector", camera, resolution,colorSpace,fps)
#set width and height of images
self._imageWidth=640
self._imageHeight=480
#Set boundaries for colors...ideally there will be a way to automatically calibrate this
self._GREEN_LOW=[60,70,70]
self._GREEN_HIGH=[70,255,255]
self._PINK_LOW=[163,100,120]
self._PINK_HIGH=[167,255,255]
self._RED_LOW=[175,150,150]
self._RED_HIGH=[175,255,255]
self._PURPLE_LOW=[117,51,50]
self._PURPLE_HIGH=[128,255,255]
self._YELLOW_LOW=[15,166,50]
self._YELLOW_HIGH=[25,255,255]
#put all the color ranges into a single list
self._boundaries=[(self._GREEN_LOW,self._GREEN_HIGH),(self._PINK_LOW,self._PINK_HIGH),(self._PURPLE_LOW,self._PURPLE_HIGH),(self._YELLOW_LOW,self._YELLOW_HIGH)]
def playLoop(self):
print "entering game loop"
print "press ctrl+c to stop the program"
# green="green"
#toSay="show me the "+green+" ball"
# self._tts.say(toSay)
#try:
#while true:
image=None
#get a picture from Nao
result = self._video.getImageRemote(self._cameraClient)
#make sure a picture is received before processing the image
if result is None:
print ("cannot get image")
elif result[6] is None:
print ("no image data string.")
else:
# the image is a bitstring, so reshape it into a multi-dimensional array with the RGB values of pixel with (x,y)
# location at image[x][y]
image=np.fromstring(result[6],np.uint8).reshape(480,640,3)
            # Release the image buffer locked by getImageLocal(); not mandatory for getImageRemote(), but recommended.
self._video.releaseImage(self._cameraClient)
# Convert the image's RGB values to HSV (Hue, Saturation, Value)...at first I used RGB, but read that HSV can work well for filtering
im=cv2.cvtColor(image,cv2.COLOR_RGB2HSV)
            #TODO: the main loop of the game will go here: give an instruction, wait, check whether it was followed, give feedback
# this is just for testing...finds all pixels between lower and upper values, finds contours and then draws a
# square around the contour with the largest area (presumably that is the object we want)
for(lower,upper) in self._boundaries:
#convert the HSV values to the correct format
lower=np.array(lower,dtype="uint8")
upper=np.array(upper,dtype="uint8")
#create a mask of the image where pixels that value in the range [lower,upper] are white and everything else is black
mask=cv2.inRange(im,lower,upper)
#use dilation and erosion to remove some of the noise in the mask
kernel = np.ones((5,5),np.uint8)
mask=cv2.dilate(mask,kernel,iterations=1)
mask=cv2.erode(mask,kernel,iterations=1)
#find boundaries between the black and white areas of the mask
im2,contours,hierarchy=cv2.findContours(mask,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
#get the area of all the regions returned by cv2.findContours
areas=[cv2.contourArea(c) for c in contours]
#get the index of the region with the largest area
if(len(areas)>0):
max_index=np.argmax(areas)
#get the contour with the largest area
#......... (the rest of the code is omitted here) .........