This article collects typical usage examples of the Python method naoqi.ALProxy.getImageRemote. If you are wondering what ALProxy.getImageRemote does, how to call it, or want to see it used in context, the curated examples below should help. You can also read more about the enclosing class, naoqi.ALProxy.
The sections below show 15 code examples of ALProxy.getImageRemote, ordered by popularity by default.
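Before the individual examples, here is the pattern they all share: subscribe to ALVideoDevice, call getImageRemote once to fetch an ALImage container, read its fields, and unsubscribe. This is a minimal illustrative sketch only; "nao.local", port 9559 and the subscriber name are placeholder assumptions you must adapt, and the field layout (index 0 = width, 1 = height, 2 = layers, 6 = raw pixel buffer) matches the container used throughout the examples below.

# Minimal getImageRemote round trip (Python 2, NAOqi SDK).
# ROBOT_IP / ROBOT_PORT / the subscriber name are placeholder assumptions.
from naoqi import ALProxy

ROBOT_IP = "nao.local"   # assumption: replace with your robot's address
ROBOT_PORT = 9559

camProxy = ALProxy("ALVideoDevice", ROBOT_IP, ROBOT_PORT)
client = camProxy.subscribe("example_client", 2, 11, 5)  # VGA, RGB, 5 fps
try:
    naoImage = camProxy.getImageRemote(client)
    width, height, layers = naoImage[0], naoImage[1], naoImage[2]
    pixels = naoImage[6]  # raw bytes, width * height * layers long
    print "got %dx%d image with %d layers, %d bytes" % (width, height, layers, len(pixels))
finally:
    camProxy.unsubscribe(client)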
Example 1: main
# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import getImageRemote [as alias]
def main(IP, PORT):
    camProxy = ALProxy("ALVideoDevice", IP, PORT)
    # resolution = 2    # VGA 640x480
    resolution = 0      # kQQVGA, 160x120
    # colorSpace = 11   # RGB
    # colorSpace = 10   # YUV
    colorSpace = 9      # YUV422
    # Test runs often crash before unsubscribing, leaving the old subscriberID
    # registered, so the subscription name has to change between runs; append a
    # random suffix here.
    subscriberID = 'send_YUV422_' + str(random.randint(0, 100))
    videoClient = camProxy.subscribe(subscriberID, resolution, colorSpace, 30)

    # ----------> start the socket server and listen on LISTEN_PORT <----------
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind((IP, LISTEN_PORT))
    sock.listen(10)

    naoImage = camProxy.getImageRemote(videoClient)
    array = naoImage[6]
    # print 'VIDEO#' + array + '\r'
    print 'image data length:', len(array)
    print 'print test over.'
    print '---------------------------------------------'

    global connection
    connection = None  # so the KeyboardInterrupt handler works even before accept()
    try:
        while True:  # wait for a client connection; a single thread serves a single client
            print 'Waiting for a connection'
            connection, address = sock.accept()
            print 'socket client connected.'
            CONNECT = True
            while CONNECT == True:
                # send the YUV422 image to the client
                naoImage = camProxy.getImageRemote(videoClient)
                array = naoImage[6]
                # for i in range(len(array)):
                #     connection.send(array[i])
                #     if (i % 5 == 0):
                #         connection.send('\r')
                # connection.send('\r')
                connection.send(array + '\n')  # the image buffer is already a byte string in Python 2
                # connection.send('VIDEO#' + array + '\r#OVER\r')
                print 'sent image data successfully.'
                # time.sleep(1)
                CONNECT = False
            print 'socket client disconnected.'
    except KeyboardInterrupt:  # Ctrl+C shuts the server down
        print ""
        print "Interrupted by user, shutting down"
        camProxy.unsubscribe(videoClient)
        print 'unsubscribe nao video device'
        if connection is not None:
            connection.close()
            print 'socket connection closed.'
        sys.exit(0)
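Example 1 only implements the sending side. As a usage illustration, the hypothetical client below connects to the same port and reads one raw frame; the server address, LISTEN_PORT value and frame size are assumptions that must match the server's configuration (kQQVGA YUV422 is 160*120*2 bytes, plus the trailing '\n' sent above).

# Hypothetical client for Example 1's server (address, port and frame size are assumptions).
import socket

SERVER_IP = "nao.local"   # assumption: the address the server bound to
LISTEN_PORT = 8080        # assumption: must match the server's LISTEN_PORT
FRAME_BYTES = 160 * 120 * 2 + 1  # YUV422 payload plus the '\n' terminator

sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((SERVER_IP, LISTEN_PORT))
chunks = []
received = 0
while received < FRAME_BYTES:
    chunk = sock.recv(4096)
    if not chunk:
        break
    chunks.append(chunk)
    received += len(chunk)
sock.close()
frame = ''.join(chunks)[:FRAME_BYTES - 1]  # drop the trailing '\n'
print 'received %d bytes of YUV422 data' % len(frame)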
Example 2: main
# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import getImageRemote [as alias]
def main():
    global broker
    broker = ALBroker("myBroker", "0.0.0.0", 0, PEPPER_IP, PEPPER_PORT)

    # init camera
    video = ALProxy('ALVideoDevice')
    subscribers = video.getSubscribers()

    # init depth camera
    exists_camera_name = "{0}_0".format(DEPTH_CAMERA_NAME)
    if exists_camera_name in subscribers:
        depth_subscribe_id = exists_camera_name
    else:
        depth_subscribe_id = video.subscribeCamera(DEPTH_CAMERA_NAME, DEPTH_CAMERA_ID, RESOLUTION, DEPTH_COLOR_SPACE, FRAMERATE)
    print u"subscribe_id: {0}".format(depth_subscribe_id)

    # init rgb camera
    exists_camera_name = "{0}_0".format(RGB_CAMERA_NAME)
    if exists_camera_name in subscribers:
        rgb_subscribe_id = exists_camera_name
    else:
        rgb_subscribe_id = video.subscribeCamera(RGB_CAMERA_NAME, RGB_CAMERA_ID, RESOLUTION, RGB_COLOR_SPACE, FRAMERATE)
    print u"rgb_subscribe_id: {0}".format(rgb_subscribe_id)

    # get image
    for i in range(0, TIMES):
        print u"try: {0} {1}".format(i, datetime.now().strftime("%Y/%m/%d %H:%M:%S"))
        depth_ary = video.getImageRemote(depth_subscribe_id)
        rgb_ary = video.getImageRemote(rgb_subscribe_id)

        depth = QiImage(depth_ary)
        depth_binary = [struct.unpack('B', x)[0] for x in depth.binary]
        depth_save_path = DEPTH_FILE_PREFIX + "_" + str(i) + "_" + datetime.now().strftime("%Y%m%d_%H%M%S") + '.bmp'
        with Image.new("L", (depth.width, depth.height)) as im:
            im.putdata(depth_binary[::2])
            im.save(depth_save_path, format='bmp')
        print u"depth image file was created: {0}".format(depth_save_path)

        rgb = QiImage(rgb_ary)
        rgb_save_path = RGB_FILE_PREFIX + "_" + str(i) + "_" + datetime.now().strftime("%Y%m%d_%H%M%S") + '.bmp'
        rgb_binary = [struct.unpack('B', x)[0] for x in rgb.binary]
        with Image.new("RGB", (rgb.width, rgb.height)) as im:
            im.putdata(zip(*[iter(rgb_binary)] * 3))
            im.save(rgb_save_path, format='bmp')
        print u"rgb image file was created: {0}".format(rgb_save_path)

        both_save_path = BOTH_FILE_PREFIX + "_" + str(i) + "_" + datetime.now().strftime("%Y%m%d_%H%M%S") + '.bmp'
        with Image.new("RGB", (rgb.width, rgb.height * 2)) as im:
            im.putdata(zip(*[iter(rgb_binary)] * 3) + [(x, x, x) for x in depth_binary[::2]])
            im.save(both_save_path, format='bmp')
            im.show()
        print u"both image file was created: {0}".format(both_save_path)

        time.sleep(WAIT)

    video.unsubscribe(depth_subscribe_id)
    video.unsubscribe(rgb_subscribe_id)
Example 3: showNaoImage
# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import getImageRemote [as alias]
def showNaoImage(IP, PORT):
    camProxy = ALProxy("ALVideoDevice", IP, PORT)
    resolution = 2    # VGA
    colorSpace = 11   # RGB

    videoClient = camProxy.subscribe("python_client", resolution, colorSpace, 5)
    t0 = time.time()
    # Get a camera image.
    # image[6] contains the image data passed as an array of ASCII chars.
    naoImage = camProxy.getImageRemote(videoClient)
    t1 = time.time()
    # Time the image transfer.
    print "acquisition delay ", t1 - t0
    camProxy.unsubscribe(videoClient)

    # Now we work with the image returned and save it as a JPEG with PIL.
    # Get the image size and pixel array.
    imageWidth = naoImage[0]
    imageHeight = naoImage[1]
    array = naoImage[6]

    # Create a PIL Image from our pixel array.
    im = Image.fromstring("RGB", (imageWidth, imageHeight), array)
    nomPhoto = time.strftime("%d-%m-%y_a_%H-%M-%S", time.localtime())
    print nomPhoto
    # Save the image.
    im.save("../public/imgNao/" + nomPhoto + ".jpeg", "JPEG")
Example 4: VideoModule
# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import getImageRemote [as alias]
class VideoModule():

    def __init__(self, resolution=2, colorSpace=11, fps=5):
        self.vd = ALProxy('ALVideoDevice')
        modules.append(self)
        self.vd.subscribe('videoModule', resolution, colorSpace, fps)

    def getImage(self):
        results = self.vd.getImageRemote('videoModule')
        image = NaoImage()
        image.width = results[0]
        image.height = results[1]
        image.layersNumber = results[2]
        image.colorSpace = results[3]
        image.timestamp = results[4]
        image.microtimestamp = results[5]
        image.pixels = np.frombuffer(
            results[6], dtype=np.uint8).reshape((image.height, image.width, 3))
        image.cameraID = results[7]
        image.leftAngle = results[8]
        image.rightAngle = results[9]
        image.topAngle = results[10]
        image.bottomAngle = results[11]
        return image

    def startRecord(self, filename):
        self.vd.recordVideo('videoModule', 100, 1)

    def stopRecord(self, filename):
        self.vd.stopVideo('videoModule')

    def close(self):
        self.vd.unsubscribe('videoModule')
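A brief usage sketch for Example 4's VideoModule. It assumes a NAOqi broker is already running in the process (so ALProxy('ALVideoDevice') can resolve) and that the `modules` list and `NaoImage` class referenced above are defined in the original script; the loop below is illustrative only.

# Illustrative use of VideoModule (assumes an ALBroker is connected and
# that `modules` / `NaoImage` from the original script exist).
vm = VideoModule(resolution=2, colorSpace=11, fps=5)
try:
    frame = vm.getImage()
    # frame.pixels is an HxWx3 uint8 numpy array, ready for OpenCV/PIL processing
    print 'got frame %dx%d from camera %d' % (frame.width, frame.height, frame.cameraID)
finally:
    vm.close()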
Example 5: getColour
# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import getImageRemote [as alias]
def getColour(IP, PORT):
    """
    First get an image from Nao, then detect a shape in it and show the result.
    :param IP:
    :param PORT:
    """
    myBroker = ALBroker("myBroker",
                        "0.0.0.0",  # listen to anyone
                        0,          # find a free port and use it
                        IP,         # parent broker IP
                        PORT)       # parent broker port

    camProxy = ALProxy("ALVideoDevice", IP, PORT)
    resolution = 2    # VGA
    colorSpace = 11   # RGB

    videoClient = camProxy.subscribe("python_client", resolution, colorSpace, 5)
    t0 = time.time()
    # Get a camera image.
    # image[6] contains the image data passed as an array of ASCII chars.
    naoImage = camProxy.getImageRemote(videoClient)
    t1 = time.time()
    # Time the image transfer.
    # print "Runde: ", b
    camProxy.unsubscribe(videoClient)

    # Now we work with the image returned.
    # Get the image size and pixel array.
    imageWidth = naoImage[0]
    imageHeight = naoImage[1]
    array = naoImage[6]

    # Create a PIL Image instance from our pixel array.
    img0 = Image.frombytes("RGB", (imageWidth, imageHeight), array)
    # frame = np.asarray(convert2pil(img0)[:, :])
    # object_rect2 = detectColor(img0, RED_MIN, RED_MAX)
    frame = detectShape(img0, RED_MIN, RED_MAX)
    # frame = selectDetected(object_rect1, frame)
    # frame = selectDetected(object_rect2, frame)
    # currentImage = path + "/camImage1cm.jpg"
    # cv2.imwrite(currentImage, frame)
    cv2.imshow('contour', frame)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example 6: showNaoImage
# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import getImageRemote [as alias]
def showNaoImage(IP, PORT):
    camProxy = ALProxy("ALVideoDevice", IP, PORT)
    resolution = 2    # VGA
    colorSpace = 11   # RGB

    videoClient = camProxy.subscribe("python_client", resolution, colorSpace, 5)
    # Get a camera image.
    # image[6] contains the image data passed as an array of ASCII chars.
    naoImage = camProxy.getImageRemote(videoClient)
    camProxy.unsubscribe(videoClient)

    # Now we work with the image returned and save it as a JPEG with PIL.
    # Get the image size and pixel array.
    imageWidth = naoImage[0]
    imageHeight = naoImage[1]
    array = naoImage[6]

    # Create a PIL Image from our pixel array.
    im = Image.fromstring("RGB", (imageWidth, imageHeight), array)
    # Save the image.
    im.save("../public/imgNao/live.jpeg", "JPEG")
Example 7: ImageHandler
# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import getImageRemote [as alias]
class ImageHandler():

    def __init__(self):
        self.video = ALProxy("ALVideoDevice", nao_ip, 9559)
        self.resolution = 2   # VGA
        self.colorSpace = 11  # RGB
        self.framerate = 30
        self.nameID = self.video.subscribe("NAOLearn", self.resolution, self.colorSpace, self.framerate)

    def __del__(self):
        # unsubscribe with the handle returned by subscribe()
        self.video.unsubscribe(self.nameID)

    def getLatestFrame(self):
        # try:
        alimage = self.video.getImageRemote(self.nameID)
        imageWidth = alimage[0]
        imageHeight = alimage[1]
        imageArray = alimage[6]
        image = Image.fromstring("RGB", (imageWidth, imageHeight), imageArray)
        output = StringIO.StringIO()
        image.save(output, format="JPEG", quality=80, optimize=True)
        outputString = output.getvalue()
        output.close()
        return outputString
Example 8: getNaoImage
# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import getImageRemote [as alias]
def getNaoImage(IP, PORT):
    camProxy = ALProxy("ALVideoDevice", IP, PORT)
    resolution = 2   # 640*480 px, see http://doc.aldebaran.com/2-1/family/robots/video_robot.html#cameraresolution-mt9m114
    colorSpace = 11  # RGB colorspace, see http://doc.aldebaran.com/2-1/family/robots/video_robot.html#cameracolorspace-mt9m114
    fps = 5          # can be 0-30 fps

    videoClient = camProxy.subscribe("python_client", resolution, colorSpace, fps)
    t0 = time.time()
    naoImage = camProxy.getImageRemote(videoClient)
    t1 = time.time()
    camProxy.unsubscribe(videoClient)

    # Get the image size and pixel array.
    imageWidth = naoImage[0]
    imageHeight = naoImage[1]
    array = naoImage[6]

    # Create a PIL Image from our pixel array.
    im = Image.fromstring("RGB", (imageWidth, imageHeight), array)
    # Grab the image from PIL and convert it to an OpenCV (BGR) image.
    img = np.array(im)
    img = img[:, :, ::-1].copy()
    # im.save(name, "PNG")
    print "acquisition delay ", t1 - t0
    return img
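As a usage note for Example 8, the array it returns is already in OpenCV's BGR channel order, so it can be displayed or written out directly. The call below is an illustration only; the robot address and file name are assumptions.

# Illustrative call ("nao.local" and the output file name are assumptions).
import cv2

frame = getNaoImage("nao.local", 9559)
cv2.imwrite("nao_snapshot.png", frame)  # BGR ordering is what OpenCV expects
cv2.imshow("NAO camera", frame)
cv2.waitKey(0)
cv2.destroyAllWindows()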
Example 9: showNaoImage
# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import getImageRemote [as alias]
def showNaoImage(IP, PORT):
    """
    First get an image from Nao, then show it on the screen with PIL.
    """
    camProxy = ALProxy("ALVideoDevice", IP, PORT)
    resolution = 2    # VGA
    colorSpace = 11   # RGB

    videoClient = camProxy.subscribe("python_client", resolution, colorSpace, 5)
    t0 = time.time()
    # Get a camera image.
    # image[6] contains the image data passed as an array of ASCII chars.
    naoImage = camProxy.getImageRemote(videoClient)
    t1 = time.time()
    # Time the image transfer.
    print "acquisition delay ", t1 - t0
    camProxy.unsubscribe(videoClient)

    # Now we work with the image returned and save it as a PNG.
    # Get the image size and pixel array.
    imageWidth = naoImage[0]
    imageHeight = naoImage[1]
    numLayers = naoImage[2]
    array = naoImage[6]
    print "Type is", type(array)
    vec_im = []

    # Create a PIL Image from our pixel array.
    im = Image.fromstring("RGB", (imageWidth, imageHeight), array)
    print "Type of image is", type(im)
    vec_im.append(im)
    # nparr = np.fromstring(array, np.uint8).reshape(imageHeight, imageWidth, numLayers)
    # img_np = cv2.imdecode(nparr, cv2.CV_LOAD_IMAGE_COLOR)
    # cv2.imshow('np', img_np)
    # print type(img_np)

    # Save the image.
    im.save("garbage.png", "PNG")
    im.show()

    open_cv_image = np.array(im)
    # Convert RGB to BGR
    open_cv_image = open_cv_image[:, :, ::-1].copy()
    # cv2.startWindowThread()
    cv2.imshow("image", open_cv_image)
    k = cv2.waitKey(0)
    if k == 27:  # wait for ESC key to exit
        cv2.destroyAllWindows()
        cv2.waitKey(1)
Example 10: video
# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import getImageRemote [as alias]
class video(ALModule):
    """Take a video."""

    def __init__(self, IP="nao.local", PORT=9559, CameraID=0, parent=None):
        """
        Initialization.
        """
        self._IP = IP
        self._PORT = PORT
        self._CameraID = CameraID
        self.counter = 0
        # Proxy to ALVideoDevice.
        self._videoProxy = None
        # Our video module name.
        self._imgClient = ""
        # This will contain the ALImage we get from Nao.
        self._alImage = None
        self._image = []
        self._registerImageClient(self._IP, self._PORT)

    def _registerImageClient(self, IP, PORT):
        """Register our video module with the robot."""
        self._videoProxy = ALProxy("ALVideoDevice", self._IP, self._PORT)
        resolution = vision_definitions.kQVGA  # 320 * 240
        colorSpace = vision_definitions.kRGBColorSpace
        self._imgClient = self._videoProxy.subscribe("_client", resolution, colorSpace, 30)
        # Select camera.
        self._videoProxy.setParam(vision_definitions.kCameraSelectID,
                                  self._CameraID)

    def _unregisterImageClient(self):
        """Unregister our naoqi video module."""
        if self._imgClient != "":
            self._videoProxy.unsubscribe(self._imgClient)

    def _updateImage(self):
        """Retrieve a new image from Nao."""
        # print "--------------------new image--------------------"
        self._alImage = self._videoProxy.getImageRemote(self._imgClient)
        imageWidth = self._alImage[0]
        imageHeight = self._alImage[1]
        array = self._alImage[6]
        # Create a PIL Image from our pixel array.
        im = Image.fromstring("RGB", (imageWidth, imageHeight), array)
        return im

    def timerEvent(self, event):
        """Event triggered by startTimer."""
        self._updateImage()

    def __del__(self):
        """Object destructor: stops the video stream."""
        self._unregisterImageClient()
Example 11: getColour
# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import getImageRemote [as alias]
def getColour(IP, PORT, x, y):
    """
    Get an image from Nao and save it to disk for colour configuration.
    """
    # path = "/home/nao/images/"
    path = "/home/guenthse/uni/semesterprojekt/nao_images/"
    myBroker = ALBroker("myBroker",
                        "0.0.0.0",  # listen to anyone
                        0,          # find a free port and use it
                        IP,         # parent broker IP
                        PORT)       # parent broker port

    camProxy = ALProxy("ALVideoDevice", IP, PORT)
    resolution = 2    # VGA
    colorSpace = 11   # RGB
    xValue = x
    yValue = y
    # for b in range(0, 5):
    areas = [0, 0, 0]
    colors = ['red', 'green', 'blue']

    videoClient = camProxy.subscribe("python_client", resolution, colorSpace, 5)
    t0 = datetime.datetime.isoformat(datetime.datetime.now())
    # Get a camera image.
    # image[6] contains the image data passed as an array of ASCII chars.
    naoImage = camProxy.getImageRemote(videoClient)
    t1 = time.time()
    # Time the image transfer.
    # print "Runde: ", b
    camProxy.unsubscribe(videoClient)

    # Now we work with the image returned and save it as a JPEG with PIL.
    # Get the image size and pixel array.
    imageWidth = naoImage[0]
    imageHeight = naoImage[1]
    array = naoImage[6]

    # Create a PIL Image from our pixel array.
    im = Image.fromstring("RGB", (imageWidth, imageHeight), array)
    # Save the image.
    im.save(path + "configColour" + str(t0) + ".jpg", "JPEG")
Example 12: main
# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import getImageRemote [as alias]
def main(args):
    # references:
    # https://github.com/ros-aldebaran/romeo_robot/blob/master/romeo_sensors/nodes/camera_depth.py
    # https://github.com/ros-perception/image_pipeline/blob/indigo/depth_image_proc/src/nodelets/point_cloud_xyz.cpp
    video = ALProxy('ALVideoDevice', args.ip, args.port)
    client = video.subscribeCamera(NAME, CAMERA_ID, RESOLUTION, COLOR_SPACE, FRAMERATE)
    try:
        image = video.getImageRemote(client)
        if image is None:
            print 'Cannot obtain depth image.'
            exit()
        width = image[0]
        height = image[1]
        array = image[6]
        cloud = []
        for v in range(height):
            for u in range(width):
                offset = (v * width + u) * 2
                depth = ord(array[offset]) + ord(array[offset + 1]) * 256
                x = (u - CX) * depth * UNIT_SCALING / FX
                y = (v - CY) * depth * UNIT_SCALING / FY
                z = depth * UNIT_SCALING
                cloud.append((x, y, z))
    finally:
        video.unsubscribe(client)

    header = '''# .PCD v0.7 - Point Cloud Data file format
VERSION 0.7
FIELDS x y z
SIZE 4 4 4
TYPE F F F
COUNT 1 1 1
WIDTH %d
HEIGHT 1
VIEWPOINT 0 0 0 1 0 0 0
POINTS %d
DATA ascii'''

    f = open(args.output, 'w')
    num = len(cloud)
    f.write(header % (num, num))
    f.write("\n")
    for c in cloud:
        f.write('%f %f %f' % (c[0], c[1], c[2]))
        f.write("\n")
    f.close()
Example 13: NaoVision
# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import getImageRemote [as alias]
class NaoVision():

    def __init__(self):
        rospy.init_node("nao_vision")

        # attempt to get the host and port
        try:
            host = rospy.get_param("/naoqi/host")
        except KeyError:
            rospy.logerr("Unable to find parameter /naoqi/host")
            exit(1)
        try:
            port = rospy.get_param("/naoqi/port")
        except KeyError:
            rospy.logerr("Unable to find parameter /naoqi/port")
            exit(1)

        # connect to the Nao
        try:
            self.nao_cam = ALProxy("ALVideoDevice", host, port)
        except Exception:
            rospy.logerr("Unable to create vision proxy.")
            exit(1)

        # get any optional parameters
        resolution = rospy.get_param("~resolution", 1)
        cam = rospy.get_param("~camera", 0)

        # set up the camera stream
        self.proxy_name = self.nao_cam.subscribe("nao_image", resolution, kRGBColorSpace, 30)
        self.nao_cam_pub = rospy.Publisher("nao_camera", Image)
        self.nao_cam.setParam(kCameraSelectID, cam)
        rospy.loginfo("NAO Vision Node Initialized")

    def publish_image(self):
        # get the image from the Nao
        img = self.nao_cam.getImageRemote(self.proxy_name)
        # copy the data into the ROS Image
        ros_img = Image()
        ros_img.width = img[0]
        ros_img.height = img[1]
        ros_img.step = img[2] * img[0]
        ros_img.header.stamp.secs = img[4]  # ALImage index 4 holds the timestamp seconds (index 5 is microseconds)
        ros_img.is_bigendian = False
        ros_img.encoding = "rgb8"
        ros_img.data = img[6]
        # publish the image
        self.nao_cam_pub.publish(ros_img)

    def disconnect(self):
        self.nao_cam.unsubscribe(self.proxy_name)
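A usage sketch for Example 13's node: a hypothetical entry point that polls the camera and republishes frames. The publish rate and the shutdown handling are assumptions added for illustration, not part of the original node.

# Hypothetical driver loop for NaoVision (the rate value is an assumption).
if __name__ == '__main__':
    vision = NaoVision()
    rate = rospy.Rate(30)  # roughly match the 30 fps subscription above
    try:
        while not rospy.is_shutdown():
            vision.publish_image()
            rate.sleep()
    finally:
        vision.disconnect()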
Example 14: NaoImage
# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import getImageRemote [as alias]
class NaoImage(QWidget):

    def __init__(self, IP, PORT, CameraID, parent=None):
        QWidget.__init__(self, parent)
        self._image = QImage()
        self.setWindowTitle("Nao")
        self._imgWidth = 640    # other options: 320, 1280
        self._imgHeight = 480   # other options: 240, 960
        self._cameraID = CameraID
        self.resize(self._imgWidth, self._imgHeight)
        self._array = np.zeros((640, 480))  # change when imgWidth/imgHeight changes
        self._videoProxy = None
        self._imgClient = ""
        self._alImage = None
        self._registerImageClient(IP, PORT)
        self.startTimer(100)  # change for number of milliseconds

    def _registerImageClient(self, IP, PORT):
        self._videoProxy = ALProxy("ALVideoDevice", IP, PORT)
        resolution = vision_definitions.kVGA  # other options: kQVGA (320 * 240), k4VGA (1280 * 960)
        colorSpace = vision_definitions.kRGBColorSpace  # may be useful for color conversions? HSY and BGR are options
        self._imgClient = self._videoProxy.subscribe("_client", resolution, colorSpace, 5)
        self._videoProxy.setParam(vision_definitions.kCameraSelectID, self._cameraID)

    def _unregisterImageClient(self):
        if self._imgClient != "":
            self._videoProxy.unsubscribe(self._imgClient)

    def paintEvent(self, event):
        painter = QPainter(self)
        painter.drawImage(painter.viewport(), self._image)

    def convertToCV(self):
        self._array = qim.rgb_view(self._image, True)
        self._array = cv2.cvtColor(self._array, cv2.cv.CV_RGB2BGR)
        # convertToQImage if desired

    def _updateImage(self):
        self._alImage = self._videoProxy.getImageRemote(self._imgClient)
        self._image = QImage(self._alImage[6], self._alImage[0], self._alImage[1], QImage.Format_RGB888)  # there may be a way to avoid using QImage but idk yet
        self._image = QImage.convertToFormat(self._image, QImage.Format_RGB32)
        self.convertToCV()

    def timerEvent(self, event):
        self._updateImage()
        self.update()

    def __del__(self):
        self._unregisterImageClient()
Example 15: get_nao_image
# Required import: from naoqi import ALProxy [as alias]
# Or: from naoqi.ALProxy import getImageRemote [as alias]
def get_nao_image():
    resolution = vision_definitions.kQVGA
    colorSpace = vision_definitions.kBGRColorSpace
    cam = ALProxy("ALVideoDevice", na.IP, PORT)
    vidya = cam.subscribe("nao", resolution, colorSpace, 5)
    shot = cam.getImageRemote(vidya)
    cam.unsubscribe(vidya)
    size = (shot[0], shot[1])
    image = cv.CreateImageHeader(size, cv.IPL_DEPTH_8U, 3)
    cv.SetData(image, shot[6], shot[0] * 3)
    image = np.asarray(image[:, :])
    return image