

Python Image.frombytes Method Code Examples

This article collects and summarizes typical usage examples of the PIL.Image.frombytes method in Python. If you are struggling with questions such as how exactly to call Image.frombytes, how to use it in practice, or what working examples look like, the curated code samples below may help. You can also explore further usage examples from the containing module, PIL.Image.


The following presents 15 code examples of the Image.frombytes method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
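Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what Image.frombytes expects: a mode, a (width, height) size, and a byte buffer whose length matches them; an optional decoder name and decoder arguments such as "raw", "BGRX" describe the source pixel layout, which several of the screenshot examples below rely on.

from PIL import Image

# A synthetic 4x2 RGB buffer: 4 * 2 pixels * 3 bytes per pixel = 24 bytes.
width, height = 4, 2
raw_rgb = bytes(range(width * height * 3))

# Interpret the raw buffer as an RGB image of the given size.
img = Image.frombytes("RGB", (width, height), raw_rgb)
print(img.size, img.mode)  # (4, 2) RGB

# The "raw" decoder can also be told the source pixel layout, e.g. the 4-byte
# BGRX pixels returned by the X11 and mss screenshot examples below.
raw_bgrx = bytes(width * height * 4)  # all-zero placeholder buffer
img2 = Image.frombytes("RGB", (width, height), raw_bgrx, "raw", "BGRX")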

Example 1: snapshot

# Required module import: from PIL import Image [as alias]
# Or: from PIL.Image import frombytes [as alias]
def snapshot(self, filename="tmp.png"):
        """
        Take a screenshot of the current display.

        Args:
            filename: target file name (default "tmp.png"; not used in this snippet)

        Returns:
            the screenshot as an OpenCV image (numpy array)

        """
        w, h = self.get_current_resolution()
        dsp = display.Display()
        root = dsp.screen().root
        raw = root.get_image(0, 0, w, h, X.ZPixmap, 0xffffffff)
        image = Image.frombytes("RGB", (w, h), raw.data, "raw", "BGRX")
        from airtest.aircv.utils import pil_2_cv2
        image = pil_2_cv2(image)
        return image 
Developer: AirtestProject, Project: Airtest, Lines of code: 21, Source: linux.py

Example 2: _capture2dImage

# Required module import: from PIL import Image [as alias]
# Or: from PIL.Image import frombytes [as alias]
def _capture2dImage(self, cameraId):
        # Capture Image in RGB

        # WARNING: the same name can only be used six times.
        strName = "capture2DImage_{}".format(random.randint(1,10000000000))

        clientRGB = self.video_service.subscribeCamera(strName, cameraId, AL_kVGA, 11, 10)
        imageRGB = self.video_service.getImageRemote(clientRGB)

        imageWidth   = imageRGB[0]
        imageHeight  = imageRGB[1]
        array        = imageRGB[6]
        image_string = str(bytearray(array))  # on Python 2 (NAOqi SDK), str() of a bytearray yields the raw byte string

        # Create a PIL Image from our pixel array.
        im = Image.frombytes("RGB", (imageWidth, imageHeight), image_string)

        # Save the image.
        image_name_2d = "images/img2d-" + str(self.imageNo2d) + ".png"
        im.save(image_name_2d, "PNG")  # saved to the images folder in the cwd; create it first if it does not exist
        self.imageNo2d += 1
        im.show()

        return 
Developer: maverickjoy, Project: pepper-robot-programming, Lines of code: 26, Source: asthama_search.py

Example 3: _capture3dImage

# Required module import: from PIL import Image [as alias]
# Or: from PIL.Image import frombytes [as alias]
def _capture3dImage(self):
        # Depth Image in RGB

        # WARNING: the same name can only be used six times.
        strName = "capture3dImage_{}".format(random.randint(1,10000000000))


        clientRGB = self.video_service.subscribeCamera(strName, AL_kDepthCamera, AL_kQVGA, 11, 10)
        imageRGB = self.video_service.getImageRemote(clientRGB)

        imageWidth  = imageRGB[0]
        imageHeight = imageRGB[1]
        array       = imageRGB[6]
        image_string = str(bytearray(array))

        # Create a PIL Image from our pixel array.
        im = Image.frombytes("RGB", (imageWidth, imageHeight), image_string)
        # Save the image.
        image_name_3d = "images/img3d-" + str(self.imageNo3d) + ".png"
        im.save(image_name_3d, "PNG")  # saved to the images folder in the cwd; create it first if it does not exist
        self.imageNo3d += 1
        im.show()

        return 
Developer: maverickjoy, Project: pepper-robot-programming, Lines of code: 26, Source: asthama_search.py

Example 4: _capture2dImage

# Required module import: from PIL import Image [as alias]
# Or: from PIL.Image import frombytes [as alias]
def _capture2dImage(self, cameraId):
        # Capture Image in RGB

        # WARNING: the same name can only be used six times.
        strName = "capture2DImage_{}".format(random.randint(1,10000000000))


        clientRGB = self.video.subscribeCamera(strName, cameraId, AL_kVGA, 11, 10)
        imageRGB = self.video.getImageRemote(clientRGB)

        imageWidth = imageRGB[0]
        imageHeight = imageRGB[1]
        array = imageRGB[6]
        image_string = str(bytearray(array))

        # Create a PIL Image from our pixel array.
        im = Image.frombytes("RGB", (imageWidth, imageHeight), image_string)

        # Save the image inside the images folder in the cwd.
        image_name_2d = "images/img2d-" + str(self.imageNo2d) + ".png"
        im.save(image_name_2d, "PNG")  # create the images folder first if it does not exist
        self.imageNo2d += 1
        im.show()

        return 
Developer: maverickjoy, Project: pepper-robot-programming, Lines of code: 27, Source: remote_voice.py

Example 5: _capture3dImage

# Required module import: from PIL import Image [as alias]
# Or: from PIL.Image import frombytes [as alias]
def _capture3dImage(self):
        # Depth Image in RGB

        # WARNING: the same name can only be used six times.
        strName = "capture3dImage_{}".format(random.randint(1,10000000000))

        clientRGB = self.video.subscribeCamera(strName, AL_kDepthCamera, AL_kQVGA, 11, 15)
        imageRGB = self.video.getImageRemote(clientRGB)

        imageWidth = imageRGB[0]
        imageHeight = imageRGB[1]
        array = imageRGB[6]
        image_string = str(bytearray(array))

        # Create a PIL Image from our pixel array.
        im = Image.frombytes("RGB", (imageWidth, imageHeight), image_string)

        # Save the image inside the images folder in the cwd.
        image_name_3d = "images/img3d-" + str(self.imageNo3d) + ".png"
        im.save(image_name_3d, "PNG")  # create the images folder first if it does not exist
        self.imageNo3d += 1
        im.show()

        return 
Developer: maverickjoy, Project: pepper-robot-programming, Lines of code: 26, Source: remote_voice.py

Example 6: find_game_position

# Required module import: from PIL import Image [as alias]
# Or: from PIL.Image import frombytes [as alias]
def find_game_position(self, threshold) -> Dict:
        monitor = self.shooter.monitors[0]
        buffer = self.shooter.grab(monitor)
        image = Image.frombytes('RGB', buffer.size, buffer.rgb).convert('L')
        image = np.array(image)
        dino_template = cv2.imread(os.path.join('templates', 'dino.png'), 0)
        res = cv2.matchTemplate(image, dino_template, cv2.TM_CCOEFF_NORMED)
        loc = np.where(res >= threshold)
        if len(loc[0]) == 0:
            dino_template = cv2.imread(os.path.join('templates', 'dino2.png'), 0)
            res = cv2.matchTemplate(image, dino_template, cv2.TM_CCOEFF_NORMED)
            loc = np.where(res >= threshold)
        if len(loc[0]):
            pt = next(zip(*loc[::-1]))
            w, h = dino_template.shape[::-1]
            lw, lh = self.landscape_template.shape[::-1]
            return dict(monitor, height=lh, left=pt[0], top=pt[1] - lh + h, width=lw)
        return {} 
Developer: pauloalves86, Project: go_dino, Lines of code: 20, Source: dino_api.py

Example 7: test_transparent_optimize

# Required module import: from PIL import Image [as alias]
# Or: from PIL.Image import frombytes [as alias]
def test_transparent_optimize(self):
        # from issue #2195, if the transparent color is incorrectly
        # optimized out, gif loses transparency
        # Need a palette that isn't using the 0 color, and one
        # that's > 128 items where the transparent color is actually
        # the top palette entry to trigger the bug.

        data = bytes(bytearray(range(1, 254)))
        palette = ImagePalette.ImagePalette("RGB", list(range(256))*3)

        im = Image.new('L', (253, 1))
        im.frombytes(data)
        im.putpalette(palette)

        out = self.tempfile('temp.gif')
        im.save(out, transparency=253)
        reloaded = Image.open(out)

        self.assertEqual(reloaded.info['transparency'], 253) 
Developer: holzschu, Project: python3_ios, Lines of code: 21, Source: test_file_gif.py

Example 8: run

# Required module import: from PIL import Image [as alias]
# Or: from PIL.Image import frombytes [as alias]
def run(raw_data):
    transform = transforms.transforms.Compose([
        transforms.transforms.ToTensor(),
        transforms.transforms.Normalize(
            (0.1307,), (0.3081,))
    ])
    img = Image.frombytes(
        '1', (28, 28), str(json.loads(json.dumps(raw_data))['data']).encode())
    input_data = transform(img)

    inputs_dc.collect(input_data)

    input_data = input_data.unsqueeze(0)
    classes = ['tshirt', 'Trouser', 'Pullover', 'Dress', 'Coat',
               'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
    output = model(input_data)

    prediction_dc.collect(output)

    index = torch.argmax(output, 1)
    return classes[index] 
Developer: Azure-Samples, Project: MLOpsDatabricks, Lines of code: 23, Source: score.py

Example 9: getmssimage

# Required module import: from PIL import Image [as alias]
# Or: from PIL.Image import frombytes [as alias]
def getmssimage(self):
        import mss
        
        with mss.mss() as sct:
            mon = sct.monitors[1]
            
            L = mon["left"] + self.X
            T = mon["top"] + self.Y
            W = L + self.width
            H = T + self.height
            bbox = (L,T,W,H)
            #print(bbox)
            sct_img = sct.grab(bbox)

            img_pil = Image.frombytes('RGB', sct_img.size, sct_img.bgra, 'raw', 'BGRX')
            img_np = np.array(img_pil)
            #finalimg = cv2.cvtColor(img_np, cv2.COLOR_RGB2GRAY)
            return img_np 
Developer: asingh33, Project: SupervisedChromeTrex, Lines of code: 20, Source: main.py

Example 10: convert

# Required module import: from PIL import Image [as alias]
# Or: from PIL.Image import frombytes [as alias]
def convert(data_path):
    # iterate through the data splits
    for data_split in ['train', 'test']:
        os.makedirs(os.path.join(data_path, data_split))
        data_split_path = os.path.join(data_path, 'softmotion30_44k', data_split)
        data_split_files = gfile.Glob(os.path.join(data_split_path, '*'))
        # iterate through the TF records
        for f in data_split_files:
            print('Current file: ' + f)
            ind = int(f.split('/')[-1].split('_')[1]) # starting video index
            # iterate through the sequences in this TF record
            for serialized_example in tf.python_io.tf_record_iterator(f):
                os.makedirs(os.path.join(data_path, data_split, str(ind)))
                example = tf.train.Example()
                example.ParseFromString(serialized_example)
                # iterate through the sequence
                for i in range(30):
                    image_name = str(i) + '/image_aux1/encoded'
                    byte_str = example.features.feature[image_name].bytes_list.value[0]
                    img = Image.frombytes('RGB', (64, 64), byte_str)
                    img = np.array(img.getdata()).reshape(img.size[1], img.size[0], 3) / 255.
                    imsave(os.path.join(data_path, data_split, str(ind), str(i) + '.png'), img)
                print('     Finished processing sequence ' + str(ind))
                ind += 1 
Developer: joelouismarino, Project: amortized-variational-filtering, Lines of code: 26, Source: convert_bair.py

Example 11: save_to_disk

# Required module import: from PIL import Image [as alias]
# Or: from PIL.Image import frombytes [as alias]
def save_to_disk(self, filename, format='.png'):
        """Save this image to disk (requires PIL installed)."""
        filename = _append_extension(filename, format)

        try:
            from PIL import Image as PImage
        except ImportError:
            raise RuntimeError(
                'cannot import PIL, make sure pillow package is installed')

        image = PImage.frombytes(
            mode='RGBA',
            size=(self.width, self.height),
            data=self.raw_data,
            decoder_name='raw')
        color = image.split()
        image = PImage.merge("RGB", color[2::-1])

        folder = os.path.dirname(filename)
        if not os.path.isdir(folder):
            os.makedirs(folder)
        image.save(filename, quality=100) 
Developer: felipecode, Project: coiltraine, Lines of code: 24, Source: sensor.py

Example 12: screen_game

# Required module import: from PIL import Image [as alias]
# Or: from PIL.Image import frombytes [as alias]
def screen_game(region, save_to=None):
	x, y, width, height = region
	try:
		raw = root.get_image(x, y, width, height, X.ZPixmap, 0xffffffff)
		if hasattr(Image, 'frombytes'):
			# for Pillow
			screenshot = Image.frombytes('RGB', (width, height), raw.data, 'raw', 'BGRX')
		else:
			# for PIL
			screenshot = Image.fromstring('RGB', (width, height), raw.data, 'raw', 'BGRX')
		if save_to is not None:
			screenshot.save(save_to + '.png')
	except:
		filename = save_to + '.png' if save_to is not None else None
		screenshot = pyautogui.screenshot(filename, region)
	return screenshot

# Return pixel color of given x, y coordinates 
Developer: AXeL-dev, Project: Dindo-Bot, Lines of code: 20, Source: tools.py

Example 13: show_pca

# Required module import: from PIL import Image [as alias]
# Or: from PIL.Image import frombytes [as alias]
def show_pca(self, dest_image, updater):
        colors = ['navy', 'turquoise', 'darkorange']
        if getattr(updater, 'pca', None) is None:
            return dest_image
        pca_discriminator = updater.pca.reshape(3, -1, updater.n_components_pca)

        plt.figure()
        for i, color in enumerate(colors):
            plt.scatter(pca_discriminator[i, :, 0], pca_discriminator[i, :, 1], color=color, lw=2)
        plt.legend(['fake', 'real', 'anchor'])

        canvas = plt.get_current_fig_manager().canvas
        canvas.draw()
        image = Image.frombytes('RGB', canvas.get_width_height(), canvas.tostring_rgb())
        image = image.resize((self.image_size.width, self.image_size.height), Image.LANCZOS)
        dest_image.paste(image, (self.image_size.width, self.image_size.height))
        plt.close()
        return dest_image 
Developer: Bartzi, Project: kiss, Lines of code: 20, Source: bbox_plotter.py

Example 14: fig_to_im

# Required module import: from PIL import Image [as alias]
# Or: from PIL.Image import frombytes [as alias]
def fig_to_im(fig):

    fig.canvas.draw()

    # Get the ARGB buffer from the figure canvas
    w, h = fig.canvas.get_width_height()
    buf = np.frombuffer(fig.canvas.tostring_argb(), dtype=np.uint8)
    buf = buf.reshape(h, w, 4)  # the buffer is row-major: height rows of width pixels

    # canvas.tostring_argb gives the pixmap in ARGB order; roll the alpha channel to the end to get RGBA
    buf = np.roll(buf, 3, axis=2)

    im_pil = Image.frombytes("RGBA", (w, h), buf.tobytes())
    im_np = np.array(im_pil)[:,:,:3]

    return im_np 
Developer: dingmyu, Project: D4LCN, Lines of code: 20, Source: util.py

Example 15: run

# Required module import: from PIL import Image [as alias]
# Or: from PIL.Image import frombytes [as alias]
def run(self):
        while True:
            task = self._queue.get()

            ctx = self._get_ctx(task.width, task.height)
            ctx.ctx.clear(*task.background_rgb)

            ctx.prog['mvp'].value = task.mvp

            if task.vertices:
                vbo = ctx.ctx.buffer(task.vertices)
                # noinspection PyTypeChecker
                vao = ctx.ctx.simple_vertex_array(ctx.prog, vbo, 'in_vert', 'in_color')
                vao.render()

            color_rbo2 = ctx.ctx.renderbuffer((task.width, task.height))
            fbo2 = ctx.ctx.framebuffer([color_rbo2])
            ctx.ctx.copy_framebuffer(fbo2, ctx.fbo)

            img = Image.frombytes('RGB', (task.width, task.height), fbo2.read(components=3))

            f = io.BytesIO()
            img.save(f, 'PNG')
            f.seek(0)
            task.set_result(f.read()) 
Developer: c3nav, Project: c3nav, Lines of code: 27, Source: opengl.py


Note: The PIL.Image.frombytes method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective authors, and the copyright of the source code belongs to the original authors. Please refer to the corresponding project's license before distributing or using the code; do not reproduce without permission.