

Python ImageOps.grayscale Method Code Examples

This article collects typical usage examples of the PIL.ImageOps.grayscale method in Python. If you are wondering exactly what ImageOps.grayscale does, how to call it, or what its usage looks like in practice, the curated examples below may help. You can also explore further usage examples from the containing module, PIL.ImageOps.


Nine code examples of the ImageOps.grayscale method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
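Before diving into the collected examples, here is a minimal usage sketch of the call itself (the file name photo.jpg is only a placeholder for a local image): ImageOps.grayscale(image) returns a new copy of the image converted to greyscale mode "L", equivalent to image.convert("L").

from PIL import Image, ImageOps

img = Image.open("photo.jpg")    # placeholder path; any image Pillow can read
gray = ImageOps.grayscale(img)   # returns a new mode "L" image; the input is untouched
print(gray.mode, gray.size)      # "L", same width and height as the input
gray.save("photo_gray.jpg")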

Example 1: addNoiseAndGray

# Required import: from PIL import ImageOps [as alias]
# Alternatively: from PIL.ImageOps import grayscale [as alias]
def addNoiseAndGray(surf):
    # https://stackoverflow.com/questions/34673424/how-to-get-numpy-array-of-rgb-colors-from-pygame-surface
    imgdata = pygame.surfarray.array3d(surf)
    imgdata = imgdata.swapaxes(0, 1)
    # print('imgdata shape %s' % imgdata.shape)  # should be IMG_HEIGHT x IMG_WIDTH
    imgdata2 = noise_generator('s&p', imgdata)

    img2 = Image.fromarray(np.uint8(imgdata2))
    # img2.save('/home/zhichyu/Downloads/2sp.jpg')
    grayscale2 = ImageOps.grayscale(img2)
    # grayscale2.save('/home/zhichyu/Downloads/2bw2.jpg')
    # return grayscale2

    array = np.asarray(np.uint8(grayscale2))
    # print('array.shape %s' % array.shape)
    selem = disk(random.randint(0, 1))
    eroded = erosion(array, selem)
    return eroded 
Developer: deepinsight, Project: insightocr, Lines: 20, Source: gen.py

Example 2: get_noise_from_file

# Required import: from PIL import ImageOps [as alias]
# Alternatively: from PIL.ImageOps import grayscale [as alias]
def get_noise_from_file(file_name):
  original = Image.open(file_name)
  greyscale = ImageOps.grayscale(original)
  # Image.tostring() was removed and numpy.fromstring() is deprecated in current
  # Pillow/NumPy; tobytes()/frombuffer() are the drop-in replacements.
  greyscale_vector = numpy.frombuffer(greyscale.tobytes(), dtype=numpy.uint8)
  greyscale_matrix = numpy.reshape(greyscale_vector,
                                   (original.size[1], original.size[0]))
  return get_noise(greyscale_matrix)

# Command line utility for creating the characteristic. 
Developer: andrewlewis, Project: camera-id, Lines: 11, Source: make_characteristic.py

Example 3: _load_cv2

# Required import: from PIL import ImageOps [as alias]
# Alternatively: from PIL.ImageOps import grayscale [as alias]
def _load_cv2(img, grayscale=None):
    """
    TODO
    """
    # load images if given filename, or convert as needed to opencv
    # Alpha layer just causes failures at this point, so flatten to RGB.
    # RGBA: load with -1 * cv2.CV_LOAD_IMAGE_COLOR to preserve alpha
    # to matchTemplate, need template and image to be the same wrt having alpha

    if grayscale is None:
        grayscale = GRAYSCALE_DEFAULT
    if isinstance(img, (str, unicode)):
        # The function imread loads an image from the specified file and
        # returns it. If the image cannot be read (because of missing
        # file, improper permissions, unsupported or invalid format),
        # the function returns an empty matrix
        # http://docs.opencv.org/3.0-beta/modules/imgcodecs/doc/reading_and_writing_images.html
        if grayscale:
            img_cv = cv2.imread(img, LOAD_GRAYSCALE)
        else:
            img_cv = cv2.imread(img, LOAD_COLOR)
        if img_cv is None:
            raise IOError("Failed to read %s because file is missing, "
                          "has improper permissions, or is an "
                          "unsupported or invalid format" % img)
    elif isinstance(img, numpy.ndarray):
        # don't try to convert an already-gray image to gray
        if grayscale and len(img.shape) == 3:  # and img.shape[2] == 3:
            img_cv = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        else:
            img_cv = img
    elif hasattr(img, 'convert'):
        # assume it's a PIL.Image; convert to cv format
        img_array = numpy.array(img.convert('RGB'))
        img_cv = img_array[:, :, ::-1].copy()  # -1 does RGB -> BGR
        if grayscale:
            img_cv = cv2.cvtColor(img_cv, cv2.COLOR_BGR2GRAY)
    else:
        raise TypeError('expected an image filename, OpenCV numpy array, or PIL image')
    return img_cv 
Developer: asweigart, Project: pyscreeze, Lines: 42, Source: __init__.py

Example 4: __getitem__

# Required import: from PIL import ImageOps [as alias]
# Alternatively: from PIL.ImageOps import grayscale [as alias]
def __getitem__(self, index):
        datafiles = self.files[self.split][index]

        img_file = datafiles["img"]
        img = Image.open(img_file).convert('RGB')
        np3ch = np.array(img)
        if self.input_ch == 1:
            img = ImageOps.grayscale(img)

        elif self.input_ch == 4:
            extended_np3ch = np.concatenate([np3ch, np3ch[:, :, 0:1]], axis=2)
            img = Image.fromarray(np.uint8(extended_np3ch))

        label_file = datafiles["label"]
        label = Image.open(label_file).convert("P")

        if self.img_transform:
            img = self.img_transform(img)

        if self.label_transform:
            label = self.label_transform(label)

        if self.test:
            return img, label, img_file

        return img, label 
Developer: mil-tokyo, Project: MCD_DA, Lines: 28, Source: datasets.py

Example 5: get_example

# Required import: from PIL import ImageOps [as alias]
# Alternatively: from PIL.ImageOps import grayscale [as alias]
def get_example(self, i):
        # type: (any) -> typing.Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray]
        """
        :return: (RGB array [0~255], gray array [0~255], RGB array [0~255])
        """
        image = self.base[i]
        rgb_image_data = numpy.asarray(image, dtype=self._dtype).transpose(2, 0, 1)[:3, :, :]
        gray_image = ImageOps.grayscale(image)
        gray_image_data = numpy.asarray(gray_image, dtype=self._dtype)[:, :, numpy.newaxis].transpose(2, 0, 1)
        return rgb_image_data, gray_image_data, rgb_image_data 
Developer: DwangoMediaVillage, Project: Comicolorization, Lines: 12, Source: image_dataset.py

Example 6: invert

# Required import: from PIL import ImageOps [as alias]
# Alternatively: from PIL.ImageOps import grayscale [as alias]
def invert(image, invert=False, **kwargs):
    if invert:
        image = ImageOps.grayscale(image)
        image = ImageOps.invert(image)
        enhancer = ImageEnhance.Contrast(image)
        image = enhancer.enhance(2.5)

    return image 
Developer: astrobin, Project: astrobin, Lines: 10, Source: thumbnail_processors.py

Example 7: test_sanity

# Required import: from PIL import ImageOps [as alias]
# Alternatively: from PIL.ImageOps import grayscale [as alias]
def test_sanity(self):

        ImageOps.autocontrast(hopper("L"))
        ImageOps.autocontrast(hopper("RGB"))

        ImageOps.autocontrast(hopper("L"), cutoff=10)
        ImageOps.autocontrast(hopper("L"), ignore=[0, 255])

        ImageOps.colorize(hopper("L"), (0, 0, 0), (255, 255, 255))
        ImageOps.colorize(hopper("L"), "black", "white")

        ImageOps.pad(hopper("L"), (128, 128))
        ImageOps.pad(hopper("RGB"), (128, 128))

        ImageOps.crop(hopper("L"), 1)
        ImageOps.crop(hopper("RGB"), 1)

        ImageOps.deform(hopper("L"), self.deformer)
        ImageOps.deform(hopper("RGB"), self.deformer)

        ImageOps.equalize(hopper("L"))
        ImageOps.equalize(hopper("RGB"))

        ImageOps.expand(hopper("L"), 1)
        ImageOps.expand(hopper("RGB"), 1)
        ImageOps.expand(hopper("L"), 2, "blue")
        ImageOps.expand(hopper("RGB"), 2, "blue")

        ImageOps.fit(hopper("L"), (128, 128))
        ImageOps.fit(hopper("RGB"), (128, 128))

        ImageOps.flip(hopper("L"))
        ImageOps.flip(hopper("RGB"))

        ImageOps.grayscale(hopper("L"))
        ImageOps.grayscale(hopper("RGB"))

        ImageOps.invert(hopper("L"))
        ImageOps.invert(hopper("RGB"))

        ImageOps.mirror(hopper("L"))
        ImageOps.mirror(hopper("RGB"))

        ImageOps.posterize(hopper("L"), 4)
        ImageOps.posterize(hopper("RGB"), 4)

        ImageOps.solarize(hopper("L"))
        ImageOps.solarize(hopper("RGB"))

        ImageOps.exif_transpose(hopper("L"))
        ImageOps.exif_transpose(hopper("RGB")) 
Developer: holzschu, Project: python3_ios, Lines: 53, Source: test_imageops.py

Example 8: _locateAll_opencv

# Required import: from PIL import ImageOps [as alias]
# Alternatively: from PIL.ImageOps import grayscale [as alias]
def _locateAll_opencv(needleImage, haystackImage, grayscale=None, limit=10000, region=None, step=1,
                      confidence=0.999):
    """
    TODO - rewrite this
        faster but more memory-intensive than pure python
        step 2 skips every other row and column = ~3x faster but prone to miss;
            to compensate, the algorithm automatically reduces the confidence
            threshold by 5% (which helps but will not avoid all misses).
        limitations:
          - OpenCV 3.x & python 3.x not tested
          - RGBA images are treated as RGB (ignores alpha channel)
    """
    if grayscale is None:
        grayscale = GRAYSCALE_DEFAULT

    confidence = float(confidence)

    needleImage = _load_cv2(needleImage, grayscale)
    needleHeight, needleWidth = needleImage.shape[:2]
    haystackImage = _load_cv2(haystackImage, grayscale)

    if region:
        haystackImage = haystackImage[region[1]:region[1]+region[3],
                                      region[0]:region[0]+region[2]]
    else:
        region = (0, 0)  # full image; these values used in the yield statement
    if (haystackImage.shape[0] < needleImage.shape[0] or
        haystackImage.shape[1] < needleImage.shape[1]):
        # avoid semi-cryptic OpenCV error below if bad size
        raise ValueError('needle dimension(s) exceed the haystack image or region dimensions')

    if step == 2:
        confidence *= 0.95
        needleImage = needleImage[::step, ::step]
        haystackImage = haystackImage[::step, ::step]
    else:
        step = 1

    # get all matches at once, credit: https://stackoverflow.com/questions/7670112/finding-a-subimage-inside-a-numpy-image/9253805#9253805
    result = cv2.matchTemplate(haystackImage, needleImage, cv2.TM_CCOEFF_NORMED)
    match_indices = numpy.arange(result.size)[(result > confidence).flatten()]
    matches = numpy.unravel_index(match_indices[:limit], result.shape)

    if len(matches[0]) == 0:
        if USE_IMAGE_NOT_FOUND_EXCEPTION:
            raise ImageNotFoundException('Could not locate the image (highest confidence = %.3f)' % result.max())
        else:
            return

    # use a generator for API consistency:
    matchx = matches[1] * step + region[0]  # vectorized
    matchy = matches[0] * step + region[1]
    for x, y in zip(matchx, matchy):
        yield Box(x, y, needleWidth, needleHeight)


# TODO - We should consider renaming _locateAll_python to _locateAll_pillow, since Pillow is the real dependency. 
Developer: asweigart, Project: pyscreeze, Lines: 59, Source: __init__.py

Example 9: received_message

# Required import: from PIL import ImageOps [as alias]
# Alternatively: from PIL.ImageOps import grayscale [as alias]
def received_message(self, m):
		global depth_image
		payload = m.data

		dat = msgpack.unpackb(payload)
		screen = Image.open(io.BytesIO(bytearray(dat['image'])))
		x = screen
		reward = dat['reward']
		end_episode = dat['endEpisode']

		depth_image = ImageOps.grayscale(Image.open(io.BytesIO(bytearray(dat['depth']))))

		if not self.agent_initialized:
			self.agent_initialized = True

			AgentServer.mode='start'
			action = workout(x)
			self.send(str(action))
			with open(self.log_file, 'w') as the_file:
				the_file.write('cycle, episode_reward_sum \n')			
		else:
			self.thread_event.wait()
			self.cycle_counter += 1
			self.reward_sum += reward

			if end_episode:
				AgentServer.mode='end'
				workout(x)
				#self.agent.agent_end(reward)
				AgentServer.mode='start'
				#action = self.agent.agent_start(image)  # TODO
				action = workout(x)
				self.send(str(action))
				with open(self.log_file, 'a') as the_file:
					the_file.write(str(self.cycle_counter) +
								   ',' + str(self.reward_sum) + '\n')
				self.reward_sum = 0

			else:
				#action, rl_action, eps, Q_now, obs_array, returnAction = self.agent.agent_step(reward, image)
				#self.agent.agent_step_after(reward, image, rl_action, eps, Q_now, obs_array, returnAction)
				AgentServer.mode='step'
				ag,action, eps, Q_now, obs_array = workout(x)
				self.send(str(action))
				ag.step_after(reward, action, eps, Q_now, obs_array)

		self.thread_event.set() 
Developer: uei, Project: deel, Lines: 49, Source: agentServer.py


Note: The PIL.ImageOps.grayscale method examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are selected from open-source projects contributed by their respective developers, and copyright remains with the original authors. Please follow each project's license when distributing or using the code; do not republish without permission.