

Python utils.resize_image Method Code Examples

This article collects typical usage examples of the Python method utils.resize_image. If you are wondering what exactly utils.resize_image does, how to call it, or what real-world uses of it look like, the hand-picked code examples below may help. You can also explore further usage examples from the utils module that the method belongs to.


A total of 12 code examples of utils.resize_image are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
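
Note that the signature of resize_image varies from project to project: several Mask R-CNN forks below return (image, window, scale, padding) and sometimes an extra crop, while other projects simply resize to a fixed width and height. For orientation only, here is a minimal, self-contained sketch of the Mask R-CNN-style variant; it is an illustration under assumptions, not any of the projects' actual implementations, and the name resize_image_sketch and the cv2 backend are chosen here for the sketch.

import numpy as np
import cv2  # any resize backend would do; cv2 is assumed for this sketch


def resize_image_sketch(image, min_dim=None, max_dim=None):
    """Scale an [H, W, 3] image so min(H, W) >= min_dim and max(H, W) <= max_dim,
    then zero-pad to a max_dim x max_dim square.
    Returns (image, window, scale, padding)."""
    h, w = image.shape[:2]
    scale = 1.0
    if min_dim:
        scale = max(1.0, min_dim / min(h, w))
    if max_dim and round(max(h, w) * scale) > max_dim:
        scale = max_dim / max(h, w)
    if scale != 1.0:
        image = cv2.resize(image, (int(round(w * scale)), int(round(h * scale))))
    # Pad to a square and remember which window of pixels is the real image.
    h, w = image.shape[:2]
    top, left = (max_dim - h) // 2, (max_dim - w) // 2
    padding = [(top, max_dim - h - top), (left, max_dim - w - left), (0, 0)]
    image = np.pad(image, padding, mode="constant", constant_values=0)
    window = (top, left, top + h, left + w)  # (y1, x1, y2, x2)
    return image, window, scale, padding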

Example 1: _build_image_grid

# Required module: import utils [as alias]
# Or: from utils import resize_image [as alias]
def _build_image_grid(input_images, gt_projs, pred_projs, pred_voxels):
  """Build the visualization grid with py_func."""
  quantity, img_height, img_width = input_images.shape[:3]
  for row in xrange(int(quantity / 3)):
    for col in xrange(3):
      index = row * 3 + col
      input_img_ = input_images[index, :, :, :]
      gt_proj_ = gt_projs[index, :, :, :]
      pred_proj_ = pred_projs[index, :, :, :]
      pred_voxel_ = utils.display_voxel(pred_voxels[index, :, :, :, 0])
      pred_voxel_ = utils.resize_image(pred_voxel_, img_height, img_width)
      if col == 0:
        tmp_ = np.concatenate([input_img_, gt_proj_, pred_proj_, pred_voxel_],
                              1)
      else:
        tmp_ = np.concatenate(
            [tmp_, input_img_, gt_proj_, pred_proj_, pred_voxel_], 1)
    if row == 0:
      out_grid = tmp_
    else:
      out_grid = np.concatenate([out_grid, tmp_], 0)

  out_grid = out_grid.astype(np.uint8)
  return out_grid 
Author: rky0930, Project: yolo_v2, Lines: 26, Source: model_voxel_generation.py
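
As a side note, the nested loops above stitch a (quantity / 3)-by-3 grid one tile at a time; the same row/column concatenation idea can be written compactly with plain NumPy. The sketch below uses dummy uint8 tiles in place of the resized images and voxel renderings, so it is an illustration of the pattern rather than a drop-in replacement.

import numpy as np

def build_grid(tiles, cols=3):
    """Concatenate equally sized [H, W, C] tiles into a grid with `cols` columns."""
    usable = len(tiles) - len(tiles) % cols          # drop any incomplete trailing row
    rows = [np.concatenate(tiles[i:i + cols], axis=1) for i in range(0, usable, cols)]
    return np.concatenate(rows, axis=0)

tiles = [np.full((32, 32, 3), v * 40, dtype=np.uint8) for v in range(6)]
print(build_grid(tiles).shape)  # (64, 96, 3)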

Example 2: __getitem__

# Required module: import utils [as alias]
# Or: from utils import resize_image [as alias]
def __getitem__(self, image_index):
        image_id = self.image_ids[image_index]
        # Load image, which is [H, W, D, C] first.
        image = self.dataset.load_image(image_id)
        # Load mask, which is [H, W, D] first.
        mask = self.dataset.load_mask(image_id)
        # Note that window is already in (z1, y1, x1, z2, y2, x2) format here.
        image, window, scale, padding, crop = utils.resize_image(
            image,
            min_dim=self.config.IMAGE_MIN_DIM,
            max_dim=self.config.IMAGE_MAX_DIM,
            min_scale=self.config.IMAGE_MIN_SCALE,
            mode=self.config.IMAGE_RESIZE_MODE)
        mask = utils.resize_mask(mask, scale, padding, max_dim=self.config.IMAGE_MAX_DIM,
                                 min_dim=self.config.IMAGE_MIN_DIM, crop=crop, mode=self.config.IMAGE_RESIZE_MODE)

        # Active classes
        # Different datasets have different classes, so track the classes supported in the dataset of this image.
        active_class_ids = np.zeros([self.dataset.num_classes], dtype=np.int32)
        source_class_ids = self.dataset.source_class_ids[self.dataset.image_info[image_id]["source"]]
        active_class_ids[source_class_ids] = 1
        # Image meta data
        image_meta = compose_image_meta(image_id, image.shape, window, active_class_ids)

        return image, image_meta, mask 
Author: Wuziyi616, Project: CFUN, Lines: 27, Source: model.py

Example 3: preproc_image

# Required module: import utils [as alias]
# Or: from utils import resize_image [as alias]
def preproc_image(x, nlabels=None):

    x_b = np.squeeze(x)

    ims = x_b.shape[:2]

    if nlabels:
        x_b = np.uint8((x_b / (nlabels)) * 255)  # not nlabels - 1 because I prefer gray over white
    else:
        x_b = utils.convert_to_uint8(x_b)

    # x_b = cv2.cvtColor(np.squeeze(x_b), cv2.COLOR_GRAY2BGR)
    # x_b = utils.histogram_equalization(x_b)
    x_b = utils.resize_image(x_b, (2 * ims[0], 2 * ims[1]), interp=cv2.INTER_NEAREST)

    # ims_n = x_b.shape[:2]
    # x_b = x_b[ims_n[0]//4:3*ims_n[0]//4, ims_n[1]//4: 3*ims_n[1]//4,...]
    return x_b 
Author: baumgach, Project: PHiSeg-code, Lines: 20, Source: phiseg_generate_samples.py
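
The two key steps above are mapping integer class labels to gray levels (dividing by nlabels and scaling to 255) and upscaling 2x with nearest-neighbour interpolation so label boundaries stay sharp. A stand-alone illustration with a toy 4-class label map follows; the real code goes through utils.resize_image and utils.convert_to_uint8, so this is only an assumed equivalent using cv2 directly.

import numpy as np
import cv2

labels = np.random.randint(0, 4, size=(64, 64))                     # toy segmentation map, nlabels = 4
vis = np.uint8(labels / 4 * 255)                                    # gray levels instead of class ids
vis = cv2.resize(vis, (128, 128), interpolation=cv2.INTER_NEAREST)  # 2x upscale, no label blending
print(vis.shape, vis.dtype)                                         # (128, 128) uint8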

Example 4: mold_inputs

# Required module: import utils [as alias]
# Or: from utils import resize_image [as alias]
def mold_inputs(self, images):
        """Takes a list of images and modifies them to the format expected
        as an input to the neural network.
        images: List of image matrices [height,width,depth]. Images can have
            different sizes.

        Returns 3 Numpy matrices:
        molded_images: [N, h, w, 3]. Images resized and normalized.
        image_metas: [N, length of meta data]. Details about each image.
        windows: [N, (y1, x1, y2, x2)]. The portion of the image that has the
            original image (padding excluded).
        """
        molded_images = []
        image_metas = []
        windows = []
        for image in images:
            # Resize image to fit the model expected size
            # TODO: move resizing to mold_image()
            molded_image, window, scale, padding = utils.resize_image(
                image,
                min_dim=self.config.IMAGE_MIN_DIM,
                max_dim=self.config.IMAGE_MAX_DIM,
                padding=self.config.IMAGE_PADDING)
            molded_image = mold_image(molded_image, self.config)
            # Build image_meta
            image_meta = compose_image_meta(
                0, image.shape, window,
                np.zeros([self.config.NUM_CLASSES], dtype=np.int32))
            # Append
            molded_images.append(molded_image)
            windows.append(window)
            image_metas.append(image_meta)
        # Pack into arrays
        molded_images = np.stack(molded_images)
        image_metas = np.stack(image_metas)
        windows = np.stack(windows)
        return molded_images, image_metas, windows 
Author: olgaliak, Project: segmentation-unet-maskrcnn, Lines: 39, Source: model.py

Example 5: _build_image_grid

# Required module: import utils [as alias]
# Or: from utils import resize_image [as alias]
def _build_image_grid(input_images,
                      gt_projs,
                      pred_projs,
                      input_voxels,
                      output_voxels,
                      vis_size=128):
  """Builds a grid image by concatenating the input images."""
  quantity = input_images.shape[0]

  for row in xrange(int(quantity / 3)):
    for col in xrange(3):
      index = row * 3 + col
      input_img_ = utils.resize_image(input_images[index, :, :, :], vis_size,
                                      vis_size)
      gt_proj_ = utils.resize_image(gt_projs[index, :, :, :], vis_size,
                                    vis_size)
      pred_proj_ = utils.resize_image(pred_projs[index, :, :, :], vis_size,
                                      vis_size)
      gt_voxel_vis = utils.resize_image(
          utils.display_voxel(input_voxels[index, :, :, :, 0]), vis_size,
          vis_size)
      pred_voxel_vis = utils.resize_image(
          utils.display_voxel(output_voxels[index, :, :, :, 0]), vis_size,
          vis_size)
      if col == 0:
        tmp_ = np.concatenate(
            [input_img_, gt_proj_, pred_proj_, gt_voxel_vis, pred_voxel_vis], 1)
      else:
        tmp_ = np.concatenate([
            tmp_, input_img_, gt_proj_, pred_proj_, gt_voxel_vis, pred_voxel_vis
        ], 1)
    if row == 0:
      out_grid = tmp_
    else:
      out_grid = np.concatenate([out_grid, tmp_], 0)

  return out_grid 
Author: rky0930, Project: yolo_v2, Lines: 39, Source: model_ptn.py

Example 6: preprocess_image

# Required module: import utils [as alias]
# Or: from utils import resize_image [as alias]
def preprocess_image(img):
    if img.shape[1] / img.shape[0] < 6.4:
        img = pad_image(img, (cfg.width, cfg.height), cfg.nb_channels)
    else:
        img = resize_image(img, (cfg.width, cfg.height))
    if cfg.nb_channels == 1:
        img = img.transpose([1, 0])
    else:
        img = img.transpose([1, 0, 2])
    img = np.flip(img, 1)
    img = img / 255.0
    if cfg.nb_channels == 1:
        img = img[:, :, np.newaxis]
    return img 
Author: kurapan, Project: CRNN, Lines: 16, Source: eval.py
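
The branch above pads narrow images and squashes wide ones so that text lines land on a fixed-size CRNN input. Below is a self-contained sketch of that pad-or-resize decision; the target size (200x32, giving a 6.25 ratio rather than the project's hard-coded 6.4) and the behaviour of the pad step are assumptions standing in for the project's cfg, pad_image and resize_image.

import numpy as np
import cv2

TARGET_W, TARGET_H = 200, 32          # assumed CRNN input size

def pad_or_resize(img):
    h, w = img.shape[:2]
    if w / h < TARGET_W / TARGET_H:
        # Narrow image: scale by height, then pad the width with zeros.
        new_w = int(round(w * TARGET_H / h))
        img = cv2.resize(img, (new_w, TARGET_H))
        pad = np.zeros((TARGET_H, TARGET_W - new_w, 3), dtype=img.dtype)
        return np.concatenate([img, pad], axis=1)
    # Wide image: squash directly to the target size.
    return cv2.resize(img, (TARGET_W, TARGET_H))

print(pad_or_resize(np.zeros((64, 128, 3), np.uint8)).shape)  # (32, 200, 3)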

Example 7: mold_inputs

# Required module: import utils [as alias]
# Or: from utils import resize_image [as alias]
def mold_inputs(self, images):
        """Takes a list of images and modifies them to the format expected
        as an input to the neural network.
        images: List of image matrices [height,width,depth]. Images can have
            different sizes.

        Returns 3 Numpy matrices:
        molded_images: [N, h, w, 3]. Images resized and normalized.
        image_metas: [N, length of meta data]. Details about each image.
        windows: [N, (y1, x1, y2, x2)]. The portion of the image that has the
            original image (padding excluded).
        """
        molded_images = []
        image_metas = []
        windows = []
        for image in images:
            # Resize image
            # TODO: move resizing to mold_image()
            molded_image, window, scale, padding, crop = utils.resize_image(
                image,
                min_dim=self.config.IMAGE_MIN_DIM,
                min_scale=self.config.IMAGE_MIN_SCALE,
                max_dim=self.config.IMAGE_MAX_DIM,
                mode=self.config.IMAGE_RESIZE_MODE)
            molded_image = mold_image(molded_image, self.config)
            # Build image_meta
            image_meta = compose_image_meta(
                0, image.shape, molded_image.shape, window, scale,
                np.zeros([self.config.NUM_CLASSES], dtype=np.int32))
            # Append
            molded_images.append(molded_image)
            windows.append(window)
            image_metas.append(image_meta)
        # Pack into arrays
        molded_images = np.stack(molded_images)
        image_metas = np.stack(image_metas)
        windows = np.stack(windows)
        return molded_images, image_metas, windows 
Author: Esri, Project: raster-deep-learning, Lines: 40, Source: model.py

Example 8: _get_frame_resizer

# Required module: import utils [as alias]
# Or: from utils import resize_image [as alias]
def _get_frame_resizer(cls, env, config):
    """
    Returns a lambda that takes a screen frame and resizes it to the
    configured width and height. If the state doesn't need to be resized
    for the environment, returns an identity function.

    @return: lambda (frame -> resized_frame)
    """
    width, height = config.resize_width, config.resize_height
    if width > 0 and height > 0:
      return partial(utils.resize_image, width=width, height=height)
    return lambda x: x 
Author: viswanathgs, Project: dist-dqn, Lines: 14, Source: dqn_agent.py
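
The partial(...) call above freezes the width and height keyword arguments so the returned callable takes only a frame. A self-contained illustration of the same pattern follows, with a stand-in resize function (the real utils.resize_image signature may differ, and the 84x84 target and 210x160 input sizes are just assumptions).

from functools import partial

import numpy as np
import cv2  # assumed backend for the stand-in


def resize_image(frame, width, height):
    """Stand-in for utils.resize_image: resize a frame to (height, width)."""
    return cv2.resize(frame, (width, height), interpolation=cv2.INTER_AREA)


resizer = partial(resize_image, width=84, height=84)
frame = np.zeros((210, 160, 3), dtype=np.uint8)  # e.g. an Atari-sized screen
print(resizer(frame).shape)                       # (84, 84, 3)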

Example 9: mold_inputs

# Required module: import utils [as alias]
# Or: from utils import resize_image [as alias]
def mold_inputs(self, images):
        """Takes a list of images and modifies them to the format expected
        as an input to the neural network.
        images: List of image matrices [height, width, depth, channels]. Images can have
            different sizes.
        Returns 3 Numpy matrices:
        molded_images: [N, 1, d, h, w]. Images resized and normalized.
        image_metas: [N, length of meta data]. Details about each image.
        windows: [N, (z1, y1, x1, z2, y2, x2)]. The portion of the image that has the
            original image (padding excluded).
        """
        molded_images = []
        image_metas = []
        windows = []
        for image in images:
            # Resize image to fit the model expected size
            molded_image, window, scale, padding, crop = utils.resize_image(
                image,
                min_dim=self.config.IMAGE_MIN_DIM,
                max_dim=self.config.IMAGE_MAX_DIM,
                min_scale=self.config.IMAGE_MIN_SCALE,
                mode=self.config.IMAGE_RESIZE_MODE)
            molded_image = mold_image(molded_image)
            molded_image = molded_image.transpose((3, 2, 0, 1))  # [C, D, H, W]
            # Build image_meta
            image_meta = compose_image_meta(
                0, image.shape, window,
                np.zeros([self.config.NUM_CLASSES], dtype=np.int32))
            # Append
            molded_images.append(molded_image)
            windows.append(window)
            image_metas.append(image_meta)
        # Pack into arrays
        molded_images = np.stack(molded_images)
        image_metas = np.stack(image_metas)
        windows = np.stack(windows)
        return molded_images, image_metas, windows 
Author: Wuziyi616, Project: CFUN, Lines: 39, Source: model.py

Example 10: __load_data

# Required module: import utils [as alias]
# Or: from utils import resize_image [as alias]
def __load_data(self):
        """
            Load all the images in the folder
        """

        print('Loading data')

        examples = []

        count = 0
        skipped = 0
        for i, f in enumerate(os.listdir(self.examples_path)):
            if i > 100000:
                break
            if len(f.split('_')[0]) > self.max_char_count:
                continue
            arr, initial_len = resize_image(
                os.path.join(self.examples_path, f),
                self.max_image_width
            )
            examples.append(
                (
                    arr,
                    f.split('_')[0].lower(),
                    label_to_array(f.split('_')[0].lower()),
                    label_to_array_2(f.split('_')[0].lower())
                )
            )
            count += 1

        print(count)

        return examples, len(examples) 
Author: Belval, Project: NRTR, Lines: 35, Source: data_manager.py

Example 11: get_action

# Required module: import utils [as alias]
# Or: from utils import resize_image [as alias]
def get_action(self, obs):

        ### determine manual override
        manual_override = self.real_controller.LeftBumper == 1

        if not manual_override:
            ## Look
            vec = resize_image(obs)
            vec = np.expand_dims(vec, axis=0) # expand dimensions for predict, it wants (1,66,200,3) not (66, 200, 3)
            ## Think
            joystick = self.model.predict(vec, batch_size=1)[0]

        else:
            joystick = self.real_controller.read()
            joystick[1] *= -1 # flip y (this is in the config when it runs normally)


        ## Act

        ### calibration
        output = [
            int(joystick[0] * 80),
            int(joystick[1] * 80),
            int(round(joystick[2])),
            int(round(joystick[3])),
            int(round(joystick[4])),
        ]

        ### print to console
        if manual_override:
            cprint("Manual: " + str(output), 'yellow')
        else:
            cprint("AI: " + str(output), 'green')

        return output 
Author: kevinhughes27, Project: TensorKart, Lines: 37, Source: play.py

Example 12: mold_inputs

# Required module: import utils [as alias]
# Or: from utils import resize_image [as alias]
def mold_inputs(self, images):
        """Takes a list of images and modifies them to the format expected
        as an input to the neural network.
        images: List of image matrices [height,width,depth]. Images can have
            different sizes.

        Returns 3 Numpy matrices:
        molded_images: [N, h, w, 3]. Images resized and normalized.
        image_metas: [N, length of meta data]. Details about each image.
        windows: [N, (y1, x1, y2, x2)]. The portion of the image that has the
            original image (padding excluded).
        """
        molded_images = []
        windows = []
        for image in images:
            # Resize image to fit the model expected size
            # TODO: move resizing to mold_image()
            molded_image, window, scale, padding = utils.resize_image(
                image,
                min_dim=self.config.IMAGE_MIN_DIM,
                max_dim=self.config.IMAGE_MAX_DIM,
                padding=self.config.IMAGE_PADDING)
            molded_image = mold_image(molded_image, self.config)
            # Append
            molded_images.append(molded_image)
            windows.append(window)
        # Pack into arrays
        molded_images = np.stack(molded_images)
        windows = np.stack(windows)
        return molded_images, windows 
Author: jacobkie, Project: 2018DSB, Lines: 32, Source: model_rcnn_weight.py


Note: The utils.resize_image examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their authors; copyright of the source code remains with the original authors, and any distribution or use must follow the corresponding project's license. Please do not republish without permission.