

Python image.load_img Method Code Examples

This article collects typical usage examples of the Python method keras.preprocessing.image.load_img. If you have been wondering what image.load_img does, how to call it, or how it is used in practice, the curated code examples below may help. You can also explore further usage examples from its containing module, keras.preprocessing.image.


The following presents 15 code examples of image.load_img, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps surface better Python code examples.
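Most of the examples below follow the same basic pipeline: load an image from disk with load_img, convert it to a NumPy array with img_to_array, add a batch dimension, apply the model-specific preprocessing, and call model.predict. The minimal sketch below illustrates that pattern in isolation; the file name cat.jpg and the choice of VGG16 are illustrative assumptions, not taken from any of the projects featured in the examples.

import numpy as np
from keras.preprocessing.image import load_img, img_to_array
from keras.applications.vgg16 import VGG16, preprocess_input

# Illustrative sketch of the common load_img pipeline (file name and model are assumptions).
model = VGG16(weights='imagenet')
img = load_img('cat.jpg', target_size=(224, 224))  # PIL image resized to the model's input size
x = img_to_array(img)                              # float array of shape (224, 224, 3)
x = np.expand_dims(x, axis=0)                      # add batch axis -> (1, 224, 224, 3)
x = preprocess_input(x)                            # VGG16-specific channel scaling
preds = model.predict(x)                           # (1, 1000) ImageNet class scores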

Example 1: predict

# Required import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import load_img [as alias]
def predict(self, f, k=5, resize_mode='fill'):
        from keras.preprocessing import image
        from vergeml.img import resize_image

        filename = os.path.basename(f)

        if not os.path.exists(f):
            return dict(filename=filename, prediction=[])

        img = image.load_img(f)
        img = resize_image(img, self.image_size, self.image_size, 'antialias', resize_mode)

        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = self.preprocess_input(x)
        preds = self.model.predict(x)
        pred = self._decode(preds, top=k)[0]
        prediction = [dict(probability=float(perc), label=klass) for _, klass, perc in pred]  # float() instead of np.asscalar, which was removed in newer NumPy

        return dict(filename=filename, prediction=prediction) 
Developer: mme, Project: vergeml, Lines of code: 22, Source file: imagenet.py

Example 2: extract_features

# Required import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import load_img [as alias]
def extract_features(filename, model, model_type):
	if model_type == 'inceptionv3':
		from keras.applications.inception_v3 import preprocess_input
		target_size = (299, 299)
	elif model_type == 'vgg16':
		from keras.applications.vgg16 import preprocess_input
		target_size = (224, 224)
	# Loading and resizing image
	image = load_img(filename, target_size=target_size)
	# Convert the image pixels to a numpy array
	image = img_to_array(image)
	# Reshape data for the model
	image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
	# Prepare the image for the CNN model
	image = preprocess_input(image)
	# Pass image into model to get encoded features
	features = model.predict(image, verbose=0)
	return features

# Load the tokenizer 
Developer: dabasajay, Project: Image-Caption-Generator, Lines of code: 22, Source file: test.py

Example 3: extract_features

# Required import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import load_img [as alias]
def extract_features(path, model_type):
	if model_type == 'inceptionv3':
		from keras.applications.inception_v3 import preprocess_input
		target_size = (299, 299)
	elif model_type == 'vgg16':
		from keras.applications.vgg16 import preprocess_input
		target_size = (224, 224)
	# Get CNN Model from model.py
	model = CNNModel(model_type)
	features = dict()
	# Extract features from each photo
	for name in tqdm(os.listdir(path)):
		# Loading and resizing image
		filename = path + name
		image = load_img(filename, target_size=target_size)
		# Convert the image pixels to a numpy array
		image = img_to_array(image)
		# Reshape data for the model
		image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
		# Prepare the image for the CNN model
		image = preprocess_input(image)
		# Pass image into model to get encoded features
		feature = model.predict(image, verbose=0)
		# Store encoded features for the image
		image_id = name.split('.')[0]
		features[image_id] = feature
	return features 
Developer: dabasajay, Project: Image-Caption-Generator, Lines of code: 29, Source file: preprocessing.py

Example 4: data_loader

# Required import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import load_img [as alias]
def data_loader(q, ):
    for bi in batch_indices:
        start, end = bi
        x_batch = []
        filenames_batch = filenames[start:end]

        for filename in filenames_batch:
            imgs = []
            for d in dirs:
                img = img_to_array(load_img(os.path.join(d, filename), grayscale=True))
                imgs.append(np.squeeze(img))
            x_batch.append(np.array(imgs).transpose((1, 2, 0)))
        q.put((filenames_batch, np.array(x_batch)))

    for gpu in gpus:
        q.put((None, None)) 
Developer: killthekitten, Project: kaggle-carvana-2017, Lines of code: 18, Source file: ensemble_gpu.py

Example 5: load_image_pixels

# Required import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import load_img [as alias]
def load_image_pixels(filename, shape):
	# load the image to get its shape
	image = load_img(filename)
	width, height = image.size
	# load the image with the required size
	image = load_img(filename, target_size=shape)
	# convert to numpy array
	image = img_to_array(image)
	# scale pixel values to [0, 1]
	image = image.astype('float32')
	image /= 255.0
	# add a dimension so that we have one sample
	image = expand_dims(image, 0)
	return image, width, height

# get all of the results above a threshold 
Developer: produvia, Project: ai-platform, Lines of code: 18, Source file: yolo_image.py

Example 6: predict

# Required import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import load_img [as alias]
def predict(img_dir, model):
    img_files = []
    for root, dirs, files in os.walk(img_dir, topdown=False):
        for name in files:
            img_files.append(os.path.join(root, name))
    img_files = sorted(img_files)

    y_pred = []
    y_test = []

    for img_path in tqdm(img_files):
        # print(img_path)
        img = image.load_img(img_path, target_size=(224, 224))
        x = image.img_to_array(img)
        preds = model.predict(x[None, :, :, :])
        decoded = decode_predictions(preds, top=1)
        pred_label = decoded[0][0][0]
        # print(pred_label)
        y_pred.append(pred_label)
        tokens = img_path.split(os.path.sep)  # split on the path separator to recover the class directory (os.pathsep is the PATH-variable separator)
        class_id = int(tokens[-2])
        # print(str(class_id))
        y_test.append(class_id)

    return y_pred, y_test 
Developer: foamliu, Project: Car-Recognition, Lines of code: 27, Source file: analyze.py

Example 7: create_test_data

# Required import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import load_img [as alias]
def create_test_data(self):
		# Generate the .npy file for the test set
		i = 0
		print('-' * 30)
		print('Creating test images...')
		print('-' * 30)
		imgs = glob.glob(self.test_path + "/*." + self.img_type)           # ../data_set/train
		print(len(imgs))
		imgdatas = np.ndarray((len(imgs), self.out_rows, self.out_cols, 1), dtype=np.uint8)
		for imgname in imgs:
			midname = imgname[imgname.rindex("/") + 1:]   # file name of the image
			img = load_img(self.test_path + "/" + midname, grayscale=True)   # load as a grayscale image
			img = img_to_array(img)
			imgdatas[i] = img
			if i % 100 == 0:
				print('Done: {0}/{1} images'.format(i, len(imgs)))
			i += 1
		print('loading done', imgdatas.shape)
		np.save(self.npy_path + '/imgs_test.npy', imgdatas)            # generate .npy data from the 30 training images and 30 labels
		# np.save(self.npy_path + '/imgs_mask_train.npy', imglabels)
		print('Saving to .npy files done.') 
Developer: DuFanXin, Project: U-net, Lines of code: 23, Source file: data_Keras.py

Example 8: main

# Required import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import load_img [as alias]
def main(self):
        self.logger.info('Will load keras model')
        model = ResNet50(weights='imagenet')
        self.logger.info('Keras model loaded')
        feature_list = []
        img_path_list = []
        for raw_file in self.inp.raw_files:
            media_path = raw_file.path
            file_list = os.listdir(media_path)
            total = float(len(file_list))
            for index, img_file in enumerate(file_list):
                img_path = os.path.join(media_path, img_file)
                img_path_list.append(img_path)
                img = image.load_img(img_path, target_size=(224, 224))
                x = keras_image.img_to_array(img)
                x = np.expand_dims(x, axis=0)
                x = preprocess_input(x)
                # extract features
                scores = model.predict(x)
                sim_class = np.argmax(scores)
                print('Scores {}\nSimClass: {}'.format(scores, sim_class))
                self.outp.request_annos(img_path, img_sim_class=sim_class)
                self.logger.info('Requested annotation for: {} (cluster: {})'.format(img_path, sim_class))
                self.update_progress(index*100/total) 
Developer: l3p-cv, Project: lost, Lines of code: 26, Source file: cluster_resnet.py

Example 9: preprocess_image_crop

# Required import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import load_img [as alias]
def preprocess_image_crop(image_path, img_size):
    '''
    Preprocess the image, scaling it so that its smaller side equals img_size.
    The larger side is then cropped to produce a square image.
    '''
    img = load_img(image_path)
    scale = float(img_size) / min(img.size)
    new_size = (int(np.ceil(scale * img.size[0])), int(np.ceil(scale * img.size[1])))
    # print('old size: %s,new size: %s' %(str(img.size), str(new_size)))
    img = img.resize(new_size, resample=Image.BILINEAR)
    img = img_to_array(img)
    crop_h = img.shape[0] - img_size
    crop_v = img.shape[1] - img_size
    img = img[crop_h:img_size+crop_h, crop_v:img_size+crop_v, :]
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    return img

# util function to open, resize and format pictures into appropriate tensors 
Developer: robertomest, Project: neural-style-keras, Lines of code: 21, Source file: utils.py

Example 10: preprocess_image_scale

# Required import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import load_img [as alias]
def preprocess_image_scale(image_path, img_size=None):
    '''
    Preprocess the image, scaling it so that its larger side equals img_size.
    This function preserves the aspect ratio.
    '''
    img = load_img(image_path)
    if img_size:
        scale = float(img_size) / max(img.size)
        new_size = (int(np.ceil(scale * img.size[0])), int(np.ceil(scale * img.size[1])))
        img = img.resize(new_size, resample=Image.BILINEAR)
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = vgg16.preprocess_input(img)
    return img


# util function to convert a tensor into a valid image 
Developer: robertomest, Project: neural-style-keras, Lines of code: 19, Source file: utils.py

Example 11: display_heatmap

# Required import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import load_img [as alias]
def display_heatmap(new_model, img_path, ids, preprocessing=None):
    # The quality is reduced.
    # If you have more than 8GB of RAM, you can try to increase it.
    img = image.load_img(img_path, target_size=(800, 1280))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    if preprocessing is not None:
        x = preprocessing(x)  # apply the caller-supplied preprocessing function

    out = new_model.predict(x)

    heatmap = out[0]  # Removing batch axis.

    if K.image_data_format() == 'channels_first':
        heatmap = heatmap[ids]
        if heatmap.ndim == 3:
            heatmap = np.sum(heatmap, axis=0)
    else:
        heatmap = heatmap[:, :, ids]
        if heatmap.ndim == 3:
            heatmap = np.sum(heatmap, axis=2)

    plt.imshow(heatmap, interpolation="none")
    plt.show() 
Developer: gabrieldemarmiesse, Project: heatmaps, Lines of code: 26, Source file: demo.py

Example 12: helper_test

# Required import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import load_img [as alias]
def helper_test(model):
    img_path = "../examples/dog.jpg"
    new_model = to_heatmap(model)

    # Loading the image
    img = image.load_img(img_path, target_size=(800, 800))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)

    out = new_model.predict(x)

    s = "n02084071"  # Imagenet code for "dog"
    ids = synset_to_dfs_ids(s)
    heatmap = out[0]
    if K.image_data_format() == 'channels_first':
        heatmap = heatmap[ids]
        heatmap = np.sum(heatmap, axis=0)
    else:
        heatmap = heatmap[:, :, ids]
        heatmap = np.sum(heatmap, axis=2)
    print(heatmap.shape)
    assert heatmap.shape[0] == heatmap.shape[1]
    K.clear_session() 
Developer: gabrieldemarmiesse, Project: heatmaps, Lines of code: 26, Source file: helper.py

Example 13: predict

# Required import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import load_img [as alias]
def predict(imagePath):
    img = load_img(imagePath)
    img = img_to_array(img)
    output = img.copy()
    # make prediction
    results = rcnn.detect([img], verbose=0)
    r = results[0]
    for (box, score) in zip(r['rois'], r['scores']):
        # filter out weak detections
        if score < 0.5:
            continue
        label = "{}: {:.2f}".format('table', score)
        cv2.rectangle(output, (box[1], box[0]), (box[3], box[2]), (0, 255, 0), 2)
        cv2.putText(output, label, (box[1], box[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
    cv2.imwrite("prediction.jpg", output)
    return r['rois'] 
Developer: holms-ur, Project: fine-tuning, Lines of code: 18, Source file: predict.py

Example 14: test_01_image_classifier_with_image_as_input

# Required import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import load_img [as alias]
def test_01_image_classifier_with_image_as_input(self):
        
        cnn_pmml = KerasToPmml(self.model_final,model_name="MobileNetImage",description="Demo",\
            copyright="Internal User",dataSet='image',predictedClasses=['dogs','cats'])
        cnn_pmml.export(open('2classMBNet.pmml', "w"), 0)

        img = image.load_img('nyoka/tests/resizedCat.png')
        img = img_to_array(img)
        img = preprocess_input(img)
        imgtf = np.expand_dims(img, axis=0)
        model_pred=self.model_final.predict(imgtf)
        model_preds = {'dogs':model_pred[0][0],'cats':model_pred[0][1]}

        model_name  = self.adapa_utility.upload_to_zserver('2classMBNet.pmml')

        predictions, probabilities = self.adapa_utility.score_in_zserver(model_name, 'nyoka/tests/resizedCat.png','DN')
  
        self.assertEqual(abs(probabilities['cats'] - model_preds['cats']) < 0.00001, True)
        self.assertEqual(abs(probabilities['dogs'] - model_preds['dogs']) < 0.00001, True) 
Developer: nyoka-pmml, Project: nyoka, Lines of code: 21, Source file: testScoreWithAdapaKeras.py

Example 15: _compute_stats

# Required import: from keras.preprocessing import image [as alias]
# Or: from keras.preprocessing.image import load_img [as alias]
def _compute_stats(self, mean = None, std = None):
        """ Computes channel-wise mean and standard deviation of all images in the dataset.
        
        If `mean` and `std` arguments are given, they will just be stored instead of being re-computed.

        The channel order of both is always "RGB", independent of `color_mode`.
        """
        
        if mean is None:
            mean = 0
            for fn in tqdm(self.train_img_files, desc = 'Computing channel mean'):
                mean += np.mean(np.asarray(load_img(fn), dtype=np.float64), axis = (0,1))
            mean /= len(self.train_img_files)
            print('Channel-wise mean:               {}'.format(mean))
        self.mean = np.asarray(mean, dtype=np.float32)
        if (mean is None) or (std is None):
            std = 0
            for fn in tqdm(self.train_img_files, desc = 'Computing channel variance'):
                std += np.mean((np.asarray(load_img(fn), dtype=np.float64) - self.mean) ** 2, axis = (0,1))
            std = np.sqrt(std / (len(self.train_img_files) - 1))
            print('Channel-wise standard deviation: {}'.format(std))
        self.std = np.asarray(std, dtype=np.float32) 
Developer: cvjena, Project: semantic-embeddings, Lines of code: 24, Source file: common.py


Note: The keras.preprocessing.image.load_img examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective authors, who retain copyright of the source code; for distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.