

Python image.img_to_array Method Code Examples

This article collects typical usage examples of the tensorflow.keras.preprocessing.image.img_to_array method in Python. If you are wondering how image.img_to_array is used, how to call it, or what it looks like in practice, the curated examples below may help. You can also explore further usage examples of the containing module, tensorflow.keras.preprocessing.image.


The following presents 11 code examples of the image.img_to_array method, sorted by popularity by default. Upvoting the examples you find useful helps the site recommend better Python code samples.
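Before diving into the examples, here is a minimal sketch of the typical img_to_array workflow: load an image, convert it to a NumPy array, add a batch dimension, and feed it to a model. The model file model.h5 and the image cat.jpg are hypothetical placeholders, not taken from any of the projects below.

# Minimal sketch of the common img_to_array workflow (hypothetical model.h5 and cat.jpg).
import numpy as np
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image

model = load_model('model.h5')                            # hypothetical saved Keras model
img = image.load_img('cat.jpg', target_size=(224, 224))  # PIL image resized to the model's input size
x = image.img_to_array(img)                               # float32 array of shape (224, 224, 3)
x = np.expand_dims(x, axis=0)                             # add batch dimension -> (1, 224, 224, 3)
preds = model.predict(x)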

Example 1: _get_image

# Required module import: from tensorflow.keras.preprocessing import image [as alias]
# Or: from tensorflow.keras.preprocessing.image import img_to_array [as alias]
def _get_image(cls, image_file: str, model: Model) -> np.ndarray:
        input_shape = model.inputs[0].shape
        size = input_shape[1:3].as_list()
        assert len(size) == 2, 'The model {} does not have enough dimensions to process an image (shape: {})'.format(
            model.name, size)

        colors = input_shape[3:]
        assert colors, ('The model {} requires a tensor with at least 3 inputs in order to process images: ' +
                        '[WIDTH, HEIGHT, COLORS]').format(model.name)

        if colors[0] == 1:
            color_mode = 'grayscale'
        elif colors[0] == 3:
            color_mode = 'rgb'
        elif colors[0] == 4:
            color_mode = 'rgba'
        else:
            raise AssertionError('The input tensor should have either 1 (grayscale), 3 (rgb) or 4 (rgba) units. ' +
                                 'Found: {}'.format(colors[0]))

        img = image.load_img(image_file, target_size=size, color_mode=color_mode)
        return image.img_to_array(img) 
Developer: BlackLight, Project: platypush, Lines: 24, Source: __init__.py

Example 2: extract

# Required module import: from tensorflow.keras.preprocessing import image [as alias]
# Or: from tensorflow.keras.preprocessing.image import img_to_array [as alias]
def extract(self, img):
        """
        Extract a deep feature from an input image
        Args:
            img: from PIL.Image.open(path) or tensorflow.keras.preprocessing.image.load_img(path)

        Returns:
            feature (np.ndarray): deep feature with the shape=(4096, )
        """
        img = img.resize((224, 224))  # VGG must take a 224x224 img as an input
        img = img.convert('RGB')  # Make sure img is color
        x = image.img_to_array(img)  # To np.array. Height x Width x Channel. dtype=float32
        x = np.expand_dims(x, axis=0)  # (H, W, C)->(1, H, W, C), where the first elem is the number of img
        x = preprocess_input(x)  # Subtracting avg values for each pixel
        feature = self.model.predict(x)[0]  # (1, 4096) -> (4096, )
        return feature / np.linalg.norm(feature)  # Normalize 
Developer: matsui528, Project: sis, Lines: 18, Source: feature_extractor.py
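As a hedged illustration, the extract method above could be called roughly as follows; the FeatureExtractor class name and the image path are assumptions based on the snippet, not verified against the sis project.

# Hypothetical usage of extract(); the class name and paths are placeholders.
from PIL import Image

fe = FeatureExtractor()            # assumed wrapper class containing the extract() method above
img = Image.open('query.jpg')      # any PIL image; extract() resizes and converts it
feature = fe.extract(img)          # L2-normalized feature vector of shape (4096,)
print(feature.shape)               # (4096,)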

Example 3: _transform_request

# Required module import: from tensorflow.keras.preprocessing import image [as alias]
# Or: from tensorflow.keras.preprocessing.image import img_to_array [as alias]
def _transform_request(request):
    request = request.decode('utf-8')
    request = unquote(request) 

    # Direct http example
    if request.startswith('http'):
        request = download_image(request)
    else:
        # Slack Label Example
        request_array = request.split('&')
        print(request_array)

        result = [value for value in request_array if value.startswith('text=')]
        if len(result) > 0:
            request = download_image(result[0][5:])
            print(request)
               
    predict_img = image.load_img(request, target_size=(224, 224))
    predict_img_array = image.img_to_array(predict_img)
    predict_img_array = np.expand_dims(predict_img_array, axis=0)
    predict_preprocess_img = preprocess_input(predict_img_array)

    return predict_preprocess_img 
Developer: PipelineAI, Project: models, Lines: 25, Source: pipeline_invoke_python.py

Example 4: preprocessing_fn

# Required module import: from tensorflow.keras.preprocessing import image [as alias]
# Or: from tensorflow.keras.preprocessing.image import img_to_array [as alias]
def preprocessing_fn(x: np.ndarray) -> np.ndarray:
    shape = (299, 299)  # Expected input shape of model
    output = []
    for i in range(x.shape[0]):
        im_raw = image.array_to_img(x[i])
        im = image.img_to_array(im_raw.resize(shape))
        output.append(im)
    output = preprocess_input_inception_resnet_v2(np.array(output))
    return output 
Developer: twosixlabs, Project: armory, Lines: 11, Source: inception_resnet_v2.py
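For reference, preprocessing_fn above could be exercised on a dummy batch as in the sketch below; the random uint8 images are stand-ins for real data, and the imports from the original module are assumed to be in place.

# Hypothetical call of preprocessing_fn on a dummy batch of uint8 images.
import numpy as np

batch = np.random.randint(0, 256, size=(4, 400, 400, 3), dtype=np.uint8)
processed = preprocessing_fn(batch)   # each image resized to 299x299, then model-specific preprocessing
print(processed.shape)                # (4, 299, 299, 3)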

Example 5: preprocessing_fn

# Required module import: from tensorflow.keras.preprocessing import image [as alias]
# Or: from tensorflow.keras.preprocessing.image import img_to_array [as alias]
def preprocessing_fn(x: np.ndarray) -> np.ndarray:
    shape = (224, 224)  # Expected input shape of model
    output = []
    for i in range(x.shape[0]):
        im_raw = image.array_to_img(x[i])
        im = image.img_to_array(im_raw.resize(shape))
        output.append(im)
    output = preprocess_input_densenet121_resisc(np.array(output))
    return output 
Developer: twosixlabs, Project: armory, Lines: 11, Source: densenet121_resisc45.py

Example 6: preprocessing_fn

# Required module import: from tensorflow.keras.preprocessing import image [as alias]
# Or: from tensorflow.keras.preprocessing.image import img_to_array [as alias]
def preprocessing_fn(x: np.ndarray) -> np.ndarray:
    shape = (224, 224)  # Expected input shape of model
    output = []
    for i in range(x.shape[0]):
        im_raw = image.array_to_img(x[i])
        im = image.img_to_array(im_raw.resize(shape))
        output.append(im)
    output = preprocess_input_resnet50(np.array(output))
    return output 
Developer: twosixlabs, Project: armory, Lines: 11, Source: resnet50.py

Example 7: predict

# Required module import: from tensorflow.keras.preprocessing import image [as alias]
# Or: from tensorflow.keras.preprocessing.image import img_to_array [as alias]
def predict(TEST_SET,image_size):
    print("载入网络权重中……")
    try:
        model = Unet(6,(32,32,4),0.001,0.00001) # build UNet
        model.load_weights('UnetDen169SGD.h5')
    except:
        print("载入失败!")
    stride = image_size
    print("进行预测分割拼图中……")
    for n in range(len(TEST_SET)):
        path = TEST_SET[n]
        image = Image.open(basePath+path)
        w,h = image.size
        padding_h = (h//stride + 1) * stride
        padding_w = (w//stride + 1) * stride
        padding_img = np.zeros((padding_h,padding_w,4),dtype=np.uint8)
        image=img_to_array(image)
        padding_img[0:h,0:w,:] = image[:,:,:]
        padding_img = padding_img.astype("float") / 255.0
        mask_whole = np.zeros((padding_h,padding_w),dtype=np.uint8)
        for i in range(padding_h//stride):
            for j in range(padding_w//stride):
                crop = padding_img[i*stride:i*stride+image_size,j*stride:j*stride+image_size,:4]
                ch,cw,_ = crop.shape
                if ch != 32 or cw != 32:
                    print('Incorrect crop size, please check!')
                    continue

                crop = np.expand_dims(crop, axis=0) 
                pred = model.predict(crop)
                pred = np.argmax(pred,axis=3)
                pred = pred.flatten()
                pred = labelencoder.inverse_transform(pred)
                pred = pred.reshape((32,32)).astype(np.uint8)
                mask_whole[i*stride:i*stride+image_size,j*stride:j*stride+image_size] = pred[:,:]

        cv2.imwrite(basePath2+'predict\\%s'%path,mask_whole[0:h,0:w]) 
Developer: 1044197988, Project: Semantic-segmentation-of-remote-sensing-images, Lines: 39, Source: Predict.py

Example 8: predict

# Required module import: from tensorflow.keras.preprocessing import image [as alias]
# Or: from tensorflow.keras.preprocessing.image import img_to_array [as alias]
def predict(args):
    # load the trained convolutional neural network
    print("载入网络权重中……")
    model = load_model(args["model"])
    stride = args['stride']
    print("进行预测分割拼图中……")
    for n in range(len(TEST_SET)):
        path = TEST_SET[n]
        #load the image
        image = cv2.imread(basePath+'train\\' + path)
        h,w,_ = image.shape
        padding_h = (h//stride + 1) * stride 
        padding_w = (w//stride + 1) * stride
        padding_img = np.zeros((padding_h,padding_w,3),dtype=np.uint8)
        padding_img[0:h,0:w,:] = image[:,:,:]
        padding_img = padding_img.astype("float") / divisor
        padding_img = img_to_array(padding_img)
        mask_whole = np.zeros((padding_h,padding_w),dtype=np.uint8)
        for i in range(padding_h//stride):
            for j in range(padding_w//stride):
                crop = padding_img[i*stride:i*stride+image_size,j*stride:j*stride+image_size,:3]
                ch,cw,_= crop.shape
                if ch != 32 or cw != 32:
                    print('Incorrect crop size, please check!')
                    continue
                    
                crop = np.expand_dims(crop, axis=0)
                pred = model.predict_classes(crop,verbose=2)
                pred = labelencoder.inverse_transform(pred[0])
                pred = pred.reshape((32,32)).astype(np.uint8)
                mask_whole[i*stride:i*stride+image_size,j*stride:j*stride+image_size] = pred[:,:]    
        cv2.imwrite(basePath+'predict/'+path,mask_whole[0:h,0:w]) 
Developer: 1044197988, Project: Semantic-segmentation-of-remote-sensing-images, Lines: 34, Source: Segnet预测.py
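As a hedged illustration, the predict function above expects an args dict with 'model' and 'stride' keys; the model file name below is a placeholder, and TEST_SET, basePath, image_size, divisor and labelencoder are module-level globals in the original script.

# Hypothetical invocation; 'segnet.h5' is a placeholder model file.
args = {'model': 'segnet.h5', 'stride': 32}
predict(args)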

Example 9: generateData

# Required module import: from tensorflow.keras.preprocessing import image [as alias]
# Or: from tensorflow.keras.preprocessing.image import img_to_array [as alias]
def generateData(batch_size,data=[]):
    while True:  
        train_data = []  
        train_label = []  
        batch = 0  
        for i in (range(len(data))): 
            url = data[i]
            batch += 1 
            img = load_img(filepath + 'train/' + url)
            img = img_to_array(img) 
            train_data.append(img)  
            label = load_img(filepath + 'label/' + url, grayscale=True)
            label = img_to_array(label).reshape((img_w * img_h,))  
            train_label.append(label)  
            if batch % batch_size==0: 
                train_data = np.array(train_data)  
                train_label = np.array(train_label).flatten()      # flatten
                train_label = labelencoder.transform(train_label)  
                train_label = to_categorical(train_label, num_classes=n_label)  # one-hot encode the output labels
                train_label = train_label.reshape((batch_size,img_w,img_h,n_label))
                yield (train_data,train_label)  
                train_data = []  
                train_label = []  
                batch = 0  

                


# Generate the validation data
Developer: 1044197988, Project: Semantic-segmentation-of-remote-sensing-images, Lines: 31, Source: 训练.py
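As a hedged sketch, the generator above would typically be passed straight to Keras training as shown below; filepath, labelencoder, img_w, img_h, n_label and the model are assumed to be defined elsewhere in the script, and the file names are placeholders.

# Hypothetical training loop using generateData(); file names are placeholders.
train_files = ['0.png', '1.png', '2.png', '3.png']   # images under filepath + 'train/'
batch_size = 2
model.fit(
    generateData(batch_size, train_files),
    steps_per_epoch=len(train_files) // batch_size,
    epochs=10,
)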

Example 10: generateValidData

# Required module import: from tensorflow.keras.preprocessing import image [as alias]
# Or: from tensorflow.keras.preprocessing.image import img_to_array [as alias]
def generateValidData(batch_size,data=[]):
    while True:  
        valid_data = []  
        valid_label = []  
        batch = 0  
        for i in (range(len(data))):  
            url = data[i]
            batch += 1  
            img = load_img(filepath + 'train/' + url)
            img = img_to_array(img)  
            valid_data.append(img)  
            label = load_img(filepath + 'label/' + url, grayscale=True)
            label = img_to_array(label).reshape((img_w * img_h,))  
            valid_label.append(label)  
            if batch % batch_size==0:  
                valid_data = np.array(valid_data)  
                valid_label = np.array(valid_label).flatten()  
                valid_label = labelencoder.transform(valid_label)  
                valid_label = to_categorical(valid_label, num_classes=n_label)
                valid_label = valid_label.reshape((batch_size,img_w,img_h,n_label))
                yield (valid_data,valid_label)  
                valid_data = []  
                valid_label = []  
                batch = 0
                

# Define the model (network architecture)
Developer: 1044197988, Project: Semantic-segmentation-of-remote-sensing-images, Lines: 29, Source: 训练.py

Example 11: predict

# Required module import: from tensorflow.keras.preprocessing import image [as alias]
# Or: from tensorflow.keras.preprocessing.image import img_to_array [as alias]
def predict(args):
    # load the trained convolutional neural network
    print("载入网络权重中……")
    model = load_model(args["model"],custom_objects={'dice_coef': dice_coef})
    stride = args['stride']
    print("进行预测分割拼图中……")
    for n in range(len(TEST_SET)):
        path = TEST_SET[n]
        #load the image
        image = cv2.imread(basePath+'train\\' + path)
        h,w,_ = image.shape
        padding_h = (h//stride + 1) * stride 
        padding_w = (w//stride + 1) * stride
        padding_img = np.zeros((padding_h,padding_w,3),dtype=np.uint8)
        padding_img[0:h,0:w,:] = image[:,:,:]
        padding_img = padding_img.astype("float") / 255.0
        padding_img = img_to_array(padding_img)
        mask_whole = np.zeros((padding_h,padding_w),dtype=np.uint8)
        for i in range(padding_h//stride):
            for j in range(padding_w//stride):
                crop = padding_img[i*stride:i*stride+image_size,j*stride:j*stride+image_size,:3]
                ch,cw,_ = crop.shape
                #print(ch,cw,_)
                if ch != 32 or cw != 32:
                    print('Incorrect crop size, please check!')
                    continue
                crop = np.expand_dims(crop, axis=0) 
                pred = model.predict(crop,verbose=2)
                pred=np.argmax(pred,axis=3)
                pred=pred.flatten()
                pred = labelencoder.inverse_transform(pred)
                pred = pred.reshape((32,32)).astype(np.uint8)
                mask_whole[i*stride:i*stride+image_size,j*stride:j*stride+image_size] = pred[:,:]

        cv2.imwrite(basePath+'predict/'+path,mask_whole[0:h,0:w]) 
Developer: 1044197988, Project: Semantic-segmentation-of-remote-sensing-images, Lines: 37, Source: FCN8S预测.py


Note: The tensorflow.keras.preprocessing.image.img_to_array examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors, and distribution and use should follow the corresponding project's license. Do not reproduce without permission.