

Python inception_v3.preprocess_input Method Code Examples

This article collects typical usage examples of the Python method keras.applications.inception_v3.preprocess_input, drawn from open-source projects. If you are wondering what inception_v3.preprocess_input does, or how to use it in your own code, the curated examples below should help. You can also explore the other members of the keras.applications.inception_v3 module.


The following presents 15 code examples of the inception_v3.preprocess_input method, ordered by popularity.
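
Before diving into the examples, it is worth knowing what inception_v3.preprocess_input actually computes: it rescales 8-bit pixel values from [0, 255] to [-1, 1], the input range InceptionV3 was trained on. A minimal sanity check of that behavior (an illustration of the API's documented behavior, not taken from any project below):

import numpy as np
from keras.applications.inception_v3 import preprocess_input

x = np.random.randint(0, 256, (1, 299, 299, 3)).astype(np.float32)
# Pass a copy: some Keras versions modify float inputs in place
y = preprocess_input(x.copy())

# InceptionV3 preprocessing is x / 127.5 - 1, mapping [0, 255] to [-1, 1]
np.testing.assert_allclose(y, x / 127.5 - 1.0, rtol=1e-5)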

Example 1: extract_features

# Required import: from keras.applications import inception_v3 [as alias]
# Or alternatively: from keras.applications.inception_v3 import preprocess_input [as alias]
import os
from tqdm import tqdm
from keras.preprocessing.image import load_img, img_to_array
from model import CNNModel  # project-local helper (see model.py; module path may differ)

def extract_features(path, model_type):
	if model_type == 'inceptionv3':
		from keras.applications.inception_v3 import preprocess_input
		target_size = (299, 299)
	elif model_type == 'vgg16':
		from keras.applications.vgg16 import preprocess_input
		target_size = (224, 224)
	# Get CNN Model from model.py
	model = CNNModel(model_type)
	features = dict()
	# Extract features from each photo
	for name in tqdm(os.listdir(path)):
		# Loading and resizing image
		filename = path + name
		image = load_img(filename, target_size=target_size)
		# Convert the image pixels to a numpy array
		image = img_to_array(image)
		# Reshape data for the model
		image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
		# Prepare the image for the CNN model
		image = preprocess_input(image)
		# Pass image into model to get encoded features
		feature = model.predict(image, verbose=0)
		# Store encoded features for the image
		image_id = name.split('.')[0]
		features[image_id] = feature
	return features 
Author: dabasajay, Project: Image-Caption-Generator, Lines: 29, Source: preprocessing.py
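
A hypothetical invocation of the helper above (the directory name is illustrative, not from the project). Note that path must end with a trailing slash, since the function builds each filename as path + name:

features = extract_features('Flickr8k_Dataset/', 'inceptionv3')
print('Extracted features for %d images' % len(features))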

Example 2: extract_features

# Required import: from keras.applications import inception_v3 [as alias]
# Or alternatively: from keras.applications.inception_v3 import preprocess_input [as alias]
from keras.preprocessing.image import load_img, img_to_array

def extract_features(filename, model, model_type):
	if model_type == 'inceptionv3':
		from keras.applications.inception_v3 import preprocess_input
		target_size = (299, 299)
	elif model_type == 'vgg16':
		from keras.applications.vgg16 import preprocess_input
		target_size = (224, 224)
	# Loading and resizing image
	image = load_img(filename, target_size=target_size)
	# Convert the image pixels to a numpy array
	image = img_to_array(image)
	# Reshape data for the model
	image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))
	# Prepare the image for the CNN model
	image = preprocess_input(image)
	# Pass image into model to get encoded features
	features = model.predict(image, verbose=0)
	return features

# Load the tokenizer 
Author: dabasajay, Project: Image-Caption-Generator, Lines: 22, Source: test.py

Example 3: test_load_image_vs_keras

# Required import: from keras.applications import inception_v3 [as alias]
# Or alternatively: from keras.applications.inception_v3 import preprocess_input [as alias]
def test_load_image_vs_keras(self):
        g = tf.Graph()
        with g.as_default():
            image_arr = utils.imageInputPlaceholder()
            # keras expects arrays in RGB order; the image schema gives BGR => need to flip
            preprocessed = preprocess_input(imageIO._reverseChannels(image_arr))

        output_col = "transformed_image"
        transformer = TFImageTransformer(channelOrder='BGR', inputCol="image", outputCol=output_col, graph=g,
                                         inputTensor=image_arr, outputTensor=preprocessed.name,
                                         outputMode="vector")

        image_df = image_utils.getSampleImageDF()
        df = transformer.transform(image_df.limit(5))

        for row in df.collect():
            processed = np.array(row[output_col]).astype(np.float32)
            # compare to keras loading
            images = self._loadImageViaKeras(row["image"]['origin'])
            image = images[0]
            image.shape = (1, image.shape[0] * image.shape[1] * image.shape[2])
            keras_processed = image[0]
            np.testing.assert_array_almost_equal(keras_processed, processed, decimal=6) 
Author: databricks, Project: spark-deep-learning, Lines: 25, Source: tf_image_test.py

Example 4: test_load_image_vs_keras_RGB

# Required import: from keras.applications import inception_v3 [as alias]
# Or alternatively: from keras.applications.inception_v3 import preprocess_input [as alias]
def test_load_image_vs_keras_RGB(self):
        g = tf.Graph()
        with g.as_default():
            image_arr = utils.imageInputPlaceholder()
            # channelOrder is 'RGB' below, which already matches what keras expects => no flip needed
            preprocessed = preprocess_input(image_arr)

        output_col = "transformed_image"
        transformer = TFImageTransformer(channelOrder='RGB', inputCol="image", outputCol=output_col, graph=g,
                                         inputTensor=image_arr, outputTensor=preprocessed.name,
                                         outputMode="vector")

        image_df = image_utils.getSampleImageDF()
        df = transformer.transform(image_df.limit(5))

        for row in df.collect():
            processed = np.array(row[output_col], dtype=np.float32)
            # compare to keras loading
            images = self._loadImageViaKeras(row["image"]['origin'])
            image = images[0]
            image.shape = (1, image.shape[0] * image.shape[1] * image.shape[2])
            keras_processed = image[0]
            np.testing.assert_array_almost_equal(keras_processed, processed, decimal=6)

    # Test full pre-processing for InceptionV3 as an example of a simple computation graph 
Author: databricks, Project: spark-deep-learning, Lines: 27, Source: tf_image_test.py

Example 5: _imagenet_preprocess_input

# Required import: from keras.applications import inception_v3 [as alias]
# Or alternatively: from keras.applications.inception_v3 import preprocess_input [as alias]
def _imagenet_preprocess_input(x, input_shape):
    """
    For ResNet50 and VGG models. For InceptionV3 and Xception it's fine to use
    the keras version (e.g. inception_v3.preprocess_input), since the code path
    they hit works with tf.Tensor inputs. The following was translated to tf ops from
    https://github.com/fchollet/keras/blob/fb4a0849cf4dc2965af86510f02ec46abab1a6a4/keras/applications/imagenet_utils.py#L52
    The keras implementation could be changed to look like this, modified to work
    with BGR images (the standard in Spark), but we are not doing that for now.
    """
    # Assuming 'BGR' channel order: zero-center by the ImageNet mean pixel
    mean = np.ones(input_shape + (3,), dtype=np.float32)
    mean[..., 0] = 103.939  # blue channel mean
    mean[..., 1] = 116.779  # green channel mean
    mean[..., 2] = 123.68   # red channel mean
    return x - mean
Author: databricks, Project: spark-deep-learning, Lines: 18, Source: keras_applications.py
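
Because the mean tensor above holds the same three values at every pixel, the whole function reduces to a per-channel subtraction. A quick NumPy check of that equivalence (an illustration, not part of the project):

import numpy as np

input_shape = (224, 224)
x = np.random.uniform(0, 255, (1,) + input_shape + (3,)).astype(np.float32)
# Broadcasting a (3,) vector of BGR means over the last axis gives the same result
expected = x - np.array([103.939, 116.779, 123.68], dtype=np.float32)
np.testing.assert_allclose(_imagenet_preprocess_input(x, input_shape), expected)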

Example 6: predict

# Required import: from keras.applications import inception_v3 [as alias]
# Or alternatively: from keras.applications.inception_v3 import preprocess_input [as alias]
import numpy as np
from keras.preprocessing import image
from keras.applications.inception_v3 import preprocess_input

def predict(model, img, target_size):
    """Run model prediction on an image
    Args:
        model: keras model
        img: PIL format image
        target_size: (w, h) tuple
    Returns:
        array of class probabilities
    """
    if img.size != target_size:
        img = img.resize(target_size)

    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    preds = model.predict(x)
    return preds[0] 
Author: DhavalThkkar, Project: Transfer-Learning, Lines: 19, Source: predict.py

Example 7: gen

# Required import: from keras.applications import inception_v3 [as alias]
# Or alternatively: from keras.applications.inception_v3 import preprocess_input [as alias]
def gen(session, data, labels, batch_size):
    def _f():
        start = 0
        end = start + batch_size
        n = data.shape[0]

        while True:
            # resize_op and img_placeholder are module-level TF ops defined
            # elsewhere in the original script
            X_batch = session.run(resize_op, {img_placeholder: data[start:end]})
            X_batch = preprocess_input(X_batch)
            y_batch = labels[start:end]
            start += batch_size
            end += batch_size
            if start >= n:
                start = 0
                end = batch_size

            print(start, end)  # progress/debug output
            yield (X_batch, y_batch)

    return _f 
Author: udacity, Project: CarND-Transfer-Learning-Lab, Lines: 22, Source: run_bottleneck.py
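
A minimal sketch of how this generator might be driven (the placeholder, resize op, and data shapes are assumptions, mirroring what the snippet references but does not define; TF1-style API as in the other examples on this page):

import numpy as np
import tensorflow as tf
from keras.applications.inception_v3 import preprocess_input

# Assumed module-level ops that gen() references
img_placeholder = tf.placeholder(tf.uint8, shape=(None, 32, 32, 3))
resize_op = tf.image.resize_images(img_placeholder, (299, 299))

data = np.random.randint(0, 256, (100, 32, 32, 3), dtype=np.uint8)
labels = np.eye(10)[np.random.randint(0, 10, 100)]

with tf.Session() as sess:
    batches = gen(sess, data, labels, batch_size=32)()
    X_batch, y_batch = next(batches)
    print(X_batch.shape)  # (32, 299, 299, 3), values scaled to [-1, 1]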

Example 8: extract

# Required import: from keras.applications import inception_v3 [as alias]
# Or alternatively: from keras.applications.inception_v3 import preprocess_input [as alias]
def extract(self, image_path):
        img = image.load_img(image_path, target_size=(299, 299))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)

        # Get the prediction.
        features = self.model.predict(x)

        if self.weights is None:
            # For imagenet/default network:
            features = features[0]
        else:
            # For loaded network (note: this branch is currently identical to the one above):
            features = features[0]

        return features 
Author: harvitronix, Project: five-video-classification-methods, Lines: 19, Source: extractor.py

Example 9: predict

# Required import: from keras.applications import inception_v3 [as alias]
# Or alternatively: from keras.applications.inception_v3 import preprocess_input [as alias]
def predict(image_file):
    """
    Predict the top 3 categories for the given image file.

    """
    img = image.load_img(image_file, target_size=(299, 299))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    results = model.predict(x)

    top3 = decode_predictions(results, top=3)[0]
    return [
        {'label': label, 'description': description,
         'probability': probability * 100.0}
        for label, description, probability in top3
    ] 
Author: PacktPublishing, Project: Learning-Generative-Adversarial-Networks, Lines: 19, Source: kerasModel.py

Example 10: predict

# Required import: from keras.applications import inception_v3 [as alias]
# Or alternatively: from keras.applications.inception_v3 import preprocess_input [as alias]
def predict(model, img, target_size):
  """Run model prediction on image
  Args:
    model: keras model
    img: PIL format image
    target_size: (w,h) tuple
  Returns:
    array of class probabilities
  """
  if img.size != target_size:
    img = img.resize(target_size)

  x = image.img_to_array(img)
  x = np.expand_dims(x, axis=0)
  x = preprocess_input(x)
  preds = model.predict(x)
  return preds[0] 
Author: PacktPublishing, Project: Learning-Generative-Adversarial-Networks, Lines: 19, Source: predict.py

Example 11: preprocess_image

# Required import: from keras.applications import inception_v3 [as alias]
# Or alternatively: from keras.applications.inception_v3 import preprocess_input [as alias]
import numpy as np
from keras.preprocessing import image
from keras.applications import inception_v3

def preprocess_image(image_path):
    img = image.load_img(image_path)
    img = image.img_to_array(img)
    img = np.expand_dims(img, axis=0)
    img = inception_v3.preprocess_input(img)
    return img 
Author: wdxtub, Project: deep-learning-note, Lines: 8, Source: utils.py
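
Since this preprocessing maps pixels to [-1, 1], the inverse transform is handy when you want to turn a preprocessed array back into a viewable image. This deprocess helper is a sketch for illustration, not part of the project above:

import numpy as np

def deprocess_image(x):
    # Undo inception_v3.preprocess_input: map [-1, 1] back to [0, 255]
    x = (x + 1.0) * 127.5
    return np.clip(x, 0, 255).astype('uint8')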

Example 12: loadAndPreprocessKerasInceptionV3

# Required import: from keras.applications import inception_v3 [as alias]
# Or alternatively: from keras.applications.inception_v3 import preprocess_input [as alias]
def loadAndPreprocessKerasInceptionV3(raw_uri):
    # this is the canonical way to load and prep images in keras
    uri = raw_uri[5:] if raw_uri.startswith("file:/") else raw_uri
    image = img_to_array(load_img(uri, target_size=InceptionV3Constants.INPUT_SHAPE))
    image = np.expand_dims(image, axis=0)
    return preprocess_input(image) 
Author: databricks, Project: spark-deep-learning, Lines: 8, Source: image_utils.py

Example 13: _loadImageViaKeras

# Required import: from keras.applications import inception_v3 [as alias]
# Or alternatively: from keras.applications.inception_v3 import preprocess_input [as alias]
def _loadImageViaKeras(self, raw_uri):
        uri = raw_uri[5:] if raw_uri.startswith("file:/") else raw_uri
        image = img_to_array(load_img(uri))
        image = np.expand_dims(image, axis=0)
        return preprocess_input(image) 
Author: databricks, Project: spark-deep-learning, Lines: 7, Source: tf_image_test.py

Example 14: _preprocessingInceptionV3Transformed

# Required import: from keras.applications import inception_v3 [as alias]
# Or alternatively: from keras.applications.inception_v3 import preprocess_input [as alias]
def _preprocessingInceptionV3Transformed(self, outputMode, outputCol):
        g = tf.Graph()
        with g.as_default():
            image_arr = utils.imageInputPlaceholder()
            resized_images = tf.image.resize_images(image_arr, InceptionV3Constants.INPUT_SHAPE)
            # keras expects arrays in RGB order; the image schema gives BGR => need to flip
            processed_images = preprocess_input(imageIO._reverseChannels(resized_images))
        self.assertEqual(processed_images.shape[1], InceptionV3Constants.INPUT_SHAPE[0])
        self.assertEqual(processed_images.shape[2], InceptionV3Constants.INPUT_SHAPE[1])

        transformer = TFImageTransformer(channelOrder='BGR', inputCol="image", outputCol=outputCol, graph=g,
                                         inputTensor=image_arr.name, outputTensor=processed_images,
                                         outputMode=outputMode)
        image_df = image_utils.getSampleImageDF()
        return transformer.transform(image_df.limit(5)) 
Author: databricks, Project: spark-deep-learning, Lines: 17, Source: tf_image_test.py

Example 15: test_keras_consistency

# Required import: from keras.applications import inception_v3 [as alias]
# Or alternatively: from keras.applications.inception_v3 import preprocess_input [as alias]
def test_keras_consistency(self):
        """ Exported model in Keras should get same result as original """

        img_fpaths = glob(os.path.join(_getSampleJPEGDir(), '*.jpg'))

        def keras_load_and_preproc(fpath):
            img = load_img(fpath, target_size=(299, 299))
            img_arr = img_to_array(img)
            img_iv3_input = iv3.preprocess_input(img_arr)  # iv3 is keras.applications.inception_v3, imported as an alias
            return np.expand_dims(img_iv3_input, axis=0)

        imgs_iv3_input = np.vstack([keras_load_and_preproc(fp) for fp in img_fpaths])

        model_ref = InceptionV3(weights="imagenet")
        preds_ref = model_ref.predict(imgs_iv3_input)

        with IsolatedSession(using_keras=True) as issn:
            K.set_learning_phase(0)
            model = InceptionV3(weights="imagenet")
            gfn = issn.asGraphFunction(model.inputs, model.outputs)

        with IsolatedSession(using_keras=True) as issn:
            K.set_learning_phase(0)
            feeds, fetches = issn.importGraphFunction(gfn, prefix="InceptionV3")
            preds_tgt = issn.run(fetches[0], {feeds[0]: imgs_iv3_input})

            np.testing.assert_array_almost_equal(preds_tgt, preds_ref, decimal=5) 
Author: databricks, Project: spark-deep-learning, Lines: 29, Source: test_builder.py


Note: The keras.applications.inception_v3.preprocess_input examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. For distribution and use, please follow the corresponding project's license. Do not reproduce without permission.