

Python applications.InceptionV3 Method Code Examples

This article collects typical usage examples of the Python method keras.applications.InceptionV3. If you are looking for answers to questions such as what applications.InceptionV3 does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also browse further usage examples from the keras.applications module that contains this method.


The sections below present 15 code examples of applications.InceptionV3, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
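
As a primer for the examples below, here is a minimal sketch of the standard keras.applications.InceptionV3 classification workflow: load the ImageNet-pretrained model, preprocess a 299x299 RGB image, and decode the top predictions. The image path "example.jpg" is a placeholder, and the sketch assumes a Keras 2.x installation that can download the ImageNet weights.

import numpy as np
from keras.applications import InceptionV3
from keras.applications.inception_v3 import preprocess_input, decode_predictions
from keras.preprocessing.image import load_img, img_to_array

# Build the ImageNet-pretrained classifier (expects 299x299 RGB input).
model = InceptionV3(weights="imagenet")

# Load and preprocess a single image; "example.jpg" is a placeholder path.
img = load_img("example.jpg", target_size=(299, 299))
x = np.expand_dims(img_to_array(img), axis=0)   # shape (1, 299, 299, 3)
x = preprocess_input(x)                         # scale pixels to [-1, 1]

# Predict and decode the top-5 ImageNet classes.
preds = model.predict(x)
print(decode_predictions(preds, top=5)[0])      # [(class_id, description, probability), ...]

Most of the examples that follow are variations on this pattern: they either apply the full classifier to image data or reuse the network (include_top=False) as a feature extractor.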

Example 1: executeKerasInceptionV3

# Required import: from keras import applications [as alias]
# Or: from keras.applications import InceptionV3 [as alias]
def executeKerasInceptionV3(image_df, uri_col="filePath"):
    """
    Apply Keras InceptionV3 Model on input DataFrame.
    :param image_df: Dataset. contains a column (uri_col) for where the image file lives.
    :param uri_col: str. name of the column indicating where each row's image file lives.
    :return: ({str => np.array[float]}, {str => (str, str, float)}).
      image file uri to prediction probability array,
      image file uri to top K predictions (class id, class description, probability).
    """
    K.set_learning_phase(0)
    model = InceptionV3(weights="imagenet")

    values = {}
    topK = {}
    for row in image_df.select(uri_col).collect():
        raw_uri = row[uri_col]
        image = loadAndPreprocessKerasInceptionV3(raw_uri)
        values[raw_uri] = model.predict(image)
        topK[raw_uri] = decode_predictions(values[raw_uri], top=5)[0]
    return values, topK 
Author: databricks, Project: spark-deep-learning, Lines: 22, Source: image_utils.py

Example 2: test_load_image_vs_keras_RGB

# Required import: from keras import applications [as alias]
# Or: from keras.applications import InceptionV3 [as alias]
def test_load_image_vs_keras_RGB(self):
        g = tf.Graph()
        with g.as_default():
            image_arr = utils.imageInputPlaceholder()
            # keras expects array in RGB order, we get it from image schema in BGR => need to flip
            preprocessed = preprocess_input(image_arr)

        output_col = "transformed_image"
        transformer = TFImageTransformer(channelOrder='RGB', inputCol="image", outputCol=output_col, graph=g,
                                         inputTensor=image_arr, outputTensor=preprocessed.name,
                                         outputMode="vector")

        image_df = image_utils.getSampleImageDF()
        df = transformer.transform(image_df.limit(5))

        for row in df.collect():
            processed = np.array(row[output_col], dtype = np.float32)
            # compare to keras loading
            images = self._loadImageViaKeras(row["image"]['origin'])
            image = images[0]
            image.shape = (1, image.shape[0] * image.shape[1] * image.shape[2])
            keras_processed = image[0]
            np.testing.assert_array_almost_equal(keras_processed, processed, decimal = 6)

    # Test full pre-processing for InceptionV3 as an example of a simple computation graph 
Author: databricks, Project: spark-deep-learning, Lines: 27, Source: tf_image_test.py

Example 3: test_image_output

# Required import: from keras import applications [as alias]
# Or: from keras.applications import InceptionV3 [as alias]
def test_image_output(self):
        output_col = "resized_image"
        preprocessed_df = self._preprocessingInceptionV3Transformed("image", output_col)
        self.assertDfHasCols(preprocessed_df, [output_col])
        for row in preprocessed_df.collect():
            original = row["image"]
            processed = row[output_col]
            errMsg = "nChannels must match: original {} v.s. processed {}"
            errMsg = errMsg.format(original.nChannels, processed.nChannels)
            self.assertEqual(original.nChannels, processed.nChannels, errMsg)
            self.assertEqual(processed.height, InceptionV3Constants.INPUT_SHAPE[0])
            self.assertEqual(processed.width, InceptionV3Constants.INPUT_SHAPE[1])

    # TODO: add tests for non-RGB8 images, at least RGB-float32.

    # Test InceptionV3 prediction as an example of applying a trained model. 
Author: databricks, Project: spark-deep-learning, Lines: 18, Source: tf_image_test.py

Example 4: prepInceptionV3KerasModelFile

# Required import: from keras import applications [as alias]
# Or: from keras.applications import InceptionV3 [as alias]
def prepInceptionV3KerasModelFile(fileName):
    model_dir_tmp = tempfile.mkdtemp("sparkdl_keras_tests", dir="/tmp")
    path = model_dir_tmp + "/" + fileName

    height, width = InceptionV3Constants.INPUT_SHAPE
    input_shape = (height, width, 3)
    model = InceptionV3(weights="imagenet", include_top=True, input_shape=input_shape)
    model.save(path)
    return path 
Author: databricks, Project: spark-deep-learning, Lines: 11, Source: image_utils.py

Example 5: test_prediction_vs_tensorflow_inceptionV3

# Required import: from keras import applications [as alias]
# Or: from keras.applications import InceptionV3 [as alias]
def test_prediction_vs_tensorflow_inceptionV3(self):
        output_col = "prediction"
        image_df = image_utils.getSampleImageDF()

        # An example of how a pre-trained keras model can be used with TFImageTransformer
        with KSessionWrap() as (sess, g):
            with g.as_default():
                K.set_learning_phase(0)    # this is important but it's on the user to call it.
                # nChannels needed for input_tensor in the InceptionV3 call below
                image_string = utils.imageInputPlaceholder(nChannels=3)
                resized_images = tf.image.resize_images(image_string,
                                                        InceptionV3Constants.INPUT_SHAPE)
                # keras expects array in RGB order, we get it from image schema in BGR =>
                # need to flip
                preprocessed = preprocess_input(imageIO._reverseChannels(resized_images))
                model = InceptionV3(input_tensor=preprocessed, weights="imagenet")
                graph = tfx.strip_and_freeze_until([model.output], g, sess, return_graph=True)

        transformer = TFImageTransformer(channelOrder='BGR', inputCol="image", outputCol=output_col, graph=graph,
                                         inputTensor=image_string, outputTensor=model.output,
                                         outputMode="vector")
        transformed_df = transformer.transform(image_df.limit(10))
        self.assertDfHasCols(transformed_df, [output_col])
        collected = transformed_df.collect()
        transformer_values, transformer_topK = self.transformOutputToComparables(collected,
                                                                                 output_col, lambda row: row['image']['origin'])

        tf_values, tf_topK = self._executeTensorflow(graph, image_string.name, model.output.name,
                                                     image_df)
        self.compareClassSets(tf_topK, transformer_topK)
        self.compareClassOrderings(tf_topK, transformer_topK)
        self.compareArrays(tf_values, transformer_values, decimal=5) 
Author: databricks, Project: spark-deep-learning, Lines: 34, Source: tf_image_test.py

Example 6: test_keras_consistency

# Required import: from keras import applications [as alias]
# Or: from keras.applications import InceptionV3 [as alias]
def test_keras_consistency(self):
        """ Exported model in Keras should get same result as original """

        img_fpaths = glob(os.path.join(_getSampleJPEGDir(), '*.jpg'))

        def keras_load_and_preproc(fpath):
            img = load_img(fpath, target_size=(299, 299))
            img_arr = img_to_array(img)
            img_iv3_input = iv3.preprocess_input(img_arr)
            return np.expand_dims(img_iv3_input, axis=0)

        imgs_iv3_input = np.vstack([keras_load_and_preproc(fp) for fp in img_fpaths])

        model_ref = InceptionV3(weights="imagenet")
        preds_ref = model_ref.predict(imgs_iv3_input)

        with IsolatedSession(using_keras=True) as issn:
            K.set_learning_phase(0)
            model = InceptionV3(weights="imagenet")
            gfn = issn.asGraphFunction(model.inputs, model.outputs)

        with IsolatedSession(using_keras=True) as issn:
            K.set_learning_phase(0)
            feeds, fetches = issn.importGraphFunction(gfn, prefix="InceptionV3")
            preds_tgt = issn.run(fetches[0], {feeds[0]: imgs_iv3_input})

            np.testing.assert_array_almost_equal(preds_tgt, preds_ref, decimal=5) 
Author: databricks, Project: spark-deep-learning, Lines: 29, Source: test_builder.py

Example 7: test_bare_keras_module

# Required import: from keras import applications [as alias]
# Or: from keras.applications import InceptionV3 [as alias]
def test_bare_keras_module(self):
        """ Keras GraphFunctions should give the same result as standard Keras models """
        img_fpaths = glob(os.path.join(_getSampleJPEGDir(), '*.jpg'))

        for model_gen, preproc_fn, target_size in [
                (InceptionV3, iv3.preprocess_input, model_sizes['InceptionV3']),
                (Xception, xcpt.preprocess_input, model_sizes['Xception']),
                (ResNet50, rsnt.preprocess_input, model_sizes['ResNet50'])]:

            keras_model = model_gen(weights="imagenet")
            _preproc_img_list = []
            for fpath in img_fpaths:
                img = load_img(fpath, target_size=target_size)
                # WARNING: must apply expand dimensions first, or ResNet50 preprocessor fails
                img_arr = np.expand_dims(img_to_array(img), axis=0)
                _preproc_img_list.append(preproc_fn(img_arr))

            imgs_input = np.vstack(_preproc_img_list)

            preds_ref = keras_model.predict(imgs_input)

            gfn_bare_keras = GraphFunction.fromKeras(keras_model)

            with IsolatedSession(using_keras=True) as issn:
                K.set_learning_phase(0)
                feeds, fetches = issn.importGraphFunction(gfn_bare_keras)
                preds_tgt = issn.run(fetches[0], {feeds[0]: imgs_input})

            np.testing.assert_array_almost_equal(preds_tgt,
                                                 preds_ref,
                                                 decimal=self.featurizerCompareDigitsExact) 
Author: databricks, Project: spark-deep-learning, Lines: 32, Source: test_pieces.py

Example 8: __init__

# Required import: from keras import applications [as alias]
# Or: from keras.applications import InceptionV3 [as alias]
def __init__(self, input_size):
        input_image = Input(shape=(input_size, input_size, 3))

        inception = InceptionV3(input_shape=(input_size,input_size,3), include_top=False)
        inception.load_weights(INCEPTION3_BACKEND_PATH)

        x = inception(input_image)

        self.feature_extractor = Model(input_image, x) 
Author: pranoyr, Project: head-detection-using-yolo, Lines: 11, Source: backend.py

Example 9: load_fine_tune_googlenet_v3

# Required import: from keras import applications [as alias]
# Or: from keras.applications import InceptionV3 [as alias]
def load_fine_tune_googlenet_v3(img):
    # Load the fine-tuned GoogLeNet v3 (InceptionV3) model and run a prediction
    model = InceptionV3(include_top=True, weights='imagenet')
    model.summary()
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    preds = model.predict(x)
    print('Predicted:', decode_predictions(preds))
    plt.subplot(212)
    plt.plot(preds.ravel())
    plt.show()
    return model, x 
Author: huxiaoman7, Project: PaddlePaddle_code, Lines: 15, Source: keras_model_visualization.py

Example 10: test_validate_keras_inception

# Required import: from keras import applications [as alias]
# Or: from keras.applications import InceptionV3 [as alias]
def test_validate_keras_inception(self):
        input_tensor = Input(shape=(224, 224, 3))
        model = InceptionV3(weights="imagenet", input_tensor=input_tensor)
        file_name = "keras"+model.name+".pmml"
        pmml_obj = KerasToPmml(model,dataSet="image",predictedClasses=[str(i) for i in range(1000)])
        pmml_obj.export(open(file_name,'w'),0)
        self.assertEqual(self.schema.is_valid(file_name), True) 
Author: nyoka-pmml, Project: nyoka, Lines: 9, Source: _validateSchema.py

Example 11: create_model

# Required import: from keras import applications [as alias]
# Or: from keras.applications import InceptionV3 [as alias]
def create_model():
    # Data format: TensorFlow uses channels_last; Theano uses channels_first
    if DATA_FORMAT=='channels_first':
        INP_SHAPE=(3,299,299)
        img_input=Input(shape=INP_SHAPE)
        CONCAT_AXIS=1
    elif DATA_FORMAT=='channels_last':
        INP_SHAPE=(299,299,3)
        img_input=Input(shape=INP_SHAPE)
        CONCAT_AXIS=3
    else:
        raise Exception('Invalid Dim Ordering')
    base_model = InceptionV3(weights='imagenet', include_top=False)
    base_model.summary()
    for layer in base_model.layers:
        layer.trainable = False

    x = base_model.get_layer('mixed7').output

    x = Convolution2D(512, (1, 1), kernel_initializer="glorot_uniform", padding="same", name="DenseNet_initial_conv2D", use_bias=False,
                      kernel_regularizer=l2(WEIGHT_DECAY))(x)

    x = BatchNormalization()(x)

    x, nb_filter = dense_block(x, 5, 512, growth_rate=64,dropout_rate=0.5)

    x = AveragePooling2D(pool_size=(7, 7), strides=1, padding='valid', data_format=DATA_FORMAT)(x)

    x = Dense(512, activation='relu')(x)
    #x = Dropout(0.5)(x)
    x = Dense(16)(x)

    x = Lambda(lambda x:tf.nn.l2_normalize(x))(x)

    model = Model(inputs=base_model.input, outputs=x)

    return model 
Author: GerardLiu96, Project: FECNet, Lines: 40, Source: FECWithPretrained.py

Example 12: test_inceptionv3

# Required import: from keras import applications [as alias]
# Or: from keras.applications import InceptionV3 [as alias]
def test_inceptionv3():
    app = applications.InceptionV3
    last_dim = 2048
    _test_application_basic(app)
    _test_application_notop(app, last_dim)
    _test_application_variable_input_channels(app, last_dim)
    _test_app_pooling(app, last_dim) 
Author: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines: 9, Source: applications_test.py

Example 13: load_model

# Required import: from keras import applications [as alias]
# Or: from keras.applications import InceptionV3 [as alias]
def load_model(self, model_path, pretrained=False):
        self.pretrained = pretrained

        if not pretrained:   
            try:
                if os.path.isfile(model_path):
                    self.model = load_model(model_path)
                    print('[+] Model loading complete')

                else:
                    print('[-] Model loading incomplete, could not find model - {}'.format(model_path))

            except Exception as err:
                print('[-] Model loading unsuccessful, please check your model file:')
                print(err)
        else:
            from keras.applications import InceptionV3
            self.model = InceptionV3(weights='imagenet')

        # a "begin" marker to time how long it takes (in real time) to compile
        start_compile = d.now()

        # actually compile the model
        self.model.compile(
            loss=l_type,
            optimizer=opt,
            metrics=met
        )

        # a calculation of the compile time, in seconds
        compile_time = (d.now() - start_compile).total_seconds()

        print('[+] Model successfully compiled in {:.3f} seconds'.format(compile_time))


    # a method for loading in the data given path (and many optional arguments)
    # note, this data path should point to a folder with the data 
Author: powerhouseofthecell, Project: machine_feeling, Lines: 39, Source: ml_model.py

Example 14: make_model

# Required import: from keras import applications [as alias]
# Or: from keras.applications import InceptionV3 [as alias]
def make_model(model, image_size):
    if model == "inceptionv3":
        base_model = InceptionV3(include_top=False, input_shape=image_size + (3,))
    elif model == "vgg16" or model is None:
        base_model = VGG16(include_top=False, input_shape=image_size + (3,))
    elif model == "mobilenet":
        base_model = MobileNet(include_top=False, input_shape=image_size + (3,))
    return base_model 
Author: seongahjo, Project: Mosaicer, Lines: 10, Source: file_util.py

Example 15: get_imagenet_architecture

# Required import: from keras import applications [as alias]
# Or: from keras.applications import InceptionV3 [as alias]
def get_imagenet_architecture(architecture, variant, size, alpha, output_layer, include_top=False, weights='imagenet'):
    from keras import applications, Model

    if include_top:
        assert output_layer == 'last'

    if size == 'auto':
        size = get_image_size(architecture, variant, size)

    shape = (size, size, 3)

    if architecture == 'densenet':
        if variant == 'auto':
            variant = 'densenet-121'
        if variant == 'densenet-121':
            model = applications.DenseNet121(weights=weights, include_top=include_top, input_shape=shape)
        elif variant == 'densenet-169':
            model = applications.DenseNet169(weights=weights, include_top=include_top, input_shape=shape)
        elif variant == 'densenet-201':
            model = applications.DenseNet201(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'inception-resnet-v2':
        model = applications.InceptionResNetV2(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'mobilenet':
        model = applications.MobileNet(weights=weights, include_top=include_top, input_shape=shape, alpha=alpha)
    elif architecture == 'mobilenet-v2':
        model = applications.MobileNetV2(weights=weights, include_top=include_top, input_shape=shape, alpha=alpha)
    elif architecture == 'nasnet':
        if variant == 'auto':
            variant = 'large'
        if variant == 'large':
            model = applications.NASNetLarge(weights=weights, include_top=include_top, input_shape=shape)
        else:
            model = applications.NASNetMobile(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'resnet-50':
        model = applications.ResNet50(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'vgg-16':
        model = applications.VGG16(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'vgg-19':
        model = applications.VGG19(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'xception':
        model = applications.Xception(weights=weights, include_top=include_top, input_shape=shape)
    elif architecture == 'inception-v3':
        model = applications.InceptionV3(weights=weights, include_top=include_top, input_shape=shape)

    if output_layer != 'last':
        try:
            if isinstance(output_layer, int):
                layer = model.layers[output_layer]
            else:
                layer = model.get_layer(output_layer)
        except Exception:
            raise VergeMLError('layer not found: {}'.format(output_layer))
        model = Model(inputs=model.input, outputs=layer.output)

    return model 
Author: mme, Project: vergeml, Lines: 57, Source: features.py


Note: The keras.applications.InceptionV3 examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. Please consult the corresponding project's License before distributing or using the code, and do not reproduce this article without permission.