

Python inception_v3.InceptionV3 Method Code Examples

This article collects typical usage examples of the Python method keras.applications.inception_v3.InceptionV3. If you are wondering what inception_v3.InceptionV3 does, how to call it, or what idiomatic usage looks like, the curated examples below should help. You can also explore further usage examples from the enclosing module, keras.applications.inception_v3.


The following presents 15 code examples of the inception_v3.InceptionV3 method, sorted by popularity by default.
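
Before diving into the examples, here is a minimal sketch of the most common usage: loading the pretrained network and classifying a single image. The file name 'elephant.jpg' is a placeholder; note that InceptionV3 expects 299x299 inputs.

import numpy as np
from keras.preprocessing import image
from keras.applications.inception_v3 import InceptionV3, preprocess_input, decode_predictions

model = InceptionV3(weights='imagenet')  # full model with the 1000-class ImageNet head

img = image.load_img('elephant.jpg', target_size=(299, 299))  # placeholder path
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)  # add a batch dimension: (1, 299, 299, 3)
x = preprocess_input(x)        # scales pixel values to [-1, 1]

preds = model.predict(x)
print(decode_predictions(preds, top=3)[0])  # top-3 (class_id, name, probability)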

Example 1: RNNModel

# Required import: from keras.applications import inception_v3 [as alias]
# Or: from keras.applications.inception_v3 import InceptionV3 [as alias]
# Also needed: from keras.layers import Input, Dense, Dropout, Embedding, LSTM, concatenate
#              and from keras.models import Model
def RNNModel(vocab_size, max_len, rnnConfig, model_type):
	embedding_size = rnnConfig['embedding_size']
	if model_type == 'inceptionv3':
		# InceptionV3 outputs a 2048 dimensional vector for each image, which we'll feed to RNN Model
		image_input = Input(shape=(2048,))
	elif model_type == 'vgg16':
		# VGG16 outputs a 4096 dimensional vector for each image, which we'll feed to RNN Model
		image_input = Input(shape=(4096,))
	image_model_1 = Dropout(rnnConfig['dropout'])(image_input)
	image_model = Dense(embedding_size, activation='relu')(image_model_1)

	caption_input = Input(shape=(max_len,))
	# mask_zero: inputs are zero-padded to a common length; the mask tells
	# downstream layers to ignore that padding (an efficiency gain)
	caption_model_1 = Embedding(vocab_size, embedding_size, mask_zero=True)(caption_input)
	caption_model_2 = Dropout(rnnConfig['dropout'])(caption_model_1)
	caption_model = LSTM(rnnConfig['LSTM_units'])(caption_model_2)

	# Merging the models and creating a softmax classifier
	final_model_1 = concatenate([image_model, caption_model])
	final_model_2 = Dense(rnnConfig['dense_units'], activation='relu')(final_model_1)
	final_model = Dense(vocab_size, activation='softmax')(final_model_2)

	model = Model(inputs=[image_input, caption_input], outputs=final_model)
	model.compile(loss='categorical_crossentropy', optimizer='adam')
	return model 
Author: dabasajay, Project: Image-Caption-Generator, Lines of code: 27, Source file: model.py
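
RNNModel consumes precomputed 2048-dimensional image features rather than raw pixels. A minimal sketch of how such features could be extracted with InceptionV3 (the random batch is only a stand-in for real preprocessed images):

import numpy as np
from keras.applications.inception_v3 import InceptionV3, preprocess_input
from keras.models import Model

base = InceptionV3(weights='imagenet')
# take the global-average-pooling output, one layer below the classifier
encoder = Model(inputs=base.inputs, outputs=base.layers[-2].output)

batch = np.random.rand(4, 299, 299, 3).astype('float32') * 255.0  # stand-in images
features = encoder.predict(preprocess_input(batch))
print(features.shape)  # (4, 2048), matching RNNModel's image_input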

Example 2: model

# Required import: from keras.applications import inception_v3 [as alias]
# Or: from keras.applications.inception_v3 import InceptionV3 [as alias]
def model(self, preprocessed, featurize):
        # Model provided by Keras. All contributions by Keras are provided subject to the
        # MIT license located at https://github.com/fchollet/keras/blob/master/LICENSE
        # and subject to the below additional copyrights and licenses.
        #
        # Copyright 2016 The TensorFlow Authors.  All rights reserved.
        #
        # Licensed under the Apache License, Version 2.0 (the "License");
        # you may not use this file except in compliance with the License.
        # You may obtain a copy of the License at
        #
        # http://www.apache.org/licenses/LICENSE-2.0
        #
        # Unless required by applicable law or agreed to in writing, software
        # distributed under the License is distributed on an "AS IS" BASIS,
        # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
        # See the License for the specific language governing permissions and
        # limitations under the License.
        """
        From Keras: These weights are released under the Apache License 2.0.
        """
        return inception_v3.InceptionV3(input_tensor=preprocessed, weights="imagenet",
                                        include_top=(not featurize)) 
Author: databricks, Project: spark-deep-learning, Lines of code: 25, Source file: keras_applications.py

Example 3: _imagenet_preprocess_input

# Required import: from keras.applications import inception_v3 [as alias]
# Or: from keras.applications.inception_v3 import InceptionV3 [as alias]
# Also needed: import numpy as np
def _imagenet_preprocess_input(x, input_shape):
    """
    For ResNet50, VGG models. For InceptionV3 and Xception it's okay to use the
    keras version (e.g. InceptionV3.preprocess_input) as the code path they hit
    works okay with tf.Tensor inputs. The following was translated to tf ops from
    https://github.com/fchollet/keras/blob/fb4a0849cf4dc2965af86510f02ec46abab1a6a4/keras/applications/imagenet_utils.py#L52
    The Keras implementation could be changed to look like the following, modified
    to work with BGR images (the standard in Spark), but we are not doing that for now.
    """
    # assuming 'BGR'
    # Zero-center by mean pixel
    mean = np.ones(input_shape + (3,), dtype=np.float32)
    mean[..., 0] = 103.939
    mean[..., 1] = 116.779
    mean[..., 2] = 123.68
    return x - mean 
Author: databricks, Project: spark-deep-learning, Lines of code: 18, Source file: keras_applications.py
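
Because the body is plain tensor arithmetic, the function also works on NumPy arrays, which makes it easy to sanity-check. A quick sketch (the all-zero batch is purely illustrative):

import numpy as np

batch = np.zeros((2, 224, 224, 3), dtype=np.float32)  # pretend BGR images
centered = _imagenet_preprocess_input(batch, input_shape=(224, 224))
print(centered[0, 0, 0])  # [-103.939 -116.779 -123.68]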

Example 4: __init__

# Required import: from keras.applications import inception_v3 [as alias]
# Or: from keras.applications.inception_v3 import InceptionV3 [as alias]
# Also needed: import argparse, json
def __init__(self):
		parser = argparse.ArgumentParser(description='Process the inputs')
		parser.add_argument('--path',help='image directory')
		parser.add_argument('--class_folders',help='class images folder names')
		parser.add_argument('--dim',type=int,help='Image dimensions to process')
		parser.add_argument('--lr',type=float,help='learning rate',default=1e-4)
		parser.add_argument('--batch_size',type=int,help='batch size')
		parser.add_argument('--epochs',type=int,help='no of epochs to train')
		parser.add_argument('--initial_layers_to_freeze',type=int,help='the initial layers to freeze')
		parser.add_argument('--model',help='Standard Model to load',default='InceptionV3')
		parser.add_argument('--folds',type=int,help='num of cross validation folds',default=5)
		parser.add_argument('--outdir',help='output directory')
		
		
		args = parser.parse_args()
		self.path = args.path
		self.class_folders = json.loads(args.class_folders)
		self.dim  = int(args.dim)
		self.lr   = float(args.lr)
		self.batch_size = int(args.batch_size)
		self.epochs =  int(args.epochs)
		self.initial_layers_to_freeze = int(args.initial_layers_to_freeze)
		self.model = args.model
		self.folds = int(args.folds)
		self.outdir = args.outdir 
Author: PacktPublishing, Project: Intelligent-Projects-Using-Python, Lines of code: 27, Source file: TransferLearning.py

Example 5: inception_pseudo

# Required import: from keras.applications import inception_v3 [as alias]
# Or: from keras.applications.inception_v3 import InceptionV3 [as alias]
# Also needed: from keras.layers import Dense, Dropout, GlobalAveragePooling2D
#              and from keras.models import Model
def inception_pseudo(self,dim=224,freeze_layers=30,full_freeze='N'):
		model = InceptionV3(weights='imagenet',include_top=False)
		x = model.output
		x = GlobalAveragePooling2D()(x)
		x = Dense(512, activation='relu')(x)
		x = Dropout(0.5)(x)
		x = Dense(512, activation='relu')(x)
		x = Dropout(0.5)(x)
		out = Dense(5, activation='softmax')(x)
		model_final = Model(inputs=model.input, outputs=out)
		if full_freeze != 'N':
			for layer in model.layers[0:freeze_layers]:
				layer.trainable = False
		return model_final

	# ResNet50 Model for transfer Learning 
Author: PacktPublishing, Project: Intelligent-Projects-Using-Python, Lines of code: 18, Source file: TransferLearning.py
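
A hypothetical driver for this method; `tl` stands in for an instance of the surrounding transfer-learning class, and the optimizer settings are assumptions rather than the project's exact configuration:

from keras.optimizers import Adam

model = tl.inception_pseudo(freeze_layers=30, full_freeze='Y')  # freeze the first 30 layers
model.compile(optimizer=Adam(lr=1e-4),
              loss='categorical_crossentropy', metrics=['accuracy'])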

Example 6: inception_pseudo

# Required import: from keras.applications import inception_v3 [as alias]
# Or: from keras.applications.inception_v3 import InceptionV3 [as alias]
# Also needed: from keras.layers import Dense, Dropout, GlobalAveragePooling2D
#              and from keras.models import Model
def inception_pseudo(self,dim=224,freeze_layers=30,full_freeze='N'):
		model = InceptionV3(weights='imagenet',include_top=False)
		x = model.output
		x = GlobalAveragePooling2D()(x)
		x = Dense(512, activation='relu')(x)
		x = Dropout(0.5)(x)
		x = Dense(512, activation='relu')(x)
		x = Dropout(0.5)(x)
		out = Dense(1)(x)
		model_final = Model(inputs=model.input, outputs=out)
		if full_freeze != 'N':
			for layer in model.layers[0:freeze_layers]:
				layer.trainable = False
		return model_final

	# ResNet50 Model for transfer Learning 
Author: PacktPublishing, Project: Intelligent-Projects-Using-Python, Lines of code: 18, Source file: TransferLearning_reg.py

Example 7: inception_pseudo

# Required import: from keras.applications import inception_v3 [as alias]
# Or: from keras.applications.inception_v3 import InceptionV3 [as alias]
# Also needed: from keras.layers import Dense, Dropout, GlobalAveragePooling2D
#              and from keras.models import Model
def inception_pseudo(self,dim=224,freeze_layers=10,full_freeze='N'):
        model = InceptionV3(weights='imagenet',include_top=False)
        x = model.output
        x = GlobalAveragePooling2D()(x)
        x = Dense(512, activation='relu')(x)
        x = Dropout(0.5)(x)
        x = Dense(512, activation='relu')(x)
        x = Dropout(0.5)(x)
        out = Dense(5, activation='softmax')(x)
        model_final = Model(inputs=model.input, outputs=out)
        if full_freeze != 'N':
            for layer in model.layers[0:freeze_layers]:
                layer.trainable = False
        return model_final

# ResNet50 Model for transfer Learning 
Author: PacktPublishing, Project: Intelligent-Projects-Using-Python, Lines of code: 18, Source file: TransferLearning_ffd.py

Example 8: _build_image_embedding

# Required import: from keras.applications import inception_v3 [as alias]
# Or: from keras.applications.inception_v3 import InceptionV3 [as alias]
# Also needed: from keras.layers import BatchNormalization, Dense, RepeatVector
def _build_image_embedding(self):
        image_model = InceptionV3(include_top=False, weights='imagenet',
                                  pooling='avg')
        for layer in image_model.layers:
            layer.trainable = False

        dense_input = BatchNormalization(axis=-1)(image_model.output)
        image_dense = Dense(units=self._embedding_size,
                            kernel_regularizer=self._regularizer,
                            kernel_initializer=self._initializer
                            )(dense_input)
        # Add timestep dimension
        image_embedding = RepeatVector(1)(image_dense)

        image_input = image_model.input
        return image_input, image_embedding 
Author: danieljl, Project: keras-image-captioning, Lines of code: 18, Source file: models.py

Example 9: inception

# Required import: from keras.applications import inception_v3 [as alias]
# Or: from keras.applications.inception_v3 import InceptionV3 [as alias]
def inception(self):
        """Build the structure of a convolutional neural network from input
        image data to the last hidden layer, built in a manner similar to
        Inception-V4

        See: Szegedy, Vanhoucke, Ioffe, Shlens. Rethinking the Inception
        Architecture for Computer Vision. ArXiv technical report, 2015.

        Returns
        -------
        tensor
            (batch_size, nb_labels)-shaped output predictions, to be compared
            with ground-truth values

        """
        inception_model = inception_v3.InceptionV3(
            input_tensor=self.X, include_top=False
        )
        y = K.layers.GlobalAveragePooling2D()(inception_model.output)
        return self.output_layer(y, depth=self.nb_labels) 
Author: Oslandia, Project: deeposlandia, Lines of code: 22, Source file: feature_detection.py

Example 10: _get_base_model

# Required import: from keras.applications import inception_v3 [as alias]
# Or: from keras.applications.inception_v3 import InceptionV3 [as alias]
# Also needed: from keras.applications import Xception, VGG16, VGG19, ResNet50
def _get_base_model(self):
        """
        :return: base model from Keras based on user-supplied model name
        """
        if self.model_name == 'inception_v3':
            return InceptionV3(weights='imagenet', include_top=False)
        elif self.model_name == 'xception':
            return Xception(weights='imagenet', include_top=False)
        elif self.model_name == 'vgg16':
            return VGG16(weights='imagenet', include_top=False)
        elif self.model_name == 'vgg19':
            return VGG19(weights='imagenet', include_top=False)
        elif self.model_name == 'resnet50':
            return ResNet50(weights='imagenet', include_top=False)
        else:
            raise ValueError('Cannot find base model %s' % self.model_name) 
Author: thoughtworksarts, Project: EmoPy, Lines of code: 18, Source file: neuralnets.py

Example 11: cnn_spatial

# Required import: from keras.applications import inception_v3 [as alias]
# Or: from keras.applications.inception_v3 import InceptionV3 [as alias]
# Also needed: from keras.layers import Dense, GlobalAveragePooling2D
#              and from keras.models import Model
def cnn_spatial(self, weights='imagenet'):
        # create the base pre-trained model
        base_model = InceptionV3(weights=weights, include_top=False)
    
        # add a global spatial average pooling layer
        x = base_model.output
        x = GlobalAveragePooling2D()(x)
        # let's add a fully-connected layer
        x = Dense(1024, activation='relu')(x)
        # and a logistic layer
        predictions = Dense(self.nb_classes, activation='softmax')(x)
    
        # this is the model we will train
        model = Model(inputs=base_model.input, outputs=predictions)

        return model 
Author: wushidonguc, Project: two-stream-action-recognition-keras, Lines of code: 18, Source file: spatial_validate_model.py

Example 12: build_model_feature_extraction

# Required import: from keras.applications import inception_v3 [as alias]
# Or: from keras.applications.inception_v3 import InceptionV3 [as alias]
# Also needed: from keras.layers import Dense, GlobalAveragePooling2D
#              and from keras.models import Model
def build_model_feature_extraction():
    # create the base pre-trained model
    base_model = InceptionV3(weights='imagenet', include_top=False)

    # add a global spatial average pooling layer
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    # let's add a fully-connected layer
    x = Dense(1024, activation='relu')(x)
    # and a logistic layer
    predictions = Dense(1, activation='sigmoid')(x)

    # this is the model we will train
    model = Model(inputs=base_model.input, outputs=predictions)

    # first: train only the top layers (which were randomly initialized)
    # i.e. freeze all convolutional InceptionV3 layers
    for layer in base_model.layers:
        layer.trainable = False

    # compile the model (should be done *after* setting layers to non-trainable)
    model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
    return model 
Author: PacktPublishing, Project: Deep-Learning-Quick-Reference, Lines of code: 25, Source file: inceptionV3.py
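
build_model_feature_extraction covers only the first, feature-extraction phase. A common second phase, following the classic Keras fine-tuning recipe, unfreezes the top two inception blocks and recompiles with a low learning rate; the boundary index 249 comes from that recipe, not from this project. A sketch:

from keras.optimizers import SGD

def fine_tune(model, unfreeze_from=249):
    # freeze everything below the top two inception blocks, unfreeze the rest
    for layer in model.layers[:unfreeze_from]:
        layer.trainable = False
    for layer in model.layers[unfreeze_from:]:
        layer.trainable = True
    # recompile with a slow optimizer so the pretrained weights are only nudged
    model.compile(optimizer=SGD(lr=1e-4, momentum=0.9),
                  loss='binary_crossentropy', metrics=['accuracy'])
    return model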

Example 13: unfreeze

# Required import: from keras.applications import inception_v3 [as alias]
# Or: from keras.applications.inception_v3 import InceptionV3 [as alias]
# Also needed: from keras.optimizers import SGD
def unfreeze(self,layers):
        """
        unfreeze a specified number of InceptionV3 layers ard recompile model
        """
        inception_layers = 311
        boundary = inception_layers - layers  # avoid shadowing the builtin `slice`

        for layer in self.model.layers[:boundary]:
            layer.trainable = False
        for layer in self.model.layers[boundary:]:
            layer.trainable = True

        self.model.compile(optimizer=SGD(lr=self.lr, momentum=0.9), loss='categorical_crossentropy', metrics=['accuracy'])

    # train a model from scratch given a set of training parameters
    # choose whether to save the model 
Author: 921kiyo, Project: 3d-dl, Lines of code: 18, Source file: retrain.py
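
Usage is then a single call; e.g. unfreezing the last 20 of the 311 layers (a hypothetical call, assuming `trainer` is an instance of the surrounding class):

trainer.unfreeze(20)  # layers [0, 291) stay frozen, layers [291, 311) become trainable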

Example 14: CNNModel

# Required import: from keras.applications import inception_v3 [as alias]
# Or: from keras.applications.inception_v3 import InceptionV3 [as alias]
# Also needed: from keras.applications.vgg16 import VGG16 and from keras.models import Model
def CNNModel(model_type):
	if model_type == 'inceptionv3':
		model = InceptionV3()
	elif model_type == 'vgg16':
		model = VGG16()
	# drop the final softmax layer so the network outputs its penultimate
	# feature vector (2048-d for InceptionV3, 4096-d for VGG16)
	model.layers.pop()
	model = Model(inputs=model.inputs, outputs=model.layers[-1].output)
	return model 
Author: dabasajay, Project: Image-Caption-Generator, Lines of code: 10, Source file: model.py
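
A quick check that the extractor's output width matches RNNModel's image_input (the shapes are those of the stock Keras models):

encoder = CNNModel('inceptionv3')
print(encoder.output_shape)  # (None, 2048)

encoder = CNNModel('vgg16')
print(encoder.output_shape)  # (None, 4096)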

Example 15: AlternativeRNNModel

# Required import: from keras.applications import inception_v3 [as alias]
# Or: from keras.applications.inception_v3 import InceptionV3 [as alias]
# Also needed: from keras.layers import Input, Dense, Embedding, LSTM, RepeatVector,
#              TimeDistributed, Bidirectional, concatenate; and from keras.models import Model
def AlternativeRNNModel(vocab_size, max_len, rnnConfig, model_type):
	embedding_size = rnnConfig['embedding_size']
	if model_type == 'inceptionv3':
		# InceptionV3 outputs a 2048 dimensional vector for each image, which we'll feed to RNN Model
		image_input = Input(shape=(2048,))
	elif model_type == 'vgg16':
		# VGG16 outputs a 4096 dimensional vector for each image, which we'll feed to RNN Model
		image_input = Input(shape=(4096,))
	image_model_1 = Dense(embedding_size, activation='relu')(image_input)
	image_model = RepeatVector(max_len)(image_model_1)

	caption_input = Input(shape=(max_len,))
	# mask_zero: inputs are zero-padded to a common length; the mask tells
	# downstream layers to ignore that padding (an efficiency gain)
	caption_model_1 = Embedding(vocab_size, embedding_size, mask_zero=True)(caption_input)
	# Since we are going to predict the next word using the previous words
	# (length of previous words changes with every iteration over the caption), we have to set return_sequences = True.
	caption_model_2 = LSTM(rnnConfig['LSTM_units'], return_sequences=True)(caption_model_1)
	# caption_model = TimeDistributed(Dense(embedding_size, activation='relu'))(caption_model_2)
	caption_model = TimeDistributed(Dense(embedding_size))(caption_model_2)

	# Merging the models and creating a softmax classifier
	final_model_1 = concatenate([image_model, caption_model])
	# final_model_2 = LSTM(rnnConfig['LSTM_units'], return_sequences=False)(final_model_1)
	final_model_2 = Bidirectional(LSTM(rnnConfig['LSTM_units'], return_sequences=False))(final_model_1)
	# final_model_3 = Dense(rnnConfig['dense_units'], activation='relu')(final_model_2)
	# final_model = Dense(vocab_size, activation='softmax')(final_model_3)
	final_model = Dense(vocab_size, activation='softmax')(final_model_2)

	model = Model(inputs=[image_input, caption_input], outputs=final_model)
	model.compile(loss='categorical_crossentropy', optimizer='adam')
	# model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
	return model 
Author: dabasajay, Project: Image-Caption-Generator, Lines of code: 34, Source file: model.py
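
A minimal smoke test of the two-input model with toy data; every value below is made up for illustration (only 'embedding_size' and 'LSTM_units' are read from rnnConfig in this variant):

import numpy as np

cfg = {'embedding_size': 300, 'LSTM_units': 256}
m = AlternativeRNNModel(vocab_size=1000, max_len=20, rnnConfig=cfg,
                        model_type='inceptionv3')

img = np.random.rand(2, 2048).astype('float32')          # fake image features
cap = np.random.randint(1, 1000, size=(2, 20))           # fake token ids
y = np.zeros((2, 1000), dtype='float32'); y[:, 5] = 1.0  # one-hot next words

loss = m.train_on_batch([img, cap], y)
print(loss)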


Note: The keras.applications.inception_v3.InceptionV3 examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their authors; copyright in the source code remains with the original authors, and redistribution or use should follow each project's License. Do not reproduce without permission.