

Python vgg16.VGG16 Attribute Code Examples

This article collects typical usage examples of the keras.applications.vgg16.VGG16 attribute in Python. If you are wondering what exactly vgg16.VGG16 does, how to use it, or want to see it in real code, the curated examples below may help. You can also explore further usage examples from the module it belongs to, keras.applications.vgg16.


The following shows 15 code examples of the vgg16.VGG16 attribute, sorted by popularity by default.

Example 1: RNNModel

# Required import: from keras.applications import vgg16 [as alias]
# Or: from keras.applications.vgg16 import VGG16 [as alias]
def RNNModel(vocab_size, max_len, rnnConfig, model_type):
	embedding_size = rnnConfig['embedding_size']
	if model_type == 'inceptionv3':
		# InceptionV3 outputs a 2048 dimensional vector for each image, which we'll feed to RNN Model
		image_input = Input(shape=(2048,))
	elif model_type == 'vgg16':
		# VGG16 outputs a 4096 dimensional vector for each image, which we'll feed to RNN Model
		image_input = Input(shape=(4096,))
	image_model_1 = Dropout(rnnConfig['dropout'])(image_input)
	image_model = Dense(embedding_size, activation='relu')(image_model_1)

	caption_input = Input(shape=(max_len,))
	# mask_zero: inputs are zero-padded to the same length; the mask makes downstream layers ignore the padded timesteps, which is an efficiency optimization.
	caption_model_1 = Embedding(vocab_size, embedding_size, mask_zero=True)(caption_input)
	caption_model_2 = Dropout(rnnConfig['dropout'])(caption_model_1)
	caption_model = LSTM(rnnConfig['LSTM_units'])(caption_model_2)

	# Merging the models and creating a softmax classifier
	final_model_1 = concatenate([image_model, caption_model])
	final_model_2 = Dense(rnnConfig['dense_units'], activation='relu')(final_model_1)
	final_model = Dense(vocab_size, activation='softmax')(final_model_2)

	model = Model(inputs=[image_input, caption_input], outputs=final_model)
	model.compile(loss='categorical_crossentropy', optimizer='adam')
	return model 
Author: dabasajay, Project: Image-Caption-Generator, Lines: 27, Source: model.py
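
The RNN model above consumes pre-extracted image features rather than raw pixels. As a point of reference, here is a minimal sketch (not part of the original repository) of how the 4096-dimensional VGG16 vector it expects is commonly produced, by taking the output of the stock Keras VGG16's second fully connected layer 'fc2'; the InceptionV3 case is analogous with a 2048-dimensional vector.

from keras.applications.vgg16 import VGG16, preprocess_input
from keras.preprocessing import image
from keras.models import Model
import numpy as np

# Keep the network up to 'fc2' so predict() returns a 4096-d feature vector.
base = VGG16(weights='imagenet')
feature_extractor = Model(inputs=base.input, outputs=base.get_layer('fc2').output)

def extract_feature(img_path):
    img = image.load_img(img_path, target_size=(224, 224))
    x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
    return feature_extractor.predict(x)  # shape (1, 4096)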

Example 2: test_ShapGradientExplainer

# Required import: from keras.applications import vgg16 [as alias]
# Or: from keras.applications.vgg16 import VGG16 [as alias]
def test_ShapGradientExplainer(self):

    #     model = VGG16(weights='imagenet', include_top=True)
    #     X, y = shap.datasets.imagenet50()
    #     to_explain = X[[39, 41]]
    #
    #     url = "https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json"
    #     fname = shap.datasets.cache(url)
    #     with open(fname) as f:
    #         class_names = json.load(f)
    #
    #     def map2layer(x, layer):
    #         feed_dict = dict(zip([model.layers[0].input], [preprocess_input(x.copy())]))
    #         return K.get_session().run(model.layers[layer].input, feed_dict)
    #
    #     e = GradientExplainer((model.layers[7].input, model.layers[-1].output),
    #                           map2layer(preprocess_input(X.copy()), 7))
    #     shap_values, indexes = e.explain_instance(map2layer(to_explain, 7), ranked_outputs=2)
    #
          print("Skipped Shap GradientExplainer") 
Author: IBM, Project: AIX360, Lines: 22, Source: test_shap.py

Example 3: build_vgg16

# Required import: from keras.applications import vgg16 [as alias]
# Or: from keras.applications.vgg16 import VGG16 [as alias]
def build_vgg16(image_size=None):
	image_size = image_size or (240, 240)
	if K.image_data_format() == 'channels_first':
	    input_shape = (3,) + image_size
	else:
	    input_shape = image_size + (3, )
	bottleneck_model = vgg16.VGG16(include_top=False, 
	                               input_tensor=Input(input_shape))
	#bottleneck_model.trainable = False
	for layer in bottleneck_model.layers:
	    layer.trainable = False

	x = bottleneck_model.input
	y = bottleneck_model.output
	y = Flatten()(y)
	y = BatchNormalization()(y)
	y = Dense(2048, activation='relu')(y)
	y = Dropout(.5)(y)
	y = Dense(1024, activation='relu')(y)
	y = Dropout(.5)(y)
	y = Dense(1)(y)

	model = Model(inputs=x, outputs=y)
	model.compile(optimizer=Adam(lr=1e-4), loss = 'mse')
	return model 
Author: dolaameng, Project: udacity-SDC-baseline, Lines: 27, Source: model.py

Example 4: get_loss_net

# Required import: from keras.applications import vgg16 [as alias]
# Or: from keras.applications.vgg16 import VGG16 [as alias]
def get_loss_net(pastiche_net_output, input_tensor=None):
    '''
    Instantiates a VGG net and applies its layers on top of the pastiche net's
    output.
    '''
    loss_net = vgg16.VGG16(weights='imagenet', include_top=False,
                           input_tensor=input_tensor)
    targets_dict = dict([(layer.name, layer.output) for layer in loss_net.layers])
    i = pastiche_net_output
    # We need to apply all layers to the output of the style net
    outputs_dict = {}
    for l in loss_net.layers[1:]: # Ignore the input layer
        i = l(i)
        outputs_dict[l.name] = i

    return loss_net, outputs_dict, targets_dict 
Author: robertomest, Project: neural-style-keras, Lines: 18, Source: training.py

Example 5: video_to_frames

# Required import: from keras.applications import vgg16 [as alias]
# Or: from keras.applications.vgg16 import VGG16 [as alias]
def video_to_frames(self,video):
        
        with open(os.devnull, "w") as ffmpeg_log:
            if os.path.exists(self.temp_dest):
                print(" cleanup: " + self.temp_dest + "/")
                shutil.rmtree(self.temp_dest)
            os.makedirs(self.temp_dest)
            video_to_frames_cmd = ["ffmpeg",
                                       
                                       '-y',
                                       '-i', video,  
                                       '-vf', "scale=400:300", 
                                       '-qscale:v', "2", 
                                       '{0}/%06d.jpg'.format(self.temp_dest)]
            subprocess.call(video_to_frames_cmd,
                            stdout=ffmpeg_log, stderr=ffmpeg_log)
                        
# Load the pre-trained VGG16 Model and extract the dense features as output 
Author: PacktPublishing, Project: Intelligent-Projects-Using-Python, Lines: 20, Source: VideoCaptioningPreProcessing.py
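
The snippet above ends with a comment announcing the next step, which is truncated here. A hedged sketch of what loading the pre-trained VGG16 model and extracting dense features for the generated frames could look like (the 'fc2' layer, the 224x224 resize, and the helper name are assumptions, not code from VideoCaptioningPreProcessing.py):

import glob
import numpy as np
from keras.applications.vgg16 import VGG16, preprocess_input
from keras.preprocessing import image
from keras.models import Model

def extract_frame_features(frames_dir):
    # Use the fully connected 'fc2' layer output as a per-frame feature vector
    base = VGG16(weights='imagenet')
    model = Model(inputs=base.input, outputs=base.get_layer('fc2').output)
    features = []
    for frame_path in sorted(glob.glob(frames_dir + '/*.jpg')):
        img = image.load_img(frame_path, target_size=(224, 224))
        x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
        features.append(model.predict(x)[0])  # one 4096-d vector per frame
    return np.array(features)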

Example 6: resnet_pseudo

# Required import: from keras.applications import vgg16 [as alias]
# Or: from keras.applications.vgg16 import VGG16 [as alias]
def resnet_pseudo(self,dim=224,freeze_layers=10,full_freeze='N'):
		model = ResNet50(weights='imagenet',include_top=False)
		x = model.output
		x = GlobalAveragePooling2D()(x)
		x = Dense(512, activation='relu')(x)
		x = Dropout(0.5)(x)
		x = Dense(512, activation='relu')(x)
		x = Dropout(0.5)(x)
		out = Dense(5,activation='softmax')(x)
		model_final = Model(inputs=model.input, outputs=out)
		if full_freeze != 'N':
			for layer in model.layers[0:freeze_layers]:
				layer.trainable = False
		return model_final

	# VGG16 Model for transfer Learning 
Author: PacktPublishing, Project: Intelligent-Projects-Using-Python, Lines: 18, Source: TransferLearning.py
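
The trailing comment indicates that the truncated continuation defines a VGG16 counterpart of resnet_pseudo for transfer learning. A hedged sketch of what such a function plausibly looks like, mirroring the ResNet50 version above (the function name and the 5-class softmax head are assumptions, not the original TransferLearning.py code):

from keras.applications.vgg16 import VGG16
from keras.layers import Dense, Dropout, GlobalAveragePooling2D
from keras.models import Model

def VGG16_pseudo(dim=224, freeze_layers=10, full_freeze='N'):
    # Pre-trained convolutional base without the ImageNet classifier head
    model = VGG16(weights='imagenet', include_top=False)
    x = GlobalAveragePooling2D()(model.output)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    out = Dense(5, activation='softmax')(x)
    model_final = Model(inputs=model.input, outputs=out)
    if full_freeze != 'N':
        # Freeze the first freeze_layers layers of the VGG16 base
        for layer in model.layers[0:freeze_layers]:
            layer.trainable = False
    return model_final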

Example 7: resnet_pseudo

# Required import: from keras.applications import vgg16 [as alias]
# Or: from keras.applications.vgg16 import VGG16 [as alias]
def resnet_pseudo(self,dim=224,freeze_layers=10,full_freeze='N'):
		model = ResNet50(weights='imagenet',include_top=False)
		x = model.output
		x = GlobalAveragePooling2D()(x)
		x = Dense(512, activation='relu')(x)
		x = Dropout(0.5)(x)
		x = Dense(512, activation='relu')(x)
		x = Dropout(0.5)(x)
		out = Dense(1)(x)
		model_final = Model(inputs=model.input, outputs=out)
		if full_freeze != 'N':
			for layer in model.layers[0:freeze_layers]:
				layer.trainable = False
		return model_final

	# VGG16 Model for transfer Learning 
Author: PacktPublishing, Project: Intelligent-Projects-Using-Python, Lines: 18, Source: TransferLearning_reg.py

Example 8: resnet_pseudo

# Required import: from keras.applications import vgg16 [as alias]
# Or: from keras.applications.vgg16 import VGG16 [as alias]
def resnet_pseudo(self,dim=224,freeze_layers=10,full_freeze='N'):
        model = ResNet50(weights='imagenet',include_top=False)
        x = model.output
        x = GlobalAveragePooling2D()(x)
        x = Dense(512, activation='relu')(x)
        x = Dropout(0.5)(x)
        x = Dense(512, activation='relu')(x)
        x = Dropout(0.5)(x)
        out = Dense(5,activation='softmax')(x)
        model_final = Model(inputs=model.input, outputs=out)
        if full_freeze != 'N':
            for layer in model.layers[0:freeze_layers]:
                layer.trainable = False
        return model_final

# VGG16 Model for transfer Learning 
Author: PacktPublishing, Project: Intelligent-Projects-Using-Python, Lines: 18, Source: TransferLearning_ffd.py

Example 9: _model_backbone_headless

# Required import: from keras.applications import vgg16 [as alias]
# Or: from keras.applications.vgg16 import VGG16 [as alias]
def _model_backbone_headless(self):
        if self.config.backbone_nn_type == 'vgg':
            model = VGG16(weights='imagenet', include_top=False)
            # Drop the pooling layer that follows the convolutional blocks
            # https://github.com/keras-team/keras/issues/2371
            # https://github.com/keras-team/keras/issues/6229
            # http://forums.fast.ai/t/how-to-finetune-with-new-keras-api/2328/9
            model.layers.pop()
        else:
            model = ResNet50(weights='imagenet', include_top=False)
        # The VGG weights are excluded from training (frozen)
        for layer in model.layers:
            layer.trainable = False
        output = model.layers[-1].output
        _input = model.input
        return _input, output 
Author: shtamura, Project: maskrcnn, Lines: 18, Source: frcnn.py

Example 10: fTrainInner

# Required import: from keras.applications import vgg16 [as alias]
# Or: from keras.applications.vgg16 import VGG16 [as alias]
def fTrainInner(X_train, y_train, X_test, y_test, sOutPath, patchSize, batchSize=None, learningRate=None,iEpochs=None):
    # parse inputs
    batchSize = 64 if batchSize is None else batchSize
    learningRate = 0.01 if learningRate is None else learningRate
    iEpochs = 300 if iEpochs is None else iEpochs

    print('Training(pre) CNN (VGGNet)')
    print('with lr = ' + str(learningRate) + ' , batchSize = ' + str(batchSize))

    # build model
    base = VGG16(include_top=False, weights=None, input_shape=(1, 180, 180))

    top_model = Sequential()
    top_model.add(Flatten(input_shape=base.output_shape[1:]))
    top_model.add(Dense(11, activation='softmax'))
    # top_model.load_weights('fc_model.h5')
    # VGG16 is a functional Model and has no .add(); wire the top model onto its output
    # (assumes `from keras.models import Model` is available in this module)
    model = Model(inputs=base.input, outputs=top_model(base.output))

    # save names
    _, sPath = os.path.splitdrive(sOutPath)
Author: thomaskuestner, Project: CNNArt, Lines: 22, Source: multiclass_VGGNet.py

Example 11: deprocess_image

# Required import: from keras.applications import vgg16 [as alias]
# Or: from keras.applications.vgg16 import VGG16 [as alias]
def deprocess_image(x):
    # normalize tensor: center on 0., ensure std is 0.1
    x -= x.mean()
    x /= (x.std() + K.epsilon())
    x *= 0.1

    # clip to [0, 1]
    x += 0.5
    x = np.clip(x, 0, 1)

    # convert to RGB array
    x *= 255
    if K.image_data_format() == 'channels_first':
        x = x.transpose((1, 2, 0))
    x = np.clip(x, 0, 255).astype('uint8')
    return x

# build the VGG16 network with ImageNet weights 
Author: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines: 20, Source: conv_filter_visualization.py
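
The snippet stops at a comment announcing that the VGG16 network is built next. In the stock Keras conv_filter_visualization example this step loads VGG16 without the classifier and indexes its layers by name so individual filters can be selected for gradient ascent; a sketch under that assumption:

from keras.applications import vgg16

# build the VGG16 network with ImageNet weights, without the dense classifier
model = vgg16.VGG16(weights='imagenet', include_top=False)
model.summary()

# map layer names to layer objects so a filter in e.g. 'block5_conv1' can be targeted
layer_dict = dict([(layer.name, layer) for layer in model.layers[1:]])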

Example 12: deprocess_image

# Required import: from keras.applications import vgg16 [as alias]
# Or: from keras.applications.vgg16 import VGG16 [as alias]
def deprocess_image(x):
    # normalize tensor: center on 0., ensure std is 0.1
    x -= x.mean()
    x /= (x.std() + 1e-5)
    x *= 0.1

    # clip to [0, 1]
    x += 0.5
    x = np.clip(x, 0, 1)

    # convert to RGB array
    x *= 255
    if K.image_data_format() == 'channels_first':
        x = x.transpose((1, 2, 0))
    x = np.clip(x, 0, 255).astype('uint8')
    return x

# build the VGG16 network with ImageNet weights 
Author: xjtushilei, Project: pCVR, Lines: 20, Source: conv_filter_visualization.py

Example 13: __init__

# Required import: from keras.applications import vgg16 [as alias]
# Or: from keras.applications.vgg16 import VGG16 [as alias]
def __init__(self):
        self.input_img = Input(name='input_img',
                               shape=(None, None, cfg.num_channels),
                               dtype='float32')
        vgg16 = VGG16(input_tensor=self.input_img,
                      weights='imagenet',
                      include_top=False)
        if cfg.locked_layers:
            # locked first two conv layers
            locked_layers = [vgg16.get_layer('block1_conv1'),
                             vgg16.get_layer('block1_conv2')]
            for layer in locked_layers:
                layer.trainable = False
        self.f = [vgg16.get_layer('block%d_pool' % i).output
                  for i in cfg.feature_layers_range]
        self.f.insert(0, None)
        self.diff = cfg.feature_layers_range[0] - cfg.feature_layers_num 
Author: huoyijie, Project: AdvancedEAST, Lines: 19, Source: network.py

Example 14: create_model

# Required import: from keras.applications import vgg16 [as alias]
# Or: from keras.applications.vgg16 import VGG16 [as alias]
def create_model(input_shape: tuple, nb_classes: int, init_with_imagenet: bool = False, learning_rate: float = 0.01):
    weights = None
    if init_with_imagenet:
        weights = "imagenet"

    model = VGG16(input_shape=input_shape,
                  classes=nb_classes,
                  weights=weights,
                  include_top=False)
    # "Shallow" VGG for Cifar10
    x = model.get_layer('block3_pool').output
    x = layers.Flatten(name='Flatten')(x)
    x = layers.Dense(512, activation='relu')(x)
    x = layers.Dense(nb_classes)(x)
    x = layers.Softmax()(x)
    model = models.Model(model.input, x)

    loss = losses.categorical_crossentropy
    optimizer = optimizers.SGD(lr=learning_rate, decay=0.99)

    model.compile(optimizer, loss, metrics=["accuracy"])
    return model 
Author: gaborvecsei, Project: Federated-Learning-Mini-Framework, Lines: 24, Source: models.py
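
For context, a hypothetical invocation matching the CIFAR-10 use hinted at by the comments (the 32x32x3 input shape, 10 classes, and training settings are assumptions; create_model above is assumed to be in scope):

from keras.datasets import cifar10
from keras.utils import to_categorical

(x_train, y_train), (x_test, y_test) = cifar10.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
y_train, y_test = to_categorical(y_train, 10), to_categorical(y_test, 10)

model = create_model(input_shape=(32, 32, 3), nb_classes=10, init_with_imagenet=True)
model.fit(x_train, y_train, batch_size=64, epochs=1, validation_data=(x_test, y_test))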

Example 15: load_model

# Required import: from keras.applications import vgg16 [as alias]
# Or: from keras.applications.vgg16 import VGG16 [as alias]
def load_model(self, config_file_path, weight_file_path):

        config = np.load(config_file_path, allow_pickle=True).item()
        self.num_input_tokens = config['num_input_tokens']
        self.nb_classes = config['nb_classes']
        self.labels = config['labels']
        self.expected_frames = config['expected_frames']
        self.vgg16_include_top = config['vgg16_include_top']
        self.labels_idx2word = dict([(idx, word) for word, idx in self.labels.items()])

        self.model = self.create_model()
        self.model.load_weights(weight_file_path)

        vgg16_model = VGG16(include_top=self.vgg16_include_top, weights='imagenet')
        vgg16_model.compile(optimizer=SGD(), loss='categorical_crossentropy', metrics=['accuracy'])
        self.vgg16_model = vgg16_model 
Author: chen0040, Project: keras-video-classifier, Lines: 18, Source: recurrent_networks.py


Note: The keras.applications.vgg16.VGG16 attribute examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.