

Python vgg19.VGG19 Usage Examples

This article collects typical usage examples of keras.applications.vgg19.VGG19 in Python. If you are wondering how vgg19.VGG19 is used in practice, or are looking for real-world examples of it, the curated code examples below may help. You can also explore further usage examples from the keras.applications.vgg19 module.


Below are 15 code examples of vgg19.VGG19, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
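Before diving into the examples, here is a minimal, self-contained sketch of the most common VGG19 call pattern (classifying a single image with the full ImageNet head). The file name 'elephant.jpg' is only a placeholder and not part of the examples below.

# Minimal sketch: classify one image with a pre-trained VGG19.
import numpy as np
from keras.applications.vgg19 import VGG19, preprocess_input, decode_predictions
from keras.preprocessing import image

model = VGG19(weights='imagenet')                       # full model, including the ImageNet classifier head
img = image.load_img('elephant.jpg', target_size=(224, 224))
x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
preds = model.predict(x)
print(decode_predictions(preds, top=3)[0])              # top-3 (class_id, class_name, probability) tuples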

Example 1: load_img

# Required import: from keras.applications import vgg19 [as alias]
# Or: from keras.applications.vgg19 import VGG19 [as alias]
def load_img(path_to_img):

  max_dim  = 512
  img      = Image.open(path_to_img)
  img_size = max(img.size)
  scale    = max_dim/img_size
  img      = img.resize((round(img.size[0]*scale), round(img.size[1]*scale)), Image.ANTIALIAS)

  img      = kp_image.img_to_array(img)

  # We need to broadcast the image array such that it has a batch dimension 
  img = np.expand_dims(img, axis=0)

  # Preprocess the raw image array so it is suitable as input to the VGG19 model
  out = tf.keras.applications.vgg19.preprocess_input(img)

  return tf.convert_to_tensor(out) 
Developer: Shashi456, Project: Neural-Style, Lines of code: 19, Source file: train_TensorFlow.py

Example 2: VGG_19

# Required import: from keras.applications import vgg19 [as alias]
# Or: from keras.applications.vgg19 import VGG19 [as alias]
def VGG_19(self, nOutput, input):

        # Define inputs and outputs IDs
        self.ids_inputs = ['input_1']
        self.ids_outputs = ['predictions']

        # Load VGG19 model pre-trained on ImageNet
        self.model = VGG19()

        # Recover input layer
        image = self.model.get_layer(self.ids_inputs[0]).output

        # Recover last layer kept from original model
        out = self.model.get_layer('fc2').output
        out = Dense(nOutput, name=self.ids_outputs[0], activation='softmax')(out)

        self.model = Model(input=image, output=out) 
Developer: sheffieldnlp, Project: deepQuest, Lines of code: 19, Source file: cnn_model-predictor.py

Example 3: VGG_19_ImageNet

# Required import: from keras.applications import vgg19 [as alias]
# Or: from keras.applications.vgg19 import VGG19 [as alias]
def VGG_19_ImageNet(self, nOutput, input):

        # Define inputs and outputs IDs
        self.ids_inputs = ['input_1']
        self.ids_outputs = ['predictions']

        # Load VGG19 model pre-trained on ImageNet
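        # Note: layers_lr is not a parameter of the stock keras.applications VGG19;
        # this project appears to rely on a customized Keras build that supports it.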
        self.model = VGG19(weights='imagenet', layers_lr=0.001)

        # Recover input layer
        image = self.model.get_layer(self.ids_inputs[0]).output

        # Recover last layer kept from original model
        out = self.model.get_layer('fc2').output
        out = Dense(nOutput, name=self.ids_outputs[0], activation='softmax')(out)

        self.model = Model(input=image, output=out)

    ########################################
    # GoogLeNet implementation from http://dandxy89.github.io/ImageModels/googlenet/
    ######################################## 
Developer: sheffieldnlp, Project: deepQuest, Lines of code: 23, Source file: cnn_model-predictor.py

Example 4: _get_base_model

# Required import: from keras.applications import vgg19 [as alias]
# Or: from keras.applications.vgg19 import VGG19 [as alias]
def _get_base_model(self):
        """
        :return: base model from Keras based on user-supplied model name
        """
        if self.model_name == 'inception_v3':
            return InceptionV3(weights='imagenet', include_top=False)
        elif self.model_name == 'xception':
            return Xception(weights='imagenet', include_top=False)
        elif self.model_name == 'vgg16':
            return VGG16(weights='imagenet', include_top=False)
        elif self.model_name == 'vgg19':
            return VGG19(weights='imagenet', include_top=False)
        elif self.model_name == 'resnet50':
            return ResNet50(weights='imagenet', include_top=False)
        else:
            raise ValueError('Cannot find base model %s' % self.model_name) 
Developer: thoughtworksarts, Project: EmoPy, Lines of code: 18, Source file: neuralnets.py

Example 5: extra_feat

# Required import: from keras.applications import vgg19 [as alias]
# Or: from keras.applications.vgg19 import VGG19 [as alias]
def extra_feat(img_path):
        # Using a VGG19 as feature extractor
        base_model = VGG19(weights='imagenet', include_top=False)
        img = image.load_img(img_path, target_size=(224, 224))
        x = image.img_to_array(img)
        x = np.expand_dims(x, axis=0)
        x = preprocess_input(x)
        block1_pool_features = get_activations(base_model, 3, x)
        block2_pool_features = get_activations(base_model, 6, x)
        block3_pool_features = get_activations(base_model, 10, x)
        block4_pool_features = get_activations(base_model, 14, x)
        block5_pool_features = get_activations(base_model, 18, x)

        # Upsample all pooled feature maps to a common 112x112 resolution
        x1 = tf.image.resize_images(block1_pool_features[0], [112, 112])
        x2 = tf.image.resize_images(block2_pool_features[0], [112, 112])
        x3 = tf.image.resize_images(block3_pool_features[0], [112, 112])
        x4 = tf.image.resize_images(block4_pool_features[0], [112, 112])
        x5 = tf.image.resize_images(block5_pool_features[0], [112, 112])

        # Concatenate along the channel axis. Change to only x1, then x1+x2, x1+x2+x3, and so on,
        # in order to visualize features from different blocks.
        F = tf.concat([x3, x2, x1, x4, x5], 3)
        return F
Developer: vbhavank, Project: Unstructured-change-detection-using-CNN, Lines of code: 23, Source file: feat.py

Example 6: model

# Required import: from keras.applications import vgg19 [as alias]
# Or: from keras.applications.vgg19 import VGG19 [as alias]
def model(self, preprocessed, featurize):
        # Model provided by Keras. All contributions by Keras are provided subject to the
        # MIT license located at https://github.com/fchollet/keras/blob/master/LICENSE
        # and subject to the below additional copyrights and licenses.
        #
        # Copyright 2014 Oxford University
        #
        # Licensed under the Creative Commons Attribution License CC BY 4.0 ("License").
        # You may obtain a copy of the License at
        #
        #     https://creativecommons.org/licenses/by/4.0/
        #
        return vgg19.VGG19(input_tensor=preprocessed, weights="imagenet",
                           include_top=(not featurize)) 
Developer: databricks, Project: spark-deep-learning, Lines of code: 16, Source file: keras_applications.py

Example 7: _testKerasModel

# Required import: from keras.applications import vgg19 [as alias]
# Or: from keras.applications.vgg19 import VGG19 [as alias]
def _testKerasModel(self, include_top):
        return vgg19.VGG19(weights="imagenet", include_top=include_top) 
Developer: databricks, Project: spark-deep-learning, Lines of code: 4, Source file: keras_applications.py

Example 8: build_vgg

# Required import: from keras.applications import vgg19 [as alias]
# Or: from keras.applications.vgg19 import VGG19 [as alias]
def build_vgg():
    vgg_model = VGG19(include_top=False, weights='imagenet')
    vgg_model.trainable = False
    return Model(inputs=vgg_model.input, outputs=vgg_model.get_layer('block3_conv4').output) 
Developer: Lvfeifan, Project: MBLLEN, Lines of code: 6, Source file: Network.py
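A frozen feature extractor like the one returned by build_vgg is typically used as a perceptual (feature-space) loss. The snippet below is only a hedged usage sketch, not code from the MBLLEN project: it compares block3_conv4 activations of a predicted and a target image.

# Hedged usage sketch: perceptual loss on top of build_vgg(); illustration only.
import keras.backend as K

vgg = build_vgg()

def perceptual_loss(y_true, y_pred):
    # Mean absolute difference between VGG19 block3_conv4 activations
    return K.mean(K.abs(vgg(y_true) - vgg(y_pred)))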

Example 9: compute_loss

# Required import: from keras.applications import vgg19 [as alias]
# Or: from keras.applications.vgg19 import VGG19 [as alias]
def compute_loss(model, loss_weights, generated_output_activations, gram_style_features, content_features, num_content_layers, num_style_layers):

  generated_content_activations = generated_output_activations[:num_content_layers]
  generated_style_activations   = generated_output_activations[num_content_layers:]

  style_weight, content_weight = loss_weights
  
  style_score = 0
  content_score = 0

  # Accumulate style losses from all layers
  # Here, we equally weight each contribution of each loss layer
  weight_per_style_layer = 1.0 / float(num_style_layers)
  for target_style, comb_style in zip(gram_style_features, generated_style_activations):
    temp = get_style_loss(comb_style[0], target_style)
    style_score += weight_per_style_layer * temp
    
  # Accumulate content losses from all layers 
  weight_per_content_layer = 1.0 / float(num_content_layers)
  for target_content, comb_content in zip(content_features, generated_content_activations):
    temp = get_content_loss(comb_content[0], target_content)
    content_score += weight_per_content_layer* temp

  # Get total loss
  loss = style_weight*style_score + content_weight*content_score 


  return loss, style_score, content_score

############################################################################################################
############################################################################################################
#                                    CREATE STYLE TRANSFER
############################################################################################################
############################################################################################################


# Load the VGG19 model using Keras
Developer: Shashi456, Project: Neural-Style, Lines of code: 39, Source file: train_TensorFlow.py
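The compute_loss function above calls get_style_loss and get_content_loss, which are defined elsewhere in the project and not reproduced on this page. As a hedged sketch only (the project's actual implementation may differ), the standard Gram-matrix formulation of these helpers looks roughly like this:

# Hedged sketch of the helpers referenced above; not the original project's code.
import tensorflow as tf

def gram_matrix(activations):
    # Flatten the spatial dimensions and compute channel-to-channel correlations
    channels = int(activations.shape[-1])
    a = tf.reshape(activations, [-1, channels])
    n = tf.shape(a)[0]
    return tf.matmul(a, a, transpose_a=True) / tf.cast(n, tf.float32)

def get_style_loss(generated_activations, gram_target):
    # Mean squared difference between Gram matrices
    return tf.reduce_mean(tf.square(gram_matrix(generated_activations) - gram_target))

def get_content_loss(generated_activations, target_activations):
    # Mean squared difference between raw activations
    return tf.reduce_mean(tf.square(generated_activations - target_activations))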

Example 10: get_model

# Required import: from keras.applications import vgg19 [as alias]
# Or: from keras.applications.vgg19 import VGG19 [as alias]
def get_model(content_layers,style_layers):

  # Load the VGG19 architecture. Note that weights=None here, so this call itself
  # does not download the pre-trained ImageNet weights.
  vgg19           = VGG19(weights=None, include_top=False)

  # We don't need to (or want to) train any layers of the VGG model, so we set its trainable attribute to False.
  vgg19.trainable = False

  style_model_outputs   =  [vgg19.get_layer(name).output for name in style_layers]
  content_model_outputs =  [vgg19.get_layer(name).output for name in content_layers]
  
  model_outputs = content_model_outputs + style_model_outputs

  # Build model 
  return Model(inputs = vgg19.input, outputs = model_outputs),  vgg19 
Developer: Shashi456, Project: Neural-Style, Lines of code: 17, Source file: train_TensorFlow.py

Example 11: vgg_norm

# Required import: from keras.applications import vgg19 [as alias]
# Or: from keras.applications.vgg19 import VGG19 [as alias]
def vgg_norm():
    img_input = Input(shape=(256, 256, 3))
    x1 = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(img_input)
    x2 = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x1)
    x3 = AveragePooling2D((2, 2), strides=(2, 2), name='block1_pool')(x2)

    x4 = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x3)
    x5 = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x4)
    x6 = AveragePooling2D((2, 2), strides=(2, 2), name='block2_pool')(x5)

    x7 = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x6)
    x8 = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x7)
    x9 = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x8)
    x10 = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv4')(x9)
    x11 = AveragePooling2D((2, 2), strides=(2, 2), name='block3_pool')(x10)

    x12 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x11)
    x13 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x12)
    x14 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x13)
    x15 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv4')(x14)
    x16 = AveragePooling2D((2, 2), strides=(2, 2), name='block4_pool')(x15)

    x17 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x16)
    x18 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x17)
    x19 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x18)
    x20 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv4')(x19)
    x21 = AveragePooling2D((2, 2), strides=(2, 2), name='block5_pool')(x20)

    model = Model(inputs=[img_input], outputs=[x1, x2, x4, x5, x7, x8, x9, x10, x12, x13, x14, x15])
    model_orig = VGG19(weights='imagenet', input_shape=(256, 256, 3), include_top=False)

    for i in range(len(model.layers)):
        weights = model_orig.layers[i].get_weights()
        model.layers[i].set_weights(weights)

    return model 
Developer: balakg, Project: posewarp-cvpr2018, Lines of code: 38, Source file: truncated_vgg.py

Example 12: create_vgg19_network

# Required import: from keras.applications import vgg19 [as alias]
# Or: from keras.applications.vgg19 import VGG19 [as alias]
def create_vgg19_network(input_shape, weights):
    base_model = VGG19(input_shape=input_shape, weights=weights)
    return Model(inputs=base_model.input, outputs=base_model.get_layer('fc2').output) 
Developer: marco-c, Project: autowebcompat, Lines of code: 5, Source file: network.py

Example 13: get_model_pretrain

# Required import: from keras.applications import vgg19 [as alias]
# Or: from keras.applications.vgg19 import VGG19 [as alias]
def get_model_pretrain(arch):
    modlrate = 1
    if   "VGG16" in arch:       base_model = vgg16.VGG16
    elif "VGG19" in arch:       base_model = vgg19.VGG19
    elif "RESNET50" in arch:    base_model = resnet50.ResNet50
    elif "DENSENET121" in arch: base_model = densenet.DenseNet121
    elif "MOBILENET" in arch:
        base_model = mobilenet.MobileNet
        modlrate = 10
    else: print("model not available"); exit()
    base_model = base_model(weights='imagenet', include_top=False)
    return base_model, modlrate 
Developer: mhaut, Project: hyperspectral_deeplearning_review, Lines of code: 14, Source file: pretrain_imagenet_cnn.py

Example 14: extract_VGG19

# Required import: from keras.applications import vgg19 [as alias]
# Or: from keras.applications.vgg19 import VGG19 [as alias]
def extract_VGG19(tensor):
	from keras.applications.vgg19 import VGG19, preprocess_input
	return VGG19(weights='imagenet', include_top=False).predict(preprocess_input(tensor)) 
Developer: kubeflow-kale, Project: kale, Lines of code: 5, Source file: extract_bottleneck_features.py
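A hedged usage sketch for extract_VGG19: it expects a 4-D image tensor already resized to 224x224 (preprocess_input is applied inside the function). 'example.jpg' below is only a placeholder path.

import numpy as np
from keras.preprocessing import image

img = image.load_img('example.jpg', target_size=(224, 224))
tensor = np.expand_dims(image.img_to_array(img), axis=0)   # shape (1, 224, 224, 3)
features = extract_VGG19(tensor)
print(features.shape)                                      # typically (1, 7, 7, 512) for include_top=False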

Example 15: get_model

# Required import: from keras.applications import vgg19 [as alias]
# Or: from keras.applications.vgg19 import VGG19 [as alias]
def get_model(weights_path=None):
    
    ## [17-june-2018]Use residual after this
    input_tensor = Input(shape=(448,448,3))
    base_model = VGG19(weights='imagenet', include_top=False, input_tensor=input_tensor)
    #base_model.summary()
    for layer in base_model.layers:
        layer.trainable = False
        
    model = Model(input=base_model.input, output=base_model.get_layer('block5_pool').output)  
    #model.summary()
    #model = VGG19(weights_path)
    #model.summary()
    return model 
Developer: channelCS, Project: Audio-Vision, Lines of code: 16, Source file: extract_features.py


Note: The keras.applications.vgg19.VGG19 examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and redistribution or use should follow the license of the corresponding project. Please do not reproduce this article without permission.