

Python core.Reshape Method Code Examples

This article collects typical usage examples of the Python method keras.layers.core.Reshape, gathered from open-source projects. If you are unsure what core.Reshape does, how to call it, or simply want to see working examples, the curated code samples below should help. You can also explore further usage examples from the keras.layers.core module.


A total of 15 code examples of the core.Reshape method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
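Before turning to the collected examples, here is a minimal, self-contained sketch of the layer itself. It is written against the current tf.keras API rather than taken from any of the projects below, so the import path and the concrete shapes are illustrative assumptions: Reshape takes a single target_shape tuple that excludes the batch dimension, and it only rearranges the existing elements, so the number of elements per sample must stay the same.

import numpy as np
from tensorflow.keras import layers, models

# (batch, 12) -> (batch, 3, 4) -> (batch, 6, 2): the 12 elements per sample are
# only rearranged; any target_shape whose product is still 12 is valid.
model = models.Sequential([
    layers.Input(shape=(12,)),
    layers.Reshape((3, 4)),
    layers.Reshape((6, 2)),
])

print(model.output_shape)                        # (None, 6, 2)
print(model.predict(np.zeros((2, 12))).shape)    # (2, 6, 2)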

Example 1: textual_embedding

# Required import: from keras.layers import core [as alias]
# Or: from keras.layers.core import Reshape [as alias]
def textual_embedding(self, language_model, mask_zero):
        """
        Note:
        * mask_zero only makes sense if embedding is learnt
        """
        if self._config.textual_embedding_dim > 0:
            print('Textual Embedding is on')
            language_model.add(Embedding(
                self._config.input_dim, 
                self._config.textual_embedding_dim, 
                mask_zero=mask_zero))
        else:
            print('Textual Embedding is off')
            language_model.add(Reshape(
                input_shape=(self._config.max_input_time_steps, self._config.input_dim),
                dims=(self._config.max_input_time_steps, self._config.input_dim)))
            if mask_zero:
                language_model.add(Masking(0))
        return language_model 
Author: mateuszmalinowski, Project: visual_turing_test-tutorial, Lines: 21, Source: model_zoo.py
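Note: the Reshape(input_shape=..., dims=...) keyword form used here (and in Examples 2 and 8) belongs to an old, pre-1.0 Keras API. In Keras 2.x the layer takes a single target_shape tuple instead, so a rough modern equivalent of this call would be Reshape((max_input_time_steps, input_dim), input_shape=(max_input_time_steps, input_dim)).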

Example 2: textual_embedding_fixed_length

# Required import: from keras.layers import core [as alias]
# Or: from keras.layers.core import Reshape [as alias]
def textual_embedding_fixed_length(self, language_model, mask_zero):
        """
        In contrast to textual_embedding, it produces a fixed length output.
        """
        if self._config.textual_embedding_dim > 0:
            print('Textual Embedding with fixed length is on')
            language_model.add(Embedding(
                self._config.input_dim, 
                self._config.textual_embedding_dim,
                input_length=self._config.max_input_time_steps,
                mask_zero=mask_zero))
        else:
            print('Textual Embedding with fixed length is off')
            language_model.add(Reshape(
                input_shape=(self._config.max_input_time_steps, self._config.input_dim),
                dims=(self._config.max_input_time_steps, self._config.input_dim)))
            if mask_zero:
                language_model.add(Masking(0))
        return language_model 
Author: mateuszmalinowski, Project: visual_turing_test-tutorial, Lines: 21, Source: model_zoo.py

Example 3: build_resnet_generator

# Required import: from keras.layers import core [as alias]
# Or: from keras.layers.core import Reshape [as alias]
def build_resnet_generator(input_shape, n_filters, n_residual_blocks,
                           seq_len, vocabulary_size):
    inputs = Input(shape=input_shape)

    # Dense 1: 1 x seq_len x n_filters
    x = Dense(1 * seq_len * n_filters, input_shape=input_shape)(inputs)
    x = Reshape((1, seq_len, n_filters))(x)

    # ResNet blocks
    x = resnet_block(x, n_residual_blocks, n_filters)

    # Output layer
    x = Conv2D(filters=vocabulary_size, kernel_size=1, padding='same')(x)
    x = Softmax(axis=3)(x)

    # create model graph
    model = Model(inputs=inputs, outputs=x, name='Generator')

    print("\nGenerator ResNet")
    model.summary()
    return model 
Author: PacktPublishing, Project: Hands-On-Generative-Adversarial-Networks-with-Keras, Lines: 23, Source: models.py
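Design note: the Dense + Reshape pair at the top of this generator is the usual way to map a flat latent vector onto an initial feature map: Dense emits 1 * seq_len * n_filters activations and Reshape((1, seq_len, n_filters)) lays them out as a 1 x seq_len grid with n_filters channels for the ResNet blocks that follow. The CIFAR-10 generator in the next example does the same with a 4 x 4 grid.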

Example 4: build_cifar10_resnet_generator

# Required import: from keras.layers import core [as alias]
# Or: from keras.layers.core import Reshape [as alias]
def build_cifar10_resnet_generator(input_shape, n_filters, n_residual_blocks,
                                   n_channels):
    """ adapted from wgan-gp """

    inputs = Input(shape=input_shape)

    # Dense 1: 4 x 4 x n_filters
    x = Dense(4 * 4 * n_filters, input_shape=input_shape)(inputs)
    x = Reshape((4, 4, n_filters))(x)

    # ResNet blocks
    x = resnet_block_generator(x, n_residual_blocks, n_filters)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.2)(x)

    # Output layer
    x = Conv2D(filters=n_channels, kernel_size=(3, 3), padding='same')(x)
    x = Activation('tanh')(x)

    # create model graph
    model = Model(inputs=inputs, outputs=x, name='Generator')

    print("\nGenerator ResNet")
    model.summary()
    return model 
Author: PacktPublishing, Project: Hands-On-Generative-Adversarial-Networks-with-Keras, Lines: 27, Source: resnet.py

Example 5: call

# Required import: from keras.layers import core [as alias]
# Or: from keras.layers.core import Reshape [as alias]
def call(self, inputs):
        rois = inputs[0]
        mrcnn_class = inputs[1]
        mrcnn_bbox = inputs[2]
        image_meta = inputs[3]

        # Run detection refinement graph on each item in the batch
        _, _, window, _ = parse_image_meta_graph(image_meta)
        detections_batch = utils.batch_slice(
            [rois, mrcnn_class, mrcnn_bbox, window],
            lambda x, y, w, z: refine_detections_graph(x, y, w, z, self.config),
            self.config.IMAGES_PER_GPU)

        # Reshape output
        # [batch, num_detections, (y1, x1, y2, x2, class_id, class_score)] in pixels
        return tf.reshape(
            detections_batch,
            [self.config.BATCH_SIZE, self.config.DETECTION_MAX_INSTANCES, 6]) 
Author: DeepinSC, Project: PyTorch-Luna16, Lines: 20, Source: MaskRCNN.py

Example 6: build

# Required import: from keras.layers import core [as alias]
# Or: from keras.layers.core import Reshape [as alias]
def build(nc, w, h,
          loss='categorical_crossentropy',
          optimizer='adam',
          **kwargs):
    data_shape = w * h if None not in (w, h) else -1  # TODO: -1 or None?
    inp = Input(shape=(h, w, 3))
    enet = encoder.build(inp)
    enet = decoder.build(enet, nc=nc)
    name = 'enet_naive_upsampling'

    enet = Reshape((data_shape, nc))(enet)  # TODO: need to remove data_shape for multi-scale training

    enet = Activation('softmax')(enet)
    model = Model(inputs=inp, outputs=enet)

    model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy', 'mean_squared_error'])

    return model, name 
Author: PavlosMelissinos, Project: enet-keras, Lines: 20, Source: model.py
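In this ENet builder (and the enet_unpooling variant in the next example), Reshape((data_shape, nc)) flattens the h x w spatial grid into a single axis of w * h positions so the final softmax is applied independently at every pixel over the nc classes; the training targets presumably have to be flattened to the same (w * h, nc) layout to match.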

Example 7: build

# Required import: from keras.layers import core [as alias]
# Or: from keras.layers.core import Reshape [as alias]
def build(nc, w, h,
          loss='categorical_crossentropy',
          # optimizer='adadelta'):
          optimizer='adam',
          metrics=None,
          **kwargs):
    data_shape = w * h if None not in (w, h) else -1  # TODO: -1 or None?
    inp = Input(shape=(h, w, 3), name='image')
    enet = encoder.build(inp)
    enet = decoder.build(enet, nc=nc)
    name = 'enet_unpooling'

    # TODO: need to remove data_shape for multi-scale training
    enet = Reshape((data_shape, nc))(enet)

    enet = Activation('softmax', name='output')(enet)
    model = Model(inputs=inp, outputs=enet)

    if metrics is None:
        metrics = ['accuracy']
    model.compile(optimizer=optimizer, loss=loss, metrics=metrics)

    return model, name 
Author: PavlosMelissinos, Project: enet-keras, Lines: 25, Source: model.py

Example 8: create

# Required import: from keras.layers import core [as alias]
# Or: from keras.layers.core import Reshape [as alias]
def create(self):
        model = Sequential()
        model.add(Reshape(
            input_shape=(self._visual_dim,),
            dims=(self._visual_dim,)))
        return model 
Author: mateuszmalinowski, Project: visual_turing_test-tutorial, Lines: 8, Source: visual_model_zoo.py

Example 9: bbox_3D_net

# Required import: from keras.layers import core [as alias]
# Or: from keras.layers.core import Reshape [as alias]
def bbox_3D_net(input_shape=(224, 224, 3), vgg_weights=None, freeze_vgg=False, bin_num=6):
    vgg16_model = VGG16(include_top=False, weights=vgg_weights, input_shape=input_shape)

    if freeze_vgg:
        for layer in vgg16_model.layers:
            layer.trainable = False

    x = Flatten()(vgg16_model.output)

    dimension = Dense(512)(x)
    dimension = LeakyReLU(alpha=0.1)(dimension)
    dimension = Dropout(0.5)(dimension)
    dimension = Dense(3)(dimension)
    dimension = LeakyReLU(alpha=0.1, name='dimension')(dimension)

    orientation = Dense(256)(x)
    orientation = LeakyReLU(alpha=0.1)(orientation)
    orientation = Dropout(0.5)(orientation)
    orientation = Dense(bin_num * 2)(orientation)
    orientation = LeakyReLU(alpha=0.1)(orientation)
    orientation = Reshape((bin_num, -1))(orientation)
    orientation = Lambda(l2_normalize, name='orientation')(orientation)

    confidence = Dense(256)(x)
    confidence = LeakyReLU(alpha=0.1)(confidence)
    confidence = Dropout(0.5)(confidence)
    confidence = Dense(bin_num, activation='softmax', name='confidence')(confidence)

    model = Model(vgg16_model.input, outputs=[dimension, orientation, confidence])
    return model 
Author: cersar, Project: 3D_detection, Lines: 32, Source: bbox_3D_net.py
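Design note: Dense(bin_num * 2) followed by Reshape((bin_num, -1)) regroups the orientation outputs into bin_num pairs of two values, and Lambda(l2_normalize) then normalizes each pair. This is consistent with the usual MultiBin formulation in which every angle bin predicts a unit (cos, sin) vector, although the (cos, sin) interpretation is an assumption not spelled out in the snippet itself.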

Example 10: test_reshape

# Required import: from keras.layers import core [as alias]
# Or: from keras.layers.core import Reshape [as alias]
def test_reshape(self):
        layer = core.Reshape(10, 10)
        self._runner(layer) 
Author: lllcho, Project: CAPTCHA-breaking, Lines: 5, Source: test_core.py
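This test targets a very old Keras release in which Reshape accepted the output dimensions directly as shown; under Keras 2.x the equivalent construction would be core.Reshape((10, 10)).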

Example 11: get_model

# Required import: from keras.layers import core [as alias]
# Or: from keras.layers.core import Reshape [as alias]
def get_model(self, num_classes, activation='sigmoid'):
        max_len = opt.max_len
        voca_size = opt.unigram_hash_size + 1

        with tf.device('/gpu:0'):
            embd = Embedding(voca_size,
                             opt.embd_size,
                             name='uni_embd')

            t_uni = Input((max_len,), name="input_1")
            t_uni_embd = embd(t_uni)  # token

            w_uni = Input((max_len,), name="input_2")
            w_uni_mat = Reshape((max_len, 1))(w_uni)  # weight

            uni_embd_mat = dot([t_uni_embd, w_uni_mat], axes=1)
            uni_embd = Reshape((opt.embd_size, ))(uni_embd_mat)

            embd_out = Dropout(rate=0.5)(uni_embd)
            relu = Activation('relu', name='relu1')(embd_out)
            outputs = Dense(num_classes, activation=activation)(relu)
            model = Model(inputs=[t_uni, w_uni], outputs=outputs)
            optm = keras.optimizers.Nadam(opt.lr)
            model.compile(loss='binary_crossentropy',
                        optimizer=optm,
                        metrics=[top1_acc])
            model.summary(print_fn=lambda x: self.logger.info(x))
        return model 
Author: kakao-arena, Project: shopping-classification, Lines: 30, Source: network.py
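Design note: Reshape((max_len, 1)) turns the per-token weight vector into a column so that dot([t_uni_embd, w_uni_mat], axes=1) contracts the sequence axis and yields a weighted sum of the token embeddings with shape (embd_size, 1) per sample; the second Reshape((opt.embd_size,)) then drops the trailing singleton axis before the dropout and classifier.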

Example 12: basic_mlp

# Required import: from keras.layers import core [as alias]
# Or: from keras.layers.core import Reshape [as alias]
def basic_mlp(img_vec_dim, vocabulary_size, word_emb_dim,
              max_ques_length, num_hidden_units_lstm, 
              num_hidden_layers_mlp, num_hidden_units_mlp,
              dropout, nb_classes, class_activation):
    # Image model
    model_image = Sequential()
    model_image.add(Reshape((img_vec_dim,), input_shape=(img_vec_dim,)))

    # Language Model
    model_language = Sequential()
    model_language.add(Embedding(vocabulary_size, word_emb_dim, input_length=max_ques_length))
    model_language.add(LSTM(num_hidden_units_lstm, return_sequences=True, input_shape=(max_ques_length, word_emb_dim)))
    model_language.add(LSTM(num_hidden_units_lstm, return_sequences=True))
    model_language.add(LSTM(num_hidden_units_lstm, return_sequences=False))

    # combined model
    model = Sequential()
    model.add(Merge([model_language, model_image], mode='concat', concat_axis=1))


    for i in xrange(num_hidden_layers_mlp):
        model.add(Dense(num_hidden_units_mlp))
        model.add(Dropout(dropout))

    model.add(Dense(nb_classes))
    model.add(Activation(class_activation))

    return model 
Author: channelCS, Project: Audio-Vision, Lines: 30, Source: my_models.py
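Note: Merge(mode='concat'), used here and in the two examples that follow, together with xrange, dates this code to the Keras 1.x / Python 2 era; the Merge layer was removed in Keras 2.x in favour of the functional concatenate / Concatenate utilities, and xrange was replaced by range in Python 3.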

Example 13: visual_lstm

# Required import: from keras.layers import core [as alias]
# Or: from keras.layers.core import Reshape [as alias]
def visual_lstm(img_vec_dim, activation_1,activation_2, dropout, vocabulary_size,
                num_hidden_units_lstm, max_ques_length,
                word_emb_dim, num_hidden_layers_mlp,
                num_hidden_units_mlp, nb_classes, class_activation,embedding_matrix):
    
    # Make image model
    inpx1=Input(shape=(img_vec_dim,))
    x1=Dense(embedding_matrix.shape[1], activation='tanh')(inpx1)
    x1=Reshape((1,embedding_matrix.shape[1]))(x1)
    image_model = Model([inpx1],x1)
    image_model.summary()
    
    # Make language Model
    inpx0=Input(shape=(max_ques_length,))
    x0=Embedding(vocabulary_size, word_emb_dim, weights=[embedding_matrix], trainable=False)(inpx0)
    x2=Dense(embedding_matrix.shape[1],activation='tanh')(x0)
    x2=Dropout(dropout)(x2)

    # Make embedding_model
    embedding_model = Model([inpx0],x2)
    embedding_model.summary()
    
    # Make combined model
    model = Sequential()
    model.add(Merge([image_model,embedding_model],mode = 'concat', concat_axis=1))
    model.add(LSTM(num_hidden_units_lstm, return_sequences=False, go_backwards=True))
    model.add(Dense(num_hidden_units_mlp))
    model.add(Activation('relu'))
    model.add(Dropout(dropout))
    
    model.summary()
    model.add(Dense(nb_classes))
    model.add(Activation(class_activation))

    return model 
Author: channelCS, Project: Audio-Vision, Lines: 37, Source: my_models.py

Example 14: visual_lstm2

# Required import: from keras.layers import core [as alias]
# Or: from keras.layers.core import Reshape [as alias]
def visual_lstm2(img_vec_dim, activation_1,activation_2, dropout, vocabulary_size,
                num_hidden_units_lstm, max_ques_length,
                word_emb_dim, num_hidden_layers_mlp,
                num_hidden_units_mlp, nb_classes, class_activation,embedding_matrix):
    
    # Make image model
    inpx1=Input(shape=(img_vec_dim,))
    x1=Dense(embedding_matrix.shape[1], activation=activation_1)(inpx1)
    x1=Reshape((1,embedding_matrix.shape[1]))(x1)
    image_model = Model([inpx1],x1)
    image_model.summary()
    
    # Make language Model
    inpx0=Input(shape=(max_ques_length,))
    x0=Embedding(vocabulary_size, word_emb_dim, weights=[embedding_matrix], trainable=False)(inpx0)
    x2=Dense(embedding_matrix.shape[1],activation=activation_2)(x0)
    x2=Dropout(dropout)(x2)

    # Make embedding_model
    embedding_model = Model([inpx0],x2)
    embedding_model.summary()
    
    inpx2=Input(shape=(img_vec_dim,))
    x4=Dense(embedding_matrix.shape[1], activation=activation_1)(inpx2)
    x3=Reshape((1,embedding_matrix.shape[1]))(x4)
    image_model2 = Model([inpx2],x3)
    image_model2.summary()
    
    # Make combined model
    model = Sequential()
    model.add(Merge([image_model,embedding_model, image_model2],mode = 'concat', concat_axis=1))
    model.add(Bidirectional(LSTM(num_hidden_units_lstm, return_sequences=False)))
    model.add(Dense(num_hidden_units_mlp))
    model.add(Activation(activation_1))
    model.add(Dropout(dropout))
    
    model.summary()
    model.add(Dense(nb_classes))
    model.add(Activation(class_activation))

    return model 
Author: channelCS, Project: Audio-Vision, Lines: 43, Source: my_models.py

Example 15: dense_model

# Required import: from keras.layers import core [as alias]
# Or: from keras.layers.core import Reshape [as alias]
def dense_model(patch_size, num_classes):
    merged_inputs = Input(shape=patch_size + (4,), name='merged_inputs')
    flair = Reshape(patch_size + (1,))(
        Lambda(
            lambda l: l[:, :, :, :, 0],
            output_shape=patch_size + (1,))(merged_inputs),
    )
    t2 = Reshape(patch_size + (1,))(
        Lambda(lambda l: l[:, :, :, :, 1], output_shape=patch_size + (1,))(merged_inputs)
    )
    t1 = Lambda(lambda l: l[:, :, :, :, 2:], output_shape=patch_size + (2,))(merged_inputs)

    flair = dense_net(flair)
    t2 = dense_net(t2)
    t1 = dense_net(t1)

    t2 = concatenate([flair, t2])

    t1 = concatenate([t2, t1])

    tumor = Conv3D(2, kernel_size=1, strides=1, name='tumor')(flair)
    core = Conv3D(3, kernel_size=1, strides=1, name='core')(t2)
    enhancing = Conv3D(num_classes, kernel_size=1, strides=1, name='enhancing')(t1)
    net = Model(inputs=merged_inputs, outputs=[tumor, core, enhancing])

    return net 
Author: lelechen63, Project: MRI-tumor-segmentation-Brats, Lines: 28, Source: test.py
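Design note: each Lambda slices a single modality out of the stacked 4-channel input (dropping the channel axis in the process), and Reshape(patch_size + (1,)) restores that axis so flair and t2 can be fed to dense_net as single-channel volumes; the t1 slice keeps its last two channels, so no Reshape is needed there.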


Note: The keras.layers.core.Reshape examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective developers, and copyright remains with the original authors; please consult each project's license before using or redistributing the code. Do not reproduce this article without permission.