

Python layers.UpSampling3D Method Code Examples

This article collects typical usage examples of the keras.layers.UpSampling3D method in Python. If you are wondering what layers.UpSampling3D does, how to call it, or are looking for working examples, the hand-picked code samples below should help. You can also browse further usage examples from keras.layers, the module this method belongs to.


Eight code examples of layers.UpSampling3D are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
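Before the collected examples, here is a minimal, self-contained sketch (not taken from any of the projects below) showing what the layer does: UpSampling3D repeats each voxel along the three spatial axes, so size=(2, 2, 2) doubles depth, height, and width.

# Minimal sketch, assuming the default channels_last data format.
from keras.layers import Input, UpSampling3D
from keras.models import Model

inp = Input(shape=(4, 4, 4, 1))          # (depth, height, width, channels)
out = UpSampling3D(size=(2, 2, 2))(inp)  # each spatial dimension is doubled
model = Model(inp, out)
print(model.output_shape)                # (None, 8, 8, 8, 1)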

Example 1: test_keras_import

# Required module: from keras import layers [as alias]
# Or: from keras.layers import UpSampling3D [as alias]
def test_keras_import(self):
        # Upsample 1D
        model = Sequential()
        model.add(UpSampling1D(size=2, input_shape=(16, 1)))
        model.build()
        self.keras_param_test(model, 0, 2)
        # Upsample 2D
        model = Sequential()
        model.add(UpSampling2D(size=(2, 2), input_shape=(16, 16, 1)))
        model.build()
        self.keras_param_test(model, 0, 3)
        # Upsample 3D
        model = Sequential()
        model.add(UpSampling3D(size=(2, 2, 2), input_shape=(16, 16, 16, 1)))
        model.build()
        self.keras_param_test(model, 0, 4)


# ********** Pooling Layers ********** 
Developer: Cloud-CV | Project: Fabrik | Lines of code: 21 | Source: test_views.py

Example 2: get_up_convolution

# Required module: from keras import layers [as alias]
# Or: from keras.layers import UpSampling3D [as alias]
def get_up_convolution(n_filters, pool_size, kernel_size=(2, 2, 2), strides=(2, 2, 2),
                       deconvolution=False):
    if deconvolution:
        return Deconvolution3D(filters=n_filters, kernel_size=kernel_size,
                               strides=strides)
    else:
        return UpSampling3D(size=pool_size) 
Developer: ellisdg | Project: 3DUnetCNN | Lines of code: 9 | Source: unet.py
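Note that the helper returns an unapplied layer object, so it still has to be called on a tensor. A hypothetical usage sketch (`current_layer` is an illustrative name, not from the source):

# Hypothetical usage sketch: apply the returned layer to an existing 5D feature tensor.
up_layer = get_up_convolution(n_filters=64, pool_size=(2, 2, 2), deconvolution=False)
upsampled = up_layer(current_layer)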

Example 3: nn_architecture_seg_3d

# Required module: from keras import layers [as alias]
# Or: from keras.layers import UpSampling3D [as alias]
def nn_architecture_seg_3d(input_shape, pool_size=(2, 2, 2), n_labels=1, initial_learning_rate=0.00001,
                        depth=3, n_base_filters=16, metrics=dice_coefficient, batch_normalization=True):
    inputs = Input(input_shape)
    current_layer = inputs
    levels = list()

    for layer_depth in range(depth):
        layer1 = create_convolution_block(input_layer=current_layer, n_filters=n_base_filters * (2**layer_depth),
                                          batch_normalization=batch_normalization)
        layer2 = create_convolution_block(input_layer=layer1, n_filters=n_base_filters * (2**layer_depth) * 2,
                                          batch_normalization=batch_normalization)
        if layer_depth < depth - 1:
            current_layer = MaxPooling3D(pool_size=pool_size)(layer2)
            levels.append([layer1, layer2, current_layer])
        else:
            current_layer = layer2
            levels.append([layer1, layer2])

    for layer_depth in range(depth - 2, -1, -1):
        up_convolution = UpSampling3D(size=pool_size)(current_layer)
        concat = concatenate([up_convolution, levels[layer_depth][1]], axis=1)
        current_layer = create_convolution_block(n_filters=levels[layer_depth][1]._keras_shape[1],
                                                 input_layer=concat, batch_normalization=batch_normalization)
        current_layer = create_convolution_block(n_filters=levels[layer_depth][1]._keras_shape[1],
                                                 input_layer=current_layer,
                                                 batch_normalization=batch_normalization)

    final_convolution = Conv3D(n_labels, (1, 1, 1))(current_layer)
    act = Activation('sigmoid')(final_convolution)
    model = Model(inputs=inputs, outputs=act)

    if not isinstance(metrics, list):
        metrics = [metrics]

    model.compile(optimizer=Adam(lr=initial_learning_rate), loss=dice_coefficient_loss, metrics=metrics)
    return model 
Developer: neuropoly | Project: spinalcordtoolbox | Lines of code: 38 | Source: cnn_models_3d.py
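The concatenation on axis=1 and the use of `_keras_shape[1]` imply a channels_first layout. A hypothetical call, assuming the Keras backend is configured for channels_first and single-channel 64³ patches (shape and hyperparameters are illustrative, not from the source):

# Hypothetical usage sketch (assumes image_data_format is 'channels_first'):
model = nn_architecture_seg_3d(input_shape=(1, 64, 64, 64), pool_size=(2, 2, 2),
                               n_labels=1, depth=3, n_base_filters=16)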

Example 4: model_simple_upsampling__reshape

# Required module: from keras import layers [as alias]
# Or: from keras.layers import UpSampling3D [as alias]
def model_simple_upsampling__reshape(img_shape, class_n=None):

    from keras.layers import Input, Dense, Convolution3D, MaxPooling3D, UpSampling3D, Reshape, Flatten
    from keras.models import Sequential, Model
    from keras.layers.core import Activation
    from aitom.classify.deep.unsupervised.autoencoder.seg_util import conv_block
    import numpy as N  # N.prod() is used below; this import was missing

    NUM_CHANNELS=1
    input_shape = (None, img_shape[0], img_shape[1], img_shape[2], NUM_CHANNELS)

    # Use a relu activation for the hidden layers so that only non-negative outputs are passed to the max pooling layers. As long as the output layer uses a linear activation, the network can still accommodate negative image intensities; they are simply shifted back via the bias term.
    input_img = Input(shape=input_shape[1:])
    x = input_img

    x = conv_block(x, 32, 3, 3, 3)
    x = MaxPooling3D((2, 2, 2), padding='same')(x)
    x = conv_block(x, 32, 3, 3, 3)
    x = MaxPooling3D((2, 2, 2), padding='same')(x)

    x = conv_block(x, 32, 3, 3, 3)

    x = UpSampling3D((2, 2, 2))(x)
    x = conv_block(x, 32, 3, 3, 3)

    x = UpSampling3D((2, 2, 2))(x)
    x = conv_block(x, 32, 3, 3, 3)

    x = Convolution3D(class_n, (1, 1, 1), padding='same')(x)
    x = Reshape((N.prod(img_shape), class_n))(x)
    x = Activation('softmax')(x)

    model = Model(inputs=input_img, outputs=x)

    print('model layers:')
    for l in model.layers:
        print(l.output_shape, l.name)

    return model 
Developer: xulabs | Project: aitom | Lines of code: 39 | Source: seg_src.py

Example 5: create_up_sampling_module

# Required module: from keras import layers [as alias]
# Or: from keras.layers import UpSampling3D [as alias]
def create_up_sampling_module(input_layer, n_filters, size=(2, 2, 2)):
    up_sample = UpSampling3D(size=size)(input_layer)
    convolution = create_convolution_block(up_sample, n_filters)
    return convolution 
Developer: ellisdg | Project: 3DUnetCNN | Lines of code: 6 | Source: isensee2017.py
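In a U-Net style decoder this module is typically followed by a concatenation with the matching encoder feature map. A hypothetical sketch (tensor names are illustrative; the concatenation axis depends on the data format):

# Hypothetical usage sketch: upsample decoder features, then merge with the skip connection.
from keras.layers import concatenate
up = create_up_sampling_module(decoder_features, n_filters=128, size=(2, 2, 2))
merged = concatenate([up, encoder_skip_features], axis=1)  # axis=1 for channels_first, axis=-1 for channels_last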

Example 6: test_keras_export

# Required module: from keras import layers [as alias]
# Or: from keras.layers import UpSampling3D [as alias]
def test_keras_export(self):
        tests = open(os.path.join(settings.BASE_DIR, 'tests', 'unit', 'keras_app',
                                  'keras_export_test.json'), 'r')
        response = json.load(tests)
        tests.close()
        net = yaml.safe_load(json.dumps(response['net']))
        net = {'l0': net['Input'], 'l1': net['Input2'], 'l2': net['Input4'], 'l3': net['Upsample']}
        # Upsample 1D
        net['l1']['connection']['output'].append('l3')
        net['l3']['connection']['input'] = ['l1']
        net['l3']['params']['layer_type'] = '1D'
        inp = data(net['l1'], '', 'l1')['l1']
        temp = upsample(net['l3'], [inp], 'l3')
        model = Model(inp, temp['l3'])
        self.assertEqual(model.layers[1].__class__.__name__, 'UpSampling1D')
        # Upsample 2D
        net['l0']['connection']['output'].append('l3')
        net['l3']['connection']['input'] = ['l0']
        net['l3']['params']['layer_type'] = '2D'
        inp = data(net['l0'], '', 'l0')['l0']
        temp = upsample(net['l3'], [inp], 'l3')
        model = Model(inp, temp['l3'])
        self.assertEqual(model.layers[1].__class__.__name__, 'UpSampling2D')
        # Upsample 3D
        net['l2']['connection']['output'].append('l3')
        net['l3']['connection']['input'] = ['l2']
        net['l3']['params']['layer_type'] = '3D'
        inp = data(net['l2'], '', 'l2')['l2']
        temp = upsample(net['l3'], [inp], 'l3')
        model = Model(inp, temp['l3'])
        self.assertEqual(model.layers[1].__class__.__name__, 'UpSampling3D') 
Developer: Cloud-CV | Project: Fabrik | Lines of code: 33 | Source: test_views.py

Example 7: auto_classifier_model

# Required module: from keras import layers [as alias]
# Or: from keras.layers import UpSampling3D [as alias]
def auto_classifier_model(img_shape, encoding_dim=128, NUM_CHANNELS=1, num_of_class=2):

    input_shape = (None, img_shape[0], img_shape[1], img_shape[2], NUM_CHANNELS)
    mask_shape = (None, num_of_class)

    # Use a relu activation for the hidden layers so that only non-negative outputs are passed to the max pooling layers. As long as the output layer uses a linear activation, the network can still accommodate negative image intensities; they are simply shifted back via the bias term.
    input_img = Input(shape=input_shape[1:])
    mask = Input(shape=mask_shape[1:])
    x = input_img

    x = conv_block(x, 32, 3, 3, 3)
    x = MaxPooling3D((2, 2, 2), padding='same')(x)

    x = conv_block(x, 32, 3, 3, 3)
    x = MaxPooling3D((2, 2, 2), padding='same')(x)

    encoder_conv_shape = [_.value for _ in x.get_shape()]  # x.get_shape() returns a TensorShape whose entries are Dimension objects (TF1 API)
    x = Flatten()(x)
    encoded = Dense(encoding_dim, activation='relu', activity_regularizer=regularizers.l1(10e-5))(x)
    encoder = Model(inputs=input_img, outputs=encoded)

    x = BatchNormalization()(x)
    x = Dense(encoding_dim, activation='relu', activity_regularizer=regularizers.l1(10e-5))(x)
    x = Dense(128, activation='relu')(x)
    x = Dense(num_of_class, activation='softmax')(x)
    
    prob = x
    # classifier output
    classifier = Model(inputs=input_img, outputs=prob)

    input_img_decoder = Input(shape=encoder.output_shape[1:])
    x = input_img_decoder
    x = Dense(np.prod(encoder_conv_shape[1:]), activation='relu')(x)
    x = Reshape(encoder_conv_shape[1:])(x)

    x = UpSampling3D((2, 2, 2))(x)
    x = conv_block(x, 32, 3, 3, 3)

    x = UpSampling3D((2, 2, 2))(x)
    x = conv_block(x, 32, 3, 3, 3)
    x = Convolution3D(1, (3, 3, 3), activation='linear', padding='same')(x)

    decoded = x
    # autoencoder output
    decoder = Model(inputs=input_img_decoder, outputs=decoded)

    
    autoencoder = Sequential()
    for l in encoder.layers:
        autoencoder.add(l)
    for l in decoder.layers:
        autoencoder.add(l)

    decoded = autoencoder(input_img)


    auto_classifier = Model(inputs=input_img, outputs=[decoded, prob])
    auto_classifier.summary()
    return auto_classifier 
Developer: xulabs | Project: aitom | Lines of code: 63 | Source: auto_classifier_model.py
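Since the returned model has two outputs (the reconstruction and the class probabilities), it needs one loss per output when compiled. A hypothetical sketch (the loss choices and weights are illustrative, not from the source):

# Hypothetical usage sketch: one loss per output head.
m = auto_classifier_model(img_shape=(32, 32, 32), encoding_dim=128, NUM_CHANNELS=1, num_of_class=2)
m.compile(optimizer='adam', loss=['mse', 'categorical_crossentropy'], loss_weights=[1.0, 1.0])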

Example 8: _build

# Required module: from keras import layers [as alias]
# Or: from keras.layers import UpSampling3D [as alias]
def _build(self):
        # get parameters
        proj = self.proj_params
        proj_axis = axes_dict(self.config.axes)[proj.axis]

        # define surface projection network (3D -> 2D)
        inp = u = Input(self.config.unet_input_shape)
        def conv_layers(u):
            for _ in range(proj.n_conv_per_depth):
                u = Conv3D(proj.n_filt, proj.kern, padding='same', activation='relu')(u)
            return u
        # down
        for _ in range(proj.n_depth):
            u = conv_layers(u)
            u = MaxPooling3D(proj.pool)(u)
        # middle
        u = conv_layers(u)
        # up
        for _ in range(proj.n_depth):
            u = UpSampling3D(proj.pool)(u)
            u = conv_layers(u)
        u = Conv3D(1, proj.kern, padding='same', activation='linear')(u)
        # convert learned features along Z to surface probabilities
        # (add 1 to proj_axis because of batch dimension in tensorflow)
        u = Lambda(lambda x: softmax(x, axis=1+proj_axis))(u)
        # multiply Z probabilities with Z values in input stack
        u = Multiply()([inp, u])
        # perform surface projection by summing over weighted Z values
        u = Lambda(lambda x: K.sum(x, axis=1+proj_axis))(u)
        model_projection = Model(inp, u)

        # define denoising network (2D -> 2D)
        # (remove projected axis from input_shape)
        input_shape = list(self.config.unet_input_shape)
        del input_shape[proj_axis]
        model_denoising = nets.common_unet(
            n_dim           = self.config.n_dim-1,
            n_channel_out   = self.config.n_channel_out,
            prob_out        = self.config.probabilistic,
            residual        = self.config.unet_residual,
            n_depth         = self.config.unet_n_depth,
            kern_size       = self.config.unet_kern_size,
            n_first         = self.config.unet_n_first,
            last_activation = self.config.unet_last_activation,
        )(tuple(input_shape))

        # chain models together
        return Model(inp, model_denoising(model_projection(inp))) 
Developer: CSBDeep | Project: CSBDeep | Lines of code: 50 | Source: care_projection.py


Note: The keras.layers.UpSampling3D examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please consult the corresponding project's license before using or redistributing the code, and do not repost without permission.