

Python layers.Dropout Method Code Examples

This article collects typical usage examples of the Python method tensorflow.python.keras.layers.Dropout, gathered from open-source projects. If you are wondering what layers.Dropout does, how to call it, or want to see it used in real code, the selected examples below should help. You can also explore further usage examples from the containing module, tensorflow.python.keras.layers.


The following presents 10 code examples of the layers.Dropout method, listed by popularity.
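
Before diving into the project-specific examples, here is a minimal, self-contained sketch of how a Dropout layer is typically wired into a Keras functional-API model. It was written for this article rather than taken from any of the projects below, and all names in it are illustrative; it imports from the public tensorflow.keras path, whereas the excerpts below use the internal tensorflow.python.keras package.

import numpy as np
from tensorflow.keras import Input, Model
from tensorflow.keras.layers import Dense, Dropout

# A tiny functional-API model: Dense -> Dropout -> Dense.
inputs = Input(shape=(20,))
hidden = Dense(64, activation='relu')(inputs)
hidden = Dropout(0.5)(hidden)  # randomly zeroes 50% of activations, but only during training
outputs = Dense(1, activation='sigmoid')(hidden)

model = Model(inputs=inputs, outputs=outputs)
model.compile(optimizer='adam', loss='binary_crossentropy')

# Dropout is active when training=True and acts as an identity op at inference time.
batch = np.random.rand(4, 20).astype('float32')
train_out = model(batch, training=True)
infer_out = model(batch, training=False)

The examples that follow show the same layer used inside custom layers' build() methods, in DenseNet-style convolution blocks, and in complete model definitions.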

Example 1: build

# Required import: from tensorflow.python.keras import layers [as alias]
# Or alternatively: from tensorflow.python.keras.layers import Dropout [as alias]
def build(self, input_layer):
        last_layer = input_layer
        input_shape = K.int_shape(input_layer)

        if self.with_embedding:
            if input_shape[-1] != 1:
                raise ValueError("Only one feature (the index) can be used with embeddings, "
                                 "i.e. the input shape should be (num_samples, length, 1). "
                                 "The actual shape was: " + str(input_shape))

            last_layer = Lambda(lambda x: K.squeeze(x, axis=-1),
                                output_shape=K.int_shape(last_layer)[:-1])(last_layer)  # Remove feature dimension.
            last_layer = Embedding(self.embedding_size, self.embedding_dimension,
                                   input_length=input_shape[-2])(last_layer)

        for _ in range(self.num_layers):
            last_layer = Dense(self.num_units, activation=self.activation)(last_layer)
            if self.with_bn:
                last_layer = BatchNormalization()(last_layer)
            if not np.isclose(self.p_dropout, 0):
                last_layer = Dropout(self.p_dropout)(last_layer)
        return last_layer 
Developer: d909b, Project: cxplain, Lines of code: 24, Source: rnn.py

Example 2: build

# Required import: from tensorflow.python.keras import layers [as alias]
# Or alternatively: from tensorflow.python.keras.layers import Dropout [as alias]
def build(self, input_shapes):

        if self.feature_less:
            input_dim = int(input_shapes[0][-1])
        else:
            assert len(input_shapes) == 2
            features_shape = input_shapes[0]

            input_dim = int(features_shape[-1])

        self.kernel = self.add_weight(shape=(input_dim,
                                             self.units),
                                      initializer=glorot_uniform(
                                          seed=self.seed),
                                      regularizer=l2(self.l2_reg),
                                      name='kernel', )
        if self.use_bias:
            self.bias = self.add_weight(shape=(self.units,),
                                        initializer=Zeros(),
                                        name='bias', )

        self.dropout = Dropout(self.dropout_rate, seed=self.seed)

        self.built = True 
Developer: shenweichen, Project: GraphNeuralNetwork, Lines of code: 26, Source: gcn.py

Example 3: __conv_block

# Required import: from tensorflow.python.keras import layers [as alias]
# Or alternatively: from tensorflow.python.keras.layers import Dropout [as alias]
def __conv_block(ip, nb_filter, bottleneck=False, dropout_rate=None, weight_decay=1e-4):
    ''' Apply BatchNorm, Relu, 3x3 Conv2D, optional bottleneck block and dropout
    Args:
        ip: Input keras tensor
        nb_filter: number of filters
        bottleneck: add bottleneck block
        dropout_rate: dropout rate
        weight_decay: weight decay factor
    Returns: keras tensor with batch_norm, relu and convolution2d added (optional bottleneck)
    '''
    concat_axis = 1 if K.image_data_format() == 'channels_first' else -1

    x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(ip)
    x = Activation('relu')(x)

    if bottleneck:
        inter_channel = nb_filter * 4  # Obtained from https://github.com/liuzhuang13/DenseNet/blob/master/densenet.lua

        x = Conv2D(inter_channel, (1, 1), kernel_initializer='he_normal', padding='same', use_bias=False,
                   kernel_regularizer=l2(weight_decay))(x)
        x = BatchNormalization(axis=concat_axis, epsilon=1.1e-5)(x)
        x = Activation('relu')(x)

    x = Conv2D(nb_filter, (3, 3), kernel_initializer='he_normal', padding='same', use_bias=False)(x)
    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    return x 
Developer: OlafenwaMoses, Project: ImageAI, Lines of code: 30, Source: densenet.py

Example 4: _build_dropout

# Required import: from tensorflow.python.keras import layers [as alias]
# Or alternatively: from tensorflow.python.keras.layers import Dropout [as alias]
def _build_dropout(rate, noise_shape=None, seed=None, **kwargs):
    return layers.Dropout(rate, noise_shape=noise_shape, seed=seed, **kwargs) 
Developer: FederatedAI, Project: FATE, Lines of code: 4, Source: baisc.py

Example 5: build

# Required import: from tensorflow.python.keras import layers [as alias]
# Or alternatively: from tensorflow.python.keras.layers import Dropout [as alias]
def build(self, input_layer):
        last_layer = input_layer
        for _ in range(self.num_layers):
            last_layer = Dense(self.num_units, activation=self.activation)(last_layer)
            if self.with_bn:
                last_layer = BatchNormalization()(last_layer)
            if not np.isclose(self.p_dropout, 0):
                last_layer = Dropout(self.p_dropout)(last_layer)
        return last_layer 
Developer: d909b, Project: cxplain, Lines of code: 11, Source: mlp.py

Example 6: architecture

# Required import: from tensorflow.python.keras import layers [as alias]
# Or alternatively: from tensorflow.python.keras.layers import Dropout [as alias]
def architecture(inputs):
    """ Architecture of model """

    conv1 = Conv2D(32, kernel_size=(3, 3),
                   activation='relu')(inputs)
    max1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    conv2 = Conv2D(32, (3, 3), activation='relu')(max1)
    max2 = MaxPooling2D(pool_size=(2, 2))(conv2)
    conv3 = Conv2D(64, (3, 3), activation='relu')(max2)
    max3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    flat1 = Flatten()(max3)
    dense1 = Dense(64, activation='relu')(flat1)
    drop1 = Dropout(0.5)(dense1)

    return drop1 
Developer: marco-willi, Project: camera-trap-classifier, Lines of code: 17, Source: small_cnn.py

Example 7: build

# Required import: from tensorflow.python.keras import layers [as alias]
# Or alternatively: from tensorflow.python.keras.layers import Dropout [as alias]
def build(self, input_shapes):

        self.neigh_weights = self.add_weight(shape=(self.input_dim, self.units),
                                             initializer=glorot_uniform(
                                                 seed=self.seed),
                                             regularizer=l2(self.l2_reg),
                                             name="neigh_weights")
        if self.use_bias:
            self.bias = self.add_weight(shape=(self.units,), initializer=Zeros(),
                                        name='bias_weight')

        self.dropout = Dropout(self.dropout_rate)
        self.built = True 
Developer: shenweichen, Project: GraphNeuralNetwork, Lines of code: 15, Source: graphsage.py

Example 8: build

# Required import: from tensorflow.python.keras import layers [as alias]
# Or alternatively: from tensorflow.python.keras.layers import Dropout [as alias]
def build(self, input_shape):

        X, A = input_shape
        embedding_size = int(X[-1])
        self.weight = self.add_weight(name='weight', shape=[embedding_size, self.att_embedding_size * self.head_num],
                                      dtype=tf.float32,
                                      regularizer=l2(self.l2_reg),
                                      initializer=tf.keras.initializers.glorot_uniform())
        self.att_self_weight = self.add_weight(name='att_self_weight',
                                               shape=[1, self.head_num,
                                                      self.att_embedding_size],
                                               dtype=tf.float32,
                                               regularizer=l2(self.l2_reg),
                                               initializer=tf.keras.initializers.glorot_uniform())
        self.att_neighs_weight = self.add_weight(name='att_neighs_weight',
                                                 shape=[1, self.head_num,
                                                        self.att_embedding_size],
                                                 dtype=tf.float32,
                                                 regularizer=l2(self.l2_reg),
                                                 initializer=tf.keras.initializers.glorot_uniform())

        if self.use_bias:
            self.bias_weight = self.add_weight(name='bias', shape=[1, self.head_num, self.att_embedding_size],
                                               dtype=tf.float32,
                                               initializer=Zeros())
        self.in_dropout = Dropout(self.dropout_rate)
        self.feat_dropout = Dropout(self.dropout_rate)
        self.att_dropout = Dropout(self.dropout_rate)
        # Be sure to call this somewhere!
        super(GATLayer, self).build(input_shape) 
Developer: shenweichen, Project: GraphNeuralNetwork, Lines of code: 32, Source: gat.py

Example 9: __init__

# Required import: from tensorflow.python.keras import layers [as alias]
# Or alternatively: from tensorflow.python.keras.layers import Dropout [as alias]
def __init__(self, game, encoder):
        """
        NNet model, copied from Othello NNet, with reduced fully connected layers fc1 and fc2 and reduced nnet_args.num_channels
        :param game: game configuration
        :param encoder: Encoder, used to encode game boards
        """
        from rts.src.config_class import CONFIG

        # game params
        self.board_x, self.board_y, num_encoders = game.getBoardSize()
        self.action_size = game.getActionSize()

        """
        num_encoders = CONFIG.nnet_args.encoder.num_encoders
        """
        num_encoders = encoder.num_encoders

        # Neural Net
        self.input_boards = Input(shape=(self.board_x, self.board_y, num_encoders))  # s: batch_size x board_x x board_y x num_encoders

        x_image = Reshape((self.board_x, self.board_y, num_encoders))(self.input_boards)  # batch_size  x board_x x board_y x num_encoders
        h_conv1 = Activation('relu')(BatchNormalization(axis=3)(Conv2D(CONFIG.nnet_args.num_channels, 3, padding='same', use_bias=False)(x_image)))  # batch_size  x board_x x board_y x num_channels
        h_conv2 = Activation('relu')(BatchNormalization(axis=3)(Conv2D(CONFIG.nnet_args.num_channels, 3, padding='same', use_bias=False)(h_conv1)))  # batch_size  x board_x x board_y x num_channels
        h_conv3 = Activation('relu')(BatchNormalization(axis=3)(Conv2D(CONFIG.nnet_args.num_channels, 3, padding='valid', use_bias=False)(h_conv2)))  # batch_size  x (board_x-2) x (board_y-2) x num_channels
        h_conv4 = Activation('relu')(BatchNormalization(axis=3)(Conv2D(CONFIG.nnet_args.num_channels, 3, padding='valid', use_bias=False)(h_conv3)))  # batch_size  x (board_x-4) x (board_y-4) x num_channels
        h_conv4_flat = Flatten()(h_conv4)
        s_fc1 = Dropout(CONFIG.nnet_args.dropout)(Activation('relu')(BatchNormalization(axis=1)(Dense(256, use_bias=False)(h_conv4_flat))))  # batch_size x 256
        s_fc2 = Dropout(CONFIG.nnet_args.dropout)(Activation('relu')(BatchNormalization(axis=1)(Dense(128, use_bias=False)(s_fc1))))  # batch_size x 128
        self.pi = Dense(self.action_size, activation='softmax', name='pi')(s_fc2)  # batch_size x self.action_size
        self.v = Dense(1, activation='tanh', name='v')(s_fc2)  # batch_size x 1

        self.model = Model(inputs=self.input_boards, outputs=[self.pi, self.v])
        self.model.compile(loss=['categorical_crossentropy', 'mean_squared_error'], optimizer=Adam(CONFIG.nnet_args.lr)) 
Developer: suragnair, Project: alpha-zero-general, Lines of code: 35, Source: RTSNNet.py

Example 10: _build_model

# Required import: from tensorflow.python.keras import layers [as alias]
# Or alternatively: from tensorflow.python.keras.layers import Dropout [as alias]
def _build_model(self, input_shape):
        x = Input(shape=(32, 32, 3))
        y = x
        y = Convolution2D(
            filters=64,
            kernel_size=3,
            strides=1,
            padding="same",
            activation="relu",
            kernel_initializer="he_normal")(y)
        y = Convolution2D(
            filters=64,
            kernel_size=3,
            strides=1,
            padding="same",
            activation="relu",
            kernel_initializer="he_normal")(y)
        y = MaxPooling2D(pool_size=2, strides=2, padding="same")(y)

        y = Convolution2D(
            filters=128,
            kernel_size=3,
            strides=1,
            padding="same",
            activation="relu",
            kernel_initializer="he_normal")(y)
        y = Convolution2D(
            filters=128,
            kernel_size=3,
            strides=1,
            padding="same",
            activation="relu",
            kernel_initializer="he_normal")(y)
        y = MaxPooling2D(pool_size=2, strides=2, padding="same")(y)

        y = Convolution2D(
            filters=256,
            kernel_size=3,
            strides=1,
            padding="same",
            activation="relu",
            kernel_initializer="he_normal")(y)
        y = Convolution2D(
            filters=256,
            kernel_size=3,
            strides=1,
            padding="same",
            activation="relu",
            kernel_initializer="he_normal")(y)
        y = MaxPooling2D(pool_size=2, strides=2, padding="same")(y)

        y = Flatten()(y)
        y = Dropout(self.config.get("dropout", 0.5))(y)
        y = Dense(
            units=10, activation="softmax", kernel_initializer="he_normal")(y)

        model = Model(inputs=x, outputs=y, name="model1")
        return model 
Developer: ray-project, Project: ray, Lines of code: 60, Source: pbt_tune_cifar10_with_keras.py


Note: The tensorflow.python.keras.layers.Dropout method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and the copyright of the source code belongs to the original authors. Please consult each project's license before distributing or reusing the code, and do not republish without permission.