

Python layers.Dropout Method Code Examples

This article collects typical usage examples of the tensorflow.keras.layers.Dropout method in Python. If you are unsure how to use layers.Dropout in practice, or are looking for concrete examples of the method, the curated code examples below may help. You can also explore further usage examples from the tensorflow.keras.layers module.


The sections below present 15 code examples of the layers.Dropout method, ordered by popularity by default.
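
Before turning to the project examples, the following minimal sketch (not taken from any of the projects below; all values are illustrative) shows the basic behaviour of tf.keras.layers.Dropout: during training it randomly zeroes a fraction `rate` of its inputs and rescales the survivors by 1/(1-rate), while at inference time it is an identity operation.

import numpy as np
import tensorflow as tf

dropout = tf.keras.layers.Dropout(rate=0.5)  # drop roughly half of the values while training
x = np.ones((1, 4), dtype="float32")

# training=True: some entries become 0, the survivors are scaled by 1 / (1 - rate) = 2.0
print(dropout(x, training=True).numpy())

# training=False (the default at inference): the input passes through unchanged
print(dropout(x, training=False).numpy())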

Example 1: build

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Dropout [as alias]
def build(self, input_shape):
    self.W_list = []
    self.b_list = []
    self.dropouts = []
    init = initializers.get(self.init)
    prev_layer_size = self.n_graph_feat
    for layer_size in self.layer_sizes:
      self.W_list.append(init([prev_layer_size, layer_size]))
      self.b_list.append(backend.zeros(shape=[
          layer_size,
      ]))
      if self.dropout is not None and self.dropout > 0.0:
        self.dropouts.append(Dropout(rate=self.dropout))
      else:
        self.dropouts.append(None)
      prev_layer_size = layer_size
    self.W_list.append(init([prev_layer_size, self.n_outputs]))
    self.b_list.append(backend.zeros(shape=[
        self.n_outputs,
    ]))
    if self.dropout is not None and self.dropout > 0.0:
      self.dropouts.append(Dropout(rate=self.dropout))
    else:
      self.dropouts.append(None)
    self.built = True 
Developer: deepchem | Project: deepchem | Lines of code: 27 | Source file: layers.py
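
The build method above only creates the weights and the per-layer Dropout objects; it does not show how they are consumed. The sketch below is a hypothetical illustration of how such lists are typically applied. The helper name apply_dag_layers, the ReLU activation, and the training flag are assumptions for illustration, not DeepChem's actual code.

import tensorflow as tf

def apply_dag_layers(inputs, W_list, b_list, dropouts, training=True):
    # Hypothetical helper (not DeepChem's actual call method): walk through the
    # weight/bias/Dropout triples produced by build() above.
    out = inputs
    for W, b, dropout in zip(W_list, b_list, dropouts):
        out = tf.nn.relu(tf.matmul(out, W) + b)
        if dropout is not None:
            out = dropout(out, training=training)  # identity when training=False
    return out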

Example 2: _create_encoder

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Dropout [as alias]
def _create_encoder(self, n_layers, dropout):
    """Create the encoder as a tf.keras.Model."""
    input = self._create_features()
    gather_indices = Input(shape=(2,), dtype=tf.int32)
    prev_layer = input
    for i in range(len(self._filter_sizes)):
      filter_size = self._filter_sizes[i]
      kernel_size = self._kernel_sizes[i]
      if dropout > 0.0:
        prev_layer = Dropout(rate=dropout)(prev_layer)
      prev_layer = Conv1D(
          filters=filter_size, kernel_size=kernel_size,
          activation=tf.nn.relu)(prev_layer)
    prev_layer = Flatten()(prev_layer)
    prev_layer = Dense(
        self._decoder_dimension, activation=tf.nn.relu)(prev_layer)
    prev_layer = BatchNormalization()(prev_layer)
    return tf.keras.Model(inputs=[input, gather_indices], outputs=prev_layer) 
Developer: deepchem | Project: deepchem | Lines of code: 20 | Source file: seqtoseq.py

Example 3: __init__

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Dropout [as alias]
def __init__(self,
                 g,
                 in_feats,
                 n_hidden,
                 n_classes,
                 n_layers,
                 activation,
                 dropout):
        super(GCN, self).__init__()
        self.g = g
        self.layer_list = []
        # input layer
        self.layer_list.append(GraphConv(in_feats, n_hidden, activation=activation))
        # hidden layers
        for i in range(n_layers - 1):
            self.layer_list.append(GraphConv(n_hidden, n_hidden, activation=activation))
        # output layer
        self.layer_list.append(GraphConv(n_hidden, n_classes))
        self.dropout = layers.Dropout(dropout) 
Developer: dmlc | Project: dgl | Lines of code: 21 | Source file: gcn.py
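
For context, a typical forward pass for such a GCN applies dropout to the hidden features before every GraphConv layer except the first. The sketch below is illustrative only and assumes DGL's GraphConv call convention layer(graph, features); it is not copied from the dgl repository.

    # Hypothetical call method for the GCN above (illustrative, not dgl's exact code).
    def call(self, features):
        h = features
        for i, layer in enumerate(self.layer_list):
            if i != 0:
                h = self.dropout(h)   # no dropout on the raw input features
            h = layer(self.g, h)
        return h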

Example 4: __init__

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Dropout [as alias]
def __init__(self,
                 g,
                 in_feats,
                 n_hidden,
                 n_classes,
                 n_layers,
                 activation,
                 dropout):
        super(GCN, self).__init__()
        self.g = g
        # NB: if this class subclasses tf.keras.Model, assigning to self.layers
        # collides with the model's read-only `layers` property; Example 3
        # sidesteps this by using the name layer_list instead.
        self.layers = []
        # input layer
        self.layers.append(GraphConv(in_feats, n_hidden, activation=activation))
        # hidden layers
        for i in range(n_layers - 1):
            self.layers.append(GraphConv(n_hidden, n_hidden, activation=activation))
        # output layer
        self.layers.append(GraphConv(n_hidden, n_classes))
        self.dropout = layers.Dropout(dropout) 
Developer: dmlc | Project: dgl | Lines of code: 21 | Source file: gcn.py

Example 5: __init__

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Dropout [as alias]
def __init__(self,
                 in_feats,
                 out_feats,
                 aggregator_type,
                 feat_drop=0.,
                 bias=True,
                 norm=None,
                 activation=None):
        super(SAGEConv, self).__init__()

        self._in_src_feats, self._in_dst_feats = expand_as_pair(in_feats)
        self._out_feats = out_feats
        self._aggre_type = aggregator_type
        self.norm = norm
        self.feat_drop = layers.Dropout(feat_drop)
        self.activation = activation
        # aggregator type: mean/pool/lstm/gcn
        if aggregator_type == 'pool':
            self.fc_pool = layers.Dense(self._in_src_feats)
        if aggregator_type == 'lstm':
            self.lstm = layers.LSTM(units=self._in_src_feats)
        if aggregator_type != 'gcn':
            self.fc_self = layers.Dense(out_feats, use_bias=bias)
        self.fc_neigh = layers.Dense(out_feats, use_bias=bias) 
Developer: dmlc | Project: dgl | Lines of code: 26 | Source file: sageconv.py

Example 6: __init__

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Dropout [as alias]
def __init__(self,
                 in_channels,
                 out_channels,
                 bottleneck_factor=4,
                 data_format="channels_last",
                 **kwargs):
        super(DeepLabv3FinalBlock, self).__init__(**kwargs)
        assert (in_channels % bottleneck_factor == 0)
        self.data_format = data_format
        mid_channels = in_channels // bottleneck_factor

        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv1")
        self.dropout = nn.Dropout(
            rate=0.1,
            name="dropout")
        self.conv2 = conv1x1(
            in_channels=mid_channels,
            out_channels=out_channels,
            use_bias=True,
            data_format=data_format,
            name="conv2") 
Developer: osmr | Project: imgclsmob | Lines of code: 27 | Source file: deeplabv3.py
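
The constructor above only wires up the sub-layers. A typical call method for such a final block chains them as 3x3 conv -> Dropout -> 1x1 classifier conv, as sketched below. This is an illustration, not imgclsmob's exact code; the real block may also upsample the logits to the target resolution, which is omitted here. Examples 9 and 10 follow the same pattern.

    # Hypothetical call method for the block above (illustrative, not imgclsmob's exact code).
    def call(self, x, training=None):
        x = self.conv1(x, training=training)
        x = self.dropout(x, training=training)  # rate=0.1, active only while training
        x = self.conv2(x)
        return x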

Example 7: __init__

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Dropout [as alias]
def __init__(self,
                 in_channels,
                 out_channels,
                 dropout_rate,
                 data_format="channels_last",
                 **kwargs):
        super(DenseSimpleUnit, self).__init__(**kwargs)
        self.data_format = data_format
        self.use_dropout = (dropout_rate != 0.0)
        inc_channels = out_channels - in_channels

        self.conv = pre_conv3x3_block(
            in_channels=in_channels,
            out_channels=inc_channels,
            data_format=data_format,
            name="conv")
        if self.use_dropout:
            self.dropout = nn.Dropout(
                rate=dropout_rate,
                name="dropout") 
Developer: osmr | Project: imgclsmob | Lines of code: 22 | Source file: densenet_cifar.py

Example 8: __init__

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Dropout [as alias]
def __init__(self,
                 in_channels,
                 upscale_out_size,
                 bottleneck_factor,
                 data_format="channels_last",
                 **kwargs):
        super(PSPBlock, self).__init__(**kwargs)
        assert (in_channels % bottleneck_factor == 0)
        mid_channels = in_channels // bottleneck_factor

        self.pool = PyramidPooling(
            in_channels=in_channels,
            upscale_out_size=upscale_out_size,
            data_format=data_format,
            name="pool")
        self.conv = conv3x3_block(
            in_channels=4096,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv")
        self.dropout = nn.Dropout(
            rate=0.1,
            name="dropout") 
Developer: osmr | Project: imgclsmob | Lines of code: 25 | Source file: icnet.py

Example 9: __init__

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Dropout [as alias]
def __init__(self,
                 in_channels,
                 out_channels,
                 bottleneck_factor=4,
                 data_format="channels_last",
                 **kwargs):
        super(PSPFinalBlock, self).__init__(**kwargs)
        assert (in_channels % bottleneck_factor == 0)
        self.data_format = data_format
        mid_channels = in_channels // bottleneck_factor

        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv1")
        self.dropout = nn.Dropout(
            rate=0.1,
            name="dropout")
        self.conv2 = conv1x1(
            in_channels=mid_channels,
            out_channels=out_channels,
            use_bias=True,
            data_format=data_format,
            name="conv2") 
Developer: osmr | Project: imgclsmob | Lines of code: 27 | Source file: pspnet.py

Example 10: __init__

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Dropout [as alias]
def __init__(self,
                 in_channels,
                 out_channels,
                 bottleneck_factor=4,
                 data_format="channels_last",
                 **kwargs):
        super(FCNFinalBlock, self).__init__(**kwargs)
        assert (in_channels % bottleneck_factor == 0)
        self.data_format = data_format
        mid_channels = in_channels // bottleneck_factor

        self.conv1 = conv3x3_block(
            in_channels=in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="conv1")
        self.dropout = nn.Dropout(
            rate=0.1,
            name="dropout")
        self.conv2 = conv1x1(
            in_channels=mid_channels,
            out_channels=out_channels,
            use_bias=True,
            data_format=data_format,
            name="conv2") 
Developer: osmr | Project: imgclsmob | Lines of code: 27 | Source file: fcn8sd.py

Example 11: __init__

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Dropout [as alias]
def __init__(self, n_symbols: int, dropout: float = 0, use_pfam_alphabet: bool = True):
        super().__init__()

        self._use_pfam_alphabet = use_pfam_alphabet

        if use_pfam_alphabet:
            self.embed = Embedding(n_symbols, n_symbols)
        else:
            n_symbols = 21
            self.embed = Embedding(n_symbols + 1, n_symbols)

        self.dropout = Dropout(dropout)
        self.rnn = Stack([
            LSTM(1024, return_sequences=True, use_bias=True,
                 implementation=2, recurrent_activation='sigmoid'),
            LSTM(1024, return_sequences=True, use_bias=True,
                 implementation=2, recurrent_activation='sigmoid')])

        self.compute_logits = Dense(n_symbols, use_bias=True, activation='linear') 
Developer: songlab-cal | Project: tape-neurips2019 | Lines of code: 21 | Source file: BeplerModel.py

Example 12: __init__

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Dropout [as alias]
def __init__(self,
                 n_symbols: int,
                 n_units: int = 1024,
                 n_layers: int = 3,
                 dropout: Optional[float] = 0.1) -> None:
        super().__init__(n_symbols)

        if dropout is None:
            dropout = 0

        self.embedding = Embedding(n_symbols, 128)

        self.forward_lstm = Stack([
            LSTM(n_units,
                 return_sequences=True) for _ in range(n_layers)],
            name='forward_lstm')

        self.reverse_lstm = Stack([
            LSTM(n_units,
                 return_sequences=True) for _ in range(n_layers)],
            name='reverse_lstm')

        self.dropout = Dropout(dropout) 
Developer: songlab-cal | Project: tape-neurips2019 | Lines of code: 25 | Source file: BidirectionalLSTM.py

Example 13: get_model

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Dropout [as alias]
def get_model(args):
    model = models.Sequential()
    model.add(
        layers.Conv2D(args.conv1_size, (3, 3), activation=args.conv_activation, input_shape=(28, 28, 1)))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(args.conv2_size, (3, 3), activation=args.conv_activation))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation=args.conv_activation))
    model.add(layers.Dropout(args.dropout))
    model.add(layers.Flatten())
    model.add(layers.Dense(args.hidden1_size, activation=args.dense_activation))
    model.add(layers.Dense(10, activation='softmax'))

    model.summary()

    model.compile(optimizer=OPTIMIZERS[args.optimizer](learning_rate=args.learning_rate),
                  loss=args.loss,
                  metrics=['accuracy'])

    return model 
Developer: polyaxon | Project: polyaxon-examples | Lines of code: 22 | Source file: run.py
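
The function above refers to an OPTIMIZERS dictionary defined elsewhere in the project. A hypothetical invocation might look like the following; the OPTIMIZERS mapping and all hyperparameter values are illustrative, not taken from the polyaxon-examples repository.

from argparse import Namespace
from tensorflow.keras import optimizers

# Illustrative stand-in for the project's OPTIMIZERS mapping.
OPTIMIZERS = {"adam": optimizers.Adam, "sgd": optimizers.SGD}

# Illustrative hyperparameters; a real run would parse these from the command line.
args = Namespace(
    conv1_size=32, conv2_size=64, conv_activation="relu",
    dropout=0.25, hidden1_size=128, dense_activation="relu",
    optimizer="adam", learning_rate=1e-3, loss="sparse_categorical_crossentropy")

model = get_model(args)  # get_model as defined in Example 13 above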

Example 14: load

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Dropout [as alias]
def load(input_shape, output_shape, cfg):
    nb_lstm_states = int(cfg['nb_lstm_states'])

    inputs = KL.Input(shape=input_shape)
    x = KL.CuDNNLSTM(units=nb_lstm_states, unit_forget_bias=True)(inputs)

    x = KL.Dense(512)(x)
    x = KL.Activation('relu')(x)
    x = KL.Dropout(0.2)(x)

    x = KL.Dense(256)(x)
    x = KL.Activation('relu')(x)
    x = KL.Dropout(0.3)(x)

    mu = KL.Dense(1)(x)
    std = KL.Dense(1)(x)
    activation_fn = get_activation_function_by_name(cfg['activation_function'])
    std = KL.Activation(activation_fn, name="exponential_activation")(std)

    output = KL.Concatenate(axis=-1)([std, mu])
    model = KM.Model(inputs=[inputs], outputs=[output])

    return model 
Developer: johnmartinsson | Project: blood-glucose-prediction | Lines of code: 26 | Source file: lstm_experiment_keras.py

Example 15: test_load_persist

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Dropout [as alias]
def test_load_persist(self):
        # define the model.
        model = Sequential()
        model.add(Dense(16, input_shape=(10,)))
        model.add(Dropout(0.5))
        model.add(Dense(10, activation='softmax'))
        model.compile(optimizer='adam', loss='categorical_crossentropy')

        # fetch activations.
        x = np.ones((2, 10))
        activations = get_activations(model, x)

        # persist the activations to the disk.
        output = 'activations.json'
        persist_to_json_file(activations, output)

        # read them from the disk.
        activations2 = load_activations_from_json_file(output)

        for a1, a2 in zip(list(activations.values()), list(activations2.values())):
            np.testing.assert_almost_equal(a1, a2) 
Developer: philipperemy | Project: keract | Lines of code: 23 | Source file: persist_load_test.py


Note: The tensorflow.keras.layers.Dropout examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please consult the corresponding project's license before redistributing or reusing the code; reproduction without permission is prohibited.