

Python layers.Conv1D Method Code Examples

This article collects typical usage examples of the Python method tensorflow.keras.layers.Conv1D. If you are wondering how layers.Conv1D is used in practice, the curated code examples below may help. You can also explore other usage examples from the tensorflow.keras.layers module.


Six code examples of the layers.Conv1D method are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
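Before looking at the project-specific examples, here is a minimal standalone sketch of how Conv1D behaves on a batch of sequences (the shapes and layer parameters below are illustrative and not taken from any of the projects cited later):

import tensorflow as tf
from tensorflow.keras import layers

# A batch of 4 sequences, each 100 time steps long with 8 features per step.
x = tf.random.normal((4, 100, 8))

# 16 filters of width 5 slide along the time axis; with the default
# padding='valid' the output length shrinks to 100 - 5 + 1 = 96.
conv = layers.Conv1D(filters=16, kernel_size=5, activation='relu')
y = conv(x)
print(y.shape)  # (4, 96, 16)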

Example 1: _create_encoder

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Conv1D [as alias]
def _create_encoder(self, n_layers, dropout):
    """Create the encoder as a tf.keras.Model."""
    input = self._create_features()
    gather_indices = Input(shape=(2,), dtype=tf.int32)
    prev_layer = input
    for i in range(len(self._filter_sizes)):
      filter_size = self._filter_sizes[i]
      kernel_size = self._kernel_sizes[i]
      if dropout > 0.0:
        prev_layer = Dropout(rate=dropout)(prev_layer)
      prev_layer = Conv1D(
          filters=filter_size, kernel_size=kernel_size,
          activation=tf.nn.relu)(prev_layer)
    prev_layer = Flatten()(prev_layer)
    prev_layer = Dense(
        self._decoder_dimension, activation=tf.nn.relu)(prev_layer)
    prev_layer = BatchNormalization()(prev_layer)
    return tf.keras.Model(inputs=[input, gather_indices], outputs=prev_layer) 
Developer: deepchem, Project: deepchem, Lines of code: 20, Source file: seqtoseq.py

Example 2: __init__

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Conv1D [as alias]
def __init__(self,
                 n_symbols: int,  # This argument is required!
                 filters: int = 32  # There's no way to change this
                                    # from the commandline - see `my_simple_model_with_hparams.py`
                 ) -> None:
        super().__init__(n_symbols)

        self.input_embedding = Embedding(n_symbols, 10)
        self.conv1d = Conv1D(filters=filters, kernel_size=7, strides=1, padding='same') 
Developer: songlab-cal, Project: tape-neurips2019, Lines of code: 11, Source file: simple_model.py
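The excerpt above only shows the constructor. A hypothetical call method, written here purely as a sketch and not part of the original tape-neurips2019 code, would chain the embedding and the convolution like this (the SimpleConvModel wrapper and the tf.keras.Model base class are assumptions standing in for the project's own base class):

import tensorflow as tf
from tensorflow.keras.layers import Embedding, Conv1D

class SimpleConvModel(tf.keras.Model):
    """Hypothetical stand-in that adds the call() the excerpt omits."""

    def __init__(self, n_symbols: int, filters: int = 32) -> None:
        super().__init__()
        self.input_embedding = Embedding(n_symbols, 10)
        self.conv1d = Conv1D(filters=filters, kernel_size=7, strides=1, padding='same')

    def call(self, inputs):
        # inputs: integer token ids, shape (batch, sequence_length)
        x = self.input_embedding(inputs)  # -> (batch, sequence_length, 10)
        # padding='same' preserves the sequence length.
        return self.conv1d(x)             # -> (batch, sequence_length, filters)

model = SimpleConvModel(n_symbols=30)
tokens = tf.random.uniform((2, 50), maxval=30, dtype=tf.int32)
print(model(tokens).shape)  # (2, 50, 32)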

Example 3: define_model

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Conv1D [as alias]
def define_model(self):

         inputs = tf.keras.Input(shape=(n_inputs, 1), name='input')

         # 64 filters, 10 kernel size
         x = Conv1D(64, 10, activation='relu')(inputs)
         x = MaxPool1D()(x)
         x = BatchNormalization()(x)

         x = Conv1D(128, 10, activation='relu')(x)
         x = MaxPool1D()(x)
         x = BatchNormalization()(x)

         x = Conv1D(128, 10, activation='relu')(x)
         x = MaxPool1D()(x)
         x = BatchNormalization()(x)

         x = Conv1D(256, 10, activation='relu')(x)
         x = MaxPool1D()(x)
         x = BatchNormalization()(x)

         x = Flatten()(x)
         x = Dense(1024, activation='relu', name='dense_1')(x)
         x = BatchNormalization()(x)
         x = Dropout(dropout)(x)

         x = Dense(2048, activation='relu', name='dense_2')(x)
         x = BatchNormalization()(x)
         x = Dropout(dropout)(x)

         outputs = Dense(n_classes, activation='softmax', name='predictions')(x)

         self.cnn_model = tf.keras.Model(inputs=inputs, outputs=outputs)
         optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
         accuracy = CategoricalAccuracy()
         self.cnn_model.compile(optimizer=optimizer, loss='categorical_crossentropy',
                                metrics=[accuracy]) 
Developer: hedrox, Project: ecg-classification, Lines of code: 39, Source file: cnn_tf2.py
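The method above refers to module-level hyperparameters (n_inputs, n_classes, dropout, learning_rate) that are defined elsewhere in cnn_tf2.py. The placeholder values below are assumptions, shown only to make the expected input and label shapes explicit:

import numpy as np

# Placeholder hyperparameters -- the real values live elsewhere in cnn_tf2.py.
n_inputs = 1000        # samples per input segment (assumption)
n_classes = 5          # number of target classes (assumption)
dropout = 0.5
learning_rate = 1e-3

# Shapes the compiled model expects:
#   inputs : (batch, n_inputs, 1) float32
#   labels : (batch, n_classes) one-hot vectors, matching the softmax output
#            and the categorical_crossentropy loss used in compile().
x_batch = np.random.randn(32, n_inputs, 1).astype('float32')
y_batch = np.eye(n_classes)[np.random.randint(n_classes, size=32)]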

Example 4: __init__

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Conv1D [as alias]
def __init__(self,
                 filters: int,
                 kernel_size: int,
                 dilation_rate: int,
                 dropout_rate: float,
                 activation: str,
                 **kwargs):
        super(ResidualBlock, self).__init__(**kwargs)

        self.filters = filters

        self.causal_conv_1 = layers.Conv1D(filters=self.filters,
                                           kernel_size=kernel_size,
                                           dilation_rate=dilation_rate,
                                           padding='causal')
        self.weight_norm_1 = layers.LayerNormalization()
        self.dropout_1 = layers.SpatialDropout1D(rate=dropout_rate)
        self.activation_1 = layers.Activation(activation)

        self.causal_conv_2 = layers.Conv1D(filters=self.filters,
                                           kernel_size=kernel_size,
                                           dilation_rate=dilation_rate,
                                           padding='causal')
        self.weight_norm_2 = layers.LayerNormalization()
        self.dropout_2 = layers.SpatialDropout1D(rate=dropout_rate)
        self.activation_2 = layers.Activation(activation)

        self.activation_3 = layers.Activation(activation) 
Developer: 1044197988, Project: TF.Keras-Commonly-used-models, Lines of code: 30, Source file: tcn.py

Example 5: build

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Conv1D [as alias]
def build(self, input_shape):
        in_channels = input_shape[-1]
        if in_channels == self.filters:
            self.skip_conv = None
        else:
            self.skip_conv = layers.Conv1D(filters=self.filters,
                                           kernel_size=1)

        super(ResidualBlock, self).build(input_shape) 
Developer: 1044197988, Project: TF.Keras-Commonly-used-models, Lines of code: 11, Source file: tcn.py
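Examples 4 and 5 define the layers of the TCN residual block and its optional 1x1 skip convolution, but omit the call method. The sketch below, written as a method of the same ResidualBlock class, is an assumption about how the pieces are typically wired together in a temporal convolutional network, not the original tcn.py code:

    def call(self, inputs, training=None):
        # First causal convolution -> normalization -> activation -> dropout.
        x = self.causal_conv_1(inputs)
        x = self.weight_norm_1(x)
        x = self.activation_1(x)
        x = self.dropout_1(x, training=training)

        # Second causal convolution block.
        x = self.causal_conv_2(x)
        x = self.weight_norm_2(x)
        x = self.activation_2(x)
        x = self.dropout_2(x, training=training)

        # Project the input with the 1x1 conv when channel counts differ,
        # then add the skip connection and apply the final activation.
        skip = inputs if self.skip_conv is None else self.skip_conv(inputs)
        return self.activation_3(x + skip)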

Example 6: _build_graph

# Required import: from tensorflow.keras import layers [as alias]
# Or: from tensorflow.keras.layers import Conv1D [as alias]
def _build_graph(self):
    """Build the model."""
    smiles_seqs = Input(dtype=tf.int32, shape=(self.max_seq_len,), name='Input')
    rnn_input = tf.keras.layers.Embedding(
        input_dim=len(self.char_to_idx),
        output_dim=self.embedding_dim)(smiles_seqs)

    if self.use_conv:
      rnn_input = Conv1D(
          filters=self.filters,
          kernel_size=self.kernel_size,
          strides=self.strides,
          activation=tf.nn.relu,
          name='Conv1D')(rnn_input)

    rnn_embeddings = rnn_input
    for idx, rnn_type in enumerate(self.rnn_types[:-1]):
      rnn_layer = RNN_DICT[rnn_type]
      layer = rnn_layer(units=self.rnn_sizes[idx], return_sequences=True)
      if self.use_bidir:
        layer = Bidirectional(layer)

      rnn_embeddings = layer(rnn_embeddings)

    # Last layer sequences not returned.
    layer = RNN_DICT[self.rnn_types[-1]](units=self.rnn_sizes[-1])
    if self.use_bidir:
      layer = Bidirectional(layer)
    rnn_embeddings = layer(rnn_embeddings)

    if self.mode == "classification":
      logits = Dense(self.n_tasks * self.n_classes)(rnn_embeddings)
      logits = Reshape((self.n_tasks, self.n_classes))(logits)
      if self.n_classes == 2:
        output = Activation(activation='sigmoid')(logits)
        loss = SigmoidCrossEntropy()
      else:
        output = Softmax()(logits)
        loss = SoftmaxCrossEntropy()
      outputs = [output, logits]
      output_types = ['prediction', 'loss']

    else:
      output = Dense(self.n_tasks * 1, name='Dense')(rnn_embeddings)
      output = Reshape((self.n_tasks, 1), name='Reshape')(output)
      outputs = [output]
      output_types = ['prediction']
      loss = L2Loss()

    model = tf.keras.Model(inputs=[smiles_seqs], outputs=outputs)
    return model, loss, output_types 
Developer: deepchem, Project: deepchem, Lines of code: 53, Source file: chemnet_models.py
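The snippet depends on a RNN_DICT lookup table, defined elsewhere in chemnet_models.py, that maps configuration strings to Keras recurrent layer classes. A plausible definition, shown here only as an assumption to make the example readable, is:

from tensorflow.keras.layers import GRU, LSTM

# Assumed mapping from configuration strings to Keras recurrent layers;
# the actual keys used by deepchem may differ.
RNN_DICT = {'GRU': GRU, 'LSTM': LSTM}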


Note: The tensorflow.keras.layers.Conv1D examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; copyright of the source code belongs to the original authors. Please consult each project's license before distributing or using the code, and do not reproduce this article without permission.