本文整理匯總了Python中tensorflow.keras.layers.Conv1D方法的典型用法代碼示例。如果您正苦於以下問題:Python layers.Conv1D方法的具體用法?Python layers.Conv1D怎麽用?Python layers.Conv1D使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類tensorflow.keras.layers
的用法示例。
在下文中一共展示了layers.Conv1D方法的6個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: _create_encoder
# 需要導入模塊: from tensorflow.keras import layers [as 別名]
# 或者: from tensorflow.keras.layers import Conv1D [as 別名]
def _create_encoder(self, n_layers, dropout):
    """Create the encoder as a tf.keras.Model.

    Args:
        n_layers: unused in this body; retained for interface compatibility.
        dropout: dropout rate applied before each conv layer; skipped if <= 0.

    Returns:
        tf.keras.Model mapping [features, gather_indices] -> encoded vector.
    """
    # Renamed from `input` to avoid shadowing the builtin.
    features = self._create_features()
    gather_indices = Input(shape=(2,), dtype=tf.int32)
    prev_layer = features
    # Stack of Conv1D layers; `zip` pairs the per-layer filter/kernel configs
    # instead of indexing two parallel lists with range(len(...)).
    for filter_size, kernel_size in zip(self._filter_sizes, self._kernel_sizes):
        if dropout > 0.0:
            prev_layer = Dropout(rate=dropout)(prev_layer)
        prev_layer = Conv1D(
            filters=filter_size, kernel_size=kernel_size,
            activation=tf.nn.relu)(prev_layer)
    prev_layer = Flatten()(prev_layer)
    prev_layer = Dense(
        self._decoder_dimension, activation=tf.nn.relu)(prev_layer)
    prev_layer = BatchNormalization()(prev_layer)
    return tf.keras.Model(inputs=[features, gather_indices], outputs=prev_layer)
示例2: __init__
# 需要導入模塊: from tensorflow.keras import layers [as 別名]
# 或者: from tensorflow.keras.layers import Conv1D [as 別名]
def __init__(self,
             n_symbols: int,  # This argument is required!
             filters: int = 32  # There's no way to change this
             # from the commandline - see `my_simple_model_with_hparams.py`
             ) -> None:
    """Set up an embedding followed by a length-preserving Conv1D."""
    super().__init__(n_symbols)
    # Each symbol id is mapped to a 10-dimensional dense vector.
    self.input_embedding = Embedding(n_symbols, 10)
    # Width-7 convolution; stride 1 with 'same' padding keeps sequence length.
    self.conv1d = Conv1D(
        filters=filters,
        kernel_size=7,
        strides=1,
        padding='same',
    )
示例3: define_model
# 需要導入模塊: from tensorflow.keras import layers [as 別名]
# 或者: from tensorflow.keras.layers import Conv1D [as 別名]
def define_model(self):
    """Build and compile the 1-D CNN classifier into `self.cnn_model`.

    Relies on module-level configuration visible elsewhere in the file:
    `n_inputs`, `dropout`, `n_classes`, `learning_rate`.
    """
    inputs = tf.keras.Input(shape=(n_inputs, 1), name='input')
    x = inputs
    # Four identical conv stages (kernel size 10, ReLU), each followed by
    # max-pooling and batch normalization; a loop replaces the copy-paste.
    for filters in (64, 128, 128, 256):
        x = Conv1D(filters, 10, activation='relu')(x)
        x = MaxPool1D()(x)
        x = BatchNormalization()(x)
    x = Flatten()(x)
    # Two wide fully-connected stages with batch-norm and dropout.
    for units, name in ((1024, 'dense_1'), (2048, 'dense_2')):
        x = Dense(units, activation='relu', name=name)(x)
        x = BatchNormalization()(x)
        x = Dropout(dropout)(x)
    outputs = Dense(n_classes, activation='softmax', name='predictions')(x)
    self.cnn_model = tf.keras.Model(inputs=inputs, outputs=outputs)
    # `lr` is deprecated (and removed in newer Keras optimizers);
    # `learning_rate` is the supported keyword.
    optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
    accuracy = CategoricalAccuracy()
    self.cnn_model.compile(optimizer=optimizer, loss='categorical_crossentropy',
                           metrics=[accuracy])
示例4: __init__
# 需要導入模塊: from tensorflow.keras import layers [as 別名]
# 或者: from tensorflow.keras.layers import Conv1D [as 別名]
def __init__(self,
             filters: int,
             kernel_size: int,
             dilation_rate: int,
             dropout_rate: float,
             activation: str,
             **kwargs):
    """Construct two dilated causal-conv stages, each with normalization,
    spatial dropout, and an activation, plus one extra activation layer."""
    super(ResidualBlock, self).__init__(**kwargs)
    self.filters = filters

    def _causal_conv():
        # Both stages use an identically-configured dilated causal conv.
        return layers.Conv1D(filters=self.filters,
                             kernel_size=kernel_size,
                             dilation_rate=dilation_rate,
                             padding='causal')

    # First stage.
    self.causal_conv_1 = _causal_conv()
    # NOTE(review): attribute is named "weight_norm" but holds a
    # LayerNormalization layer — confirm this naming is intentional.
    self.weight_norm_1 = layers.LayerNormalization()
    self.dropout_1 = layers.SpatialDropout1D(rate=dropout_rate)
    self.activation_1 = layers.Activation(activation)
    # Second stage.
    self.causal_conv_2 = _causal_conv()
    self.weight_norm_2 = layers.LayerNormalization()
    self.dropout_2 = layers.SpatialDropout1D(rate=dropout_rate)
    self.activation_2 = layers.Activation(activation)
    # Extra activation (presumably applied to the residual sum in call() —
    # confirm against the rest of the class).
    self.activation_3 = layers.Activation(activation)
示例5: build
# 需要導入模塊: from tensorflow.keras import layers [as 別名]
# 或者: from tensorflow.keras.layers import Conv1D [as 別名]
def build(self, input_shape):
    """Create a 1x1 skip projection only when channel counts differ."""
    in_channels = input_shape[-1]
    if in_channels != self.filters:
        # Project the input to `filters` channels so the residual add works.
        self.skip_conv = layers.Conv1D(filters=self.filters, kernel_size=1)
    else:
        # Channel counts already match; no projection needed.
        self.skip_conv = None
    super(ResidualBlock, self).build(input_shape)
示例6: _build_graph
# 需要導入模塊: from tensorflow.keras import layers [as 別名]
# 或者: from tensorflow.keras.layers import Conv1D [as 別名]
def _build_graph(self):
    """Build the model.

    Returns:
        (model, loss, output_types): a tf.keras.Model over integer-encoded
        SMILES sequences, the loss object, and per-output type labels.
    """
    # Integer-encoded SMILES tokens, fixed length self.max_seq_len.
    smiles_seqs = Input(dtype=tf.int32, shape=(self.max_seq_len,), name='Input')
    # Embed token ids; vocabulary size comes from the char->index map.
    rnn_input = tf.keras.layers.Embedding(
        input_dim=len(self.char_to_idx),
        output_dim=self.embedding_dim)(smiles_seqs)
    if self.use_conv:
        # Optional Conv1D front-end over the embedded sequence.
        rnn_input = Conv1D(
            filters=self.filters,
            kernel_size=self.kernel_size,
            strides=self.strides,
            activation=tf.nn.relu,
            name='Conv1D')(rnn_input)
    rnn_embeddings = rnn_input
    # All RNN layers except the last return full sequences so they can stack.
    for idx, rnn_type in enumerate(self.rnn_types[:-1]):
        rnn_layer = RNN_DICT[rnn_type]
        layer = rnn_layer(units=self.rnn_sizes[idx], return_sequences=True)
        if self.use_bidir:
            layer = Bidirectional(layer)
        rnn_embeddings = layer(rnn_embeddings)
    # Last layer sequences not returned.
    layer = RNN_DICT[self.rnn_types[-1]](units=self.rnn_sizes[-1])
    if self.use_bidir:
        layer = Bidirectional(layer)
    rnn_embeddings = layer(rnn_embeddings)
    if self.mode == "classification":
        # One logit group per task, reshaped to (n_tasks, n_classes).
        logits = Dense(self.n_tasks * self.n_classes)(rnn_embeddings)
        logits = Reshape((self.n_tasks, self.n_classes))(logits)
        if self.n_classes == 2:
            # Binary case: sigmoid activation with sigmoid cross-entropy.
            output = Activation(activation='sigmoid')(logits)
            loss = SigmoidCrossEntropy()
        else:
            output = Softmax()(logits)
            loss = SoftmaxCrossEntropy()
        # Raw logits are exposed as a second output for the loss computation.
        outputs = [output, logits]
        output_types = ['prediction', 'loss']
    else:
        # Regression: one scalar per task, trained with L2 loss.
        output = Dense(self.n_tasks * 1, name='Dense')(rnn_embeddings)
        output = Reshape((self.n_tasks, 1), name='Reshape')(output)
        outputs = [output]
        output_types = ['prediction']
        loss = L2Loss()
    model = tf.keras.Model(inputs=[smiles_seqs], outputs=outputs)
    return model, loss, output_types