This article collects typical usage examples of the Python method tensorflow.python.keras.models.Sequential. If you are unsure what models.Sequential does, how to call it, or what real-world usage looks like, the curated code samples below may help. You can also read further about the containing module, tensorflow.python.keras.models.
The following 11 code examples of models.Sequential are listed, by default in order of popularity. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
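Before turning to the examples, here is a minimal, self-contained sketch of what models.Sequential does on its own (the layer sizes and activations below are arbitrary, chosen purely for illustration):

import tensorflow as tf
from tensorflow.python.keras import models
from tensorflow.python.keras.layers import Dense

# Layers are stacked in order; each layer's output feeds the next layer's input.
toy_model = models.Sequential()
toy_model.add(Dense(8, activation=tf.nn.relu, input_shape=(4,)))   # 4 input features
toy_model.add(Dense(1, activation=tf.nn.sigmoid))                  # binary output
toy_model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
toy_model.summary()  # prints the layer stack and parameter counts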
Example 1: keras_estimator
# Required import: from tensorflow.python.keras import models [as alias]
# Or: from tensorflow.python.keras.models import Sequential [as alias]
# Also assumed by this example: import tensorflow as tf
#                                from tensorflow.python.keras.layers import Embedding, GlobalAveragePooling1D, Dense
def keras_estimator(model_dir, config, learning_rate, vocab_size):
    """Creates a Keras Sequential model with layers.

    Args:
        model_dir: (str) file path where training files will be written.
        config: (tf.estimator.RunConfig) Configuration options to save model.
        learning_rate: (float) Learning rate.
        vocab_size: (int) Size of the vocabulary in number of words.

    Returns:
        A tf.estimator.Estimator wrapping the compiled Keras model.
    """
    model = models.Sequential()
    model.add(Embedding(vocab_size, 16))
    model.add(GlobalAveragePooling1D())
    model.add(Dense(16, activation=tf.nn.relu))
    model.add(Dense(1, activation=tf.nn.sigmoid))

    # Compile model with learning parameters.
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
    model.compile(
        optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])
    estimator = tf.keras.estimator.model_to_estimator(
        keras_model=model, model_dir=model_dir, config=config)
    return estimator
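For context, a sketch of how the estimator returned by Example 1 might be driven; the model_dir, hyperparameter values and the two input functions are hypothetical placeholders, not part of the original sample:

# train_input_fn / eval_input_fn are placeholders and must be supplied by the caller.
run_config = tf.estimator.RunConfig(save_checkpoints_steps=500)
estimator = keras_estimator(model_dir='/tmp/text_model', config=run_config,
                            learning_rate=0.001, vocab_size=10000)
train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=1000)
eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)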
Example 2: crnn_model
# Required import: from tensorflow.python.keras import models [as alias]
# Or: from tensorflow.python.keras.models import Sequential [as alias]
# Also assumed by this example: from tensorflow.python.keras.layers import Conv1D, LSTM, Dense
def crnn_model(width=100, n_vars=6, n_classes=7, conv_kernel_size=5,
               conv_filters=3, lstm_units=3):
    # CRNN: two 1-D convolution layers followed by an LSTM and a softmax classifier.
    input_shape = (width, n_vars)
    model = Sequential()
    model.add(Conv1D(filters=conv_filters, kernel_size=conv_kernel_size,
                     padding='valid', activation='relu', input_shape=input_shape))
    model.add(Conv1D(filters=conv_filters, kernel_size=conv_kernel_size,
                     padding='valid', activation='relu'))
    model.add(LSTM(units=lstm_units, dropout=0.1, recurrent_dropout=0.1))
    model.add(Dense(n_classes, activation="softmax"))
    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    return model

# load the data
Example 3: crnn_model
# Required import: from tensorflow.python.keras import models [as alias]
# Or: from tensorflow.python.keras.models import Sequential [as alias]
# Also assumed by this example: from tensorflow.python.keras.layers import Conv1D, LSTM, Dense
def crnn_model(width=100, n_vars=6, n_classes=7, conv_kernel_size=5,
               conv_filters=2, lstm_units=2):
    # create a CRNN model in Keras with one CNN layer and one RNN layer
    input_shape = (width, n_vars)
    model = Sequential()
    model.add(Conv1D(filters=conv_filters, kernel_size=conv_kernel_size,
                     padding='valid', activation='relu', input_shape=input_shape))
    model.add(LSTM(units=lstm_units, dropout=0.1, recurrent_dropout=0.1))
    model.add(Dense(n_classes, activation="softmax"))
    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    return model

# load the data
Example 4: crnn_model
# Required import: from tensorflow.python.keras import models [as alias]
# Or: from tensorflow.python.keras.models import Sequential [as alias]
# Also assumed by this example: from tensorflow.python.keras.layers import Conv1D, LSTM, Dense
def crnn_model(width=100, n_vars=6, n_classes=7, conv_kernel_size=5,
               conv_filters=3, lstm_units=3):
    # CRNN: one 1-D convolution layer, one LSTM layer and a softmax classifier.
    input_shape = (width, n_vars)
    model = Sequential()
    model.add(Conv1D(filters=conv_filters, kernel_size=conv_kernel_size,
                     padding='valid', activation='relu', input_shape=input_shape))
    model.add(LSTM(units=lstm_units, dropout=0.1, recurrent_dropout=0.1))
    model.add(Dense(n_classes, activation="softmax"))
    model.compile(loss='categorical_crossentropy', optimizer='adam',
                  metrics=['accuracy'])
    return model

##############################################
# Setup
##############################################
# load the data
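A sketch of how any of the crnn_model variants (Examples 2 to 4) could be exercised end to end; the data here is random and purely illustrative, with shapes following the default arguments:

import numpy as np
import tensorflow as tf

# Synthetic dataset: 32 sequences, each 100 time steps of 6 variables, 7 classes.
x = np.random.rand(32, 100, 6).astype('float32')
y = tf.keras.utils.to_categorical(np.random.randint(0, 7, size=32), num_classes=7)

model = crnn_model(width=100, n_vars=6, n_classes=7)
model.fit(x, y, epochs=1, batch_size=8, verbose=0)
probs = model.predict(x)  # shape (32, 7): softmax class probabilities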
Example 5: set_last_layer_to_random
# Required import: from tensorflow.python.keras import models [as alias]
# Or: from tensorflow.python.keras.models import Sequential [as alias]
# Also assumed by this example: import logging
def set_last_layer_to_random(model_trained, model_random):
    """Replace the last layer of model_trained with the corresponding
    randomly initialized layer from model_random."""
    logging.info("Replacing layers of model with random layers")
    layer_names = [x.name for x in model_trained.layers]
    layer = layer_names[-1]
    # find layers which have to be kept unchanged
    id_to_set_random = layer_names.index(layer)
    # combine old, trained layers with new random layers
    comb_layers = model_trained.layers[0:id_to_set_random]
    new_layers = model_random.layers[id_to_set_random:]
    comb_layers.extend(new_layers)
    # define new model
    new_model = Sequential(comb_layers)
    # log the layers of the new model
    for i, new_layer in enumerate(new_model.layers):
        logging.info("New model - layer %s: %s" % (i, new_layer.name))
    return new_model
Example 6: keras_estimator
# Required import: from tensorflow.python.keras import models [as alias]
# Or: from tensorflow.python.keras.models import Sequential [as alias]
# Also assumed by this example: import tensorflow as tf
#                                from tensorflow.python.keras.layers import Dense
def keras_estimator(model_dir, config, params):
    """Creates a Keras Sequential model with layers.

    Mean Squared Error (MSE) is a common loss function used for regression.
    A common regression metric is Mean Absolute Error (MAE).

    Args:
        model_dir: (str) file path where training files will be written.
        config: (tf.estimator.RunConfig) Configuration options to save model.
        params: (dict) hyperparameters, expected to contain 'num_features'
            and 'learning_rate'.

    Returns:
        A tf.estimator.Estimator wrapping the compiled Keras model.
    """
    model = models.Sequential()
    model.add(
        Dense(64, activation=tf.nn.relu, input_shape=(params['num_features'],)))
    model.add(Dense(64, activation=tf.nn.relu))
    model.add(Dense(1))

    # Compile model with learning parameters.
    optimizer = tf.train.RMSPropOptimizer(learning_rate=params['learning_rate'])
    model.compile(optimizer=optimizer, loss='mse', metrics=['mae'])

    return tf.keras.estimator.model_to_estimator(
        keras_model=model, model_dir=model_dir, config=config)
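A hypothetical call for Example 6; the params keys are the two the function actually reads, while the concrete values and model_dir are only illustrative:

estimator = keras_estimator(
    model_dir='/tmp/regression_model',
    config=tf.estimator.RunConfig(),
    params={'num_features': 13, 'learning_rate': 0.001})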
Example 7: keras_estimator
# Required import: from tensorflow.python.keras import models [as alias]
# Or: from tensorflow.python.keras.models import Sequential [as alias]
# Also assumed by this example: import tensorflow as tf
#                                from tensorflow.python.keras.layers import Flatten, Dense
def keras_estimator(model_dir, config, learning_rate):
    """Creates a Keras Sequential model with layers.

    Args:
        model_dir: (str) file path where training files will be written.
        config: (tf.estimator.RunConfig) Configuration options to save model.
        learning_rate: (float) Learning rate.

    Returns:
        A tf.estimator.Estimator wrapping the compiled Keras model.
    """
    model = models.Sequential()
    model.add(Flatten(input_shape=(28, 28)))
    model.add(Dense(128, activation=tf.nn.relu))
    model.add(Dense(10, activation=tf.nn.softmax))

    # Compile model with learning parameters.
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
    model.compile(
        optimizer=optimizer,
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy'])
    estimator = tf.keras.estimator.model_to_estimator(
        keras_model=model, model_dir=model_dir, config=config)
    return estimator
Example 8: set_specific_layers_to_random
# Required import: from tensorflow.python.keras import models [as alias]
# Or: from tensorflow.python.keras.models import Sequential [as alias]
# Also assumed by this example: import logging
def set_specific_layers_to_random(model_trained, model_random, layer):
    """Set the layer named `layer` and all layers after it to random weights,
    taken from the freshly initialized model_random."""
    logging.info("Replacing layers of model with random layers")
    layer_names = [x.name for x in model_trained.layers]
    # check if target layer is in model
    if layer not in layer_names:
        logging.error("Layer %s not in model.layers" % layer)
        logging.error("Available layers: %s" % layer_names)
        raise IOError("Layer %s not in model.layers" % layer)
    # find layers which have to be kept unchanged
    id_to_set_random = layer_names.index(layer)
    # combine old, trained layers with new random layers
    comb_layers = model_trained.layers[0:id_to_set_random]
    new_layers = model_random.layers[id_to_set_random:]
    comb_layers.extend(new_layers)
    # define new model
    new_model = Sequential(comb_layers)
    # log the layers of the new model
    for i, new_layer in enumerate(new_model.layers):
        logging.debug("New model - layer %s: %s" % (i, new_layer.name))
    return new_model
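A sketch of how Example 5 and Example 8 might be used: build the same architecture twice, one copy trained and one freshly initialized, then splice them. The builder function and layer names below are hypothetical:

from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Dense

def build_toy_net():
    # Identical architecture for the "trained" and the "random" copy.
    return Sequential([
        Dense(16, activation='relu', input_shape=(8,), name='dense_hidden'),
        Dense(2, activation='softmax', name='dense_out'),
    ])

model_trained = build_toy_net()   # in practice: load trained weights here
model_random = build_toy_net()    # freshly initialized weights
mixed = set_specific_layers_to_random(model_trained, model_random, layer='dense_out')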
Example 9: _assert_valid_model
# Required import: from tensorflow.python.keras import models [as alias]
# Or: from tensorflow.python.keras.models import Sequential [as alias]
# Also assumed by this example: import tensorflow as tf
def _assert_valid_model(model, custom_objects=None):
    # Graph networks (Functional/Sequential models) can always be rebuilt from
    # their config; subclassed models must implement get_config/from_config.
    is_subclass = (not model._is_graph_network and
                   not isinstance(model, models.Sequential))
    if is_subclass:
        try:
            custom_objects = custom_objects or {}
            with tf.keras.utils.CustomObjectScope(custom_objects):
                model.__class__.from_config(model.get_config())
        except NotImplementedError:
            raise ValueError(
                'Subclassed `Model`s passed to `model_to_estimator` must '
                'implement `Model.get_config` and `Model.from_config`.')
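To illustrate what this check rejects, a hedged sketch: a Sequential model passes silently, while a subclassed model that does not implement get_config/from_config raises the ValueError (behavior as in the TF versions this helper was written against):

class MySubclassedModel(tf.keras.Model):
    # No get_config/from_config, so it cannot be rebuilt from its config.
    def __init__(self):
        super(MySubclassedModel, self).__init__()
        self.dense = tf.keras.layers.Dense(1)

    def call(self, inputs):
        return self.dense(inputs)

_assert_valid_model(models.Sequential([tf.keras.layers.Dense(1, input_shape=(3,))]))  # passes
_assert_valid_model(MySubclassedModel())  # raises ValueError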
Example 10: build_model
# Required import: from tensorflow.python.keras import models [as alias]
# Or: from tensorflow.python.keras.models import Sequential [as alias]
# Also assumed by this example: from tensorflow.python.keras.applications.vgg16 import VGG16
#                                from tensorflow.python.keras.layers import Flatten
#                                from tensorflow.python.keras.models import Model
def build_model():
    # Pretrained VGG16 base with a small Sequential "top" stacked on its output.
    base_model = VGG16(weights='imagenet')
    top_model = Sequential()
    top_model.add(Flatten(input_shape=base_model.output_shape[1:]))
    return Model(inputs=base_model.input, outputs=top_model(base_model.output))
Example 11: model
# Required import: from tensorflow.python.keras import models [as alias]
# Or: from tensorflow.python.keras.models import Sequential [as alias]
# Also assumed by this example: import numpy as np
#                                from tensorflow.python import keras
#                                from tensorflow.python.keras.layers import Conv2D, Flatten, Dense
#                                img_rows, img_cols and num_classes defined at module level
def model(train_x, train_y, test_x, test_y, epoch):
    '''
    :param train_x: train features
    :param train_y: train labels (one-hot encoded)
    :param test_x: test features
    :param test_y: test labels (one-hot encoded)
    :param epoch: no. of epochs
    :return: the trained model and a dict with train/test accuracy and epoch count
    '''
    conv_model = Sequential()
    # first layer with input shape (img_rows, img_cols, 1) and 12 filters
    conv_model.add(Conv2D(12, kernel_size=(3, 3), activation='relu',
                          input_shape=(img_rows, img_cols, 1)))
    # second layer with 12 filters
    conv_model.add(Conv2D(12, kernel_size=(3, 3), activation='relu'))
    # third layer with 12 filters
    conv_model.add(Conv2D(12, kernel_size=(3, 3), activation='relu'))
    # flatten layer
    conv_model.add(Flatten())
    # add a fully connected Dense layer
    conv_model.add(Dense(100, activation='relu'))
    # add the final Dense layer with softmax
    conv_model.add(Dense(num_classes, activation='softmax'))
    # compile the model
    conv_model.compile(optimizer=keras.optimizers.Adadelta(),
                       loss='categorical_crossentropy',
                       metrics=['accuracy'])

    print("\n Training the Convolutional Neural Network on MNIST data\n")
    # fit the model
    conv_model.fit(train_x, train_y, batch_size=128, epochs=epoch,
                   validation_split=0.1, verbose=2)

    predicted_train_y = conv_model.predict(train_x)
    train_accuracy = (sum(np.argmax(predicted_train_y, axis=1)
                          == np.argmax(train_y, axis=1)) / float(len(train_y)))
    print('Train accuracy : ', train_accuracy)
    predicted_test_y = conv_model.predict(test_x)
    test_accuracy = (sum(np.argmax(predicted_test_y, axis=1)
                         == np.argmax(test_y, axis=1)) / float(len(test_y)))
    print('Test accuracy : ', test_accuracy)
    CNN_accuracy = {'train_accuracy': train_accuracy,
                    'test_accuracy': test_accuracy, 'epoch': epoch}
    return conv_model, CNN_accuracy
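The function in Example 11 relies on img_rows, img_cols and num_classes being defined at module level; a sketch of how the data and those globals might be prepared with the standard Keras MNIST loader, assuming model() lives in the same module:

import numpy as np
import tensorflow as tf

img_rows, img_cols, num_classes = 28, 28, 10

(train_x, train_y), (test_x, test_y) = tf.keras.datasets.mnist.load_data()
# Reshape to (n, 28, 28, 1), scale to [0, 1] and one-hot encode the labels.
train_x = train_x.reshape(-1, img_rows, img_cols, 1).astype('float32') / 255.0
test_x = test_x.reshape(-1, img_rows, img_cols, 1).astype('float32') / 255.0
train_y = tf.keras.utils.to_categorical(train_y, num_classes)
test_y = tf.keras.utils.to_categorical(test_y, num_classes)

conv_model, cnn_accuracy = model(train_x, train_y, test_x, test_y, epoch=2)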