This page collects typical usage examples of the tensorflow.python.keras.Model method in Python. If you are unsure how keras.Model is called in practice, or simply want to see real-world examples of it, the curated code samples below may help. You can also explore further usage examples from the containing module, tensorflow.python.keras.
The following presents 11 code examples of the keras.Model method, ordered by popularity by default.
Example 1: load_keras_model
# Required import: from tensorflow.python import keras [as alias]
# Or: from tensorflow.python.keras import Model [as alias]
def load_keras_model(model_dir, max_seq_len):
    from tensorflow.python import keras
    from bert import BertModelLayer
    from bert.loader import StockBertConfig, load_stock_weights, params_from_pretrained_ckpt

    bert_config_file = os.path.join(model_dir, "bert_config.json")
    bert_ckpt_file = os.path.join(model_dir, "bert_model.ckpt")

    l_bert = BertModelLayer.from_params(params_from_pretrained_ckpt(model_dir))

    l_input_ids = keras.layers.Input(shape=(max_seq_len,), dtype='int32', name="input_ids")
    l_token_type_ids = keras.layers.Input(shape=(max_seq_len,), dtype='int32', name="token_type_ids")

    output = l_bert([l_input_ids, l_token_type_ids])

    model = keras.Model(inputs=[l_input_ids, l_token_type_ids], outputs=output)
    model.build(input_shape=[(None, max_seq_len),
                             (None, max_seq_len)])

    load_stock_weights(l_bert, bert_ckpt_file)
    return model
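As a point of reference, a minimal usage sketch of this helper; the checkpoint directory name below is a hypothetical placeholder, and the sequence length is arbitrary:

# Hypothetical invocation: the directory is assumed to hold a standard
# pretrained BERT checkpoint (bert_config.json, bert_model.ckpt, vocab.txt).
model = load_keras_model(model_dir="uncased_L-12_H-768_A-12", max_seq_len=128)
model.summary()  # functional model: two int32 inputs feeding the BertModelLayer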
Example 2: simple_subclassed_model
# Required import: from tensorflow.python import keras [as alias]
# Or: from tensorflow.python.keras import Model [as alias]
def simple_subclassed_model():

    class SimpleModel(keras.Model):

        def __init__(self):
            super(SimpleModel, self).__init__()
            self.dense1 = keras.layers.Dense(16, activation='relu')
            self.dp = keras.layers.Dropout(0.1)
            self.dense2 = keras.layers.Dense(_NUM_CLASS, activation='softmax')

        def call(self, inputs):
            x = self.dense1(inputs)
            x = self.dp(x)
            return self.dense2(x)

        def get_config(self):
            return {}

        @classmethod
        def from_config(cls, config):
            return cls()

    return SimpleModel()
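A hedged sketch of how the subclassed model above might be exercised; _NUM_CLASS comes from the surrounding test module, so a stand-in value of 2 is assumed here, and the data is random:

import numpy as np

_NUM_CLASS = 2  # assumed stand-in for the module-level constant

model = simple_subclassed_model()
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])

# 32 random samples with 10 features each, with one-hot targets.
x = np.random.random((32, 10)).astype('float32')
y = np.eye(_NUM_CLASS, dtype='float32')[np.random.randint(_NUM_CLASS, size=32)]
model.fit(x, y, epochs=1, batch_size=8)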
Example 3: multi_inputs_multi_outputs_model
# Required import: from tensorflow.python import keras [as alias]
# Or: from tensorflow.python.keras import Model [as alias]
def multi_inputs_multi_outputs_model():
    input_a = keras.layers.Input(shape=(16,), name='input_a')
    input_b = keras.layers.Input(shape=(16,), name='input_b')
    input_m = keras.layers.Input(shape=(8,), dtype='string', name='input_m')

    dense = keras.layers.Dense(8, name='dense_1')
    interm_a = dense(input_a)
    # Parse the string input m into numbers before mixing it into the dense branch.
    interm_m = keras.layers.Lambda(gen_parsing_ops.string_to_number)(input_m)
    interm_s = keras.layers.Lambda(lambda k: k[0] * k[1])([interm_m, interm_a])
    interm_b = dense(input_b)
    merged = keras.layers.concatenate([interm_s, interm_b], name='merge')
    output_c = keras.layers.Dense(3, activation='softmax', name='dense_2')(merged)
    output_d = keras.layers.Dense(2, activation='softmax', name='dense_3')(merged)
    model = keras.models.Model(
        inputs=[input_a, input_b, input_m], outputs=[output_c, output_d])
    model.compile(
        loss='categorical_crossentropy',
        optimizer='rmsprop',
        metrics={
            'dense_2': 'categorical_accuracy',
            'dense_3': 'categorical_accuracy'
        })
    return model
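A short inference sketch against this model with random inputs; the batch size is arbitrary, and input_m is filled with numeric strings because the Lambda branch parses strings into numbers:

import numpy as np

model = multi_inputs_multi_outputs_model()

batch = 10
preds_c, preds_d = model.predict({
    'input_a': np.random.random((batch, 16)).astype('float32'),
    'input_b': np.random.random((batch, 16)).astype('float32'),
    'input_m': np.full((batch, 8), '1.0'),  # numeric strings for string_to_number
})
print(preds_c.shape, preds_d.shape)  # (10, 3) and (10, 2)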
Example 4: create_bert_model
# Required import: from tensorflow.python import keras [as alias]
# Or: from tensorflow.python.keras import Model [as alias]
def create_bert_model(self, max_seq_len=18):
    bert_params = bert.loader.params_from_pretrained_ckpt(self.bert_ckpt_dir)
    l_bert = bert.BertModelLayer.from_params(bert_params, name="bert")

    input_ids = keras.layers.Input(shape=(max_seq_len,), dtype='int32', name="input_ids")
    token_type_ids = keras.layers.Input(shape=(max_seq_len,), dtype='int32', name="token_type_ids")
    output = l_bert([input_ids, token_type_ids])

    model = keras.Model(inputs=[input_ids, token_type_ids], outputs=output)

    return model, l_bert, (input_ids, token_type_ids)
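Continuing this example, a hedged sketch (assumed to run inside the same test class) of how the returned pieces might be built and populated with pretrained weights, mirroring Example 1; the checkpoint file name is an assumption:

# Hypothetical follow-up inside the test class.
model, l_bert, (input_ids, token_type_ids) = self.create_bert_model(max_seq_len=18)
model.build(input_shape=[(None, 18), (None, 18)])
bert.loader.load_stock_weights(l_bert, os.path.join(self.bert_ckpt_dir, "bert_model.ckpt"))
model.summary()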
Example 5: _define_model
# Required import: from tensorflow.python import keras [as alias]
# Or: from tensorflow.python.keras import Model [as alias]
def _define_model(output_layer=-1):
    '''Define a pre-trained MobileNet model.

    Args:
        output_layer: index of the layer whose output is used as the feature map.

    Returns:
        A Keras model with pre-trained ImageNet weights.
    '''
    base_model = MobileNet(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
    output = base_model.layers[output_layer].output
    output = GlobalAveragePooling2D()(output)
    model = Model(inputs=base_model.input, outputs=output)
    return model
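A brief usage sketch for the resulting feature extractor; the random image is illustrative only, and preprocess_input is assumed to come from the public tf.keras MobileNet module rather than whatever import the original file used:

import numpy as np
from tensorflow.keras.applications.mobilenet import preprocess_input  # assumed import path

model = _define_model(output_layer=-1)

# One random 224x224 RGB image purely for illustration.
image = np.random.randint(0, 256, size=(1, 224, 224, 3)).astype('float32')
features = model.predict(preprocess_input(image))
print(features.shape)  # (1, 1024) for the default MobileNet width multiplier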
Example 6: __init__
# Required import: from tensorflow.python import keras [as alias]
# Or: from tensorflow.python.keras import Model [as alias]
def __init__(self, game, encoder):
    """
    NNet model, copied from the Othello NNet, with reduced fully connected layers fc1 and fc2 and a reduced nnet_args.num_channels.
    :param game: game configuration
    :param encoder: Encoder, used to encode game boards
    """
    from rts.src.config_class import CONFIG

    # game params
    self.board_x, self.board_y, num_encoders = game.getBoardSize()
    self.action_size = game.getActionSize()
    """
    num_encoders = CONFIG.nnet_args.encoder.num_encoders
    """
    num_encoders = encoder.num_encoders

    # Neural Net
    self.input_boards = Input(shape=(self.board_x, self.board_y, num_encoders))  # s: batch_size x board_x x board_y x num_encoders

    x_image = Reshape((self.board_x, self.board_y, num_encoders))(self.input_boards)  # batch_size x board_x x board_y x num_encoders
    h_conv1 = Activation('relu')(BatchNormalization(axis=3)(Conv2D(CONFIG.nnet_args.num_channels, 3, padding='same', use_bias=False)(x_image)))  # batch_size x board_x x board_y x num_channels
    h_conv2 = Activation('relu')(BatchNormalization(axis=3)(Conv2D(CONFIG.nnet_args.num_channels, 3, padding='same', use_bias=False)(h_conv1)))  # batch_size x board_x x board_y x num_channels
    h_conv3 = Activation('relu')(BatchNormalization(axis=3)(Conv2D(CONFIG.nnet_args.num_channels, 3, padding='valid', use_bias=False)(h_conv2)))  # batch_size x (board_x-2) x (board_y-2) x num_channels
    h_conv4 = Activation('relu')(BatchNormalization(axis=3)(Conv2D(CONFIG.nnet_args.num_channels, 3, padding='valid', use_bias=False)(h_conv3)))  # batch_size x (board_x-4) x (board_y-4) x num_channels
    h_conv4_flat = Flatten()(h_conv4)
    s_fc1 = Dropout(CONFIG.nnet_args.dropout)(Activation('relu')(BatchNormalization(axis=1)(Dense(256, use_bias=False)(h_conv4_flat))))  # batch_size x 256
    s_fc2 = Dropout(CONFIG.nnet_args.dropout)(Activation('relu')(BatchNormalization(axis=1)(Dense(128, use_bias=False)(s_fc1))))  # batch_size x 128
    self.pi = Dense(self.action_size, activation='softmax', name='pi')(s_fc2)  # batch_size x self.action_size
    self.v = Dense(1, activation='tanh', name='v')(s_fc2)  # batch_size x 1

    self.model = Model(inputs=self.input_boards, outputs=[self.pi, self.v])
    self.model.compile(loss=['categorical_crossentropy', 'mean_squared_error'], optimizer=Adam(CONFIG.nnet_args.lr))
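For orientation, a hedged inference sketch; nnet is assumed to be an instance of the wrapper class above, and num_encoders is assumed to match the encoder it was built with:

import numpy as np

# Single-board inference; shapes follow the Input layer defined in __init__.
board = np.random.random((1, nnet.board_x, nnet.board_y, num_encoders)).astype('float32')
pi, v = nnet.model.predict(board)
print(pi.shape, v.shape)  # (1, action_size) and (1, 1)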
Example 7: test_train_premade_linear_model_with_dense_features
# Required import: from tensorflow.python import keras [as alias]
# Or: from tensorflow.python.keras import Model [as alias]
def test_train_premade_linear_model_with_dense_features(self):
    vocab_list = ['alpha', 'beta', 'gamma']
    vocab_val = [0.4, 0.6, 0.9]
    data = np.random.choice(vocab_list, size=256)
    y = np.zeros_like(data, dtype=np.float32)
    for vocab, val in zip(vocab_list, vocab_val):
        indices = np.where(data == vocab)
        y[indices] = val + np.random.uniform(
            low=-0.01, high=0.01, size=indices[0].shape)

    cat_column = tf.feature_column.categorical_column_with_vocabulary_list(
        key='symbol', vocabulary_list=vocab_list)
    ind_column = tf.feature_column.indicator_column(cat_column)
    keras_input = keras.layers.Input(
        name='symbol', shape=3, dtype=tf.dtypes.string)
    feature_layer = tf.compat.v1.keras.layers.DenseFeatures([ind_column])
    h = feature_layer({'symbol': keras_input})
    linear_model = linear.LinearModel(units=1)
    h = linear_model(h)

    model = keras.Model(inputs=keras_input, outputs=h)
    opt = gradient_descent.SGD(0.1)
    model.compile(opt, 'mse', ['mse'])

    train_input_fn = numpy_io.numpy_input_fn(
        x={'symbol': data}, y=y, num_epochs=20, shuffle=False)
    eval_input_fn = numpy_io.numpy_input_fn(
        x={'symbol': data}, y=y, num_epochs=20, shuffle=False)

    est = keras_lib.model_to_estimator(
        keras_model=model, config=self._config, checkpoint_format='saver')
    before_eval_results = est.evaluate(input_fn=eval_input_fn, steps=1)
    est.train(input_fn=train_input_fn, steps=30)
    after_eval_results = est.evaluate(input_fn=eval_input_fn, steps=1)
    self.assertLess(after_eval_results['loss'], before_eval_results['loss'])
    self.assertLess(after_eval_results['loss'], 0.05)
Example 8: simple_functional_model
# Required import: from tensorflow.python import keras [as alias]
# Or: from tensorflow.python.keras import Model [as alias]
def simple_functional_model(activation='relu'):
    a = keras.layers.Input(shape=_INPUT_SIZE, name='input_layer')
    b = keras.layers.Dense(16, activation=activation)(a)
    b = keras.layers.Dropout(0.1)(b)
    b = keras.layers.Dense(_NUM_CLASS, activation='softmax')(b)
    model = keras.models.Model(inputs=[a], outputs=[b])
    return model
Example 9: test_train_with_dense_features
# Required import: from tensorflow.python import keras [as alias]
# Or: from tensorflow.python.keras import Model [as alias]
def test_train_with_dense_features(self):
    feature_dict = {
        'sex': np.int64([1, 1, 1, 1, 0]),
        'cp': np.int64([0, 3, 3, 2, 1]),
        'slope': np.int64([3, 2, 0, 3, 1]),
    }
    label = np.int64([0, 1, 0, 0, 0])
    train_input_fn = numpy_io.numpy_input_fn(
        x=feature_dict, y=label, num_epochs=1, shuffle=False)
    feature_columns = list()
    input_features = dict()
    for feature_name, data_array in feature_dict.items():
        feature_columns.append(
            tf.feature_column.indicator_column(
                tf.feature_column.categorical_column_with_identity(
                    key=feature_name,
                    num_buckets=np.size(np.unique(data_array)))))
        input_features[feature_name] = keras.layers.Input(
            name=feature_name,
            shape=(np.size(np.unique(data_array)),),
            dtype=tf.dtypes.int64)

    x = tf.compat.v1.keras.layers.DenseFeatures(feature_columns)(input_features)
    x = keras.layers.Dense(16, activation='relu')(x)
    logits = keras.layers.Dense(1, activation='linear')(x)
    model = keras.Model(inputs=input_features, outputs=logits)
    model.compile(
        optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
    estimator_model = keras_lib.model_to_estimator(keras_model=model)
    estimator_model.train(input_fn=train_input_fn, steps=5)

# TODO(b/139845232): Enable after TF2 nightly's start.
Example 10: DISABLED_test_train_with_dense_features_v2
# Required import: from tensorflow.python import keras [as alias]
# Or: from tensorflow.python.keras import Model [as alias]
def DISABLED_test_train_with_dense_features_v2(self):
    feature_dict = {
        'sex': np.int64([1, 1, 1, 1, 0]),
        'cp': np.int64([0, 3, 3, 2, 1]),
        'slope': np.int64([3, 2, 0, 3, 1]),
    }
    label = np.int64([0, 1, 0, 0, 0])
    train_input_fn = numpy_io.numpy_input_fn(
        x=feature_dict, y=label, num_epochs=1, shuffle=False)
    feature_columns = list()
    input_features = dict()
    for feature_name, data_array in feature_dict.items():
        feature_columns.append(
            tf.feature_column.embedding_column(
                tf.feature_column.categorical_column_with_identity(
                    key=feature_name, num_buckets=np.size(np.unique(data_array))),
                dimension=3))
        input_features[feature_name] = keras.layers.Input(
            name=feature_name,
            shape=(np.size(np.unique(data_array)),),
            dtype=tf.dtypes.int64)

    df = tf.compat.v2.keras.layers.DenseFeatures(feature_columns)
    x = df(input_features)
    x = keras.layers.Dense(16, activation='relu')(x)
    logits = keras.layers.Dense(1, activation='linear')(x)
    model = keras.Model(inputs=input_features, outputs=logits)
    model.compile(
        optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
    estimator_model = keras_lib.model_to_estimator(keras_model=model)
    estimator_model.train(input_fn=train_input_fn, steps=5)

    # We assert that we find the embedding_weights variables in the dependencies
    # for the DenseFeatures layer.
    dependency_names = [x.name for x in df._checkpoint_dependencies]
    self.assertNotIn('embedding_weights', dependency_names)
    self.assertIn('cp_embedding/embedding_weights', dependency_names)
    self.assertIn('sex_embedding/embedding_weights', dependency_names)
    self.assertIn('slope_embedding/embedding_weights', dependency_names)
Example 11: test_sample_weights
# Required import: from tensorflow.python import keras [as alias]
# Or: from tensorflow.python.keras import Model [as alias]
def test_sample_weights(self):
    # Create a simple pass-through model.
    input_layer = keras.layers.Input(shape=1, name='input_layer')
    keras_model = keras.Model(inputs=input_layer, outputs=input_layer)

    keras_model.compile(loss='mean_absolute_error', optimizer='adam')

    features = [[0.], [0], [1], [1]]
    sample_weights = [0, .4, 1, 1]
    targets = [[0], [1], [0], [1]]

    expected_loss = keras_model.test_on_batch(
        tf.constant(features), tf.constant(targets),
        tf.constant(sample_weights))

    def input_fn():
        dataset = tf.compat.v1.data.Dataset.from_tensors(({
            'features': features,
            'sample_weights': sample_weights
        }, targets))
        return dataset

    est_keras = keras_lib.model_to_estimator(
        keras_model=keras_model, model_dir=tempfile.mkdtemp(dir=self._base_dir))
    eval_results = est_keras.evaluate(input_fn, steps=1)
    self.assertAllClose(expected_loss, eval_results['loss'])

    # Test multiple outputs with sample weights.
    keras_model = keras.Model(
        inputs=input_layer, outputs=[input_layer, input_layer])
    keras_model.compile(loss='mean_absolute_error', optimizer='adam')
    expected_loss = keras_model.test_on_batch(
        tf.constant(features),
        [tf.constant(targets), tf.constant(targets)],
        [tf.constant(sample_weights),
         tf.constant(sample_weights)])[0]

    def input_fn_multiple_targets():
        dataset = tf.compat.v1.data.Dataset.from_tensors(
            (features, sample_weights, targets))
        dataset = dataset.map(lambda x, y, z: ({
            'features': x,
            'sample_weights': (y, y)
        }, (z, z)))
        return dataset

    est_keras = keras_lib.model_to_estimator(
        keras_model=keras_model, model_dir=tempfile.mkdtemp(dir=self._base_dir))
    eval_results = est_keras.evaluate(input_fn_multiple_targets, steps=1)
    self.assertAllClose(expected_loss, eval_results['loss'])