本文整理匯總了Python中keras.engine.training.Model方法的典型用法代碼示例。如果您正苦於以下問題:Python training.Model方法的具體用法?Python training.Model怎麽用?Python training.Model使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類keras.engine.training
的用法示例。
在下文中一共展示了training.Model方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: test_trainable_argument
# 需要導入模塊: from keras.engine import training [as 別名]
# 或者: from keras.engine.training import Model [as 別名]
def test_trainable_argument():
    """Layers built with trainable=False must stay frozen through training,
    both when trained directly and when nested inside a functional Model."""
    data = np.random.random((5, 3))
    labels = np.random.random((5, 2))

    # Direct case: a Sequential model whose single layer is frozen.
    frozen = Sequential()
    frozen.add(Dense(2, input_dim=3, trainable=False))
    frozen.compile('rmsprop', 'mse')
    before = frozen.predict(data)
    frozen.train_on_batch(data, labels)
    after = frozen.predict(data)
    assert_allclose(before, after)

    # Nested case: wrap the frozen model inside a functional Model and retrain.
    inputs = Input(shape=(3,))
    outputs = frozen(inputs)
    wrapper = Model(inputs, outputs)
    wrapper.compile('rmsprop', 'mse')
    before = wrapper.predict(data)
    wrapper.train_on_batch(data, labels)
    after = wrapper.predict(data)
    assert_allclose(before, after)
示例2: create_policy_value_net
# 需要導入模塊: from keras.engine import training [as 別名]
# 或者: from keras.engine.training import Model [as 別名]
def create_policy_value_net(self):
    """Build the policy-value network and attach a batch inference helper.

    Creates ``self.model`` with two heads: a softmax move-probability head
    (``self.policy_net``) and a tanh scalar state-value head
    (``self.value_net``), plus ``self.policy_value`` for batched prediction.
    """
    in_x = network = Input((4, self.board_width, self.board_height))
    # Shared convolutional trunk: three 3x3 conv layers, 32 -> 64 -> 128 filters.
    for n_filters in (32, 64, 128):
        network = Conv2D(filters=n_filters, kernel_size=(3, 3), padding="same",
                         data_format="channels_first", activation="relu",
                         kernel_regularizer=l2(self.l2_const))(network)
    # Policy head: 1x1 conv, flatten, softmax over every board position.
    policy_net = Conv2D(filters=4, kernel_size=(1, 1), data_format="channels_first",
                        activation="relu", kernel_regularizer=l2(self.l2_const))(network)
    policy_net = Flatten()(policy_net)
    self.policy_net = Dense(self.board_width*self.board_height, activation="softmax",
                            kernel_regularizer=l2(self.l2_const))(policy_net)
    # Value head: 1x1 conv, flatten, dense, then a tanh scalar.
    value_net = Conv2D(filters=2, kernel_size=(1, 1), data_format="channels_first",
                       activation="relu", kernel_regularizer=l2(self.l2_const))(network)
    value_net = Flatten()(value_net)
    value_net = Dense(64, kernel_regularizer=l2(self.l2_const))(value_net)
    self.value_net = Dense(1, activation="tanh", kernel_regularizer=l2(self.l2_const))(value_net)
    self.model = Model(in_x, [self.policy_net, self.value_net])

    def policy_value(state_input):
        # Forward pass over a batch of board states.
        batch = np.array(state_input)
        return self.model.predict_on_batch(batch)

    self.policy_value = policy_value
示例3: main_test
# 需要導入模塊: from keras.engine import training [as 別名]
# 或者: from keras.engine.training import Model [as 別名]
def main_test():
    """Load the Food101 dataset and train a basic image classifier on it."""
    # Other dataset loaders, kept for reference:
    # loadFlickr8k()  # Flickr8k for Image Description
    # loadMSVD()      # MSVD for Video Description
    loadFood101()  # Food101 for Image Classification
    # Build and run the basic image-classification model.
    classifyFood101()
#################################
#
# Model building functions
#
#################################
示例4: setOutputsMapping
# 需要導入模塊: from keras.engine import training [as 別名]
# 或者: from keras.engine.training import Model [as 別名]
def setOutputsMapping(self, outputsMapping, acc_output=None):
    """Define how dataset outputs map onto this model's outputs.

    :param outputsMapping: dict with the model outputs' identifiers as keys
        and the position of the matching dataset output as values. For a
        Sequential model the keys are ints giving the desired output order
        and only a single entry is allowed; for a functional Model the keys
        are strings.
    :param acc_output: name of the model output used to compute accuracy
        (only needed for Graph models).
    :raises Exception: if a Sequential model is given more than one mapping.
    """
    multiple_outputs = len(outputsMapping.keys()) > 1
    if isinstance(self.model, Sequential) and multiple_outputs:
        raise Exception("When using Sequential models only one output can be provided in outputsMapping")
    self.outputsMapping = outputsMapping
    self.acc_output = acc_output
示例5: VGG_19
# 需要導入模塊: from keras.engine import training [as 別名]
# 或者: from keras.engine.training import Model [as 別名]
def VGG_19(self, nOutput, input):
    """Build VGG-19 with its top classifier replaced by an nOutput-way softmax.

    ``input`` is accepted for interface compatibility but is not used here.
    Sets ``self.model``, ``self.ids_inputs`` and ``self.ids_outputs``.
    """
    # Identifiers for the wrapper's input/output layers.
    self.ids_inputs = ['input_1']
    self.ids_outputs = ['predictions']
    # Start from the stock VGG19 architecture.
    self.model = VGG19()
    # Reuse the original input tensor and the 'fc2' features.
    image = self.model.get_layer(self.ids_inputs[0]).output
    features = self.model.get_layer('fc2').output
    predictions = Dense(nOutput, name=self.ids_outputs[0], activation='softmax')(features)
    self.model = Model(input=image, output=predictions)
示例6: VGG_19_ImageNet
# 需要導入模塊: from keras.engine import training [as 別名]
# 或者: from keras.engine.training import Model [as 別名]
def VGG_19_ImageNet(self, nOutput, input):
    """Build an ImageNet-pretrained VGG-19 with a fresh nOutput-way softmax top.

    ``input`` is accepted for interface compatibility but is not used here.
    Sets ``self.model``, ``self.ids_inputs`` and ``self.ids_outputs``.
    """
    # Identifiers for the wrapper's input/output layers.
    self.ids_inputs = ['input_1']
    self.ids_outputs = ['predictions']
    # VGG19 pre-trained on ImageNet, with a reduced lr on the loaded layers.
    self.model = VGG19(weights='imagenet', layers_lr=0.001)
    # Reuse the original input tensor and the 'fc2' features.
    image = self.model.get_layer(self.ids_inputs[0]).output
    features = self.model.get_layer('fc2').output
    predictions = Dense(nOutput, name=self.ids_outputs[0], activation='softmax')(features)
    self.model = Model(input=image, output=predictions)
########################################
# GoogLeNet implementation from http://dandxy89.github.io/ImageModels/googlenet/
########################################
示例7: add_One_vs_One_Merge_Functional
# 需要導入模塊: from keras.engine import training [as 別名]
# 或者: from keras.engine.training import Model [as 別名]
def add_One_vs_One_Merge_Functional(self, inputs_list, nOutput, activation='softmax'):
    """Join the one-vs-one classifier outputs into a final joint prediction.

    Concatenates ``inputs_list``, applies dropout and a final Dense layer,
    then rebuilds ``self.model`` so it exposes both the concatenated (ECOC)
    output and the final prediction.

    :return: list with the names of the two new outputs.
    """
    ecoc_loss_name = 'ecoc_loss'
    final_loss_name = 'final_loss/out'
    # Concatenate every pairwise classifier output along axis 1.
    ecoc_loss = merge(inputs_list, name=ecoc_loss_name, mode='concat', concat_axis=1)
    dropped = Dropout(0.5, name='final_loss/drop')(ecoc_loss)
    # Final joint prediction over the merged features.
    final_loss = Dense(nOutput, activation=activation, name=final_loss_name)(dropped)
    # Rebuild the model from its first layer with both outputs exposed.
    first_layer_name = self.model.layers[0].name
    in_node = self.model.get_layer(first_layer_name).output
    self.model = Model(input=in_node, output=[ecoc_loss, final_loss])
    return [ecoc_loss_name, final_loss_name]
示例8: create_model
# 需要導入模塊: from keras.engine import training [as 別名]
# 或者: from keras.engine.training import Model [as 別名]
def create_model(gpu):
    """Build (on device `gpu`) a model that averages its input channels.

    The input has one channel per entry of the module-level ``dirs``; the
    model's only operation is a channel-wise mean, keeping a single channel.
    """
    with tf.device(gpu):
        inp = Input((1280, 1918, len(dirs)))
        mean_mask = Lambda(lambda t: K.mean(t, axis=-1, keepdims=True))(inp)
        model = Model(inp, mean_mask)
        model.summary()
        return model
示例9: get_unet_resnet
# 需要導入模塊: from keras.engine import training [as 別名]
# 或者: from keras.engine.training import Model [as 別名]
def get_unet_resnet(input_shape):
    """U-Net style segmentation model with a trainable ResNet50 encoder.

    A frozen VGG16 sharing the same input tensor supplies one extra
    low-level skip connection at full resolution. Returns a Model that
    emits a single-channel sigmoid mask named "prediction".
    """
    resnet_base = ResNet50(input_shape=input_shape, include_top=False)
    if args.show_summary:
        resnet_base.summary()
    # Fine-tune the whole ResNet encoder.
    for layer in resnet_base.layers:
        layer.trainable = True

    # Encoder skip connections, shallowest to deepest.
    skip_names = ("activation_1", "activation_10", "activation_22",
                  "activation_40", "activation_49")
    conv1, conv2, conv3, conv4, conv5 = [
        resnet_base.get_layer(name).output for name in skip_names]

    def decoder(below, skip, n_filters, idx):
        # Upsample, merge with the skip tensor, then two simple conv blocks.
        merged = concatenate([UpSampling2D()(below), skip], axis=-1)
        block = conv_block_simple(merged, n_filters, "conv%d_1" % idx)
        return conv_block_simple(block, n_filters, "conv%d_2" % idx)

    conv6 = decoder(conv5, conv4, 256, 6)
    conv7 = decoder(conv6, conv3, 192, 7)
    conv8 = decoder(conv7, conv2, 128, 8)
    conv9 = decoder(conv8, conv1, 64, 9)

    # Frozen VGG16 on the same input provides an extra early-layer skip.
    vgg = VGG16(input_shape=input_shape, input_tensor=resnet_base.input, include_top=False)
    for layer in vgg.layers:
        layer.trainable = False
    vgg_first_conv = vgg.get_layer("block1_conv2").output

    # Final decoder stage merges the raw input and the VGG skip as well.
    up10 = concatenate([UpSampling2D()(conv9), resnet_base.input, vgg_first_conv], axis=-1)
    conv10 = conv_block_simple(up10, 32, "conv10_1")
    conv10 = conv_block_simple(conv10, 32, "conv10_2")
    conv10 = SpatialDropout2D(0.2)(conv10)
    mask = Conv2D(1, (1, 1), activation="sigmoid", name="prediction")(conv10)
    return Model(resnet_base.input, mask)
示例10: get_simple_unet
# 需要導入模塊: from keras.engine import training [as 別名]
# 或者: from keras.engine.training import Model [as 別名]
def get_simple_unet(input_shape):
    """Plain U-Net built from conv_block_simple stages (no pretrained encoder).

    Three down-sampling stages (32/64/128 filters), a 256-filter bottleneck,
    and a mirrored decoder. Returns a Model emitting a single-channel
    sigmoid mask named "prediction".
    """
    img_input = Input(input_shape)

    def encoder(tensor, n_filters, idx, n_convs=2):
        # A stack of simple conv blocks named conv{idx}_{j}.
        for j in range(1, n_convs + 1):
            tensor = conv_block_simple(tensor, n_filters, "conv%d_%d" % (idx, j))
        return tensor

    def decoder(below, skip, n_filters, idx):
        # Upsample, merge with the skip tensor, then two simple conv blocks.
        merged = concatenate([UpSampling2D()(below), skip], axis=-1)
        block = conv_block_simple(merged, n_filters, "conv%d_1" % idx)
        return conv_block_simple(block, n_filters, "conv%d_2" % idx)

    conv1 = encoder(img_input, 32, 1)
    pool1 = MaxPooling2D((2, 2), strides=(2, 2), padding="same", name="pool1")(conv1)
    conv2 = encoder(pool1, 64, 2)
    pool2 = MaxPooling2D((2, 2), strides=(2, 2), padding="same", name="pool2")(conv2)
    conv3 = encoder(pool2, 128, 3)
    pool3 = MaxPooling2D((2, 2), strides=(2, 2), padding="same", name="pool3")(conv3)
    # Bottleneck uses three conv blocks instead of two.
    conv4 = encoder(pool3, 256, 4, n_convs=3)

    conv5 = decoder(conv4, conv3, 128, 5)
    conv6 = decoder(conv5, conv2, 64, 6)
    conv7 = decoder(conv6, conv1, 32, 7)
    conv7 = SpatialDropout2D(0.2)(conv7)
    prediction = Conv2D(1, (1, 1), activation="sigmoid", name="prediction")(conv7)
    return Model(img_input, prediction)
示例11: get_unet_mobilenet
# 需要導入模塊: from keras.engine import training [as 別名]
# 或者: from keras.engine.training import Model [as 別名]
def get_unet_mobilenet(input_shape):
    """U-Net style segmentation model with a MobileNet encoder.

    Skip connections are taken from the pointwise-conv ReLU layers of the
    MobileNet base. Returns a Model emitting a single-channel sigmoid mask
    named "prediction".
    """
    base_model = MobileNet(include_top=False, input_shape=input_shape)

    # Encoder skip connections, shallowest to deepest.
    skip_names = ('conv_pw_1_relu', 'conv_pw_3_relu', 'conv_pw_5_relu',
                  'conv_pw_11_relu', 'conv_pw_13_relu')
    conv1, conv2, conv3, conv4, conv5 = [
        base_model.get_layer(name).output for name in skip_names]

    def decoder(below, skip, filters_1, filters_2, idx):
        # Upsample, merge with the skip tensor, then two simple conv blocks.
        merged = concatenate([UpSampling2D()(below), skip], axis=-1)
        block = conv_block_simple(merged, filters_1, "conv%d_1" % idx)
        return conv_block_simple(block, filters_2, "conv%d_2" % idx)

    conv6 = decoder(conv5, conv4, 256, 256, 6)
    conv7 = decoder(conv6, conv3, 256, 256, 7)
    conv8 = decoder(conv7, conv2, 192, 128, 8)
    conv9 = decoder(conv8, conv1, 96, 64, 9)
    # Final stage merges with the raw input tensor.
    conv10 = decoder(conv9, base_model.input, 48, 32, 10)
    conv10 = SpatialDropout2D(0.2)(conv10)
    mask = Conv2D(1, (1, 1), activation="sigmoid", name="prediction")(conv10)
    return Model(base_model.input, mask)
示例12: get_unet_inception_resnet_v2
# 需要導入模塊: from keras.engine import training [as 別名]
# 或者: from keras.engine.training import Model [as 別名]
def get_unet_inception_resnet_v2(input_shape):
    """U-Net style segmentation model with an InceptionResNetV2 encoder.

    Skip connections are taken from intermediate activations of the base
    network. Returns a Model emitting a single-channel sigmoid mask named
    "prediction".
    """
    base_model = InceptionResNetV2(include_top=False, input_shape=input_shape)

    # Encoder skip connections, shallowest to deepest.
    skip_names = ('activation_3', 'activation_5', 'block35_10_ac',
                  'block17_20_ac', 'conv_7b_ac')
    conv1, conv2, conv3, conv4, conv5 = [
        base_model.get_layer(name).output for name in skip_names]

    def decoder(below, skip, filters_1, filters_2, idx):
        # Upsample, merge with the skip tensor, then two simple conv blocks.
        merged = concatenate([UpSampling2D()(below), skip], axis=-1)
        block = conv_block_simple(merged, filters_1, "conv%d_1" % idx)
        return conv_block_simple(block, filters_2, "conv%d_2" % idx)

    conv6 = decoder(conv5, conv4, 256, 256, 6)
    conv7 = decoder(conv6, conv3, 256, 256, 7)
    conv8 = decoder(conv7, conv2, 128, 128, 8)
    conv9 = decoder(conv8, conv1, 64, 64, 9)
    # Final stage merges with the raw input tensor.
    conv10 = decoder(conv9, base_model.input, 48, 32, 10)
    conv10 = SpatialDropout2D(0.4)(conv10)
    mask = Conv2D(1, (1, 1), activation="sigmoid", name="prediction")(conv10)
    return Model(base_model.input, mask)
示例13: update_learning_rate
# 需要導入模塊: from keras.engine import training [as 別名]
# 或者: from keras.engine.training import Model [as 別名]
def update_learning_rate(self, learning_rate, arg_weight=1.):
    """Re-compile the model with a new learning rate and argument weight."""
    message = "Re-Compile Model lr=%s aw=%s" % (learning_rate, arg_weight)
    print(message)
    self.compile_model(learning_rate, arg_weight=arg_weight)
示例14: train_f_enc
# 需要導入模塊: from keras.engine import training [as 別名]
# 或者: from keras.engine.training import Model [as 別名]
def train_f_enc(self, steps_list, epoch=50):
    """Pre-train the shared ``self.f_enc`` encoder on the addition task.

    Two softmax heads are stacked on the encoder and trained to predict the
    ones digit and the tens digit of ``in1 + in2 + carry`` for each teacher
    step. Training stops early once the average loss is essentially zero.

    :param steps_list: sequence of dicts, each holding a 'steps' list whose
        items expose ``step.input`` with an ``env`` array — assumed to encode
        (in1, in2, carry) in its first three rows; TODO confirm against the
        environment definition.
    :param epoch: maximum number of passes over steps_list.
    """
    print("training f_enc")
    # Head 0: ones digit of the sum (softmax over FIELD_DEPTH classes).
    f_add0 = Sequential(name='f_add0')
    f_add0.add(self.f_enc)
    f_add0.add(Dense(FIELD_DEPTH))
    f_add0.add(Activation('softmax', name='softmax_add0'))
    # Head 1: tens digit of the sum; shares the same encoder weights.
    f_add1 = Sequential(name='f_add1')
    f_add1.add(self.f_enc)
    f_add1.add(Dense(FIELD_DEPTH))
    f_add1.add(Activation('softmax', name='softmax_add1'))
    env_model = Model(self.f_enc.inputs, [f_add0.output, f_add1.output], name="env_model")
    env_model.compile(optimizer='adam', loss=['categorical_crossentropy']*2)
    for ep in range(epoch):
        losses = []
        for idx, steps_dict in enumerate(steps_list):
            prev = None
            for step in steps_dict['steps']:
                # Only the first two converted inputs feed the encoder.
                x = self.convert_input(step.input)[:2]
                env_values = step.input.env.reshape((4, -1))
                # Decode the two operands and the carry: argmax - 1, clipped to 0..9.
                in1 = np.clip(env_values[0].argmax() - 1, 0, 9)
                in2 = np.clip(env_values[1].argmax() - 1, 0, 9)
                carry = np.clip(env_values[2].argmax() - 1, 0, 9)
                y_num = in1 + in2 + carry
                now = (in1, in2, carry)
                # Skip consecutive steps with an identical (in1, in2, carry) triple.
                if prev == now:
                    continue
                prev = now
                # Targets: ones and tens digits of the sum, offset by 1 for the one-hot.
                y0 = to_one_hot_array((y_num % 10)+1, FIELD_DEPTH)
                y1 = to_one_hot_array((y_num // 10)+1, FIELD_DEPTH)
                y = [yy.reshape((self.batch_size, -1)) for yy in [y0, y1]]
                loss = env_model.train_on_batch(x, y)
                losses.append(loss)
        print("ep %3d: loss=%s" % (ep, np.average(losses)))
        # Early stop once the loss has converged to ~0.
        if np.average(losses) < 1e-06:
            break
示例15: build_model
# 需要導入模塊: from keras.engine import training [as 別名]
# 或者: from keras.engine.training import Model [as 別名]
def build_model(args):
    """Build a residual policy/value network from the `args` config dict.

    Expects keys: 'cnn_filter_num', 'cnn_filter_size', 'l2_reg',
    'input_dim', 'res_layer_num', 'policy_dim'. Returns a Model with a
    softmax "policy" output and a tanh scalar "value" output.
    """
    cnn_filter_num = args['cnn_filter_num']
    cnn_filter_size = args['cnn_filter_size']
    l2_reg = args['l2_reg']

    # Input is (batch, channels, height, width).
    in_x = x = Input(args['input_dim'])
    # Stem: conv + batch-norm + ReLU.
    x = Conv2D(filters=cnn_filter_num, kernel_size=cnn_filter_size, padding="same",
               data_format="channels_first", kernel_regularizer=l2(l2_reg))(x)
    x = BatchNormalization(axis=1)(x)
    x = Activation("relu")(x)
    # Residual tower.
    for _ in range(args['res_layer_num']):
        x = _build_residual_block(args, x)
    res_out = x

    def head(n_filters, tensor):
        # Shared head shape: 1x1 conv, batch-norm, ReLU, flatten.
        t = Conv2D(filters=n_filters, kernel_size=1, data_format="channels_first",
                   kernel_regularizer=l2(l2_reg))(tensor)
        t = BatchNormalization(axis=1)(t)
        t = Activation("relu")(t)
        return Flatten()(t)

    # Policy head: softmax over the move space.
    policy_out = Dense(args['policy_dim'], kernel_regularizer=l2(l2_reg),
                       activation="softmax", name="policy")(head(2, res_out))
    # Value head: hidden dense layer, then a tanh scalar in [-1, 1].
    value_hidden = Dense(256, kernel_regularizer=l2(l2_reg), activation="relu")(head(1, res_out))
    value_out = Dense(1, kernel_regularizer=l2(l2_reg), activation="tanh", name="value")(value_hidden)
    return Model(in_x, [policy_out, value_out], name="model")