本文整理匯總了Python中keras.engine.Input方法的典型用法代碼示例。如果您正苦於以下問題:Python engine.Input方法的具體用法?Python engine.Input怎麽用?Python engine.Input使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類keras.engine
的用法示例。
在下文中一共展示了engine.Input方法的8個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: build_encoder
# 需要導入模塊: from keras import engine [as 別名]
# 或者: from keras.engine import Input [as 別名]
def build_encoder(config: BEGANConfig, name="encoder"):
    """Build the BEGAN encoder: image -> hidden vector.

    Four strided/unstrided conv stages progressively shrink the spatial
    dims and widen the channels, then a linear Dense layer projects the
    flattened features to ``config.hidden_size``.

    :param config: BEGAN hyper-parameters (image size, filter counts, ...).
    :param name: prefix used for all layer names inside the encoder.
    :return: a keras ``Container`` mapping the input image to the hidden code.
    """
    n_filters = config.n_filters
    hidden_size = config.hidden_size
    n_layer = config.n_layer_in_conv
    dx = image_input = Input((config.image_height, config.image_width, 3))
    # output: (N, 32, 32, n_filters)
    dx = convolution_image_for_encoding(dx, n_filters, strides=(2, 2), name="%s/L1" % name, n_layer=n_layer)
    # output: (N, 16, 16, n_filters*2)
    dx = convolution_image_for_encoding(dx, n_filters * 2, strides=(2, 2), name="%s/L2" % name, n_layer=n_layer)
    # output: (N, 8, 8, n_filters*3)
    dx = convolution_image_for_encoding(dx, n_filters * 3, strides=(2, 2), name="%s/L3" % name, n_layer=n_layer)
    # output: (N, 8, 8, n_filters*4) -- last stage keeps spatial size (stride 1)
    dx = convolution_image_for_encoding(dx, n_filters * 4, strides=(1, 1), name="%s/L4" % name, n_layer=n_layer)
    dx = Flatten()(dx)
    # 'linear' activation: the hidden code is an unbounded embedding
    hidden = Dense(hidden_size, activation='linear', name="%s/Dense" % name)(dx)
    encoder = Container(image_input, hidden, name=name)
    return encoder
示例2: build_discriminator
# 需要導入模塊: from keras import engine [as 別名]
# 或者: from keras.engine import Input [as 別名]
def build_discriminator(config: BEGANConfig, autoencoder: Container):
    """Build the BEGAN discriminator around a shared autoencoder.

    A Keras ``Model`` can have several inputs/outputs, but each output needs
    its own loss function and a loss function cannot reference the other
    inputs/outputs. To compute a joint loss, the real and generated images
    are therefore concatenated along the channel axis into a single
    input/output tensor.

    :param config: BEGAN hyper-parameters (image size, ...).
    :param autoencoder: shared autoencoder applied to both image streams
        (weights are shared between the two branches).
    :return: a ``DiscriminatorModel`` mapping the 6-channel stacked input to
        the 6-channel stacked reconstruction.
    """
    # IN shape: [ImageHeight, ImageWidth, real data (3 channels) + generated data (3 channels)]
    in_out_shape = (config.image_height, config.image_width, 3 * 2)
    all_input = Input(in_out_shape)
    # Split the stacked input back into the two 3-channel images
    data_input = Lambda(lambda x: x[:, :, :, 0:3], output_shape=(config.image_height, config.image_width, 3))(all_input)
    generator_input = Lambda(lambda x: x[:, :, :, 3:6], output_shape=(config.image_height, config.image_width, 3))(all_input)
    # Run both streams through the same autoencoder (weights are shared)
    data_output = autoencoder(data_input)  # (bs, row, col, ch)
    generator_output = autoencoder(generator_input)
    # Concatenate outputs so output shape matches input shape
    all_output = Concatenate(axis=-1)([data_output, generator_output])
    discriminator = DiscriminatorModel(all_input, all_output, name="discriminator")
    return discriminator
示例3: test_layer_call_arguments
# 需要導入模塊: from keras import engine [as 別名]
# 或者: from keras.engine import Input [as 別名]
def test_layer_call_arguments():
    """Test the ability to pass and serialize arguments to `call`."""
    inp = layers.Input(shape=(2,))
    x = layers.Dense(3)(inp)
    # training=True hard-wires dropout on, so the model no longer
    # depends on the global learning phase
    x = layers.Dropout(0.5)(x, training=True)
    model = Model(inp, x)
    assert not model.uses_learning_phase

    # Test that the argument is kept when applying the model to a new input
    inp2 = layers.Input(shape=(2,))
    out2 = model(inp2)
    assert not out2._uses_learning_phase

    # Test that the argument survives a config round-trip
    config = model.get_config()
    model = Model.from_config(config)
    assert not model.uses_learning_phase
示例4: test_recursion_with_bn_and_loss
# 需要導入模塊: from keras import engine [as 別名]
# 或者: from keras.engine import Input [as 別名]
def test_recursion_with_bn_and_loss():
    """Nesting a Sequential inside a functional Model must propagate
    BatchNormalization updates and activity-regularizer losses."""
    model1 = Sequential([
        layers.Dense(5, input_dim=5, activity_regularizer='l1'),
        layers.BatchNormalization(),
        layers.Dense(5),
    ])

    print('NEW MODEL')
    inputs = layers.Input(shape=(5,))
    outputs = model1(inputs)
    model2 = Model(inputs=inputs, outputs=outputs)

    # BN contributes 2 updates (moving mean/variance); the l1 regularizer
    # contributes 1 loss. Both must be visible from the outer model too.
    assert len(model1.updates) == 2
    assert len(model2.updates) == 2
    assert len(model1.losses) == 1
    assert len(model2.losses) == 1, model2.layers[1]._per_input_losses

    model1.compile(optimizer='sgd', loss='categorical_crossentropy')
    model2.compile(optimizer='sgd', loss='categorical_crossentropy')

    x = np.ones((3, 5))
    y = np.ones((3, 5))
    model1.fit(x, y, verbose=0, epochs=1)
    model2.fit(x, y, verbose=0, epochs=1)
示例5: test_activity_regularization_with_model_composition
# 需要導入模塊: from keras import engine [as 別名]
# 或者: from keras.engine import Input [as 別名]
def test_activity_regularization_with_model_composition():
    """An activity regularizer defined in an inner model must contribute to
    the loss of an outer model that wraps it."""

    def reg(x):
        return K.sum(x)

    net_a_input = Input((2,))
    net_a = net_a_input
    net_a = Dense(2, kernel_initializer='ones',
                  use_bias=False,
                  activity_regularizer=reg)(net_a)
    model_a = Model([net_a_input], [net_a])

    net_b_input = Input((2,))
    net_b = model_a(net_b_input)
    model_b = Model([net_b_input], [net_b])

    # loss=None: the only loss is the activity regularizer
    model_b.compile(optimizer='sgd', loss=None)
    x = np.ones((1, 2))
    loss = model_b.evaluate(x)
    # ones(1,2) @ ones(2,2) sums to 4
    assert loss == 4
示例6: test_shared_layer_depth_is_correct
# 需要導入模塊: from keras import engine [as 別名]
# 或者: from keras.engine import Input [as 別名]
def test_shared_layer_depth_is_correct():
    # Basic outline here: we have a shared embedding layer, and two inputs that go through
    # different depths of computation in the graph before the final output. We need the computed
    # depth of the input layers to be the same, because they both pass through the embedding layer
    # before anything else happens. That's what we're testing.
    from keras.layers import Embedding, Input, Dense, Concatenate
    from keras.models import Model
    input1 = Input(shape=(10,), name='input1')
    input2 = Input(shape=(10,), name='input2')
    embedding_layer = Embedding(name='embedding', input_dim=5, output_dim=10)
    embedded_input1 = embedding_layer(input1)
    embedded_input2 = embedding_layer(input2)
    # input2's branch is three Dense layers deeper than input1's
    transformed_input2 = Dense(6)(Dense(5)(Dense(3)(embedded_input2)))
    final_output = Dense(2)(Concatenate()([embedded_input1, transformed_input2]))
    model = Model(inputs=[input1, input2], outputs=final_output)

    # Locate the computed depth of each input layer; -1 means "not found"
    input1_depth = -1
    input2_depth = -1
    for depth, layers in model.layers_by_depth.items():
        for layer in layers:
            if layer.name == 'input1':
                input1_depth = depth
            if layer.name == 'input2':
                input2_depth = depth
    assert input1_depth != -1
    assert input1_depth == input2_depth
示例7: test_layer_sharing_at_heterogeneous_depth
# 需要導入模塊: from keras import engine [as 別名]
# 或者: from keras.engine import Input [as 別名]
def test_layer_sharing_at_heterogeneous_depth():
    """Layers shared at different depths (A and B each used twice, at
    different graph depths) must survive a config/weights round-trip."""
    x_val = np.random.random((10, 5))

    x = Input(shape=(5,))
    A = Dense(5, name='A')
    B = Dense(5, name='B')
    # A and B each appear at two different depths in the graph
    output = A(B(A(B(x))))
    M = Model(x, output)
    output_val = M.predict(x_val)

    # Rebuild from config + weights and check the outputs match
    config = M.get_config()
    weights = M.get_weights()
    M2 = Model.from_config(config)
    M2.set_weights(weights)
    output_val_2 = M2.predict(x_val)
    np.testing.assert_allclose(output_val, output_val_2, atol=1e-6)
示例8: test_layer_sharing_at_heterogeneous_depth_with_concat
# 需要導入模塊: from keras import engine [as 別名]
# 或者: from keras.engine import Input [as 別名]
def test_layer_sharing_at_heterogeneous_depth_with_concat():
    """Shared layers at different depths feeding a concat must survive a
    config/weights round-trip."""
    input_shape = (16, 9, 3)
    input_layer = Input(shape=input_shape)

    A = Dense(3, name='dense_A')
    B = Dense(3, name='dense_B')
    C = Dense(3, name='dense_C')
    # A appears at depth 1 in branch x1 and depth 2 in branch x2
    x1 = B(A(input_layer))
    x2 = A(C(input_layer))
    output = layers.concatenate([x1, x2])

    M = Model(inputs=input_layer, outputs=output)
    x_val = np.random.random((10, 16, 9, 3))
    output_val = M.predict(x_val)

    # Rebuild from config + weights and check the outputs match
    config = M.get_config()
    weights = M.get_weights()
    M2 = Model.from_config(config)
    M2.set_weights(weights)
    output_val_2 = M2.predict(x_val)
    np.testing.assert_allclose(output_val, output_val_2, atol=1e-6)