本文整理汇总了Python中keras.models.Input方法的典型用法代码示例。如果您正苦于以下问题:Python models.Input方法的具体用法?Python models.Input怎么用?Python models.Input使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类keras.models
的用法示例。
在下文中一共展示了models.Input方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: get_shallow_convnet
# Required module: from keras import models [as alias]
# Alternatively: from keras.models import Input [as alias]
def get_shallow_convnet(window_size=4096, channels=2, output_size=84):
    """Build and compile a shallow complex-valued convnet.

    A single ComplexConv1D layer, average pooling, a (time, feature)
    permute, then two complex dense layers; the real part of the final
    complex output is taken as the prediction.

    :param window_size: number of samples per input window
    :param channels: number of input channels per sample
    :param output_size: number of output units (sigmoid activations)
    :return: a compiled keras Model (Adam, binary crossentropy)
    """
    net_in = Input(shape=(window_size, channels))
    hidden = ComplexConv1D(32, 512, strides=16, activation='relu')(net_in)
    hidden = AveragePooling1D(pool_size=4, strides=2)(hidden)
    hidden = Permute([2, 1])(hidden)
    hidden = Flatten()(hidden)
    hidden = ComplexDense(2048, activation='relu')(hidden)
    # Negative bias start pushes initial sigmoid outputs toward zero.
    logits = ComplexDense(output_size,
                          activation='sigmoid',
                          bias_initializer=Constant(value=-5))(hidden)
    prediction = GetReal(logits)
    net = Model(inputs=net_in, outputs=prediction)
    net.compile(optimizer=Adam(lr=1e-4),
                loss='binary_crossentropy',
                metrics=['accuracy'])
    return net
示例2: loadModel
# Required module: from keras import models [as alias]
# Alternatively: from keras.models import Input [as alias]
def loadModel(self):
    """
    Load the detection model into the CustomObjectDetection instance.

    Reads labels and anchors from the detection config JSON, builds the
    YOLOv3 graph for a variable-size 3-channel input and loads the trained
    weights. No-op for model types other than "yolov3" (matching the
    original behavior).
    :return: None
    """
    if self.__model_type != "yolov3":
        return
    # Use a context manager so the config file handle is closed
    # (the original `json.load(open(...))` leaked the handle).
    with open(self.__detection_config_json_path) as config_file:
        detection_model_json = json.load(config_file)
    self.__model_labels = detection_model_json["labels"]
    self.__model_anchors = detection_model_json["anchors"]
    self.__detection_utils = CustomDetectionUtils(labels=self.__model_labels)
    # 3 anchor boxes per scale; output size driven by the label count.
    self.__model = yolo_main(Input(shape=(None, None, 3)), 3, len(self.__model_labels))
    self.__model.load_weights(self.__model_path)
示例3: model
# Required module: from keras import models [as alias]
# Alternatively: from keras.models import Input [as alias]
def model(self):
    """Build the local discriminator.

    Multiplies the image by its mask, pushes the result through six
    stride-2 5x5 convolutions with LeakyReLU, then flattens to a single
    unactivated logit.
    :return: a keras Model taking [image, mask] and returning one logit
    """
    image_in = Input(shape=(self.img_height, self.img_width, self.num_channels))
    mask_in = Input(shape=(self.img_height, self.img_width, self.num_channels))
    features = Multiply()([image_in, mask_in])
    # Local discriminator: six stride-2 conv + LeakyReLU blocks.
    for n_filters in (64, 128, 256, 512, 256, 128):
        features = Conv2D(filters=n_filters, kernel_size=5,
                          strides=(2, 2), padding='same')(features)
        features = LeakyReLU()(features)
    features = Flatten()(features)
    logit = Dense(units=1)(features)
    return Model(name=self.model_name, inputs=[image_in, mask_in], outputs=logit)
示例4: build
# Required module: from keras import models [as alias]
# Alternatively: from keras.models import Input [as alias]
def build(self, **kwargs):
    """Create a plain token-embedding model: int token ids -> embeddings.

    Sets self.input, self.output and self.model as side effects.
    """
    self.vocab_size = len(self.token2idx)
    token_ids = Input(shape=(self.len_max,), dtype='int32')
    # vocab_size + 1 reserves one extra embedding row -- presumably for
    # padding/unknown tokens; confirm against the tokenizer.
    embedded = Embedding(self.vocab_size + 1,
                         self.embed_size,
                         input_length=self.len_max,
                         trainable=self.trainable)(token_ids)
    self.input = token_ids
    self.output = embedded
    self.model = Model(self.input, self.output)
示例5: create_tcn
# Required module: from keras import models [as alias]
# Alternatively: from keras.models import Input [as alias]
def create_tcn(list_n_filters=None,
               kernel_size=4,
               dilations=None,
               nb_stacks=1,
               activation='norm_relu',
               n_layers=1,
               dropout_rate=0.05,
               use_skip_connections=True,
               bidirectional=True):
    """Build a (stacked) TCN over mel-spectrogram input.

    :param list_n_filters: filters per TCN layer; defaults to [8]
        (None sentinel avoids the shared mutable-default pitfall)
    :param kernel_size: convolution kernel width
    :param dilations: dilation rates; defaults to [1, 2]
    :param nb_stacks: residual stacks per TCN layer
    :param activation: TCN activation name
    :param n_layers: number of stacked TCN layers
    :param dropout_rate: dropout inside the TCN blocks
    :param use_skip_connections: enable TCN skip connections
    :param bidirectional: if True use 'same' padding so context from both
        directions is visible; otherwise 'causal' padding
    :return: an uncompiled keras Model with per-frame sigmoid outputs
    """
    # Mutable defaults ([8], [1, 2]) replaced by None sentinels; the
    # effective default values are unchanged for callers.
    if list_n_filters is None:
        list_n_filters = [8]
    if dilations is None:
        dilations = [1, 2]
    if bidirectional:
        padding = 'same'
    else:
        padding = 'causal'
    dilations = process_dilations(dilations)
    input_layer = Input(shape=(None, config.N_MELS))
    for i in range(n_layers):
        if i == 0:
            x = TCN(list_n_filters[i], kernel_size, nb_stacks, dilations, activation,
                    padding, use_skip_connections, dropout_rate, return_sequences=True)(input_layer)
        else:
            x = TCN(list_n_filters[i], kernel_size, nb_stacks, dilations, activation,
                    padding, use_skip_connections, dropout_rate, return_sequences=True,
                    name="tcn" + str(i))(x)
    x = Dense(config.CLASSES)(x)
    x = Activation('sigmoid')(x)
    output_layer = x
    return Model(input_layer, output_layer)
示例6: _get_model
# Required module: from keras import models [as alias]
# Alternatively: from keras.models import Input [as alias]
def _get_model(self):
    """Build a BiLSTM text classifier with attention-weighted pooling.

    Embedding (mask-aware) -> spatial dropout -> bidirectional LSTM ->
    attention-weighted average -> dropout -> softmax over classes.
    :return: an uncompiled keras Model
    """
    dropout = 0.5
    recurrent_dropout = 0.5
    rnn_units = 128
    input_text = Input((self.input_length,))
    # +2 rows -- presumably padding and OOV tokens; confirm with tokenizer.
    embedded = Embedding(input_dim=self.max_words + 2,
                         output_dim=self.emb_dim,
                         input_length=self.input_length,
                         mask_zero=True)(input_text)
    embedded = SpatialDropout1D(0.5)(embedded)
    encoded = Bidirectional(LSTM(units=rnn_units,
                                 return_sequences=True,
                                 dropout=dropout,
                                 recurrent_dropout=recurrent_dropout))(embedded)
    pooled, attn = AttentionWeightedAverage(return_attention=True)(encoded)
    pooled = Dropout(0.5)(pooled)
    out = Dense(units=self.n_classes, activation="softmax")(pooled)
    return Model(input_text, out)
示例7: build_model
# Required module: from keras import models [as alias]
# Alternatively: from keras.models import Input [as alias]
def build_model(self):
    """Build the model: embedding -> BiGRU -> flatten -> softmax.

    Compiles with categorical crossentropy and Adam at the configured
    learning rate; stores the result on self.model.
    """
    # Input dimension is the configured maximum sequence length.
    token_ids = Input(shape=(self.config.max_len,))
    # +2 rows -- presumably padding/OOV; confirm against num2word mapping.
    embedded = Embedding(len(self.num2word) + 2, 300,
                         input_length=self.config.max_len)(token_ids)
    encoded = Bidirectional(GRU(128, return_sequences=True))(embedded)
    flat = Flatten()(encoded)
    scores = Dense(len(self.words), activation='softmax')(flat)
    self.model = Model(inputs=token_ids, outputs=scores)
    optimizer = Adam(lr=self.config.learning_rate)
    self.model.compile(loss='categorical_crossentropy',
                       optimizer=optimizer,
                       metrics=['accuracy'])
示例8: define_global_discriminator
# Required module: from keras import models [as alias]
# Alternatively: from keras.models import Input [as alias]
def define_global_discriminator(self, generator_raw, global_discriminator_raw):
    """Assemble the WGAN-GP training graph for the global discriminator.

    Runs the (frozen-interface) generator on masked inputs, composites the
    fake sample via `make_comp_sample`, scores real/fake/averaged samples
    with the raw discriminator, and compiles the combined model with
    Wasserstein losses plus a gradient-penalty loss on the averaged samples.

    :param generator_raw: wrapper exposing the generator as `.model`
    :param global_discriminator_raw: wrapper exposing the discriminator as `.model`
    :return: a compiled keras Model with three outputs (real, fake, averaged)
    """
    generator_inputs = Input(shape=(self.img_height, self.img_width, self.num_channels))
    generator_masks = Input(shape=(self.img_height, self.img_width, self.num_channels))
    real_samples = Input(shape=(self.img_height, self.img_width, self.num_channels))
    fake_samples = generator_raw.model([generator_inputs, generator_masks])
    # fake_samples = generator_inputs * (1 - generator_masks) + fake_samples * generator_masks
    # Composite: keep unmasked regions from the input, generated content in the hole.
    fake_samples = Lambda(make_comp_sample)([generator_inputs, fake_samples, generator_masks])
    discriminator_output_from_fake_samples = global_discriminator_raw.model(fake_samples)
    discriminator_output_from_real_samples = global_discriminator_raw.model(real_samples)
    averaged_samples = custom_layers.RandomWeightedAverage()([real_samples, fake_samples])
    # We then run these samples through the discriminator as well. Note that we never
    # really use the discriminator output for these samples - we're only running them to
    # get the gradient norm for the gradient penalty loss.
    averaged_samples_outputs = global_discriminator_raw.model(averaged_samples)
    # The gradient penalty loss function requires the input averaged samples to get
    # gradients. However, Keras loss functions can only have two arguments, y_true and
    # y_pred. We get around this by making a partial() of the function with the averaged
    # samples here.
    partial_gp_loss = partial(gradient_penalty_loss,
                              averaged_samples=averaged_samples,
                              gradient_penalty_weight=self.gradient_penalty_loss_weight)
    # Functions need names or Keras will throw an error
    partial_gp_loss.__name__ = 'gradient_penalty'
    global_discriminator_model = Model(inputs=[real_samples, generator_inputs, generator_masks],
                                       outputs=[discriminator_output_from_real_samples,
                                                discriminator_output_from_fake_samples,
                                                averaged_samples_outputs])
    # We use the Adam paramaters from Gulrajani et al. We use the Wasserstein loss for both
    # the real and generated samples, and the gradient penalty loss for the averaged samples
    global_discriminator_model.compile(optimizer=self.discriminator_optimizer,
                                       loss=[wasserstein_loss, wasserstein_loss, partial_gp_loss])
    return global_discriminator_model
示例9: define_local_discriminator
# Required module: from keras import models [as alias]
# Alternatively: from keras.models import Input [as alias]
def define_local_discriminator(self, generator_raw, local_discriminator_raw):
    """Assemble the WGAN-GP training graph for the local discriminator.

    Like the global variant, but the discriminator also receives the mask so
    it can focus on the inpainted region, and the fake sample is NOT
    composited with the original image (compositing line left commented out).

    :param generator_raw: wrapper exposing the generator as `.model`
    :param local_discriminator_raw: wrapper exposing the discriminator as `.model`
    :return: a compiled keras Model with three outputs (real, fake, averaged)
    """
    generator_inputs = Input(shape=(self.img_height, self.img_width, self.num_channels))
    generator_masks = Input(shape=(self.img_height, self.img_width, self.num_channels))
    real_samples = Input(shape=(self.img_height, self.img_width, self.num_channels))
    fake_samples = generator_raw.model([generator_inputs, generator_masks])
    # fake_samples = generator_inputs * (1 - generator_masks) + fake_samples * generator_masks
    # fake_samples = Lambda(make_comp_sample)([generator_inputs, fake_samples, generator_masks])
    discriminator_output_from_fake_samples = local_discriminator_raw.model(
        [fake_samples, generator_masks])
    discriminator_output_from_real_samples = local_discriminator_raw.model(
        [real_samples, generator_masks])
    # Random interpolation between real and fake; only used to evaluate the
    # gradient norm for the gradient-penalty term.
    averaged_samples = custom_layers.RandomWeightedAverage()([real_samples, fake_samples])
    averaged_samples_output = local_discriminator_raw.model([averaged_samples, generator_masks])
    # Bind the averaged samples into the loss via partial(); Keras losses
    # only receive (y_true, y_pred).
    partial_gp_loss = partial(gradient_penalty_loss,
                              averaged_samples=averaged_samples,
                              gradient_penalty_weight=self.gradient_penalty_loss_weight)
    # Keras requires loss functions to have a __name__.
    partial_gp_loss.__name__ = 'gradient_penalty'
    local_discriminator_model = Model(inputs=[real_samples, generator_inputs, generator_masks],
                                      outputs=[discriminator_output_from_real_samples,
                                               discriminator_output_from_fake_samples,
                                               averaged_samples_output])
    local_discriminator_model.compile(optimizer=self.discriminator_optimizer,
                                      loss=[wasserstein_loss, wasserstein_loss, partial_gp_loss])
    return local_discriminator_model
示例10: model
# Required module: from keras import models [as alias]
# Alternatively: from keras.models import Input [as alias]
def model():
    """Build a Darknet-19-style backbone over 224x224x3 input.

    Six stages of conv_batch_norm_relu blocks (kernels alternate 3x3/1x1
    within a stage) with 2x2 max pooling after each of the first five
    stages, followed by a 1x1 conv to 5 channels, batch norm and a sigmoid
    output map.
    :return: an uncompiled keras Model
    """
    image = Input(shape=(224, 224, 3))
    x = image
    # Filters per conv in each stage; within a stage, even positions use
    # 3x3 kernels and odd positions use 1x1 bottlenecks.
    stages = ([32], [64], [128, 64, 128], [256, 128, 256],
              [512, 256, 512, 256, 512], [1024, 512, 1024, 512, 1024])
    for stage_idx, stage_filters in enumerate(stages):
        for conv_idx, n_filters in enumerate(stage_filters):
            kernel = (3, 3) if conv_idx % 2 == 0 else (1, 1)
            x = conv_batch_norm_relu(x, n_filters, kernel,
                                     padding='same', activation='relu')
        if stage_idx < len(stages) - 1:  # no pooling after the last stage
            x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(x)
    x = Conv2D(5, (1, 1), padding='same')(x)
    x = BatchNormalization()(x)
    x = Activation('sigmoid', name='output')(x)
    return Model(inputs=image, outputs=x)
示例11: create_dagmm_model
# Required module: from keras import models [as alias]
# Alternatively: from keras.models import Input [as alias]
def create_dagmm_model(encoder, decoder, estimation_encoder, lambd_diag=0.005):
    """Assemble a DAGMM (Deep Autoencoding Gaussian Mixture Model).

    Encodes the input, reconstructs it, derives two reconstruction features
    (relative Euclidean distance and cosine similarity), feeds the latent
    code plus those features to the estimation network to get mixture
    responsibilities, and sums per-component Gaussian densities into a
    negative-log-density "energy" output.

    :param encoder: keras model mapping input to latent code zc
    :param decoder: keras model mapping zc back to input space
    :param estimation_encoder: keras model mapping z to mixture responsibilities
    :param lambd_diag: diagonal regularization weight for each GMM component
    :return: keras Model with outputs [reconstruction, energy]
    """
    x_in = Input(batch_shape=encoder.input_shape)
    zc = encoder(x_in)
    decoder.name = 'reconstruction'
    x_rec = decoder(zc)
    # Relative Euclidean distance: ||x - x_rec||^2 normalized by ||x||^2.
    euclid_dist = Lambda(lambda args: K.sqrt(K.sum(K.batch_flatten(K.square(args[0] - args[1])),
                                                   axis=-1, keepdims=True) /
                                             K.sum(K.batch_flatten(K.square(args[0])),
                                                   axis=-1, keepdims=True)),
                         output_shape=(1,))([x_in, x_rec])
    # Cosine similarity between the flattened input and its reconstruction.
    cos_sim = Lambda(lambda args: K.batch_dot(K.l2_normalize(K.batch_flatten(args[0]), axis=-1),
                                              K.l2_normalize(K.batch_flatten(args[1]), axis=-1),
                                              axes=-1),
                     output_shape=(1,))([x_in, x_rec])
    zr = concatenate([euclid_dist, cos_sim])
    z = concatenate([zc, zr])
    gamma = estimation_encoder(z)
    # Slice per-component responsibilities gamma[:, k] as separate tensors.
    gamma_ks = [Lambda(lambda g: g[:, k:k + 1], output_shape=(1,))(gamma)
                for k in range(estimation_encoder.output_shape[-1])]
    components = [GaussianMixtureComponent(lambd_diag)([z, gamma_k])
                  for gamma_k in gamma_ks]
    density = add(components) if len(components) > 1 else components[0]
    # Energy = negative log density; high energy marks likely anomalies.
    energy = Lambda(lambda dens: -K.log(dens), name='energy')(density)
    dagmm = Model(x_in, [x_rec, energy])
    return dagmm
示例12: CNN_BIGRU
# Required module: from keras import models [as alias]
# Alternatively: from keras.models import Input [as alias]
def CNN_BIGRU():
    """Convolutional + bidirectional GRU sequence tagger.

    Concatenates one-hot tokens, learned embeddings and profile features,
    applies three super_conv_block/conv_block pairs, a BiGRU, and a
    per-timestep softmax over n_tags.
    :return: an uncompiled keras Model
    """
    # one_hot_in is the one-hot encoded version of token_in.
    one_hot_in = Input(shape=(maxlen_seq, n_words))
    token_in = Input(shape=(maxlen_seq,))
    profile_in = Input(shape=(maxlen_seq, 22))
    # Concatenate embedded and unembedded input representations.
    embedded = Embedding(input_dim=n_words, output_dim=64,
                         input_length=maxlen_seq)(token_in)
    x = Concatenate(axis=-1)([one_hot_in, embedded, profile_in])
    # Three rounds of the convolutional feature-block pair.
    for _ in range(3):
        x = super_conv_block(x)
        x = conv_block(x)
    x = Bidirectional(CuDNNGRU(units=256, return_sequences=True,
                               recurrent_regularizer=l2(0.2)))(x)
    x = TimeDistributed(Dropout(0.5))(x)
    x = TimeDistributed(Dense(256, activation="relu"))(x)
    x = TimeDistributed(Dropout(0.5))(x)
    y = TimeDistributed(Dense(n_tags, activation="softmax"))(x)
    return Model([one_hot_in, token_in, profile_in], y)
示例13: build_model
# Required module: from keras import models [as alias]
# Alternatively: from keras.models import Input [as alias]
def build_model():
    """Build a two-branch tagger: dense branch + BiGRU branch merged into a TCN.

    Both branches embed the same token sequence (250-d and 125-d) and
    concatenate profile features; their outputs are merged, passed through
    a dense layer, dropout, a TCN, and a per-timestep softmax.
    :return: a compiled keras Model (Adam, categorical crossentropy)
    """
    tokens_in = Input(shape=(None,))
    profiles_in = Input(shape=(None, 22))
    # Embedding branch 1: 250-d vectors feeding the dense path.
    branch_a = Embedding(input_dim=n_words, output_dim=250, input_length=None)(tokens_in)
    branch_a = concatenate([branch_a, profiles_in], axis=2)
    # Embedding branch 2: 125-d vectors feeding the recurrent path.
    branch_b = Embedding(input_dim=n_words, output_dim=125, input_length=None)(tokens_in)
    branch_b = concatenate([branch_b, profiles_in], axis=2)
    branch_a = Dense(1200, activation="relu")(branch_a)
    branch_a = Dropout(0.5)(branch_a)
    # Bidirectional GRU stack over the second embedded representation.
    branch_b = Bidirectional(CuDNNGRU(units=500, return_sequences=True))(branch_b)
    branch_b = Bidirectional(CuDNNGRU(units=100, return_sequences=True))(branch_b)
    merged = concatenate([branch_a, branch_b])
    w = Dense(500, activation="relu")(merged)  # try 500
    w = Dropout(0.4)(w)
    w = tcn.TCN()(w)
    y = TimeDistributed(Dense(n_tags, activation="softmax"))(w)
    model = Model([tokens_in, profiles_in], y)
    # Categorical cross-entropy with the project's custom accuracy metric.
    adamOptimizer = Adam(lr=0.0025, beta_1=0.8, beta_2=0.8, epsilon=None,
                         decay=0.0001, amsgrad=False)
    model.compile(optimizer=adamOptimizer, loss="categorical_crossentropy",
                  metrics=["accuracy", accuracy])
    return model
# Defining the decoders so that we can
示例14: get_deep_convnet
# Required module: from keras import models [as alias]
# Alternatively: from keras.models import Input [as alias]
def get_deep_convnet(window_size=4096, channels=2, output_size=84):
    """Build and compile a deep complex-valued convnet.

    Five conv/BN/ReLU/avg-pool stages (with one extra plain ReLU conv
    before the last stage), then a dense head with sigmoid outputs.

    :param window_size: number of samples per input window
    :param channels: number of input channels per sample
    :param output_size: number of sigmoid output units
    :return: a compiled keras Model (Adam, binary crossentropy)
    """
    def conv_bn_relu_pool(t, n_filters, width, stride):
        # Linear complex conv -> complex BN -> ReLU -> 2x average pool.
        t = ComplexConv1D(n_filters, width, strides=stride, padding='same',
                          activation='linear',
                          kernel_initializer='complex_independent')(t)
        t = ComplexBN(axis=-1)(t)
        t = keras.layers.Activation('relu')(t)
        return keras.layers.AveragePooling1D(pool_size=2, strides=2)(t)

    net_in = Input(shape=(window_size, channels))
    features = conv_bn_relu_pool(net_in, 16, 6, 2)
    features = conv_bn_relu_pool(features, 32, 3, 2)
    features = conv_bn_relu_pool(features, 64, 3, 1)
    features = conv_bn_relu_pool(features, 64, 3, 1)
    # Extra ReLU-activated conv with no batch norm or pooling.
    features = ComplexConv1D(128, 3, strides=1, padding='same',
                             activation='relu',
                             kernel_initializer='complex_independent')(features)
    features = conv_bn_relu_pool(features, 128, 3, 1)
    features = keras.layers.Flatten()(features)
    features = keras.layers.Dense(2048, activation='relu',
                                  kernel_initializer='glorot_normal')(features)
    # Negative bias start pushes initial sigmoid outputs toward zero.
    predictions = keras.layers.Dense(
        output_size, activation='sigmoid',
        bias_initializer=keras.initializers.Constant(value=-5))(features)
    net = Model(inputs=net_in, outputs=predictions)
    net.compile(optimizer=keras.optimizers.Adam(lr=1e-4),
                loss='binary_crossentropy',
                metrics=['accuracy'])
    return net
示例15: vggnet_keras
# Required module: from keras import models [as alias]
# Alternatively: from keras.models import Input [as alias]
def vggnet_keras():
    """Build VGG-16 with torchvision-style layer names.

    Conv layers are named 'features.N' and dense layers 'classifier.N' to
    line up with torchvision's nn.Sequential indices (handy when porting
    PyTorch weights). Channels-first (3, 224, 224) input; the final
    1000-way dense layer has no activation.
    :return: an uncompiled keras Model named 'vgg16'
    """
    image = Input((3, 224, 224))
    x = image
    # feature_idx tracks torchvision's feature index: each conv occupies
    # two slots (conv + in-place ReLU) and each pool one slot.
    feature_idx = 0
    vgg_blocks = [(2, 64), (2, 128), (3, 256), (3, 512), (3, 512)]
    for block_num, (n_convs, n_filters) in enumerate(vgg_blocks, start=1):
        for _ in range(n_convs):
            x = Conv2D(n_filters, (3, 3), activation='relu', padding='same',
                       name='features.%d' % feature_idx)(x)
            feature_idx += 2
        x = MaxPooling2D((2, 2), strides=(2, 2),
                         name='block%d_pool' % block_num)(x)
        feature_idx += 1
    # Classifier head.
    x = Flatten(name='flatten')(x)
    x = Dense(4096, activation='relu', name='classifier.0')(x)
    x = Dropout(0.5)(x)
    x = Dense(4096, activation='relu', name='classifier.3')(x)
    x = Dropout(0.5)(x)
    x = Dense(1000, activation=None, name='classifier.6')(x)
    # Create model.
    return Model(image, x, name='vgg16')