This article collects typical usage examples of the Python method keras.layers.convolutional.Convolution1D. If you have been wondering what convolutional.Convolution1D does, or how to use it in practice, the curated examples below may help. You can also read further about the containing module, keras.layers.convolutional.
The following shows 14 code examples of the convolutional.Convolution1D method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
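Most of the examples below target the Keras 1.x API, in which Convolution1D takes nb_filter, filter_length, border_mode and subsample_length; in Keras 2 the same layer is called Conv1D and the arguments become filters, kernel_size, padding and strides (a few later examples already use the newer names). As orientation, a minimal self-contained sketch with illustrative shapes:

import numpy as np
from keras.models import Sequential
from keras.layers.convolutional import Convolution1D

# 1D convolution over sequences of 100 timesteps with 32 features each.
model = Sequential()
model.add(Convolution1D(nb_filter=64,        # Keras 2: filters=64
                        filter_length=3,     # Keras 2: kernel_size=3
                        border_mode='same',  # Keras 2: padding='same'
                        activation='relu',
                        input_shape=(100, 32)))
model.compile(loss='mse', optimizer='adam')

x = np.random.rand(8, 100, 32).astype('float32')
print(model.predict(x).shape)  # (8, 100, 64); 'same' padding keeps the length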
Example 1: cnn_model
# Required import: from keras.layers import convolutional [as alias]
# Or: from keras.layers.convolutional import Convolution1D [as alias]
def cnn_model(input_shape, hidden=256, targets=1, multiclass=False, learn_rate=1e-4):
    model = Sequential()
    model.add(Convolution1D(input_shape=input_shape, nb_filter=64, filter_length=3,
                            border_mode='same', activation='relu'))
    model.add(MaxPooling1D(pool_length=3))
    model.add(Bidirectional(LSTM(hidden), merge_mode='concat'))
    model.add(Activation('tanh'))
    model.add(Dropout(0.5))
    model.add(Dense(targets))
    if multiclass:
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy',
                      optimizer=Adam(lr=learn_rate, beta_1=.5),
                      metrics=['categorical_accuracy'])
    else:
        model.add(Activation('sigmoid'))
        model.compile(loss='binary_crossentropy',
                      optimizer=Adam(lr=learn_rate, beta_1=.5),
                      metrics=['accuracy'])
    return model
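A hedged usage sketch for cnn_model; the sequence length, feature count and data below are invented for illustration:

import numpy as np

model = cnn_model(input_shape=(100, 32), hidden=256, targets=1)  # binary head
X = np.random.rand(16, 100, 32)            # 16 sequences of 100 steps x 32 features
y = np.random.randint(0, 2, size=(16, 1))  # binary labels
model.fit(X, y, nb_epoch=2, batch_size=4)  # Keras 1 spelling; Keras 2 uses epochs=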
Example 2: create
# Required import: from keras.layers import convolutional [as alias]
# Or: from keras.layers.convolutional import Convolution1D [as alias]
def create(self):
    self.textual_embedding(self, mask_zero=False)
    self.add(Convolution1D(
        nb_filter=self._config.language_cnn_filters,
        filter_length=self._config.language_cnn_filter_length,
        border_mode='valid',
        activation=self._config.language_cnn_activation,
        subsample_length=1))
    #self.add(MaxPooling1D(pool_length=self._config.language_max_pool_length))
    self.add(self._config.recurrent_encoder(
        self._config.hidden_state_dim,
        return_sequences=False,
        go_backwards=False))
    self.deep_mlp()
    self.add(Dense(self._config.output_dim))
    self.add(Activation('softmax'))
Example 3: hierarchical_cnn
# Required import: from keras.layers import convolutional [as alias]
# Or: from keras.layers.convolutional import Convolution1D [as alias]
def hierarchical_cnn(input_shape, aux_shape, targets=1, hidden=256, multiclass=False, learn_rate=1e-4):
    x = Input(shape=input_shape, name='x')
    xx = Convolution1D(nb_filter=64, filter_length=3, border_mode='same',
                       activation='relu')(x)
    xx = MaxPooling1D(pool_length=3)(xx)
    xx = Bidirectional(LSTM(hidden, activation='relu'), merge_mode='concat')(xx)
    xx = Dropout(0.5)(xx)
    dx = Input(shape=aux_shape, name='aux')
    xx = concatenate([xx, dx])
    if multiclass:
        y = Dense(targets, activation='softmax')(xx)
        model = Model(inputs=[x, dx], outputs=[y])
        model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=learn_rate),
                      metrics=['categorical_accuracy'])
    else:
        y = Dense(targets, activation='sigmoid')(xx)
        model = Model(inputs=[x, dx], outputs=[y])
        model.compile(loss='binary_crossentropy', optimizer=Adam(lr=learn_rate),
                      metrics=['accuracy'])
    return model
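Because hierarchical_cnn builds a two-input Model, training data is passed as a list matching [x, dx]; a hedged sketch with invented shapes:

model = hierarchical_cnn(input_shape=(100, 32), aux_shape=(10,))
# X_seq has shape (n, 100, 32), X_aux has shape (n, 10), y has shape (n, 1)
model.fit([X_seq, X_aux], y, nb_epoch=5, batch_size=32)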
Example 4: _buildEncoder
# Required import: from keras.layers import convolutional [as alias]
# Or: from keras.layers.convolutional import Convolution1D [as alias]
def _buildEncoder(self, x, latent_rep_size, max_length, epsilon_std=0.01):
    h = Convolution1D(9, 9, activation='relu', name='conv_1')(x)
    h = Convolution1D(9, 9, activation='relu', name='conv_2')(h)
    h = Convolution1D(10, 11, activation='relu', name='conv_3')(h)
    h = Flatten(name='flatten_1')(h)
    h = Dense(435, activation='relu', name='dense_1')(h)

    def sampling(args):
        z_mean_, z_log_var_ = args
        batch_size = K.shape(z_mean_)[0]
        epsilon = K.random_normal(shape=(batch_size, latent_rep_size),
                                  mean=0., std=epsilon_std)
        return z_mean_ + K.exp(z_log_var_ / 2) * epsilon

    z_mean = Dense(latent_rep_size, name='z_mean', activation='linear')(h)
    z_log_var = Dense(latent_rep_size, name='z_log_var', activation='linear')(h)

    def vae_loss(x, x_decoded_mean):
        x = K.flatten(x)
        x_decoded_mean = K.flatten(x_decoded_mean)
        xent_loss = max_length * objectives.binary_crossentropy(x, x_decoded_mean)
        kl_loss = -0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
        return xent_loss + kl_loss

    return (vae_loss, Lambda(sampling, output_shape=(latent_rep_size,),
                             name='lambda')([z_mean, z_log_var]))
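The sampling closure implements the VAE reparameterization trick: with sigma = exp(z_log_var / 2) it draws z = z_mean + sigma * epsilon, where epsilon ~ N(0, epsilon_std^2), so the stochastic latent stays differentiable with respect to z_mean and z_log_var. The same arithmetic in plain numpy, with an arbitrary latent size:

import numpy as np

latent_rep_size, epsilon_std = 56, 0.01
z_mean = np.zeros(latent_rep_size)
z_log_var = np.zeros(latent_rep_size)
epsilon = np.random.normal(0., epsilon_std, size=latent_rep_size)
z = z_mean + np.exp(z_log_var / 2) * epsilon  # mirrors sampling() above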
Example 5: embeddingCNN
# Required import: from keras.layers import convolutional [as alias]
# Or: from keras.layers.convolutional import Convolution1D [as alias]
def embeddingCNN(shape, clusters=2, embedLayer=200, middle=100):
    top_words = 2001
    lossType = 'binary_crossentropy' if clusters == 2 else 'categorical_crossentropy'
    model = Sequential()
    model.add(Embedding(top_words, embedLayer, input_length=shape))
    model.add(Convolution1D(nb_filter=embedLayer, filter_length=3,
                            border_mode='same', activation='relu'))
    model.add(MaxPooling1D(pool_length=2))
    model.add(Flatten())
    model.add(Dense(middle, activation='relu'))
    model.add(Dense(clusters, activation='sigmoid'))
    model.compile(loss=lossType, optimizer='adam', metrics=['accuracy'])
    return model
Developer: WayneDW | Project: Sentiment-Analysis-in-Event-Driven-Stock-Price-Movement-Prediction | Lines: 14 | Source file: model_keras_cnn_rnn.py
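A hedged usage sketch: embeddingCNN hard-codes a vocabulary of top_words = 2001, so inputs must be integer word indices below 2001, padded to the fixed length passed as shape:

import numpy as np

seq_len = 50  # illustrative sequence length
model = embeddingCNN(shape=seq_len, clusters=2)
X = np.random.randint(0, 2001, size=(32, seq_len))  # integer word indices
y = np.random.randint(0, 2, size=(32, 2))           # two-column binary targets
model.fit(X, y, nb_epoch=2, batch_size=8)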
Example 6: model
# Required import: from keras.layers import convolutional [as alias]
# Or: from keras.layers.convolutional import Convolution1D [as alias]
def model(X_train, X_test, y_train, y_test, maxlen, max_features):
    embedding_size = 300
    pool_length = 4
    lstm_output_size = 100
    batch_size = 200
    nb_epoch = 1

    model = Sequential()
    model.add(Embedding(max_features, embedding_size, input_length=maxlen))
    model.add(Dropout({{uniform(0, 1)}}))
    # Note that we use unnamed parameters here, which is bad style, but is used
    # here to demonstrate that it works. Always prefer named parameters.
    model.add(Convolution1D({{choice([64, 128])}},
                            {{choice([6, 8])}},
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))
    model.add(MaxPooling1D(pool_length=pool_length))
    model.add(LSTM(lstm_output_size))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    print('Train...')
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              validation_data=(X_test, y_test))
    score, acc = model.evaluate(X_test, y_test, batch_size=batch_size)
    print('Test score:', score)
    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
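The double-brace {{...}} expressions are hyperas templates rather than valid Python; the function is meant to be handed to hyperas, which rewrites it into a hyperopt search space. A hedged launch sketch, assuming a user-supplied data function that returns the arguments the model function consumes (X_train, X_test, y_train, y_test, maxlen, max_features):

from hyperopt import Trials, tpe
from hyperas import optim

best_run, best_model = optim.minimize(model=model, data=data,
                                      algo=tpe.suggest, max_evals=5,
                                      trials=Trials())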
Example 7: getconvmodel
# Required import: from keras.layers import convolutional [as alias]
# Or: from keras.layers.convolutional import Convolution1D [as alias]
def getconvmodel(filter_length, nb_filter):
    model = Sequential()
    model.add(Convolution1D(nb_filter=nb_filter,
                            input_shape=(100, 32),
                            filter_length=filter_length,
                            border_mode='same',
                            activation='relu',
                            subsample_length=1))
    model.add(Lambda(sum_1d, output_shape=(nb_filter,)))
    #model.add(BatchNormalization(mode=0))
    model.add(Dropout(0.5))
    return model
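sum_1d is a helper from the source project, not a Keras built-in; presumably it collapses the time axis so each filter contributes a single value, which could be sketched as:

from keras import backend as K

def sum_1d(x):
    # Assumption: sum feature activations over the time axis,
    # giving one value per filter.
    return K.sum(x, axis=1)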
Example 8: trainCNN
# Required import: from keras.layers import convolutional [as alias]
# Or: from keras.layers.convolutional import Convolution1D [as alias]
def trainCNN(obj, dataset_headLines, dataset_body):
    embedding_dim = 300
    LSTM_neurons = 50
    dense_neuron = 16
    dimx = 100
    dimy = 200
    lamda = 0.0
    nb_filter = 100
    filter_length = 4
    vocab_size = 10000
    batch_size = 50
    epochs = 5
    ntn_out = 16
    ntn_in = nb_filter
    state = False

    train_head, train_body, embedding_matrix = obj.process_data(
        sent_Q=dataset_headLines, sent_A=dataset_body,
        dimx=dimx, dimy=dimy, wordVec_model=wordVec_model)

    inpx = Input(shape=(dimx,), dtype='int32', name='inpx')
    #x = Embedding(output_dim=embedding_dim, input_dim=vocab_size, input_length=dimx)(inpx)
    x = word2vec_embedding_layer(embedding_matrix)(inpx)
    inpy = Input(shape=(dimy,), dtype='int32', name='inpy')
    #y = Embedding(output_dim=embedding_dim, input_dim=vocab_size, input_length=dimy)(inpy)
    y = word2vec_embedding_layer(embedding_matrix)(inpy)

    ques = Convolution1D(nb_filter=nb_filter, filter_length=filter_length,
                         border_mode='valid', activation='relu',
                         subsample_length=1)(x)
    ans = Convolution1D(nb_filter=nb_filter, filter_length=filter_length,
                        border_mode='valid', activation='relu',
                        subsample_length=1)(y)

    #hx = Lambda(max_1d, output_shape=(nb_filter,))(ques)
    #hy = Lambda(max_1d, output_shape=(nb_filter,))(ans)
    hx = GlobalMaxPooling1D()(ques)
    hy = GlobalMaxPooling1D()(ans)
    #wordVec_model = []
    #h = Merge(mode="concat", name='h')([hx, hy])

    h1 = Multiply()([hx, hy])
    h2 = Abs()([hx, hy])
    h = Merge(mode="concat", name='h')([h1, h2])
    #h = NeuralTensorLayer(output_dim=1, input_dim=ntn_in)([hx, hy])
    #h = ntn_layer(ntn_in, ntn_out, activation=None)([hx, hy])
    #score = h

    wrap = Dense(dense_neuron, activation='relu', name='wrap')(h)
    #score = Dense(1, activation='sigmoid', name='score')(h)
    #wrap = Dense(dense_neuron, activation='relu', name='wrap')(h)
    score = Dense(4, activation='softmax', name='score')(wrap)
    #score = K.clip(score, 1e-7, 1.0 - 1e-7)
    #corr = CorrelationRegularization(-lamda)([hx, hy])
    #model = Model([inpx, inpy], [score, corr])
    model = Model([inpx, inpy], score)
    model.compile(loss='categorical_crossentropy', optimizer="adadelta",
                  metrics=['accuracy'])
    return model, train_head, train_body
Example 9: create_neural_network_rnn
# Required import: from keras.layers import convolutional [as alias]
# Or: from keras.layers.convolutional import Convolution1D [as alias]
def create_neural_network_rnn(self):
    """
    Create the Neural Network Model
    :return: Keras Model
    """
    model = Sequential()
    # we start off with an efficient embedding layer which maps
    # our vocab indices into embedding_dims dimensions
    model.add(Embedding(12,   # Number of Features from State Space
                        300,  # Vector Size
                        input_length=self.input_dim))
    # we add a Convolution1D, which will learn nb_filter
    # word group filters of size filter_length:
    model.add(Convolution1D(nb_filter=self.nb_filter,
                            filter_length=self.filter_length,
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))
    # we use standard max pooling (halving the output of the previous layer):
    model.add(MaxPooling1D(pool_length=self.pool_length))
    model.add(Dropout(self.dropout))
    # We flatten the output of the conv layer,
    # so that we can add a vanilla dense layer:
    model.add(Flatten())
    # We add a vanilla hidden layer:
    model.add(Dense(self.neurons))
    model.add(Dropout(self.dropout))
    model.add(Activation('relu'))
    # We project onto one linear output unit per action: these are
    # Q-value estimates, so no squashing nonlinearity is applied.
    model.add(Dense(len(self.actions)))
    model.add(Activation('linear'))
    model.compile(loss='mse',
                  optimizer=Adadelta(lr=0.00025))
    print(model.summary())
    return model
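The linear output with an mse loss marks this as Q-value regression rather than classification: one output unit per action, with the greedy policy taking the argmax. A hedged usage sketch (state_batch is an invented array of encoded states):

import numpy as np

q_values = model.predict(state_batch)  # shape: (batch, len(self.actions))
greedy = np.argmax(q_values, axis=1)   # index of the highest-valued action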
Example 10: create_model
# Required import: from keras.layers import convolutional [as alias]
# Or: from keras.layers.convolutional import Convolution1D [as alias]
def create_model(self, n_dim, r):
    # load inputs
    X, _, _ = self.inputs
    K.set_session(self.sess)

    with tf.name_scope('generator'):
        x = X
        L = self.layers
        # dim/layer: 4096, 2048, 1024, 512, 256, 128, 64, 32
        n_filters = [128, 384, 512, 512, 512, 512, 512, 512]
        n_filtersizes = [65, 33, 17, 9, 9, 9, 9, 9, 9]
        downsampling_l = []
        print('building model...')

        # downsampling layers
        for l, nf, fs in zip(range(L), n_filters, n_filtersizes):
            with tf.name_scope('downsc_conv%d' % l):
                x = Convolution1D(nb_filter=nf, filter_length=fs,
                                  activation=None, border_mode='same',
                                  init=orthogonal_init, subsample_length=2)(x)
                # if l > 0: x = BatchNormalization(mode=2)(x)
                x = LeakyReLU(0.2)(x)
                print('D-Block: ', x.get_shape())
                downsampling_l.append(x)

        # bottleneck layer
        with tf.name_scope('bottleneck_conv'):
            x = Convolution1D(nb_filter=n_filters[-1],
                              filter_length=n_filtersizes[-1],
                              activation=None, border_mode='same',
                              init=orthogonal_init, subsample_length=2)(x)
            x = Dropout(p=0.5)(x)
            x = LeakyReLU(0.2)(x)

        # upsampling layers
        for l, nf, fs, l_in in reversed(list(zip(range(L), n_filters,
                                                 n_filtersizes, downsampling_l))):
            with tf.name_scope('upsc_conv%d' % l):
                # (-1, n/2, 2f)
                x = Convolution1D(nb_filter=2 * nf, filter_length=fs,
                                  activation=None, border_mode='same',
                                  init=orthogonal_init)(x)
                x = Dropout(p=0.5)(x)
                x = Activation('relu')(x)
                # (-1, n, f)
                x = SubPixel1D(x, r=2)
                # (-1, n, 2f)
                x = K.concatenate(tensors=[x, l_in], axis=2)
                print('U-Block: ', x.get_shape())

        # final conv layer
        with tf.name_scope('lastconv'):
            x = Convolution1D(nb_filter=2, filter_length=9,
                              activation=None, border_mode='same',
                              init=normal_init)(x)
            x = SubPixel1D(x, r=2)
            print(x.get_shape())

        g = merge([x, X], mode='sum')
    return g
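SubPixel1D, orthogonal_init and normal_init come from the surrounding project. A common SubPixel1D implementation is a one-dimensional pixel shuffle that trades channels for temporal resolution; a hedged TF1-style sketch, assuming the input carries r channels to be interleaved:

import tensorflow as tf

def SubPixel1D(I, r):
    # Assumed 1D subpixel shuffle: (batch, w, r) -> (batch, r*w, 1).
    with tf.name_scope('subpixel'):
        X = tf.transpose(I, [2, 1, 0])              # (r, w, batch)
        X = tf.batch_to_space_nd(X, [r], [[0, 0]])  # (1, r*w, batch)
        X = tf.transpose(X, [2, 1, 0])              # (batch, r*w, 1)
    return X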
Example 11: copy_model
# Required import: from keras.layers import convolutional [as alias]
# Or: from keras.layers.convolutional import Convolution1D [as alias]
def copy_model(input_row, input_col):
    filtersize1 = 1
    filtersize2 = 9
    filtersize3 = 10
    filter1 = 200
    filter2 = 150
    filter3 = 200
    dropout1 = 0.75
    dropout2 = 0.75
    dropout4 = 0.75
    dropout5 = 0.75
    dropout6 = 0
    L1CNN = 0
    nb_classes = 2
    batch_size = 1200
    actfun = "relu"
    optimization = 'adam'
    attentionhidden_x = 10
    attentionhidden_xr = 8
    attention_reg_x = 0.151948
    attention_reg_xr = 2
    dense_size1 = 149
    dense_size2 = 8
    dropout_dense1 = 0.298224
    dropout_dense2 = 0

    input = Input(shape=(input_row, input_col))
    x = conv.Convolution1D(filter1, filtersize1, kernel_initializer='he_normal',
                           kernel_regularizer=l1(L1CNN), padding="same")(input)
    x = Dropout(dropout1)(x)
    x = Activation(actfun)(x)
    x = conv.Convolution1D(filter2, filtersize2, kernel_initializer='he_normal',
                           kernel_regularizer=l1(L1CNN), padding="same")(x)
    x = Dropout(dropout2)(x)
    x = Activation(actfun)(x)
    x = conv.Convolution1D(filter3, filtersize3, kernel_initializer='he_normal',
                           kernel_regularizer=l1(L1CNN), padding="same")(x)
    x = Activation(actfun)(x)
    x_reshape = core.Reshape((x._keras_shape[2], x._keras_shape[1]))(x)

    x = Dropout(dropout4)(x)
    x_reshape = Dropout(dropout5)(x_reshape)

    decoder_x = Attention(hidden=attentionhidden_x, activation='linear',
                          init='he_normal', W_regularizer=l1(attention_reg_x))
    decoded_x = decoder_x(x)
    output_x = myFlatten(x._keras_shape[2])(decoded_x)

    decoder_xr = Attention(hidden=attentionhidden_xr, activation='linear',
                           init='he_normal', W_regularizer=l1(attention_reg_xr))
    decoded_xr = decoder_xr(x_reshape)
    output_xr = myFlatten(x_reshape._keras_shape[2])(decoded_xr)

    output = merge([output_x, output_xr], mode='concat')
    output = Dropout(dropout6)(output)
    output = Dense(dense_size1, kernel_initializer='he_normal', activation='relu')(output)
    output = Dropout(dropout_dense1)(output)
    output = Dense(dense_size2, activation="relu", kernel_initializer='he_normal')(output)
    output = Dropout(dropout_dense2)(output)
    out = Dense(nb_classes, kernel_initializer='he_normal', activation='softmax')(output)

    cp_model = Model(input, out)
    return cp_model
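A note on the design: Attention and myFlatten are custom layers from the source project, not Keras built-ins. The core.Reshape((x._keras_shape[2], x._keras_shape[1])) call transposes the convolutional feature map, so the two attention heads see the positions-by-filters map and its transpose respectively, and their flattened outputs are concatenated before the dense classifier.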
Example 12: time_glot_model
# Required import: from keras.layers import convolutional [as alias]
# Or: from keras.layers.convolutional import Convolution1D [as alias]
def time_glot_model(timesteps=128, input_dim=22, output_dim=400,
                    model_name="time_glot_model"):
    ac_input = Input(shape=(timesteps, input_dim), name="ac_input")

    x_t = ac_input
    x_t = GRU(50, activation='relu', kernel_initializer='glorot_normal',
              return_sequences=False, unroll=False)(x_t)
    x = x_t

    x = Dense(output_dim)(x)
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Reshape((output_dim, 1))(x)

    x = Convolution1D(filters=100, kernel_size=15, padding='same', strides=1)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.1)(x)

    x = Convolution1D(filters=100, kernel_size=15, padding='same', strides=1)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.1)(x)

    x = Convolution1D(filters=100, kernel_size=15, padding='same', strides=1)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.1)(x)

    x = Convolution1D(filters=100, kernel_size=15, padding='same', strides=1)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.1)(x)

    x = Convolution1D(filters=1, kernel_size=15, padding='same', strides=1)(x)

    # remove singleton outer dimension
    x = Reshape((output_dim,))(x)

    x_t = x
    x_fft = fft_layer(x)

    model = Model(inputs=[ac_input], outputs=[x_t, x_fft], name=model_name)
    return model
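fft_layer is not shown in the excerpt; both this model and the generator below emit the waveform alongside a spectral view of it, so a loss can be applied in both domains. As a loudly-labeled assumption (the project's real fft_layer may use a log scale or other normalization), a stand-in could compute a magnitude spectrum with a Lambda layer:

import tensorflow as tf
from keras.layers import Lambda

def fft_layer(x):
    # Hypothetical stand-in: magnitude of the real FFT of each pulse.
    return Lambda(lambda v: tf.abs(tf.signal.rfft(v)))(x)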
Example 13: generator
# Required import: from keras.layers import convolutional [as alias]
# Or: from keras.layers.convolutional import Convolution1D [as alias]
def generator(input_dim=400, ac_dim=22, output_dim=400):
    pls_input = Input(shape=(input_dim,), name="pls_input")
    noise_input = Input(shape=(input_dim,), name="noise_input")

    pls = Reshape((input_dim, 1))(pls_input)
    noise = Reshape((input_dim, 1))(noise_input)

    x = concatenate([pls, noise], axis=2)  # concat as different channels
    x = Convolution1D(filters=100, kernel_size=15, padding='same', strides=1)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.1)(x)

    x = concatenate([pls, x], axis=2)  # concat as different channels
    x = Convolution1D(filters=100, kernel_size=15, padding='same', strides=1)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.1)(x)

    x = concatenate([pls, x], axis=2)  # concat as different channels
    x = Convolution1D(filters=100, kernel_size=15, padding='same', strides=1)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.1)(x)

    x = concatenate([pls, x], axis=2)  # concat as different channels
    x = Convolution1D(filters=1, kernel_size=15, padding='same', strides=1)(x)
    x = Activation('tanh')(x)

    # force additivity
    x = add([pls, x])
    # remove singleton outer dimension
    x = Reshape((output_dim,))(x)
    # add fft channel to output
    x_fft = fft_layer(x)

    model = Model(inputs=[pls_input, noise_input], outputs=[x, x_fft],
                  name="generator")
    return model
Example 14: discriminator
# Required import: from keras.layers import convolutional [as alias]
# Or: from keras.layers.convolutional import Convolution1D [as alias]
def discriminator(input_dim=400):
    pls_input = Input(shape=(input_dim,), name="pls_input")
    fft_input = Input(shape=(input_dim,), name="fft_input")

    x = Reshape((input_dim, 1))(pls_input)
    x_fft = Reshape((input_dim, 1))(fft_input)
    x = concatenate([x, x_fft], axis=2)  # concat as different channels

    # input shape: batch_size x 400 (length of pulse) x 2 (number of channels)
    x = Convolution1D(filters=64, kernel_size=7, strides=3)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.1)(x)

    # shape [batch_size x 132 x 64]
    x = Convolution1D(filters=128, kernel_size=7, strides=3)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.1)(x)

    # shape [batch_size x 42 x 128]
    x = Convolution1D(filters=256, kernel_size=7, strides=3)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.1)(x)

    peek_output = x  # used for generator training regularization

    # shape [batch_size x 12 x 256]
    x = Convolution1D(filters=128, kernel_size=5, strides=2)(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.1)(x)

    # shape [batch_size x 4 x 128]
    # nn.Sigmoid() would be used for a normal GAN; commented out for LS-GAN
    x = Convolution1D(filters=1, kernel_size=3, strides=2)(x)

    # shape [batch_size x 1 x 1]
    x = Reshape((1,))(x)

    model = Model(inputs=[pls_input, fft_input], outputs=[x, peek_output],
                  name="discriminator")
    return model
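The omitted sigmoid is deliberate: for a least-squares GAN the discriminator emits a raw score and is trained with an mse loss against real/fake targets rather than binary cross-entropy. A hedged compile sketch for the score head (the peek output is excluded from the direct loss; it typically feeds a feature-matching term during generator training):

d = discriminator()
d.compile(loss=['mse', None], optimizer='adam')  # LS-GAN objective on the score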