本文整理汇总了Python中keras.models.Sequential.predict_classes方法的典型用法代码示例。如果您正苦于以下问题:Python Sequential.predict_classes方法的具体用法?Python Sequential.predict_classes怎么用?Python Sequential.predict_classes使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类keras.models.Sequential
的用法示例。
在下文中一共展示了Sequential.predict_classes方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_nested_sequential
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import predict_classes [as 别名]
def test_nested_sequential(in_tmpdir):
    """Smoke-test a Sequential model nested two levels deep (Keras 2 API).

    Builds inner -> middle -> model, runs the main fit() variants, checks the
    evaluation loss survives a save/load-weights round trip, then exercises
    every serialization path (get_config / JSON / YAML).

    NOTE(review): relies on module-level globals (num_hidden, input_dim,
    num_class, batch_size, epochs) and helpers (_get_test_data,
    model_from_json, model_from_yaml, os) defined elsewhere in the file.
    """
    (x_train, y_train), (x_test, y_test) = _get_test_data()

    # Two-level nesting: an inner Dense stack wrapped in a 'middle' Sequential.
    inner = Sequential()
    inner.add(Dense(num_hidden, input_shape=(input_dim,)))
    inner.add(Activation('relu'))
    inner.add(Dense(num_class))
    middle = Sequential()
    middle.add(inner)
    model = Sequential()
    model.add(middle)
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    # Cover the main fit() configurations: validation_data, validation_split,
    # silent training, and shuffle=False.
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test))
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=2, validation_split=0.1)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=0)
    model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, shuffle=False)
    model.train_on_batch(x_train[:32], y_train[:32])

    loss = model.evaluate(x_test, y_test, verbose=0)
    # Exercise all three prediction entry points (predict_classes/predict_proba
    # are the legacy Sequential-only helpers under test here).
    model.predict(x_test, verbose=0)
    model.predict_classes(x_test, verbose=0)
    model.predict_proba(x_test, verbose=0)

    # Save weights, rebuild the identical architecture from scratch, reload,
    # and require the evaluation loss to be reproduced exactly.
    fname = 'test_nested_sequential_temp.h5'
    model.save_weights(fname, overwrite=True)
    inner = Sequential()
    inner.add(Dense(num_hidden, input_shape=(input_dim,)))
    inner.add(Activation('relu'))
    inner.add(Dense(num_class))
    middle = Sequential()
    middle.add(inner)
    model = Sequential()
    model.add(middle)
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.load_weights(fname)
    os.remove(fname)
    nloss = model.evaluate(x_test, y_test, verbose=0)
    assert(loss == nloss)

    # test serialization
    config = model.get_config()
    Sequential.from_config(config)
    model.summary()
    json_str = model.to_json()
    model_from_json(json_str)
    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str)
示例2: test_nested_sequential
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import predict_classes [as 别名]
def test_nested_sequential():
    """Smoke-test a nested Sequential model (legacy Keras 1.x API: nb_epoch).

    Same scenario as the Keras-2 variant above: build inner -> middle -> model,
    train, verify save/load-weights reproduces the evaluation loss, then
    round-trip through config / JSON / YAML serialization.

    NOTE(review): relies on module-level globals (nb_hidden, input_dim,
    nb_class, batch_size, nb_epoch) and helpers defined elsewhere in the file.
    """
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    # Nested architecture: Dense stack -> wrapper Sequential -> outer model.
    inner = Sequential()
    inner.add(Dense(nb_hidden, input_shape=(input_dim,)))
    inner.add(Activation("relu"))
    inner.add(Dense(nb_class))
    middle = Sequential()
    middle.add(inner)
    model = Sequential()
    model.add(middle)
    model.add(Activation("softmax"))
    model.compile(loss="categorical_crossentropy", optimizer="rmsprop")

    # Main fit() variants: validation_data, validation_split, silent, no shuffle.
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=2, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)
    model.train_on_batch(X_train[:32], y_train[:32])

    loss = model.evaluate(X_test, y_test, verbose=0)
    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)

    # Weight persistence round trip: identical architecture must evaluate to
    # exactly the same loss after load_weights().
    fname = "test_nested_sequential_temp.h5"
    model.save_weights(fname, overwrite=True)
    inner = Sequential()
    inner.add(Dense(nb_hidden, input_shape=(input_dim,)))
    inner.add(Activation("relu"))
    inner.add(Dense(nb_class))
    middle = Sequential()
    middle.add(inner)
    model = Sequential()
    model.add(middle)
    model.add(Activation("softmax"))
    model.compile(loss="categorical_crossentropy", optimizer="rmsprop")
    model.load_weights(fname)
    os.remove(fname)
    nloss = model.evaluate(X_test, y_test, verbose=0)
    assert loss == nloss

    # test serialization
    config = model.get_config()
    new_model = Sequential.from_config(config)
    model.summary()
    json_str = model.to_json()
    new_model = model_from_json(json_str)
    yaml_str = model.to_yaml()
    new_model = model_from_yaml(yaml_str)
示例3: test_merge_overlap
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import predict_classes [as 别名]
def test_merge_overlap():
    """Smoke-test a Merge layer fed the SAME branch twice (legacy Keras 0.x/1.x).

    The 'left' Sequential appears twice in Merge([left, left], mode='sum'),
    i.e. overlapping inputs. Trains, requires loss < 0.7, and checks the
    save/load-weights round trip reproduces the loss.

    NOTE(review): unlike its siblings this test never calls _get_test_data();
    X_train/y_train/X_test appear to be module-level globals — verify.
    """
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))
    model = Sequential()
    # Same branch used for both merge inputs — the "overlap" under test.
    model.add(Merge([left, left], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    # fit() variants, including the legacy show_accuracy flag.
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=1, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=2, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=2, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=1, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)
    model.train_on_batch(X_train[:32], y_train[:32])

    # Learning sanity check on the training set.
    loss = model.evaluate(X_train, y_train, verbose=0)
    assert(loss < 0.7)
    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)
    model.get_config(verbose=0)

    # Weight persistence round trip on the same model instance.
    fname = 'test_merge_overlap_temp.h5'
    model.save_weights(fname, overwrite=True)
    model.load_weights(fname)
    os.remove(fname)
    nloss = model.evaluate(X_train, y_train, verbose=0)
    assert(loss == nloss)
示例4: test_merge_sum
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import predict_classes [as 别名]
def test_merge_sum():
    """Smoke-test Merge(mode='sum') over two independent branches (legacy Keras).

    Trains a two-input summed-merge model, checks the save/load-weights round
    trip reproduces the evaluation loss, then exercises config / JSON / YAML
    serialization.

    NOTE(review): relies on module-level globals (nb_hidden, input_dim,
    nb_class, batch_size, nb_epoch) defined elsewhere in the file.
    """
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    # Two independent branches merged by element-wise sum.
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))
    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))
    model = Sequential()
    model.add(Merge([left, right], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    # Multi-input fit(): the same X is fed to both branches.
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, validation_data=([X_test, X_test], y_test))
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, validation_split=0.1)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)

    loss = model.evaluate([X_test, X_test], y_test, verbose=0)
    model.predict([X_test, X_test], verbose=0)
    model.predict_classes([X_test, X_test], verbose=0)
    model.predict_proba([X_test, X_test], verbose=0)

    # test weight saving
    fname = 'test_merge_sum_temp.h5'
    model.save_weights(fname, overwrite=True)
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))
    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))
    model = Sequential()
    model.add(Merge([left, right], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    # Weights are loaded before compile() here — order differs from the
    # nested-sequential tests but both are accepted by the legacy API.
    model.load_weights(fname)
    os.remove(fname)
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    nloss = model.evaluate([X_test, X_test], y_test, verbose=0)
    assert(loss == nloss)

    # test serialization
    config = model.get_config()
    Sequential.from_config(config)
    model.summary()
    json_str = model.to_json()
    model_from_json(json_str)
    yaml_str = model.to_yaml()
    model_from_yaml(yaml_str)
示例5: test_merge_concat
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import predict_classes [as 别名]
def test_merge_concat(self):
    """Smoke-test Merge(mode='concat') over two branches (legacy unittest style).

    Trains a two-input concatenating-merge model, fails if training loss
    exceeds 0.7, and checks that a save/load-weights round trip into a freshly
    built identical model reproduces the loss exactly.

    NOTE(review): takes self (old unittest.TestCase method) but reads
    X_train/y_train/X_test/y_test and the nb_* hyper-parameters as module
    globals — verify they are defined at module scope.
    """
    print('Test merge: concat')
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))
    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))
    model = Sequential()
    # Branch outputs are concatenated (feature dim doubles) before the head.
    model.add(Merge([left, right], mode='concat'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    # fit() variants with and without the legacy show_accuracy flag.
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=([X_test, X_test], y_test))
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=([X_test, X_test], y_test))
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_split=0.1)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_split=0.1)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)

    # Learning sanity check on the training set.
    loss = model.evaluate([X_train, X_train], y_train, verbose=0)
    print('loss:', loss)
    if loss > 0.7:
        raise Exception('Score too low, learning issue.')
    model.predict([X_test, X_test], verbose=0)
    model.predict_classes([X_test, X_test], verbose=0)
    model.predict_proba([X_test, X_test], verbose=0)
    model.get_config(verbose=0)

    print('test weight saving')
    fname = 'test_merge_concat_temp.h5'
    model.save_weights(fname, overwrite=True)
    # Rebuild the identical architecture and reload the saved weights.
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))
    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))
    model = Sequential()
    model.add(Merge([left, right], mode='concat'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.load_weights(fname)
    os.remove(fname)
    nloss = model.evaluate([X_train, X_train], y_train, verbose=0)
    assert(loss == nloss)
示例6: test_siamese_1
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import predict_classes [as 别名]
def test_siamese_1():
    """Smoke-test the legacy Siamese layer with merge_mode='sum'.

    A shared Dense layer is applied to two input branches, the results are
    summed, and a softmax head is trained. Requires evaluation loss < 0.8 and
    an exact loss match after a save/load-weights round trip.

    NOTE(review): relies on module-level globals (nb_hidden, input_dim,
    nb_class, batch_size, nb_epoch) defined elsewhere in the file.
    """
    (X_train, y_train), (X_test, y_test) = _get_test_data()

    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))
    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))
    model = Sequential()
    # One Dense(nb_hidden) is shared across both branches, outputs summed.
    model.add(Siamese(Dense(nb_hidden), [left, right], merge_mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    # fit() variants with and without the legacy show_accuracy flag.
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_data=([X_test, X_test], y_test))
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_data=([X_test, X_test], y_test))
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=0, validation_split=0.1)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=0, validation_split=0.1)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit([X_train, X_train], y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0, shuffle=False)

    loss = model.evaluate([X_test, X_test], y_test, verbose=0)
    assert(loss < 0.8)
    model.predict([X_test, X_test], verbose=0)
    model.predict_classes([X_test, X_test], verbose=0)
    model.predict_proba([X_test, X_test], verbose=0)
    model.get_config(verbose=0)

    # test weight saving
    fname = 'test_siamese_1.h5'
    model.save_weights(fname, overwrite=True)
    # Rebuild the identical architecture and reload the saved weights.
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))
    right = Sequential()
    right.add(Dense(nb_hidden, input_shape=(input_dim,)))
    right.add(Activation('relu'))
    model = Sequential()
    model.add(Siamese(Dense(nb_hidden), [left, right], merge_mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    # load_weights before compile — accepted by the legacy API.
    model.load_weights(fname)
    os.remove(fname)
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    nloss = model.evaluate([X_test, X_test], y_test, verbose=0)
    assert(loss == nloss)
示例7: evaluate_keras_classification_model
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import predict_classes [as 别名]
def evaluate_keras_classification_model(X_train, X_test, y_train, y_test):
    """Train a 3-layer tanh MLP classifier and print train/test accuracy.

    Labels are one-hot encoded for training; accuracy is computed by comparing
    integer predictions from predict_classes() against the raw integer labels.

    NOTE(review): assumes y_train/y_test are integer class labels (required by
    np_utils.to_categorical and the == comparison) — confirm against callers.
    Casts inputs to theano.config.floatX, so this targets the Theano backend.
    """
    X_train = X_train.astype(theano.config.floatX)
    X_test = X_test.astype(theano.config.floatX)
    print("First 3 labels: %s" % y_train[:3])

    # One-hot encode the integer labels for categorical_crossentropy.
    y_train_ohe = np_utils.to_categorical(y_train)
    print('\nFirst 3 labels (one-hot):\n', y_train_ohe[:3])

    # 3-layer MLP: input -> 50 -> 50 -> n_classes, tanh hidden activations.
    model = Sequential()
    model.add(Dense(
        input_dim=X_train.shape[1],
        output_dim=50,
        init='uniform',
        activation='tanh',
    ))
    model.add(Dense(
        input_dim=50,
        output_dim=50,
        init='uniform',
        activation='tanh',
    ))
    model.add(Dense(
        input_dim=50,
        output_dim=y_train_ohe.shape[1],
        init='uniform',
        activation='softmax',
    ))

    sgd = SGD(lr=0.001, decay=1e-7, momentum=0.9)
    model.compile(loss='categorical_crossentropy', optimizer=sgd)
    model.fit(
        X_train,
        y_train_ohe,
        nb_epoch=5,
        batch_size=300,
        verbose=1,
        validation_split=0.1,
        show_accuracy=True,
    )

    # Accuracy = fraction of integer predictions matching the raw labels.
    y_train_pred = model.predict_classes(X_train, verbose=0)
    print('First 3 predictions: ', y_train_pred[:3])
    train_acc = np.sum(y_train == y_train_pred, axis=0) / X_train.shape[0]
    print("Training accuracy: %.2f%%" % (train_acc * 100))
    y_test_pred = model.predict_classes(X_test, verbose=0)
    test_acc = np.sum(y_test == y_test_pred, axis=0) / X_test.shape[0]
    print("Test accuracy: %.2f%%" % (test_acc * 100))
示例8: test_sequential
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import predict_classes [as 别名]
def test_sequential(self):
    """Smoke-test a plain single-branch Sequential MLP (legacy unittest style).

    Trains with every fit() variant, fails if training loss exceeds 0.7,
    verifies the save/load-weights round trip, then round-trips the model
    through JSON and YAML serialization.

    NOTE(review): takes self (old unittest.TestCase method) but reads
    X_train/y_train/X_test/y_test and the nb_* hyper-parameters as module
    globals — verify they are defined at module scope.
    """
    print('Test sequential')
    model = Sequential()
    model.add(Dense(nb_hidden, input_shape=(input_dim,)))
    model.add(Activation('relu'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    # fit() variants, including the legacy show_accuracy flag.
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=1, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=2, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=2, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=1, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)
    model.train_on_batch(X_train[:32], y_train[:32])

    # Learning sanity check on the training set.
    loss = model.evaluate(X_train, y_train, verbose=0)
    print('loss:', loss)
    if loss > 0.7:
        raise Exception('Score too low, learning issue.')
    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)
    model.get_config(verbose=0)

    print('test weight saving')
    fname = 'test_sequential_temp.h5'
    model.save_weights(fname, overwrite=True)
    # Rebuild the identical architecture and reload the saved weights.
    model = Sequential()
    model.add(Dense(nb_hidden, input_shape=(input_dim,)))
    model.add(Activation('relu'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    model.load_weights(fname)
    os.remove(fname)
    nloss = model.evaluate(X_train, y_train, verbose=0)
    assert(loss == nloss)

    # test json serialization
    json_data = model.to_json()
    model = model_from_json(json_data)
    # test yaml serialization
    yaml_data = model.to_yaml()
    model = model_from_yaml(yaml_data)
示例9: evaluate_conv_model
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import predict_classes [as 别名]
def evaluate_conv_model(dataset, num_classes, maxlen=125, embedding_dims=250,
                        max_features=5000, nb_filter=300, filter_length=3,
                        num_hidden=250, dropout=0.25, verbose=True,
                        pool_length=2, with_lstm=False):
    """Train and evaluate a 1D-conv text classifier (legacy Keras 1.x API).

    Pipeline: Embedding -> Dropout -> Convolution1D -> optional MaxPooling1D
    -> (LSTM | Flatten + Dense hidden layer) -> softmax over num_classes.

    Args:
        dataset: ((X_train, Y_train), (X_test, Y_test)); X are integer token
            sequences, Y are one-hot labels (categorical_crossentropy).
        num_classes: size of the softmax output layer.
        maxlen: sequences are padded/truncated to this length.
        embedding_dims, max_features: embedding layer shape/vocab size.
        nb_filter, filter_length: Convolution1D configuration.
        num_hidden, dropout: dense-head width and dropout rate.
        verbose: print progress and scores.
        pool_length: MaxPooling1D size; falsy/0 disables pooling entirely.
        with_lstm: replace the Flatten+Dense head with an LSTM(125).

    Returns:
        (predictions, accuracy): integer class predictions for X_test and the
        test accuracy reported by evaluate(show_accuracy=True).
    """
    (X_train, Y_train), (X_test, Y_test) = dataset
    batch_size = 32
    nb_epoch = 5
    if verbose:
        print('Loading data...')
        print(len(X_train), 'train sequences')
        print(len(X_test), 'test sequences')
        print('Pad sequences (samples x time)')
    X_train = sequence.pad_sequences(X_train, maxlen=maxlen)
    X_test = sequence.pad_sequences(X_test, maxlen=maxlen)
    if verbose:
        print('X_train shape:', X_train.shape)
        print('X_test shape:', X_test.shape)
        print('Build model...')

    model = Sequential()
    # we start off with an efficient embedding layer which maps
    # our vocab indices into embedding_dims dimensions
    model.add(Embedding(max_features, embedding_dims, input_length=maxlen))
    model.add(Dropout(dropout))
    # we add a Convolution1D, which will learn nb_filter
    # word group filters of size filter_length:
    model.add(Convolution1D(nb_filter=nb_filter,
                            filter_length=filter_length,
                            border_mode='valid',
                            activation='relu',
                            subsample_length=1))
    if pool_length:
        # BUG FIX: the pooling size was hard-coded to 2, silently ignoring
        # the pool_length argument; honor the parameter (default is still 2).
        model.add(MaxPooling1D(pool_length=pool_length))
    if with_lstm:
        model.add(LSTM(125))
    else:
        # We flatten the output of the conv layer,
        # so that we can add a vanilla dense layer:
        model.add(Flatten())
        # We add a vanilla hidden layer:
        model.add(Dense(num_hidden))
        model.add(Activation('relu'))
        model.add(Dropout(dropout))

    # We project onto a num_classes-unit softmax output layer:
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam')

    model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch,
              show_accuracy=True, validation_split=0.1)
    score = model.evaluate(X_test, Y_test, batch_size=batch_size,
                           verbose=1 if verbose else 0, show_accuracy=True)
    if verbose:
        print('Test score:', score[0])
        print('Test accuracy:', score[1])
    predictions = model.predict_classes(X_test, verbose=1 if verbose else 0)
    return predictions, score[1]
示例10: RNN
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import predict_classes [as 别名]
def RNN(self):
    """Train a SimpleRNN binary classifier on per-sample 6-step sequences.

    Reshapes self.X / self.X_test (flat feature vectors of length 6*featureNum)
    into (n_samples, 6, featureNum) time-major arrays, fits a
    SimpleRNN(20) -> Dropout -> Dense(1, sigmoid) model on self.y, predicts
    classes for the test set, and logs accuracy / classification report /
    confusion matrix against self.expected via self.ptLocal.

    NOTE(review): assumes self.X rows hold featureNum blocks of 6 time steps
    (hence the reshape + transpose) — confirm against the data loader.
    """
    # BUG FIX: '/' is true division in Python 3, producing a float that breaks
    # np.empty(...) and reshape(...); use integer floor division instead.
    featureNum = len(self.X[0]) // 6
    X = np.empty((len(self.X), 6, featureNum))
    X_test = np.empty((len(self.X_test), 6, featureNum))

    # Reshape flat rows to (featureNum, 6), then transpose each sample to
    # (time=6, features=featureNum) for the RNN.
    self.X = self.X.reshape(len(self.X), featureNum, 6)
    self.X_test = self.X_test.reshape(len(self.X_test), featureNum, 6)
    for i in range(len(self.X)):
        X[i] = self.X[i].transpose()
    for i in range(len(self.X_test)):
        X_test[i] = self.X_test[i].transpose()

    np.random.seed(0)  # reproducible weight initialization
    model = Sequential()
    # Generalized: the input feature count was hard-coded to 28; use the
    # featureNum computed from the data so other widths work too.
    model.add(SimpleRNN(20, batch_input_shape=(None, 6, featureNum)))
    model.add(Dropout(0.1))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    print(model.summary())

    model.fit(X, self.y, verbose=2)
    predicted = model.predict_classes(X_test, verbose=0)

    # Final evaluation of the model
    scores = model.evaluate(X_test, self.expected, verbose=0)
    print("Accuracy: %.2f%%" % (scores[1]*100))
    self.ptLocal(self.fout, "Classification report for classifier:\n%s", \
        (metrics.classification_report(self.expected, predicted)))
    self.ptLocal(self.fout, "Confusion matrix:\n%s", \
        metrics.confusion_matrix(self.expected, predicted))
    self.ptLocal(self.fout, "Random pick successful rate: %.3f\n",\
        round(float(sum(self.expected)) / len(self.expected), 3))
示例11: tipdm_chapter5_nn_test
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import predict_classes [as 别名]
def tipdm_chapter5_nn_test():
    """Train a small binary-classification MLP on the chapter-5 sales data.

    Loads the Excel file, recodes the categorical labels to +/-1, fits a
    3-10-1 sigmoid network, and plots the confusion matrix of the in-sample
    predictions via cm_plot.
    """
    # Parameter initialization.
    filename = '../../../MyFile/chapter5/data/sales_data.xls'
    data = pd.read_excel(filename, index_col = u'序号')  # load the data

    # The data are categorical labels; convert them to numeric form:
    # "high"/"good"/"yes" (高/好/是) become 1, everything else becomes -1.
    data[data == u'高'] = 1
    data[data == u'是'] = 1
    data[data == u'好'] = 1
    data[data != 1] = -1
    x = data.iloc[:,:3].as_matrix().astype(int)
    y = data.iloc[:,3].as_matrix().astype(int)

    # Model and training:
    # 3 input nodes, 10 hidden nodes, 1 output node.
    model = Sequential()
    model.add(Dense(10, input_dim = 3))
    model.add(Activation('relu'))  # relu activation noticeably improves accuracy
    model.add(Dense(1, input_dim = 10))
    model.add(Activation('sigmoid'))  # sigmoid since the output is 0-1

    # Compile before training to configure the learning process.
    # Binary classification, so loss is binary_crossentropy with
    # class_mode='binary' (legacy Keras). Other common losses include
    # mean_squared_error and categorical_crossentropy. Optimizer is adam;
    # sgd, rmsprop, etc. are alternatives.
    model.compile(loss = 'binary_crossentropy', optimizer = 'adam', class_mode = 'binary')

    # Train for 500 epochs, then predict classes on the training data.
    model.fit(x, y, nb_epoch = 500, batch_size = 10)
    yp = model.predict_classes(x).reshape(len(y))  # class predictions
    cm_plot(y, yp).show()
示例12: model
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import predict_classes [as 别名]
def model(df, parent_id, go_id):
    """Train a Dense+Highway binary classifier for one GO term (Python 2 code).

    Splits df 80/20 into train/test, fits with early stopping and a
    best-only checkpoint, reloads the best weights, and returns a
    classification report on the held-out split.

    NOTE(review): assumes df has 'labels' and 'data' columns where each
    'data' cell flattens to 8000 features — confirm against the caller.
    Raises Exception when fewer than 100 training rows exist for go_id.
    """
    # Training hyper-parameters.
    batch_size = 64
    nb_epoch = 64

    # Split pandas DataFrame 80/20 by position (data assumed pre-shuffled).
    n = len(df)
    split = 0.8
    m = int(n * split)
    train, test = df[:m], df[m:]
    # train, test = train_test_split(
    #     labels, data, batch_size=batch_size)
    train_label, train_data = train['labels'], train['data']
    if len(train_data) < 100:
        raise Exception("No training data for " + go_id)
    test_label, test_data = test['labels'], test['data']
    test_label_rep = test_label

    # Flatten each row's array cell into an 8000-feature matrix.
    train_data = train_data.as_matrix()
    test_data = test_data.as_matrix()
    train_data = numpy.hstack(train_data).reshape(train_data.shape[0], 8000)
    test_data = numpy.hstack(test_data).reshape(test_data.shape[0], 8000)
    shape = numpy.shape(train_data)
    print('X_train shape: ', shape)
    print('X_test shape: ', test_data.shape)

    # Dense -> Highway -> sigmoid output (legacy Keras binary class_mode).
    model = Sequential()
    model.add(Dense(8000, activation='relu', input_dim=8000))
    model.add(Highway())
    model.add(Dense(1, activation='sigmoid'))
    model.compile(
        loss='binary_crossentropy', optimizer='rmsprop', class_mode='binary')

    # Checkpoint the best validation model; stop after 7 stagnant epochs.
    model_path = DATA_ROOT + parent_id + '/' + go_id + '.hdf5'
    checkpointer = ModelCheckpoint(
        filepath=model_path, verbose=1, save_best_only=True)
    earlystopper = EarlyStopping(monitor='val_loss', patience=7, verbose=1)
    model.fit(
        X=train_data, y=train_label,
        batch_size=batch_size, nb_epoch=nb_epoch,
        show_accuracy=True, verbose=1,
        validation_split=0.2,
        callbacks=[checkpointer, earlystopper])

    # Loading saved weights
    print 'Loading weights'
    model.load_weights(model_path)
    pred_data = model.predict_classes(
        test_data, batch_size=batch_size)
    return classification_report(list(test_label_rep), pred_data)
示例13: bidirectional_lstm
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import predict_classes [as 别名]
def bidirectional_lstm(X_train, y_train, X_test, y_test):
    """Train a bidirectional LSTM/GRU sentiment classifier (Python 2 code).

    Embedding -> Bidirectional(forward=LSTM(64), backward=GRU(64)) -> Dropout
    -> Dense(1, sigmoid), trained for 4 epochs with binary crossentropy.

    Returns:
        (accuracy, precision, recall, f1) computed with sklearn on the
        test-set class predictions (weighted averaging).

    NOTE(review): relies on module-level globals max_len, max_features and
    batch_size defined elsewhere in the file.
    """
    X_train = sequence.pad_sequences(X_train, maxlen=max_len)
    X_test = sequence.pad_sequences(X_test, maxlen=max_len)

    # Forward and backward recurrent halves of the bidirectional wrapper.
    lstm = LSTM(output_dim=64)
    gru = GRU(output_dim=64)  # original examples was 128, we divide by 2 because results will be concatenated
    brnn = Bidirectional(forward=lstm, backward=gru)

    print X_train.shape, y_train.shape
    print X_test.shape, y_test.shape
    print('Build model...')
    model = Sequential()
    model.add(Embedding(max_features, 128, input_length=max_len))
    model.add(brnn)  # try using another Bidirectional RNN inside the Bidirectional RNN. Inception meets callback hell.
    model.add(Dropout(0.5))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam', class_mode="binary")
    # model.compile(loss='binary_crossentropy', optimizer='rmsprop')

    print("Train...")
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=4, validation_data=(X_test, y_test), show_accuracy=True)
    score, acc = model.evaluate(X_test, y_test, batch_size=batch_size, show_accuracy=True)
    print('Test score:', score)
    print('Test accuracy:', acc)

    # sklearn metrics on the hard class predictions.
    pred_labels = model.predict_classes(X_test)
    # print pred_labels
    accuracy = accuracy_score(y_test, pred_labels)
    precision, recall, f1, supp = precision_recall_fscore_support(y_test, pred_labels, average='weighted')
    print precision, recall, f1, supp
    return accuracy, precision, recall, f1
示例14: __init__
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import predict_classes [as 别名]
class LSTMSentiment:
    """LSTM-based sentiment model: Embedding -> Dropout -> LSTM -> Dense(1).

    Wraps a legacy-Keras Sequential model with helpers to compile/train,
    evaluate, predict, and load pickled train/test data.
    """

    def __init__(self):
        # Hyper-parameters; several (in_dim, n_prev, future) are stored but
        # not used by the model construction below.
        self.in_dim = 500
        self.n_prev = 25
        self.future = 50
        out_dim = 1
        hidden_neurons = 500
        self.max_length = 100
        max_features = 20000
        # Initializing a sequential Model
        self.model = Sequential()
        self.model.add(Embedding(max_features, 128, input_length=self.max_length))
        self.model.add(Dropout(0.2))
        # self.model.add(LSTM(output_dim=128,input_dim=500,activation='relu'))
        self.model.add(LSTM(128))
        self.model.add(Dropout(0.2))
        self.model.add(Dense(1))
        self.model.add(Activation('linear'))

    def configureLSTMModel(self, TrainX, TrainY):
        """Compile the model for binary classification and fit for 10 epochs."""
        print('Configuring the LSTM Model')
        self.model.compile(loss='binary_crossentropy', optimizer='adam', class_mode="binary")
        self.model.fit(TrainX, TrainY, nb_epoch=10, batch_size=32, show_accuracy=True, validation_split=0.3)
        # ,validation_data =(ValidX,ValidY))

    def evaluateLSTMModel(self, TestX, TestY):
        """Print objective score and accuracy on the held-out test data."""
        obj_sc, acc = self.model.evaluate(TestX, TestY, batch_size=32, show_accuracy=True)
        print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
        print('Objective Score : ', obj_sc)
        print('Accuracy : ', acc)

    def predictSentiment(self, testX):
        """Return hard class predictions for testX."""
        sentiment = self.model.predict_classes(testX, batch_size=32)
        return sentiment

    def printSummary(self):
        """Print the Keras model summary."""
        print(self.model.summary())

    def getTrainTestData(self):
        """Load pickled (trainX, trainY) and (testX, testY) tuples.

        Returns ((trainX, trainY), (testX, testY)).
        """
        print('Loading Training and Test data')
        # BUG FIX: the original opened both pickle files without ever closing
        # them (leaked file handles); use context managers instead.
        with open('trainingdata.pkl', 'rb') as f:
            (trainX, trainY) = cPickle.load(f)
        with open('testingdata.pkl', 'rb') as f:
            (testX, testY) = cPickle.load(f)
        return ((trainX, trainY), (testX, testY))
示例15: lstm_model
# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import predict_classes [as 别名]
def lstm_model(X_train, y_train, X_test, y_test):
X_train = sequence.pad_sequences(X_train, maxlen=max_len, padding='post')
X_test = sequence.pad_sequences(X_test, maxlen=max_len, padding='post')
print X_train.shape, y_train.shape
print X_test.shape, y_test.shape
print('Build model...')
model = Sequential()
model.add(Embedding(max_features, 128, input_length=max_len))
model.add(LSTM(128)) # try using a GRU instead, for fun
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
# print X_train.shape, y_train.shape
# print X_test.shape, y_test.shape
model.compile(loss='binary_crossentropy', optimizer='adam', class_mode="binary")
print("Train...")
model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=4, validation_data=(X_test, y_test), show_accuracy=True)
acc, score = model.evaluate(X_test, y_test, batch_size=batch_size, show_accuracy=True)
print('Test score:', score)
print('Test accuracy:', acc)
pred_labels = model.predict_classes(X_test)
# print pred_labels
accuracy = accuracy_score(y_test, pred_labels)
precision, recall, f1, supp = precision_recall_fscore_support(y_test, pred_labels, average='weighted')
print precision, recall, f1, supp
return accuracy, precision, recall, f1