本文整理汇总了Python中tflearn.data_utils.to_categorical函数的典型用法代码示例。如果您正苦于以下问题:Python to_categorical函数的具体用法?Python to_categorical怎么用?Python to_categorical使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了to_categorical函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: do_rnn
def do_rnn(trainX, testX, trainY, testY):
    """Train an embedding+LSTM binary classifier (DGA detection) and report metrics.

    trainX/testX: iterables of integer-encoded sequences; padded to length 64.
    trainY/testY: binary labels (0/1).
    Side effects: trains a tflearn model, prints per-sample probabilities,
    a classification report, and a confusion matrix for the test set.
    """
    max_document_length = 64
    # Keep the raw integer labels; testY is one-hot encoded below.
    y_test = testY
    trainX = pad_sequences(trainX, maxlen=max_document_length, value=0.)
    testX = pad_sequences(testX, maxlen=max_document_length, value=0.)
    # Converting labels to binary (one-hot) vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)
    # Network building
    net = tflearn.input_data([None, max_document_length])
    net = tflearn.embedding(net, input_dim=10240000, output_dim=64)
    net = tflearn.lstm(net, 64, dropout=0.1)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
                             loss='categorical_crossentropy')
    # Training
    model = tflearn.DNN(net, tensorboard_verbose=0, tensorboard_dir="dga_log")
    model.fit(trainX, trainY, validation_set=(testX, testY), show_metric=True,
              batch_size=10, run_id="dga", n_epoch=1)
    y_predict_list = model.predict(testX)
    y_predict = []
    for i in y_predict_list:
        print(i[0])
        # Column 0 is the predicted probability of class 0; threshold at 0.5.
        if i[0] > 0.5:
            y_predict.append(0)
        else:
            y_predict.append(1)
    print(classification_report(y_test, y_predict))
    print(metrics.confusion_matrix(y_test, y_predict))
示例2: do_cnn_doc2vec
def do_cnn_doc2vec(trainX, testX, trainY, testY):
    """Train a 3/4/5-width 1-D CNN on doc2vec features with test-set validation.

    trainX/testX: feature matrices of width `max_features` (module global).
    trainY/testY: binary labels (0/1), one-hot encoded before training.
    """
    global max_features
    print("CNN and doc2vec")
    # Converting labels to binary (one-hot) vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)
    # Building convolutional network: embedding feeds three parallel conv
    # branches (kernel widths 3/4/5), merged then globally max-pooled.
    network = input_data(shape=[None, max_features], name='input')
    network = tflearn.embedding(network, input_dim=1000000, output_dim=128, validate_indices=False)
    branch1 = conv_1d(network, 128, 3, padding='valid', activation='relu', regularizer="L2")
    branch2 = conv_1d(network, 128, 4, padding='valid', activation='relu', regularizer="L2")
    branch3 = conv_1d(network, 128, 5, padding='valid', activation='relu', regularizer="L2")
    network = merge([branch1, branch2, branch3], mode='concat', axis=1)
    network = tf.expand_dims(network, 2)
    network = global_max_pool(network)
    network = dropout(network, 0.8)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy', name='target')
    # Training
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit(trainX, trainY,
              n_epoch=5, shuffle=True, validation_set=(testX, testY),
              show_metric=True, batch_size=100, run_id="review")
示例3: do_cnn
def do_cnn(trainX, trainY, testX, testY):
    """Train a multi-width 1-D CNN text classifier, validating on the test set."""
    global n_words
    # Pad every sequence so the network input is rectangular.
    trainX = pad_sequences(trainX, maxlen=MAX_DOCUMENT_LENGTH, value=0.)
    testX = pad_sequences(testX, maxlen=MAX_DOCUMENT_LENGTH, value=0.)
    # One-hot encode the binary labels.
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)
    # Graph: embedding -> parallel conv branches (widths 3/4/5) -> merge
    # -> global max pool -> dropout -> softmax.
    net = input_data(shape=[None, MAX_DOCUMENT_LENGTH], name='input')
    net = tflearn.embedding(net, input_dim=n_words + 1, output_dim=128)
    branches = [
        conv_1d(net, 128, width, padding='valid', activation='relu', regularizer="L2")
        for width in (3, 4, 5)
    ]
    net = merge(branches, mode='concat', axis=1)
    net = tf.expand_dims(net, 2)
    net = global_max_pool(net)
    net = dropout(net, 0.5)
    net = fully_connected(net, 2, activation='softmax')
    net = regression(net, optimizer='adam', learning_rate=0.001,
                     loss='categorical_crossentropy', name='target')
    # Train with the held-out test set as validation data.
    model = tflearn.DNN(net, tensorboard_verbose=0)
    model.fit(trainX, trainY, n_epoch=20, shuffle=True,
              validation_set=(testX, testY), show_metric=True, batch_size=32)
示例4: do_rnn
def do_rnn(trainX, testX, trainY, testY):
    """Train an embedding+LSTM classifier on padded word-id sequences.

    trainX/testX: integer-encoded sequences, padded to MAX_DOCUMENT_LENGTH.
    trainY/testY: binary labels (0/1).
    """
    global n_words
    # Data preprocessing: pad sequences to a fixed length
    print("GET n_words embedding %d" % n_words)
    trainX = pad_sequences(trainX, maxlen=MAX_DOCUMENT_LENGTH, value=0.)
    testX = pad_sequences(testX, maxlen=MAX_DOCUMENT_LENGTH, value=0.)
    # Converting labels to binary (one-hot) vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)
    # Network building
    net = tflearn.input_data([None, MAX_DOCUMENT_LENGTH])
    # NOTE(review): sibling do_cnn sizes its embedding as n_words + 1; here it
    # is n_words, which off-by-one-drops the largest id — confirm intended.
    net = tflearn.embedding(net, input_dim=n_words, output_dim=128)
    net = tflearn.lstm(net, 128, dropout=0.8)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
                             loss='categorical_crossentropy')
    # Training
    model = tflearn.DNN(net, tensorboard_verbose=3)
    model.fit(trainX, trainY, validation_set=(testX, testY), show_metric=True,
              batch_size=32, run_id="maidou")
示例5: do_rnn
def do_rnn(x_train, x_test, y_train, y_test):
    """Train a two-layer stacked LSTM on pre-vectorized sequence input.

    x_train/x_test: sequences shaped (100, n_words) per sample (see input_data).
    y_train/y_test: binary labels (0/1).
    """
    global n_words
    print("GET n_words embedding %d" % n_words)
    # Converting labels to binary (one-hot) vectors
    y_train = to_categorical(y_train, nb_classes=2)
    y_test = to_categorical(y_test, nb_classes=2)
    # Network building: stacked LSTMs; the first returns the full sequence
    # so the second can consume it.
    net = tflearn.input_data(shape=[None, 100, n_words])
    net = tflearn.lstm(net, 10, return_seq=True)
    net = tflearn.lstm(net, 10)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.1, name="output",
                             loss='categorical_crossentropy')
    # Training
    model = tflearn.DNN(net, tensorboard_verbose=3)
    model.fit(x_train, y_train, validation_set=(x_test, y_test), show_metric=True,
              batch_size=32, run_id="maidou")
示例6: do_rnn
def do_rnn(x, y):
    """Split x/y 60/40, train an LSTM webshell classifier, and report metrics.

    x: integer-encoded sequences; y: binary labels (0/1).
    Calls do_metrics(y_test, y_predict) with hard 0/1 predictions.
    """
    global max_document_length
    print("RNN")
    trainX, testX, trainY, testY = train_test_split(x, y, test_size=0.4, random_state=0)
    # Keep the raw test labels; testY is one-hot encoded below.
    y_test = testY
    trainX = pad_sequences(trainX, maxlen=max_document_length, value=0.)
    testX = pad_sequences(testX, maxlen=max_document_length, value=0.)
    # Converting labels to binary (one-hot) vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)
    # Network building
    net = tflearn.input_data([None, max_document_length])
    net = tflearn.embedding(net, input_dim=10240000, output_dim=128)
    net = tflearn.lstm(net, 128, dropout=0.8)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
                             loss='categorical_crossentropy')
    # Training: 10% of the training data is held out for validation.
    model = tflearn.DNN(net, tensorboard_verbose=0)
    model.fit(trainX, trainY, validation_set=0.1, show_metric=True,
              batch_size=10, run_id="webshell", n_epoch=5)
    # Column 0 is the predicted probability of class 0; threshold at 0.5.
    y_predict = [0 if p[0] > 0.5 else 1 for p in model.predict(testX)]
    do_metrics(y_test, y_predict)
示例7: do_cnn
def do_cnn(x, y):
    """Split x/y 60/40, train a multi-width 1-D CNN, and report webshell metrics.

    x: integer-encoded sequences; y: binary labels (0/1).
    Prints the raw probability lists and calls do_metrics with 0/1 predictions.
    """
    global max_document_length
    print("CNN and tf")
    trainX, testX, trainY, testY = train_test_split(x, y, test_size=0.4, random_state=0)
    # Keep the raw test labels; testY is one-hot encoded below.
    y_test = testY
    trainX = pad_sequences(trainX, maxlen=max_document_length, value=0.)
    testX = pad_sequences(testX, maxlen=max_document_length, value=0.)
    # Converting labels to binary (one-hot) vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)
    # Building convolutional network: parallel conv branches (widths 3/4/5)
    # merged, then globally max-pooled.
    network = input_data(shape=[None, max_document_length], name='input')
    network = tflearn.embedding(network, input_dim=1000000, output_dim=128)
    branch1 = conv_1d(network, 128, 3, padding='valid', activation='relu', regularizer="L2")
    branch2 = conv_1d(network, 128, 4, padding='valid', activation='relu', regularizer="L2")
    branch3 = conv_1d(network, 128, 5, padding='valid', activation='relu', regularizer="L2")
    network = merge([branch1, branch2, branch3], mode='concat', axis=1)
    network = tf.expand_dims(network, 2)
    network = global_max_pool(network)
    network = dropout(network, 0.8)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy', name='target')
    model = tflearn.DNN(network, tensorboard_verbose=0)
    # Training: 10% of the training data is held out for validation.
    model.fit(trainX, trainY,
              n_epoch=5, shuffle=True, validation_set=0.1,
              show_metric=True, batch_size=100, run_id="webshell")
    y_predict_list = model.predict(testX)
    y_predict = []
    for i in y_predict_list:
        print(i[0])
        # Column 0 is the predicted probability of class 0; threshold at 0.5.
        if i[0] > 0.5:
            y_predict.append(0)
        else:
            y_predict.append(1)
    print('y_predict_list:')
    print(y_predict_list)
    print('y_predict:')
    print(y_predict)
    do_metrics(y_test, y_predict)
示例8: process_form_data
def process_form_data(filename):
    """Split a shuffled image/form dataset into HDF5 train and test splits.

    Reads the 'image' and 'form' datasets from *filename*, shuffles them in
    unison, one-hot encodes the 11 form classes, and writes the first 30
    examples seen per class to the test split (330 total) and the rest to
    the train split (770 total) in 'forms_out.h5'.
    """
    # Use context managers so both files are closed even on error; the
    # original never closed the input handle.
    with h5py.File(filename, 'r') as data, h5py.File('forms_out.h5', 'w') as output:
        test_image = output.create_dataset('test_image', (330, 3, 256, 256), dtype=np.uint8)
        train_image = output.create_dataset('train_image', (770, 3, 256, 256), dtype=np.uint8)
        test_label = output.create_dataset('test_label', (330, 11), dtype=np.int8)
        train_label = output.create_dataset('train_label', (770, 11), dtype=np.int8)
        image, labels = shuffle(data['image'], data['form'])
        onehot_labels = to_categorical(labels, 11)
        count = {}
        train_count = 0
        test_count = 0
        for i, l in enumerate(labels):
            if l not in count:
                count[l] = 0
            # The first 30 examples of each class go to test, the rest to train.
            if count[l] > 29:
                train_image[train_count] = image[i]
                train_label[train_count] = onehot_labels[i]
                train_count += 1
            else:
                test_image[test_count] = image[i]
                test_label[test_count] = onehot_labels[i]
                test_count += 1
            count[l] += 1
示例9: do_rnn
def do_rnn(trainX, testX, trainY, testY):
    """Train an embedding+LSTM classifier on syscall sequences and report metrics.

    trainX/testX: integer syscall-id sequences, padded to max_sequences_len.
    trainY/testY: binary labels (0/1).
    Prints a classification report and confusion matrix for the test set.
    """
    global max_sequences_len
    global max_sys_call
    # Data preprocessing: pad sequences to a fixed length
    trainX = pad_sequences(trainX, maxlen=max_sequences_len, value=0.)
    testX = pad_sequences(testX, maxlen=max_sequences_len, value=0.)
    # Converting labels to binary vectors; keep raw labels for sklearn metrics.
    trainY = to_categorical(trainY, nb_classes=2)
    testY_old = testY
    testY = to_categorical(testY, nb_classes=2)
    # Network building
    print("GET max_sequences_len embedding %d" % max_sequences_len)
    print("GET max_sys_call embedding %d" % max_sys_call)
    net = tflearn.input_data([None, max_sequences_len])
    # +1 so the largest syscall id (and the 0 padding id) fit in the table.
    net = tflearn.embedding(net, input_dim=max_sys_call + 1, output_dim=128)
    net = tflearn.lstm(net, 128, dropout=0.3)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.1,
                             loss='categorical_crossentropy')
    # Training
    model = tflearn.DNN(net, tensorboard_verbose=3)
    model.fit(trainX, trainY, validation_set=(testX, testY), show_metric=True,
              batch_size=32, run_id="maidou")
    # Column 0 is the predicted probability of class 0; threshold at 0.5.
    y_predict = [0 if p[0] > 0.5 else 1 for p in model.predict(testX)]
    print(classification_report(testY_old, y_predict))
    print(metrics.confusion_matrix(testY_old, y_predict))
示例10: do_cnn_word2vec_2d
def do_cnn_word2vec_2d(trainX, testX, trainY, testY):
    """Train a 2-D CNN on word2vec feature maps and report test-set metrics.

    trainX/testX: 4-D input shaped (batch, max_document_length, max_features, 1).
    trainY/testY: binary labels (0/1).
    """
    global max_features
    global max_document_length
    print("CNN and word2vec2d")
    # Keep the raw test labels; testY is one-hot encoded below.
    y_test = testY
    # Converting labels to binary (one-hot) vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)
    # Building convolutional network: two conv/pool/LRN stages, two dense
    # layers with dropout, softmax output.
    network = input_data(shape=[None, max_document_length, max_features, 1], name='input')
    network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.01,
                         loss='categorical_crossentropy', name='target')
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit(trainX, trainY,
              n_epoch=5, shuffle=True, validation_set=(testX, testY),
              show_metric=True, run_id="sms")
    y_predict_list = model.predict(testX)
    print(y_predict_list)
    y_predict = []
    for i in y_predict_list:
        print(i[0])
        # Column 0 is the predicted probability of class 0; threshold at 0.5.
        if i[0] > 0.5:
            y_predict.append(0)
        else:
            y_predict.append(1)
    print(classification_report(y_test, y_predict))
    print(metrics.confusion_matrix(y_test, y_predict))
示例11: bi_lstm
def bi_lstm(trainX, trainY, testX, testY):
    """Train a bidirectional-LSTM binary classifier on padded sequences."""
    seq_len = 200
    # Pad every sequence to the same length.
    trainX = pad_sequences(trainX, maxlen=seq_len, value=0.)
    testX = pad_sequences(testX, maxlen=seq_len, value=0.)
    # One-hot encode the binary labels.
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)
    # Graph: embedding -> bidirectional LSTM -> dropout -> softmax.
    graph = tflearn.input_data(shape=[None, seq_len])
    graph = tflearn.embedding(graph, input_dim=20000, output_dim=128)
    graph = tflearn.bidirectional_rnn(graph, BasicLSTMCell(128), BasicLSTMCell(128))
    graph = tflearn.dropout(graph, 0.5)
    graph = tflearn.fully_connected(graph, 2, activation='softmax')
    graph = tflearn.regression(graph, optimizer='adam', loss='categorical_crossentropy')
    # Train with 10% of the training data held out for validation.
    model = tflearn.DNN(graph, clip_gradients=0., tensorboard_verbose=2)
    model.fit(trainX, trainY, validation_set=0.1, show_metric=True,
              batch_size=64, run_id="rnn-bilstm")
示例12: do_cnn_word2vec_2d_345
def do_cnn_word2vec_2d_345(trainX, testX, trainY, testY):
    """Train a 2-D CNN with 3/4/5 kernel branches on word2vec maps; report metrics.

    trainX/testX: 4-D input shaped (batch, max_document_length, max_features, 1).
    trainY/testY: binary labels (0/1).
    """
    global max_features
    global max_document_length
    print("CNN and word2vec_2d_345")
    # Keep the raw test labels; testY is one-hot encoded below.
    y_test = testY
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)
    # Building convolutional network: three parallel 2-D conv branches
    # (kernel sizes 3/4/5) merged, then globally max-pooled.
    network = input_data(shape=[None, max_document_length, max_features, 1], name='input')
    network = tflearn.embedding(network, input_dim=1, output_dim=128, validate_indices=False)
    branch1 = conv_2d(network, 128, 3, padding='valid', activation='relu', regularizer="L2")
    branch2 = conv_2d(network, 128, 4, padding='valid', activation='relu', regularizer="L2")
    branch3 = conv_2d(network, 128, 5, padding='valid', activation='relu', regularizer="L2")
    network = merge([branch1, branch2, branch3], mode='concat', axis=1)
    network = tf.expand_dims(network, 2)
    network = global_max_pool_2d(network)
    network = dropout(network, 0.8)
    network = fully_connected(network, 2, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.001,
                         loss='categorical_crossentropy', name='target')
    # Training
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit(trainX, trainY,
              n_epoch=5, shuffle=True, validation_set=(testX, testY),
              show_metric=True, batch_size=100, run_id="sms")
    y_predict_list = model.predict(testX)
    print(y_predict_list)
    y_predict = []
    for i in y_predict_list:
        print(i[0])
        # Column 0 is the predicted probability of class 0; threshold at 0.5.
        if i[0] > 0.5:
            y_predict.append(0)
        else:
            y_predict.append(1)
    print(classification_report(y_test, y_predict))
    print(metrics.confusion_matrix(y_test, y_predict))
示例13: lstm
def lstm(trainX, trainY, testX, testY):
    """Train a single-layer LSTM binary classifier on padded integer sequences."""
    seq_len = 100
    # Pad every sequence to the same length.
    trainX = pad_sequences(trainX, maxlen=seq_len, value=0.)
    testX = pad_sequences(testX, maxlen=seq_len, value=0.)
    # One-hot encode the binary labels.
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)
    # Graph: embedding -> LSTM -> softmax.
    g = tflearn.input_data([None, seq_len])
    g = tflearn.embedding(g, input_dim=10000, output_dim=128)
    g = tflearn.lstm(g, 128, dropout=0.8)
    g = tflearn.fully_connected(g, 2, activation='softmax')
    g = tflearn.regression(g, optimizer='adam', learning_rate=0.001,
                           loss='categorical_crossentropy')
    # Train with the held-out test set as validation data.
    model = tflearn.DNN(g, tensorboard_verbose=0)
    model.fit(trainX, trainY, validation_set=(testX, testY), show_metric=True,
              batch_size=32, run_id="rnn-lstm")
示例14: do_rnn_wordbag
def do_rnn_wordbag(trainX, testX, trainY, testY):
    """Train an embedding+LSTM classifier on bag-of-words id sequences.

    trainX/testX: integer-encoded sequences, padded to max_document_length.
    trainY/testY: binary labels (0/1).
    """
    global max_document_length
    print("RNN and wordbag")
    trainX = pad_sequences(trainX, maxlen=max_document_length, value=0.)
    testX = pad_sequences(testX, maxlen=max_document_length, value=0.)
    # Converting labels to binary (one-hot) vectors
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)
    # Network building
    net = tflearn.input_data([None, max_document_length])
    net = tflearn.embedding(net, input_dim=10240000, output_dim=128)
    net = tflearn.lstm(net, 128, dropout=0.8)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.001,
                             loss='categorical_crossentropy')
    # Training
    model = tflearn.DNN(net, tensorboard_verbose=0)
    model.fit(trainX, trainY, validation_set=(testX, testY), show_metric=True,
              batch_size=10, run_id="review", n_epoch=5)
示例15: create_datasets
def create_datasets(file_path, vocab_size=30000, val_fraction=0.0):
    """Load the IMDB data, pad sequences, one-hot labels, and wrap the train split.

    Returns a DataSet built from the training examples only.
    NOTE(review): the test split is padded and encoded below but never
    returned — confirm whether a test DataSet was intended.
    """
    # IMDB Dataset loading
    train, test, _ = imdb.load_data(path=file_path,
                                    n_words=vocab_size,
                                    valid_portion=val_fraction,
                                    sort_by_len=False)
    trainX, trainY = train
    testX, testY = test
    # Pad sequences to a fixed length and one-hot encode the binary labels.
    trainX = pad_sequences(trainX, maxlen=FLAGS.max_len, value=0.)
    testX = pad_sequences(testX, maxlen=FLAGS.max_len, value=0.)
    trainY = to_categorical(trainY, nb_classes=2)
    testY = to_categorical(testY, nb_classes=2)
    return DataSet(trainX, trainY)