本文整理汇总了Python中tflearn.layers.core.dropout函数的典型用法代码示例。如果您正苦于以下问题:Python dropout函数的具体用法?Python dropout怎么用?Python dropout使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了dropout函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: cnn
def cnn():
    """Train a small two-conv-layer CNN on MNIST (tflearn demo)."""
    X, Y, testX, testY = mnist.load_data(one_hot=True)
    # Flat 784-vectors -> 28x28 single-channel images.
    X = X.reshape([-1, 28, 28, 1])
    testX = testX.reshape([-1, 28, 28, 1])

    # Convolutional stack: two conv/pool/LRN stages with growing filter counts.
    net = input_data(shape=[None, 28, 28, 1], name='input')
    for n_filters in (32, 64):
        net = conv_2d(net, n_filters, 3, activation='relu', regularizer="L2")
        net = max_pool_2d(net, 2)
        net = local_response_normalization(net)

    # Fully-connected head with dropout, then a 10-way softmax.
    net = fully_connected(net, 128, activation='tanh')
    net = dropout(net, 0.8)
    net = fully_connected(net, 256, activation='tanh')
    net = dropout(net, 0.8)
    net = fully_connected(net, 10, activation='softmax')
    net = regression(net, optimizer='adam', learning_rate=0.01,
                     loss='categorical_crossentropy', name='target')

    # Train with the held-out test split as validation data.
    model = tflearn.DNN(net, tensorboard_verbose=0)
    model.fit({'input': X}, {'target': Y}, n_epoch=20,
              validation_set=({'input': testX}, {'target': testY}),
              snapshot_step=100, show_metric=True, run_id='cnn_demo')
示例2: do_cnn_doc2vec_2d
def do_cnn_doc2vec_2d(trainX, testX, trainY, testY):
    """Train a 2-D CNN on doc2vec feature matrices and report validation metrics.

    :param trainX: training features, reshaped to
        (-1, max_features, max_document_length, 1); `max_features` and
        `max_document_length` are module-level globals — TODO confirm.
    :param testX: test features, reshaped the same way.
    :param trainY: one-hot training labels (10 classes).
    :param testY: one-hot test labels.
    """
    # FIX: was a Python-2-only `print "..."` statement, inconsistent with the
    # print(...) calls used elsewhere in this file; a single-argument print()
    # call behaves identically on both Python 2 and 3.
    print("CNN and doc2vec 2d")
    trainX = trainX.reshape([-1, max_features, max_document_length, 1])
    testX = testX.reshape([-1, max_features, max_document_length, 1])
    # Building convolutional network
    network = input_data(shape=[None, max_features, max_document_length, 1], name='input')
    network = conv_2d(network, 16, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
    network = max_pool_2d(network, 2)
    network = local_response_normalization(network)
    network = fully_connected(network, 128, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 256, activation='tanh')
    network = dropout(network, 0.8)
    network = fully_connected(network, 10, activation='softmax')
    network = regression(network, optimizer='adam', learning_rate=0.01,
                         loss='categorical_crossentropy', name='target')
    # Training
    model = tflearn.DNN(network, tensorboard_verbose=0)
    model.fit({'input': trainX}, {'target': trainY}, n_epoch=20,
              validation_set=({'input': testX}, {'target': testY}),
              snapshot_step=100, show_metric=True, run_id='review')
示例3: neural_network_model
def neural_network_model(input_size):
    """Build a fully-connected classifier for the given feature size.

    :param input_size: feature size of each observation
    :return: a tflearn.DNN model with a 2-way softmax output
    """
    net = input_data(shape=[None, input_size], name='input')
    # Symmetric hourglass of dense layers, each followed by dropout(0.8).
    for width in (128, 256, 512, 256, 128):
        net = fully_connected(net, width, activation='relu')
        net = dropout(net, 0.8)
    net = fully_connected(net, 2, activation='softmax')
    # LR is a module-level learning-rate constant — defined elsewhere.
    net = regression(net, learning_rate=LR, name='targets')
    return tflearn.DNN(net, tensorboard_dir='logs/ann/ann_0')
示例4: alexnet
def alexnet():
    """Train an AlexNet-style CNN on the Oxford Flowers-17 dataset."""
    X, Y = oxflower17.load_data(one_hot=True, resize_pics=(227, 227))

    # Feature extractor: the classic AlexNet conv/pool/LRN layout.
    net = input_data(shape=[None, 227, 227, 3])
    net = conv_2d(net, 96, 11, strides=4, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    net = conv_2d(net, 256, 5, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Three back-to-back convs before the final pool.
    for n_filters in (384, 384, 256):
        net = conv_2d(net, n_filters, 3, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)

    # Classifier head: two wide tanh layers with 50% dropout, 17-way softmax.
    for _ in range(2):
        net = fully_connected(net, 4096, activation='tanh')
        net = dropout(net, 0.5)
    net = fully_connected(net, 17, activation='softmax')
    net = regression(net, optimizer='momentum',
                     loss='categorical_crossentropy',
                     learning_rate=0.001)

    # Train with 10% of the data held out for validation.
    model = tflearn.DNN(net, checkpoint_path='model_alexnet',
                        max_checkpoints=1, tensorboard_verbose=2)
    model.fit(X, Y, n_epoch=1000, validation_set=0.1, shuffle=True,
              show_metric=True, batch_size=64, snapshot_step=200,
              snapshot_epoch=False, run_id='alexnet')
示例5: neural_network_model
def neural_network_model(input_size):
    """Build a dense 2-class classifier over (input_size, 1)-shaped inputs.

    :param input_size: length of each input sequence
    :return: a tflearn.DNN model ready for fitting
    """
    net = input_data(shape=[None, input_size, 1], name='input')
    # Hourglass of dense relu layers, each with dropout keep-prob 0.8.
    for width in (128, 256, 512, 256, 128):
        net = fully_connected(net, width, activation='relu')
        net = dropout(net, 0.8)
    net = fully_connected(net, 2, activation='softmax')
    # LR is a module-level learning-rate constant — defined elsewhere.
    net = regression(net, optimizer='adam', learning_rate=LR,
                     loss='categorical_crossentropy', name='targets')
    return tflearn.DNN(net, tensorboard_dir='log')
示例6: _model1
def _model1():
    """Build and train a small 2-conv CNN for art classification.

    Relies on module-level globals: X, Y, xTest, yTest, inputSize, dim,
    epochNum, batchNum, img_aug, modelStore, _id.
    """
    global yTest, img_aug
    tf.reset_default_graph()

    # Per-feature zero-centering and std-normalization of the inputs.
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    net = input_data(shape=[None, inputSize, inputSize, dim],
                     name='input',
                     data_preprocessing=img_prep,
                     data_augmentation=img_aug)
    # Two conv/pool/LRN stages; note the aggressive stride on the first conv.
    net = conv_2d(net, 32, 3, strides=4, activation='relu')
    net = max_pool_2d(net, 2, strides=2)
    net = local_response_normalization(net)
    net = conv_2d(net, 64, 3, strides=2, activation='relu')
    net = max_pool_2d(net, 2, strides=2)
    net = local_response_normalization(net)

    # Dense head sized by the number of label classes in Y.
    net = fully_connected(net, 128, activation='tanh')
    net = dropout(net, 0.8)
    net = fully_connected(net, 256, activation='tanh')
    net = dropout(net, 0.8)
    net = fully_connected(net, len(Y[0]), activation='softmax')
    net = regression(net, optimizer='adam', learning_rate=0.001,
                     loss='categorical_crossentropy', name='target')

    model = tflearn.DNN(net, tensorboard_verbose=3)
    model.fit(X, Y, n_epoch=epochNum, validation_set=(xTest, yTest),
              snapshot_step=500, show_metric=True, batch_size=batchNum,
              shuffle=True, run_id=_id + 'artClassification')
    if modelStore:
        model.save(_id + '-model.tflearn')
示例7: train_nmf_network
def train_nmf_network(mfcc_array, sdr_array, n_epochs, take):
    """Train a 2-D CNN that regresses an SDR value from an MFCC matrix.

    :param mfcc_array: training inputs, shape (n_samples, 13, 100, 1)
    :param sdr_array: regression targets (SDR values)
    :param n_epochs: number of training epochs
    :param take: run index, used only to label the tensorboard run
    :return: the trained tflearn.DNN model
    """
    with tf.Graph().as_default():
        network = input_data(shape=[None, 13, 100, 1])
        network = conv_2d(network, 32, [5, 5], activation="relu", regularizer="L2")
        network = max_pool_2d(network, 2)
        network = conv_2d(network, 64, [5, 5], activation="relu", regularizer="L2")
        network = max_pool_2d(network, 2)
        network = fully_connected(network, 128, activation="relu")
        network = dropout(network, 0.8)
        network = fully_connected(network, 256, activation="relu")
        network = dropout(network, 0.8)
        # Single linear output: this is a regression, not a classifier.
        network = fully_connected(network, 1, activation="linear")
        regress = tflearn.regression(network, optimizer="rmsprop", loss="mean_square", learning_rate=0.001)
        # Training
        model = tflearn.DNN(regress)  # , session=sess)
        model.fit(
            mfcc_array,
            sdr_array,
            n_epoch=n_epochs,
            snapshot_step=1000,
            show_metric=True,
            # FIX: run_id said "repet_choice_...", copy-pasted from
            # train_repet_network; label this run as the NMF network so the
            # two experiments' tensorboard logs don't collide.
            run_id="nmf_choice_{0}_epochs_take_{1}".format(n_epochs, take),
        )
        return model
示例8: train_repet_network
def train_repet_network(beat_spectrum_array, sdr_array, n_epochs, take):
    """Train a 1-D CNN that regresses an SDR value from a beat spectrum.

    :param beat_spectrum_array: inputs, shape (n_samples, 432, 1)
    :param sdr_array: regression targets (SDR values)
    :param n_epochs: number of training epochs
    :param take: run index, used only to label the tensorboard run
    :return: the trained tflearn.DNN model
    """
    beat_spec_len = 432
    # Build and train inside a fresh graph so repeated calls don't collide.
    with tf.Graph().as_default():
        net = input_data(shape=[None, beat_spec_len, 1])
        net = conv_1d(net, 32, 4, activation="relu", regularizer="L2")
        net = max_pool_1d(net, 2)
        # Second conv uses a very wide (80-sample) kernel.
        net = conv_1d(net, 64, 80, activation="relu", regularizer="L2")
        net = max_pool_1d(net, 2)
        net = fully_connected(net, 128, activation="relu")
        net = dropout(net, 0.8)
        net = fully_connected(net, 256, activation="relu")
        net = dropout(net, 0.8)
        # Single linear output: regression, not classification.
        net = fully_connected(net, 1, activation="linear")
        regress = tflearn.regression(net, optimizer="rmsprop",
                                     loss="mean_square", learning_rate=0.001)

        model = tflearn.DNN(regress)  # , session=sess)
        model.fit(beat_spectrum_array,
                  sdr_array,
                  n_epoch=n_epochs,
                  snapshot_step=1000,
                  show_metric=True,
                  run_id="repet_choice_{0}_epochs_take_{1}".format(n_epochs, take))
        return model
示例9: alexnet
def alexnet(width, height, lr, output=3):
    """Build an AlexNet-style CNN over single-channel width x height inputs.

    :param width: input image width
    :param height: input image height
    :param lr: learning rate for the momentum optimizer
    :param output: number of softmax output classes (default 3)
    :return: an untrained tflearn.DNN model
    """
    net = input_data(shape=[None, width, height, 1], name='input')
    net = conv_2d(net, 96, 11, strides=4, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    net = conv_2d(net, 256, 5, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Three stacked convs before the last pooling stage.
    for n_filters in (384, 384, 256):
        net = conv_2d(net, n_filters, 3, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Two wide tanh layers with 50% dropout, then the class softmax.
    for _ in range(2):
        net = fully_connected(net, 4096, activation='tanh')
        net = dropout(net, 0.5)
    net = fully_connected(net, output, activation='softmax')
    net = regression(net, optimizer='momentum',
                     loss='categorical_crossentropy',
                     learning_rate=lr, name='targets')
    return tflearn.DNN(net, checkpoint_path='model_alexnet',
                       max_checkpoints=1, tensorboard_verbose=0,
                       tensorboard_dir='log')
示例10: build_network
def build_network(self):
    """Build an AlexNet-like CNN for face-emotion classification.

    Based on:
      https://github.com/tflearn/tflearn/blob/master/examples/images/alexnet.py
      https://github.com/DT42/squeezenet_demo
      https://github.com/yhenon/pysqueezenet/blob/master/squeezenet.py

    Sets self.network and self.model, then restores any saved weights via
    self.load_model(). Uses module-level SIZE_FACE, EMOTIONS, SAVE_DIRECTORY.
    """
    print('[+] Building CNN')
    self.network = input_data(shape=[None, SIZE_FACE, SIZE_FACE, 1])
    # Three conv/pool/LRN stages with decreasing kernel sizes.
    for n_filters, k_size, extra in ((96, 11, {'strides': 4}),
                                     (256, 5, {}),
                                     (256, 3, {})):
        self.network = conv_2d(self.network, n_filters, k_size,
                               activation='relu', **extra)
        self.network = max_pool_2d(self.network, 3, strides=2)
        self.network = local_response_normalization(self.network)
    # Two 1024-wide tanh layers with 50% dropout before the class softmax.
    for _ in range(2):
        self.network = fully_connected(self.network, 1024, activation='tanh')
        self.network = dropout(self.network, 0.5)
    self.network = fully_connected(self.network, len(EMOTIONS),
                                   activation='softmax')
    self.network = regression(self.network,
                              optimizer='momentum',
                              loss='categorical_crossentropy')
    self.model = tflearn.DNN(
        self.network,
        checkpoint_path=SAVE_DIRECTORY + '/alexnet_mood_recognition',
        max_checkpoints=1,
        tensorboard_verbose=2
    )
    self.load_model()
示例11: main
def main():
    """Train a 2-D CNN on similarity matrices to predict SDR, then plot."""
    pickle_folder = '../pickles_rolloff'
    pickle_folders_to_load = sorted(
        f for f in os.listdir(pickle_folder)
        if os.path.isdir(join(pickle_folder, f)))

    # pickle parameters
    fg_or_bg = 'background'
    sdr_type = 'sdr'
    feature = 'sim_mat'
    beat_spec_len = 432

    # training params
    n_classes = 16
    training_percent = 0.85
    testing_percent = 0.15
    validation_percent = 0.00

    # Load features/targets and carve out train/test/validation partitions.
    print('Loading sim_mat and sdrs')
    sim_mat_array, sdr_array = get_generated_data(feature, fg_or_bg, sdr_type)
    print('sim_mat and sdrs loaded')
    print('splitting and grooming data')
    train, test, validate = split_into_sets(
        len(pickle_folders_to_load), training_percent,
        testing_percent, validation_percent)
    # Add a trailing channel axis for the CNN; testY stays 1-D for plotting.
    trainX = np.expand_dims([sim_mat_array[i] for i in train], -1)
    trainY = np.expand_dims([sdr_array[i] for i in train], -1)
    testX = np.expand_dims([sim_mat_array[i] for i in test], -1)
    testY = np.array([sdr_array[i] for i in test])

    print('setting up CNN')
    # Two conv/pool stages, dense head, single linear regression output.
    net = input_data(shape=[None, beat_spec_len, beat_spec_len, 1])
    net = conv_2d(net, 32, 10, activation='relu', regularizer="L2")
    net = max_pool_2d(net, 2)
    net = conv_2d(net, 64, 20, activation='relu', regularizer="L2")
    net = max_pool_2d(net, 2)
    net = fully_connected(net, 128, activation='tanh')
    net = dropout(net, 0.8)
    net = fully_connected(net, 256, activation='tanh')
    net = dropout(net, 0.8)
    net = fully_connected(net, 1, activation='linear')
    regress = tflearn.regression(net, optimizer='sgd',
                                 loss='mean_square', learning_rate=0.01)

    print('running CNN')
    model = tflearn.DNN(regress, tensorboard_verbose=1)
    model.fit(trainX, trainY, n_epoch=10,
              snapshot_step=1000, show_metric=True,
              run_id='{} classes'.format(n_classes - 1))

    # Predict on the test split and plot predictions against ground truth.
    predicted = np.array(model.predict(testX))[:, 0]
    print('plotting')
    plot(testY, predicted)
示例12: build_network
def build_network():
    """Build a small 2-input, 2-class dense classifier network.

    :return: the tflearn regression layer (not a DNN model)
    """
    net = tflearn.input_data(shape=[None, 2])
    # Two relu layers with a light dropout (keep-prob 0.9) after each.
    for width in (64, 128):
        net = tflearn.fully_connected(net, width, activation='relu')
        net = dropout(net, 0.9)
    net = tflearn.fully_connected(net, 2, activation='softmax')
    net = tflearn.regression(net, optimizer='sgd', learning_rate=0.1,
                             loss='categorical_crossentropy')
    return net
示例13: main
def main():
    """Train a 1-D CNN on beat spectra to predict SDR; report test MSE."""
    pickle_folder = '../pickles_rolloff'
    pickle_folders_to_load = sorted(
        f for f in os.listdir(pickle_folder)
        if os.path.isdir(join(pickle_folder, f)))

    # pickle parameters
    fg_or_bg = 'background'
    sdr_type = 'sdr'
    feature = 'beat_spec'
    beat_spec_len = 432

    # training params
    n_classes = 16
    training_percent = 0.85
    testing_percent = 0.15
    validation_percent = 0.00

    # Load features/targets and partition into train/test/validation.
    beat_spec_array, sdr_array = load_beat_spec_and_sdrs(
        pickle_folders_to_load, pickle_folder, feature, fg_or_bg, sdr_type)
    train, test, validate = split_into_sets(
        len(pickle_folders_to_load), training_percent,
        testing_percent, validation_percent)
    # Trailing channel axis for conv_1d; testY stays 1-D for the MSE below.
    trainX = np.expand_dims([beat_spec_array[i] for i in train], -1)
    trainY = np.expand_dims([sdr_array[i] for i in train], -1)
    testX = np.expand_dims([beat_spec_array[i] for i in test], -1)
    testY = np.array([sdr_array[i] for i in test])

    # 1-D conv stack -> dense head -> single linear regression output.
    net = input_data(shape=[None, beat_spec_len, 1])
    net = conv_1d(net, 32, 4, activation='relu', regularizer="L2")
    net = max_pool_1d(net, 2)
    net = conv_1d(net, 64, 80, activation='relu', regularizer="L2")
    net = max_pool_1d(net, 2)
    net = fully_connected(net, 128, activation='relu')
    net = dropout(net, 0.8)
    net = fully_connected(net, 256, activation='relu')  # look for non-tanh things???
    net = dropout(net, 0.8)
    net = fully_connected(net, 1, activation='linear')
    regress = tflearn.regression(net, optimizer='sgd',
                                 loss='mean_square', learning_rate=0.01)

    model = tflearn.DNN(regress, tensorboard_verbose=1)
    model.fit(trainX, trainY, n_epoch=100,
              snapshot_step=1000, show_metric=True, run_id='relus_100_3')

    # Evaluate on the held-out split: mean squared error, then a plot.
    predicted = np.array(model.predict(testX))[:, 0]
    # pprint.pprint()
    print("Test MSE: ", np.square(testY - predicted).mean())
    plot(testY, predicted)
示例14: build_model_1_conv
def build_model_1_conv(learning_rate, input_shape, nb_classes, base_path, drop):
    """Build a single-conv-layer classifier with two dropout-regularized
    dense layers.

    :param learning_rate: SGD learning rate
    :param input_shape: input tensor shape including the leading None batch dim
    :param nb_classes: number of softmax output classes
    :param base_path: directory root for tensorboard logs and checkpoints
    :param drop: dropout keep-probability applied after each dense layer
    :return: an untrained tflearn.DNN model
    """
    net = input_data(shape=input_shape, name='input')
    # One rectangular-kernel (4x16) conv layer feeding the dense head.
    net = conv_2d(net, 64, [4, 16], activation='relu')
    for width in (128, 64):
        net = fully_connected(net, width, activation='relu')
        net = dropout(net, drop)
    net = fully_connected(net, nb_classes, activation='softmax')
    net = regression(net, optimizer='sgd', learning_rate=learning_rate,
                     loss='categorical_crossentropy', name='target')
    return tflearn.DNN(net, tensorboard_verbose=3,
                       tensorboard_dir=base_path + "/tflearn_logs/",
                       checkpoint_path=base_path + "/checkpoints/step")
示例15: main
def main():
    """Train a 1-D CNN on combined beat spectra, timing the run, and report
    test MSE plus elapsed seconds."""
    pickle_folder = 'pickles_combined'

    # pickle parameters
    fg_or_bg = 'background'
    sdr_type = 'sdr'
    feature = 'beat_spec'

    # training params
    training_percent = 0.85
    testing_percent = 0.15
    validation_percent = 0.00
    beat_spec_max = 355

    # Load features/targets and partition into train/test/validation.
    beat_spec_array, sdr_array = unpickle_beat_spec_and_sdrs(
        pickle_folder, beat_spec_max)
    train, test, validate = split_into_sets(
        len(beat_spec_array), training_percent,
        testing_percent, validation_percent)
    # Trailing channel axis for conv_1d; testY stays 1-D for the MSE below.
    trainX = np.expand_dims([beat_spec_array[i] for i in train], -1)
    trainY = np.expand_dims([sdr_array[i] for i in train], -1)
    testX = np.expand_dims([beat_spec_array[i] for i in test], -1)
    testY = np.array([sdr_array[i] for i in test])

    # 1-D conv stack -> dense head -> single linear regression output.
    net = input_data(shape=[None, beat_spec_max, 1])
    net = conv_1d(net, 32, 4, activation='relu', regularizer="L2")
    net = max_pool_1d(net, 2)
    net = conv_1d(net, 64, 80, activation='relu', regularizer="L2")
    net = max_pool_1d(net, 2)
    net = fully_connected(net, 128, activation='relu')
    net = dropout(net, 0.8)
    net = fully_connected(net, 256, activation='relu')  # look for non-tanh things???
    net = dropout(net, 0.8)
    net = fully_connected(net, 1, activation='linear')
    regress = tflearn.regression(net, optimizer='sgd',
                                 loss='mean_square', learning_rate=0.01)

    # Time the full training run.
    start = time.time()
    model = tflearn.DNN(regress, tensorboard_verbose=1)
    model.fit(trainX, trainY, n_epoch=2000,
              snapshot_step=1000, show_metric=True,
              run_id='mir1k_2000_truncate')
    elapsed = (time.time() - start)

    # Evaluate on the held-out split and plot predictions vs ground truth.
    predicted = np.array(model.predict(testX))[:, 0]
    print("Test MSE: ", np.square(testY - predicted).mean())
    print(elapsed, "seconds")
    plot(testY, predicted)