本文整理汇总了Python中tflearn.layers.conv.max_pool_2d方法的典型用法代码示例。如果您正苦于以下问题:Python conv.max_pool_2d方法的具体用法?Python conv.max_pool_2d怎么用?Python conv.max_pool_2d使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类tflearn.layers.conv
的用法示例。
在下文中一共展示了conv.max_pool_2d方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: reduction_block_a
# 需要导入模块: from tflearn.layers import conv [as 别名]
# 或者: from tflearn.layers.conv import max_pool_2d [as 别名]
def reduction_block_a(reduction_input_a):
    """InceptionV4 Reduction-A block.

    Three parallel branches — a strided 3x3 conv, a 1x1->3x3->strided-3x3
    stack, and a strided max-pool — concatenated on the channel axis, which
    halves the spatial resolution while widening the feature map.
    """
    # Branch 1: single strided 3x3 convolution.
    branch_conv = conv_2d(reduction_input_a, 384, 3, strides=2, padding='valid',
                          activation='relu', name='reduction_a_conv1_1_1')
    # Branch 2: 1x1 reduce, 3x3, then strided 3x3.
    branch_stack = conv_2d(reduction_input_a, 192, 1, activation='relu',
                           name='reduction_a_conv2_1_1')
    branch_stack = conv_2d(branch_stack, 224, 3, activation='relu',
                           name='reduction_a_conv2_3_3')
    branch_stack = conv_2d(branch_stack, 256, 3, strides=2, padding='valid',
                           activation='relu', name='reduction_a_conv2_3_3_s2')
    # Branch 3: strided 3x3 max pooling (no parameters).
    branch_pool = max_pool_2d(reduction_input_a, strides=2, padding='valid',
                              kernel_size=3, name='reduction_a_pool')
    # Concatenate all branches along the channel axis (NHWC -> axis 3).
    return merge([branch_conv, branch_stack, branch_pool], mode='concat', axis=3)
################################################################################
# InceptionV4 : definition of reduction_block_b
示例2: reduction_block_b
# 需要导入模块: from tflearn.layers import conv [as 别名]
# 或者: from tflearn.layers.conv import max_pool_2d [as 别名]
def reduction_block_b(reduction_input_b):
    """InceptionV4 Reduction-B block.

    Concatenates a 1x1->strided-3x3 branch, a 1x1->1x7->7x1->strided-3x3
    factorized branch, and a strided max-pool branch along the channel axis.
    """
    # Branch 1: 1x1 reduce followed by a strided 3x3.
    # NOTE(review): the strided conv below has no activation — confirm intentional.
    branch_a = conv_2d(reduction_input_b, 192, 1, activation='relu',
                       name='reduction_b_1_1')
    branch_a = conv_2d(branch_a, 192, 3, strides=2, padding='valid',
                       name='reduction_b_1_3')
    # Branch 2: 1x1 reduce, asymmetric 1x7 and 7x1 convs, then strided 3x3.
    branch_b = conv_2d(reduction_input_b, 256, filter_size=1, activation='relu',
                       name='reduction_b_3_3_reduce')
    branch_b = conv_2d(branch_b, 256, filter_size=[1, 7], activation='relu',
                       name='reduction_b_3_3_asym_1')
    branch_b = conv_2d(branch_b, 320, filter_size=[7, 1], activation='relu',
                       name='reduction_b_3_3_asym_2')
    branch_b = conv_2d(branch_b, 320, 3, strides=2, activation='relu',
                       padding='valid', name='reduction_b_3_3')
    # Branch 3: strided max pooling.
    branch_pool = max_pool_2d(reduction_input_b, kernel_size=3, strides=2,
                              padding='valid')
    # Channel-axis concatenation of the three branches.
    return merge([branch_a, branch_b, branch_pool], mode='concat', axis=3)
################################################################################
# InceptionV4 : definition of inception_block_c
示例3: build_network
# 需要导入模块: from tflearn.layers import conv [as 别名]
# 或者: from tflearn.layers.conv import max_pool_2d [as 别名]
def build_network(self):
    """Build the emotion-recognition CNN, wrap it in a tflearn DNN, and
    load previously saved weights via ``self.load_model()``.

    The network expects 48x48 single-channel inputs and classifies into
    ``len(self.target_classes)`` categories.
    """
    print("---> Starting Neural Network")
    # 48x48 grayscale input.
    self.network = input_data(shape=[None, 48, 48, 1])
    # Two conv + max-pool stages, then a third conv with dropout.
    self.network = conv_2d(self.network, 64, 5, activation='relu')
    self.network = max_pool_2d(self.network, 3, strides=2)
    self.network = conv_2d(self.network, 64, 5, activation='relu')
    self.network = max_pool_2d(self.network, 3, strides=2)
    self.network = conv_2d(self.network, 128, 4, activation='relu')
    self.network = dropout(self.network, 0.3)
    # Dense head: hidden layer then softmax over the target classes.
    self.network = fully_connected(self.network, 3072, activation='relu')
    self.network = fully_connected(self.network, len(self.target_classes),
                                   activation='softmax')
    self.network = regression(self.network,
                              optimizer='momentum',
                              loss='categorical_crossentropy')
    self.model = tflearn.DNN(
        self.network,
        checkpoint_path='model_1_nimish',
        max_checkpoints=1,
        tensorboard_verbose=2
    )
    self.load_model()
示例4: alexnet
# 需要导入模块: from tflearn.layers import conv [as 别名]
# 或者: from tflearn.layers.conv import max_pool_2d [as 别名]
def alexnet(width, height, lr, output=3):
    """Build an AlexNet-style classifier for single-channel images.

    Args:
        width, height: spatial input dimensions.
        lr: learning rate for the momentum optimizer.
        output: number of output classes (default 3).

    Returns:
        A compiled ``tflearn.DNN`` model (untrained).
    """
    net = input_data(shape=[None, width, height, 1], name='input')
    # Conv stage 1: large strided kernel + pool + LRN.
    net = conv_2d(net, 96, 11, strides=4, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Conv stage 2.
    net = conv_2d(net, 256, 5, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Conv stage 3: three stacked 3x3 convolutions.
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 256, 3, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Fully connected head with dropout, then softmax output.
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, output, activation='softmax')
    net = regression(net, optimizer='momentum',
                     loss='categorical_crossentropy',
                     learning_rate=lr, name='targets')
    return tflearn.DNN(net, checkpoint_path='model_alexnet',
                       max_checkpoints=1, tensorboard_verbose=2,
                       tensorboard_dir='log')
示例5: alexnet2
# 需要导入模块: from tflearn.layers import conv [as 别名]
# 或者: from tflearn.layers.conv import max_pool_2d [as 别名]
def alexnet2(width, height, lr, output=3):
    """Deeper AlexNet variant: two full convolutional towers followed by
    four dense layers, for single-channel input images.

    Args:
        width, height: spatial input dimensions.
        lr: learning rate for the momentum optimizer.
        output: number of output classes (default 3).

    Returns:
        A compiled ``tflearn.DNN`` model (untrained).
    """
    net = input_data(shape=[None, width, height, 1], name='input')
    # First convolutional tower.
    net = conv_2d(net, 96, 11, strides=4, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    net = conv_2d(net, 256, 5, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 256, 3, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    # Second convolutional tower (repeats the 5x5 / 3x3 stack).
    net = conv_2d(net, 256, 5, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 256, 3, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Four dense layers with dropout, then the softmax classifier.
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, output, activation='softmax')
    net = regression(net, optimizer='momentum',
                     loss='categorical_crossentropy',
                     learning_rate=lr, name='targets')
    return tflearn.DNN(net, checkpoint_path='model_alexnet',
                       max_checkpoints=1, tensorboard_verbose=2,
                       tensorboard_dir='log')
示例6: sentnet_color_2d
# 需要导入模块: from tflearn.layers import conv [as 别名]
# 或者: from tflearn.layers.conv import max_pool_2d [as 别名]
def sentnet_color_2d(width, height, frame_count, lr, output=9, model_name='sentnet_color.model'):
    """Build a deep AlexNet-style network for 3-channel (color) input.

    Args:
        width, height: spatial input dimensions.
        frame_count: unused here; kept for signature compatibility with
            the 3D variants of this model family.
        lr: learning rate for the momentum optimizer.
        output: number of output classes (default 9).
        model_name: unused here; kept for signature compatibility.

    Returns:
        A compiled ``tflearn.DNN`` model (untrained, no checkpointing).
    """
    net = input_data(shape=[None, width, height, 3], name='input')
    # First convolutional tower.
    net = conv_2d(net, 96, 11, strides=4, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    net = conv_2d(net, 256, 5, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 256, 3, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    # Second convolutional tower.
    net = conv_2d(net, 256, 5, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 256, 3, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Dense head: four dropout-regularized layers, then softmax.
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, output, activation='softmax')
    net = regression(net, optimizer='momentum',
                     loss='categorical_crossentropy',
                     learning_rate=lr, name='targets')
    # No checkpoints and no tensorboard logging for this variant.
    return tflearn.DNN(net,
                       max_checkpoints=0, tensorboard_verbose=0,
                       tensorboard_dir='log')
示例7: alexnet2
# 需要导入模块: from tflearn.layers import conv [as 别名]
# 或者: from tflearn.layers.conv import max_pool_2d [as 别名]
def alexnet2(width, height, lr, output=3):
    """Deeper AlexNet variant with two convolutional towers and four dense
    layers, for single-channel input images.

    Args:
        width, height: spatial input dimensions.
        lr: learning rate for the momentum optimizer.
        output: number of output classes (default 3).

    Returns:
        A compiled ``tflearn.DNN`` model (untrained).
    """
    net = input_data(shape=[None, width, height, 1], name='input')
    # Tower 1.
    net = conv_2d(net, 96, 11, strides=4, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    net = conv_2d(net, 256, 5, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 256, 3, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    # Tower 2.
    net = conv_2d(net, 256, 5, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 256, 3, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Dense head.
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, output, activation='softmax')
    net = regression(net, optimizer='momentum',
                     loss='categorical_crossentropy',
                     learning_rate=lr, name='targets')
    return tflearn.DNN(net, checkpoint_path='model_alexnet',
                       max_checkpoints=1, tensorboard_verbose=0,
                       tensorboard_dir='log')
示例8: alexnet
# 需要导入模块: from tflearn.layers import conv [as 别名]
# 或者: from tflearn.layers.conv import max_pool_2d [as 别名]
def alexnet(width, height, lr, output=3):
    """Classic AlexNet for single-channel images.

    Args:
        width, height: spatial input dimensions.
        lr: learning rate for the momentum optimizer.
        output: number of output classes (default 3).

    Returns:
        A compiled ``tflearn.DNN`` model (untrained).
    """
    net = input_data(shape=[None, width, height, 1], name='input')
    # Conv stage 1.
    net = conv_2d(net, 96, 11, strides=4, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Conv stage 2.
    net = conv_2d(net, 256, 5, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Conv stage 3: three stacked 3x3 convolutions.
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 256, 3, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Dense head with dropout, then softmax.
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, output, activation='softmax')
    net = regression(net, optimizer='momentum',
                     loss='categorical_crossentropy',
                     learning_rate=lr, name='targets')
    return tflearn.DNN(net, checkpoint_path='model_alexnet',
                       max_checkpoints=1, tensorboard_verbose=0,
                       tensorboard_dir='log')
示例9: alexnet
# 需要导入模块: from tflearn.layers import conv [as 别名]
# 或者: from tflearn.layers.conv import max_pool_2d [as 别名]
def alexnet(width, height, lr):
    """Classic AlexNet for single-channel images with a fixed 3-class output.

    Args:
        width, height: spatial input dimensions.
        lr: learning rate for the momentum optimizer.

    Returns:
        A compiled ``tflearn.DNN`` model (untrained).
    """
    net = input_data(shape=[None, width, height, 1], name='input')
    # Conv stage 1.
    net = conv_2d(net, 96, 11, strides=4, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Conv stage 2.
    net = conv_2d(net, 256, 5, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Conv stage 3: three stacked 3x3 convolutions.
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 256, 3, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Dense head with dropout, then a 3-way softmax.
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, 3, activation='softmax')
    net = regression(net, optimizer='momentum',
                     loss='categorical_crossentropy',
                     learning_rate=lr, name='targets')
    return tflearn.DNN(net, checkpoint_path='model_alexnet',
                       max_checkpoints=1, tensorboard_verbose=0,
                       tensorboard_dir='log')
示例10: alexnet
# 需要导入模块: from tflearn.layers import conv [as 别名]
# 或者: from tflearn.layers.conv import max_pool_2d [as 别名]
def alexnet(width, height, lr):
    """Classic AlexNet for single-channel images, 3-class output, with
    verbose (level 2) tensorboard logging.

    Args:
        width, height: spatial input dimensions.
        lr: learning rate for the momentum optimizer.

    Returns:
        A compiled ``tflearn.DNN`` model (untrained).
    """
    net = input_data(shape=[None, width, height, 1], name='input')
    # Conv stage 1.
    net = conv_2d(net, 96, 11, strides=4, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Conv stage 2.
    net = conv_2d(net, 256, 5, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Conv stage 3: three stacked 3x3 convolutions.
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 384, 3, activation='relu')
    net = conv_2d(net, 256, 3, activation='relu')
    net = max_pool_2d(net, 3, strides=2)
    net = local_response_normalization(net)
    # Dense head with dropout, then a 3-way softmax.
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, 4096, activation='tanh')
    net = dropout(net, 0.5)
    net = fully_connected(net, 3, activation='softmax')
    net = regression(net, optimizer='momentum',
                     loss='categorical_crossentropy',
                     learning_rate=lr, name='targets')
    return tflearn.DNN(net, checkpoint_path='model_alexnet',
                       max_checkpoints=1, tensorboard_verbose=2,
                       tensorboard_dir='log')
示例11: make_core_network
# 需要导入模块: from tflearn.layers import conv [as 别名]
# 或者: from tflearn.layers.conv import max_pool_2d [as 别名]
def make_core_network(network):
    """Append a small MNIST-style convnet to ``network`` and return the
    resulting graph node (up to and including the 10-way softmax).

    The input is reshaped to 28x28x1, so the incoming tensor must hold
    flattened 784-element images.
    """
    net = tflearn.reshape(network, [-1, 28, 28, 1], name="reshape")
    # Two conv + pool + LRN stages with L2 weight regularization.
    net = conv_2d(net, 32, 3, activation='relu', regularizer="L2")
    net = max_pool_2d(net, 2)
    net = local_response_normalization(net)
    net = conv_2d(net, 64, 3, activation='relu', regularizer="L2")
    net = max_pool_2d(net, 2)
    net = local_response_normalization(net)
    # Dense head: two tanh layers with dropout, then 10-class softmax.
    net = fully_connected(net, 128, activation='tanh')
    net = dropout(net, 0.8)
    net = fully_connected(net, 256, activation='tanh')
    net = dropout(net, 0.8)
    net = fully_connected(net, 10, activation='softmax')
    return net
示例12: test_vbs1
# 需要导入模块: from tflearn.layers import conv [as 别名]
# 或者: from tflearn.layers.conv import max_pool_2d [as 别名]
def test_vbs1(self):
    """Train a tiny MNIST convnet for one epoch and assert that the
    configured batch_size / validation_batch_size are recorded on the
    model's train op.
    """
    with tf.Graph().as_default():
        # Load MNIST and keep only a handful of samples to keep the test fast.
        import tflearn.datasets.mnist as mnist
        X, Y, testX, testY = mnist.load_data(one_hot=True)
        X = X.reshape([-1, 28, 28, 1])
        testX = testX.reshape([-1, 28, 28, 1])
        X = X[:20, :, :, :]
        Y = Y[:20, :]
        testX = testX[:10, :, :, :]
        testY = testY[:10, :]
        # Build a small convolutional network.
        network = input_data(shape=[None, 28, 28, 1], name='input')
        network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
        network = max_pool_2d(network, 2)
        network = local_response_normalization(network)
        network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
        network = max_pool_2d(network, 2)
        network = local_response_normalization(network)
        network = fully_connected(network, 128, activation='tanh')
        network = dropout(network, 0.8)
        network = fully_connected(network, 256, activation='tanh')
        network = dropout(network, 0.8)
        network = fully_connected(network, 10, activation='softmax')
        network = regression(network, optimizer='adam', learning_rate=0.01,
                             loss='categorical_crossentropy', name='target')
        # Train with explicit training and validation batch sizes.
        model = tflearn.DNN(network, tensorboard_verbose=3)
        model.fit({'input': X}, {'target': Y}, n_epoch=1,
                  batch_size=10,
                  validation_set=({'input': testX}, {'target': testY}),
                  validation_batch_size=5,
                  snapshot_step=10, show_metric=True, run_id='convnet_mnist_vbs')
        # The batch sizes should be recorded on the first train op.
        self.assertEqual(model.train_ops[0].validation_batch_size, 5)
        self.assertEqual(model.train_ops[0].batch_size, 10)
示例13: model_for_type
# 需要导入模块: from tflearn.layers import conv [as 别名]
# 或者: from tflearn.layers.conv import max_pool_2d [as 别名]
def model_for_type(neural_net_type, tile_size, on_band_count):
    """Build a tflearn DNN for road classification on image tiles.

    The neural_net_type can be: one_layer_relu,
                                one_layer_relu_conv,
                                two_layer_relu_conv.

    Args:
        neural_net_type: one of the three architecture names above.
        tile_size: side length of the square input tile.
        on_band_count: number of input channels (bands).

    Returns:
        A compiled ``tflearn.DNN`` (untrained).

    Raises:
        ValueError: if ``neural_net_type`` is not a recognized architecture.
    """
    network = tflearn.input_data(shape=[None, tile_size, tile_size, on_band_count])
    # NN architectures mirror ch. 3 of www.cs.toronto.edu/~vmnih/docs/Mnih_Volodymyr_PhD_Thesis.pdf
    if neural_net_type == 'one_layer_relu':
        network = tflearn.fully_connected(network, 64, activation='relu')
    elif neural_net_type == 'one_layer_relu_conv':
        network = conv_2d(network, 64, 12, strides=4, activation='relu')
        network = max_pool_2d(network, 3)
    elif neural_net_type == 'two_layer_relu_conv':
        network = conv_2d(network, 64, 12, strides=4, activation='relu')
        network = max_pool_2d(network, 3)
        network = conv_2d(network, 128, 4, activation='relu')
    else:
        # BUG FIX: the original printed "ERROR: exiting" but then fell
        # through and silently built a softmax over the raw input. Fail
        # loudly instead so a typo in the config cannot train a bogus model.
        raise ValueError(
            "unknown layer type for neural net: {}".format(neural_net_type))
    # classify as road or not road
    softmax = tflearn.fully_connected(network, 2, activation='softmax')
    # hyperparameters based on www.cs.toronto.edu/~vmnih/docs/Mnih_Volodymyr_PhD_Thesis.pdf
    momentum = tflearn.optimizers.Momentum(
        learning_rate=.005, momentum=0.9,
        lr_decay=0.0002, name='Momentum')
    net = tflearn.regression(softmax, optimizer=momentum, loss='categorical_crossentropy')
    return tflearn.DNN(net, tensorboard_verbose=0)
示例14: build_network
# 需要导入模块: from tflearn.layers import conv [as 别名]
# 或者: from tflearn.layers.conv import max_pool_2d [as 别名]
def build_network(self):
    """Build a reduced AlexNet-style CNN for facial emotion recognition,
    wrap it in a tflearn DNN, and load saved weights via ``self.load_model()``.

    Input is SIZE_FACE x SIZE_FACE grayscale; output is a softmax over
    ``len(EMOTIONS)`` classes.
    """
    # Smaller 'AlexNet'
    # https://github.com/tflearn/tflearn/blob/master/examples/images/alexnet.py
    print('[+] Building CNN')
    self.network = input_data(shape=[None, SIZE_FACE, SIZE_FACE, 1])
    self.network = conv_2d(self.network, 64, 5, activation='relu')
    #self.network = local_response_normalization(self.network)
    self.network = max_pool_2d(self.network, 3, strides=2)
    self.network = conv_2d(self.network, 64, 5, activation='relu')
    self.network = max_pool_2d(self.network, 3, strides=2)
    self.network = conv_2d(self.network, 128, 4, activation='relu')
    self.network = dropout(self.network, 0.3)
    # Dense head: one hidden layer, then softmax over the emotion classes.
    self.network = fully_connected(self.network, 3072, activation='relu')
    self.network = fully_connected(self.network, len(EMOTIONS),
                                   activation='softmax')
    self.network = regression(self.network,
                              optimizer='momentum',
                              loss='categorical_crossentropy')
    self.model = tflearn.DNN(self.network,
                             checkpoint_path=SAVE_DIRECTORY + '/emotion_recognition',
                             max_checkpoints=1,
                             tensorboard_verbose=2)
    self.load_model()
示例15: construct_inceptionv4onfire
# 需要导入模块: from tflearn.layers import conv [as 别名]
# 或者: from tflearn.layers.conv import max_pool_2d [as 别名]
def construct_inceptionv4onfire(x, y, training=True, enable_batch_norm=True):
    """Build the InceptionV4-OnFire binary fire-detection network.

    Args:
        x: input image width.
        y: input image height.
        training: when True, adds dropout and a regression layer for training;
            when False, the softmax output is used directly for inference.
        enable_batch_norm: when True, batch-normalizes the two stem pool outputs.

    Returns:
        A compiled ``tflearn.DNN`` model.
    """
    network = input_data(shape=[None, y, x, 3])

    # --- InceptionV4 stem ---
    conv1_3_3 = conv_2d(network, 32, 3, strides=2, activation='relu',
                        name='conv1_3_3_s2', padding='valid')
    conv2_3_3 = conv_2d(conv1_3_3, 32, 3, activation='relu', name='conv2_3_3')
    conv3_3_3 = conv_2d(conv2_3_3, 64, 3, activation='relu', name='conv3_3_3')

    # First split: strided pool vs strided conv, concatenated on channels.
    b_conv_1_pool = max_pool_2d(conv3_3_3, kernel_size=3, strides=2,
                                padding='valid', name='b_conv_1_pool')
    if enable_batch_norm:
        b_conv_1_pool = batch_normalization(b_conv_1_pool)
    b_conv_1_conv = conv_2d(conv3_3_3, 96, 3, strides=2, padding='valid',
                            activation='relu', name='b_conv_1_conv')
    b_conv_1 = merge([b_conv_1_conv, b_conv_1_pool], mode='concat', axis=3)

    # Second split: plain 1x1->3x3 branch vs factorized 1x7/7x1 branch.
    b_conv4_1_1 = conv_2d(b_conv_1, 64, 1, activation='relu', name='conv4_3_3')
    b_conv4_3_3 = conv_2d(b_conv4_1_1, 96, 3, padding='valid',
                          activation='relu', name='conv5_3_3')
    b_conv4_1_1_reduce = conv_2d(b_conv_1, 64, 1, activation='relu',
                                 name='b_conv4_1_1_reduce')
    b_conv4_1_7 = conv_2d(b_conv4_1_1_reduce, 64, [1, 7], activation='relu',
                          name='b_conv4_1_7')
    b_conv4_7_1 = conv_2d(b_conv4_1_7, 64, [7, 1], activation='relu',
                          name='b_conv4_7_1')
    # NOTE(review): no activation on the next conv — confirm intentional.
    b_conv4_3_3_v = conv_2d(b_conv4_7_1, 96, 3, padding='valid',
                            name='b_conv4_3_3_v')
    b_conv_4 = merge([b_conv4_3_3_v, b_conv4_3_3], mode='concat', axis=3)

    # Third split: strided conv vs strided pool.
    b_conv5_3_3 = conv_2d(b_conv_4, 192, 3, padding='valid',
                          activation='relu', name='b_conv5_3_3', strides=2)
    b_pool5_3_3 = max_pool_2d(b_conv_4, kernel_size=3, padding='valid',
                              strides=2, name='b_pool5_3_3')
    if enable_batch_norm:
        b_pool5_3_3 = batch_normalization(b_pool5_3_3)
    b_conv_5 = merge([b_conv5_3_3, b_pool5_3_3], mode='concat', axis=3)
    net = b_conv_5

    # --- InceptionV4 modules (defined elsewhere in this module) ---
    net = inception_block_a(net)
    net = inception_block_b(net)
    net = inception_block_c(net)

    # Global average pool, optional dropout, binary softmax.
    pool5_7_7 = global_avg_pool(net)
    if training:
        pool5_7_7 = dropout(pool5_7_7, 0.4)
    loss = fully_connected(pool5_7_7, 2, activation='softmax')

    if training:
        network = regression(loss, optimizer='rmsprop',
                             loss='categorical_crossentropy',
                             learning_rate=0.001)
    else:
        network = loss
    model = tflearn.DNN(network, checkpoint_path='inceptionv4onfire',
                        max_checkpoints=1, tensorboard_verbose=0)
    return model
################################################################################