This article collects typical usage examples of the Python method tflearn.conv_2d. If you are wondering how exactly tflearn.conv_2d is used, or are looking for working examples of it, the curated code samples below may help. You can also explore further usage examples of the tflearn module that this method belongs to.
The following shows 15 code examples of tflearn.conv_2d, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
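The snippets come from different projects, so each assumes its own module-level imports and constants. As a rough sketch (not taken from any single example), the tflearn layer functions used throughout can be imported as follows; the examples that call bare names such as conv_2d or input_data rely on the from ... import form:

import tensorflow as tf
import tflearn
from tflearn.layers.core import input_data, fully_connected, dropout, flatten
from tflearn.layers.conv import conv_2d, max_pool_2d, avg_pool_2d, global_avg_pool
from tflearn.layers.normalization import local_response_normalization, batch_normalization
from tflearn.layers.estimator import regression
from tflearn.optimizers import Momentum
from tflearn.models.dnn import DNN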
Example 1: resnext
# Required imports: import tflearn [as alias]
# Or: from tflearn import conv_2d [as alias]
def resnext(width, height, frame_count, lr, output=9, model_name='sentnet_color.model'):
    # NOTE: `n` (residual blocks per stage) is not defined in the original snippet;
    # the value below is an assumed placeholder so the function runs standalone.
    n = 5
    net = input_data(shape=[None, width, height, 3], name='input')
    net = tflearn.conv_2d(net, 16, 3, regularizer='L2', weight_decay=0.0001)
    net = tflearn.layers.conv.resnext_block(net, n, 16, 32)
    net = tflearn.resnext_block(net, 1, 32, 32, downsample=True)
    net = tflearn.resnext_block(net, n - 1, 32, 32)
    net = tflearn.resnext_block(net, 1, 64, 32, downsample=True)
    net = tflearn.resnext_block(net, n - 1, 64, 32)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, 'relu')
    net = tflearn.global_avg_pool(net)
    # Regression
    net = tflearn.fully_connected(net, output, activation='softmax')
    opt = tflearn.Momentum(0.1, lr_decay=0.1, decay_step=32000, staircase=True)
    net = tflearn.regression(net, optimizer=opt,
                             loss='categorical_crossentropy')
    model = tflearn.DNN(net,
                        max_checkpoints=0, tensorboard_verbose=0, tensorboard_dir='log')
    return model
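A hypothetical call to this builder (image size, learning rate and training settings are illustrative; frame_count and model_name are accepted but unused inside the snippet):

model = resnext(160, 120, frame_count=1, lr=1e-3, output=9)
model.fit(X, Y, n_epoch=10, validation_set=0.1, show_metric=True)  # X: [N, 160, 120, 3], Y: one-hot [N, 9]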
Example 2: create_actor_network
# Required imports: import tflearn [as alias]
# Or: from tflearn import conv_2d [as alias]
def create_actor_network(self):
    with tf.variable_scope('actor'):
        inputs = tflearn.input_data(
            shape=[None, self.s_dim[0], self.s_dim[1]])
        _input = tf.expand_dims(inputs, -1)
        merge_net = tflearn.conv_2d(
            _input, FEATURE_NUM, KERNEL, activation='relu')
        merge_net = tflearn.conv_2d(
            merge_net, FEATURE_NUM, KERNEL, activation='relu')
        avg_net = tflearn.global_avg_pool(merge_net)
        out = tflearn.fully_connected(
            avg_net, self.a_dim, activation='softmax')
        return inputs, out
Example 3: create_critic_network
# Required imports: import tflearn [as alias]
# Or: from tflearn import conv_2d [as alias]
def create_critic_network(self):
    with tf.variable_scope('critic'):
        inputs = tflearn.input_data(
            shape=[None, self.s_dim[0], self.s_dim[1]])
        _input = tf.expand_dims(inputs, -1)
        merge_net = tflearn.conv_2d(
            _input, FEATURE_NUM, KERNEL, activation='relu')
        merge_net = tflearn.conv_2d(
            merge_net, FEATURE_NUM, KERNEL, activation='relu')
        avg_net = tflearn.global_avg_pool(merge_net)
        # dense_net_0 = tflearn.fully_connected(
        #     merge_net, 64, activation='relu')
        # dense_net_0 = tflearn.dropout(dense_net_0, 0.8)
        out = tflearn.fully_connected(avg_net, 1, activation='linear')
        return inputs, out
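The actor and critic snippets above reference module-level constants from their source project that are not shown here; s_dim and a_dim are set by the enclosing class. Purely illustrative stand-in values (not the originals) could be:

FEATURE_NUM = 64  # number of convolution filters (assumed)
KERNEL = 4        # convolution kernel size (assumed)

Note that other snippets on this page come from different projects and pass KERNEL in a different position of conv_2d, where it denotes the number of filters rather than the kernel size.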
Example 4: vgg16
# Required imports: import tflearn [as alias]
# Or: from tflearn import conv_2d [as alias]
def vgg16(input, num_class):
    network = tflearn.conv_2d(
        input, KERNEL, 3, activation='relu', regularizer="L2", weight_decay=0.0001)
    network = tflearn.avg_pool_2d(network, 2)
    network = tflearn.conv_2d(
        network, KERNEL, 3, activation='relu', regularizer="L2", weight_decay=0.0001)
    network = tflearn.avg_pool_2d(network, 2)
    network = tflearn.conv_2d(
        network, KERNEL, 3, activation='relu', regularizer="L2", weight_decay=0.0001)
    network = tflearn.avg_pool_2d(network, 2)
    network = tflearn.conv_2d(
        network, KERNEL, 3, activation='relu', regularizer="L2", weight_decay=0.0001)
    network = tflearn.avg_pool_2d(network, 2)
    x = tflearn.fully_connected(
        network, num_class, activation='sigmoid', scope='fc8')
    return x
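Since vgg16 only builds the sub-network and returns sigmoid outputs, a hypothetical way to wire it into a trainable tflearn model is shown below (input shape, optimizer and loss are illustrative; KERNEL, used here as the filter count, is assumed to be defined at module level):

inputs = tflearn.input_data(shape=[None, 224, 224, 3])
outputs = vgg16(inputs, num_class=10)
net = tflearn.regression(outputs, optimizer='adam', loss='binary_crossentropy')
model = tflearn.DNN(net)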
Example 5: CNN_Core
# Required imports: import tflearn [as alias]
# Or: from tflearn import conv_2d [as alias]
def CNN_Core(x, reuse=False):
    with tf.variable_scope('cnn_core', reuse=reuse):
        network = tflearn.conv_2d(
            x, KERNEL, 5, activation='relu', regularizer="L2", weight_decay=0.0001)
        network = tflearn.max_pool_2d(network, 2)
        network = tflearn.conv_2d(
            network, KERNEL, 3, activation='relu', regularizer="L2", weight_decay=0.0001)
        network = tflearn.max_pool_2d(network, 2)
        network = tflearn.conv_2d(
            network, KERNEL, 3, activation='relu', regularizer="L2", weight_decay=0.0001)
        network = tflearn.max_pool_2d(network, 2)
        network = tflearn.conv_2d(
            network, KERNEL // 2, 3, activation='relu', regularizer="L2", weight_decay=0.0001)
        # network = tflearn.fully_connected(
        #     network, DENSE_SIZE, activation='relu')
        split_flat = tflearn.flatten(network)
        return split_flat
Example 6: build_dqn
# Required imports: import tflearn [as alias]
# Or: from tflearn import conv_2d [as alias]
def build_dqn(num_actions, action_repeat):
    """
    Building a DQN.
    """
    inputs = tf.placeholder(tf.float32, [None, action_repeat, 84, 84])
    # Inputs arrive as [batch, channel, height, width] and need to be
    # transposed into [batch, height, width, channel].
    net = tf.transpose(inputs, [0, 2, 3, 1])
    net = tflearn.conv_2d(net, 32, 8, strides=4, activation='relu')
    net = tflearn.conv_2d(net, 64, 4, strides=2, activation='relu')
    net = tflearn.fully_connected(net, 256, activation='relu')
    q_values = tflearn.fully_connected(net, num_actions)
    return inputs, q_values
# =============================
# ATARI Environment Wrapper
# =============================
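A minimal usage sketch for this DQN builder (hypothetical TensorFlow 1.x session code; state_batch is assumed to be a float array of shape [N, action_repeat, 84, 84]):

import numpy as np

inputs, q_values = build_dqn(num_actions=4, action_repeat=4)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    q = sess.run(q_values, feed_dict={inputs: state_batch})
    greedy_actions = np.argmax(q, axis=1)  # one greedy action index per state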
Example 7: vgg_net_19
# Required imports: import tflearn [as alias]
# Or: from tflearn import conv_2d [as alias]
def vgg_net_19(width, height):
    network = input_data(shape=[None, height, width, 3], name='input')
    network = conv_2d(network, 64, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 64, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network, 2, strides=2)
    network = conv_2d(network, 128, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 128, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network, 2, strides=2)
    network = conv_2d(network, 256, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 256, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 256, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 256, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network, 2, strides=2)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network, 2, strides=2)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network, 2, strides=2)
    network = fully_connected(network, 4096, activation='relu', weight_decay=5e-4)
    network = dropout(network, keep_prob=0.5)
    network = fully_connected(network, 4096, activation='relu', weight_decay=5e-4)
    network = dropout(network, keep_prob=0.5)
    network = fully_connected(network, 1000, activation='softmax', weight_decay=5e-4)
    opt = Momentum(learning_rate=0, momentum=0.9)
    network = regression(network, optimizer=opt, loss='categorical_crossentropy', name='targets')
    model = DNN(network, checkpoint_path='', max_checkpoints=1, tensorboard_verbose=2, tensorboard_dir='')
    return model
# VGG-19 model for inspecting intermediate activations.
# Rename the output you want to inspect, connect it to the next layer, and change the output layer at the bottom (model = DNN(...)).
# Make sure to use the appropriate test function, depending on whether your output is a tensor or a vector.
Example 8: vgg_net_19_activations
# Required imports: import tflearn [as alias]
# Or: from tflearn import conv_2d [as alias]
def vgg_net_19_activations(width, height):
    network = input_data(shape=[None, height, width, 3], name='input')
    network1 = conv_2d(network, 64, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network2 = conv_2d(network1, 64, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network2, 2, strides=2)
    network = conv_2d(network, 128, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 128, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network, 2, strides=2)
    network = conv_2d(network, 256, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 256, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 256, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 256, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network, 2, strides=2)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network, 2, strides=2)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = conv_2d(network, 512, 3, activation='relu', regularizer='L2', weight_decay=5e-4)
    network = max_pool_2d(network, 2, strides=2)
    network = fully_connected(network, 4096, activation='relu', weight_decay=5e-4)
    network = dropout(network, keep_prob=0.5)
    network = fully_connected(network, 4096, activation='relu', weight_decay=5e-4)
    network = dropout(network, keep_prob=0.5)
    network = fully_connected(network, 1000, activation='softmax', weight_decay=5e-4)
    opt = Momentum(learning_rate=0, momentum=0.9)
    network = regression(network, optimizer=opt, loss='categorical_crossentropy', name='targets')
    # The model output is network1 (the first conv layer), so predictions return its activations.
    model = DNN(network1, checkpoint_path='', max_checkpoints=1, tensorboard_verbose=2, tensorboard_dir='')
    return model
Example 9: sentnet_color_2d
# Required imports: import tflearn [as alias]
# Or: from tflearn import conv_2d [as alias]
def sentnet_color_2d(width, height, frame_count, lr, output=9, model_name='sentnet_color.model'):
    network = input_data(shape=[None, width, height, 3], name='input')
    network = conv_2d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, output, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=lr, name='targets')
    model = tflearn.DNN(network,
                        max_checkpoints=0, tensorboard_verbose=0, tensorboard_dir='log')
    return model
Example 10: alexnet2
# Required imports: import tflearn [as alias]
# Or: from tflearn import conv_2d [as alias]
def alexnet2(width, height, lr, output=3):
    network = input_data(shape=[None, width, height, 1], name='input')
    network = conv_2d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, output, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=lr, name='targets')
    model = tflearn.DNN(network, checkpoint_path='model_alexnet',
                        max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log')
    return model
Example 11: alexnet
# Required imports: import tflearn [as alias]
# Or: from tflearn import conv_2d [as alias]
def alexnet(width, height, lr, output=3):
    network = input_data(shape=[None, width, height, 1], name='input')
    network = conv_2d(network, 96, 11, strides=4, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 256, 5, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 384, 3, activation='relu')
    network = conv_2d(network, 256, 3, activation='relu')
    network = max_pool_2d(network, 3, strides=2)
    network = local_response_normalization(network)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, 4096, activation='tanh')
    network = dropout(network, 0.5)
    network = fully_connected(network, output, activation='softmax')
    network = regression(network, optimizer='momentum',
                         loss='categorical_crossentropy',
                         learning_rate=lr, name='targets')
    model = tflearn.DNN(network, checkpoint_path='model_alexnet',
                        max_checkpoints=1, tensorboard_verbose=0, tensorboard_dir='log')
    return model
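Because the input layer is named 'input' and the regression target is named 'targets', a hypothetical training call (array names and hyperparameters are illustrative) feeds dictionaries keyed by those names:

model = alexnet(80, 60, lr=1e-3, output=3)
model.fit({'input': X_train}, {'targets': y_train}, n_epoch=8,
          validation_set=({'input': X_val}, {'targets': y_val}),
          show_metric=True, run_id='alexnet_example')  # X_train: [N, 80, 60, 1]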
Example 12: CNN_Core
# Required imports: import tflearn [as alias]
# Or: from tflearn import conv_2d [as alias]
def CNN_Core(x, reuse=False):
    with tf.variable_scope('cnn_core', reuse=reuse):
        network = tflearn.conv_2d(
            x, KERNEL, 3, activation='relu', regularizer="L2", weight_decay=0.0001)
        network = tflearn.max_pool_2d(network, 2)
        cnn_network = tflearn.conv_2d(
            network, KERNEL * 2, 3, activation='relu', regularizer="L2", weight_decay=0.0001)
        # network = tflearn.max_pool_2d(network, 2)
        # network = tflearn.conv_2d(
        #     network, KERNEL * 4, 3, activation='relu', regularizer="L2", weight_decay=0.0001)
        # network = tflearn.max_pool_2d(network, 2)
        network = tflearn.global_avg_pool(cnn_network)
        split_flat = tflearn.flatten(network)
        # print(split_flat.get_shape().as_list())
        return split_flat, cnn_network
Example 13: CNN_Core
# Required imports: import tflearn [as alias]
# Or: from tflearn import conv_2d [as alias]
def CNN_Core(self, x, reuse=False):
    with tf.variable_scope('cnn_core', reuse=reuse):
        network = tflearn.conv_2d(
            x, KERNEL, 5, activation='relu', regularizer="L2", weight_decay=0.0001)
        network = tflearn.avg_pool_2d(network, 3)
        network = tflearn.conv_2d(
            network, KERNEL, 3, activation='relu', regularizer="L2", weight_decay=0.0001)
        network = tflearn.avg_pool_2d(network, 2)
        network = tflearn.fully_connected(
            network, DENSE_SIZE, activation='relu')
        split_flat = tflearn.flatten(network)
        return split_flat
Example 14: vqn_model
# Required imports: import tflearn [as alias]
# Or: from tflearn import conv_2d [as alias]
def vqn_model(self, x):
    with tf.variable_scope('vqn'):
        inputs = tflearn.input_data(placeholder=x)
        _split_array = []
        for i in range(INPUT_SEQ):
            tmp_network = tf.reshape(
                inputs[:, i:i+1, :, :, :], [-1, INPUT_H, INPUT_W, INPUT_D])
            if i == 0:
                _split_array.append(self.CNN_Core(tmp_network))
            else:
                _split_array.append(self.CNN_Core(tmp_network, True))
        merge_net = tflearn.merge(_split_array, 'concat')
        merge_net = tflearn.flatten(merge_net)
        _count = merge_net.get_shape().as_list()[1]
        with tf.variable_scope('full-cnn'):
            # Integer division keeps the reshape dimension an int under Python 3.
            net = tf.reshape(
                merge_net, [-1, INPUT_SEQ, _count // INPUT_SEQ, 1])
            network = tflearn.conv_2d(
                net, KERNEL, 5, activation='relu', regularizer="L2", weight_decay=0.0001)
            network = tflearn.max_pool_2d(network, 3)
            network = tflearn.layers.normalization.batch_normalization(network)
            network = tflearn.conv_2d(
                network, KERNEL, 3, activation='relu', regularizer="L2", weight_decay=0.0001)
            network = tflearn.max_pool_2d(network, 2)
            network = tflearn.layers.normalization.batch_normalization(network)
            cnn_result = tflearn.fully_connected(
                network, DENSE_SIZE, activation='relu')
        out = tflearn.fully_connected(
            cnn_result, OUTPUT_DIM, activation='sigmoid')
        return out
Example 15: CNN_Core
# Required imports: import tflearn [as alias]
# Or: from tflearn import conv_2d [as alias]
def CNN_Core(x, reuse=False):
    with tf.variable_scope('cnn_core', reuse=reuse):
        network = tflearn.conv_2d(
            x, KERNEL, 3, activation='relu', regularizer="L2", weight_decay=0.0001)
        network = tflearn.max_pool_2d(network, 2)
        network = tflearn.conv_2d(
            network, KERNEL, 2, activation='relu', regularizer="L2", weight_decay=0.0001)
        network = tflearn.max_pool_2d(network, 2)
        network = tflearn.conv_2d(
            network, KERNEL, 2, activation='relu', regularizer="L2", weight_decay=0.0001)
        network = tflearn.max_pool_2d(network, 2)
        split_flat = tflearn.flatten(network)
        return split_flat