本文整理汇总了Python中tensorflow.contrib.slim.dropout函数的典型用法代码示例。如果您正苦于以下问题:Python dropout函数的具体用法?Python dropout怎么用?Python dropout使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了dropout函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: conv_net_kelz
def conv_net_kelz(inputs):
    """Builds the ConvNet from Kelz 2016.

    Args:
        inputs: input tensor; assumed (batch, time, freq, channels) -- TODO confirm
            against caller.

    Returns:
        Tensor of shape (batch, time, 512): the output of the final dropout layer.
    """
    # All conv/FC layers share ReLU activations and the variance-scaling
    # initializer (FAN_AVG, uniform) used in the Kelz 2016 paper.
    with slim.arg_scope(
        [slim.conv2d, slim.fully_connected],
        activation_fn=tf.nn.relu,
        weights_initializer=tf.contrib.layers.variance_scaling_initializer(
            factor=2.0, mode='FAN_AVG', uniform=True)):
        net = slim.conv2d(
            inputs, 32, [3, 3], scope='conv1', normalizer_fn=slim.batch_norm)
        net = slim.conv2d(
            net, 32, [3, 3], scope='conv2', normalizer_fn=slim.batch_norm)
        # Pool only along the frequency axis ([1, 2]); time resolution is kept.
        net = slim.max_pool2d(net, [1, 2], stride=[1, 2], scope='pool2')
        net = slim.dropout(net, 0.25, scope='dropout2')
        net = slim.conv2d(
            net, 64, [3, 3], scope='conv3', normalizer_fn=slim.batch_norm)
        net = slim.max_pool2d(net, [1, 2], stride=[1, 2], scope='pool3')
        net = slim.dropout(net, 0.25, scope='dropout3')
        # Flatten while preserving batch and time dimensions.
        dims = tf.shape(net)
        net = tf.reshape(net, (dims[0], dims[1],
                               net.shape[2].value * net.shape[3].value), 'flatten4')
        net = slim.fully_connected(net, 512, scope='fc5')
        net = slim.dropout(net, 0.5, scope='dropout5')
        return net
示例2: build_graph
def build_graph(top_k):
    """Builds a VGG-style CNN character classifier on 64x64x1 inputs.

    Args:
        top_k: int, the k used for top-k accuracy/prediction ops.

    Returns:
        dict mapping names to the graph's placeholders and key tensors/ops
        (inputs, train_op, loss, accuracy, summaries, top-k predictions).
    """
    # Placeholders: keep_prob drives dropout, is_training drives batch norm.
    keep_prob = tf.placeholder(dtype=tf.float32, shape=[], name='keep_prob')
    images = tf.placeholder(dtype=tf.float32, shape=[None, 64, 64, 1], name='image_batch')
    labels = tf.placeholder(dtype=tf.int64, shape=[None], name='label_batch')
    is_training = tf.placeholder(dtype=tf.bool, shape=[], name='train_flag')
    with tf.device('/gpu:0'):
        # Every conv/FC layer is batch-normalized, conditioned on is_training.
        with slim.arg_scope([slim.conv2d, slim.fully_connected],
                            normalizer_fn=slim.batch_norm,
                            normalizer_params={'is_training': is_training}):
            conv3_1 = slim.conv2d(images, 64, [3, 3], 1, padding='SAME', scope='conv3_1')
            max_pool_1 = slim.max_pool2d(conv3_1, [2, 2], [2, 2], padding='SAME', scope='pool1')
            conv3_2 = slim.conv2d(max_pool_1, 128, [3, 3], padding='SAME', scope='conv3_2')
            max_pool_2 = slim.max_pool2d(conv3_2, [2, 2], [2, 2], padding='SAME', scope='pool2')
            conv3_3 = slim.conv2d(max_pool_2, 256, [3, 3], padding='SAME', scope='conv3_3')
            max_pool_3 = slim.max_pool2d(conv3_3, [2, 2], [2, 2], padding='SAME', scope='pool3')
            conv3_4 = slim.conv2d(max_pool_3, 512, [3, 3], padding='SAME', scope='conv3_4')
            conv3_5 = slim.conv2d(conv3_4, 512, [3, 3], padding='SAME', scope='conv3_5')
            max_pool_4 = slim.max_pool2d(conv3_5, [2, 2], [2, 2], padding='SAME', scope='pool4')
            flatten = slim.flatten(max_pool_4)
            # Dropout before each FC layer, controlled by the keep_prob placeholder.
            fc1 = slim.fully_connected(slim.dropout(flatten, keep_prob), 1024,
                                       activation_fn=tf.nn.relu, scope='fc1')
            logits = slim.fully_connected(slim.dropout(fc1, keep_prob), FLAGS.charset_size, activation_fn=None,
                                          scope='fc2')
        loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels))
        accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(logits, 1), labels), tf.float32))
        # Make the loss depend on batch-norm moving-average update ops so they
        # run every training step.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        if update_ops:
            updates = tf.group(*update_ops)
            loss = control_flow_ops.with_dependencies([updates], loss)
        global_step = tf.get_variable("step", [], initializer=tf.constant_initializer(0.0), trainable=False)
        # NOTE(review): learning_rate=0.1 is unusually high for Adam -- confirm intended.
        optimizer = tf.train.AdamOptimizer(learning_rate=0.1)
        train_op = slim.learning.create_train_op(loss, optimizer, global_step=global_step)
        probabilities = tf.nn.softmax(logits)
    tf.summary.scalar('loss', loss)
    tf.summary.scalar('accuracy', accuracy)
    merged_summary_op = tf.summary.merge_all()
    predicted_val_top_k, predicted_index_top_k = tf.nn.top_k(probabilities, k=top_k)
    accuracy_in_top_k = tf.reduce_mean(tf.cast(tf.nn.in_top_k(probabilities, labels, top_k), tf.float32))
    return {'images': images,
            'labels': labels,
            'keep_prob': keep_prob,
            'top_k': top_k,
            'global_step': global_step,
            'train_op': train_op,
            'loss': loss,
            'is_training': is_training,
            'accuracy': accuracy,
            'accuracy_top_k': accuracy_in_top_k,
            'merged_summary_op': merged_summary_op,
            'predicted_distribution': probabilities,
            'predicted_index_top_k': predicted_index_top_k,
            'predicted_val_top_k': predicted_val_top_k}
示例3: construct_embedding
def construct_embedding(self):
    """Builds a conv -> spatial softmax -> FC adaptation network.

    Reads self._pretrained_output (pre-trained inception features) and the
    various self._* hyperparameters; records the variable scope name in
    self._adaptation_scope and the spatial-softmax output in
    self.spatial_features.

    Returns:
        The embedding tensor of size self._embedding_size (L2-normalized
        when self._embedding_l2 is set).
    """
    is_training = self._is_training
    normalizer_params = {'is_training': is_training}
    with tf.variable_scope('tcn_net', reuse=self._reuse) as vs:
        self._adaptation_scope = vs.name
        # Conv layers: ReLU + batch norm + L2 regularization on weights/biases.
        with slim.arg_scope(
            [slim.layers.conv2d],
            activation_fn=tf.nn.relu,
            normalizer_fn=slim.batch_norm, normalizer_params=normalizer_params,
            weights_regularizer=slim.regularizers.l2_regularizer(
                self._l2_reg_weight),
            biases_regularizer=slim.regularizers.l2_regularizer(
                self._l2_reg_weight)):
            # FC layers: same activation/norm/regularization setup.
            with slim.arg_scope(
                [slim.layers.fully_connected],
                activation_fn=tf.nn.relu,
                normalizer_fn=slim.batch_norm, normalizer_params=normalizer_params,
                weights_regularizer=slim.regularizers.l2_regularizer(
                    self._l2_reg_weight),
                biases_regularizer=slim.regularizers.l2_regularizer(
                    self._l2_reg_weight)):
                # Input to embedder is pre-trained inception output.
                net = self._pretrained_output
                # Optionally add more conv layers.
                for num_filters in self._additional_conv_sizes:
                    net = slim.layers.conv2d(
                        net, num_filters, kernel_size=[3, 3], stride=[1, 1])
                    net = slim.dropout(net, keep_prob=self._conv_hidden_keep_prob,
                                       is_training=is_training)
                # Take the spatial soft arg-max of the last convolutional layer.
                # This is a form of spatial attention over the activations.
                # See more here: http://arxiv.org/abs/1509.06113.
                net = tf.contrib.layers.spatial_softmax(net)
                self.spatial_features = net
                # Add fully connected layers.
                net = slim.layers.flatten(net)
                for fc_hidden_size in self._fc_hidden_sizes:
                    net = slim.layers.fully_connected(net, fc_hidden_size)
                    if self._fc_hidden_keep_prob < 1.0:
                        net = slim.dropout(net, keep_prob=self._fc_hidden_keep_prob,
                                           is_training=is_training)
                # Connect last FC layer to embedding.
                net = slim.layers.fully_connected(net, self._embedding_size,
                                                  activation_fn=None)
                # Optionally L2 normalize the embedding.
                if self._embedding_l2:
                    net = tf.nn.l2_normalize(net, dim=1)
                return net
示例4: inference
def inference(inputs):
    """Small residual classifier for 28x28 grayscale (MNIST-style) inputs.

    Args:
        inputs: flat or image-shaped tensor; reshaped to (-1, 28, 28, 1).

    Returns:
        Logits tensor of shape (batch, 10) (no activation on the output layer).
    """
    x = tf.reshape(inputs, [-1, 28, 28, 1])
    # slim.conv2d already applies ReLU as its default activation_fn, so the
    # original extra tf.nn.relu(...) wrapper was redundant (ReLU is idempotent);
    # dropping it leaves behavior unchanged.
    conv_1 = slim.conv2d(x, 32, [3, 3])            # 28 * 28 * 32
    pool_1 = slim.max_pool2d(conv_1, [2, 2])       # 14 * 14 * 32
    block_1 = res_identity(pool_1, 32, [3, 3], 'layer_2')
    block_2 = res_change(block_1, 64, [3, 3], 'layer_3')
    block_3 = res_identity(block_2, 64, [3, 3], 'layer_4')
    block_4 = res_change(block_3, 32, [3, 3], 'layer_5')
    net_flatten = slim.flatten(block_4, scope='flatten')
    # NOTE(review): dropout keep_prob is a fixed 0.8 with no is_training flag,
    # so it stays active at inference time -- confirm this is intended.
    fc_1 = slim.fully_connected(slim.dropout(net_flatten, 0.8), 200,
                                activation_fn=tf.nn.tanh, scope='fc_1')
    output = slim.fully_connected(slim.dropout(fc_1, 0.8), 10,
                                  activation_fn=None, scope='output_layer')
    return output
示例5: _build_network
def _build_network(self, sess, is_training=True):
    """Builds the VGG-16 Faster R-CNN graph: backbone, RPN, RoI pooling, heads.

    Args:
        sess: TF session (unused in this body; kept for the caller's interface).
        is_training: bool; controls trainability of conv3-5 and dropout usage.

    Returns:
        (rois, cls_prob, bbox_pred) from the region proposal and
        classification heads.
    """
    with tf.variable_scope('vgg_16', 'vgg_16'):
        # select initializers
        if cfg.TRAIN.TRUNCATED:
            initializer = tf.truncated_normal_initializer(mean=0.0, stddev=0.01)
            initializer_bbox = tf.truncated_normal_initializer(mean=0.0, stddev=0.001)
        else:
            initializer = tf.random_normal_initializer(mean=0.0, stddev=0.01)
            initializer_bbox = tf.random_normal_initializer(mean=0.0, stddev=0.001)
        # VGG-16 backbone. conv1/conv2 are frozen (trainable=False),
        # matching the common Faster R-CNN fine-tuning recipe.
        net = slim.repeat(self._image, 2, slim.conv2d, 64, [3, 3],
                          trainable=False, scope='conv1')
        net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='pool1')
        net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3],
                          trainable=False, scope='conv2')
        net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='pool2')
        net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3],
                          trainable=is_training, scope='conv3')
        net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='pool3')
        net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3],
                          trainable=is_training, scope='conv4')
        net = slim.max_pool2d(net, [2, 2], padding='SAME', scope='pool4')
        net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3],
                          trainable=is_training, scope='conv5')
        self._act_summaries.append(net)
        self._layers['head'] = net
        # build the anchors for the image
        self._anchor_component()
        # region proposal network
        rois = self._region_proposal(net, is_training, initializer)
        # region of interest pooling
        if cfg.POOLING_MODE == 'crop':
            pool5 = self._crop_pool_layer(net, rois, "pool5")
        else:
            raise NotImplementedError
        # Two FC-4096 head layers; dropout only when training.
        pool5_flat = slim.flatten(pool5, scope='flatten')
        fc6 = slim.fully_connected(pool5_flat, 4096, scope='fc6')
        if is_training:
            fc6 = slim.dropout(fc6, keep_prob=0.5, is_training=True, scope='dropout6')
        fc7 = slim.fully_connected(fc6, 4096, scope='fc7')
        if is_training:
            fc7 = slim.dropout(fc7, keep_prob=0.5, is_training=True, scope='dropout7')
        # region classification
        cls_prob, bbox_pred = self._region_classification(fc7,
                                                          is_training,
                                                          initializer,
                                                          initializer_bbox)
        self._score_summaries.update(self._predictions)
        return rois, cls_prob, bbox_pred
示例6: _head_to_tail
def _head_to_tail(self, pool5, is_training, reuse=False):
    """Flattens pool5 and runs the two FC-4096 head layers.

    Dropout (keep_prob=0.5) is inserted after each FC layer only when
    is_training is true. Variables live under self._scope, optionally reused.
    """
    with tf.variable_scope(self._scope, self._scope, reuse=reuse):
        net = slim.flatten(pool5, scope='flatten')
        # Same fc -> (optional dropout) pattern twice, with per-layer scopes.
        for fc_scope, drop_scope in (('fc6', 'dropout6'), ('fc7', 'dropout7')):
            net = slim.fully_connected(net, 4096, scope=fc_scope)
            if is_training:
                net = slim.dropout(net, keep_prob=0.5, is_training=True,
                                   scope=drop_scope)
        return net
示例7: clone_fn
def clone_fn(batch_queue):
    """Allows data parallelism by creating multiple clones of network_fn.

    Dequeues a training batch, runs the MobileNet backbone plus a conv
    detection head, then computes detection interpretation and losses.

    Returns:
        end_points dict from network_fn, augmented with visualization tensors.
    """
    images, b_input_mask, b_labels_input, b_box_delta_input, b_box_input = batch_queue.dequeue()
    anchors = tf.convert_to_tensor(config.ANCHOR_SHAPE, dtype=tf.float32)
    end_points = network_fn(images)
    end_points["viz_images"] = images
    conv_ds_14 = end_points['MobileNet/conv_ds_14/depthwise_conv']
    # NOTE(review): is_training=True is hardcoded, so dropout is active even
    # when this graph is used for evaluation -- confirm intended.
    dropout = slim.dropout(conv_ds_14, keep_prob=0.5, is_training=True)
    # One output per anchor: class scores (+1 background/conf) and 4 box deltas.
    num_output = config.NUM_ANCHORS * (config.NUM_CLASSES + 1 + 4)
    predict = slim.conv2d(dropout, num_output, kernel_size=(3, 3), stride=1, padding='SAME',
                          activation_fn=None,
                          weights_initializer=tf.truncated_normal_initializer(stddev=0.0001),
                          scope="MobileNet/conv_predict")
    with tf.name_scope("Interpre_prediction") as scope:
        pred_box_delta, pred_class_probs, pred_conf, ious, det_probs, det_boxes, det_class = \
            interpre_prediction(predict, b_input_mask, anchors, b_box_input)
        end_points["viz_det_probs"] = det_probs
        end_points["viz_det_boxes"] = det_boxes
        end_points["viz_det_class"] = det_class
    with tf.name_scope("Losses") as scope:
        losses(b_input_mask, b_labels_input, ious, b_box_delta_input, pred_class_probs, pred_conf, pred_box_delta)
    return end_points
示例8: metric_net
def metric_net(img, scope, df_dim=64, reuse=False, train=True):
    """Discriminator-style metric network: strided convs + pools, then two FCs.

    Args:
        img: input image tensor.
        scope: name prefix; variables live under '<scope>_discriminator'.
        df_dim: base filter count, doubled at each conv stage.
        reuse: whether to reuse existing variables.
        train: passed to batch norm as is_training.

    Returns:
        FC output of width df_dim.
    """
    # Batch-norm config shared by the partial; applied wherever bn is used.
    bn = functools.partial(slim.batch_norm, scale=True, is_training=train,
                           decay=0.9, epsilon=1e-5, updates_collections=None)
    with tf.variable_scope(scope + '_discriminator', reuse=reuse):
        # Each stage: leaky-ReLU conv (stride 2) followed by 2x2 max pool.
        h0 = lrelu(conv(img, df_dim, 4, 2, scope='h0_conv'))  # h0 is (128 x 128 x df_dim)
        pool1 = Mpool(h0, [1, 2, 2, 1], [1, 2, 2, 1], padding='VALID')
        h1 = lrelu(conv(pool1, df_dim * 2, 4, 2, scope='h1_conv'))  # h1 is (32 x 32 x df_dim*2)
        pool2 = Mpool(h1, [1, 2, 2, 1], [1, 2, 2, 1], padding='VALID')
        h2 = lrelu(conv(pool2, df_dim * 4, 4, 2, scope='h2_conv'))  # h2 is (8 x 8 x df_dim*4)
        pool3 = Mpool(h2, [1, 2, 2, 1], [1, 2, 2, 1], padding='VALID')
        h3 = lrelu(conv(pool3, df_dim * 8, 4, 2, scope='h3_conv'))  # h3 is (2 x 2 x df_dim*4)
        pool4 = Mpool(h3, [1, 2, 2, 1], [1, 2, 2, 1], padding='VALID')
        # Flatten using the static shape, then FC head with 0.5 dropout.
        shape = pool4.get_shape()
        flatten_shape = shape[1].value * shape[2].value * shape[3].value
        h3_reshape = tf.reshape(pool4, [-1, flatten_shape], name='h3_reshape')
        fc1 = lrelu(FC(h3_reshape, df_dim * 2, scope='fc1'))
        dropout_fc1 = slim.dropout(fc1, 0.5, scope='dropout_fc1')
        net = FC(dropout_fc1, df_dim, scope='fc2')
        # print_activations(net)
        # print_activations(pool4)
        return net
示例9: build_arch_baseline
def build_arch_baseline(input, is_train: bool, num_classes: int):
    """Baseline CNN: two conv+pool stages, FC-1024, dropout, linear classifier.

    Args:
        input: input image tensor.
        is_train: whether variables are trainable.
        num_classes: width of the final (linear) output layer.

    Returns:
        Logits tensor of shape (batch, num_classes).
    """
    bias_initializer = tf.truncated_normal_initializer(
        mean=0.0, stddev=0.01)  # tf.constant_initializer(0.0)
    # The paper didn't mention any regularization; a common l2 regularizer on
    # weights is added here.
    weights_regularizer = tf.contrib.layers.l2_regularizer(5e-04)
    tf.logging.info('input shape: {}'.format(input.get_shape()))
    # weights_initializer=initializer,
    with slim.arg_scope([slim.conv2d, slim.fully_connected], trainable=is_train, biases_initializer=bias_initializer, weights_regularizer=weights_regularizer):
        with tf.variable_scope('relu_conv1') as scope:
            output = slim.conv2d(input, num_outputs=32, kernel_size=[
                5, 5], stride=1, padding='SAME', scope=scope, activation_fn=tf.nn.relu)
            output = slim.max_pool2d(output, [2, 2], scope='max_2d_layer1')
            tf.logging.info('output shape: {}'.format(output.get_shape()))
        with tf.variable_scope('relu_conv2') as scope:
            output = slim.conv2d(output, num_outputs=64, kernel_size=[
                5, 5], stride=1, padding='SAME', scope=scope, activation_fn=tf.nn.relu)
            output = slim.max_pool2d(output, [2, 2], scope='max_2d_layer2')
            tf.logging.info('output shape: {}'.format(output.get_shape()))
        output = slim.flatten(output)
        output = slim.fully_connected(output, 1024, scope='relu_fc3', activation_fn=tf.nn.relu)
        tf.logging.info('output shape: {}'.format(output.get_shape()))
        # NOTE(review): dropout has no is_training flag, so it is active at
        # inference as well -- confirm intended.
        output = slim.dropout(output, 0.5, scope='dp')
        output = slim.fully_connected(output, num_classes, scope='final_layer', activation_fn=None)
        tf.logging.info('output shape: {}'.format(output.get_shape()))
        return output
示例10: create_inner_block
def create_inner_block(
        incoming, scope, nonlinearity=tf.nn.elu,
        weights_initializer=tf.truncated_normal_initializer(1e-3),
        bias_initializer=tf.zeros_initializer(), regularizer=None,
        increase_dim=False, summarize_activations=True):
    """Builds the two-conv inner body of a residual block.

    First conv applies nonlinearity + batch norm; second conv is linear and
    un-normalized (the caller is expected to add the skip connection and
    final activation). When increase_dim is set, channel count doubles and
    the first conv uses stride 2.

    Returns:
        Output tensor of the second conv.
    """
    n = incoming.get_shape().as_list()[-1]
    stride = 1
    if increase_dim:
        n *= 2
        stride = 2
    incoming = slim.conv2d(
        incoming, n, [3, 3], stride, activation_fn=nonlinearity, padding="SAME",
        normalizer_fn=_batch_norm_fn, weights_initializer=weights_initializer,
        biases_initializer=bias_initializer, weights_regularizer=regularizer,
        scope=scope + "/1")
    if summarize_activations:
        tf.summary.histogram(incoming.name + "/activations", incoming)
    # Dropout between the two convs; keep_prob fixed at 0.6.
    incoming = slim.dropout(incoming, keep_prob=0.6)
    incoming = slim.conv2d(
        incoming, n, [3, 3], 1, activation_fn=None, padding="SAME",
        normalizer_fn=None, weights_initializer=weights_initializer,
        biases_initializer=bias_initializer, weights_regularizer=regularizer,
        scope=scope + "/2")
    return incoming
示例11: resface36
def resface36(images, keep_probability,
              phase_train=True, bottleneck_layer_size=512,
              weight_decay=0.0, reuse=None):
    """ResFace-36 face embedding network.

    conv name convention:
        conv[conv_layer]_[block_index]_[block_layer_index]

    Args:
        images: input image batch.
        keep_probability: dropout keep prob for the pre-bottleneck layer.
        phase_train: drives dropout's is_training flag.
        bottleneck_layer_size: width of the final embedding layer.
        weight_decay, reuse: accepted for interface compatibility; not used
            in this body.

    Returns:
        (embedding_tensor, '') -- second element is an empty string,
        presumably to match a (net, end_points) caller convention -- verify.
    """
    with tf.variable_scope('Conv1'):
        net = resface_pre(images, 64, scope='Conv1_pre')
        net = slim.repeat(net, 2, resface_block, 64, scope='Conv_1')
    with tf.variable_scope('Conv2'):
        net = resface_pre(net, 128, scope='Conv2_pre')
        net = slim.repeat(net, 4, resface_block, 128, scope='Conv_2')
    with tf.variable_scope('Conv3'):
        net = resface_pre(net, 256, scope='Conv3_pre')
        net = slim.repeat(net, 8, resface_block, 256, scope='Conv_3')
    with tf.variable_scope('Conv4'):
        net = resface_pre(net, 512, scope='Conv4_pre')
        # net = resface_block(Conv4_pre,512,scope='Conv4_1')
        net = slim.repeat(net, 1, resface_block, 512, scope='Conv4')
    with tf.variable_scope('Logits'):
        # pylint: disable=no-member
        # net = slim.avg_pool2d(net, net.get_shape()[1:3], padding='VALID',
        #                       scope='AvgPool')
        net = slim.flatten(net)
        net = slim.dropout(net, keep_probability, is_training=phase_train,
                           scope='Dropout')
    net = slim.fully_connected(net, bottleneck_layer_size, activation_fn=None,
                               scope='Bottleneck', reuse=False)
    return net, ''
示例12: LResnet50E_IR
def LResnet50E_IR(images, keep_probability,
                  phase_train=True, bottleneck_layer_size=512,
                  weight_decay=0.0, reuse=None):
    """LResNet50E-IR face embedding network (ArcFace-style backbone).

    conv name convention:
        conv[conv_layer]_[block_index]_[block_layer_index]
    For resnet50 n_units=[3,4,14,3]; since one unit per stage is the
    dim-reduction layer, repeat uses n_units=[2,3,13,2].

    Args:
        images: input image batch.
        keep_probability: dropout keep prob before the bottleneck FC.
        phase_train: drives dropout's is_training flag.
        bottleneck_layer_size: width of the embedding FC layer.
        weight_decay, reuse: accepted for interface compatibility; not used
            in this body.

    Returns:
        (embedding_tensor, '') -- second element is an empty string,
        presumably matching a (net, end_points) caller convention -- verify.
    """
    with tf.variable_scope('Conv1'):
        net = slim.conv2d(images, 64, scope='Conv1_pre')
        net = slim.batch_norm(net, scope='Conv1_bn')
    with tf.variable_scope('Conv2'):
        net = resface_block(net, 64, stride=2, dim_match=False, scope='Conv2_pre')
        net = slim.repeat(net, 2, resface_block, 64, 1, True, scope='Conv2_main')
    with tf.variable_scope('Conv3'):
        net = resface_block(net, 128, stride=2, dim_match=False, scope='Conv3_pre')
        net = slim.repeat(net, 3, resface_block, 128, 1, True, scope='Conv3_main')
    with tf.variable_scope('Conv4'):
        net = resface_block(net, 256, stride=2, dim_match=False, scope='Conv4_pre')
        net = slim.repeat(net, 13, resface_block, 256, 1, True, scope='Conv4_main')
    with tf.variable_scope('Conv5'):
        net = resface_block(net, 512, stride=2, dim_match=False, scope='Conv5_pre')
        net = slim.repeat(net, 2, resface_block, 512, 1, True, scope='Conv5_main')
    with tf.variable_scope('Logits'):
        # BN -> dropout -> flatten -> FC -> BN ("BN-Dropout-FC-BN" output head).
        net = slim.batch_norm(net, activation_fn=None, scope='bn1')
        net = slim.dropout(net, keep_probability, is_training=phase_train, scope='Dropout')
        net = slim.flatten(net)
    net = slim.fully_connected(net, bottleneck_layer_size, biases_initializer=tf.contrib.layers.xavier_initializer(), scope='fc1')
    net = slim.batch_norm(net, activation_fn=None, scope='Bottleneck')
    return net, ''
示例13: build_single_inceptionv1
def build_single_inceptionv1(train_tfdata, is_train, dropout_keep_prob):
    """Runs Inception-V1 and extracts a pooled Mixed_5c feature vector.

    Returns:
        (identity, feature): the inception_v1 logits output and the
        7x7-average-pooled, dropout-regularized Mixed_5c features squeezed
        to (batch, channels).
    """
    with slim.arg_scope(inception.inception_v1_arg_scope()):
        identity, end_points = inception.inception_v1(
            train_tfdata, dropout_keep_prob=dropout_keep_prob, is_training=is_train)
        pooled = slim.avg_pool2d(
            end_points['Mixed_5c'], [7, 7], stride=1, scope='MaxPool_0a_7x7')
        pooled = slim.dropout(pooled, dropout_keep_prob, scope='Dropout_0b')
    # Drop the 1x1 spatial dims left by the 7x7 pool.
    feature = tf.squeeze(pooled, [1, 2])
    return identity, feature
示例14: __init__
def __init__(self, is_training):
    """Builds a 100-label autoregressive multi-label classifier.

    The image (64x64x3) is flattened and concatenated with a 15-dim NLCD
    vector, passed through a 512-1024-512 FC encoder, then 100 per-label
    heads are built sequentially, each conditioned on the sigmoid outputs
    of all previous labels. Exposes self.output, self.ce_loss,
    self.l2_loss, self.total_loss and the input placeholders.
    """
    self.input_image = tf.placeholder(dtype=tf.float32, shape=[None, 64, 64, 3], name='input_image')
    self.input_label = tf.placeholder(dtype=tf.float32, shape=[None, 100], name='input_label')
    self.input_nlcd = tf.placeholder(dtype=tf.float32, shape=[None, 15], name='input_nlcd')
    # logits, end_points = resnet_v2.resnet_v2_50(self.input_image, num_classes=100, is_training=True)
    # flatten_hist = tf.reshape(self.input_image,[-1,96])
    self.keep_prob = tf.placeholder(tf.float32)
    weights_regularizer = slim.l2_regularizer(FLAGS.weight_decay)
    # Flatten the raw image and append the NLCD features.
    flatten_hist = tf.reshape(self.input_image, [-1, 3 * 64 * 64])
    flatten_hist = tf.concat([flatten_hist, self.input_nlcd], 1)
    x = slim.fully_connected(flatten_hist, 512, weights_regularizer=weights_regularizer, scope='decoder/fc_1')
    x = slim.fully_connected(x, 1024, weights_regularizer=weights_regularizer, scope='decoder/fc_2')
    flatten_hist = slim.fully_connected(x, 512, weights_regularizer=weights_regularizer, scope='decoder/fc_3')
    all_logits = []
    all_output = []
    # One head per label; head i sees the shared features plus the sigmoid
    # outputs of heads 0..i-1 (autoregressive chaining over labels).
    for i in range(100):
        if i == 0:
            current_input_x = flatten_hist
        else:
            current_output = tf.concat(all_output, 1)
            current_input_x = tf.concat([flatten_hist, current_output], 1)
        x = slim.fully_connected(current_input_x, 256, weights_regularizer=weights_regularizer)
        x = slim.fully_connected(x, 100, weights_regularizer=weights_regularizer)
        # x = slim.fully_connected(x, 17,weights_regularizer=weights_regularizer)
        x = slim.dropout(x, keep_prob=self.keep_prob, is_training=is_training)
        all_logits.append(slim.fully_connected(x, 1, activation_fn=None, weights_regularizer=weights_regularizer))
        all_output.append(tf.sigmoid(all_logits[i]))
    final_logits = tf.concat(all_logits, 1)
    final_output = tf.sigmoid(final_logits)
    self.output = final_output
    # Per-example sum of sigmoid cross-entropies over the 100 labels.
    self.ce_loss = tf.reduce_mean(tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels=self.input_label, logits=final_logits), 1))
    slim.losses.add_loss(self.ce_loss)
    tf.summary.scalar('ce_loss', self.ce_loss)
    # l2 loss
    self.l2_loss = tf.add_n(slim.losses.get_regularization_losses())
    tf.summary.scalar('l2_loss', self.l2_loss)
    # total loss
    self.total_loss = slim.losses.get_total_loss()
    tf.summary.scalar('total_loss', self.total_loss)
    # self.output = tf.sigmoid(x)
示例15: build_graph
def build_graph(top_k):
    """Builds a 3-conv CNN character classifier on 64x64x1 inputs.

    Simpler variant of the VGG-style graph above: no batch norm and no
    is_training placeholder; dropout is controlled solely by keep_prob.

    Args:
        top_k: int, the k used for top-k accuracy/prediction ops.

    Returns:
        dict mapping names to the graph's placeholders and key tensors/ops.
    """
    # with tf.device('/cpu:0'):
    keep_prob = tf.placeholder(dtype=tf.float32, shape=[], name='keep_prob')
    images = tf.placeholder(dtype=tf.float32, shape=[None, 64, 64, 1], name='image_batch')
    labels = tf.placeholder(dtype=tf.int64, shape=[None], name='label_batch')
    conv_1 = slim.conv2d(images, 64, [3, 3], 1, padding='SAME', scope='conv1')
    max_pool_1 = slim.max_pool2d(conv_1, [2, 2], [2, 2], padding='SAME')
    conv_2 = slim.conv2d(max_pool_1, 128, [3, 3], padding='SAME', scope='conv2')
    max_pool_2 = slim.max_pool2d(conv_2, [2, 2], [2, 2], padding='SAME')
    conv_3 = slim.conv2d(max_pool_2, 256, [3, 3], padding='SAME', scope='conv3')
    max_pool_3 = slim.max_pool2d(conv_3, [2, 2], [2, 2], padding='SAME')
    flatten = slim.flatten(max_pool_3)
    # Dropout before each FC layer, driven by the keep_prob placeholder.
    fc1 = slim.fully_connected(slim.dropout(flatten, keep_prob), 1024, activation_fn=tf.nn.tanh, scope='fc1')
    logits = slim.fully_connected(slim.dropout(fc1, keep_prob), FLAGS.charset_size, activation_fn=None, scope='fc2')
    # logits = slim.fully_connected(flatten, FLAGS.charset_size, activation_fn=None, reuse=reuse, scope='fc')
    loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=labels))
    accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(logits, 1), labels), tf.float32))
    global_step = tf.get_variable("step", [], initializer=tf.constant_initializer(0.0), trainable=False)
    # Exponentially decayed Adam learning rate (decay 0.97 every 2000 steps).
    rate = tf.train.exponential_decay(2e-4, global_step, decay_steps=2000, decay_rate=0.97, staircase=True)
    train_op = tf.train.AdamOptimizer(learning_rate=rate).minimize(loss, global_step=global_step)
    probabilities = tf.nn.softmax(logits)
    tf.summary.scalar('loss', loss)
    tf.summary.scalar('accuracy', accuracy)
    merged_summary_op = tf.summary.merge_all()
    predicted_val_top_k, predicted_index_top_k = tf.nn.top_k(probabilities, k=top_k)
    accuracy_in_top_k = tf.reduce_mean(tf.cast(tf.nn.in_top_k(probabilities, labels, top_k), tf.float32))
    return {'images': images,
            'labels': labels,
            'keep_prob': keep_prob,
            'top_k': top_k,
            'global_step': global_step,
            'train_op': train_op,
            'loss': loss,
            'accuracy': accuracy,
            'accuracy_top_k': accuracy_in_top_k,
            'merged_summary_op': merged_summary_op,
            'predicted_distribution': probabilities,
            'predicted_index_top_k': predicted_index_top_k,
            'predicted_val_top_k': predicted_val_top_k}