本文整理汇总了Python中tensorflow.contrib.slim.create_global_step方法的典型用法代码示例。如果您正苦于以下问题:Python slim.create_global_step方法的具体用法?Python slim.create_global_step怎么用?Python slim.create_global_step使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类tensorflow.contrib.slim
的用法示例。
在下文中一共展示了slim.create_global_step方法的2个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: __setup_training
# Required import: from tensorflow.contrib import slim
# Alternative form: from tensorflow.contrib.slim import create_global_step
def __setup_training(self, images, labels):
    """Build the classification training graph and run slim's training loop.

    Args:
        images: batched input image tensor fed to ``self.network_fn``.
        labels: one-hot label tensor matching the network's logits.
    """
    tf.logging.set_verbosity(tf.logging.INFO)
    logits, end_points = self.network_fn(images)

    ####################################
    # Specify the loss function.       #
    ####################################
    # Primary softmax cross-entropy loss on the main logits.
    total_loss = tf.losses.softmax_cross_entropy(
        logits=logits, onehot_labels=labels,
        label_smoothing=self.label_smoothing, weights=1.0)

    # Inception-style networks expose an auxiliary classifier head;
    # when present, weight it at 0.4 and fold it into the total loss.
    if 'AuxLogits' in end_points:
        aux_loss = tf.losses.softmax_cross_entropy(
            logits=end_points['AuxLogits'], onehot_labels=labels,
            label_smoothing=self.label_smoothing, weights=0.4, scope='aux_loss')
        total_loss = total_loss + aux_loss

    global_step = slim.create_global_step()
    # Variables to train (possibly a subset, per the trainer's config).
    variables_to_train = self.__get_variables_to_train()
    learning_rate = self.__configure_learning_rate(self.dataset.num_samples, global_step)
    optimizer = self.__configure_optimizer(learning_rate)
    train_op = slim.learning.create_train_op(
        total_loss, optimizer, variables_to_train=variables_to_train)
    self.__add_summaries(end_points, learning_rate, total_loss)

    ####################################
    # Kick off the training loop.      #
    ####################################
    # slim.learning.train handles checkpointing and summary writing.
    slim.learning.train(
        train_op,
        logdir=self.train_dir,
        init_fn=self.__get_init_fn(),
        number_of_steps=self.max_number_of_steps,
        log_every_n_steps=self.log_every_n_steps,
        save_summaries_secs=self.save_summaries_secs,
        save_interval_secs=self.save_interval_secs)
    return
示例2: __start_training
# Required import: from tensorflow.contrib import slim
# Alternative form: from tensorflow.contrib.slim import create_global_step
def __start_training(self):
    """Build the SSD training graph on VOC 2007+2012 data and start training."""
    tf.logging.set_verbosity(tf.logging.INFO)

    # Batched training data: images plus ground-truth labels/boxes/scores.
    image, filename, glabels, gbboxes, gdifficults, gclasses, localizations, gscores = \
        self.get_voc_2007_2012_train_data()

    # Forward pass through the shared SSD model definition.
    predictions, localisations, logits, end_points = g_ssd_model.get_model(
        image, weight_decay=self.weight_decay, is_training=True)

    # NOTE(review): 'localisations' (British spelling) are the model's
    # predicted boxes while 'localizations' come from the input pipeline —
    # confirm that get_losses really expects both, in this order.
    total_loss = g_ssd_model.get_losses(
        logits, localisations, gclasses, localizations, gscores)

    global_step = slim.create_global_step()
    # Variables to train (possibly a subset, per the trainer's config).
    variables_to_train = self.__get_variables_to_train()
    learning_rate = self.__configure_learning_rate(self.dataset.num_samples, global_step)
    optimizer = self.__configure_optimizer(learning_rate)
    train_op = slim.learning.create_train_op(
        total_loss, optimizer, variables_to_train=variables_to_train)
    self.__add_summaries(end_points, learning_rate, total_loss)
    self.setup_debugging(predictions, localizations, glabels, gbboxes, gdifficults)

    # Session config capping GPU memory at 90%; currently unused because the
    # session_config argument in the train() call below is commented out.
    gpu_opts = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
    sess_config = tf.ConfigProto(log_device_placement=False,
                                 gpu_options=gpu_opts)

    ####################################
    # Kick off the training loop.      #
    ####################################
    # Custom step function and a saver keeping up to 500 checkpoints.
    slim.learning.train(
        train_op,
        self.train_dir,
        train_step_fn=self.train_step,
        saver=tf_saver.Saver(max_to_keep=500),
        init_fn=self.__get_init_fn(),
        number_of_steps=self.max_number_of_steps,
        log_every_n_steps=self.log_every_n_steps,
        save_summaries_secs=self.save_summaries_secs,
        # session_config=sess_config,
        save_interval_secs=self.save_interval_secs)
    return