This article collects typical usage examples of the Python method prettytensor.apply_optimizer. If you are wondering what prettytensor.apply_optimizer does, how to use it, or where to find examples of it in practice, the hand-picked code samples below should help. You can also explore further usage examples from the prettytensor module that this method belongs to.
Three code examples of prettytensor.apply_optimizer are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
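Before the examples, here is a minimal, self-contained sketch of the basic call pattern. It is not taken from any of the examples below; the toy variable x, the quadratic loss, and the learning rate are made up for illustration. The idea it shows is the one the examples rely on: apply_optimizer wraps an optimizer's minimize step around a list of loss tensors and returns a training op, and keyword arguments such as var_list are forwarded to the optimizer.
import tensorflow as tf
import prettytensor as pt

# Hypothetical toy problem: minimize (x - 1)^2 over a single variable.
x = tf.get_variable('x', shape=[], initializer=tf.constant_initializer(3.0))
loss = tf.square(x - 1.0)

optimizer = tf.train.GradientDescentOptimizer(0.1)
# apply_optimizer builds a train op for the listed losses; the optional
# var_list restricts the update to the given variables.
train_op = pt.apply_optimizer(optimizer, losses=[loss], var_list=[x])

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(100):
        sess.run(train_op)
    print(sess.run(x))  # should approach 1.0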
Example 1: prepare_trainer
# Required modules: import prettytensor as pt; import tensorflow as tf
# Or: from prettytensor import apply_optimizer
def prepare_trainer(self, generator_loss, discriminator_loss):
    '''Helper function for init_opt'''
    all_vars = tf.trainable_variables()

    g_vars = [var for var in all_vars if
              var.name.startswith('g_')]
    d_vars = [var for var in all_vars if
              var.name.startswith('d_')]

    generator_opt = tf.train.AdamOptimizer(self.generator_lr, beta1=0.5)
    self.generator_trainer = \
        pt.apply_optimizer(generator_opt,
                           losses=[generator_loss],
                           var_list=g_vars)
    discriminator_opt = tf.train.AdamOptimizer(self.discriminator_lr,
                                               beta1=0.5)
    self.discriminator_trainer = \
        pt.apply_optimizer(discriminator_opt,
                           losses=[discriminator_loss],
                           var_list=d_vars)
    self.log_vars.append(("g_learning_rate", self.generator_lr))
    self.log_vars.append(("d_learning_rate", self.discriminator_lr))
Example 2: define_one_trainer
# Required modules: import prettytensor as pt; import tensorflow as tf
# Or: from prettytensor import apply_optimizer
def define_one_trainer(self, loss, learning_rate, key_word):
    '''Helper function for init_opt'''
    all_vars = tf.trainable_variables()
    train_vars = [var for var in all_vars if
                  var.name.startswith(key_word)]
    opt = tf.train.AdamOptimizer(learning_rate, beta1=0.5)
    trainer = pt.apply_optimizer(opt, losses=[loss], var_list=train_vars)
    return trainer
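This helper factors the per-network trainer construction out of Example 1, so a GAN model can build both training ops with two calls. A hypothetical caller might look like the sketch below; the loss and learning-rate names are assumptions made for illustration and are not taken from the original project, only the 'g_'/'d_' variable prefixes mirror Example 1.
# Hypothetical usage inside the same model class (attribute names assumed):
self.generator_trainer = self.define_one_trainer(
    generator_loss, self.generator_lr, 'g_')
self.discriminator_trainer = self.define_one_trainer(
    discriminator_loss, self.discriminator_lr, 'd_')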
Example 3: run_model
# Required modules: import prettytensor as pt; import tensorflow as tf
# Or: from prettytensor import apply_optimizer
def run_model(result):
    accuracy = result.softmax.evaluate_classifier(labels_placeholder,
                                                  phase=pt.Phase.test)
    train_images, train_labels = data_utils.mnist(training=True)
    test_images, test_labels = data_utils.mnist(training=False)

    optimizer = tf.train.GradientDescentOptimizer(0.01)
    train_op = pt.apply_optimizer(optimizer, losses=[result.loss])

    runner = pt.train.Runner(save_path=FLAGS.save_path)
    with tf.Session():
        for epoch in range(0, 10):
            train_images, train_labels = data_utils.permute_data(
                (train_images, train_labels))
            runner.train_model(train_op, result.loss, EPOCH_SIZE,
                               feed_vars=(image_placeholder, labels_placeholder),
                               feed_data=pt.train.feed_numpy(BATCH_SIZE,
                                                             train_images,
                                                             train_labels),
                               print_every=100)
            classification_accuracy = runner.evaluate_model(
                accuracy, TEST_SIZE,
                feed_vars=(image_placeholder, labels_placeholder),
                feed_data=pt.train.feed_numpy(BATCH_SIZE,
                                              test_images, test_labels))
            print("epoch", epoch + 1)
            print("accuracy", classification_accuracy)
Developer: PacktPublishing, Project: Deep-Learning-with-TensorFlow-Second-Edition, Lines of code: 35, Source file: pretty_tensor_digit.py