本文整理匯總了Python中prettytensor.apply_optimizer方法的典型用法代碼示例。如果您正苦於以下問題:Python prettytensor.apply_optimizer方法的具體用法?Python prettytensor.apply_optimizer怎麽用?Python prettytensor.apply_optimizer使用的例子?那麽,這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類prettytensor的用法示例。
在下文中一共展示了prettytensor.apply_optimizer方法的3個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: prepare_trainer
# 需要導入模塊: import prettytensor [as 別名]
# 或者: from prettytensor import apply_optimizer [as 別名]
def prepare_trainer(self, generator_loss, discriminator_loss):
    """Build the generator and discriminator train ops (helper for init_opt).

    Variables are partitioned by name prefix: ``g_`` belongs to the
    generator, ``d_`` to the discriminator, so each optimizer only
    updates its own sub-network. Both learning rates are recorded in
    ``self.log_vars`` for logging.
    """
    trainable = tf.trainable_variables()
    gen_vars = [v for v in trainable if v.name.startswith('g_')]
    disc_vars = [v for v in trainable if v.name.startswith('d_')]

    # Adam with beta1=0.5 is the conventional choice for GAN training.
    gen_opt = tf.train.AdamOptimizer(self.generator_lr, beta1=0.5)
    self.generator_trainer = pt.apply_optimizer(
        gen_opt, losses=[generator_loss], var_list=gen_vars)

    disc_opt = tf.train.AdamOptimizer(self.discriminator_lr, beta1=0.5)
    self.discriminator_trainer = pt.apply_optimizer(
        disc_opt, losses=[discriminator_loss], var_list=disc_vars)

    self.log_vars.append(("g_learning_rate", self.generator_lr))
    self.log_vars.append(("d_learning_rate", self.discriminator_lr))
示例2: define_one_trainer
# 需要導入模塊: import prettytensor [as 別名]
# 或者: from prettytensor import apply_optimizer [as 別名]
def define_one_trainer(self, loss, learning_rate, key_word):
    """Build a train op for one sub-network (helper for init_opt).

    Selects every trainable variable whose name starts with ``key_word``
    and returns an Adam (beta1=0.5) train op minimizing ``loss`` over
    just those variables.
    """
    matching_vars = [v for v in tf.trainable_variables()
                     if v.name.startswith(key_word)]
    adam = tf.train.AdamOptimizer(learning_rate, beta1=0.5)
    return pt.apply_optimizer(adam, losses=[loss], var_list=matching_vars)
示例3: run_model
# 需要導入模塊: import prettytensor [as 別名]
# 或者: from prettytensor import apply_optimizer [as 別名]
def run_model(result):
    """Train ``result`` on MNIST for 10 epochs, printing test accuracy.

    Relies on module-level names defined elsewhere in the file:
    ``image_placeholder``, ``labels_placeholder``, ``FLAGS``,
    ``EPOCH_SIZE``, ``BATCH_SIZE`` and ``TEST_SIZE``.
    """
    accuracy = result.softmax.evaluate_classifier(
        labels_placeholder, phase=pt.Phase.test)
    train_images, train_labels = data_utils.mnist(training=True)
    test_images, test_labels = data_utils.mnist(training=False)
    optimizer = tf.train.GradientDescentOptimizer(0.01)
    train_op = pt.apply_optimizer(optimizer, losses=[result.loss])
    runner = pt.train.Runner(save_path=FLAGS.save_path)
    with tf.Session():
        for epoch in range(10):
            # Reshuffle the training set each epoch.
            train_images, train_labels = data_utils.permute_data(
                (train_images, train_labels))
            runner.train_model(
                train_op,
                result.loss,
                EPOCH_SIZE,
                feed_vars=(image_placeholder, labels_placeholder),
                feed_data=pt.train.feed_numpy(
                    BATCH_SIZE, train_images, train_labels),
                print_every=100)
            # Evaluate on the held-out test set after every epoch.
            classification_accuracy = runner.evaluate_model(
                accuracy,
                TEST_SIZE,
                feed_vars=(image_placeholder, labels_placeholder),
                feed_data=pt.train.feed_numpy(
                    BATCH_SIZE, test_images, test_labels))
            print("epoch", epoch + 1)
            print("accuracy", classification_accuracy)
開發者ID:PacktPublishing,項目名稱:Deep-Learning-with-TensorFlow-Second-Edition,代碼行數:35,代碼來源:pretty_tensor_digit.py