本文整理汇总了Python中loader.Loader.next方法的典型用法代码示例。如果您正苦于以下问题:Python Loader.next方法的具体用法?Python Loader.next怎么用?Python Loader.next使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类loader.Loader
的用法示例。
在下文中一共展示了Loader.next方法的1个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: ShapeAnalogy
# 需要导入模块: from loader import Loader [as 别名]
# 或者: from loader.Loader import next [as 别名]
#.........这里部分代码省略.........
_ = tf.image_summary("g", self.g_img, max_images=5)
self.l = tf.nn.l2_loss(d - self.g) / self.batch_size
_ = tf.scalar_summary("loss", self.l)
self.r = tf.nn.l2_loss(f_d - f_c - T) / self.batch_size
_ = tf.scalar_summary("regularizer", self.r)
def train(self, max_iter=450000,
          alpha=0.01, learning_rate=0.001,
          checkpoint_dir="checkpoint"):
    """Train a Deep Visual Analogy network.

    Args:
        max_iter: int, total number of training iterations [450000]
        alpha: float, weight of the regularizer term in the objective [0.01]
        learning_rate: float, initial learning rate (exponentially decayed) [0.001]
        checkpoint_dir: str, directory checkpoints are loaded from / saved to [checkpoint]
    """
    self.max_iter = max_iter
    self.alpha = alpha
    self.learning_rate = learning_rate
    self.checkpoint_dir = checkpoint_dir

    self.step = tf.Variable(0, trainable=False)

    # Combined objective: reconstruction loss plus weighted regularizer.
    self.loss = (self.l + self.alpha * self.r)
    _ = tf.scalar_summary("l_plus_r", self.loss)

    self.lr = tf.train.exponential_decay(self.learning_rate,
                                         global_step=self.step,
                                         decay_steps=100000,
                                         decay_rate=0.999)
    self.optim = tf.train.AdamOptimizer(self.lr, beta1=0.5) \
        .minimize(self.loss, global_step=self.step)

    merged_sum = tf.merge_all_summaries()
    writer = tf.train.SummaryWriter("./logs", self.sess.graph_def)

    tf.initialize_all_variables().run()
    self.load(self.checkpoint_dir)

    start_time = time.time()
    start_iter = self.step.eval()

    for step in xrange(start_iter, start_iter + self.max_iter):
        # BUG FIX: fetch the batch at the TOP of the loop body. The original
        # assigned (a, b, c, d) only at the end of the iteration, so the first
        # iteration hitting `step % 5 == 1` (e.g. step 1 on a fresh run) used
        # them before assignment and raised NameError.
        a, b, c, d = self.loader.next()
        feed = {self.a: a,
                self.b: b,
                self.c: c,
                self.d: d}

        # Periodic evaluation + checkpointing.
        if step != 0 and step % 10000 == 0:
            self.test(fixed=True)
            self.save(self.checkpoint_dir, step)

        # Periodic summary logging; console progress every 50 steps.
        if step % 5 == 1:
            summary_str, loss = self.sess.run([merged_sum, self.loss], feed_dict=feed)
            writer.add_summary(summary_str, step)

            if step % 50 == 1:
                print("Epoch: [%2d/%7d] time: %4.4f, loss: %.8f" % (step, self.max_iter, time.time() - start_time, loss))

        self.sess.run(self.optim, feed_dict=feed)
def test(self, name="test", options=None, fixed=False):
    """Generate and save analogy sample images for each transformation option.

    Args:
        name: str, prefix used in the saved sample filename.
        options: iterable of transformation options; defaults to self.options.
        fixed: bool, when True use the fixed 'rotate' test batch for every
            option instead of sampling a fresh batch per option.
    """
    # Idiom fix: compare against None with `is`, not `==`.
    if options is None:
        options = self.options

    sample_dir = "samples"
    t = strfnow()

    for option in options:
        if fixed:
            a, b, c, d = self.loader.tests['rotate']
        else:
            a, b, c, d = self.loader.next(set_option=option)
        feed = {self.a: a,
                self.b: b,
                self.c: c,
                self.d: d}

        fname = "%s/%s_option:%s_time:%s.png" % (sample_dir, name, option, t)

        # Fetch all three generated images in a single run() call instead of
        # three separate graph executions over the same feed.
        g_img, g2_img, g3_img = self.sess.run(
            [self.g_img, self.g2_img, self.g3_img], feed_dict=feed)
        imsave(fname, merge(a, b, c, d, g_img, g2_img, g3_img))