This article collects typical usage examples of the Python utils.TextLoader class. If you are wondering what utils.TextLoader does, how to call it, or what real calling code looks like, the curated examples below may help. You can also explore further usage examples from the containing module, utils.
Three code examples of utils.TextLoader are shown below, sorted by popularity by default.
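For orientation, here is a minimal sketch of the interface the examples below expect from TextLoader. It is not any project's real utils.TextLoader (notably, it takes a raw string rather than a data_dir, and real loaders cache their vocabulary to disk); it only reproduces the attributes and methods the examples actually call: chars, vocab, vocab_size, num_batches, reset_batch_pointer(), and next_batch().

import collections

import numpy as np

class MinimalTextLoader:
    """Sketch of the interface the examples below assume of utils.TextLoader."""

    def __init__(self, text, batch_size, seq_length):
        # Character vocabulary, most frequent characters first.
        counts = collections.Counter(text)
        self.chars = [c for c, _ in counts.most_common()]
        self.vocab = {c: i for i, c in enumerate(self.chars)}
        self.vocab_size = len(self.chars)
        # Encode the corpus and cut it into (input, target) batches;
        # targets are the inputs shifted left by one character.
        data = np.array([self.vocab[c] for c in text])
        self.num_batches = len(data) // (batch_size * seq_length)
        data = data[:self.num_batches * batch_size * seq_length]
        xdata = data
        ydata = np.roll(data, -1)
        self.x_batches = np.split(xdata.reshape(batch_size, -1), self.num_batches, axis=1)
        self.y_batches = np.split(ydata.reshape(batch_size, -1), self.num_batches, axis=1)
        self.pointer = 0

    def reset_batch_pointer(self):
        self.pointer = 0

    def next_batch(self):
        x, y = self.x_batches[self.pointer], self.y_batches[self.pointer]
        self.pointer += 1
        return x, y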
Example 1: setUp
# Required import: import utils [as alias]
# or: from utils import TextLoader [as alias]

def setUp(self):
    # Build a loader over the test fixture: 2 sequences of 5 characters per batch.
    self.data_loader = TextLoader("tests/test_data", batch_size=2, seq_length=5)
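The setUp above is a unittest.TestCase fixture. A hedged sketch of how the surrounding test case might look; the test method name and its shape assertions are illustrative, not taken from the original suite:

import unittest

from utils import TextLoader

class TestTextLoader(unittest.TestCase):
    def setUp(self):
        self.data_loader = TextLoader("tests/test_data", batch_size=2, seq_length=5)

    def test_next_batch_shapes(self):
        # Illustrative check: each batch should be batch_size x seq_length.
        self.data_loader.reset_batch_pointer()
        x, y = self.data_loader.next_batch()
        self.assertEqual(x.shape, (2, 5))
        self.assertEqual(y.shape, (2, 5))

if __name__ == "__main__":
    unittest.main()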
Example 2: train
# Required import: import utils [as alias]
# or: from utils import TextLoader [as alias]

import os
import time

import tensorflow as tf
from six.moves import cPickle

from model import Model
from utils import TextLoader

def train(args):
    data_loader = TextLoader(args.data_dir, args.batch_size, args.seq_length)
    args.vocab_size = data_loader.vocab_size

    # Persist the training arguments and the vocabulary so a separate
    # sampling script can rebuild the same model later.
    with open(os.path.join(args.save_dir, 'config.pkl'), 'wb') as f:
        cPickle.dump(args, f)
    with open(os.path.join(args.save_dir, 'chars_vocab.pkl'), 'wb') as f:
        cPickle.dump((data_loader.chars, data_loader.vocab), f)

    model = Model(args)

    with tf.Session() as sess:
        # global_variables_initializer/global_variables replace the
        # deprecated initialize_all_variables/all_variables calls.
        tf.global_variables_initializer().run()
        saver = tf.train.Saver(tf.global_variables())
        for e in range(args.num_epochs):
            # Exponentially decay the learning rate once per epoch.
            sess.run(tf.assign(model.lr, args.learning_rate * (args.decay_rate ** e)))
            data_loader.reset_batch_pointer()
            state = model.initial_state.eval()
            for b in range(data_loader.num_batches):
                start = time.time()
                x, y = data_loader.next_batch()
                # Carry the RNN state across batches within an epoch.
                feed = {model.input_data: x, model.targets: y, model.initial_state: state}
                train_loss, state, _ = sess.run([model.cost, model.final_state, model.train_op], feed)
                end = time.time()
                print("{}/{} (epoch {}), train_loss = {:.3f}, time/batch = {:.3f}"
                      .format(e * data_loader.num_batches + b,
                              args.num_epochs * data_loader.num_batches,
                              e, train_loss, end - start))
                if (e * data_loader.num_batches + b) % args.save_every == 0:
                    checkpoint_path = os.path.join(args.save_dir, 'model.ckpt')
                    saver.save(sess, checkpoint_path,
                               global_step=e * data_loader.num_batches + b)
                    print("model saved to {}".format(checkpoint_path))
Example 3: train
# Required import: import utils [as alias]
# or: from utils import TextLoader [as alias]

import os
import pickle
import time

import pandas as pd
import tensorflow as tf

from model import Model
from utils import TextLoader

def train(args):
    data_loader = TextLoader(args.data_dir, args.batch_size, args.seq_length)
    args.vocab_size = data_loader.vocab_size

    # Persist the training arguments and the vocabulary for later sampling.
    with open(os.path.join(args.save_dir, 'config.pkl'), 'wb') as f:
        pickle.dump(args, f)
    with open(os.path.join(args.save_dir, 'chars_vocab.pkl'), 'wb') as f:
        pickle.dump((data_loader.chars, data_loader.vocab), f)

    model = Model(args)

    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        saver = tf.train.Saver(tf.global_variables())
        train_loss_iterations = {'iteration': [], 'epoch': [], 'train_loss': [], 'val_loss': []}

        for e in range(args.num_epochs):
            sess.run(tf.assign(model.lr, args.learning_rate * (args.decay_rate ** e)))
            data_loader.reset_batch_pointer()
            state = sess.run(model.initial_state)
            for b in range(data_loader.num_batches):
                start = time.time()
                x, y = data_loader.next_batch()
                feed = {model.input_data: x, model.targets: y, model.initial_state: state}
                train_loss, state, _ = sess.run([model.cost, model.final_state, model.train_op], feed)
                end = time.time()
                batch_idx = e * data_loader.num_batches + b
                print("{}/{} (epoch {}), train_loss = {:.3f}, time/batch = {:.3f}"
                      .format(batch_idx,
                              args.num_epochs * data_loader.num_batches,
                              e, train_loss, end - start))
                train_loss_iterations['iteration'].append(batch_idx)
                train_loss_iterations['epoch'].append(e)
                train_loss_iterations['train_loss'].append(train_loss)

                if batch_idx % args.save_every == 0:
                    # Evaluate on the validation set. Only cost and final_state
                    # are fetched here: the original also ran model.train_op,
                    # which would have updated the weights on validation data.
                    state_val = sess.run(model.initial_state)
                    val_losses = []
                    for x_val, y_val in data_loader.val_batches:
                        feed_val = {model.input_data: x_val,
                                    model.targets: y_val,
                                    model.initial_state: state_val}
                        val_loss, state_val = sess.run([model.cost, model.final_state], feed_val)
                        val_losses.append(val_loss)
                    avg_val_loss = sum(val_losses) / len(val_losses)
                    print('val_loss: {:.3f}'.format(avg_val_loss))
                    train_loss_iterations['val_loss'].append(avg_val_loss)

                    checkpoint_path = os.path.join(args.save_dir, 'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=batch_idx)
                    print("model saved to {}".format(checkpoint_path))
                else:
                    # Keep all columns the same length; pandas turns None into NaN.
                    train_loss_iterations['val_loss'].append(None)

    pd.DataFrame(train_loss_iterations).to_csv(os.path.join(args.save_dir, 'log.csv'))
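Since train() writes its running losses to log.csv, the curves can be inspected afterwards with a few lines of pandas. The column names match the train_loss_iterations dict above; the "save" directory is an assumption about args.save_dir:

import pandas as pd

log = pd.read_csv("save/log.csv")
print(log[["iteration", "train_loss"]].tail())
# val_loss is only populated every args.save_every batches; drop the gaps.
print(log.dropna(subset=["val_loss"])[["iteration", "val_loss"]])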