本文整理汇总了Python中reader.ptb_raw_data方法的典型用法代码示例。如果您正苦于以下问题:Python reader.ptb_raw_data方法的具体用法?Python reader.ptb_raw_data怎么用?Python reader.ptb_raw_data使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块reader的用法示例。
在下文中一共展示了reader.ptb_raw_data方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: main
# 需要导入模块: import reader [as 别名]
# 或者: from reader import ptb_raw_data [as 别名]
def main():
  """Train a PTBModel for a fixed 13 epochs, reporting perplexities per epoch.

  NOTE(review): depends on module-level `sys`, `reader`, `tf` and `PTBModel`
  defined elsewhere in this file. Uses pre-1.0 TensorFlow APIs
  (`initialize_all_variables`, `SummaryWriter`). Indentation reconstructed
  from a flattened source — verify block boundaries against the original.
  """
  sys.stdout.write("start ptb")
  # Empty path: ptb_raw_data presumably falls back to a default data
  # directory — TODO confirm against the reader implementation.
  raw_data = reader.ptb_raw_data("")
  train_data, valid_data, test_data, word_to_id = raw_data
  with tf.Graph().as_default(), tf.Session() as session:
    initializer = tf.random_uniform_initializer(-0.04, 0.04)
    with tf.variable_scope("model", reuse=None, initializer=initializer):
      model = PTBModel()
    saver = tf.train.Saver()
    tf.initialize_all_variables().run()
    model.train_writer = tf.train.SummaryWriter('./train', graph=session.graph)
    for i in range(13):  # hard-coded 13-epoch training schedule
      sys.stdout.write("Epoch: %d\n" % (i + 1))
      train_perplexity = model.train(session, train_data)
      sys.stdout.write("Epoch: %d Train Perplexity: %.3f\n" % (i + 1, train_perplexity))
      valid_perplexity = model.evaluate(session, valid_data)
      sys.stdout.write("Epoch: %d Valid Perplexity: %.3f\n" % (i + 1, valid_perplexity))
      test_perplexity = model.evaluate(session, test_data)
      sys.stdout.write("Epoch: %d Test Perplexity: %.3f\n" % (i + 1, test_perplexity))
    # model.predict(session, test_data, word_to_id)
    # Checkpoint once after training; the flattened source is ambiguous —
    # the original may have saved every epoch instead. TODO confirm.
    saver.save(session, 'model.ckpt')
示例2: testPtbRawData
# 需要导入模块: import reader [as 别名]
# 或者: from reader import ptb_raw_data [as 别名]
def testPtbRawData(self):
    """Smoke-test reader.ptb_raw_data on a tiny synthetic PTB corpus."""
    data_dir = tf.test.get_temp_dir()
    # Write the three split files ptb_raw_data expects to find.
    for split in ("train", "valid", "test"):
        path = os.path.join(data_dir, "ptb.%s.txt" % split)
        with tf.gfile.GFile(path, "w") as out:
            out.write(self._string_data)
    # Smoke test: loader should return (train, valid, test, vocabulary).
    result = reader.ptb_raw_data(data_dir)
    self.assertEqual(len(result), 4)
示例3: get_data
# 需要导入模块: import reader [as 别名]
# 或者: from reader import ptb_raw_data [as 别名]
def get_data(data_path, dataset):
    """Load raw language-model data for the requested dataset.

    Args:
        data_path: Directory containing the dataset files.
        dataset: One of 'ptb', 'enwik8' or 'text8'.

    Returns:
        A `(reader_module, raw_data)` tuple, where `raw_data` is whatever
        the dataset-specific ``*_raw_data`` loader returns.

    Raises:
        ValueError: If `dataset` is not one of the supported names.
    """
    if dataset == 'ptb':
        import reader
        raw_data = reader.ptb_raw_data(data_path)
    elif dataset == 'enwik8':
        from data import reader
        raw_data = reader.enwik8_raw_data(data_path)
    elif dataset == 'text8':
        from data import reader
        raw_data = reader.text8_raw_data(data_path)
    else:
        # Previously an unknown dataset fell through to an opaque
        # NameError on `raw_data`; fail fast with a clear message instead.
        raise ValueError(
            "Unsupported dataset: %r (expected 'ptb', 'enwik8' or 'text8')"
            % (dataset,))
    return reader, raw_data
示例4: testPtbRawData
# 需要导入模块: import reader [as 别名]
# 或者: from reader import ptb_raw_data [as 别名]
def testPtbRawData(self):
    """ptb_raw_data smoke test: write the three split files, check arity."""
    tmpdir = tf.test.get_temp_dir()
    suffixes = ["train", "valid", "test"]
    for suffix in suffixes:
        target = os.path.join(tmpdir, "ptb.%s.txt" % suffix)
        fh = tf.gfile.GFile(target, "w")
        with fh:
            fh.write(self._string_data)
    # Smoke test
    raw = reader.ptb_raw_data(tmpdir)
    self.assertEqual(4, len(raw))
示例5: main
# 需要导入模块: import reader [as 别名]
# 或者: from reader import ptb_raw_data [as 别名]
def main(_):
  """Build shared-variable train/valid/test PTB models and run training.

  Follows the TF-1.x PTB tutorial layout: three PTBModel instances share one
  "Model" variable scope (reuse=True for valid/test), driven by a
  tf.train.Supervisor-managed session. Depends on module-level `FLAGS`,
  `reader`, `get_config`, `PTBInput`, `PTBModel` and `run_epoch` defined
  elsewhere in this file. Indentation reconstructed from a flattened source.
  """
  if not FLAGS.data_path:
    raise ValueError("Must set --data_path to PTB data directory")
  raw_data = reader.ptb_raw_data(FLAGS.data_path)
  train_data, valid_data, test_data, _ = raw_data
  config = get_config()
  eval_config = get_config()
  # Test-time evaluation processes one token at a time.
  eval_config.batch_size = 1
  eval_config.num_steps = 1
  with tf.Graph().as_default():
    initializer = tf.random_uniform_initializer(-config.init_scale,
                                                config.init_scale)
    with tf.name_scope("Train"):
      train_input = PTBInput(config=config, data=train_data, name="TrainInput")
      with tf.variable_scope("Model", reuse=None, initializer=initializer):
        m = PTBModel(is_training=True, config=config, input_=train_input)
      tf.summary.scalar("Training Loss", m.cost)
      tf.summary.scalar("Learning Rate", m.lr)
    with tf.name_scope("Valid"):
      valid_input = PTBInput(config=config, data=valid_data, name="ValidInput")
      # reuse=True: share the variables created under "Model" above.
      with tf.variable_scope("Model", reuse=True, initializer=initializer):
        mvalid = PTBModel(is_training=False, config=config, input_=valid_input)
      tf.summary.scalar("Validation Loss", mvalid.cost)
    with tf.name_scope("Test"):
      test_input = PTBInput(config=eval_config, data=test_data, name="TestInput")
      with tf.variable_scope("Model", reuse=True, initializer=initializer):
        mtest = PTBModel(is_training=False, config=eval_config,
                         input_=test_input)
    # Supervisor handles init, checkpointing and queue runners.
    sv = tf.train.Supervisor(logdir=FLAGS.save_path)
    with sv.managed_session() as session:
      for i in range(config.max_max_epoch):
        # Learning rate is flat for the first max_epoch epochs, then decays
        # exponentially by lr_decay per epoch.
        lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch, 0.0)
        m.assign_lr(session, config.learning_rate * lr_decay)
        print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
        train_perplexity = run_epoch(session, m, eval_op=m.train_op,
                                     verbose=True)
        print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
        valid_perplexity = run_epoch(session, mvalid)
        print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))
      # Single test evaluation after all training epochs.
      test_perplexity = run_epoch(session, mtest)
      print("Test Perplexity: %.3f" % test_perplexity)
      if FLAGS.save_path:
        print("Saving model to %s." % FLAGS.save_path)
        sv.saver.save(session, FLAGS.save_path, global_step=sv.global_step)
示例6: main
# 需要导入模块: import reader [as 别名]
# 或者: from reader import ptb_raw_data [as 别名]
def main(_):
  """Train/valid/test PTB models with shared variables (pre-1.0 summary API).

  Same structure as the TF-1.x PTB tutorial main, but uses the deprecated
  `tf.scalar_summary` (pre-1.0 TensorFlow). Depends on module-level `FLAGS`,
  `reader`, `get_config`, `PTBInput`, `PTBModel` and `run_epoch` defined
  elsewhere in this file. Indentation reconstructed from a flattened source.
  """
  if not FLAGS.data_path:
    raise ValueError("Must set --data_path to PTB data directory")
  raw_data = reader.ptb_raw_data(FLAGS.data_path)
  train_data, valid_data, test_data, _ = raw_data
  config = get_config()
  eval_config = get_config()
  # Test-time evaluation processes one token at a time.
  eval_config.batch_size = 1
  eval_config.num_steps = 1
  with tf.Graph().as_default():
    initializer = tf.random_uniform_initializer(-config.init_scale,
                                                config.init_scale)
    with tf.name_scope("Train"):
      train_input = PTBInput(config=config, data=train_data, name="TrainInput")
      with tf.variable_scope("Model", reuse=None, initializer=initializer):
        m = PTBModel(is_training=True, config=config, input_=train_input)
      tf.scalar_summary("Training Loss", m.cost)
      tf.scalar_summary("Learning Rate", m.lr)
    with tf.name_scope("Valid"):
      valid_input = PTBInput(config=config, data=valid_data, name="ValidInput")
      # reuse=True: share the variables created under "Model" above.
      with tf.variable_scope("Model", reuse=True, initializer=initializer):
        mvalid = PTBModel(is_training=False, config=config, input_=valid_input)
      tf.scalar_summary("Validation Loss", mvalid.cost)
    with tf.name_scope("Test"):
      test_input = PTBInput(config=eval_config, data=test_data, name="TestInput")
      with tf.variable_scope("Model", reuse=True, initializer=initializer):
        mtest = PTBModel(is_training=False, config=eval_config,
                         input_=test_input)
    sv = tf.train.Supervisor(logdir=FLAGS.save_path)
    with sv.managed_session() as session:
      # session = sv.managed_session()
      # with tf.Session() as session:
      for i in range(config.max_max_epoch):
        # Flat LR for the first max_epoch epochs, then exponential decay.
        lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch, 0.0)
        m.assign_lr(session, config.learning_rate * lr_decay)
        print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
        train_perplexity = run_epoch(session, m, eval_op=m.train_op,
                                     verbose=True)
        print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
        valid_perplexity = run_epoch(session, mvalid)
        print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))
      # Single test evaluation after all training epochs.
      test_perplexity = run_epoch(session, mtest)
      print("Test Perplexity: %.3f" % test_perplexity)
      if FLAGS.save_path:
        print("Saving model to %s." % FLAGS.save_path)
        sv.saver.save(session, FLAGS.save_path, global_step=sv.global_step)
示例7: main
# 需要导入模块: import reader [as 别名]
# 或者: from reader import ptb_raw_data [as 别名]
def main(_):
  """Train a PTB model with a three-stage step learning-rate schedule.

  Uses AUTO_REUSE scoping and variance-scaling initialization instead of the
  tutorial's uniform init. Depends on module-level `FLAGS`, `reader`,
  `get_config`, `PTBModel` and `run_epoch` defined elsewhere in this file;
  `config` must provide `nr_epoch_first_stage` / `nr_epoch_second_stage`.
  Indentation reconstructed from a flattened source.
  """
  if not FLAGS.data_path:
    raise ValueError("Must set --data_path to PTB data directory")
  raw_data = reader.ptb_raw_data(FLAGS.data_path)
  train_data, valid_data, test_data, _ = raw_data
  config = get_config()
  eval_config = get_config()
  # Test-time evaluation processes one token at a time.
  eval_config.batch_size = 1
  eval_config.num_steps = 1
  with tf.Graph().as_default(), tf.Session() as session:
    initializer = tf.initializers.variance_scaling(distribution='uniform')
    with tf.variable_scope("model", reuse=tf.AUTO_REUSE, initializer=initializer):
      m = PTBModel(is_training=True, config=config)
    # reuse=True: valid/test models share the training model's variables.
    with tf.variable_scope("model", reuse=True, initializer=initializer):
      mvalid = PTBModel(is_training=False, config=config)
      mtest = PTBModel(is_training=False, config=eval_config)
    tf.global_variables_initializer().run()

    def get_learning_rate(epoch, config):
      """Step schedule: base LR, then x0.1, then x0.01 by epoch stage."""
      base_lr = config.learning_rate
      if epoch <= config.nr_epoch_first_stage:
        return base_lr
      elif epoch <= config.nr_epoch_second_stage:
        return base_lr * 0.1
      else:
        return base_lr * 0.01

    for i in range(config.max_epoch):
      m.assign_lr(session, get_learning_rate(i, config))
      print("Epoch: %d Learning rate: %f"
            % (i + 1, session.run(m.lr)))
      train_perplexity = run_epoch(
          session, m, train_data, m.train_op, verbose=True)
      print("Epoch: %d Train Perplexity: %.3f"
            % (i + 1, train_perplexity))
      valid_perplexity = run_epoch(
          session, mvalid, valid_data, tf.no_op())
      print("Epoch: %d Valid Perplexity: %.3f"
            % (i + 1, valid_perplexity))
    # Single test evaluation after all training epochs.
    test_perplexity = run_epoch(
        session, mtest, test_data, tf.no_op())
    print("Test Perplexity: %.3f" % test_perplexity)
示例8: main
# 需要导入模块: import reader [as 别名]
# 或者: from reader import ptb_raw_data [as 别名]
def main(_):
  """Train-only PTB run with explicit CPU/GPU device selection.

  `config.device == '-1'` selects CPU (honoring OMP_NUM_THREADS for
  intra-op parallelism); any other value selects that GPU index.
  Validation/test model construction is commented out, so this variant only
  trains. Depends on module-level `FLAGS`, `reader`, `get_config`, `PTBModel`
  and `run_epoch` defined elsewhere in this file. Indentation reconstructed
  from a flattened source.
  """
  if not FLAGS.data_path:
    raise ValueError("Must set --data_path to PTB data directory")
  raw_data = reader.ptb_raw_data(FLAGS.data_path)
  train_data, valid_data, test_data, _ = raw_data
  config = get_config()
  eval_config = get_config()
  eval_config.batch_size = 1
  eval_config.num_steps = 1
  if config.device == '-1':
    tf_dev = '/cpu:0'
  else:
    tf_dev = '/gpu:' + config.device
  print(tf_dev)
  tconfig = tf.ConfigProto(allow_soft_placement=True)
  if tf_dev.find('cpu') >= 0:  # cpu version
    # Default to a single thread when OMP_NUM_THREADS is unset.
    num_threads = os.getenv('OMP_NUM_THREADS', 1)
    tconfig = tf.ConfigProto(allow_soft_placement=True, intra_op_parallelism_threads=int(num_threads))
  with tf.Graph().as_default(), tf.device(tf_dev), tf.Session(config=tconfig) as session:
    initializer = tf.random_uniform_initializer(-config.init_scale,
                                                config.init_scale)
    with tf.variable_scope("model", reuse=None, initializer=initializer):
      m = PTBModel(is_training=True, config=config)
    # with tf.variable_scope("model", reuse=True, initializer=initializer):
    #   mvalid = PTBModel(is_training=False, config=config)
    #   mtest = PTBModel(is_training=False, config=eval_config)
    tf.initialize_all_variables().run()
    for i in range(config.max_max_epoch):
      # NOTE(review): decay uses `i - config.max_epoch`, not `i + 1 - ...`
      # as in the sibling mains — off-by-one relative to them; confirm
      # which schedule is intended.
      lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)
      m.assign_lr(session, config.learning_rate * lr_decay)
      print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
      train_perplexity = run_epoch(session, m, train_data, m.train_op, config.iters,
                                   verbose=True)
      # print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
      # valid_perplexity = run_epoch(session, mvalid, valid_data, tf.no_op())
      # print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))
      # test_perplexity = run_epoch(session, mtest, test_data, tf.no_op())
      # print("Test Perplexity: %.3f" % test_perplexity)
示例9: main
# 需要导入模块: import reader [as 别名]
# 或者: from reader import ptb_raw_data [as 别名]
def main(_):
  """Benchmark-style PTB run: fixed LR, per-epoch batch-time accounting.

  Same device-selection logic as the sibling main (CPU when
  `config.device == '-1'`, else that GPU index). Trains with a constant
  learning rate (decay is commented out), accumulates average batch time
  per epoch, records train perplexity every other epoch, then evaluates the
  test model once. Depends on module-level `FLAGS`, `reader`, `get_config`,
  `PTBModel` and `run_epoch` (which here returns a `(perplexity,
  average_batch_time)` pair) defined elsewhere in this file. Indentation
  reconstructed from a flattened source.
  """
  if not FLAGS.data_path:
    raise ValueError("Must set --data_path to PTB data directory")
  raw_data = reader.ptb_raw_data(FLAGS.data_path)
  train_data, valid_data, test_data, _ = raw_data
  config = get_config()
  eval_config = get_config()
  # Test-time evaluation processes one token at a time.
  eval_config.batch_size = 1
  eval_config.num_steps = 1
  if config.device == '-1':
    tf_dev = '/cpu:0'
  else:
    tf_dev = '/gpu:' + config.device
  print(tf_dev)
  tconfig = tf.ConfigProto(allow_soft_placement=True)
  if tf_dev.find('cpu') >= 0:  # cpu version
    # Default to a single thread when OMP_NUM_THREADS is unset.
    num_threads = os.getenv('OMP_NUM_THREADS', 1)
    tconfig = tf.ConfigProto(allow_soft_placement=True, intra_op_parallelism_threads=int(num_threads))
  with tf.Graph().as_default(), tf.device(tf_dev), tf.Session(config=tconfig) as session:
    initializer = tf.random_uniform_initializer(-config.init_scale,
                                                config.init_scale)
    with tf.variable_scope("model", reuse=None, initializer=initializer):
      m = PTBModel(is_training=True, config=config)
    with tf.variable_scope("model", reuse=True, initializer=initializer):
      #mvalid = PTBModel(is_training=False, config=config)
      mtest = PTBModel(is_training=False, config=eval_config)
    tf.global_variables_initializer().run()
    total_average_batch_time = 0.0
    epochs_info = []
    for i in range(config.max_max_epoch):
      #lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)
      #m.assign_lr(session, config.learning_rate * lr_decay)
      # Constant learning rate for benchmarking (decay deliberately disabled).
      m.assign_lr(session, config.learning_rate)
      print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
      train_perplexity, average_batch_time = run_epoch(session, m, train_data, m.train_op, verbose=True)
      total_average_batch_time += average_batch_time
      print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
      if i % 2 == 0:
        # Snapshot perplexity every other epoch for the summary line below.
        epochs_info.append('%d:_:%.3f'%(i, train_perplexity))
      # valid_perplexity = run_epoch(session, mvalid, valid_data, tf.no_op())
      # print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))
    print("average_batch_time: %.6f" % (total_average_batch_time/int(config.max_max_epoch)))
    print('epoch_info:'+','.join(epochs_info))
    test_perplexity, test_average_batch_time = run_epoch(session, mtest, test_data, tf.no_op())
    print("Test Perplexity: %.3f" % test_perplexity)