This article collects typical usage examples of the reader.ptb_producer method in Python. If you have been wondering how exactly to use Python's reader.ptb_producer, how to call it, or what real usage looks like, the curated code examples below may help. You can also explore further usage examples of the reader module that this method belongs to.
The following presents four code examples of the reader.ptb_producer method.
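All of the examples share the same call pattern, so here is a minimal sketch of it first, distilled from the snippets below. It assumes the TensorFlow 1.x, queue-based reader module from the PTB tutorial; raw_data and the loop bound are illustrative values taken from Example 1, not required parts of the API.

import tensorflow as tf
import reader

raw_data = [4, 3, 2, 1, 0, 5, 6, 1, 1, 1, 1, 0, 3, 4, 1]   # a flat list of word ids
batch_size, num_steps = 3, 2

# ptb_producer returns two int32 tensors of shape [batch_size, num_steps]:
# the inputs and the targets (the same ids shifted forward by one position).
x, y = reader.ptb_producer(raw_data, batch_size, num_steps)

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess, coord=coord)
    try:
        for _ in range(2):          # this toy sequence yields two minibatches per epoch
            inputs, targets = sess.run([x, y])
    finally:
        coord.request_stop()
        coord.join(threads)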
Example 1: testPtbProducer
# Required module: import reader [as alias]
# Or: from reader import ptb_producer [as alias]
# (the test below additionally needs: import tensorflow as tf)
def testPtbProducer(self):
  raw_data = [4, 3, 2, 1, 0, 5, 6, 1, 1, 1, 1, 0, 3, 4, 1]
  batch_size = 3
  num_steps = 2
  # The 15 ids are laid out as 3 rows of 5; each run yields the next
  # num_steps-wide slice, with targets shifted forward by one position.
  x, y = reader.ptb_producer(raw_data, batch_size, num_steps)
  with self.test_session() as session:
    coord = tf.train.Coordinator()
    tf.train.start_queue_runners(session, coord=coord)
    try:
      xval, yval = session.run([x, y])
      self.assertAllEqual(xval, [[4, 3], [5, 6], [1, 0]])
      self.assertAllEqual(yval, [[3, 2], [6, 1], [0, 3]])
      xval, yval = session.run([x, y])
      self.assertAllEqual(xval, [[2, 1], [1, 1], [3, 4]])
      self.assertAllEqual(yval, [[1, 0], [1, 1], [4, 1]])
    finally:
      coord.request_stop()
      coord.join()
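To see why those assertions hold, the slicing can be reproduced with plain NumPy. This is only a sketch of the batching scheme implied by the test, not the library's actual implementation:

import numpy as np

raw_data = np.array([4, 3, 2, 1, 0, 5, 6, 1, 1, 1, 1, 0, 3, 4, 1])
batch_size, num_steps = 3, 2

batch_len = len(raw_data) // batch_size                      # 5 ids per row
data = raw_data[:batch_size * batch_len].reshape(batch_size, batch_len)
# data == [[4, 3, 2, 1, 0],
#          [5, 6, 1, 1, 1],
#          [1, 0, 3, 4, 1]]

epoch_size = (batch_len - 1) // num_steps                    # 2 minibatches per epoch
for i in range(epoch_size):
    x = data[:, i * num_steps:(i + 1) * num_steps]           # inputs
    y = data[:, i * num_steps + 1:(i + 1) * num_steps + 1]   # next-word targets
    print(x.tolist(), y.tolist())
# 1st iteration: x = [[4, 3], [5, 6], [1, 0]], y = [[3, 2], [6, 1], [0, 3]]
# 2nd iteration: x = [[2, 1], [1, 1], [3, 4]], y = [[1, 0], [1, 1], [4, 1]]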
Example 2: __init__
# Required module: import reader [as alias]
# Or: from reader import ptb_producer [as alias]
def __init__(self, config, data, name=None):
  self.batch_size = batch_size = config.batch_size
  self.num_steps = num_steps = config.num_steps
  # number of [batch_size, num_steps] minibatches per epoch
  # (one id per row is reserved for the shifted targets)
  self.epoch_size = ((len(data) // batch_size) - 1) // num_steps
  self.input_data, self.targets = reader.ptb_producer(
      data, batch_size, num_steps, name=name)
Example 3: __init__
# Required module: import reader [as alias]
# Or: from reader import ptb_producer [as alias]
def __init__(self, config, data, name=None):
  '''
  num_steps: the number of timesteps (or unrolled steps)
  '''
  self.batch_size = batch_size = config.batch_size
  self.num_steps = num_steps = config.num_steps
  self.epoch_size = ((len(data) // batch_size) - 1) // num_steps
  self.input_data, self.targets = reader.ptb_producer(
      data, batch_size, num_steps, name=name)
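Examples 2 and 3 follow the input-wrapper pattern from the TensorFlow PTB tutorial. A sketch of how such a wrapper is typically instantiated follows; the class name PTBInput, the _Config object, and the toy data are illustrative assumptions rather than part of the snippets above.

import reader

class _Config(object):        # minimal, illustrative hyperparameter holder
  batch_size = 3
  num_steps = 2

class PTBInput(object):       # the wrapper from Examples 2-3, named here for illustration
  def __init__(self, config, data, name=None):
    self.batch_size = batch_size = config.batch_size
    self.num_steps = num_steps = config.num_steps
    self.epoch_size = ((len(data) // batch_size) - 1) // num_steps
    self.input_data, self.targets = reader.ptb_producer(
        data, batch_size, num_steps, name=name)

toy_data = [4, 3, 2, 1, 0, 5, 6, 1, 1, 1, 1, 0, 3, 4, 1]    # same ids as Example 1
inp = PTBInput(_Config(), toy_data, name="TrainInput")
print(inp.epoch_size)         # ((15 // 3) - 1) // 2 == 2 minibatches per epoch
# inp.input_data and inp.targets are [3, 2] tensors that a model graph would consume,
# evaluated inside a session with queue runners exactly as in Example 1.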
Example 4: get_config
# Required module: import reader [as alias]
# Or: from reader import ptb_producer [as alias]
# (this example additionally uses numpy as np and tensorpack training utilities)
def get_config():
    logger.auto_set_dir()

    # data3 holds the train / validation / test word-id arrays; wd2id maps words to ids.
    data3, wd2id = get_PennTreeBank()
    global VOCAB_SIZE
    VOCAB_SIZE = len(wd2id)
    steps_per_epoch = (data3[0].shape[0] // BATCH - 1) // SEQ_LEN

    # Each split is wrapped in ptb_producer and exposed to the trainer as a TensorInput.
    train_data = TensorInput(
        lambda: ptb_producer(data3[0], BATCH, SEQ_LEN),
        steps_per_epoch)
    val_data = TensorInput(
        lambda: ptb_producer(data3[1], BATCH, SEQ_LEN),
        (data3[1].shape[0] // BATCH - 1) // SEQ_LEN)
    test_data = TensorInput(
        lambda: ptb_producer(data3[2], BATCH, SEQ_LEN),
        (data3[2].shape[0] // BATCH - 1) // SEQ_LEN)

    M = Model()
    return TrainConfig(
        data=train_data,
        model=M,
        callbacks=[
            ModelSaver(),
            # Scale the learning rate by 0.8 once the epoch number exceeds 6.
            HyperParamSetterWithFunc(
                'learning_rate',
                lambda e, x: x * 0.80 if e > 6 else x),
            RunOp(lambda: M.reset_lstm_state()),
            InferenceRunner(val_data, [ScalarStats(['cost'])]),
            RunOp(lambda: M.reset_lstm_state()),
            InferenceRunner(
                test_data,
                [ScalarStats(['cost'], prefix='test')],
                tower_name='InferenceTowerTest'),
            RunOp(lambda: M.reset_lstm_state()),
            # Report perplexity = exp(cost / SEQ_LEN) for the validation and test costs.
            CallbackFactory(
                trigger=lambda self:
                [self.trainer.monitors.put_scalar(
                    'validation_perplexity',
                    np.exp(self.trainer.monitors.get_latest('validation_cost') / SEQ_LEN)),
                 self.trainer.monitors.put_scalar(
                    'test_perplexity',
                    np.exp(self.trainer.monitors.get_latest('test_cost') / SEQ_LEN))]
            ),
        ],
        max_epoch=70,
    )
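The final callback converts the monitored costs into perplexities via np.exp(cost / SEQ_LEN). A standalone restatement of that conversion (the helper's name and the sample numbers are mine; only the formula comes from the snippet above):

import numpy as np

def perplexity_from_sequence_cost(seq_cost, seq_len):
    # Perplexity = exp(average per-word cross-entropy).
    # seq_cost is assumed to be the cross-entropy summed over seq_len time steps,
    # which is how the 'cost' scalar is used in Example 4.
    return float(np.exp(seq_cost / seq_len))

# e.g. a summed cost of 161.0 over 35 steps gives exp(4.6) ~= 99.5
print(perplexity_from_sequence_cost(161.0, 35))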