当前位置: 首页>>代码示例>>Python>>正文


Python reader.ptb_raw_data方法代码示例

本文整理汇总了Python中reader.ptb_raw_data方法的典型用法代码示例。如果您正苦于以下问题:Python reader.ptb_raw_data方法的具体用法?Python reader.ptb_raw_data怎么用?Python reader.ptb_raw_data使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在reader的用法示例。


在下文中一共展示了reader.ptb_raw_data方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: main

# 需要导入模块: import reader [as 别名]
# 或者: from reader import ptb_raw_data [as 别名]
def main():
    """Train a PTB language model for 13 epochs and checkpoint the result."""
    sys.stdout.write("start ptb")

    # ptb_raw_data returns the three data splits plus the vocabulary mapping.
    train_data, valid_data, test_data, word_to_id = reader.ptb_raw_data("")

    with tf.Graph().as_default(), tf.Session() as session:
        initializer = tf.random_uniform_initializer(-0.04, 0.04)
        with tf.variable_scope("model", reuse=None, initializer=initializer):
            lm = PTBModel()

        saver = tf.train.Saver()
        tf.initialize_all_variables().run()
        # Summaries for TensorBoard go under ./train.
        lm.train_writer = tf.train.SummaryWriter('./train', graph=session.graph)

        for epoch in range(13):
            sys.stdout.write("Epoch: %d\n" % (epoch + 1))
            train_perplexity = lm.train(session, train_data)
            sys.stdout.write("Epoch: %d Train Perplexity: %.3f\n" % (epoch + 1, train_perplexity))
            valid_perplexity = lm.evaluate(session, valid_data)
            sys.stdout.write("Epoch: %d Valid Perplexity: %.3f\n" % (epoch + 1, valid_perplexity))
            test_perplexity = lm.evaluate(session, test_data)
            sys.stdout.write("Epoch: %d Test Perplexity: %.3f\n" % (epoch + 1, test_perplexity))

        # lm.predict(session, test_data, word_to_id)  # inference path, currently disabled
        saver.save(session, 'model.ckpt')
开发者ID:katsugeneration,项目名称:tensor-fsmn,代码行数:27,代码来源:ptb.py

示例2: testPtbRawData

# 需要导入模块: import reader [as 别名]
# 或者: from reader import ptb_raw_data [as 别名]
def testPtbRawData(self):
    """Smoke-test reader.ptb_raw_data against a minimal temporary corpus."""
    temp_dir = tf.test.get_temp_dir()
    # Write the same sample text as each of the three expected split files.
    for split in ("train", "valid", "test"):
      split_path = os.path.join(temp_dir, "ptb.%s.txt" % split)
      with tf.gfile.GFile(split_path, "w") as handle:
        handle.write(self._string_data)
    # The loader should return a 4-tuple.
    result = reader.ptb_raw_data(temp_dir)
    self.assertEqual(len(result), 4)
开发者ID:ringringyi,项目名称:DOTA_models,代码行数:11,代码来源:reader_test.py

示例3: get_data

# 需要导入模块: import reader [as 别名]
# 或者: from reader import ptb_raw_data [as 别名]
def get_data(data_path, dataset):
  """Load raw data for the named dataset.

  Args:
    data_path: Directory containing the dataset files.
    dataset: One of 'ptb', 'enwik8' or 'text8'.

  Returns:
    A (reader_module, raw_data) pair, where raw_data is whatever the
    dataset-specific *_raw_data loader returns.

  Raises:
    ValueError: If `dataset` is not a recognized dataset name.
  """
  if dataset == 'ptb':
    import reader
    raw_data = reader.ptb_raw_data(data_path)
  elif dataset == 'enwik8':
    from data import reader
    raw_data = reader.enwik8_raw_data(data_path)
  elif dataset == 'text8':
    from data import reader
    raw_data = reader.text8_raw_data(data_path)
  else:
    # Previously an unknown name fell through every branch and crashed with
    # an UnboundLocalError at the return; fail fast with a clear message.
    raise ValueError(
        "Unknown dataset: %r (expected 'ptb', 'enwik8' or 'text8')" % dataset)
  return reader, raw_data
开发者ID:wenwei202,项目名称:iss-rnns,代码行数:13,代码来源:rhn_train.py

示例4: testPtbRawData

# 需要导入模块: import reader [as 别名]
# 或者: from reader import ptb_raw_data [as 别名]
def testPtbRawData(self):
    """Smoke-test that reader.ptb_raw_data parses a tiny on-disk corpus."""
    work_dir = tf.test.get_temp_dir()
    # Materialize identical sample text for each of the three splits.
    for split_name in ("train", "valid", "test"):
        target = os.path.join(work_dir, "ptb.%s.txt" % split_name)
        with tf.gfile.GFile(target, "w") as writer:
            writer.write(self._string_data)
    # The loader is expected to produce a 4-tuple.
    parsed = reader.ptb_raw_data(work_dir)
    self.assertEqual(len(parsed), 4)
开发者ID:woodfrog,项目名称:ActionRecognition,代码行数:11,代码来源:reader_test.py

示例5: main

# 需要导入模块: import reader [as 别名]
# 或者: from reader import ptb_raw_data [as 别名]
def main(_):
  """Train a PTB language model, then report validation and test perplexity.

  Requires --data_path to point at the PTB data directory; checkpoints are
  written under --save_path when it is set.
  """
  if not FLAGS.data_path:
    raise ValueError("Must set --data_path to PTB data directory")

  # raw_data is a 4-tuple (train, valid, test, vocabulary); the last item is unused here.
  raw_data = reader.ptb_raw_data(FLAGS.data_path)
  train_data, valid_data, test_data, _ = raw_data

  config = get_config()
  eval_config = get_config()
  # Evaluation runs one token at a time so the full test set can be scored.
  eval_config.batch_size = 1
  eval_config.num_steps = 1

  with tf.Graph().as_default():
    initializer = tf.random_uniform_initializer(-config.init_scale,
                                                config.init_scale)

    with tf.name_scope("Train"):
      train_input = PTBInput(config=config, data=train_data, name="TrainInput")
      with tf.variable_scope("Model", reuse=None, initializer=initializer):
        m = PTBModel(is_training=True, config=config, input_=train_input)
      tf.summary.scalar("Training Loss", m.cost)
      tf.summary.scalar("Learning Rate", m.lr)

    with tf.name_scope("Valid"):
      valid_input = PTBInput(config=config, data=valid_data, name="ValidInput")
      # reuse=True shares the weights created under the "Model" scope above.
      with tf.variable_scope("Model", reuse=True, initializer=initializer):
        mvalid = PTBModel(is_training=False, config=config, input_=valid_input)
      tf.summary.scalar("Validation Loss", mvalid.cost)

    with tf.name_scope("Test"):
      test_input = PTBInput(config=eval_config, data=test_data, name="TestInput")
      with tf.variable_scope("Model", reuse=True, initializer=initializer):
        mtest = PTBModel(is_training=False, config=eval_config,
                         input_=test_input)

    # Supervisor manages initialization, checkpointing and queue runners.
    sv = tf.train.Supervisor(logdir=FLAGS.save_path)
    with sv.managed_session() as session:
      for i in range(config.max_max_epoch):
        # Learning rate decays exponentially once i+1 exceeds max_epoch.
        lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch, 0.0)
        m.assign_lr(session, config.learning_rate * lr_decay)

        print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
        train_perplexity = run_epoch(session, m, eval_op=m.train_op,
                                     verbose=True)
        print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
        valid_perplexity = run_epoch(session, mvalid)
        print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))

      test_perplexity = run_epoch(session, mtest)
      print("Test Perplexity: %.3f" % test_perplexity)

      if FLAGS.save_path:
        print("Saving model to %s." % FLAGS.save_path)
        sv.saver.save(session, FLAGS.save_path, global_step=sv.global_step)
开发者ID:ringringyi,项目名称:DOTA_models,代码行数:56,代码来源:ptb_word_lm.py

示例6: main

# 需要导入模块: import reader [as 别名]
# 或者: from reader import ptb_raw_data [as 别名]
def main(_):
  """Train a PTB language model and report perplexity (legacy-summary variant).

  Same structure as the canonical ptb_word_lm main(), but uses the older
  tf.scalar_summary API. Requires --data_path; saves to --save_path if set.
  """
  if not FLAGS.data_path:
    raise ValueError("Must set --data_path to PTB data directory")

  # raw_data is a 4-tuple (train, valid, test, vocabulary); vocabulary unused.
  raw_data = reader.ptb_raw_data(FLAGS.data_path)
  train_data, valid_data, test_data, _ = raw_data

  config = get_config()
  eval_config = get_config()
  # One token per step so the test set is evaluated exactly.
  eval_config.batch_size = 1
  eval_config.num_steps = 1

  with tf.Graph().as_default():
    initializer = tf.random_uniform_initializer(-config.init_scale,
                                                config.init_scale)

    with tf.name_scope("Train"):
      train_input = PTBInput(config=config, data=train_data, name="TrainInput")
      with tf.variable_scope("Model", reuse=None, initializer=initializer):
        m = PTBModel(is_training=True, config=config, input_=train_input)
      tf.scalar_summary("Training Loss", m.cost)
      tf.scalar_summary("Learning Rate", m.lr)

    with tf.name_scope("Valid"):
      valid_input = PTBInput(config=config, data=valid_data, name="ValidInput")
      # reuse=True shares the weights built under the "Model" scope above.
      with tf.variable_scope("Model", reuse=True, initializer=initializer):
        mvalid = PTBModel(is_training=False, config=config, input_=valid_input)
      tf.scalar_summary("Validation Loss", mvalid.cost)

    with tf.name_scope("Test"):
      test_input = PTBInput(config=eval_config, data=test_data, name="TestInput")
      with tf.variable_scope("Model", reuse=True, initializer=initializer):
        mtest = PTBModel(is_training=False, config=eval_config,
                         input_=test_input)

    sv = tf.train.Supervisor(logdir=FLAGS.save_path)
    with sv.managed_session() as session:
    # session = sv.managed_session()
    # with tf.Session() as session:
      for i in range(config.max_max_epoch):
        # Exponential learning-rate decay after max_epoch epochs.
        lr_decay = config.lr_decay ** max(i + 1 - config.max_epoch, 0.0)
        m.assign_lr(session, config.learning_rate * lr_decay)

        print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
        train_perplexity = run_epoch(session, m, eval_op=m.train_op,
                                     verbose=True)
        print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
        valid_perplexity = run_epoch(session, mvalid)
        print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))

      test_perplexity = run_epoch(session, mtest)
      print("Test Perplexity: %.3f" % test_perplexity)

      if FLAGS.save_path:
        print("Saving model to %s." % FLAGS.save_path)
        sv.saver.save(session, FLAGS.save_path, global_step=sv.global_step)
开发者ID:JianGoForIt,项目名称:YellowFin,代码行数:58,代码来源:ptb_word_lm.py

示例7: main

# 需要导入模块: import reader [as 别名]
# 或者: from reader import ptb_raw_data [as 别名]
def main(_):
    """Train a PTB language model with a staged learning-rate schedule.

    Requires --data_path to point at the PTB data directory. Builds one
    training model and weight-sharing valid/test models, then trains for
    config.max_epoch epochs before a final test evaluation.
    """
    if not FLAGS.data_path:
        raise ValueError("Must set --data_path to PTB data directory")

    # raw_data is a 4-tuple (train, valid, test, vocabulary); vocabulary unused.
    raw_data = reader.ptb_raw_data(FLAGS.data_path)
    train_data, valid_data, test_data, _ = raw_data

    config = get_config()
    eval_config = get_config()
    # One token per step for exact test-set evaluation.
    eval_config.batch_size = 1
    eval_config.num_steps = 1

    with tf.Graph().as_default(), tf.Session() as session:
        initializer = tf.initializers.variance_scaling(distribution='uniform')
        with tf.variable_scope("model", reuse=tf.AUTO_REUSE, initializer=initializer):
            m = PTBModel(is_training=True, config=config)
        # reuse=True: the eval models share the training model's weights.
        with tf.variable_scope("model", reuse=True, initializer=initializer):
            mvalid = PTBModel(is_training=False, config=config)
            mtest = PTBModel(is_training=False, config=eval_config)

        tf.global_variables_initializer().run()

        def get_learning_rate(epoch, config):
            # Piecewise-constant schedule: base LR, then 10x and 100x reductions
            # after the first and second stage boundaries.
            base_lr = config.learning_rate
            if epoch <= config.nr_epoch_first_stage:
                return base_lr
            elif epoch <= config.nr_epoch_second_stage:
                return base_lr * 0.1
            else:
                return base_lr * 0.01

        for i in range(config.max_epoch):
            m.assign_lr(session, get_learning_rate(i, config))

            print("Epoch: %d Learning rate: %f"
                  % (i + 1, session.run(m.lr)))
            train_perplexity = run_epoch(
                session, m, train_data, m.train_op, verbose=True)
            print("Epoch: %d Train Perplexity: %.3f"
                  % (i + 1, train_perplexity))
            valid_perplexity = run_epoch(
                session, mvalid, valid_data, tf.no_op())
            print("Epoch: %d Valid Perplexity: %.3f"
                  % (i + 1, valid_perplexity))

        test_perplexity = run_epoch(
            session, mtest, test_data, tf.no_op())
        print("Test Perplexity: %.3f" % test_perplexity)
开发者ID:qinyao-he,项目名称:bit-rnn,代码行数:50,代码来源:train.py

示例8: main

# 需要导入模块: import reader [as 别名]
# 或者: from reader import ptb_raw_data [as 别名]
def main(_):
  """Run PTB LSTM training as a benchmark on a configurable device.

  Requires --data_path. config.device selects the device: '-1' means CPU,
  any other value is used as a GPU index. Validation/test models are
  commented out — this variant only times the training loop.
  """
  if not FLAGS.data_path:
    raise ValueError("Must set --data_path to PTB data directory")

  # raw_data is a 4-tuple (train, valid, test, vocabulary); vocabulary unused.
  raw_data = reader.ptb_raw_data(FLAGS.data_path)
  train_data, valid_data, test_data, _ = raw_data

  config = get_config()
  eval_config = get_config()
  eval_config.batch_size = 1
  eval_config.num_steps = 1

  if config.device == '-1':
    tf_dev = '/cpu:0'
  else:
    tf_dev = '/gpu:' + config.device

  print(tf_dev)
  tconfig = tf.ConfigProto(allow_soft_placement=True)
  if tf_dev.find('cpu') >= 0: # cpu version
    # Honor OMP_NUM_THREADS for intra-op parallelism on CPU; default is 1.
    num_threads = os.getenv('OMP_NUM_THREADS', 1)
    tconfig = tf.ConfigProto(allow_soft_placement=True, intra_op_parallelism_threads=int(num_threads))
  with tf.Graph().as_default(), tf.device(tf_dev), tf.Session(config=tconfig) as session:
    initializer = tf.random_uniform_initializer(-config.init_scale,
                                                config.init_scale)
    with tf.variable_scope("model", reuse=None, initializer=initializer):
      m = PTBModel(is_training=True, config=config)
    # with tf.variable_scope("model", reuse=True, initializer=initializer):
    #   mvalid = PTBModel(is_training=False, config=config)
    #   mtest = PTBModel(is_training=False, config=eval_config)

    tf.initialize_all_variables().run()

    for i in range(config.max_max_epoch):
      # Exponential learning-rate decay after max_epoch epochs.
      lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)
      m.assign_lr(session, config.learning_rate * lr_decay)

      print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
      train_perplexity = run_epoch(session, m, train_data, m.train_op, config.iters, 
                                   verbose=True)
#      print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
#      valid_perplexity = run_epoch(session, mvalid, valid_data, tf.no_op())
#      print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))

#    test_perplexity = run_epoch(session, mtest, test_data, tf.no_op())
#    print("Test Perplexity: %.3f" % test_perplexity)
开发者ID:hclhkbu,项目名称:dlbench,代码行数:48,代码来源:lstm.py

示例9: main

# 需要导入模块: import reader [as 别名]
# 或者: from reader import ptb_raw_data [as 别名]
def main(_):
  """Benchmark PTB LSTM training and report per-batch timing statistics.

  Requires --data_path. config.device selects the device: '-1' means CPU,
  any other value is used as a GPU index. Uses a constant learning rate
  (decay code is commented out) and prints average batch time, periodic
  per-epoch perplexities, and final test perplexity.
  """
  if not FLAGS.data_path:
    raise ValueError("Must set --data_path to PTB data directory")

  # raw_data is a 4-tuple (train, valid, test, vocabulary); vocabulary unused.
  raw_data = reader.ptb_raw_data(FLAGS.data_path)
  train_data, valid_data, test_data, _ = raw_data

  config = get_config()
  eval_config = get_config()
  # One token per step for exact test-set evaluation.
  eval_config.batch_size = 1
  eval_config.num_steps = 1

  if config.device == '-1':
    tf_dev = '/cpu:0'
  else:
    tf_dev = '/gpu:' + config.device

  print(tf_dev)
  tconfig = tf.ConfigProto(allow_soft_placement=True)
  if tf_dev.find('cpu') >= 0: # cpu version
    # Honor OMP_NUM_THREADS for intra-op parallelism on CPU; default is 1.
    num_threads = os.getenv('OMP_NUM_THREADS', 1)
    tconfig = tf.ConfigProto(allow_soft_placement=True, intra_op_parallelism_threads=int(num_threads))
  with tf.Graph().as_default(), tf.device(tf_dev), tf.Session(config=tconfig) as session:
    initializer = tf.random_uniform_initializer(-config.init_scale,
                                                config.init_scale)
    with tf.variable_scope("model", reuse=None, initializer=initializer):
      m = PTBModel(is_training=True, config=config)
    # reuse=True: the test model shares the training model's weights.
    with tf.variable_scope("model", reuse=True, initializer=initializer):
       #mvalid = PTBModel(is_training=False, config=config)
       mtest = PTBModel(is_training=False, config=eval_config)

    tf.global_variables_initializer().run()

    total_average_batch_time = 0.0

    epochs_info = []
    for i in range(config.max_max_epoch):
      #lr_decay = config.lr_decay ** max(i - config.max_epoch, 0.0)
      #m.assign_lr(session, config.learning_rate * lr_decay)
      m.assign_lr(session, config.learning_rate)

      print("Epoch: %d Learning rate: %.3f" % (i + 1, session.run(m.lr)))
      train_perplexity, average_batch_time = run_epoch(session, m, train_data, m.train_op, verbose=True)
      total_average_batch_time += average_batch_time
      print("Epoch: %d Train Perplexity: %.3f" % (i + 1, train_perplexity))
      # Record perplexity every other epoch for the summary line below.
      if i % 2 == 0:
         epochs_info.append('%d:_:%.3f'%(i, train_perplexity)) 
#      valid_perplexity = run_epoch(session, mvalid, valid_data, tf.no_op())
#      print("Epoch: %d Valid Perplexity: %.3f" % (i + 1, valid_perplexity))

    print("average_batch_time: %.6f" % (total_average_batch_time/int(config.max_max_epoch)))
    print('epoch_info:'+','.join(epochs_info))

    test_perplexity, test_average_batch_time = run_epoch(session, mtest, test_data, tf.no_op())
    print("Test Perplexity: %.3f" % test_perplexity)
开发者ID:hclhkbu,项目名称:dlbench,代码行数:57,代码来源:lstm.py


注:本文中的reader.ptb_raw_data方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。