

Python reader.ptb_iterator method code examples

This article collects typical usage examples of the reader.ptb_iterator method in Python. If you are wondering how exactly to use reader.ptb_iterator, how to call it, or what real code using it looks like, the hand-picked code examples below should help. You can also explore further usage examples from the reader module it belongs to.


The following presents 7 code examples of the reader.ptb_iterator method, sorted by popularity by default.
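
Before diving into the examples, a minimal sketch of what the iterator produces may help. Assuming reader is the PTB reader module from the classic TensorFlow tutorial, reader.ptb_iterator(raw_data, batch_size, num_steps) walks over a flat list of word ids and yields (x, y) pairs of shape [batch_size, num_steps], where y is x shifted one position to the right (the next-word targets). The toy data and hyperparameters below are placeholders, not taken from any of the projects quoted in this article.

import numpy as np
import reader  # assumed to be the TensorFlow PTB tutorial reader

raw_data = list(range(1000))  # hypothetical corpus: a flat list of word ids
batch_size, num_steps = 4, 5

for step, (x, y) in enumerate(reader.ptb_iterator(raw_data, batch_size, num_steps)):
    # x and y are integer arrays of shape [batch_size, num_steps];
    # y holds the targets, i.e. x shifted by one time step.
    assert np.shape(x) == (batch_size, num_steps)
    assert np.shape(y) == (batch_size, num_steps)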

Example 1: _run_epoch

# Required import: import reader [as alias]
# Or: from reader import ptb_iterator [as alias]
def _run_epoch(self, session, data, eval_op, verbose=False):
        epoch_size = ((len(data) // self._batch_size) - 1) // self._num_steps
        start_time = time.time()
        costs = 0.0
        iters = 0

        for step, (x, y) in enumerate(reader.ptb_iterator(data, self._batch_size, self._num_steps)):
            fetches, feed_dict = self._one_loop_setup(eval_op)
            feed_dict[self._input_data] = x
            feed_dict[self._targets] = y

            res = session.run(fetches, feed_dict)
            self.train_writer.add_summary(res[2], step / 13)
            cost = res[0]

            costs += cost
            iters += self._num_steps

            if verbose and step % (epoch_size // 10) == 10:
                sys.stdout.write("%.3f perplexity: %.3f speed: %.0f wps\n" %
                    (step * 1.0 / epoch_size, np.exp(costs / iters),
                    iters * self._batch_size * self._num_steps / (time.time() - start_time)))
                sys.stdout.flush()

        return np.exp(costs / iters) 
Developer: katsugeneration, Project: tensor-fsmn, Lines of code: 27, Source file: ptb.py
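
The epoch_size formula at the top of Example 1 recurs in every snippet below, so a short worked example with illustrative numbers may be useful: ptb_iterator first reshapes the data into batch_size rows of len(data) // batch_size word ids each, and every iteration consumes num_steps columns for x plus one extra shifted column for y, which is where the -1 comes from.

data_len = 929589                            # roughly the PTB training set size (illustrative)
batch_size, num_steps = 20, 35
batch_len = data_len // batch_size           # 46479 word ids per row
epoch_size = (batch_len - 1) // num_steps    # 1327 (x, y) batches per epoch
print(batch_len, epoch_size)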

Example 2: predict

# Required import: import reader [as alias]
# Or: from reader import ptb_iterator [as alias]
def predict(self, session, data, word_to_id):
        def _get_word_fromid(word_to_id, search_id):
            for word, wid in word_to_id.items():
                if wid == search_id:
                    return word

        for step, (x, y) in enumerate(reader.ptb_iterator(data, self._batch_size, self._num_steps)):
            fetches, feed_dict = self._one_loop_setup(self._logits)
            feed_dict[self._input_data] = x
            feed_dict[self._targets] = y

            res = session.run(fetches, feed_dict)
            label = res[1]
            label = np.argmax(label, 1)
            y = np.reshape(y, (self._batch_size * self._num_steps))
            for pre, real in zip(label, y):
                sys.stdout.write("Predict %s : Real %s\n" % (_get_word_fromid(word_to_id, pre), _get_word_fromid(word_to_id, real))) 
Developer: katsugeneration, Project: tensor-fsmn, Lines of code: 19, Source file: ptb.py
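
A small aside on Example 2: _get_word_fromid scans the whole vocabulary for every predicted id, which is linear in the vocabulary size per lookup. If that becomes a bottleneck, the usual fix is to invert the mapping once. A minimal, self-contained sketch (the toy vocabulary and the id_to_word name are ours, not part of the quoted project):

word_to_id = {"the": 0, "cat": 1, "sat": 2}   # toy vocabulary for illustration
# Build the reverse mapping once instead of scanning word_to_id per lookup.
id_to_word = {wid: word for word, wid in word_to_id.items()}

print(id_to_word[1])  # -> cat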

Example 3: run_epoch

# Required import: import reader [as alias]
# Or: from reader import ptb_iterator [as alias]
def run_epoch(session, m, data, eval_op, verbose=False):
    """Runs the model on the given data."""
    epoch_size = ((len(data) // m.batch_size) - 1) // m.num_steps
    start_time = time.time()
    costs = 0.0
    iters = 0
    state = m.initial_state.eval()
    for step, (x, y) in enumerate(
            reader.ptb_iterator(data, m.batch_size, m.num_steps)):
        cost, state, _ = session.run([m.cost, m.final_state, eval_op],
                                     {m.input_data: x,
                                      m.targets: y,
                                      m.initial_state: state})
        costs += cost
        iters += m.num_steps

        if verbose and step % (epoch_size // 10) == 10:
            print("%.3f perplexity: %.3f speed: %.0f wps" %
                  (step * 1.0 / epoch_size, np.exp(costs / iters),
                   iters * m.batch_size / (time.time() - start_time)))

    return np.exp(costs / iters) 
Developer: qinyao-he, Project: bit-rnn, Lines of code: 24, Source file: train.py

Example 4: run_epoch

# Required import: import reader [as alias]
# Or: from reader import ptb_iterator [as alias]
def run_epoch(session, model, data, is_train=False, verbose=False):
  """Runs the model on the given data."""
  epoch_size = ((len(data) // model.batch_size) - 1) // model.num_steps
  start_time = time.time()
  costs = 0.0
  iters = 0
  state = session.run(model.initial_state)

  for step, (x, y) in enumerate(reader.ptb_iterator(data, model.batch_size, model.num_steps)):
    if is_train:
      fetches = [model.cost, model.final_state, model.train_op]
    else:
      fetches = [model.cost, model.final_state]
    feed_dict = {}
    feed_dict[model.input_data] = x
    feed_dict[model.targets] = y
    for layer_num, (c, h) in enumerate(model.initial_state):
      feed_dict[c] = state[layer_num].c
      feed_dict[h] = state[layer_num].h

    if is_train:
      cost, state, _ = session.run(fetches, feed_dict)
    else:
      cost, state = session.run(fetches, feed_dict)

    costs += cost
    iters += model.num_steps

    if verbose and step % (epoch_size // 10) == 10:
      print("%.3f perplexity: %.3f speed: %.0f wps" %
            (step * 1.0 / epoch_size, np.exp(costs / iters),
             iters * model.batch_size / (time.time() - start_time)))

  return np.exp(costs / iters) 
Developer: deeplearningathome, Project: rnn_text_writer, Lines of code: 36, Source file: ptb_word_lm.py
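
Example 4 differs from Example 3 mainly in how the recurrent state is carried across batches: with tuple states (state_is_tuple=True), model.initial_state is one LSTMStateTuple(c, h) per layer, so the c and h tensors have to be fed individually. A minimal sketch of the same feeding pattern outside of any model class, assuming the TensorFlow 1.x / tf.contrib API and purely illustrative sizes:

import numpy as np
import tensorflow as tf  # TensorFlow 1.x assumed

batch_size, num_steps, hidden_size, num_layers = 4, 5, 8, 2

inputs = tf.placeholder(tf.float32, [batch_size, num_steps, hidden_size])
cell = tf.contrib.rnn.MultiRNNCell(
    [tf.contrib.rnn.BasicLSTMCell(hidden_size, state_is_tuple=True)
     for _ in range(num_layers)])
initial_state = cell.zero_state(batch_size, tf.float32)
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, initial_state=initial_state)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    state = sess.run(initial_state)
    feed_dict = {inputs: np.zeros((batch_size, num_steps, hidden_size), np.float32)}
    # Feed each layer's (c, h) tensors separately, exactly as Example 4 does.
    for layer_num, (c, h) in enumerate(initial_state):
        feed_dict[c] = state[layer_num].c
        feed_dict[h] = state[layer_num].h
    state = sess.run(final_state, feed_dict)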

Example 5: run_epoch

# Required import: import reader [as alias]
# Or: from reader import ptb_iterator [as alias]
def run_epoch(session, m, data, eval_op, ITERS, verbose=False):
  """Runs the model on the given data."""
  epoch_size = ((len(data) // m.batch_size) - 1) // m.num_steps
  start_time = time.time()
  costs = 0.0
  iters = 0
  #state = m.initial_state.eval()
  state = session.run(m.initial_state) #.eval()
  step = 0
  for step, (x, y) in enumerate(reader.ptb_iterator(data, m.batch_size,
                                                    m.num_steps)):
    cost, state, _ = session.run([m.cost, m.final_state, eval_op],
                                 {m.input_data: x,
                                  m.targets: y,
                                  m.initial_state: state})
    costs += cost
    iters += m.num_steps

    if verbose and step % (epoch_size // 10) == 10:
      print("%.3f perplexity: %.3f speed: %.0f wps" %
            (step * 1.0 / epoch_size, np.exp(costs / iters),
             iters * m.batch_size / (time.time() - start_time)))
   
    # few iters for profiling, remove if complete training is needed
    if step > ITERS - 1:
      break

  print("Time for %d iterations %.4f seconds" %
            (ITERS, time.time() - start_time))
   
  return np.exp(costs / iters) 
Developer: hclhkbu, Project: dlbench, Lines of code: 33, Source file: lstm.py

Example 6: run_epoch

# Required import: import reader [as alias]
# Or: from reader import ptb_iterator [as alias]
def run_epoch(session, m, data, eval_op, verbose=False):
  """Runs the model on the given data."""
  epoch_size = ((len(data) // m.batch_size) - 1) // m.num_steps
  start_time = time.time()
  costs = 0.0
  iters = 0
  print('m.initial_state:', m.initial_state)
  state = session.run(m.initial_state) #.eval()
  step = 0
  for step, (x, y) in enumerate(reader.ptb_iterator(data, m.batch_size,
                                                    m.num_steps)):
    cost, state, _ = session.run([m.cost, m.final_state, eval_op],
                                 {m.input_data: x,
                                  m.targets: y,
                                  m.initial_state: state})
    costs += cost
    iters += m.num_steps

    if verbose and step % (epoch_size // 10) == 10:
      print("%.3f perplexity: %.3f speed: %.0f wps" %
            (step * 1.0 / epoch_size, np.exp(costs / iters),
             iters * m.batch_size / (time.time() - start_time)))

  print("Time for one epoch, %d iters: %.4f seconds" %
            (step+1, time.time() - start_time))
  average_batch_time = (time.time() - start_time)/(step+1)
  print("Average time per minibatch in this epoch: %.4f seconds" % average_batch_time)
   
  return np.exp(costs / iters), average_batch_time 
Developer: hclhkbu, Project: dlbench, Lines of code: 31, Source file: lstm.py

Example 7: run_epoch

# Required import: import reader [as alias]
# Or: from reader import ptb_iterator [as alias]
def run_epoch(model, data, is_train=False, lr=1.0):
  """Runs the model on the given data."""
  if is_train:
    model.train()
  else:
    model.eval()
  epoch_size = ((len(data) // model.batch_size) - 1) // model.num_steps
  start_time = time.time()
  hidden = model.init_hidden()
  costs = 0.0
  iters = 0
  for step, (x, y) in enumerate(reader.ptb_iterator(data, model.batch_size, model.num_steps)):
    inputs = Variable(torch.from_numpy(x.astype(np.int64)).transpose(0, 1).contiguous()).cuda()
    model.zero_grad()
    hidden = repackage_hidden(hidden)
    outputs, hidden = model(inputs, hidden)
    targets = Variable(torch.from_numpy(y.astype(np.int64)).transpose(0, 1).contiguous()).cuda()
    tt = torch.squeeze(targets.view(-1, model.batch_size * model.num_steps))

    loss = criterion(outputs.view(-1, model.vocab_size), tt)
    costs += loss.data[0] * model.num_steps
    iters += model.num_steps

    if is_train:
      loss.backward()
      torch.nn.utils.clip_grad_norm(model.parameters(), 0.25)
      for p in model.parameters():
        p.data.add_(-lr, p.grad.data)
      if step % (epoch_size // 10) == 10:
        print("{} perplexity: {:8.2f} speed: {} wps".format(step * 1.0 / epoch_size, np.exp(costs / iters),
                                                       iters * model.batch_size / (time.time() - start_time)))
  return np.exp(costs / iters) 
Developer: deeplearningathome, Project: pytorch-language-model, Lines of code: 34, Source file: ptb-lm.py
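
Example 7 relies on two names defined elsewhere in ptb-lm.py: criterion, a cross-entropy loss over the flattened logits, and repackage_hidden, which detaches the hidden state from the previous batch's graph so that backpropagation stops at batch boundaries. A minimal sketch of what these usually look like in the same old (pre-0.4, Variable-based) PyTorch style; this is an assumption on our part, not the project's exact code:

import torch
import torch.nn as nn
from torch.autograd import Variable

criterion = nn.CrossEntropyLoss()

def repackage_hidden(h):
    # Wrap hidden states in fresh Variables so gradients do not flow
    # back into the previous batch.
    if isinstance(h, Variable):
        return Variable(h.data)
    return tuple(repackage_hidden(v) for v in h)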


Note: The reader.ptb_iterator examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their authors; copyright in the source code remains with the original authors, and distribution and use should follow the corresponding project's License. Please do not reproduce this article without permission.