

Python data_utils.bins method code examples

This article collects typical usage examples of the data_utils.bins method in Python. If you are wondering what data_utils.bins does, how to call it, or what real code that uses it looks like, the hand-picked examples below should help. You can also browse further usage examples from the data_utils module itself.


Five code examples of the data_utils.bins method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
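
All of the examples below treat data_utils.bins as a sorted list of bucket lengths: a sequence is assigned to the smallest bin that can hold it, and the bin index is used to pick the matching model graph and data bucket. The following sketch only illustrates that assumed shape; the concrete values and the helper function are made up for demonstration and are not taken from the projects quoted here.

# A minimal sketch, assuming data_utils.bins is a sorted list of bucket
# lengths. The values and the helper below are illustrative only.
bins = [8, 12, 16, 20, 24, 32, 48, 64]

def bucket_for_length(length, bins=bins):
  """Return the index of the smallest bin that fits the given length."""
  for index, size in enumerate(bins):
    if length <= size:
      return index
  return None  # longer than the largest bin

print(bucket_for_length(10))  # -> 1 (the bin of length 12 in this sketch)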

Example 1: get_bucket_id

# Required import: import data_utils [as an alias]
# Or: from data_utils import bins [as an alias]
def get_bucket_id(train_buckets_scale_c, max_cur_length, data_set):
  """Get a random bucket id."""
  # Choose a bucket according to data distribution. Pick a random number
  # in [0, 1] and use the corresponding interval in train_buckets_scale.
  random_number_01 = np.random.random_sample()
  bucket_id = min([i for i in xrange(len(train_buckets_scale_c))
                   if train_buckets_scale_c[i] > random_number_01])
  while bucket_id > 0 and not data_set[bucket_id]:
    bucket_id -= 1
  for _ in xrange(10 if np.random.random_sample() < 0.9 else 1):
    if data.bins[bucket_id] > max_cur_length:
      random_number_01 = min(random_number_01, np.random.random_sample())
      bucket_id = min([i for i in xrange(len(train_buckets_scale_c))
                       if train_buckets_scale_c[i] > random_number_01])
      while bucket_id > 0 and not data_set[bucket_id]:
        bucket_id -= 1
  return bucket_id 
Author: ringringyi | Project: DOTA_models | Lines of code: 19 | Source file: neural_gpu_trainer.py
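
The heart of get_bucket_id is sampling a bucket in proportion to how much training data it holds: draw a random number in [0, 1) and take the first bucket whose cumulative data fraction exceeds it. The self-contained sketch below reproduces only that sampling step; the bucket sizes and variable names are illustrative assumptions, not values from the original trainer.

import numpy as np

# Illustrative cumulative-scale sampling, mirroring the selection logic in
# get_bucket_id. Bucket sizes are made up for demonstration.
bucket_sizes = [100, 300, 600]                  # training examples per bucket
total = float(sum(bucket_sizes))
train_buckets_scale = [s / total for s in np.cumsum(bucket_sizes)]  # [0.1, 0.4, 1.0]

random_number_01 = np.random.random_sample()
bucket_id = min(i for i, scale in enumerate(train_buckets_scale)
                if scale > random_number_01)
print(bucket_id)  # most often 2, since that bucket holds 60% of the data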

Example 2: single_test

# Required import: import data_utils [as an alias]
# Or: from data_utils import bins [as an alias]
def single_test(bin_id, model, sess, nprint, batch_size, dev, p, print_out=True,
                offset=None, beam_model=None):
  """Test model on test data of length l using the given session."""
  if not dev[p][bin_id]:
    data.print_out("  bin %d (%d)\t%s\tppl NA errors NA seq-errors NA"
                   % (bin_id, data.bins[bin_id], p))
    return 1.0, 1.0, 0.0
  inpt, target = data.get_batch(
      bin_id, batch_size, dev[p], FLAGS.height, offset)
  if FLAGS.beam_size > 1 and beam_model:
    loss, res, new_tgt, scores = m_step(
        model, beam_model, sess, batch_size, inpt, target, bin_id,
        FLAGS.eval_beam_steps, p)
    score_avgs = [sum(s) / float(len(s)) for s in scores]
    score_maxs = [max(s) for s in scores]
    score_str = ["(%.2f, %.2f)" % (score_avgs[i], score_maxs[i])
                 for i in xrange(FLAGS.eval_beam_steps)]
    data.print_out("  == scores (avg, max): %s" % "; ".join(score_str))
    errors, total, seq_err = data.accuracy(inpt, res, target, batch_size,
                                           nprint, new_tgt, scores[-1])
  else:
    loss, res, _, _ = model.step(sess, inpt, target, False)
    errors, total, seq_err = data.accuracy(inpt, res, target, batch_size,
                                           nprint)
  seq_err = float(seq_err) / batch_size
  if total > 0:
    errors = float(errors) / total
  if print_out:
    data.print_out("  bin %d (%d)\t%s\tppl %.2f errors %.2f seq-errors %.2f"
                   % (bin_id, data.bins[bin_id], p, data.safe_exp(loss),
                      100 * errors, 100 * seq_err))
  return (errors, seq_err, loss) 
Author: ringringyi | Project: DOTA_models | Lines of code: 34 | Source file: neural_gpu_trainer.py
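
single_test reports a per-token error rate, a per-sequence error rate, and a perplexity obtained by exponentiating the average loss via data.safe_exp. The helper below is only a guessed stand-in for that function (the real data_utils.safe_exp may differ); it shows the intended loss-to-perplexity conversion with a simple overflow guard.

import math

# Hypothetical stand-in for data_utils.safe_exp: turn an average per-token
# loss into a perplexity while capping the exponent to avoid overflow.
def safe_exp(x, cap=100.0):
  return math.exp(min(x, cap))

loss = 1.5                          # illustrative average per-token loss
print("ppl %.2f" % safe_exp(loss))  # -> ppl 4.48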

Example 3: get_best_beam

# Required import: import data_utils [as an alias]
# Or: from data_utils import bins [as an alias]
def get_best_beam(beam_model, sess, inp, target, batch_size, beam_size,
                  bucket, history, p, test_mode=False):
  """Run beam_model, score beams, and return the best as target and in input."""
  _, output_logits, _, _ = beam_model.step(
      sess, inp, target, None, beam_size=FLAGS.beam_size)
  new_targets, new_firsts, scores, new_inp = [], [], [], np.copy(inp)
  for b in xrange(batch_size):
    outputs = []
    history_b = [[h[b, 0, l] for l in xrange(data.bins[bucket])]
                 for h in history]
    for beam_idx in xrange(beam_size):
      outputs.append([int(o[beam_idx * batch_size + b])
                      for o in output_logits])
    target_t = [target[b, 0, l] for l in xrange(data.bins[bucket])]
    best, best_score = score_beams(
        outputs, [t for t in target_t if t > 0], inp[b, :, :],
        [[t for t in h if t > 0] for h in history_b], p, test_mode=test_mode)
    scores.append(best_score)
    if 1 in best:  # Only until _EOS.
      best = best[:best.index(1) + 1]
    best += [0 for _ in xrange(len(target_t) - len(best))]
    new_targets.append([best])
    first, _ = score_beams(
        outputs, [t for t in target_t if t > 0], inp[b, :, :],
        [[t for t in h if t > 0] for h in history_b], p, test_mode=True)
    if 1 in first:  # Only until _EOS.
      first = first[:first.index(1) + 1]
    first += [0 for _ in xrange(len(target_t) - len(first))]
    new_inp[b, 0, :] = np.array(first, dtype=np.int32)
    new_firsts.append([first])
  # Change target if we found a great answer.
  new_target = np.array(new_targets, dtype=np.int32)
  for b in xrange(batch_size):
    if scores[b] >= 10.0:
      target[b, 0, :] = new_target[b, 0, :]
  new_first = np.array(new_firsts, dtype=np.int32)
  return new_target, new_first, new_inp, scores 
Author: itsamitgoel | Project: Gun-Detector | Lines of code: 39 | Source file: neural_gpu_trainer.py
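
One detail worth isolating from get_best_beam is how a chosen beam is normalized before it can replace the target: the sequence is cut right after the first _EOS token (id 1 in this code) and then zero-padded back to the bucket length data.bins[bucket]. Below is a self-contained sketch of just that step, with target_len standing in for the bucket length.

# Truncate at _EOS and pad with zeros, assuming token id 1 is _EOS and 0 is
# padding, as in the quoted code.
def truncate_and_pad(best, target_len, eos=1, pad=0):
  if eos in best:
    best = best[:best.index(eos) + 1]
  return best + [pad] * (target_len - len(best))

print(truncate_and_pad([5, 3, 1, 9, 9], 6))  # -> [5, 3, 1, 0, 0, 0]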

Example 4: step

# Required import: import data_utils [as an alias]
# Or: from data_utils import bins [as an alias]
def step(self, sess, inp, target, do_backward, noise_param=None,
           get_steps=False):
    """Run a step of the network."""
    assert len(inp) == len(target)
    length = len(target)
    feed_in = {}
    feed_in[self.noise_param.name] = noise_param if noise_param else 0.0
    feed_in[self.do_training.name] = 1.0 if do_backward else 0.0
    feed_out = []
    index = len(data_utils.bins)
    if length < data_utils.bins[-1] + 1:
      index = data_utils.bins.index(length)
    if do_backward:
      feed_out.append(self.updates[index])
      feed_out.append(self.grad_norms[index])
    feed_out.append(self.losses[index])
    for l in xrange(length):
      feed_in[self.input[l].name] = inp[l]
    for l in xrange(length):
      feed_in[self.target[l].name] = target[l]
      feed_out.append(self.outputs[index][l])
    if get_steps:
      for l in xrange(length+1):
        feed_out.append(self.steps[index][l])
    res = sess.run(feed_out, feed_in)
    offset = 0
    norm = None
    if do_backward:
      offset = 2
      norm = res[1]
    outputs = res[offset + 1:offset + 1 + length]
    steps = res[offset + 1 + length:] if get_steps else None
    return res[offset], outputs, norm, steps 
Author: coderSkyChen | Project: Action_Recognition_Zoo | Lines of code: 35 | Source file: neural_gpu.py
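
The trickiest part of step() is unpacking the flat list returned by sess.run: when training, the first two fetches are the update op and the gradient norm, the loss comes next, and the per-position outputs follow. The sketch below mimics that slicing with placeholder values; the real res list of course comes from sess.run(feed_out, feed_in).

# Illustrative unpacking of the flat result list assembled in step().
length, do_backward = 3, True
res = ["update_op", 0.7, 2.31, "out0", "out1", "out2"]  # placeholder values

offset = 2 if do_backward else 0
norm = res[1] if do_backward else None
loss = res[offset]
outputs = res[offset + 1:offset + 1 + length]
print(loss, norm, outputs)  # -> 2.31 0.7 ['out0', 'out1', 'out2']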

Example 5: evaluate

# Required import: import data_utils [as an alias]
# Or: from data_utils import bins [as an alias]
def evaluate():
  """Evaluate an existing model."""
  batch_size = FLAGS.batch_size
  tasks = FLAGS.task.split("-")
  with tf.Session() as sess:
    model, min_length, max_length, _, _, ensemble = initialize(sess)
    bound = data.bins[-1] + 1
    for t in tasks:
      l = min_length
      while l < max_length + EXTRA_EVAL and l < bound:
        _, seq_err, _ = single_test(l, model, sess, t, FLAGS.nprint,
                                    batch_size, ensemble=ensemble)
        l += 1
        while l < bound + 1 and not data.test_set[t][l]:
          l += 1
      # Animate.
      if FLAGS.animate:
        anim_size = 2
        _, _, test_data = single_test(l, model, sess, t, 0, anim_size,
                                      get_steps=True)
        animate(l, test_data, anim_size)
      # More tests.
      _, seq_err = multi_test(data.forward_max, model, sess, t, FLAGS.nprint,
                              batch_size * 4, ensemble=ensemble)
    if seq_err < 0.01:  # Super-test if we're very good and in large-test mode.
      if data.forward_max > 4000 and len(tasks) == 1:
        multi_test(data.forward_max, model, sess, tasks[0], FLAGS.nprint,
                   batch_size * 64, 0, ensemble=ensemble) 
Author: coderSkyChen | Project: Action_Recognition_Zoo | Lines of code: 30 | Source file: neural_gpu_trainer.py
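
evaluate() sweeps over sequence lengths from min_length up to the largest bin (bound = data.bins[-1] + 1), skipping any length for which the task has no test data. The toy sketch below reproduces only that sweep; bins, test_set, and the length limits are illustrative stand-ins for the real data_utils values.

# Toy reproduction of the length sweep in evaluate().
bins = [8, 12, 16]
test_set = {4: ["example"], 8: ["example"]}  # lengths that actually have data
min_length, max_length, extra_eval = 4, 8, 2

bound = bins[-1] + 1
l = min_length
while l < max_length + extra_eval and l < bound:
  print("evaluating length", l)
  l += 1
  while l < bound + 1 and not test_set.get(l):
    l += 1
# prints lengths 4 and 8 in this sketch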


Note: The data_utils.bins examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective authors, who retain copyright over the source code; please follow each project's license when redistributing or using it. Do not republish this article without permission.