

Python utils.merge Method Code Examples

This article collects typical usage examples of the Python utils.merge method. If you are wondering what exactly utils.merge does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples from the utils modules these methods belong to.


Five code examples of the utils.merge method are shown below, ordered by popularity. Note that utils here is a project-local helper module, not a standard-library package, so merge behaves somewhat differently in each project.
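Before diving in, it helps to know what utils.merge usually does in these repositories: it combines two mapping-like objects into a new one, with the second argument's entries taking precedence. The implementation varies per project, so the following is only a minimal sketch of the dictionary-merging variant assumed by Examples 2 through 5; its body is an assumption, not the verbatim source.

def merge(a, b):
    # Minimal sketch (assumption): a new dict in which entries of b
    # override entries of a; the real utils.merge may differ per project.
    result = dict(a)
    result.update(b)
    return result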

Example 1: test

# Required import: import utils [as alias]
# Or: from utils import merge [as alias]
def test(self, name="test", options=None, fixed=False):
    if options is None:
      options = self.options

    t = strfnow()  # timestamp string used to tag the output filenames

    for option in options:
      # Use a fixed, pre-selected test tuple or draw the next batch from the loader.
      if fixed:
        a, b, c, d = self.loader.tests[option]
      else:
        a, b, c, d = self.loader.next(set_option=option)

      feed = {self.a: a,
              self.b: b,
              self.c: c,
              self.d: d}

      fname = "%s/%s_option:%s_time:%s.png" % (self.sample_dir, name, option, t)
      g_img, g2_img, g3_img = self.sess.run([self.g1_img, self.g2_img, self.g3_img], feed_dict=feed)

      # Tile the input batches and the three generated batches into one image.
      imsave(fname, merge(a, b, c, d, g_img, g2_img, g3_img)) 
Author: carpedm20 | Project: visual-analogy-tensorflow | Lines: 23 | Source: shape.py
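In this project, merge is an image utility rather than a dict merge: it tiles the four input batches and the three generated batches into a single canvas for imsave. The layout below is a guess under the assumption that every argument is a batch of equally sized HxWxC images; it is a sketch, not the project's actual code.

import numpy as np

def merge(*image_batches):
    # Assumed layout: each batch becomes one horizontal strip of images,
    # and the strips are stacked vertically into a single canvas.
    rows = [np.concatenate(list(batch), axis=1) for batch in image_batches]
    return np.concatenate(rows, axis=0)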

Example 2: generate_train_batch

# Required import: import utils [as alias]
# Or: from utils import merge [as alias]
def generate_train_batch(required_input_keys, required_output_keys):
    """Creates an iterator that returns train batches."""

    sunny_chunk_size = _config().sunny_batch_size * _config().batches_per_chunk
    chunk_size = _config().batch_size * _config().batches_per_chunk

    while True:
        result = {}
        input_keys_to_do = list(required_input_keys)    # copy, so handled keys can be removed
        output_keys_to_do = list(required_output_keys)  # copy, so handled keys can be removed
        if "sunny" in input_keys_to_do or "segmentation" in output_keys_to_do:
            indices = _config().rng.randint(0, len(sunny_train_images), sunny_chunk_size)
            sunny_patient_data = get_sunny_patient_data(indices, set="train")
            result = utils.merge(result, sunny_patient_data)
            input_keys_to_do.remove("sunny")
            output_keys_to_do.remove("segmentation")

        indices = _config().rng.randint(0, len(train_patient_folders), chunk_size)
        kaggle_data = get_patient_data(indices, input_keys_to_do, output_keys_to_do, set="train",
                                       preprocess_function=_config().preprocess_train)

        result = utils.merge(result, kaggle_data)

        yield result 
Author: 317070 | Project: kaggle-heart | Lines: 26 | Source: data_loader.py
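A usage sketch for this generator; the key names below are hypothetical placeholders, since the real keys come from the project's configuration:

# Hypothetical key names, for illustration only.
train_gen = generate_train_batch(
    required_input_keys=["sunny", "sliced:data"],
    required_output_keys=["segmentation", "systole"])

chunk = next(train_gen)  # dict mapping each requested key to one chunk of data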

Example 3: evaluate

# Required import: import utils [as alias]
# Or: from utils import merge [as alias]
def evaluate(override_cfg, model_dir, continuous=True):
  """Run evaluation, either once or continuously as new checkpoints appear."""
  tf.logging.info("model_dir = " + model_dir)
  try:
    cfg = _load_config(model_dir)
  except tf.errors.NotFoundError:
    tf.logging.info("Model directory does not exist yet. Creating new config.")
    cfg = model.build_config(model_dir=model_dir, data_path=FLAGS.data_path)
  tf.logging.info(cfg)
  tf.logging.info(override_cfg)
  cfg = utils.merge(cfg, override_cfg)

  cfg.tpu.enable = False
  cfg.dataset.max_length = None

  # Construct inputs and estimator
  _, eval_input = data.build_dataset(cfg.dataset, is_tpu=cfg.tpu.enable)
  estimator = model.get_estimator(**cfg)
  if continuous:
    checkpoints_iterator = tf.contrib.training.checkpoints_iterator(
        cfg.model_dir)
    eval_metrics = None
    for ckpt_path in checkpoints_iterator:
      eval_metrics = estimator.evaluate(
          input_fn=eval_input, checkpoint_path=ckpt_path)
      tf.logging.info(pprint.pformat(eval_metrics))
    return eval_metrics
  else:
    eval_metrics = estimator.evaluate(input_fn=eval_input)
    return eval_metrics 
Author: mlperf | Project: training_results_v0.5 | Lines: 32 | Source: run_lib.py
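With continuous=True the function blocks on tf.contrib.training.checkpoints_iterator and re-evaluates every new checkpoint it finds; with continuous=False it evaluates the latest checkpoint once and returns. A hypothetical one-shot call (the override config and path are placeholders):

# Hypothetical invocation; my_override_cfg and model_dir are placeholders.
eval_metrics = evaluate(override_cfg=my_override_cfg,
                        model_dir="/tmp/my_model",
                        continuous=False)
tf.logging.info(eval_metrics)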

Example 4: predict

# Required import: import utils [as alias]
# Or: from utils import merge [as alias]
def predict(override_cfg, model_dir):
  """Run model over a dataset and dump predictions to json file."""
  assert FLAGS.predict_path
  cfg = _load_config(model_dir)
  cfg = utils.merge(cfg, override_cfg)
  input_fn = data.get_input_fn(
      split=cfg.dataset.eval_split,
      max_length=None,
      repeat=False,
      shuffle=False,
      cache=False,
      limit=None,
      data_path=cfg.dataset.data_path,
      vocab_path=cfg.dataset.vocab_path,
      is_tpu=False,
      use_generator=True,
      is_training=False)
  estimator = model.get_estimator(**cfg)
  predictions = dict()
  for i, prediction in enumerate(estimator.predict(input_fn)):
    predictions[prediction["id"]] = prediction["answer"]
    if i % 100 == 0:
      tf.logging.info("Prediction %s | %s: %s" % (i, prediction["id"],
                                                  prediction["answer"]))

  # Dump results to a file
  with tf.gfile.GFile(FLAGS.predict_path, "w") as f:
    json.dump(predictions, f) 
Author: mlperf | Project: training_results_v0.5 | Lines: 30 | Source: run_lib.py
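The file written to FLAGS.predict_path is a single JSON object mapping example ids to answer strings. A sketch of reading it back (the path is a placeholder):

import json

import tensorflow as tf

# Path is a placeholder for whatever FLAGS.predict_path was set to.
with tf.gfile.GFile("/tmp/predictions.json") as f:
    predictions = json.load(f)  # e.g. {"example-id": "answer text", ...}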

Example 5: generate_validation_batch

# Required import: import utils [as alias]
# Or: from utils import merge [as alias]
def generate_validation_batch(required_input_keys, required_output_keys, set="validation"):
    """Creates an iterator that returns validation batches."""
    sunny_length = get_lenght_of_set(name="sunny", set=set)
    regular_length = get_lenght_of_set(name="regular", set=set)

    sunny_batches = int(np.ceil(sunny_length / float(_config().sunny_batch_size)))
    regular_batches = int(np.ceil(regular_length / float(_config().batch_size)))

    if "sunny" in required_input_keys or "segmentation" in required_output_keys:
        num_batches = max(sunny_batches, regular_batches)
    else:
        num_batches = regular_batches

    num_chunks = int(np.ceil(num_batches / float(_config().batches_per_chunk)))

    sunny_chunk_size = _config().batches_per_chunk * _config().sunny_batch_size
    regular_chunk_size = _config().batches_per_chunk * _config().batch_size

    for n in xrange(num_chunks):  # xrange: this module targets Python 2

        result = {}
        input_keys_to_do  = list(required_input_keys)  # clone
        output_keys_to_do = list(required_output_keys) # clone

        if "sunny" in input_keys_to_do or "segmentation" in output_keys_to_do:

            indices = range(n*sunny_chunk_size, (n+1)*sunny_chunk_size)

            # sunny data is drawn from the "train" split even during validation
            sunny_patient_data = get_sunny_patient_data(indices, set="train")
            result = utils.merge(result, sunny_patient_data)
            input_keys_to_do.remove("sunny")
            output_keys_to_do.remove("segmentation")

        indices = range(n*regular_chunk_size, (n+1)*regular_chunk_size)
        kaggle_data = get_patient_data(indices, input_keys_to_do, output_keys_to_do, set=set,
                                       preprocess_function=_config().preprocess_validation)

        result = utils.merge(result, kaggle_data)

        yield result 
Author: 317070 | Project: kaggle-heart | Lines: 42 | Source: data_loader.py
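To make the chunk arithmetic above concrete, here is a worked example with invented configuration values (every number is an assumption, not the project's actual settings):

import numpy as np

# Invented values for illustration only.
sunny_batch_size, batch_size, batches_per_chunk = 4, 32, 16
sunny_length, regular_length = 100, 700

sunny_batches = int(np.ceil(sunny_length / float(sunny_batch_size)))    # 25
regular_batches = int(np.ceil(regular_length / float(batch_size)))      # 22
num_batches = max(sunny_batches, regular_batches)                       # 25 (sunny keys requested)
num_chunks = int(np.ceil(num_batches / float(batches_per_chunk)))       # 2 chunks per validation pass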


Note: The utils.merge examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not republish without permission.