

Python gfile.MakeDirs Method Code Examples

This article collects typical usage examples of the Python method tensorflow.gfile.MakeDirs. If you are wondering what gfile.MakeDirs does and how to use it, the curated examples below should help. You can also explore further usage examples from the tensorflow.gfile module that this method belongs to.


The sections below present 15 code examples of gfile.MakeDirs, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help surface better Python code examples.
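Before diving into the examples, here is a minimal sketch of the basic call pattern (the path below is purely hypothetical). In TensorFlow 1.x, gfile.MakeDirs creates the target directory together with any missing parent directories, and it succeeds quietly if the directory already exists, which is why the gfile.Exists guard seen in several of the examples below is an optional optimization rather than a requirement.

import os
from tensorflow import gfile  # TensorFlow 1.x; in 2.x the equivalent is tf.io.gfile

# Hypothetical output path; gfile also accepts remote filesystem paths
# such as GCS (gs://...) when the corresponding backend is available.
output_path = '/tmp/demo/run1/results.npz'

# Recursively create the parent directory; no error if it already exists.
gfile.MakeDirs(os.path.dirname(output_path))

assert gfile.Exists(os.path.dirname(output_path))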

Example 1: save_subvolume

# Required import: from tensorflow import gfile [as alias]
# Or alternatively: from tensorflow.gfile import MakeDirs [as alias]
def save_subvolume(labels, origins, output_path, **misc_items):
  """Saves an FFN subvolume.

  Args:
    labels: 3d zyx number array with the segment labels
    origins: dictionary mapping segment ID to origin information
    output_path: path at which to save the segmentation in the form
        of a .npz file
    **misc_items: (optional) additional values to save
        in the output file
  """
  seg = segmentation.reduce_id_bits(labels)
  gfile.MakeDirs(os.path.dirname(output_path))
  with atomic_file(output_path) as fd:
    np.savez_compressed(fd,
                        segmentation=seg,
                        origins=origins,
                        **misc_items) 
Author: google, Project: ffn, Lines: 20, Source file: storage.py

Example 2: save_checkpoint

# Required import: from tensorflow import gfile [as alias]
# Or alternatively: from tensorflow.gfile import MakeDirs [as alias]
def save_checkpoint(self, path):
    """Saves a inference checkpoint to `path`."""
    self.log_info('Saving inference checkpoint to %s.', path)
    with timer_counter(self.counters, 'save_checkpoint'):
      gfile.MakeDirs(os.path.dirname(path))
      with storage.atomic_file(path) as fd:
        seed_policy_state = None
        if self.seed_policy is not None:
          seed_policy_state = self.seed_policy.get_state()

        np.savez_compressed(fd,
                            movement_policy=self.movement_policy.get_state(),
                            segmentation=self.segmentation,
                            seg_qprob=self.seg_prob,
                            seed=self.seed,
                            origins=self.origins,
                            overlaps=self.overlaps,
                            history=np.array(self.history),
                            history_deleted=np.array(self.history_deleted),
                            seed_policy_state=seed_policy_state,
                            counters=self.counters.dumps())
    self.log_info('Inference checkpoint saved.') 
Author: google, Project: ffn, Lines: 24, Source file: inference.py

Example 3: main

# Required import: from tensorflow import gfile [as alias]
# Or alternatively: from tensorflow.gfile import MakeDirs [as alias]
def main(unused_argv):
  request = inference_flags.request_from_flags()

  if not gfile.Exists(request.segmentation_output_dir):
    gfile.MakeDirs(request.segmentation_output_dir)

  bbox = bounding_box_pb2.BoundingBox()
  text_format.Parse(FLAGS.bounding_box, bbox)

  runner = inference.Runner()
  runner.start(request)
  runner.run((bbox.start.z, bbox.start.y, bbox.start.x),
             (bbox.size.z, bbox.size.y, bbox.size.x))

  counter_path = os.path.join(request.segmentation_output_dir, 'counters.txt')
  if not gfile.Exists(counter_path):
    runner.counters.dump(counter_path) 
Author: google, Project: ffn, Lines: 19, Source file: run_inference.py

Example 4: main

# Required import: from tensorflow import gfile [as alias]
# Or alternatively: from tensorflow.gfile import MakeDirs [as alias]
def main(_argv):
    # load flags from config file
    model_configs = load_from_config_path(FLAGS.config_paths)
    # replace parameters in configs_file with tf FLAGS
    model_configs = update_configs_from_flags(model_configs, FLAGS, TRAIN_ARGS.keys())
    model_dir = model_configs["model_dir"]
    if not gfile.Exists(model_dir):
        gfile.MakeDirs(model_dir)

    if "CUDA_VISIBLE_DEVICES" not in os.environ.keys():
        raise OSError("need CUDA_VISIBLE_DEVICES environment variable")
    tf.logging.info("CUDA_VISIBLE_DEVICES={}".format(os.environ["CUDA_VISIBLE_DEVICES"]))

    training_runner = TrainingExperiment(
        model_configs=model_configs)

    training_runner.run() 
Author: zhaocq-nlp, Project: NJUNMT-tf, Lines: 19, Source file: train.py

Example 5: get_target_path

# Required import: from tensorflow import gfile [as alias]
# Or alternatively: from tensorflow.gfile import MakeDirs [as alias]
def get_target_path(request, point_num):
  """Computes the output path for a specific point.

  Args:
    request: ResegmentationRequest proto
    point_num: index of the point of interest within the proto

  Returns:
    path to the output file where resegmentation results will be saved, or
    None if the output file already exists
  # Prepare the output directory.
  output_dir = request.output_directory

  id_a = request.points[point_num].id_a
  id_b = request.points[point_num].id_b

  if request.subdir_digits > 1:
    m = hashlib.md5()
    # hashlib requires bytes rather than str under Python 3.
    m.update(str(id_a).encode('utf-8'))
    m.update(str(id_b).encode('utf-8'))
    output_dir = os.path.join(output_dir, m.hexdigest()[:request.subdir_digits])
  gfile.MakeDirs(output_dir)

  # Terminate early if the output already exists.
  dp = request.points[point_num].point
  target_path = os.path.join(output_dir, '%d-%d_at_%d_%d_%d.npz' % (
      id_a, id_b, dp.x, dp.y, dp.z))
  if gfile.Exists(target_path):
    logging.info('Output already exists: %s', target_path)
    return

  return target_path 
Author: google, Project: ffn, Lines: 34, Source file: resegmentation.py

Example 6: save_flags

# Required import: from tensorflow import gfile [as alias]
# Or alternatively: from tensorflow.gfile import MakeDirs [as alias]
def save_flags():
  gfile.MakeDirs(FLAGS.train_dir)
  with gfile.Open(os.path.join(FLAGS.train_dir,
                               'flags.%d' % time.time()), 'w') as f:
    for mod, flag_list in FLAGS.flags_by_module_dict().items():
      if (mod.startswith('google3.research.neuromancer.tensorflow') or
          mod.startswith('/')):
        for flag in flag_list:
          f.write('%s\n' % flag.serialize()) 
Author: google, Project: ffn, Lines: 11, Source file: train.py

Example 7: dump_object

# Required import: from tensorflow import gfile [as alias]
# Or alternatively: from tensorflow.gfile import MakeDirs [as alias]
def dump_object(object_to_dump, output_path):
  """Pickle the object and save to the output_path.

  Args:
    object_to_dump: Python object to be pickled
    output_path: (string) output path which can be Google Cloud Storage

  Returns:
    None
  """

  if not gfile.Exists(output_path):
    gfile.MakeDirs(os.path.dirname(output_path))
  with gfile.Open(output_path, 'w') as wf:
    joblib.dump(object_to_dump, wf) 
Author: GoogleCloudPlatform, Project: cloudml-samples, Lines: 17, Source file: utils.py

Example 8: dump

# Required import: from tensorflow import gfile [as alias]
# Or alternatively: from tensorflow.gfile import MakeDirs [as alias]
def dump(model_config, output_dir):
        """ Dumps model configurations.

        Args:
            model_config: A dict.
            output_dir: A string, the output directory.
        """
        model_config_filename = os.path.join(output_dir, Constants.MODEL_CONFIG_YAML_FILENAME)
        if not gfile.Exists(output_dir):
            gfile.MakeDirs(output_dir)
        with open_file(model_config_filename, mode="w") as file:
            yaml.dump(model_config, file) 
Author: zhaocq-nlp, Project: NJUNMT-tf, Lines: 14, Source file: configurable.py

Example 9: begin

# Required import: from tensorflow import gfile [as alias]
# Or alternatively: from tensorflow.gfile import MakeDirs [as alias]
def begin(self):
    super(DumpAttention, self).begin()
    gfile.MakeDirs(self.params["output_dir"]) 
Author: akanimax, Project: natural-language-summary-generation-from-structured-data, Lines: 5, Source file: dump_attention.py

Example 10: dump

# Required import: from tensorflow import gfile [as alias]
# Or alternatively: from tensorflow.gfile import MakeDirs [as alias]
def dump(self, model_dir):
    """Dumps the options to a file in the model directory.

    Args:
      model_dir: Path to the model directory. The options will be
      dumped into a file in this directory.
    """
    gfile.MakeDirs(model_dir)
    options_dict = {
        "model_class": self.model_class,
        "model_params": self.model_params,
    }

    with gfile.GFile(TrainOptions.path(model_dir), "wb") as file:
      file.write(json.dumps(options_dict).encode("utf-8")) 
Author: akanimax, Project: natural-language-summary-generation-from-structured-data, Lines: 17, Source file: utils.py

Example 11: after_run

# Required import: from tensorflow import gfile [as alias]
# Or alternatively: from tensorflow.gfile import MakeDirs [as alias]
def after_run(self, _run_context, run_values):
    if not self.is_chief or self._done:
      return

    step_done = run_values.results
    if self._active:
      tf.logging.info("Captured full trace at step %s", step_done)
      # Create output directory
      gfile.MakeDirs(self._output_dir)

      # Save run metadata
      trace_path = os.path.join(self._output_dir, "run_meta")
      with gfile.GFile(trace_path, "wb") as trace_file:
        trace_file.write(run_values.run_metadata.SerializeToString())
        tf.logging.info("Saved run_metadata to %s", trace_path)

      # Save timeline
      timeline_path = os.path.join(self._output_dir, "timeline.json")
      with gfile.GFile(timeline_path, "w") as timeline_file:
        tl_info = timeline.Timeline(run_values.run_metadata.step_stats)
        tl_chrome = tl_info.generate_chrome_trace_format(show_memory=True)
        timeline_file.write(tl_chrome)
        tf.logging.info("Saved timeline to %s", timeline_path)

      # Save tfprof op log
      tf.contrib.tfprof.tfprof_logger.write_op_log(
          graph=tf.get_default_graph(),
          log_dir=self._output_dir,
          run_meta=run_values.run_metadata)
      tf.logging.info("Saved op log to %s", self._output_dir)
      self._active = False
      self._done = True

    self._active = (step_done >= self.params["step"]) 
Author: akanimax, Project: natural-language-summary-generation-from-structured-data, Lines: 36, Source file: hooks.py

Example 12: begin

# Required import: from tensorflow import gfile [as alias]
# Or alternatively: from tensorflow.gfile import MakeDirs [as alias]
def begin(self):
    self._iter_count = 0
    self._global_step = tf.train.get_global_step()
    self._pred_dict = graph_utils.get_dict_from_collection("predictions")
    # Create the sample directory
    if self._sample_dir is not None:
      gfile.MakeDirs(self._sample_dir) 
Author: akanimax, Project: natural-language-summary-generation-from-structured-data, Lines: 9, Source file: hooks.py

Example 13: main

# Required import: from tensorflow import gfile [as alias]
# Or alternatively: from tensorflow.gfile import MakeDirs [as alias]
def main(_argv):
  """Main functions. Runs all anaylses."""
  # pylint: disable=W0212
  tfprof_logger._merge_default_with_oplog = merge_default_with_oplog

  FLAGS.model_dir = os.path.abspath(os.path.expanduser(FLAGS.model_dir))
  output_dir = os.path.join(FLAGS.model_dir, "profile")
  gfile.MakeDirs(output_dir)

  run_meta, graph, op_log = load_metadata(FLAGS.model_dir)

  param_arguments = [
      param_analysis_options(output_dir),
      micro_anaylsis_options(output_dir),
      flops_analysis_options(output_dir),
      device_analysis_options(output_dir),
  ]

  for tfprof_cmd, params in param_arguments:
    model_analyzer.print_model_analysis(
        graph=graph,
        run_meta=run_meta,
        op_log=op_log,
        tfprof_cmd=tfprof_cmd,
        tfprof_options=params)

    if params["dump_to_file"] != "":
      print("Wrote {}".format(params["dump_to_file"])) 
Author: pandegroup, Project: reaction_prediction_seq2seq, Lines: 30, Source file: profile.py

Example 14: extract_holdout_model

# Required import: from tensorflow import gfile [as alias]
# Or alternatively: from tensorflow.gfile import MakeDirs [as alias]
def extract_holdout_model(model):
    game_output_path = OUTPUT_PATH.format(FLAGS.base_dir, 'games', model)
    move_output_path = OUTPUT_PATH.format(FLAGS.base_dir, 'moves', model)
    # os.path.dirname (not basename) yields the directory that must exist.
    gfile.MakeDirs(os.path.dirname(game_output_path))
    gfile.MakeDirs(os.path.dirname(move_output_path))

    with gfile.GFile(game_output_path, 'w') as game_f, \
            gfile.GFile(move_output_path, 'w') as move_f:
        for sgf_name in tqdm(get_sgf_names(model)):
            game_data, move_data = extract_data(sgf_name)
            game_f.write(json.dumps(game_data) + '\n')
            for move_datum in move_data:
                move_f.write(json.dumps(move_datum) + '\n') 
Author: mlperf, Project: training, Lines: 15, Source file: prepare_bigquery.py

Example 15: _prepare

# Required import: from tensorflow import gfile [as alias]
# Or alternatively: from tensorflow.gfile import MakeDirs [as alias]
def _prepare(self):
        """ Prepares for evaluation.

        Builds the model with reuse=True, mode=EVAL and preprocesses
        data file(s).
        """
        features_file = self._dataset["features_file"]
        labels_file = self._dataset["labels_file"]
        vocab_source = self._dataset["vocab_source"]
        vocab_target = self._dataset["vocab_target"]
        self._model_configs = update_infer_params(  # update inference parameters
            self._model_configs,
            beam_size=self._beam_size,
            maximum_labels_length=self._maximum_labels_length,
            length_penalty=self._length_penalty)
        estimator_spec = model_fn(model_configs=self._model_configs,
                                  mode=ModeKeys.INFER,
                                  vocab_source=vocab_source,
                                  vocab_target=vocab_target,
                                  name=self._model_name, reuse=True,
                                  verbose=False)
        self._predict_ops = estimator_spec.predictions
        text_inputter = TextLineInputter(
            line_readers=LineReader(
                data=features_file,
                preprocessing_fn=lambda x: vocab_source.convert_to_idlist(x)),
            padding_id=vocab_source.pad_id,
            batch_size=self._batch_size)
        self._infer_data = text_inputter.make_feeding_data(
            input_fields=estimator_spec.input_fields)
        tmp_trans_dir = os.path.join(self._model_configs["model_dir"], Constants.TMP_TRANS_DIRNAME)
        if not gfile.Exists(tmp_trans_dir):
            gfile.MakeDirs(tmp_trans_dir)
        self._tmp_trans_file_prefix = os.path.join(tmp_trans_dir, Constants.TMP_TRANS_FILENAME_PREFIX)
        self._read_ckpt_bleulog()
        # load references
        self._references = []
        for rfile in access_multiple_files(labels_file):
            with open_file(rfile) as fp:
                if self._char_level:
                    self._references.append(to_chinese_char(fp.readlines()))
                else:
                    self._references.append(fp.readlines())
        self._references = list(map(list, zip(*self._references)))
        with open_file(features_file) as fp:
            self._sources = fp.readlines()
        self._bad_count = 0
        self._best_bleu_score = 0. 
Author: zhaocq-nlp, Project: NJUNMT-tf, Lines: 50, Source file: text_metrics_spec.py


Note: The tensorflow.gfile.MakeDirs method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors. For distribution and use, please refer to each project's license. Do not reproduce without permission.