

Python tensorflow.gfile Code Examples

This article collects typical usage examples of the Python tensorflow.gfile module. If you are wondering what tensorflow.gfile does, how to use it, or simply want working code, the curated examples below should help. You can also explore other usage examples from the tensorflow package.


The following 15 code examples of tensorflow.gfile are shown below, sorted by popularity by default.
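Before the examples, here is a minimal sketch of the gfile entry points they rely on (TF 1.x names; under TF 2.x the same calls live at tf.io.gfile). The paths are illustrative:

import tensorflow as tf

path = '/tmp/gfile_demo.txt'  # illustrative path

# GFile behaves like open(), but also handles GCS/HDFS-style filesystems.
with tf.gfile.GFile(path, 'w') as f:
    f.write('hello gfile\n')

print(tf.gfile.Exists(path))             # True
print(tf.gfile.Glob('/tmp/gfile_*'))     # list of matching paths
print(tf.gfile.ListDirectory('/tmp'))    # directory entries
tf.gfile.MakeDirs('/tmp/gfile_demo_dir') # like mkdir -p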

Example 1: load_batch_hqjitter

# Required module: import tensorflow [as alias]
# Or: from tensorflow import gfile [as alias]
def load_batch_hqjitter(dataset_dir, patches_per_img=32, min_queue=8, BURST_LENGTH=1, batch_size=32,
                        repeats=1, height=64, width=64, degamma=1.,
                        to_shift=1., upscale=1, jitter=1, smalljitter=1):

  filenames = [os.path.join(dataset_dir, f) for f in gfile.ListDirectory(dataset_dir)]
  filename_queue = tf.train.string_input_producer(filenames)

  _, image_file = tf.WholeFileReader().read(filename_queue)
  image = tf.image.decode_image(image_file)
  patches = make_stack_hqjitter((tf.cast(image[0], tf.float32) / 255.)**degamma,
                                height, width, patches_per_img, BURST_LENGTH,
                                to_shift, upscale, jitter)
  unique = batch_size//repeats
  # Batch it up.
  patches = tf.train.shuffle_batch(
        [patches],
        batch_size=unique,
        num_threads=2,
        capacity=min_queue + 3 * batch_size,
        enqueue_many=True,
        min_after_dequeue=min_queue)

  print('PATCHES =================', patches.get_shape().as_list())

  patches = make_batch_hqjitter(patches, BURST_LENGTH, batch_size, repeats, height, width, to_shift, upscale, jitter, smalljitter)
  return patches 
Developer: google | Project: burst-denoising | Lines: 27 | Source: kpn_data_provider.py
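A hedged usage sketch: the string_input_producer above only yields filenames once TF 1.x queue runners are started, so pulling one batch from this loader would look roughly like this ('/path/to/images' is a placeholder):

patches = load_batch_hqjitter('/path/to/images', batch_size=32)

with tf.Session() as sess:
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    batch = sess.run(patches)  # one batch of patch stacks as a numpy array
    coord.request_stop()
    coord.join(threads)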

Example 2: tee_out

# Required module: import tensorflow [as alias]
# Or: from tensorflow import gfile [as alias]
# Note: used as a context manager, so this generator needs
# `from contextlib import contextmanager` applied as a decorator.
# StdoutTee/StderrTee are helpers from the project's own dependencies.
@contextmanager
def tee_out(out_dir):
    out_dir = Path(out_dir)
    stdout = tempfile.NamedTemporaryFile(delete=False)
    old_stdout = sys.stdout
    old_stderr = sys.stderr
    stderr = tempfile.NamedTemporaryFile(delete=False)
    try:
        with StdoutTee(stdout.name, buff=1) as out, StderrTee(stderr.name, buff=1) as err:
            yield
    finally:
        sys.stdout = old_stdout
        sys.stderr = old_stderr
        with gfile.GFile(out_dir / 'stdout.log', 'w') as fp:
            with gfile.GFile(stdout.name, 'r') as out:
                fp.write(out.read())
        with gfile.GFile(out_dir / 'stderr.log', 'w') as fp:
            with gfile.GFile(stderr.name, 'r') as err:
                fp.write(err.read())
        os.remove(stdout.name)
        os.remove(stderr.name) 
Developer: sharadmv | Project: parasol | Lines: 24 | Source: logging.py
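With the @contextmanager decorator restored (see the note above), usage is a plain with block: anything printed inside is echoed to the console and captured in stdout.log and stderr.log under out_dir. A sketch with an illustrative path:

with tee_out('/tmp/run0'):
    print('echoed to the console and saved to stdout.log')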

Example 3: find_files

# Required module: import tensorflow [as alias]
# Or: from tensorflow import gfile [as alias]
def find_files(path, name):
    if gfile.IsDirectory(path):
        files = gfile.ListDirectory(path)
        for p in files:
            if p == name:
                yield path / p
                return
        for p in files:
            yield from find_files(path / p, name)
    else:
        # Escape '[' and ']' via placeholder tokens so fnmatch treats them
        # as literal characters rather than character-class syntax.
        for p in gfile.ListDirectory(path.parent):
            if not fnmatch.fnmatch(path.parent / p, path.replace('[', 'asdf').replace(']', 'fdsa').replace('asdf', '[[]').replace('fdsa', '[]]')):
                continue
            p = Path(path.parent / p)
            if p == path:
                continue
            yield from find_files(p, name) 
Developer: sharadmv | Project: parasol | Lines: 19 | Source: plot_results.py

Example 4: read_MNIST

# Required module: import tensorflow [as alias]
# Or: from tensorflow import gfile [as alias]
def read_MNIST(binarize=False):
  """Reads in MNIST images.

  Args:
    binarize: whether to use the fixed binarization

  Returns:
    x_train: 50k training images
    x_valid: 10k validation images
    x_test: 10k test images

  """
  # Binary mode ('rb') is required for pickle.load under Python 3.
  with gfile.FastGFile(os.path.join(config.DATA_DIR, config.MNIST_BINARIZED), 'rb') as f:
    (x_train, _), (x_valid, _), (x_test, _) = pickle.load(f)

  if not binarize:
    # Likewise, np.load expects a binary handle.
    with gfile.FastGFile(os.path.join(config.DATA_DIR, config.MNIST_FLOAT), 'rb') as f:
      x_train = np.load(f).reshape(-1, 784)

  return x_train, x_valid, x_test 
Developer: rky0930 | Project: yolo_v2 | Lines: 22 | Source: datasets.py
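A hedged usage sketch, assuming config.DATA_DIR and the pickled split files are laid out as the module expects:

x_train, x_valid, x_test = read_MNIST(binarize=True)
print(len(x_train), len(x_valid), len(x_test))  # 50k / 10k / 10k per the docstring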

Example 5: load_batch_demosaic

# Required module: import tensorflow [as alias]
# Or: from tensorflow import gfile [as alias]
def load_batch_demosaic(BURST_LENGTH, dataset_dir, batch_size=32, height=64, width=64, degamma=1., to_shift=1., upscale=1, jitter=1):

  filenames = [os.path.join(dataset_dir, f) for f in gfile.ListDirectory(dataset_dir)]
  filename_queue = tf.train.string_input_producer(filenames)

  mosaic = None
  while mosaic is None:  # use `is None`; `==` is unreliable once mosaic is a Tensor
    _, image_file = tf.WholeFileReader().read(filename_queue)
    image = tf.image.decode_image(image_file)
    mosaic, demosaic, shift = make_stack_demosaic((tf.cast(image[0], tf.float32) / 255.)**degamma,
                                                  height, width, 128, BURST_LENGTH, to_shift, upscale, jitter)

  # Batch it up.
  mosaic, demosaic, shift = tf.train.shuffle_batch(
        [mosaic, demosaic, shift],
        batch_size=batch_size,
        num_threads=2,
        capacity=500 + 3 * batch_size,
        enqueue_many=True,
        min_after_dequeue=100)

  return mosaic, demosaic, shift 
Developer: google | Project: burst-denoising | Lines: 24 | Source: kpn_data_provider.py

Example 6: load_batch_noised

# Required module: import tensorflow [as alias]
# Or: from tensorflow import gfile [as alias]
def load_batch_noised(depth, dataset_dir, batch_size=32, height=64, width=64, degamma=1., sig_range=20.):

  filenames = [os.path.join(dataset_dir, f) for f in gfile.ListDirectory(dataset_dir)]
  filename_queue = tf.train.string_input_producer(filenames)

  noised_stack = None
  while noised_stack is None:  # use `is None`, as in the previous example
    _, image_file = tf.WholeFileReader().read(filename_queue)
    image = tf.image.decode_image(image_file)
    noised_stack, denoised_stack, sig_stack = make_stack_noised((tf.cast(image[0], tf.float32) / 255.)**degamma, height, width, depth, sig_range)

  # Batch it up.
  noised, denoised, sig = tf.train.shuffle_batch(
        [noised_stack, denoised_stack, sig_stack],
        batch_size=batch_size,
        num_threads=2,
        capacity=1024 + 3 * batch_size,
        enqueue_many=True,
        min_after_dequeue=500)

  return noised, denoised, sig 
Developer: google | Project: burst-denoising | Lines: 23 | Source: kpn_data_provider.py

Example 7: assert_images_near

# Required module: import tensorflow [as alias]
# Or: from tensorflow import gfile [as alias]
def assert_images_near(self, directory: str, only_check_size: bool = False):
    """Assert images in the golden directory match those in the test."""
    # We assume all images are pngs.
    glob = os.path.join(os.environ['TEST_SRCDIR'], 'isl/testdata', directory,
                        '*.png')
    golden_image_paths = gfile.Glob(glob)
    assert golden_image_paths, glob

    logging.info('Golden images for test match are: %s', golden_image_paths)

    for gip in golden_image_paths:
      test_image_path = os.path.join(os.environ['TEST_TMPDIR'], directory,
                                     os.path.basename(gip))
      assert gfile.Exists(
          test_image_path), "Test image doesn't exist: %s" % test_image_path

      golden = util.read_image(gip)
      test = util.read_image(test_image_path)

      if only_check_size:
        assert golden.shape == test.shape, (golden.shape, test.shape)
      else:
        np.testing.assert_allclose(golden, test, rtol=0.0001, atol=0.0001) 
Developer: google | Project: in-silico-labeling | Lines: 25 | Source: test_util.py

Example 8: infer_single_image

# Required module: import tensorflow [as alias]
# Or: from tensorflow import gfile [as alias]
def infer_single_image(gitapp: controller.GetInputTargetAndPredictedParameters):
  """Predicts the labels for a single image."""
  if not gfile.Exists(output_directory()):
    gfile.MakeDirs(output_directory())

  if FLAGS.infer_channel_whitelist is not None:
    infer_channel_whitelist = FLAGS.infer_channel_whitelist.split(',')
  else:
    infer_channel_whitelist = None

  while True:
    infer.infer(
        gitapp=gitapp,
        restore_directory=FLAGS.restore_directory or train_directory(),
        output_directory=output_directory(),
        extract_patch_size=CONCORDANCE_EXTRACT_PATCH_SIZE,
        stitch_stride=CONCORDANCE_STITCH_STRIDE,
        infer_size=FLAGS.infer_size,
        channel_whitelist=infer_channel_whitelist,
        simplify_error_panels=FLAGS.infer_simplify_error_panels,
    )
    if not FLAGS.infer_continuously:
      break 
Developer: google | Project: in-silico-labeling | Lines: 25 | Source: launch.py

Example 9: _gen_example

# Required module: import tensorflow [as alias]
# Or: from tensorflow import gfile [as alias]
def _gen_example(i, all_examples):
  """Saves one example to file.  Also adds it to all_examples dict."""
  example = dataloader.get_example_with_index(i)
  if not example:
    return
  image_seq_stack = _stack_image_seq(example['image_seq'])
  example.pop('image_seq', None)  # Free up memory.
  intrinsics = example['intrinsics']
  fx = intrinsics[0, 0]
  fy = intrinsics[1, 1]
  cx = intrinsics[0, 2]
  cy = intrinsics[1, 2]
  save_dir = os.path.join(FLAGS.data_dir, example['folder_name'])
  if not gfile.Exists(save_dir):
    gfile.MakeDirs(save_dir)
  img_filepath = os.path.join(save_dir, '%s.jpg' % example['file_name'])
  # scipy.misc.imsave was removed in SciPy 1.2; imageio.imwrite is the usual replacement.
  scipy.misc.imsave(img_filepath, image_seq_stack.astype(np.uint8))
  cam_filepath = os.path.join(save_dir, '%s_cam.txt' % example['file_name'])
  example['cam'] = '%f,0.,%f,0.,%f,%f,0.,0.,1.' % (fx, cx, fy, cy)
  with open(cam_filepath, 'w') as cam_f:
    cam_f.write(example['cam'])

  key = example['folder_name'] + '_' + example['file_name']
  all_examples[key] = example 
Developer: generalized-iou | Project: g-tensorflow-models | Lines: 26 | Source: gen_data.py

Example 10: write_flagfile

# Required module: import tensorflow [as alias]
# Or: from tensorflow import gfile [as alias]
def write_flagfile(flags_, ncf_dataset):
  """Write flagfile to begin async data generation."""
  if ncf_dataset.deterministic:
    flags_["seed"] = stat_utils.random_int32()

  # We write to a temp file then atomically rename it to the final file,
  # because writing directly to the final file can cause the data generation
  # async process to read a partially written JSON file.
  flagfile_temp = os.path.join(ncf_dataset.cache_paths.cache_root,
                               rconst.FLAGFILE_TEMP)
  tf.logging.info("Preparing flagfile for async data generation in {} ..."
                  .format(flagfile_temp))
  with tf.gfile.Open(flagfile_temp, "w") as f:
    for k, v in six.iteritems(flags_):
      f.write("--{}={}\n".format(k, v))
  flagfile = os.path.join(ncf_dataset.cache_paths.cache_root, rconst.FLAGFILE)
  tf.gfile.Rename(flagfile_temp, flagfile)
  tf.logging.info(
      "Wrote flagfile for async data generation in {}.".format(flagfile)) 
Developer: isobar-us | Project: multilabel-image-classification-tensorflow | Lines: 21 | Source: data_preprocessing.py
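The write-then-rename trick above is a general pattern: because the rename is atomic on most filesystems, readers never observe a half-written file. A minimal standalone sketch with tf.gfile (the helper name is an illustration, not part of the original project):

import tensorflow as tf

def atomic_write(path, text):
    # Write to a temporary name, then atomically swap it into place.
    tmp = path + '.tmp'
    with tf.gfile.Open(tmp, 'w') as f:
        f.write(text)
    tf.gfile.Rename(tmp, path, overwrite=True)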

Example 11: sample_expert_paths

# Required module: import tensorflow [as alias]
# Or: from tensorflow import gfile [as alias]
def sample_expert_paths(num, env_str, env_spec,
                        load_trajectories_file=None):
  """Sample a number of expert paths randomly."""
  if load_trajectories_file is not None:
    if not gfile.Exists(load_trajectories_file):
      assert False, 'trajectories file %s does not exist' % load_trajectories_file

    with gfile.GFile(load_trajectories_file, 'rb') as f:  # binary mode for pickle
      episodes = pickle.load(f)
      episodes = random.sample(episodes, num)
      return [ep[1:] for ep in episodes]

  return [sample_expert_path(env_str, env_spec)
          for _ in xrange(num)]  # xrange is Python 2; use range (or six.moves.xrange) on Python 3
Developer: ringringyi | Project: DOTA_models | Lines: 16 | Source: expert_paths.py

Example 12: path_exists

# Required module: import tensorflow [as alias]
# Or: from tensorflow import gfile [as alias]
def path_exists(path):
  return tf.gfile.Exists(path) 
Developer: Octavian-ai | Project: shortest-path | Lines: 4 | Source: file.py

Example 13: __enter__

# Required module: import tensorflow [as alias]
# Or: from tensorflow import gfile [as alias]
def __enter__(self):
    self.file = tf.gfile.GFile(self.file_path, self.open_str)
    return self.file 
Developer: Octavian-ai | Project: shortest-path | Lines: 5 | Source: file.py
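Examples 12 and 13 come from the same small file helper, but the surrounding class is not shown. A minimal sketch consistent with the __enter__ above might look like this (the class name and the __exit__ method are assumptions, not the project's actual code):

import tensorflow as tf

class GFileContext:  # hypothetical name
    def __init__(self, file_path, open_str='r'):
        self.file_path = file_path
        self.open_str = open_str

    def __enter__(self):
        self.file = tf.gfile.GFile(self.file_path, self.open_str)
        return self.file

    def __exit__(self, exc_type, exc_value, traceback):
        self.file.close()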

Example 14: train

# Required module: import tensorflow [as alias]
# Or: from tensorflow import gfile [as alias]
def train(self, rollouts, train_step, out_dir=None):
        self.fit_dynamics(rollouts, train_step)
        if train_step > 0:
            self.actual_impr = self.prev_cost_estimate - self.estimate_cost()
            self.step_adjust()
        self.prev_cost_estimate = self.estimate_cost()
        self.policy_params = self.tr_update()
        self.predicted_impr = self.prev_cost_estimate - self.estimate_cost()
        with gfile.GFile(out_dir / 'policy' / '{}.pkl'.format(train_step), 'wb') as fp:
            pickle.dump(self.policy_params, fp) 
Developer: sharadmv | Project: parasol | Lines: 12 | Source: lqrflm.py

Example 15: dump_weights

# Required module: import tensorflow [as alias]
# Or: from tensorflow import gfile [as alias]
def dump_weights(self, epoch, out_dir):
        if out_dir is not None:
            with gfile.GFile(out_dir / "weights" / ("model-%s.pkl" % epoch), 'wb') as fp:
                pickle.dump(self, fp) 
Developer: sharadmv | Project: parasol | Lines: 6 | Source: vae.py


Note: The tensorflow.gfile examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright in the source code remains with the original authors. Consult each project's license before redistributing or reusing the code; do not reproduce this compilation without permission.