

Python gfile.Open Method Code Examples

This article collects typical code examples of the tensorflow.gfile.Open method in Python. If you are wondering what exactly gfile.Open does, how to call it, or what real-world usage looks like, the selected examples below may help. You can also explore further usage examples from the module it belongs to, tensorflow.gfile.


The following presents a total of 15 code examples of the gfile.Open method, sorted by popularity by default.
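Before the individual examples, a minimal sketch of the basic pattern may help orient readers: gfile.Open returns a file-like object and accepts both local paths and Google Cloud Storage (gs://) paths, so it can be used much like the built-in open. The path in this sketch is a placeholder, not taken from any example in this article.

# Minimal sketch of the gfile.Open pattern; the gs:// path is hypothetical.
from tensorflow import gfile

def read_text(path):
  # Works for local files as well as objects in GCS buckets.
  with gfile.Open(path, 'r') as f:
    return f.read()

# e.g. read_text('gs://my-bucket/data/example.txt')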

Example 1: read_df_from_gcs

# Module to import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Open [as alias]
def read_df_from_gcs(file_pattern):
  """Read data from Google Cloud Storage, split into train and validation sets.

  Assume that the data on GCS is in csv format without header.
  The column names will be provided through metadata

  Args:
    file_pattern: (string) pattern of the files containing training data.
    For example: [gs://bucket/folder_name/prefix]

  Returns:
    pandas.DataFrame
  """

  # Read each file matching the pattern directly through gfile.
  df_list = []

  for filepath in gfile.Glob(file_pattern):
    with gfile.Open(filepath, 'r') as f:
      # Assume there is no header
      df_list.append(pd.read_csv(f, names=metadata.CSV_COLUMNS))

  data_df = pd.concat(df_list)

  return data_df 
Developer: GoogleCloudPlatform, Project: cloudml-samples, Lines of code: 27, Source file: utils.py
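A possible call site for this helper, shown as a hedged sketch: the GCS pattern is a placeholder, and metadata.CSV_COLUMNS is assumed to be defined elsewhere in the same project, as the snippet above implies.

# Hypothetical usage; the gs:// pattern is a placeholder.
train_df = read_df_from_gcs('gs://my-bucket/training-data/part-*.csv')
print(train_df.head())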

Example 2: count_file

# Module to import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Open [as alias]
def count_file(self, path, verbose=False, add_eos=False):
        if verbose: print('counting file {} ...'.format(path))
        assert exists(path)

        sents = []
        with open(path, 'r') as f:
            for idx, line in enumerate(f):
                if verbose and idx > 0 and idx % 500000 == 0:
                    print('  line {}'.format(idx))
                symbols = self.tokenize(line, add_eos=True)
                self.counter.update(symbols)
                sents.append(symbols)

        return sents

    # Updates the tokens in the counter
Developer: GaoPeng97, Project: transformer-xl-chinese, Lines of code: 18, Source file: vocabulary.py
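The [as alias] note above indicates that the plain open and exists calls in this snippet are tensorflow.gfile.Open and gfile.Exists imported under aliases, which is why the example appears in this article. A hedged usage sketch follows; the corpus path and the vocab object are placeholders not shown in the snippet.

# Hypothetical usage; 'train.txt' and the Vocab instance are placeholders.
vocab = Vocab()
sents = vocab.count_file('train.txt', verbose=True)
print('counted {} sentences'.format(len(sents)))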

Example 3: encode_file

# Module to import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Open [as alias]
def encode_file(self, path, ordered=False, verbose=False,
                    add_double_eos=False):
        if verbose: print('encoding file {} ...'.format(path))
        assert exists(path)
        encoded = []
        with open(path, 'r') as f:
            for idx, line in enumerate(f):
                if verbose and idx > 0 and idx % 500000 == 0:
                    print('  line {}'.format(idx))
                symbols = self.tokenize(line, add_eos=True, add_double_eos=add_double_eos)

                encoded.append(self.convert_to_nparray(symbols))

        if ordered:
            encoded = np.concatenate(encoded)

        return encoded

Developer: GaoPeng97, Project: transformer-xl-chinese, Lines of code: 21, Source file: vocabulary.py

Example 4: count_file

# Module to import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Open [as alias]
def count_file(self, path, verbose=False, add_eos=False):
        if verbose: print('counting file {} ...'.format(path))
        assert exists(path)

        sents = []
        with open(path, 'r') as f:
            for idx, line in enumerate(f):
                if verbose and idx > 0 and idx % 500000 == 0:
                    print('  line {}'.format(idx))
                symbols = self.tokenize(line, add_eos=add_eos)
                self.counter.update(symbols)
                sents.append(symbols)

        return sents

    # Updates the tokens in the counter
Developer: GaoPeng97, Project: transformer-xl-chinese, Lines of code: 18, Source file: old_vocabulary.py

Example 5: encode_file

# Module to import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Open [as alias]
def encode_file(self, path, ordered=False, verbose=False, add_eos=True,
          add_double_eos=False):
    if verbose: print('encoding file {} ...'.format(path))
    assert exists(path)
    encoded = []
    with open(path, 'r') as f:
      for idx, line in enumerate(f):
        if verbose and idx > 0 and idx % 500000 == 0:
          print('  line {}'.format(idx))
        symbols = self.tokenize(line, add_eos=add_eos,
          add_double_eos=add_double_eos)
        encoded.append(self.convert_to_nparray(symbols))

    if ordered:
      encoded = np.concatenate(encoded)

    return encoded 
Developer: kimiyoung, Project: transformer-xl, Lines of code: 19, Source file: vocabulary.py
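A hedged usage sketch (the file name and vocab object are placeholders): with ordered=True the per-line arrays are concatenated into one flat token stream, which is the form a language-model data pipeline typically consumes.

# Hypothetical usage; 'valid.txt' is a placeholder path.
valid_ids = vocab.encode_file('valid.txt', ordered=True, verbose=True)
print(valid_ids.shape)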

Example 6: __init__

# Module to import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Open [as alias]
def __init__(self, latent_factor_indices=None):
    # By default, all factors (including shape) are considered ground truth
    # factors.
    if latent_factor_indices is None:
      latent_factor_indices = list(range(6))
    self.latent_factor_indices = latent_factor_indices
    self.data_shape = [64, 64, 1]
    # Load the data so that we can sample from it.
    with gfile.Open(DSPRITES_PATH, "rb") as data_file:
      # Data was saved originally using python2, so we need to set the encoding.
      data = np.load(data_file, encoding="latin1", allow_pickle=True)
      self.images = np.array(data["imgs"])
      self.factor_sizes = np.array(
          data["metadata"][()]["latents_sizes"], dtype=np.int64)
    self.full_factor_sizes = [1, 3, 6, 40, 32, 32]
    self.factor_bases = np.prod(self.factor_sizes) / np.cumprod(
        self.factor_sizes)
    self.state_space = util.SplitDiscreteStateSpace(self.factor_sizes,
                                                    self.latent_factor_indices) 
Developer: google-research, Project: disentanglement_lib, Lines of code: 21, Source file: dsprites.py
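Worth noting in this example: np.load accepts the file-like object returned by gfile.Open, so the .npz archive referenced by DSPRITES_PATH can live on GCS and be read without copying it to local disk first; the 'rb' mode and the latin1 encoding are needed because the archive is binary and, as the comment above says, was written with Python 2.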

Example 7: run_real_data

# Module to import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Open [as alias]
def run_real_data():
    print("Starting on real data.")
    metadata_path = "{}_train_metadata.pkl".format(_PREFIX)
    with Open(metadata_path, "rb") as f:
        train_metadata = pickle.load(f)
    num_items = train_metadata.num_cols
    print("num_items:", num_items)

    st = timeit.default_timer()
    sampler_cache = _PREFIX + "cached_sampler.pkl"
    if os.path.exists(sampler_cache):
      print("Using cache: {}".format(sampler_cache))
      with open(sampler_cache, "rb") as f:
        sampler, pos_users, pos_items = pickle.load(f)
    else:
      sampler, pos_users, pos_items = process_data(num_items=num_items, min_items_per_user=1, iter_fn=iter_data)
      with open(sampler_cache, "wb") as f:
        pickle.dump([sampler, pos_users, pos_items], f, pickle.HIGHEST_PROTOCOL)
    preproc_time = timeit.default_timer() - st
    num_users = len(sampler.num_regions)
    print("num_users:", num_users)
    print("Preprocessing complete: {:.1f} sec".format(preproc_time))
    print()

    _ = profile_sampler(sampler=sampler, batch_size=int(1e6), num_batches=1000, num_users=num_users) 
Developer: mlperf, Project: training, Lines of code: 27, Source file: alias_generator.py

Example 8: run

# Module to import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Open [as alias]
async def run(*cmd):
  """Run the given subprocess command in a coroutine.

  Args:
    *cmd: the command to run and its arguments.

  Returns:
    The output that the command wrote to stdout as a list of strings, one line
    per element (stderr output is piped to stdout).

  Raises:
    RuntimeError: if the command returns a non-zero result.
  """

  stdout = await checked_run(*cmd)

  log_path = os.path.join(FLAGS.base_dir, get_cmd_name(cmd) + '.log')
  with gfile.Open(log_path, 'a') as f:
    f.write(expand_cmd_str(cmd))
    f.write('\n')
    f.write(stdout)
    f.write('\n')

  # Split stdout into lines.
  return stdout.split('\n') 
Developer: mlperf, Project: training, Lines of code: 27, Source file: reference_implementation.py

Example 9: train

# Module to import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Open [as alias]
async def train(state, tf_records):
  """Run training and write a new model to the fsdb models_dir.

  Args:
    state: the RL loop State instance.
    tf_records: a list of paths to TensorFlow records to train on.
  """

  model_path = os.path.join(fsdb.models_dir(), state.train_model_name)
  await run(
      'python3', 'train.py', *tf_records,
      '--flagfile={}'.format(os.path.join(FLAGS.flags_dir, 'train.flags')),
      '--work_dir={}'.format(fsdb.working_dir()),
      '--export_path={}'.format(model_path),
      '--training_seed={}'.format(state.seed),
      '--freeze=true')
  # Append the time elapsed from when the RL was started to when this model
  # was trained.
  elapsed = time.time() - state.start_time
  timestamps_path = os.path.join(fsdb.models_dir(), 'train_times.txt')
  with gfile.Open(timestamps_path, 'a') as f:
    print('{:.3f} {}'.format(elapsed, state.train_model_name), file=f) 
Developer: mlperf, Project: training, Lines of code: 24, Source file: reference_implementation.py

Example 10: threshold_segmentation

# Module to import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Open [as alias]
def threshold_segmentation(segmentation_dir, corner, labels, threshold):
  prob_path = object_prob_path(segmentation_dir, corner)
  if not gfile.Exists(prob_path):
    prob_path = legacy_object_prob_path(segmentation_dir, corner)
    if not gfile.Exists(prob_path):
      raise ValueError('Cannot find probability map %s' % prob_path)

  with gfile.Open(prob_path, 'rb') as f:
    data = np.load(f)
    if 'qprob' not in data:
      raise ValueError('Invalid FFN probability map.')

    prob = dequantize_probability(data['qprob'])
    labels[prob < threshold] = 0 
Developer: google, Project: ffn, Lines of code: 16, Source file: storage.py

Example 11: load_origins

# Module to import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Open [as alias]
def load_origins(segmentation_dir, corner):
  target_path = get_existing_subvolume_path(segmentation_dir, corner, False)
  if target_path is None:
    raise ValueError('Segmentation not found: %s, %s' % (segmentation_dir,
                                                         corner))

  with gfile.Open(target_path, 'rb') as f:
    data = np.load(f)
    return data['origins'].item() 
Developer: google, Project: ffn, Lines of code: 11, Source file: storage.py

Example 12: restore_checkpoint

# Module to import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Open [as alias]
def restore_checkpoint(self, path):
    """Restores state from the checkpoint at `path`."""
    self.log_info('Restoring inference checkpoint: %s', path)
    with gfile.Open(path, 'rb') as f:
      data = np.load(f)

      self.segmentation[:] = data['segmentation']
      self.seed[:] = data['seed']
      self.seg_prob[:] = data['seg_qprob']
      self.history_deleted = list(data['history_deleted'])
      self.history = list(data['history'])
      self.origins = data['origins'].item()
      if 'overlaps' in data:
        self.overlaps = data['overlaps'].item()

      segmented_voxels = np.sum(self.segmentation != 0)
      self.counters['voxels-segmented'].Set(segmented_voxels)
      self._max_id = np.max(self.segmentation)

      self.movement_policy.restore_state(data['movement_policy'])

      seed_policy_state = data['seed_policy_state']
      # When restoring the state of a previously unused Canvas, the seed
      # policy will not be defined. We just save the seed policy state here
      # for future use in .segment_all().
      self._seed_policy_state = seed_policy_state

      self.counters.loads(data['counters'].item())

    self.log_info('Inference checkpoint restored.') 
Developer: google, Project: ffn, Lines of code: 32, Source file: inference.py

Example 13: save_flags

# Module to import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Open [as alias]
def save_flags():
  gfile.MakeDirs(FLAGS.train_dir)
  with gfile.Open(os.path.join(FLAGS.train_dir,
                               'flags.%d' % time.time()), 'w') as f:
    for mod, flag_list in FLAGS.flags_by_module_dict().items():
      if (mod.startswith('google3.research.neuromancer.tensorflow') or
          mod.startswith('/')):
        for flag in flag_list:
          f.write('%s\n' % flag.serialize()) 
Developer: google, Project: ffn, Lines of code: 11, Source file: train.py

Example 14: main

# Module to import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Open [as alias]
def main(unused_argv):
  logging.set_verbosity(tf.logging.INFO)

  if not FLAGS.json_prediction_files_pattern:
    raise ValueError(
        "The flag --json_prediction_files_pattern must be specified.")

  if not FLAGS.csv_output_file:
    raise ValueError("The flag --csv_output_file must be specified.")

  logging.info("Looking for prediction files with pattern: %s", 
               FLAGS.json_prediction_files_pattern)

  file_paths = gfile.Glob(FLAGS.json_prediction_files_pattern)  
  logging.info("Found files: %s", file_paths)

  logging.info("Writing submission file to: %s", FLAGS.csv_output_file)
  with gfile.Open(FLAGS.csv_output_file, "w+") as output_file:
    output_file.write(get_csv_header())

    for file_path in file_paths:
      logging.info("processing file: %s", file_path)

      with gfile.Open(file_path) as input_file:

        for line in input_file: 
          json_data = json.loads(line)
          output_file.write(to_csv_row(json_data))

    output_file.flush()
  logging.info("done") 
Developer: antoine77340, Project: Youtube-8M-WILLOW, Lines of code: 33, Source file: convert_prediction_from_json_to_csv.py

Example 15: dump_object

# Module to import: from tensorflow import gfile [as alias]
# Or: from tensorflow.gfile import Open [as alias]
def dump_object(object_to_dump, output_path):
  """Pickle the object and save to the output_path.

  Args:
    object_to_dump: Python object to be pickled
    output_path: (string) output path which can be Google Cloud Storage

  Returns:
    None
  """

  if not gfile.Exists(output_path):
    gfile.MakeDirs(os.path.dirname(output_path))
  with gfile.Open(output_path, 'w') as wf:
    joblib.dump(object_to_dump, wf) 
Developer: GoogleCloudPlatform, Project: cloudml-samples, Lines of code: 17, Source file: utils.py
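A hedged usage sketch (the training data and output path are placeholders): after fitting a scikit-learn estimator, the helper writes the joblib-pickled object either to a local directory or directly to GCS.

# Hypothetical usage; the data and the gs:// path are placeholders.
import numpy as np
from sklearn.linear_model import LogisticRegression

X_train = np.random.rand(20, 3)
y_train = np.random.randint(0, 2, size=20)
model = LogisticRegression().fit(X_train, y_train)
dump_object(model, 'gs://my-bucket/models/model.joblib')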


Note: The tensorflow.gfile.Open method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their original authors; copyright in the source code remains with those authors, and distribution and use should follow the license of the corresponding project. Please do not republish without permission.