

Python file_io.read_file_to_string Method Code Examples

This article collects typical usage examples of the file_io.read_file_to_string method from the tensorflow.python.lib.io module in Python. If you are unsure what file_io.read_file_to_string does or how to call it, the curated examples below may help. You can also explore further usage examples from the containing module, tensorflow.python.lib.io.file_io.


The sections below present 15 code examples of the file_io.read_file_to_string method, sorted by popularity by default.
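
Before the individual examples, here is a minimal sketch of the basic call. The file path is a hypothetical placeholder, and note that depending on the TensorFlow version, read_file_to_string may return bytes or str, which is why several examples below call .decode() on the result.

from tensorflow.python.lib.io import file_io

# Read the entire file into memory in one call. '/tmp/example.txt' is a
# hypothetical path; paths on any registered filesystem (e.g. gs://) also work.
content = file_io.read_file_to_string('/tmp/example.txt')
if isinstance(content, bytes):  # older TensorFlow versions return bytes
  content = content.decode('utf-8')
print(content)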

Example 1: _latest_checkpoints_changed

# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import read_file_to_string [as alias]
def _latest_checkpoints_changed(configs, run_path_pairs):
  """Returns true if the latest checkpoint has changed in any of the runs."""
  for run_name, assets_dir in run_path_pairs:
    if run_name not in configs:
      config = projector_config_pb2.ProjectorConfig()
      config_fpath = os.path.join(assets_dir, PROJECTOR_FILENAME)
      if file_io.file_exists(config_fpath):
        file_content = file_io.read_file_to_string(config_fpath)
        text_format.Merge(file_content, config)
    else:
      config = configs[run_name]

    # See if you can find a checkpoint file in the logdir.
    logdir = _assets_dir_to_logdir(assets_dir)
    ckpt_path = _find_latest_checkpoint(logdir)
    if not ckpt_path:
      continue
    if config.model_checkpoint_path != ckpt_path:
      return True
  return False 
Developer: ryfeus, Project: lambda-packs, Lines: 22, Source: projector_plugin.py

Example 2: _validate_asset_collection

# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import read_file_to_string [as alias]
def _validate_asset_collection(self, export_dir, graph_collection_def,
                                 expected_asset_file_name,
                                 expected_asset_file_contents,
                                 expected_asset_tensor_name):
    assets_any = graph_collection_def[constants.ASSETS_KEY].any_list.value
    asset = meta_graph_pb2.AssetFileDef()
    assets_any[0].Unpack(asset)
    assets_path = os.path.join(
        compat.as_bytes(export_dir),
        compat.as_bytes(constants.ASSETS_DIRECTORY),
        compat.as_bytes(expected_asset_file_name))
    actual_asset_contents = file_io.read_file_to_string(assets_path)
    self.assertEqual(expected_asset_file_contents,
                     compat.as_text(actual_asset_contents))
    self.assertEqual(expected_asset_file_name, asset.filename)
    self.assertEqual(expected_asset_tensor_name, asset.tensor_info.name) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 18, Source: saved_model_test.py

Example 3: get_vocabulary

# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import read_file_to_string [as alias]
def get_vocabulary(preprocess_output_dir, name):
  """Loads the vocabulary file as a list of strings.

  Args:
    preprocess_output_dir: Should contain the file CATEGORICAL_ANALYSIS % name.
    name: name of the csv column.

  Returns:
    List of strings.

  Raises:
    ValueError: if file is missing.
  """
  vocab_file = os.path.join(preprocess_output_dir, CATEGORICAL_ANALYSIS % name)
  if not file_io.file_exists(vocab_file):
    raise ValueError('File %s not found in %s' %
                     (CATEGORICAL_ANALYSIS % name, preprocess_output_dir))

  labels = python_portable_string(
      file_io.read_file_to_string(vocab_file)).split('\n')
  label_values = [x for x in labels if x]  # remove empty lines

  return label_values 
Developer: googledatalab, Project: pydatalab, Lines: 25, Source: util.py

Example 4: local_analysis

# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import read_file_to_string [as alias]
def local_analysis(args):
  if args.analysis:
    # Already analyzed.
    return

  if not args.schema or not args.features:
    raise ValueError('Either --analysis, or both --schema and --features must be provided.')

  tf_config = json.loads(os.environ.get('TF_CONFIG', '{}'))
  cluster_spec = tf_config.get('cluster', {})
  if len(cluster_spec.get('worker', [])) > 0:
    raise ValueError('If "schema" and "features" are provided, local analysis will run and ' +
                     'only the BASIC scale tier (no worker nodes) is supported.')

  if cluster_spec and not (args.schema.startswith('gs://') and args.features.startswith('gs://')):
    raise ValueError('Cloud trainer requires GCS paths for --schema and --features.')

  print('Running analysis.')
  schema = json.loads(file_io.read_file_to_string(args.schema).decode())
  features = json.loads(file_io.read_file_to_string(args.features).decode())
  args.analysis = os.path.join(args.job_dir, 'analysis')
  args.transform = True
  file_io.recursive_create_dir(args.analysis)
  feature_analysis.run_local_analysis(args.analysis, args.train, schema, features)
  print('Analysis done.') 
Developer: googledatalab, Project: pydatalab, Lines: 27, Source: task.py

Example 5: _run_batch_prediction

# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import read_file_to_string [as alias]
def _run_batch_prediction(self):
    """Run batch prediction using the cloudml engine prediction service.

    There is no local version of this step as it's the last step.
    """

    job_name = 'test_mltoolbox_batchprediction_%s' % uuid.uuid4().hex
    cmd = ['gcloud ml-engine jobs submit prediction ' + job_name,
           '--data-format=TEXT',
           '--input-paths=' + self._csv_predict_filename,
           '--output-path=' + self._prediction_output,
           '--model-dir=' + os.path.join(self._train_output, 'model'),
           '--runtime-version=1.0',
           '--region=us-central1']
    self._logger.debug('Running subprocess: %s \n\n' % ' '.join(cmd))
    subprocess.check_call(' '.join(cmd), shell=True)  # Returns once the job is submitted; the job itself runs asynchronously.
    subprocess.check_call('gcloud ml-engine jobs stream-logs ' + job_name, shell=True)

    # Check that there were no errors.
    error_files = file_io.get_matching_files(
        os.path.join(self._prediction_output, 'prediction.errors_stats*'))
    self.assertEqual(1, len(error_files))
    error_str = file_io.read_file_to_string(error_files[0])
    self.assertEqual('', error_str) 
Developer: googledatalab, Project: pydatalab, Lines: 26, Source: test_cloud_workflow.py

Example 6: parse_schema_txt_file

# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import read_file_to_string [as alias]
def parse_schema_txt_file(schema_path):  # type: (str) -> Schema
    """
    Parse a tf.metadata Schema txt file into its in-memory representation.
    """
    assert file_io.file_exists(schema_path), "File not found: {}".format(schema_path)
    schema = Schema()
    schema_text = file_io.read_file_to_string(schema_path)
    google.protobuf.text_format.Parse(schema_text, schema)
    return schema 
Developer: spotify, Project: spotify-tensorflow, Lines: 11, Source: tf_schema_utils.py
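
For context, a hypothetical invocation of the helper above might look like this (the path and the printed field are assumptions for illustration, not part of the original project):

schema = parse_schema_txt_file('gs://my-bucket/schema.pbtxt')  # hypothetical path
print(len(schema.feature))  # the tf.metadata Schema proto has a repeated 'feature' field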

Example 7: _GetState

# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import read_file_to_string [as alias]
def _GetState(self):
    """Returns the latest checkpoint id."""
    state = CheckpointState()
    if file_io.file_exists(self._state_file):
      content = file_io.read_file_to_string(self._state_file)
      text_format.Merge(content, state)
    return state 
Developer: tensorflow, Project: lingvo, Lines: 9, Source: saver.py

Example 8: _read_latest_config_files

# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import read_file_to_string [as alias]
def _read_latest_config_files(self, run_path_pairs):
    """Reads and returns the projector config files in every run directory."""
    configs = {}
    config_fpaths = {}
    for run_name, assets_dir in run_path_pairs:
      config = projector_config_pb2.ProjectorConfig()
      config_fpath = os.path.join(assets_dir, PROJECTOR_FILENAME)
      if file_io.file_exists(config_fpath):
        file_content = file_io.read_file_to_string(config_fpath)
        text_format.Merge(file_content, config)
      has_tensor_files = False
      for embedding in config.embeddings:
        if embedding.tensor_path:
          if not embedding.tensor_name:
            embedding.tensor_name = os.path.basename(embedding.tensor_path)
          has_tensor_files = True
          break

      if not config.model_checkpoint_path:
        # See if you can find a checkpoint file in the logdir.
        logdir = _assets_dir_to_logdir(assets_dir)
        ckpt_path = _find_latest_checkpoint(logdir)
        if not ckpt_path and not has_tensor_files:
          continue
        if ckpt_path:
          config.model_checkpoint_path = ckpt_path

      # Sanity check for the checkpoint file.
      if (config.model_checkpoint_path and
          not checkpoint_exists(config.model_checkpoint_path)):
        logging.warning('Checkpoint file "%s" not found',
                        config.model_checkpoint_path)
        continue
      configs[run_name] = config
      config_fpaths[run_name] = config_fpath
    return configs, config_fpaths 
Developer: ryfeus, Project: lambda-packs, Lines: 38, Source: projector_plugin.py

Example 9: _read_file

# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import read_file_to_string [as alias]
def _read_file(filename):
  """Reads a file containing `GraphDef` and returns the protocol buffer.

  Args:
    filename: `graph_def` filename including the path.

  Returns:
    A `GraphDef` protocol buffer.

  Raises:
    IOError: If the file doesn't exist, or cannot be successfully parsed.
  """
  graph_def = graph_pb2.GraphDef()
  if not file_io.file_exists(filename):
    raise IOError("File %s does not exist." % filename)
  # First try to read it as a binary file.
  file_content = file_io.read_file_to_string(filename)
  try:
    graph_def.ParseFromString(file_content)
    return graph_def
  except Exception:  # pylint: disable=broad-except
    pass

  # Next try to read it as a text file.
  try:
    text_format.Merge(file_content.decode("utf-8"), graph_def)
  except text_format.ParseError as e:
    raise IOError("Cannot parse file %s: %s." % (filename, str(e)))

  return graph_def 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 32, Source: meta_graph.py

Example 10: read_meta_graph_file

# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import read_file_to_string [as alias]
def read_meta_graph_file(filename):
  """Reads a file containing `MetaGraphDef` and returns the protocol buffer.

  Args:
    filename: `meta_graph_def` filename including the path.

  Returns:
    A `MetaGraphDef` protocol buffer.

  Raises:
    IOError: If the file doesn't exist, or cannot be successfully parsed.
  """
  meta_graph_def = meta_graph_pb2.MetaGraphDef()
  if not file_io.file_exists(filename):
    raise IOError("File %s does not exist." % filename)
  # First try to read it as a binary file.
  file_content = file_io.read_file_to_string(filename)
  try:
    meta_graph_def.ParseFromString(file_content)
    return meta_graph_def
  except Exception:  # pylint: disable=broad-except
    pass

  # Next try to read it as a text file.
  try:
    text_format.Merge(file_content.decode("utf-8"), meta_graph_def)
  except text_format.ParseError as e:
    raise IOError("Cannot parse file %s: %s." % (filename, str(e)))

  return meta_graph_def 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 32, Source: meta_graph.py
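
Examples 9 and 10 share a pattern worth noting: read the file once with read_file_to_string, try to parse the content as a binary protocol buffer, and fall back to the protobuf text format on failure. Below is a generic sketch of that pattern; the helper name is hypothetical, and it assumes read_file_to_string returns bytes, as in the TensorFlow versions these examples target.

from google.protobuf import text_format
from tensorflow.python.lib.io import file_io

def parse_proto_file(filename, message):  # hypothetical helper
  """Fills `message` from `filename`: binary wire format first, then text."""
  file_content = file_io.read_file_to_string(filename)
  try:
    # Binary wire format is the common case and fails fast on text files.
    message.ParseFromString(file_content)
    return message
  except Exception:  # pylint: disable=broad-except
    pass
  # Fall back to the human-readable text format.
  text_format.Merge(file_content.decode('utf-8'), message)
  return message

For instance, parse_proto_file('model.pbtxt', graph_pb2.GraphDef()) would replicate the behavior of Example 9.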

Example 11: _read_latest_config_files

# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import read_file_to_string [as alias]
def _read_latest_config_files(self, run_path_pairs):
    """Reads and returns the projector config files in every run directory."""
    configs = {}
    config_fpaths = {}
    for run_name, logdir in run_path_pairs:
      config = ProjectorConfig()
      config_fpath = os.path.join(logdir, PROJECTOR_FILENAME)
      if file_io.file_exists(config_fpath):
        file_content = file_io.read_file_to_string(config_fpath).decode('utf-8')
        text_format.Merge(file_content, config)

      has_tensor_files = False
      for embedding in config.embeddings:
        if embedding.tensor_path:
          has_tensor_files = True
          break

      if not config.model_checkpoint_path:
        # See if you can find a checkpoint file in the logdir.
        ckpt_path = latest_checkpoint(logdir)
        if not ckpt_path:
          # Or in the parent of logdir.
          ckpt_path = latest_checkpoint(os.path.join(logdir, os.pardir))
          if not ckpt_path and not has_tensor_files:
            continue
        if ckpt_path:
          config.model_checkpoint_path = ckpt_path

      # Sanity check for the checkpoint file.
      if (config.model_checkpoint_path and
          not checkpoint_exists(config.model_checkpoint_path)):
        logging.warning('Checkpoint file %s not found',
                        config.model_checkpoint_path)
        continue
      configs[run_name] = config
      config_fpaths[run_name] = config_fpath
    return configs, config_fpaths 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 39, Source: plugin.py

Example 12: __init__

# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import read_file_to_string [as alias]
def __init__(self, datasources, schema, metadata, features):
    """Initializes a DataSet with the specified DataSource instances.

    Arguments:
      datasources: the set of contained DataSource instances keyed by name.
      schema: the description of the source data.
      metadata: additional per-field information associated with the data.
      features: the optional description of the transformed data.
    """
    self._datasources = datasources

    if type(schema) is str:
      # Interpret this as a file path if the value is a string
      schema = tfio.read_file_to_string(schema)
      schema = Schema.parse(schema)
    self._schema = schema

    if metadata:
      if type(metadata) is str:
        # Interpret this as a file path if the value is a string
        metadata = tfio.read_file_to_string(metadata)
        metadata = Metadata.parse(metadata)
    self._metadata = metadata

    if features:
      if type(features) is str:
        # Interpret this as a file path if the value is a string
        features = tfio.read_file_to_string(features)
        features = FeatureSet.parse(features)
    self._features = features 
Developer: TensorLab, Project: tensorfx, Lines: 32, Source: _dataset.py

Example 13: _run_batch_prediction

# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import read_file_to_string [as alias]
def _run_batch_prediction(self, output_dir, use_target):
    reglinear.batch_predict(
        training_dir=self._train_output,
        prediction_input_file=(self._csv_eval_filename if use_target
                               else self._csv_predict_filename),
        output_dir=output_dir,
        mode='evaluation' if use_target else 'prediction',
        batch_size=4,
        output_format='csv')

    # check errors file is empty
    errors = file_io.get_matching_files(os.path.join(output_dir, 'errors*'))
    self.assertEqual(len(errors), 1)
    if os.path.getsize(errors[0]):
      with open(errors[0]) as errors_file:
        self.fail(msg=errors_file.read())

    # check predictions files are not empty
    predictions = file_io.get_matching_files(os.path.join(output_dir,
                                                          'predictions*'))
    self.assertGreater(os.path.getsize(predictions[0]), 0)

    # check the schema is correct
    schema_file = os.path.join(output_dir, 'csv_schema.json')
    self.assertTrue(os.path.isfile(schema_file))
    schema = json.loads(file_io.read_file_to_string(schema_file))
    self.assertEqual(schema[0]['name'], 'key')
    self.assertEqual(schema[1]['name'], 'predicted')
    if use_target:
      self.assertEqual(schema[2]['name'], 'target')
      self.assertEqual(len(schema), 3)
    else:
      self.assertEqual(len(schema), 2) 
Developer: googledatalab, Project: pydatalab, Lines: 35, Source: test_datalab_e2e.py

Example 14: run_analysis

# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import read_file_to_string [as alias]
def run_analysis(args):
  """Builds an analysis files for training."""

  # Read the schema and input feature types
  schema_list = json.loads(
      file_io.read_file_to_string(args.schema_file))

  run_numerical_categorical_analysis(args, schema_list)

  # Also save a copy of the schema in the output folder.
  file_io.copy(args.schema_file,
               os.path.join(args.output_dir, SCHEMA_FILE),
               overwrite=True) 
Developer: googledatalab, Project: pydatalab, Lines: 15, Source: local_preprocess.py

Example 15: run_analysis

# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import read_file_to_string [as alias]
def run_analysis(args):
  """Builds an analysis file for training.

  Uses BigQuery tables to do the analysis.

  Args:
    args: command line args

  Raises:
    ValueError: if the schema contains unknown types.
  """
  import google.datalab.bigquery as bq
  if args.bigquery_table:
    table = bq.Table(args.bigquery_table)
    schema_list = table.schema._bq_schema
  else:
    schema_list = json.loads(
        file_io.read_file_to_string(args.schema_file).decode())
    table = bq.ExternalDataSource(
        source=args.input_file_pattern,
        schema=bq.Schema(schema_list))

  # Check the schema is supported.
  for col_schema in schema_list:
    col_type = col_schema['type'].lower()
    if col_type != 'string' and col_type != 'integer' and col_type != 'float':
      raise ValueError('Schema contains an unsupported type %s.' % col_type)

  run_numerical_analysis(table, schema_list, args)
  run_categorical_analysis(table, schema_list, args)

  # Save a copy of the schema to the output location.
  file_io.write_string_to_file(
      os.path.join(args.output_dir, SCHEMA_FILE),
      json.dumps(schema_list, indent=2, separators=(',', ': '))) 
Developer: googledatalab, Project: pydatalab, Lines: 37, Source: cloud_preprocess.py


Note: The tensorflow.python.lib.io.file_io.read_file_to_string examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are drawn from open-source projects contributed by various developers, and copyright remains with the original authors; refer to each project's License before distributing or using the code, and do not reproduce without permission.