

Python file_io.write_string_to_file Method Code Examples

This article collects typical usage examples of the Python method tensorflow.python.lib.io.file_io.write_string_to_file. If you are unsure what file_io.write_string_to_file does, how to call it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples from its home module, tensorflow.python.lib.io.file_io.


The 15 code examples of file_io.write_string_to_file below are sorted by popularity by default.
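
Before diving in, here is a minimal, self-contained sketch of the signature every example below relies on (the temporary path is our own illustration, not taken from any of the projects): write_string_to_file(filename, contents) writes contents to filename, creating the file if necessary; the filename may also be a Google Cloud Storage URI such as gs://bucket/path when TensorFlow's GCS filesystem is available.

import os
import tempfile

from tensorflow.python.lib.io import file_io

path = os.path.join(tempfile.mkdtemp(), 'hello.txt')
file_io.write_string_to_file(path, 'hello world')
print(file_io.read_file_to_string(path))  # -> hello world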

Example 1: _DoSanityCheck

# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import write_string_to_file [as alias]
def _DoSanityCheck(self, prefix):
    """Sanity-check the content of the checkpoint."""
    if not self._sanity_checks:
      return
    reader = tf.train.NewCheckpointReader(prefix)
    content = {}
    for variables, rule in self._sanity_checks:
      args = []
      for v in variables:
        key = _VarKey(v)
        if key in content:
          args.append(content[key])
        else:
          value = reader.get_tensor(key)
          content[key] = value
          args.append(value)
      if not rule.Check(*args):
        # TODO(zhifengc): Maybe we should return an explicit signal
        # so that the caller (the controller loop) can Restore()
        # the latest checkpoint before raising the error.
        msg = "Checkpoint sanity check failed: {} {} {}\n".format(
            prefix, ",".join([_VarKey(v) for v in variables]), rule)
        # Also saves the error message into a file.
        file_io.write_string_to_file("{}.failed".format(prefix), msg)
        raise tf.errors.AbortedError(None, None, msg) 
Developer: tensorflow | Project: lingvo | Lines: 27 | Source: saver.py
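
As a side note, the checkpoint-reading pattern used by _DoSanityCheck can be exercised on its own. A hedged sketch (the checkpoint prefix below is hypothetical):

import tensorflow as tf

reader = tf.train.NewCheckpointReader('/tmp/model.ckpt-1000')  # hypothetical prefix
for name in reader.get_variable_to_shape_map():
  value = reader.get_tensor(name)  # numpy array for each checkpointed variable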

Example 2: visualize_embeddings

# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import write_string_to_file [as alias]
def visualize_embeddings(summary_writer, config):
  """Stores a config file used by the embedding projector.

  Args:
    summary_writer: The summary writer used for writing events.
    config: `tf.contrib.tensorboard.plugins.projector.ProjectorConfig`
      proto that holds the configuration for the projector such as paths to
      checkpoint files and metadata files for the embeddings. If
      `config.model_checkpoint_path` is `None`, it defaults to the
      `logdir` used by the summary_writer.

  Raises:
    ValueError: If the summary writer does not have a `logdir`.
  """
  logdir = summary_writer.get_logdir()

  # Sanity checks.
  if logdir is None:
    raise ValueError('Summary writer must have a logdir')

  # Saving the config file in the logdir.
  config_pbtxt = text_format.MessageToString(config)
  file_io.write_string_to_file(
      os.path.join(logdir, projector_plugin.PROJECTOR_FILENAME), config_pbtxt) 
Developer: ryfeus | Project: lambda-packs | Lines: 26 | Source: __init__.py
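
A hedged usage sketch for visualize_embeddings through the TF 1.x projector API; the tensor name and metadata path are hypothetical placeholders:

import tensorflow as tf
from tensorflow.contrib.tensorboard.plugins import projector

summary_writer = tf.summary.FileWriter('/tmp/logs')
config = projector.ProjectorConfig()
embedding = config.embeddings.add()
embedding.tensor_name = 'word_embedding'            # hypothetical variable name
embedding.metadata_path = '/tmp/logs/metadata.tsv'  # hypothetical metadata file
# Serializes the config and writes projector_config.pbtxt into the logdir.
projector.visualize_embeddings(summary_writer, config)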

Example 3: visualize_embeddings

# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import write_string_to_file [as alias]
def visualize_embeddings(summary_writer, config):
  """Stores a config file used by the embedding projector.

  Args:
    summary_writer: The summary writer used for writing events.
    config: `tf.contrib.tensorboard.plugins.projector.ProjectorConfig`
      proto that holds the configuration for the projector such as paths to
      checkpoint files and metadata files for the embeddings. If
      `config.model_checkpoint_path` is `None`, it defaults to the
      `logdir` used by the summary_writer.

  Raises:
    ValueError: If the summary writer does not have a `logdir`.
  """
  logdir = summary_writer.get_logdir()

  # Sanity checks.
  if logdir is None:
    raise ValueError('Summary writer must have a logdir')

  # Saving the config file in the logdir.
  config_pbtxt = text_format.MessageToString(config)
  file_io.write_string_to_file(
      os.path.join(logdir, PROJECTOR_FILENAME), config_pbtxt) 
Developer: abhisuri97 | Project: auto-alt-text-lambda-api | Lines: 26 | Source: __init__.py

Example 4: start

# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import write_string_to_file [as alias]
def start(self):
    """Performs startup logic, including building graphs.
    """
    if self._config.master:
      # Save out job information for later reference alongside all other outputs.
      job_args = ' '.join(self._model_builder.args._args).replace(' --', '\n--').split('\n')
      job_info = {
        'config': self._config._env,
        'args': job_args
      }
      job_spec = yaml.safe_dump(job_info, default_flow_style=False)
      job_file = os.path.join(self._output, 'job.yaml')

      tfio.recursive_create_dir(self._output)
      tfio.write_string_to_file(job_file, job_spec)

      # Create a checkpoints directory. This is needed to ensure checkpoint restoration logic
      # can lookup an existing directory.
      tfio.recursive_create_dir(self.checkpoints_path)

    # Build the graphs that will be used during the course of the job.
    self._training, self._evaluation, self._prediction = \
      self._model_builder.build_graph_interfaces(self._inputs, self._config) 
Developer: TensorLab | Project: tensorfx | Lines: 25 | Source: _job.py
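
The pattern above (create the output directory, then persist a YAML job spec via write_string_to_file, where tfio is presumably tensorfx's alias for file_io) works without the tensorfx wrappers too. A sketch with hypothetical paths and job fields:

import os

import yaml
from tensorflow.python.lib.io import file_io

output_dir = '/tmp/job_output'  # hypothetical output location
job_info = {'config': {'task': 'master'}, 'args': ['--epochs=10']}  # hypothetical fields

file_io.recursive_create_dir(output_dir)
file_io.write_string_to_file(
    os.path.join(output_dir, 'job.yaml'),
    yaml.safe_dump(job_info, default_flow_style=False))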

Example 5: _write_assets

# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import write_string_to_file [as alias]
def _write_assets(assets_directory, assets_filename):
  """Writes asset files to be used with SavedModel for half plus two.

  Args:
    assets_directory: The directory to which the assets should be written.
    assets_filename: Name of the file to which the asset contents should be
        written.

  Returns:
    The path to which the assets file was written.
  """
  if not file_io.file_exists(assets_directory):
    file_io.recursive_create_dir(assets_directory)

  path = os.path.join(
      compat.as_bytes(assets_directory), compat.as_bytes(assets_filename))
  file_io.write_string_to_file(path, "asset-file-contents")
  return path 
Developer: tobegit3hub | Project: deep_image_model | Lines: 20 | Source: saved_model_half_plus_two.py
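
The compat.as_bytes calls are there because os.path.join cannot mix str and bytes; both components are normalized to bytes before joining. A minimal sketch of just that step:

import os

from tensorflow.python.util import compat

path = os.path.join(compat.as_bytes('/tmp/assets'), compat.as_bytes('foo.txt'))
# path == b'/tmp/assets/foo.txt'; the example passes this bytes path straight
# to file_io.write_string_to_file.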

Example 6: _do_mlengine_inference

# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import write_string_to_file [as alias]
def _do_mlengine_inference(model, version, serialized_examples):
  """Performs inference on the model:version in CMLE."""
  working_dir = tempfile.mkdtemp()
  instances_file = os.path.join(working_dir, 'test.json')
  json_examples = []
  for serialized_example in serialized_examples:
    # The encoding follows the example in:
    # https://github.com/GoogleCloudPlatform/training-data-analyst/blob/master/quests/tpu/invoke_model.py
    json_examples.append(
        '{ "inputs": { "b64": "%s" } }' % base64.b64encode(serialized_example))
  # print('\n'.join(json_examples))
  file_io.write_string_to_file(instances_file, '\n'.join(json_examples))
  gcloud_command = [
      'gcloud', 'ml-engine', 'predict', '--model', model, '--version', version,
      '--json-instances', instances_file
  ]
  print(subprocess.check_output(gcloud_command)) 
Developer: amygdala | Project: code-snippets | Lines: 19 | Source: chicago_taxi_client.py
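
One portability caveat worth flagging: under Python 3, base64.b64encode returns bytes, so the '%s' formatting above would embed a literal b'...' in the JSON. A hedged fix, assuming the script were ported to Python 3:

import base64

serialized_example = b'\x08\x01'  # hypothetical serialized tf.Example bytes
json_example = ('{ "inputs": { "b64": "%s" } }'
                % base64.b64encode(serialized_example).decode('utf-8'))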

Example 7: _write_assets

# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import write_string_to_file [as alias]
def _write_assets(assets_directory, assets_filename):
  """Writes asset files to be used with SavedModel for half plus two.

  Args:
    assets_directory: The directory to which the assets should be written.
    assets_filename: Name of the file to which the asset contents should be
        written.

  Returns:
    The path to which the assets file was written.
  """
  if not file_io.file_exists(assets_directory):
    file_io.recursive_create_dir(assets_directory)

  path = os.path.join(
      tf.compat.as_bytes(assets_directory), tf.compat.as_bytes(assets_filename))
  file_io.write_string_to_file(path, "asset-file-contents")
  return path 
Developer: helmut-hoffer-von-ankershoffen | Project: jetson | Lines: 20 | Source: saved_model_half_plus_two.py

Example 8: save

# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import write_string_to_file [as alias]
def save(self, as_text=False):
    """Writes a `SavedModel` protocol buffer to disk.

    The function writes the SavedModel protocol buffer to the export directory
    in serialized format.

    Args:
      as_text: Writes the SavedModel protocol buffer in text format to disk.

    Returns:
      The path to which the SavedModel protocol buffer was written.
    """
    if not file_io.file_exists(self._export_dir):
      file_io.recursive_create_dir(self._export_dir)

    if as_text:
      path = os.path.join(
          compat.as_bytes(self._export_dir),
          compat.as_bytes(constants.SAVED_MODEL_FILENAME_PBTXT))
      file_io.write_string_to_file(path, str(self._saved_model))
    else:
      path = os.path.join(
          compat.as_bytes(self._export_dir),
          compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB))
      file_io.write_string_to_file(path, self._saved_model.SerializeToString())
    tf_logging.info("SavedModel written to: %s", path)

    return path 
Developer: ryfeus | Project: lambda-packs | Lines: 30 | Source: builder_impl.py
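
A hedged usage sketch of the save path above through the public TF 1.x API; the export directory and tag are hypothetical:

import tensorflow as tf

export_dir = '/tmp/saved_model_export'  # hypothetical export location
builder = tf.saved_model.builder.SavedModelBuilder(export_dir)
with tf.Session() as sess:
  tf.Variable(42, name='v')
  sess.run(tf.global_variables_initializer())
  builder.add_meta_graph_and_variables(sess, ['serve'])
# as_text=True writes saved_model.pbtxt; the default writes binary saved_model.pb.
path = builder.save(as_text=True)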

Example 9: _build_asset_collection

# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import write_string_to_file [as alias]
def _build_asset_collection(self, asset_file_name, asset_file_contents,
                              asset_file_tensor_name):
    asset_filepath = os.path.join(
        compat.as_bytes(test.get_temp_dir()), compat.as_bytes(asset_file_name))
    file_io.write_string_to_file(asset_filepath, asset_file_contents)
    asset_file_tensor = constant_op.constant(
        asset_filepath, name=asset_file_tensor_name)
    ops.add_to_collection(ops.GraphKeys.ASSET_FILEPATHS, asset_file_tensor)
    asset_collection = ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
    return asset_collection 
Developer: abhisuri97 | Project: auto-alt-text-lambda-api | Lines: 12 | Source: saved_model_test.py

Example 10: testAssets

# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import write_string_to_file [as alias]
def testAssets(self):
    export_dir = os.path.join(test.get_temp_dir(), "test_assets")
    builder = saved_model_builder.SavedModelBuilder(export_dir)

    with self.test_session(graph=ops.Graph()) as sess:
      self._init_and_validate_variable(sess, "v", 42)

      # Build an asset collection.
      ignored_filepath = os.path.join(
          compat.as_bytes(test.get_temp_dir()), compat.as_bytes("ignored.txt"))
      file_io.write_string_to_file(ignored_filepath, "will be ignored")

      asset_collection = self._build_asset_collection("hello42.txt",
                                                      "foo bar baz",
                                                      "asset_file_tensor")

      builder.add_meta_graph_and_variables(
          sess, ["foo"], assets_collection=asset_collection)

    # Save the SavedModel to disk.
    builder.save()

    with self.test_session(graph=ops.Graph()) as sess:
      foo_graph = loader.load(sess, ["foo"], export_dir)
      self._validate_asset_collection(export_dir, foo_graph.collection_def,
                                      "hello42.txt", "foo bar baz",
                                      "asset_file_tensor:0")
      ignored_asset_path = os.path.join(
          compat.as_bytes(export_dir),
          compat.as_bytes(constants.ASSETS_DIRECTORY),
          compat.as_bytes("ignored.txt"))
      self.assertFalse(file_io.file_exists(ignored_asset_path)) 
Developer: abhisuri97 | Project: auto-alt-text-lambda-api | Lines: 34 | Source: saved_model_test.py

Example 11: run_analysis

# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import write_string_to_file [as alias]
def run_analysis(args):
  """Builds an analysis file for training.

  Uses BigQuery tables to do the analysis.

  Args:
    args: command line args

  Raises:
    ValueError: If the schema contains an unsupported type.
  """
  import google.datalab.bigquery as bq
  if args.bigquery_table:
    table = bq.Table(args.bigquery_table)
    schema_list = table.schema._bq_schema
  else:
    schema_list = json.loads(
        file_io.read_file_to_string(args.schema_file).decode())
    table = bq.ExternalDataSource(
        source=args.input_file_pattern,
        schema=bq.Schema(schema_list))

  # Check the schema is supported.
  for col_schema in schema_list:
    col_type = col_schema['type'].lower()
    if col_type != 'string' and col_type != 'integer' and col_type != 'float':
      raise ValueError('Schema contains an unsupported type %s.' % col_type)

  run_numerical_analysis(table, schema_list, args)
  run_categorical_analysis(table, schema_list, args)

  # Save a copy of the schema to the output location.
  file_io.write_string_to_file(
      os.path.join(args.output_dir, SCHEMA_FILE),
      json.dumps(schema_list, indent=2, separators=(',', ': '))) 
Developer: googledatalab | Project: pydatalab | Lines: 37 | Source: cloud_preprocess.py
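
For reference, a sketch of the BigQuery-style schema_list that the code round-trips through the output schema file; the column names are hypothetical, and 'schema.json' stands in for the module's SCHEMA_FILE constant:

import json
import os

from tensorflow.python.lib.io import file_io

schema_list = [
    {'name': 'fare', 'type': 'FLOAT'},
    {'name': 'passengers', 'type': 'INTEGER'},
    {'name': 'payment_type', 'type': 'STRING'},
]
output_dir = '/tmp/analysis_output'  # hypothetical output location
file_io.recursive_create_dir(output_dir)
file_io.write_string_to_file(
    os.path.join(output_dir, 'schema.json'),
    json.dumps(schema_list, indent=2, separators=(',', ': ')))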

Example 12: test_numerics

# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import write_string_to_file [as alias]
def test_numerics(self):
    output_folder = tempfile.mkdtemp()
    input_file_path = tempfile.mkstemp(dir=output_folder)[1]
    try:
      file_io.write_string_to_file(
        input_file_path,
        '\n'.join(['%s,%s,%s' % (i, 10 * i + 0.5, i + 0.5) for i in range(100)]))

      schema = [{'name': 'col1', 'type': 'INTEGER'},
                {'name': 'col2', 'type': 'FLOAT'},
                {'name': 'col3', 'type': 'FLOAT'}]
      features = {'col1': {'transform': 'scale', 'source_column': 'col1'},
                  'col2': {'transform': 'identity', 'source_column': 'col2'},
                  'col3': {'transform': 'target'}}
      feature_analysis.run_local_analysis(
          output_folder, [input_file_path], schema, features)

      stats = json.loads(
          file_io.read_file_to_string(
              os.path.join(output_folder, analyze.constant.STATS_FILE)).decode())

      self.assertEqual(stats['num_examples'], 100)
      col = stats['column_stats']['col1']
      self.assertAlmostEqual(col['max'], 99.0)
      self.assertAlmostEqual(col['min'], 0.0)
      self.assertAlmostEqual(col['mean'], 49.5)

      col = stats['column_stats']['col2']
      self.assertAlmostEqual(col['max'], 990.5)
      self.assertAlmostEqual(col['min'], 0.5)
      self.assertAlmostEqual(col['mean'], 495.5)
    finally:
      shutil.rmtree(output_folder) 
Developer: googledatalab | Project: pydatalab | Lines: 35 | Source: test_analyze.py

Example 13: test_categorical

# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import write_string_to_file [as alias]
def test_categorical(self):
    output_folder = tempfile.mkdtemp()
    input_file_path = tempfile.mkstemp(dir=output_folder)[1]
    try:
      csv_file = ['red,apple', 'red,pepper', 'red,apple', 'blue,grape',
                  'blue,apple', 'green,pepper']
      file_io.write_string_to_file(
        input_file_path,
        '\n'.join(csv_file))

      schema = [{'name': 'color', 'type': 'STRING'},
                {'name': 'type', 'type': 'STRING'}]
      features = {'color': {'transform': 'one_hot', 'source_column': 'color'},
                  'type': {'transform': 'target'}}
      feature_analysis.run_local_analysis(
        output_folder, [input_file_path], schema, features)

      stats = json.loads(
          file_io.read_file_to_string(
              os.path.join(output_folder, analyze.constant.STATS_FILE)).decode())
      self.assertEqual(stats['column_stats']['color']['vocab_size'], 3)

      # Color column.
      vocab_str = file_io.read_file_to_string(
        os.path.join(output_folder, analyze.constant.VOCAB_ANALYSIS_FILE % 'color'))
      vocab = pd.read_csv(six.StringIO(vocab_str),
                          header=None,
                          names=['color', 'count'])
      expected_vocab = pd.DataFrame(
          {'color': ['red', 'blue', 'green'], 'count': [3, 2, 1]},
          columns=['color', 'count'])
      pd.util.testing.assert_frame_equal(vocab, expected_vocab)

    finally:
      shutil.rmtree(output_folder) 
Developer: googledatalab | Project: pydatalab | Lines: 37 | Source: test_analyze.py

Example 14: save_schema_features

# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import write_string_to_file [as alias]
def save_schema_features(schema, features, output):
  # Save a copy of the schema and features in the output folder.
  file_io.write_string_to_file(
    os.path.join(output, constant.SCHEMA_FILE),
    json.dumps(schema, indent=2))

  file_io.write_string_to_file(
    os.path.join(output, constant.FEATURES_FILE),
    json.dumps(features, indent=2)) 
Developer: googledatalab | Project: pydatalab | Lines: 11 | Source: feature_analysis.py
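
A hedged call sketch for the helper above; it assumes the surrounding pydatalab module (with its constant.SCHEMA_FILE and constant.FEATURES_FILE names) is importable, so only the call shape is shown:

schema = [{'name': 'col1', 'type': 'INTEGER'}]  # hypothetical schema
features = {'col1': {'transform': 'scale', 'source_column': 'col1'}}  # hypothetical features
save_schema_features(schema, features, '/tmp/analysis_output')  # hypothetical output dir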

Example 15: test_make_transform_graph_numerics

# Required import: from tensorflow.python.lib.io import file_io [as alias]
# Or: from tensorflow.python.lib.io.file_io import write_string_to_file [as alias]
def test_make_transform_graph_numerics(self):
    output_folder = tempfile.mkdtemp()
    stats_file_path = os.path.join(output_folder, feature_transforms.STATS_FILE)
    try:
      stats = {'column_stats':
                {'num1': {'max': 10.0, 'mean': 9.5, 'min': 0.0},  # noqa
                 'num2': {'max': 1.0, 'mean': 2.0, 'min': -1.0},
                 'num3': {'max': 10.0, 'mean': 2.0, 'min': 5.0}}}
      schema = [{'name': 'num1', 'type': 'FLOAT'},
                {'name': 'num2', 'type': 'FLOAT'},
                {'name': 'num3', 'type': 'INTEGER'}]
      features = {'num1': {'transform': 'identity', 'source_column': 'num1'},
                  'num2': {'transform': 'scale', 'value': 10, 'source_column': 'num2'},
                  'num3': {'transform': 'scale', 'source_column': 'num3'}}
      input_data = ['5.0,-1.0,10',
                    '10.0,1.0,5',
                    '15.0,0.5,7']
      file_io.write_string_to_file(
          stats_file_path,
          json.dumps(stats))

      results = self._run_graph(output_folder, features, schema, stats, input_data)

      for result, expected_result in zip(results['num1'].flatten().tolist(),
                                         [5, 10, 15]):
        self.assertAlmostEqual(result, expected_result)

      for result, expected_result in zip(results['num2'].flatten().tolist(),
                                         [-10, 10, 5]):
        self.assertAlmostEqual(result, expected_result)

      for result, expected_result in zip(results['num3'].flatten().tolist(),
                                         [1, -1, (7.0 - 5) * 2.0 / 5.0 - 1]):
        self.assertAlmostEqual(result, expected_result)
    finally:
      shutil.rmtree(output_folder) 
Developer: googledatalab | Project: pydatalab | Lines: 38 | Source: test_feature_transforms.py
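
The expected values decode to a single linear rescaling. Reading the test's arithmetic (this is reverse-engineered from the assertions, not the library's documented formula), the 'scale' transform maps [min, max] onto [-value, value], with value defaulting to 1:

def scale(x, min_x, max_x, value=1.0):
  # Linearly map [min_x, max_x] onto [-value, value].
  return (x - min_x) * (2.0 * value) / (max_x - min_x) - value

assert scale(10, 5.0, 10.0) == 1.0                # num3, row 1
assert scale(5, 5.0, 10.0) == -1.0                # num3, row 2
assert abs(scale(7, 5.0, 10.0) + 0.2) < 1e-9      # num3, row 3: (7-5)*2/5 - 1
assert scale(-1.0, -1.0, 1.0, value=10) == -10.0  # num2, row 1
assert scale(1.0, -1.0, 1.0, value=10) == 10.0    # num2, row 2
assert scale(0.5, -1.0, 1.0, value=10) == 5.0     # num2, row 3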


Note: The tensorflow.python.lib.io.file_io.write_string_to_file examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by various developers, and copyright of the source code remains with the original authors; consult each project's license before distributing or reusing the code. Do not reproduce without permission.