Python file_io.copy Function Code Examples

This article collects typical usage examples of the copy function from tensorflow.python.lib.io.file_io. If you are wondering what file_io.copy does, how to call it, or what it looks like in real code, the curated examples below should help.


The 15 code examples of the copy function shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
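
Before diving into the examples, here is a minimal sketch of the basic call, assuming a writable local temp directory. The paths are placeholders; file_io resolves both local paths and GCS ("gs://...") URLs through TensorFlow's filesystem layer.

from tensorflow.python.lib.io import file_io

src = "/tmp/example_src.txt"  # placeholder source path
dst = "/tmp/example_dst.txt"  # placeholder destination path

file_io.write_string_to_file(src, "hello")

# copy raises errors.AlreadyExistsError if dst already exists;
# pass overwrite=True to replace an existing destination.
file_io.copy(src, dst, overwrite=True)

# read_file_to_string returns str in current TensorFlow releases
# (older versions returned bytes, as Example 10 shows).
assert file_io.read_file_to_string(dst) == "hello"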

Example 1: _save_and_write_assets

  def _save_and_write_assets(self, assets_collection_to_add=None):
    """Saves asset to the meta graph and writes asset files to disk.

    Args:
      assets_collection_to_add: The collection where the asset paths are setup.
    """
    asset_source_filepath_list = self._save_assets(assets_collection_to_add)

    # Return if there are no assets to write.
    if not asset_source_filepath_list:
      tf_logging.info("No assets to write.")
      return

    assets_destination_dir = os.path.join(
        compat.as_bytes(self._export_dir),
        compat.as_bytes(constants.ASSETS_DIRECTORY))

    if not file_io.file_exists(assets_destination_dir):
      file_io.recursive_create_dir(assets_destination_dir)

    # Copy each asset from source path to destination path.
    for asset_source_filepath in asset_source_filepath_list:
      asset_source_filename = os.path.basename(asset_source_filepath)

      asset_destination_filepath = os.path.join(
          compat.as_bytes(assets_destination_dir),
          compat.as_bytes(asset_source_filename))
      file_io.copy(
          asset_source_filepath, asset_destination_filepath, overwrite=True)

    tf_logging.info("Assets written to: %s", assets_destination_dir)
Contributor: Qstar, Project: tensorflow, Lines: 31, Source: builder.py

Example 2: testCopyOverwriteFalse

 def testCopyOverwriteFalse(self):
   file_path = os.path.join(self._base_dir, "temp_file")
   file_io.write_string_to_file(file_path, "testing")
   copy_path = os.path.join(self._base_dir, "copy_file")
   file_io.write_string_to_file(copy_path, "copy")
   with self.assertRaises(errors.AlreadyExistsError):
     file_io.copy(file_path, copy_path, overwrite=False)
Contributor: AriaAsuka, Project: tensorflow, Lines: 7, Source: file_io_test.py

Example 3: testCopy

 def testCopy(self):
   file_path = os.path.join(self._base_dir, "temp_file")
   file_io.FileIO(file_path, mode="w").write("testing")
   copy_path = os.path.join(self._base_dir, "copy_file")
   file_io.copy(file_path, copy_path)
   self.assertTrue(file_io.file_exists(copy_path))
   self.assertEqual(b"testing", file_io.read_file_to_string(file_path))
Contributor: JamesFysh, Project: tensorflow, Lines: 7, Source: file_io_test.py

Example 4: _save_and_write_assets

  def _save_and_write_assets(self, assets_collection_to_add=None):
    """Saves asset to the meta graph and writes asset files to disk.

    Args:
      assets_collection_to_add: The collection where the asset paths are setup.
    """
    asset_filename_map = _maybe_save_assets(assets_collection_to_add)

    # Return if there are no assets to write.
    if not asset_filename_map:
      tf_logging.info("No assets to write.")
      return

    assets_destination_dir = saved_model_utils.get_or_create_assets_dir(
        self._export_dir)

    # Copy each asset from source path to destination path.
    for asset_basename, asset_source_filepath in asset_filename_map.items():
      asset_destination_filepath = os.path.join(
          compat.as_bytes(assets_destination_dir),
          compat.as_bytes(asset_basename))

      # Only copy the asset file to the destination if it does not already
      # exist. This is to ensure that an asset with the same name defined as
      # part of multiple graphs is only copied the first time.
      if not file_io.file_exists(asset_destination_filepath):
        file_io.copy(asset_source_filepath, asset_destination_filepath)

    tf_logging.info("Assets written to: %s",
                    compat.as_text(assets_destination_dir))
Contributor: AnishShah, Project: tensorflow, Lines: 30, Source: builder_impl.py

Example 5: testCopyOverwriteFalse

 def testCopyOverwriteFalse(self):
   file_path = os.path.join(self._base_dir, "temp_file")
   file_io.FileIO(file_path, mode="w").write("testing")
   copy_path = os.path.join(self._base_dir, "copy_file")
   file_io.FileIO(copy_path, mode="w").write("copy")
   with self.assertRaises(errors.AlreadyExistsError):
     file_io.copy(file_path, copy_path, overwrite=False)
Contributor: 1000sprites, Project: tensorflow, Lines: 7, Source: file_io_test.py

Example 6: _save_and_write_assets

  def _save_and_write_assets(self, assets_collection_to_add=None):
    """Saves asset to the meta graph and writes asset files to disk.

    Args:
      assets_collection_to_add: The collection where the asset paths are setup.
    """
    asset_source_filepath_list = _maybe_save_assets(assets_collection_to_add)

    # Return if there are no assets to write.
    if not asset_source_filepath_list:
      tf_logging.info("No assets to write.")
      return

    assets_destination_dir = os.path.join(
        compat.as_bytes(self._export_dir),
        compat.as_bytes(constants.ASSETS_DIRECTORY))

    if not file_io.file_exists(assets_destination_dir):
      file_io.recursive_create_dir(assets_destination_dir)

    # Copy each asset from source path to destination path.
    for asset_source_filepath in asset_source_filepath_list:
      asset_source_filename = os.path.basename(asset_source_filepath)

      asset_destination_filepath = os.path.join(
          compat.as_bytes(assets_destination_dir),
          compat.as_bytes(asset_source_filename))

      # Only copy the asset file to the destination if it does not already
      # exist. This is to ensure that an asset with the same name defined as
      # part of multiple graphs is only copied the first time.
      if not file_io.file_exists(asset_destination_filepath):
        file_io.copy(asset_source_filepath, asset_destination_filepath)

    tf_logging.info("Assets written to: %s", assets_destination_dir)
Contributor: 1000sprites, Project: tensorflow, Lines: 35, Source: builder_impl.py

Example 7: testCopyOverwrite

 def testCopyOverwrite(self):
   file_path = os.path.join(self._base_dir, "temp_file")
   file_io.write_string_to_file(file_path, "testing")
   copy_path = os.path.join(self._base_dir, "copy_file")
   file_io.write_string_to_file(copy_path, "copy")
   file_io.copy(file_path, copy_path, overwrite=True)
   self.assertTrue(file_io.file_exists(copy_path))
   self.assertEqual(b"testing", file_io.read_file_to_string(file_path))
Contributor: AriaAsuka, Project: tensorflow, Lines: 8, Source: file_io_test.py

Example 8: testCopyOverwrite

 def testCopyOverwrite(self):
   file_path = os.path.join(self._base_dir, "temp_file")
   file_io.FileIO(file_path, mode="w").write("testing")
   copy_path = os.path.join(self._base_dir, "copy_file")
   file_io.FileIO(copy_path, mode="w").write("copy")
   file_io.copy(file_path, copy_path, overwrite=True)
   self.assertTrue(file_io.file_exists(copy_path))
   self.assertEqual("testing", file_io.FileIO(file_path, mode="r").read())
Contributor: 1000sprites, Project: tensorflow, Lines: 8, Source: file_io_test.py

Example 9: testCopy

 def testCopy(self):
   file_path = os.path.join(self._base_dir, "temp_file")
   file_io.FileIO(file_path, mode="w").write("testing")
   copy_path = os.path.join(self._base_dir, "copy_file")
   file_io.copy(file_path, copy_path)
   self.assertTrue(file_io.file_exists(copy_path))
   f = file_io.FileIO(file_path, mode="r")
   self.assertEqual("testing", f.read())
   self.assertEqual(7, f.tell())
Contributor: 1000sprites, Project: tensorflow, Lines: 9, Source: file_io_test.py

Example 10: testCopy

 def testCopy(self):
   file_path = os.path.join(self.get_temp_dir(), "temp_file")
   file_io.write_string_to_file(file_path, "testing")
   copy_path = os.path.join(self.get_temp_dir(), "copy_file")
   file_io.copy(file_path, copy_path)
   self.assertTrue(file_io.file_exists(copy_path))
   self.assertEqual(b"testing", file_io.read_file_to_string(file_path))
   file_io.delete_file(file_path)
   file_io.delete_file(copy_path)
Contributor: AI-MR-Related, Project: tensorflow, Lines: 9, Source: file_io_test.py

Example 11: preprocess

  def preprocess(train_dataset, output_dir, eval_dataset, checkpoint, pipeline_option):
    """Preprocess data in Cloud with DataFlow."""

    import apache_beam as beam
    import google.datalab.utils
    from . import _preprocess

    if checkpoint is None:
      checkpoint = _util._DEFAULT_CHECKPOINT_GSURL

    job_name = ('preprocess-image-classification-' +
                datetime.datetime.now().strftime('%y%m%d-%H%M%S'))

    staging_package_url = _util.repackage_to_staging(output_dir)
    tmpdir = tempfile.mkdtemp()
    # suppress DataFlow warnings about wheel package as extra package.
    original_level = logging.getLogger().getEffectiveLevel()
    logging.getLogger().setLevel(logging.ERROR)
    try:
      # Workaround for DataFlow 2.0, which doesn't work well with extra packages in GCS.
      # Remove when the issue is fixed and new version of DataFlow is included in Datalab.
      extra_packages = [staging_package_url, _TF_GS_URL, _PROTOBUF_GS_URL]
      local_packages = [os.path.join(tmpdir, os.path.basename(p))
                        for p in extra_packages]
      for source, dest in zip(extra_packages, local_packages):
        file_io.copy(source, dest, overwrite=True)

      options = {
          'staging_location': os.path.join(output_dir, 'tmp', 'staging'),
          'temp_location': os.path.join(output_dir, 'tmp'),
          'job_name': job_name,
          'project': _util.default_project(),
          'extra_packages': local_packages,
          'teardown_policy': 'TEARDOWN_ALWAYS',
          'no_save_main_session': True
      }
      if pipeline_option is not None:
        options.update(pipeline_option)

      opts = beam.pipeline.PipelineOptions(flags=[], **options)
      p = beam.Pipeline('DataflowRunner', options=opts)
      _preprocess.configure_pipeline(p, train_dataset, eval_dataset,
                                     checkpoint, output_dir, job_name)
      job_results = p.run()
    finally:
      shutil.rmtree(tmpdir)
      logging.getLogger().setLevel(original_level)

    if (_util.is_in_IPython()):
      import IPython
      dataflow_url = 'https://console.developers.google.com/dataflow?project=%s' % \
                     _util.default_project()
      html = 'Job "%s" submitted.' % job_name
      html += '<p>Click <a href="%s" target="_blank">here</a> to track preprocessing job. <br/>' \
          % dataflow_url
      IPython.display.display_html(html, raw=True)
    return google.datalab.utils.DataflowJob(job_results)
Contributor: parthea, Project: pydatalab, Lines: 57, Source: _cloud.py

Example 12: batch_predict

  def batch_predict(dataset, model_dir, output_csv, output_bq_table, pipeline_option):
    """Batch predict running in cloud."""

    import apache_beam as beam
    import google.datalab.utils
    from . import _predictor

    if output_csv is None and output_bq_table is None:
      raise ValueError('output_csv and output_bq_table cannot both be None.')
    if 'temp_location' not in pipeline_option:
      raise ValueError('"temp_location" is not set in cloud.')

    job_name = ('batch-predict-image-classification-' +
                datetime.datetime.now().strftime('%y%m%d-%H%M%S'))
    staging_package_url = _util.repackage_to_staging(pipeline_option['temp_location'])
    tmpdir = tempfile.mkdtemp()
    # suppress DataFlow warnings about wheel package as extra package.
    original_level = logging.getLogger().getEffectiveLevel()
    logging.getLogger().setLevel(logging.ERROR)
    try:
      # Workaround for DataFlow 2.0, which doesn't work well with extra packages in GCS.
      # Remove when the issue is fixed and new version of DataFlow is included in Datalab.
      extra_packages = [staging_package_url, _TF_GS_URL, _PROTOBUF_GS_URL]
      local_packages = [os.path.join(tmpdir, os.path.basename(p))
                        for p in extra_packages]
      for source, dest in zip(extra_packages, local_packages):
        file_io.copy(source, dest, overwrite=True)

      options = {
          'staging_location': os.path.join(pipeline_option['temp_location'], 'staging'),
          'job_name': job_name,
          'project': _util.default_project(),
          'extra_packages': local_packages,
          'teardown_policy': 'TEARDOWN_ALWAYS',
          'no_save_main_session': True
      }
      options.update(pipeline_option)

      opts = beam.pipeline.PipelineOptions(flags=[], **options)
      p = beam.Pipeline('DataflowRunner', options=opts)
      _predictor.configure_pipeline(p, dataset, model_dir, output_csv, output_bq_table)
      job_results = p.run()
    finally:
      shutil.rmtree(tmpdir)
      logging.getLogger().setLevel(original_level)

    if (_util.is_in_IPython()):
      import IPython
      dataflow_url = ('https://console.developers.google.com/dataflow?project=%s' %
                      _util.default_project())
      html = 'Job "%s" submitted.' % job_name
      html += ('<p>Click <a href="%s" target="_blank">here</a> to track batch prediction job. <br/>'
               % dataflow_url)
      IPython.display.display_html(html, raw=True)
    return google.datalab.utils.DataflowJob(job_results)
Contributor: parthea, Project: pydatalab, Lines: 55, Source: _cloud.py

Example 13: run_analysis

def run_analysis(args):
  """Builds an analysis files for training."""

  # Read the schema and input feature types
  schema_list = json.loads(
      file_io.read_file_to_string(args.schema_file))

  run_numerical_categorical_analysis(args, schema_list)

  # Also save a copy of the schema in the output folder.
  file_io.copy(args.schema_file,
               os.path.join(args.output_dir, SCHEMA_FILE),
               overwrite=True)
Contributor: googledatalab, Project: pydatalab, Lines: 13, Source: local_preprocess.py

Example 14: recursive_copy

def recursive_copy(src_dir, dest_dir):
  """Copy the contents of src_dir into the folder dest_dir.
  Args:
    src_dir: gcs or local path.
    dest_dir: gcs or local path.
  """

  file_io.recursive_create_dir(dest_dir)
  for file_name in file_io.list_directory(src_dir):
    old_path = os.path.join(src_dir, file_name)
    new_path = os.path.join(dest_dir, file_name)

    if file_io.is_directory(old_path):
      recursive_copy(old_path, new_path)
    else:
      file_io.copy(old_path, new_path, overwrite=True)
Contributor: javiervicho, Project: pydatalab, Lines: 16, Source: task.py
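
A hypothetical invocation of the recursive_copy helper above; the paths are placeholders. Because file_io backs both local and GCS filesystems, the same call can stage a local directory tree into a bucket:

# Placeholder paths; recursive_copy creates dest_dir, copies each file
# with overwrite=True, and recurses into subdirectories.
recursive_copy('/tmp/model_dir', 'gs://my-bucket/staging/model_dir')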

Example 15: _copy_assets_to_destination_dir

  def _copy_assets_to_destination_dir(self, asset_filename_map):
    """Copy all assets from source path to destination path."""
    assets_destination_dir = saved_model_utils.get_or_create_assets_dir(
        self._export_dir)

    # Copy each asset from source path to destination path.
    for asset_basename, asset_source_filepath in asset_filename_map.items():
      asset_destination_filepath = os.path.join(
          compat.as_bytes(assets_destination_dir),
          compat.as_bytes(asset_basename))

      # Only copy the asset file to the destination if it does not already
      # exist. This is to ensure that an asset with the same name defined as
      # part of multiple graphs is only copied the first time.
      if not file_io.file_exists(asset_destination_filepath):
        file_io.copy(asset_source_filepath, asset_destination_filepath)

    tf_logging.info("Assets written to: %s",
                    compat.as_text(assets_destination_dir))
Contributor: JonathanRaiman, Project: tensorflow, Lines: 19, Source: builder_impl.py


Note: The tensorflow.python.lib.io.file_io.copy examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by many developers; copyright in the source code belongs to the original authors, and distribution and use should follow the corresponding project's license. Do not reproduce without permission.