

Python dataset_utils.download_and_uncompress_tarball method: code examples

This article collects typical usage examples of the Python method datasets.dataset_utils.download_and_uncompress_tarball. If you have been wondering what exactly this method does, how to call it, or where to find working examples, the hand-picked code samples below should help. You can also explore other usage examples from the datasets.dataset_utils module.


The sections below present 4 code examples of dataset_utils.download_and_uncompress_tarball, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code samples.
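For orientation, here is a minimal sketch of how the method is typically called before diving into the full examples: it takes a tarball URL and a target directory, downloads the archive, and extracts it into that directory. The URL and directory below are placeholders for illustration, not values taken from the examples that follow.

from datasets import dataset_utils

# Placeholder values for illustration only; the real scripts define their own
# _DATA_URL constants and receive dataset_dir as an argument.
_DATA_URL = 'https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
dataset_dir = '/tmp/cifar10'

# Downloads the tarball and extracts its contents into dataset_dir.
dataset_utils.download_and_uncompress_tarball(_DATA_URL, dataset_dir)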

Example 1: run

# Required import: from datasets import dataset_utils [as alias]
# Or: from datasets.dataset_utils import download_and_uncompress_tarball [as alias]
def run(dataset_dir):
  """Runs the download and conversion operation.

  Args:
    dataset_dir: The dataset directory where the dataset is stored.
  """
  if not tf.gfile.Exists(dataset_dir):
    tf.gfile.MakeDirs(dataset_dir)

  training_filename = _get_output_filename(dataset_dir, 'train')
  testing_filename = _get_output_filename(dataset_dir, 'test')

  if tf.gfile.Exists(training_filename) and tf.gfile.Exists(testing_filename):
    print('Dataset files already exist. Exiting without re-creating them.')
    return

  dataset_utils.download_and_uncompress_tarball(_DATA_URL, dataset_dir)

  # First, process the training data:
  with tf.python_io.TFRecordWriter(training_filename) as tfrecord_writer:
    offset = 0
    for i in range(_NUM_TRAIN_FILES):
      filename = os.path.join(dataset_dir,
                              'cifar-10-batches-py',
                              'data_batch_%d' % (i + 1))  # 1-indexed.
      offset = _add_to_tfrecord(filename, tfrecord_writer, offset)

  # Next, process the testing data:
  with tf.python_io.TFRecordWriter(testing_filename) as tfrecord_writer:
    filename = os.path.join(dataset_dir,
                            'cifar-10-batches-py',
                            'test_batch')
    _add_to_tfrecord(filename, tfrecord_writer)

  # Finally, write the labels file:
  labels_to_class_names = dict(zip(range(len(_CLASS_NAMES)), _CLASS_NAMES))
  dataset_utils.write_label_file(labels_to_class_names, dataset_dir)

  _clean_up_temporary_files(dataset_dir)
  print('\nFinished converting the Cifar10 dataset!') 
Developer: ringringyi, Project: DOTA_models, Lines of code: 42, Source file: download_and_convert_cifar10.py
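In the TF-Slim style of dataset tooling, a run() function like this one is normally called by a small driver script rather than executed directly. A hypothetical invocation (the module name and target path are assumptions, not part of the snippet above) could look like this:

from datasets import download_and_convert_cifar10  # assumed module name

download_and_convert_cifar10.run('/tmp/cifar10')  # hypothetical dataset_dir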

Example 2: run

# Required import: from datasets import dataset_utils [as alias]
# Or: from datasets.dataset_utils import download_and_uncompress_tarball [as alias]
def run(dataset_dir):
  """Runs the download and conversion operation.

  Args:
    dataset_dir: The dataset directory where the dataset is stored.
  """
  if not tf.gfile.Exists(dataset_dir):
    tf.gfile.MakeDirs(dataset_dir)

  if _dataset_exists(dataset_dir):
    print('Dataset files already exist. Exiting without re-creating them.')
    return

  dataset_utils.download_and_uncompress_tarball(_DATA_URL, dataset_dir)
  photo_filenames, class_names = _get_filenames_and_classes(dataset_dir)
  class_names_to_ids = dict(zip(class_names, range(len(class_names))))

  # Divide into train and validation sets:
  random.seed(_RANDOM_SEED)
  random.shuffle(photo_filenames)
  training_filenames = photo_filenames[_NUM_VALIDATION:]
  validation_filenames = photo_filenames[:_NUM_VALIDATION]

  # First, convert the training and validation sets.
  _convert_dataset('train', training_filenames, class_names_to_ids,
                   dataset_dir)
  _convert_dataset('validation', validation_filenames, class_names_to_ids,
                   dataset_dir)

  # Finally, write the labels file:
  labels_to_class_names = dict(zip(range(len(class_names)), class_names))
  dataset_utils.write_label_file(labels_to_class_names, dataset_dir)

  _clean_up_temporary_files(dataset_dir)
  print('\nFinished converting the Flowers dataset!') 
Developer: ringringyi, Project: DOTA_models, Lines of code: 37, Source file: download_and_convert_flowers.py
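Both dict(zip(...)) expressions in the example above are worth a closer look: the first maps class names to integer ids for conversion, and the second inverts that mapping for the labels file. A small self-contained sketch with made-up class names shows what they produce:

class_names = ['daisy', 'rose', 'tulip']  # made-up names, for illustration only

class_names_to_ids = dict(zip(class_names, range(len(class_names))))
# {'daisy': 0, 'rose': 1, 'tulip': 2}

labels_to_class_names = dict(zip(range(len(class_names)), class_names))
# {0: 'daisy', 1: 'rose', 2: 'tulip'}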

Example 3: run

# Required import: from datasets import dataset_utils [as alias]
# Or: from datasets.dataset_utils import download_and_uncompress_tarball [as alias]
def run(input_dataset_dir, dataset_dir):
  """Runs the download and conversion operation.

  Args:
    input_dataset_dir: The dataset directory to use as input
    dataset_dir: The dataset directory where the dataset is stored.
  """
  if not tf.gfile.Exists(dataset_dir):
    tf.gfile.MakeDirs(dataset_dir)

  if _dataset_exists(dataset_dir):
    print('Dataset files already exist. Exiting without re-creating them.')
    return

  # dataset_utils.download_and_uncompress_tarball(_DATA_URL, dataset_dir)
  photo_filenames, class_names = _get_filenames_and_classes(input_dataset_dir)
  class_names_to_ids = dict(zip(class_names, range(len(class_names))))

  # Divide into train, validation, and test sets:
  random.seed(_RANDOM_SEED)
  random.shuffle(photo_filenames)
  training_filenames = photo_filenames[_NUM_VALIDATION:-_NUM_TEST]
  validation_filenames = photo_filenames[:_NUM_VALIDATION]
  test_filenames = photo_filenames[-_NUM_TEST:]

  # First, convert the training, test, and validation sets.
  # _convert_dataset('train', training_filenames, class_names_to_ids,
  #                  dataset_dir)
  # _convert_dataset('validation', validation_filenames, class_names_to_ids,
  #                  dataset_dir)
  _convert_dataset('test', test_filenames, class_names_to_ids,
                   dataset_dir)

  # Finally, write the labels file:
  labels_to_class_names = dict(zip(range(len(class_names)), class_names))
  dataset_utils.write_label_file(labels_to_class_names, dataset_dir)

  # _clean_up_temporary_files(dataset_dir)
  print('\nFinished converting the wikiart dataset!') 
Developer: mlberkeley, Project: Creative-Adversarial-Networks, Lines of code: 41, Source file: convert_wikiart.py
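The three list slices in this variant carve one shuffled file list into validation, training, and test subsets. With hypothetical sizes (the real _NUM_VALIDATION and _NUM_TEST constants are defined elsewhere in convert_wikiart.py), the arithmetic works out as follows:

import random

photo_filenames = ['img_%02d.jpg' % i for i in range(10)]  # 10 dummy files
_RANDOM_SEED = 0      # assumed value
_NUM_VALIDATION = 2   # assumed value
_NUM_TEST = 3         # assumed value

random.seed(_RANDOM_SEED)
random.shuffle(photo_filenames)
validation_filenames = photo_filenames[:_NUM_VALIDATION]          # first 2 files
training_filenames = photo_filenames[_NUM_VALIDATION:-_NUM_TEST]  # middle 5 files
test_filenames = photo_filenames[-_NUM_TEST:]                     # last 3 files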

Example 4: run

# Required import: from datasets import dataset_utils [as alias]
# Or: from datasets.dataset_utils import download_and_uncompress_tarball [as alias]
def run(dataset_dir):
  """Runs the download and conversion operation.

  Args:
    dataset_dir: The dataset directory where the dataset is stored.
  """
  if not tf.gfile.Exists(dataset_dir):
    tf.gfile.MakeDirs(dataset_dir)

  dataset_utils.download_and_uncompress_tarball(_DATA_URL, dataset_dir)

  # First, process the training data:
  #with tf.python_io.TFRecordWriter(training_filename) as tfrecord_writer:
  filenames = []
  for i in range(_NUM_TRAIN_FILES):
    filenames.append(os.path.join(dataset_dir,
                            'cifar-10-batches-py',
                            'data_batch_%d' % (i + 1)))  # 1-indexed.
  _add_to_tfrecord(filenames, 'train', dataset_dir)

  # Next, process the testing data:
  #with tf.python_io.TFRecordWriter(testing_filename) as tfrecord_writer:
  filenames = []
  filenames.append( os.path.join(dataset_dir,
                          'cifar-10-batches-py',
                          'test_batch'))
  _add_to_tfrecord(filenames, 'test', dataset_dir)

  # Finally, write the labels file:
  labels_to_class_names = dict(zip(range(len(_CLASS_NAMES)), _CLASS_NAMES))
  dataset_utils.write_label_file(labels_to_class_names, dataset_dir)

  _clean_up_temporary_files(dataset_dir)
  print('\nFinished converting the Cifar10 dataset!') 
Developer: wenwei202, Project: terngrad, Lines of code: 36, Source file: download_convert_and_shard_cifar10.py
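All four examples end by calling dataset_utils.write_label_file, which persists the labels_to_class_names mapping as a small text file in dataset_dir. The rough stand-in below (not the library function itself) assumes the usual TF-Slim format of one id:name pair per line in a labels.txt file:

import os

_CLASS_NAMES = ['airplane', 'automobile', 'bird', 'cat', 'deer',
                'dog', 'frog', 'horse', 'ship', 'truck']  # the CIFAR-10 classes
labels_to_class_names = dict(zip(range(len(_CLASS_NAMES)), _CLASS_NAMES))


def write_label_file_sketch(labels_to_class_names, dataset_dir,
                            filename='labels.txt'):
  # Stand-in for dataset_utils.write_label_file; the exact filename and
  # line format are assumptions about the library, not taken from it.
  with open(os.path.join(dataset_dir, filename), 'w') as f:
    for label, class_name in labels_to_class_names.items():
      f.write('%d:%s\n' % (label, class_name))


dataset_dir = '/tmp/cifar10'  # hypothetical directory
os.makedirs(dataset_dir, exist_ok=True)
write_label_file_sketch(labels_to_class_names, dataset_dir)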


Note: The datasets.dataset_utils.download_and_uncompress_tarball examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's license. Please do not republish this article without permission.