

Python parallel_reader.parallel_read Method Code Examples

This article collects typical usage examples of the Python method tensorflow.contrib.slim.python.slim.data.parallel_reader.parallel_read. If you are unsure what parallel_reader.parallel_read does, how to call it, or what working code looks like, the curated examples below should help. You can also explore further usage examples from the module the method lives in, tensorflow.contrib.slim.python.slim.data.parallel_reader.


The following presents 4 code examples of the parallel_reader.parallel_read method, sorted by popularity by default.
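Before the full examples, here is a minimal sketch of the typical call shape, distilled from the examples below; the TFRecord paths are placeholder assumptions, and the returned key/value tensors only produce data once queue runners are started in a session (see Example 1).

# Minimal sketch; the TFRecord paths are placeholder assumptions.
from tensorflow.python.ops import io_ops
from tensorflow.contrib.slim.python.slim.data import parallel_reader

data_sources = ['/tmp/records/train-0.tfrecord', '/tmp/records/train-1.tfrecord']
key, value = parallel_reader.parallel_read(
    data_sources,
    reader_class=io_ops.TFRecordReader,  # reader type matching the file format
    num_readers=4,                       # readers feeding a shared common queue
    shuffle=True)                        # shuffle file order and the common queue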

Example 1: testTFRecordReader

# Required import: from tensorflow.contrib.slim.python.slim.data import parallel_reader [as alias]
# Or: from tensorflow.contrib.slim.python.slim.data.parallel_reader import parallel_read [as alias]
def testTFRecordReader(self):
    with self.test_session():
      self._tfrecord_paths = test_utils.create_tfrecord_files(
          self.get_temp_dir(), num_files=3)

    key, value = parallel_reader.parallel_read(
        self._tfrecord_paths, reader_class=io_ops.TFRecordReader, num_readers=3)

    sv = supervisor.Supervisor(logdir=self.get_temp_dir())
    with sv.prepare_or_wait_for_session() as sess:
      sv.start_queue_runners(sess)

      flowers = 0
      num_reads = 100
      for _ in range(num_reads):
        current_key, _ = sess.run([key, value])
        if 'flowers' in str(current_key):
          flowers += 1
      self.assertGreater(flowers, 0)
      self.assertEquals(flowers, num_reads) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines of code: 22, Source file: parallel_reader_test.py
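The test above drives the input pipeline through a Supervisor. As a point of comparison (an assumption, not part of the quoted test), the same key/value tensors can also be consumed with a plain session and a Coordinator:

# Hedged sketch: plain-session equivalent of the Supervisor pattern above.
import tensorflow as tf

with tf.Session() as sess:
  coord = tf.train.Coordinator()
  threads = tf.train.start_queue_runners(sess=sess, coord=coord)  # start reader queues
  current_key, current_value = sess.run([key, value])             # one parallel read
  coord.request_stop()
  coord.join(threads)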

Example 2: __init__

# Required import: from tensorflow.contrib.slim.python.slim.data import parallel_reader [as alias]
# Or: from tensorflow.contrib.slim.python.slim.data.parallel_reader import parallel_read [as alias]
def __init__(self,
               dataset,
               num_readers=1,
               shuffle=True,
               num_epochs=None,
               common_queue_capacity=256,
               common_queue_min=128,
               seed=None):
    """Creates a DatasetDataProvider.

    Args:
      dataset: An instance of the Dataset class.
      num_readers: The number of parallel readers to use.
      shuffle: Whether to shuffle the data sources and common queue when
        reading.
      num_epochs: The number of times each data source is read. If left as None,
        the data will be cycled through indefinitely.
      common_queue_capacity: The capacity of the common queue.
      common_queue_min: The minimum number of elements in the common queue after
        a dequeue.
      seed: The seed to use if shuffling.
    """
    _, data = parallel_reader.parallel_read(
        dataset.data_sources,
        reader_class=dataset.reader,
        num_epochs=num_epochs,
        num_readers=num_readers,
        shuffle=shuffle,
        capacity=common_queue_capacity,
        min_after_dequeue=common_queue_min,
        seed=seed)

    items = dataset.decoder.list_items()
    tensors = dataset.decoder.decode(data, items)

    super(DatasetDataProvider, self).__init__(
        items_to_tensors=dict(zip(items, tensors)),
        num_samples=dataset.num_samples) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines of code: 40, Source file: dataset_data_provider.py
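As a hedged usage sketch (not part of the quoted source), a provider built this way is typically consumed through get(), which DatasetDataProvider inherits from its DataProvider base class; the item names 'image' and 'label' and the variable my_dataset are assumptions that depend on the dataset's decoder:

# Hypothetical usage; 'image'/'label' must match items defined by my_dataset's decoder.
provider = DatasetDataProvider(my_dataset, num_readers=4, shuffle=True)
image, label = provider.get(['image', 'label'])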

Example 3: __init__

# Required import: from tensorflow.contrib.slim.python.slim.data import parallel_reader [as alias]
# Or: from tensorflow.contrib.slim.python.slim.data.parallel_reader import parallel_read [as alias]
def __init__(self, dataset, num_readers=1, shuffle=True, num_epochs=None,
               common_queue_capacity=256, common_queue_min=128, seed=None):
    """Creates a DatasetDataProvider.

    Args:
      dataset: An instance of the Dataset class.
      num_readers: The number of parallel readers to use.
      shuffle: Whether to shuffle the data sources and common queue when
        reading.
      num_epochs: The number of times each data source is read. If left as None,
        the data will be cycled through indefinitely.
      common_queue_capacity: The capacity of the common queue.
      common_queue_min: The minimum number of elements in the common queue after
        a dequeue.
      seed: The seed to use if shuffling.
    """
    _, data = parallel_reader.parallel_read(
        dataset.data_sources,
        reader_class=dataset.reader,
        num_epochs=num_epochs,
        num_readers=num_readers,
        shuffle=shuffle,
        capacity=common_queue_capacity,
        min_after_dequeue=common_queue_min,
        seed=seed)

    items = dataset.decoder.list_items()
    tensors = dataset.decoder.decode(data, items)

    super(DatasetDataProvider, self).__init__(
        items_to_tensors=dict(zip(items, tensors)),
        num_samples=dataset.num_samples) 
Developer: tobegit3hub, Project: deep_image_model, Lines of code: 34, Source file: dataset_data_provider.py

Example 4: __init__

# Required import: from tensorflow.contrib.slim.python.slim.data import parallel_reader [as alias]
# Or: from tensorflow.contrib.slim.python.slim.data.parallel_reader import parallel_read [as alias]
def __init__(self,
               dataset,
               num_readers=1,
               reader_kwargs=None,
               shuffle=True,
               num_epochs=None,
               common_queue_capacity=256,
               common_queue_min=128,
               record_key='record_key',
               seed=None,
               scope=None):
    """Creates a DatasetDataProvider.

    Args:
      dataset: An instance of the Dataset class.
      num_readers: The number of parallel readers to use.
      reader_kwargs: An optional dict of kwargs for the reader.
      shuffle: Whether to shuffle the data sources and common queue when
        reading.
      num_epochs: The number of times each data source is read. If left as None,
        the data will be cycled through indefinitely.
      common_queue_capacity: The capacity of the common queue.
      common_queue_min: The minimum number of elements in the common queue after
        a dequeue.
      record_key: The item name to use for the dataset record keys in the
        provided tensors.
      seed: The seed to use if shuffling.
      scope: Optional name scope for the ops.
    Raises:
      ValueError: If `record_key` matches one of the items in the dataset.
    """
    key, data = parallel_reader.parallel_read(
        dataset.data_sources,
        reader_class=dataset.reader,
        num_epochs=num_epochs,
        num_readers=num_readers,
        reader_kwargs=reader_kwargs,
        shuffle=shuffle,
        capacity=common_queue_capacity,
        min_after_dequeue=common_queue_min,
        seed=seed,
        scope=scope)

    items = dataset.decoder.list_items()
    tensors = dataset.decoder.decode(data, items)

    if record_key in items:
      raise ValueError('The item name used for `record_key` cannot also be '
                       'used for a dataset item: %s' % record_key)
    items.append(record_key)
    tensors.append(key)

    super(DatasetDataProvider, self).__init__(
        items_to_tensors=dict(zip(items, tensors)),
        num_samples=dataset.num_samples) 
Developer: ryfeus, Project: lambda-packs, Lines of code: 57, Source file: dataset_data_provider.py
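A hedged sketch of what record_key adds (my_dataset and the 'image' item name are assumptions): the key returned by parallel_read is exposed as an extra item, so each decoded example can be traced back to its source record.

# Hypothetical usage; record_key defaults to 'record_key'.
provider = DatasetDataProvider(my_dataset, num_readers=2, record_key='record_key')
image, source_key = provider.get(['image', 'record_key'])  # source_key names the origin record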


Note: The tensorflow.contrib.slim.python.slim.data.parallel_reader.parallel_read method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code belongs to the original authors. Please follow the corresponding project's License when distributing or using the code, and do not reproduce this article without permission.