

Python parallel_reader.parallel_read Method Code Examples

This article collects typical usage examples of the Python method tensorflow.contrib.slim.python.slim.data.parallel_reader.parallel_read. If you are unsure what parallel_reader.parallel_read does or how to call it, the curated examples below should help. You can also explore further usage examples from the containing module, tensorflow.contrib.slim.python.slim.data.parallel_reader.


The following presents 4 code examples of the parallel_reader.parallel_read method, ordered by popularity by default.
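
Before the examples, here is a minimal sketch of calling parallel_read directly, to show the shape of the API: it starts num_readers reader ops that all feed a single common queue and returns a (key, value) pair of string tensors. This is an illustration under assumptions, not code taken from the examples below; it assumes TensorFlow 1.x with tf.contrib available, and the file pattern 'data/train-*.tfrecord' is a hypothetical placeholder.

import tensorflow as tf
from tensorflow.contrib.slim.python.slim.data import parallel_reader

# Build the parallel reading pipeline: several readers, one shared queue.
key, value = parallel_reader.parallel_read(
    'data/train-*.tfrecord',        # hypothetical file pattern (assumption)
    reader_class=tf.TFRecordReader,
    num_readers=4,
    shuffle=True)

with tf.Session() as sess:
  # Epoch counters live in local variables; the queues are driven by
  # queue runner threads started below.
  sess.run(tf.local_variables_initializer())
  coord = tf.train.Coordinator()
  threads = tf.train.start_queue_runners(sess=sess, coord=coord)
  serialized_record = sess.run(value)  # one raw serialized record per run()
  coord.request_stop()
  coord.join(threads)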

Example 1: testTFRecordReader

# Required import: from tensorflow.contrib.slim.python.slim.data import parallel_reader [as alias]
# Or: from tensorflow.contrib.slim.python.slim.data.parallel_reader import parallel_read [as alias]
# This snippet additionally relies on the original test file's imports:
#   from tensorflow.contrib.slim.python.slim.data import test_utils
#   from tensorflow.python.ops import io_ops
#   from tensorflow.python.training import supervisor
def testTFRecordReader(self):
    with self.test_session():
      self._tfrecord_paths = test_utils.create_tfrecord_files(
          self.get_temp_dir(), num_files=3)

    key, value = parallel_reader.parallel_read(
        self._tfrecord_paths, reader_class=io_ops.TFRecordReader, num_readers=3)

    sv = supervisor.Supervisor(logdir=self.get_temp_dir())
    with sv.prepare_or_wait_for_session() as sess:
      sv.start_queue_runners(sess)

      flowers = 0
      num_reads = 100
      for _ in range(num_reads):
        current_key, _ = sess.run([key, value])
        if 'flowers' in str(current_key):
          flowers += 1
      self.assertGreater(flowers, 0)
      self.assertEqual(flowers, num_reads)
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 22, Source: parallel_reader_test.py
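
A usage note on the example above: with the default num_epochs=None the common queue cycles through the data indefinitely, which is why the test can simply loop 100 times. When a finite num_epochs is passed, the queue eventually empties and sess.run raises tf.errors.OutOfRangeError. Below is a hedged sketch of draining one full epoch, reusing the io_ops and supervisor imports from the example and assuming tensorflow is also imported as tf; the file pattern and logdir are hypothetical.

key, value = parallel_reader.parallel_read(
    'data/train-*.tfrecord',          # hypothetical file pattern (assumption)
    reader_class=io_ops.TFRecordReader,
    num_readers=3,
    num_epochs=1)                     # read each data source exactly once

sv = supervisor.Supervisor(logdir='/tmp/parallel_read_demo')  # hypothetical logdir
with sv.prepare_or_wait_for_session() as sess:
  sv.start_queue_runners(sess)
  num_records = 0
  try:
    while True:
      sess.run(value)
      num_records += 1                # count records until the epoch ends
  except tf.errors.OutOfRangeError:
    pass                              # input exhausted after one epoch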

Example 2: __init__

# Required import: from tensorflow.contrib.slim.python.slim.data import parallel_reader [as alias]
# Or: from tensorflow.contrib.slim.python.slim.data.parallel_reader import parallel_read [as alias]
def __init__(self,
               dataset,
               num_readers=1,
               shuffle=True,
               num_epochs=None,
               common_queue_capacity=256,
               common_queue_min=128,
               seed=None):
    """Creates a DatasetDataProvider.

    Args:
      dataset: An instance of the Dataset class.
      num_readers: The number of parallel readers to use.
      shuffle: Whether to shuffle the data sources and common queue when
        reading.
      num_epochs: The number of times each data source is read. If left as None,
        the data will be cycled through indefinitely.
      common_queue_capacity: The capacity of the common queue.
      common_queue_min: The minimum number of elements in the common queue after
        a dequeue.
      seed: The seed to use if shuffling.
    """
    _, data = parallel_reader.parallel_read(
        dataset.data_sources,
        reader_class=dataset.reader,
        num_epochs=num_epochs,
        num_readers=num_readers,
        shuffle=shuffle,
        capacity=common_queue_capacity,
        min_after_dequeue=common_queue_min,
        seed=seed)

    items = dataset.decoder.list_items()
    tensors = dataset.decoder.decode(data, items)

    super(DatasetDataProvider, self).__init__(
        items_to_tensors=dict(zip(items, tensors)),
        num_samples=dataset.num_samples) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 40, Source: dataset_data_provider.py
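
For context on how a provider built this way is consumed: DatasetDataProvider inherits get() from slim's DataProvider base class, which maps item names to the decoded tensors registered in items_to_tensors above. A hedged usage sketch follows; my_dataset and the item names 'image' and 'label' are assumptions standing in for a real slim Dataset.

# my_dataset is a hypothetical slim Dataset (in practice it usually comes
# from a dataset factory in the slim model library).
provider = DatasetDataProvider(my_dataset, num_readers=4, shuffle=True)
image, label = provider.get(['image', 'label'])  # decoded, still unbatched
# The returned tensors are typically passed to tf.train.batch or
# tf.train.shuffle_batch to form minibatches.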

Example 3: __init__

# Required import: from tensorflow.contrib.slim.python.slim.data import parallel_reader [as alias]
# Or: from tensorflow.contrib.slim.python.slim.data.parallel_reader import parallel_read [as alias]
def __init__(self, dataset, num_readers=1, shuffle=True, num_epochs=None,
               common_queue_capacity=256, common_queue_min=128, seed=None):
    """Creates a DatasetDataProvider.

    Args:
      dataset: An instance of the Dataset class.
      num_readers: The number of parallel readers to use.
      shuffle: Whether to shuffle the data sources and common queue when
        reading.
      num_epochs: The number of times each data source is read. If left as None,
        the data will be cycled through indefinitely.
      common_queue_capacity: The capacity of the common queue.
      common_queue_min: The minimum number of elements in the common queue after
        a dequeue.
      seed: The seed to use if shuffling.
    """
    _, data = parallel_reader.parallel_read(
        dataset.data_sources,
        reader_class=dataset.reader,
        num_epochs=num_epochs,
        num_readers=num_readers,
        shuffle=shuffle,
        capacity=common_queue_capacity,
        min_after_dequeue=common_queue_min,
        seed=seed)

    items = dataset.decoder.list_items()
    tensors = dataset.decoder.decode(data, items)

    super(DatasetDataProvider, self).__init__(
        items_to_tensors=dict(zip(items, tensors)),
        num_samples=dataset.num_samples) 
Developer: tobegit3hub, Project: deep_image_model, Lines: 34, Source: dataset_data_provider.py

Example 4: __init__

# Required import: from tensorflow.contrib.slim.python.slim.data import parallel_reader [as alias]
# Or: from tensorflow.contrib.slim.python.slim.data.parallel_reader import parallel_read [as alias]
def __init__(self,
               dataset,
               num_readers=1,
               reader_kwargs=None,
               shuffle=True,
               num_epochs=None,
               common_queue_capacity=256,
               common_queue_min=128,
               record_key='record_key',
               seed=None,
               scope=None):
    """Creates a DatasetDataProvider.

    Args:
      dataset: An instance of the Dataset class.
      num_readers: The number of parallel readers to use.
      reader_kwargs: An optional dict of kwargs for the reader.
      shuffle: Whether to shuffle the data sources and common queue when
        reading.
      num_epochs: The number of times each data source is read. If left as None,
        the data will be cycled through indefinitely.
      common_queue_capacity: The capacity of the common queue.
      common_queue_min: The minimum number of elements in the common queue after
        a dequeue.
      record_key: The item name to use for the dataset record keys in the
        provided tensors.
      seed: The seed to use if shuffling.
      scope: Optional name scope for the ops.
    Raises:
      ValueError: If `record_key` matches one of the items in the dataset.
    """
    key, data = parallel_reader.parallel_read(
        dataset.data_sources,
        reader_class=dataset.reader,
        num_epochs=num_epochs,
        num_readers=num_readers,
        reader_kwargs=reader_kwargs,
        shuffle=shuffle,
        capacity=common_queue_capacity,
        min_after_dequeue=common_queue_min,
        seed=seed,
        scope=scope)

    items = dataset.decoder.list_items()
    tensors = dataset.decoder.decode(data, items)

    if record_key in items:
      raise ValueError('The item name used for `record_key` cannot also be '
                       'used for a dataset item: %s' % record_key)
    items.append(record_key)
    tensors.append(key)

    super(DatasetDataProvider, self).__init__(
        items_to_tensors=dict(zip(items, tensors)),
        num_samples=dataset.num_samples) 
Developer: ryfeus, Project: lambda-packs, Lines: 57, Source: dataset_data_provider.py
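
The record_key plumbing in this variant means the raw record key returned by parallel_read can be fetched like any other decoded item, which is useful for debugging or for joining predictions back to their source records. A hedged sketch follows; my_dataset and the 'image' item name are assumptions.

provider = DatasetDataProvider(my_dataset, record_key='record_key')
# 'record_key' is appended to the dataset's own items, so it can be
# requested alongside them:
image, key = provider.get(['image', 'record_key'])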


Note: The tensorflow.contrib.slim.python.slim.data.parallel_reader.parallel_read examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their authors; copyright remains with the original authors, and any distribution or use should follow the corresponding project's License. Do not reproduce without permission.