Python io_ops.TFRecordReader Method Code Examples

This article collects typical usage examples of the tensorflow.python.ops.io_ops.TFRecordReader method in Python. If you are unsure what io_ops.TFRecordReader does or how to use it, the curated examples below should help. You can also browse further usage examples from the containing module, tensorflow.python.ops.io_ops.


Twelve code examples of the io_ops.TFRecordReader method are shown below, ordered by popularity.
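
Before the individual examples, here is a minimal sketch, not taken from any of the projects below, of how io_ops.TFRecordReader is typically driven in TF 1.x graph mode; 'records.tfrecord' is a hypothetical placeholder path:

import tensorflow as tf
from tensorflow.python.ops import io_ops

# Queue of input file names; num_epochs creates a local variable that
# must be initialized before the queue runners start.
filename_queue = tf.train.string_input_producer(
    ['records.tfrecord'], num_epochs=1)
reader = io_ops.TFRecordReader()
# Each read() dequeues one record: key identifies the file and offset,
# value is a serialized tf.train.Example.
key, value = reader.read(filename_queue)

with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  coord = tf.train.Coordinator()
  threads = tf.train.start_queue_runners(sess=sess, coord=coord)
  try:
    while not coord.should_stop():
      current_key, current_value = sess.run([key, value])
  except tf.errors.OutOfRangeError:
    pass  # every record in the single epoch has been read
  finally:
    coord.request_stop()
    coord.join(threads)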

Example 1: TFRecordSource

# Required import: from tensorflow.python.ops import io_ops
# Alternatively: from tensorflow.python.ops.io_ops import TFRecordReader
def TFRecordSource(file_names,
                   reader_kwargs=None,
                   enqueue_size=1,
                   batch_size=1,
                   queue_capacity=None,
                   shuffle=False,
                   min_after_dequeue=None,
                   num_threads=1,
                   seed=None):
  return ReaderSource(io_ops.TFRecordReader,
                      work_units=file_names,
                      reader_kwargs=reader_kwargs,
                      enqueue_size=enqueue_size,
                      batch_size=batch_size,
                      queue_capacity=queue_capacity,
                      shuffle=shuffle,
                      min_after_dequeue=min_after_dequeue,
                      num_threads=num_threads,
                      seed=seed) 
Developer: ryfeus, Project: lambda-packs, Lines: 21, Source: reader_source.py
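
TFRecordSource is a thin binding: it fixes the reader to io_ops.TFRecordReader and forwards everything else to ReaderSource. A hypothetical construction, with file names and parameter values chosen purely for illustration:

source = TFRecordSource(
    file_names=['train-0.tfrecord', 'train-1.tfrecord'],
    batch_size=32,
    queue_capacity=256,
    shuffle=True,
    min_after_dequeue=64,
    num_threads=2)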

Example 2: testTFRecordReader

# Required import: from tensorflow.python.ops import io_ops
# Alternatively: from tensorflow.python.ops.io_ops import TFRecordReader
def testTFRecordReader(self):
    with self.test_session():
      self._tfrecord_paths = test_utils.create_tfrecord_files(
          self.get_temp_dir(), num_files=3)

    key, value = parallel_reader.parallel_read(
        self._tfrecord_paths, reader_class=io_ops.TFRecordReader, num_readers=3)

    sv = supervisor.Supervisor(logdir=self.get_temp_dir())
    with sv.prepare_or_wait_for_session() as sess:
      sv.start_queue_runners(sess)

      flowers = 0
      num_reads = 100
      for _ in range(num_reads):
        current_key, _ = sess.run([key, value])
        if 'flowers' in str(current_key):
          flowers += 1
      self.assertGreater(flowers, 0)
      self.assertEqual(flowers, num_reads)
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 22, Source: parallel_reader_test.py
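
parallel_read constructs num_readers TFRecordReader instances that pull file names from one shared queue and merge their (key, value) outputs; every key matches here because test_utils.create_tfrecord_files apparently names each shard with a 'flowers' prefix. A hedged sketch of the same call with the shuffle-related keywords spelled out (the keyword names follow the tf-slim signature; the values are illustrative):

key, value = parallel_reader.parallel_read(
    tfrecord_paths,                      # list of TFRecord file paths
    reader_class=io_ops.TFRecordReader,
    num_readers=3,
    shuffle=True,            # pass (key, value) pairs through a RandomShuffleQueue
    capacity=256,            # illustrative capacity of that queue
    min_after_dequeue=128)   # illustrative lower bound kept for shuffling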

Example 3: testOutOfRangeError

# Required import: from tensorflow.python.ops import io_ops
# Alternatively: from tensorflow.python.ops.io_ops import TFRecordReader
def testOutOfRangeError(self):
    with self.test_session():
      [tfrecord_path] = test_utils.create_tfrecord_files(
          self.get_temp_dir(), num_files=1)

    key, value = parallel_reader.single_pass_read(
        tfrecord_path, reader_class=io_ops.TFRecordReader)
    init_op = variables.local_variables_initializer()

    with self.test_session() as sess:
      sess.run(init_op)
      with queues.QueueRunners(sess):
        num_reads = 11
        with self.assertRaises(errors_impl.OutOfRangeError):
          for _ in range(num_reads):
            sess.run([key, value]) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 18, Source: parallel_reader_test.py
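
As the name suggests, single_pass_read wraps a single reader around a filename queue built with num_epochs=1; the epoch counter is a local variable, which is why local_variables_initializer() must run before the queue runners start, and why a read past the last record raises OutOfRangeError instead of looping forever.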

Example 4: testTFRecordReader

# Required import: from tensorflow.python.ops import io_ops
# Alternatively: from tensorflow.python.ops.io_ops import TFRecordReader
def testTFRecordReader(self):
    with self.cached_session():
      self._tfrecord_paths = test_utils.create_tfrecord_files(
          tempfile.mkdtemp(), num_files=3)

    key, value = parallel_reader.parallel_read(
        self._tfrecord_paths, reader_class=io_ops.TFRecordReader, num_readers=3)

    sv = supervisor.Supervisor(logdir=tempfile.mkdtemp())
    with sv.prepare_or_wait_for_session() as sess:
      sv.start_queue_runners(sess)

      flowers = 0
      num_reads = 100
      for _ in range(num_reads):
        current_key, _ = sess.run([key, value])
        if 'flowers' in str(current_key):
          flowers += 1
      self.assertGreater(flowers, 0)
      self.assertEqual(flowers, num_reads) 
Developer: google-research, Project: tf-slim, Lines: 22, Source: parallel_reader_test.py

Example 5: testOutOfRangeError

# Required import: from tensorflow.python.ops import io_ops
# Alternatively: from tensorflow.python.ops.io_ops import TFRecordReader
def testOutOfRangeError(self):
    with self.cached_session():
      [tfrecord_path] = test_utils.create_tfrecord_files(
          tempfile.mkdtemp(), num_files=1)

    key, value = parallel_reader.single_pass_read(
        tfrecord_path, reader_class=io_ops.TFRecordReader)
    init_op = variables.local_variables_initializer()

    with self.cached_session() as sess:
      sess.run(init_op)
      with queues.QueueRunners(sess):
        num_reads = 11
        with self.assertRaises(errors_impl.OutOfRangeError):
          for _ in range(num_reads):
            sess.run([key, value]) 
Developer: google-research, Project: tf-slim, Lines: 18, Source: parallel_reader_test.py

Example 6: _verify_all_data_sources_read

# Required import: from tensorflow.python.ops import io_ops
# Alternatively: from tensorflow.python.ops.io_ops import TFRecordReader
def _verify_all_data_sources_read(self, shared_queue):
    with self.test_session():
      tfrecord_paths = test_utils.create_tfrecord_files(
          self.get_temp_dir(), num_files=3)

    num_readers = len(tfrecord_paths)
    p_reader = parallel_reader.ParallelReader(
        io_ops.TFRecordReader, shared_queue, num_readers=num_readers)

    data_files = parallel_reader.get_data_files(tfrecord_paths)
    filename_queue = input_lib.string_input_producer(data_files)
    key, value = p_reader.read(filename_queue)

    count0 = 0
    count1 = 0
    count2 = 0

    num_reads = 50

    sv = supervisor.Supervisor(logdir=self.get_temp_dir())
    with sv.prepare_or_wait_for_session() as sess:
      sv.start_queue_runners(sess)

      for _ in range(num_reads):
        current_key, _ = sess.run([key, value])
        if '0-of-3' in str(current_key):
          count0 += 1
        if '1-of-3' in str(current_key):
          count1 += 1
        if '2-of-3' in str(current_key):
          count2 += 1

    self.assertGreater(count0, 0)
    self.assertGreater(count1, 0)
    self.assertGreater(count2, 0)
    self.assertEqual(count0 + count1 + count2, num_reads)
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 38, Source: parallel_reader_test.py
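
The shared_queue argument is the queue through which every reader funnels its (key, value) string pairs. A sketch of how this helper might be driven with both standard queue types (the test method names are assumptions, and data_flow_ops and dtypes are assumed to come from tensorflow.python.ops and tensorflow.python.framework respectively):

def testReadAllDataFromFilesFromFIFOQueue(self):
    # First-in-first-out: records come back in a deterministic order.
    shared_queue = data_flow_ops.FIFOQueue(
        capacity=256, dtypes=[dtypes.string, dtypes.string])
    self._verify_all_data_sources_read(shared_queue)

def testReadAllDataFromFilesFromRandomShuffleQueue(self):
    # Shuffled: min_after_dequeue keeps a buffer to randomize over.
    shared_queue = data_flow_ops.RandomShuffleQueue(
        capacity=256,
        min_after_dequeue=128,
        dtypes=[dtypes.string, dtypes.string])
    self._verify_all_data_sources_read(shared_queue)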

Example 7: _create_tfrecord_dataset

# Required import: from tensorflow.python.ops import io_ops
# Alternatively: from tensorflow.python.ops.io_ops import TFRecordReader
def _create_tfrecord_dataset(tmpdir):
  if not gfile.Exists(tmpdir):
    gfile.MakeDirs(tmpdir)

  data_sources = test_utils.create_tfrecord_files(tmpdir, num_files=1)

  keys_to_features = {
      'image/encoded':
          parsing_ops.FixedLenFeature(
              shape=(), dtype=dtypes.string, default_value=''),
      'image/format':
          parsing_ops.FixedLenFeature(
              shape=(), dtype=dtypes.string, default_value='jpeg'),
      'image/class/label':
          parsing_ops.FixedLenFeature(
              shape=[1],
              dtype=dtypes.int64,
              default_value=array_ops.zeros(
                  [1], dtype=dtypes.int64))
  }

  items_to_handlers = {
      'image': tfexample_decoder.Image(),
      'label': tfexample_decoder.Tensor('image/class/label'),
  }

  decoder = tfexample_decoder.TFExampleDecoder(keys_to_features,
                                               items_to_handlers)

  return dataset.Dataset(
      data_sources=data_sources,
      reader=io_ops.TFRecordReader,
      decoder=decoder,
      num_samples=100,
      items_to_descriptions=None) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 37, Source: dataset_data_provider_test.py
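
A Dataset built this way is normally consumed through tf-slim's DatasetDataProvider, which wires a ParallelReader and the decoder together. A minimal sketch, assuming dataset_data_provider is imported from the same package as this test:

ds = _create_tfrecord_dataset(tempfile.mkdtemp())
provider = dataset_data_provider.DatasetDataProvider(ds)
# 'image' and 'label' are the item names registered in items_to_handlers.
image, label = provider.get(['image', 'label'])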

Example 8: _verify_all_data_sources_read

# Required import: from tensorflow.python.ops import io_ops
# Alternatively: from tensorflow.python.ops.io_ops import TFRecordReader
def _verify_all_data_sources_read(self, shared_queue):
    with self.cached_session():
      tfrecord_paths = test_utils.create_tfrecord_files(
          tempfile.mkdtemp(), num_files=3)

    num_readers = len(tfrecord_paths)
    p_reader = parallel_reader.ParallelReader(
        io_ops.TFRecordReader, shared_queue, num_readers=num_readers)

    data_files = parallel_reader.get_data_files(tfrecord_paths)
    filename_queue = input_lib.string_input_producer(data_files)
    key, value = p_reader.read(filename_queue)

    count0 = 0
    count1 = 0
    count2 = 0

    num_reads = 50

    sv = supervisor.Supervisor(logdir=tempfile.mkdtemp())
    with sv.prepare_or_wait_for_session() as sess:
      sv.start_queue_runners(sess)

      for _ in range(num_reads):
        current_key, _ = sess.run([key, value])
        if '0-of-3' in str(current_key):
          count0 += 1
        if '1-of-3' in str(current_key):
          count1 += 1
        if '2-of-3' in str(current_key):
          count2 += 1

    self.assertGreater(count0, 0)
    self.assertGreater(count1, 0)
    self.assertGreater(count2, 0)
    self.assertEqual(count0 + count1 + count2, num_reads) 
Developer: google-research, Project: tf-slim, Lines: 38, Source: parallel_reader_test.py

Example 9: read_batch_record_features

# Required import: from tensorflow.python.ops import io_ops
# Alternatively: from tensorflow.python.ops.io_ops import TFRecordReader
def read_batch_record_features(file_pattern, batch_size, features,
                               randomize_input=True, num_epochs=None,
                               queue_capacity=10000, reader_num_threads=1,
                               name='dequeue_record_examples'):
  """Reads TFRecord, queues, batches and parses `Example` proto.

  See more detailed description in `read_examples`.

  Args:
    file_pattern: List of files or pattern of file paths containing
        `Example` records. See `tf.gfile.Glob` for pattern rules.
    batch_size: An int or scalar `Tensor` specifying the batch size to use.
    features: A `dict` mapping feature keys to `FixedLenFeature` or
      `VarLenFeature` values.
    randomize_input: Whether the input should be randomized.
    num_epochs: Integer specifying the number of times to read through the
      dataset. If None, cycles through the dataset forever. NOTE - If specified,
      creates a variable that must be initialized, so call
      tf.local_variables_initializer() as shown in the tests.
    queue_capacity: Capacity for input queue.
    reader_num_threads: The number of threads to read examples.
    name: Name of resulting op.

  Returns:
    A dict of `Tensor` or `SparseTensor` objects for each key in `features`.

  Raises:
    ValueError: for invalid inputs.
  """
  return read_batch_features(
      file_pattern=file_pattern,
      batch_size=batch_size,
      features=features,
      reader=io_ops.TFRecordReader,
      randomize_input=randomize_input,
      num_epochs=num_epochs,
      queue_capacity=queue_capacity,
      reader_num_threads=reader_num_threads,
      name=name) 
Developer: tobegit3hub, Project: deep_image_model, Lines: 41, Source: graph_io.py
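
A hypothetical call, with the feature keys and file pattern made up for illustration (parsing_ops and dtypes are assumed to be imported as in Example 7):

features = {
    'image/encoded': parsing_ops.FixedLenFeature(
        shape=(), dtype=dtypes.string, default_value=''),
    'image/class/label': parsing_ops.FixedLenFeature(
        shape=[1], dtype=dtypes.int64),
}
# Returns a dict mapping each feature key to a batched Tensor
# (or SparseTensor for VarLenFeature entries).
batch = read_batch_record_features(
    file_pattern='/tmp/train-*.tfrecord',
    batch_size=128,
    features=features)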

Example 10: read_batch_record_features

# Required import: from tensorflow.python.ops import io_ops
# Alternatively: from tensorflow.python.ops.io_ops import TFRecordReader
def read_batch_record_features(file_pattern,
                               batch_size,
                               features,
                               randomize_input=True,
                               num_epochs=None,
                               queue_capacity=10000,
                               reader_num_threads=1,
                               name='dequeue_record_examples'):
  """Reads TFRecord, queues, batches and parses `Example` proto.

  See more detailed description in `read_examples`.

  Args:
    file_pattern: List of files or patterns of file paths containing
        `Example` records. See `tf.gfile.Glob` for pattern rules.
    batch_size: An int or scalar `Tensor` specifying the batch size to use.
    features: A `dict` mapping feature keys to `FixedLenFeature` or
      `VarLenFeature` values.
    randomize_input: Whether the input should be randomized.
    num_epochs: Integer specifying the number of times to read through the
      dataset. If None, cycles through the dataset forever. NOTE - If specified,
      creates a variable that must be initialized, so call
      tf.local_variables_initializer() and run the op in a session.
    queue_capacity: Capacity for input queue.
    reader_num_threads: The number of threads to read examples. In order to have
      a predictable and repeatable order of reading and enqueueing, such as in
      prediction and evaluation mode, `reader_num_threads` should be 1.
    name: Name of resulting op.

  Returns:
    A dict of `Tensor` or `SparseTensor` objects for each key in `features`.

  Raises:
    ValueError: for invalid inputs.
  """
  return read_batch_features(
      file_pattern=file_pattern,
      batch_size=batch_size,
      features=features,
      reader=io_ops.TFRecordReader,
      randomize_input=randomize_input,
      num_epochs=num_epochs,
      queue_capacity=queue_capacity,
      reader_num_threads=reader_num_threads,
      name=name) 
Developer: ryfeus, Project: lambda-packs, Lines: 47, Source: graph_io.py

Example 11: read_batch_record_features

# Required import: from tensorflow.python.ops import io_ops
# Alternatively: from tensorflow.python.ops.io_ops import TFRecordReader
def read_batch_record_features(file_pattern,
                               batch_size,
                               features,
                               randomize_input=True,
                               num_epochs=None,
                               queue_capacity=10000,
                               reader_num_threads=1,
                               name='dequeue_record_examples'):
  """Reads TFRecord, queues, batches and parses `Example` proto.

  See more detailed description in `read_examples`.

  Args:
    file_pattern: List of files or pattern of file paths containing
        `Example` records. See `tf.gfile.Glob` for pattern rules.
    batch_size: An int or scalar `Tensor` specifying the batch size to use.
    features: A `dict` mapping feature keys to `FixedLenFeature` or
      `VarLenFeature` values.
    randomize_input: Whether the input should be randomized.
    num_epochs: Integer specifying the number of times to read through the
      dataset. If None, cycles through the dataset forever. NOTE - If specified,
      creates a variable that must be initialized, so call
      tf.local_variables_initializer() as shown in the tests.
    queue_capacity: Capacity for input queue.
    reader_num_threads: The number of threads to read examples.
    name: Name of resulting op.

  Returns:
    A dict of `Tensor` or `SparseTensor` objects for each key in `features`.

  Raises:
    ValueError: for invalid inputs.
  """
  return read_batch_features(
      file_pattern=file_pattern,
      batch_size=batch_size,
      features=features,
      reader=io_ops.TFRecordReader,
      randomize_input=randomize_input,
      num_epochs=num_epochs,
      queue_capacity=queue_capacity,
      reader_num_threads=reader_num_threads,
      name=name) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 45, Source: graph_io.py

Example 12: _verify_read_up_to_out

# Required import: from tensorflow.python.ops import io_ops
# Alternatively: from tensorflow.python.ops.io_ops import TFRecordReader
def _verify_read_up_to_out(self, shared_queue):
    with self.cached_session():
      num_files = 3
      num_records_per_file = 7
      tfrecord_paths = test_utils.create_tfrecord_files(
          tempfile.mkdtemp(),
          num_files=num_files,
          num_records_per_file=num_records_per_file)

    p_reader = parallel_reader.ParallelReader(
        io_ops.TFRecordReader, shared_queue, num_readers=5)

    data_files = parallel_reader.get_data_files(tfrecord_paths)
    filename_queue = input_lib.string_input_producer(data_files, num_epochs=1)
    key, value = p_reader.read_up_to(filename_queue, 4)

    count0 = 0
    count1 = 0
    count2 = 0
    all_keys_count = 0
    all_values_count = 0

    sv = supervisor.Supervisor(logdir=tempfile.mkdtemp())
    with sv.prepare_or_wait_for_session() as sess:
      sv.start_queue_runners(sess)
      while True:
        try:
          current_keys, current_values = sess.run([key, value])
          self.assertEqual(len(current_keys), len(current_values))
          all_keys_count += len(current_keys)
          all_values_count += len(current_values)
          for current_key in current_keys:
            if '0-of-3' in str(current_key):
              count0 += 1
            if '1-of-3' in str(current_key):
              count1 += 1
            if '2-of-3' in str(current_key):
              count2 += 1
        except errors_impl.OutOfRangeError:
          break

    self.assertEqual(count0, num_records_per_file)
    self.assertEqual(count1, num_records_per_file)
    self.assertEqual(count2, num_records_per_file)
    self.assertEqual(
        all_keys_count,
        num_files * num_records_per_file)
    self.assertEqual(all_values_count, all_keys_count)
    self.assertEqual(
        count0 + count1 + count2,
        all_keys_count) 
Developer: google-research, Project: tf-slim, Lines: 53, Source: parallel_reader_test.py
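
read_up_to dequeues at most 4 (key, value) pairs per session step, so the final batch of the epoch may be short; the assertions confirm that all num_files * num_records_per_file = 21 records were produced exactly once. A sketch of how this helper might be invoked (the method name and capacity are assumptions; explicit scalar shapes are given because read_up_to relies on dequeue_up_to, and tensor_shape is assumed to come from tensorflow.python.framework):

def testReadUpToFromFIFOQueue(self):
    shared_queue = data_flow_ops.FIFOQueue(
        capacity=99,
        dtypes=[dtypes.string, dtypes.string],
        shapes=[tensor_shape.scalar(), tensor_shape.scalar()])
    self._verify_read_up_to_out(shared_queue)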


Note: The tensorflow.python.ops.io_ops.TFRecordReader examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their original authors, who retain copyright in the source code; distribution and use are subject to each project's License. Do not republish without permission.