

Python tensorflow.FIFOQueue Method Code Examples

This article collects and summarizes typical usage examples of the tensorflow.FIFOQueue method in Python. If you are wondering what tensorflow.FIFOQueue does, how to call it, or what real-world uses look like, the hand-picked code examples below should help. You can also explore further usage examples from the tensorflow module in which the method is defined.


The sections below present 15 code examples of the tensorflow.FIFOQueue method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
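
Before working through the examples, here is a minimal sketch of the pattern they all share: build a tf.FIFOQueue, run enqueue ops, and run dequeue ops inside a session. It assumes the TensorFlow 1.x graph API used throughout this page (in TensorFlow 2.x the class lives at tf.queue.FIFOQueue and tf.data is usually preferred); the enqueued values are purely illustrative.

import tensorflow as tf

# A queue holding at most 10 elements, each a scalar string.
q = tf.FIFOQueue(10, [tf.string], shapes=())
enqueue_op = q.enqueue_many([["a.txt", "b.txt", "c.txt"]])
dequeue_op = q.dequeue()
close_op = q.close()

with tf.Session() as sess:
    sess.run(enqueue_op)          # put three filenames into the queue
    sess.run(close_op)            # no further enqueues; dequeues drain what is left
    print(sess.run(dequeue_op))   # b'a.txt'
    print(sess.run(dequeue_op))   # b'b.txt'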

Example 1: testSimple

# Required import: import tensorflow [as alias]
# Or: from tensorflow import FIFOQueue [as alias]
def testSimple(self):
    labels = [9, 3, 0]
    records = [self._record(labels[0], 0, 128, 255),
               self._record(labels[1], 255, 0, 1),
               self._record(labels[2], 254, 255, 0)]
    contents = b"".join([record for record, _ in records])
    expected = [expected for _, expected in records]
    filename = os.path.join(self.get_temp_dir(), "cifar")
    open(filename, "wb").write(contents)

    with self.test_session() as sess:
      q = tf.FIFOQueue(99, [tf.string], shapes=())
      q.enqueue([filename]).run()
      q.close().run()
      result = cifar10_input.read_cifar10(q)

      for i in range(3):
        key, label, uint8image = sess.run([
            result.key, result.label, result.uint8image])
        self.assertEqual("%s:%d" % (filename, i), tf.compat.as_text(key))
        self.assertEqual(labels[i], label)
        self.assertAllEqual(expected[i], uint8image)

      with self.assertRaises(tf.errors.OutOfRangeError):
        sess.run([result.key, result.uint8image]) 
Developer: ringringyi | Project: DOTA_models | Lines of code: 27 | Source file: cifar10_input_test.py

Example 2: enqueue

# Required import: import tensorflow [as alias]
# Or: from tensorflow import FIFOQueue [as alias]
def enqueue(sess):
  """Iterates over our data and puts small chunks into our queue."""
  under = 0
  max = len(raw_data)
  while True:
    print("starting to write into queue")
    upper = under + 20
    print("try to enqueue ", under, " to ", upper)
    if upper <= max:
      curr_data = raw_data[under:upper]
      curr_target = raw_target[under:upper]
      under = upper
    else:
      rest = upper - max
      curr_data = np.concatenate((raw_data[under:max], raw_data[0:rest]))
      curr_target = np.concatenate((raw_target[under:max], raw_target[0:rest]))
      under = rest

    sess.run(enqueue_op, feed_dict={queue_input_data: curr_data,
                                    queue_input_target: curr_target})
    print("added to the queue")
  print("finished enqueueing")

# start the threads for our FIFOQueue and batch 
Developer: kafkasl | Project: contextualLSTM | Lines of code: 26 | Source file: input_pipeline.py
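
The function above refers to names defined elsewhere in input_pipeline.py (raw_data, raw_target, queue_input_data, queue_input_target, enqueue_op), and the trailing comment only hints at the thread start that follows. Below is a hedged sketch of how that surrounding wiring might look; all shapes, sizes, and names are assumptions for illustration, not the project's actual code.

import threading
import numpy as np
import tensorflow as tf

# Assumed data and graph objects that enqueue() expects to find at module level.
raw_data = np.random.rand(100, 10).astype(np.float32)
raw_target = np.random.rand(100, 1).astype(np.float32)

queue_input_data = tf.placeholder(tf.float32, shape=[20, 10])
queue_input_target = tf.placeholder(tf.float32, shape=[20, 1])

queue = tf.FIFOQueue(capacity=50,
                     dtypes=[tf.float32, tf.float32],
                     shapes=[[10], [1]])
enqueue_op = queue.enqueue_many([queue_input_data, queue_input_target])
data_batch, target_batch = queue.dequeue_many(20)

with tf.Session() as sess:
    # Run enqueue() from the example in a background thread so the main thread
    # can keep dequeuing batches; daemon=True lets the process exit even though
    # enqueue() loops forever.
    t = threading.Thread(target=enqueue, args=(sess,), daemon=True)
    t.start()
    for _ in range(3):
        data, target = sess.run([data_batch, target_batch])
        print(data.shape, target.shape)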

Example 3: create_tensor

# Required import: import tensorflow [as alias]
# Or: from tensorflow import FIFOQueue [as alias]
def create_tensor(self, in_layers=None, **kwargs):
    # TODO(rbharath): Not sure if this layer can be called with __call__
    # meaningfully, so not going to support that functionality for now.
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)
    self.dtypes = [x.out_tensor.dtype for x in in_layers]
    self.queue = tf.FIFOQueue(self.capacity, self.dtypes, names=self.names)
    feed_dict = {x.name: x.out_tensor for x in in_layers}
    self.out_tensor = self.queue.enqueue(feed_dict)
    self.close_op = self.queue.close()
    self.out_tensors = self.queue.dequeue()
    self._non_pickle_fields += ['queue', 'out_tensors', 'close_op']

  # def none_tensors(self):
  #   queue, out_tensors, out_tensor, close_op = self.queue, self.out_tensor, self.out_tensor, self.close_op
  #   self.queue, self.out_tensor, self.out_tensors, self.close_op = None, None, None, None
  #   return queue, out_tensors, out_tensor, close_op

  # def set_tensors(self, tensors):
  #   self.queue, self.out_tensor, self.out_tensors, self.close_op = tensors 
Developer: simonfqy | Project: PADME | Lines of code: 23 | Source file: layers.py

Example 4: manual_eval_ops

# Required import: import tensorflow [as alias]
# Or: from tensorflow import FIFOQueue [as alias]
def manual_eval_ops(self, device='/cpu:0'):
        """ This is the baseline random model, this takes all the targets,
        randomly assign values to it and then report the result.

        :param device:
        :return:
        """

        with tf.name_scope("manual_evaluation"):
            with tf.device(device):
                # head rel pair to evaluate
                ph_head_rel = tf.placeholder(tf.string, [1, 2], name='ph_head_rel')
                # tail targets to evaluate
                ph_eval_targets = tf.placeholder(tf.string, [1, None], name='ph_eval_targets')
                # indices of true tail targets in ph_eval_targets. Mask these when calculating filtered mean rank
                ph_true_target_idx = tf.placeholder(tf.int32, [None], name='ph_true_target_idx')
                # indices of true targets in the evaluation set, we will return the ranks of these targets
                ph_test_target_idx = tf.placeholder(tf.int32, [None], name='ph_test_target_idx')

                # We put random numbers into the pred_scores_queue
                pred_scores_queue = tf.FIFOQueue(1000000, dtypes=tf.float32, shapes=[[1]], name='pred_scores_queue')
Developer: bxshi | Project: ConMask | Lines of code: 23 | Source file: random_model.py

Example 5: __init__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import FIFOQueue [as alias]
def __init__(self, dataset, num_threads, queue_size, batch_size):
        self._dataset = dataset
        self._num_threads = num_threads
        self._queue_size = queue_size
        self._batch_size = batch_size

        datatypes = 2*['float32']
        shapes = 2*[self._dataset.shape]

        batch_shape = [None]+list(self._dataset.shape)
        
        self._placeholders = [
            tf.placeholder(dtype=tf.float32, shape=batch_shape),
            tf.placeholder(dtype=tf.float32, shape=batch_shape)
        ]

        self._queue = tf.FIFOQueue(self._queue_size, datatypes, shapes=shapes)
        self.x, self.y = self._queue.dequeue_up_to(self._batch_size)
        self.enqueue_op = self._queue.enqueue_many(self._placeholders)

        self._coordinator = tf.train.Coordinator()

        self._threads = [] 
Developer: DLR-RM | Project: AugmentedAutoencoder | Lines of code: 25 | Source file: queue.py
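
The constructor above only builds the graph side (placeholders, enqueue_many, dequeue_up_to) and an empty thread list; the feeding loop is outside this excerpt. The sketch below shows one plausible shape for the missing methods, assuming `import threading` at module level and a `self._dataset.batch(n)` method that returns two NumPy arrays; it is an illustration, not the project's actual code.

def start(self, sess):
    # Launch the feeder threads; each repeatedly pushes batches into the queue.
    for _ in range(self._num_threads):
        t = threading.Thread(target=self._worker, args=(sess,), daemon=True)
        t.start()
        self._threads.append(t)

def _worker(self, sess):
    try:
        while not self._coordinator.should_stop():
            x, y = self._dataset.batch(self._batch_size)  # assumed dataset API
            sess.run(self.enqueue_op,
                     feed_dict={self._placeholders[0]: x,
                                self._placeholders[1]: y})
    except tf.errors.CancelledError:
        pass  # raised when the queue is closed while enqueues are pending

def stop(self, sess):
    self._coordinator.request_stop()
    sess.run(self._queue.close(cancel_pending_enqueues=True))
    self._coordinator.join(self._threads)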

Example 6: __init__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import FIFOQueue [as alias]
def __init__(self, input_size, batch_size, data_generator_creator, max_steps=None):

    super().__init__(input_size)
    self.batch_size = batch_size
    self.data_generator_creator = data_generator_creator
    self.steps_left = max_steps

    with tf.device("/cpu:0"):
      # Define input and label placeholders
      # inputs is of dimension [batch_size, max_time, input_size]
      self.inputs = tf.placeholder(tf.float32, [batch_size, None, input_size], name='inputs')
      self.sequence_lengths = tf.placeholder(tf.int32, [batch_size], name='sequence_lengths')
      self.labels = tf.sparse_placeholder(tf.int32, name='labels')

      # Queue for inputs and labels
      self.queue = tf.FIFOQueue(dtypes=[tf.float32, tf.int32, tf.string],
                                capacity=100)

      # queues do not support sparse tensors yet, we need to serialize...
      serialized_labels = tf.serialize_many_sparse(self.labels)

      self.enqueue_op = self.queue.enqueue([self.inputs,
                                            self.sequence_lengths,
                                            serialized_labels]) 
Developer: timediv | Project: speechT | Lines of code: 26 | Source file: speech_input.py
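
Queues cannot hold tf.SparseTensor values, which is why the labels are serialized with tf.serialize_many_sparse before enqueuing. On the consumer side the dequeued string tensor has to be turned back into a sparse tensor; a minimal sketch of that step follows (variable names are illustrative, and since each queue element here is a whole batch, dequeue() yields batch tensors directly).

# Hypothetical consumer side of self.queue from the example above.
inputs, sequence_lengths, serialized_labels = self.queue.dequeue()
# Undo tf.serialize_many_sparse to recover the sparse label batch.
labels = tf.deserialize_many_sparse(serialized_labels, dtype=tf.int32)
# The queue was built without shapes, so the static shape of `inputs` is lost
# and may need to be re-declared before use, e.g. with
# inputs.set_shape([batch_size, None, input_size]).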

Example 7: __init__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import FIFOQueue [as alias]
def __init__(self, path, batch_size=16, input_size=227,
                 scale_factor=1.0, num_threads=10):
        self._path = path

        self._list_files = glob.glob(os.path.join(path, "**/*.avi"))

        self._batch_size = batch_size
        self._scale_factor = scale_factor
        self._image_size = input_size
        self._label_size = int(input_size * self._scale_factor)
        self._num_threads = num_threads
        self._coord = tf.train.Coordinator()
        self._image_shape = [batch_size, self._image_size, self._image_size, 3]
        self._label_shape = [batch_size, self._label_size, self._label_size, 1]
        p_x = tf.placeholder(tf.float32, self._image_shape, name='x')
        p_y = tf.placeholder(tf.float32, self._label_shape, name='y')
        inputs = [p_x, p_y]
        self._queue = tf.FIFOQueue(400,
                [i.dtype for i in inputs], [i.get_shape() for i in inputs])
        self._inputs = inputs
        self._enqueue_op = self._queue.enqueue(inputs)
        self._queue_close_op = self._queue.close(cancel_pending_enqueues=True)
        self._threads = [] 
Developer: gustavla | Project: self-supervision | Lines of code: 25 | Source file: video_avi_flow_saliency.py

Example 8: __init__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import FIFOQueue [as alias]
def __init__(self, files, batch_size=16, input_size=227,
                 scale_factor=1.0, num_threads=10):
        self._list_files = files
        self._batch_size = batch_size
        self._scale_factor = scale_factor
        self._image_size = input_size
        self._label_size = int(input_size * self._scale_factor)
        self._num_threads = num_threads
        self._coord = tf.train.Coordinator()
        self._image_shape = [batch_size, self._image_size, self._image_size, 3]
        self._label_shape = [batch_size, self._label_size, self._label_size, 2]
        p_x = tf.placeholder(tf.float32, self._image_shape, name='x')
        p_y = tf.placeholder(tf.float32, self._label_shape, name='y')
        inputs = [p_x, p_y]
        self._queue = tf.FIFOQueue(400,
                [i.dtype for i in inputs], [i.get_shape() for i in inputs])
        self._inputs = inputs
        self._enqueue_op = self._queue.enqueue(inputs)
        self._queue_close_op = self._queue.close(cancel_pending_enqueues=True)
        self._threads = [] 
Developer: gustavla | Project: self-supervision | Lines of code: 22 | Source file: video_avi_flow.py

Example 9: __init__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import FIFOQueue [as alias]
def __init__(self, path, root_path='', batch_size=16, input_size=227, num_threads=10):
        self._path = path
        self._root_path = root_path
        with open(path) as f:
            self._list_files = [x.rstrip('\n') for x in f.readlines()]
        print('list_files', len(self._list_files))

        self._batch_size = batch_size
        self._input_size = input_size
        self._num_threads = num_threads
        self._coord = tf.train.Coordinator()
        self._base_shape = [batch_size, input_size, input_size]
        self._image_shape = self._base_shape + [3]
        self._label_shape = self._base_shape + [1]
        p_x = tf.placeholder(tf.float32, self._image_shape, name='x')
        p_y = tf.placeholder(tf.float32, self._label_shape, name='y')
        inputs = [p_x, p_y]
        self._queue = tf.FIFOQueue(400,
                [i.dtype for i in inputs], [i.get_shape() for i in inputs])
        self._inputs = inputs
        self._enqueue_op = self._queue.enqueue(inputs)
        self._queue_close_op = self._queue.close(cancel_pending_enqueues=True)
        self._threads = [] 
Developer: gustavla | Project: self-supervision | Lines of code: 25 | Source file: video_jpeg_rolls_flow_saliency.py

Example 10: testMultipleEpochs

# Required import: import tensorflow [as alias]
# Or: from tensorflow import FIFOQueue [as alias]
def testMultipleEpochs(self):
    with self.test_session() as sess:
      reader = tf.IdentityReader("test_reader")
      queue = tf.FIFOQueue(99, [tf.string], shapes=())
      enqueue = queue.enqueue_many([["DD", "EE"]])
      key, value = reader.read(queue)

      enqueue.run()
      self._ExpectRead(sess, key, value, b"DD")
      self._ExpectRead(sess, key, value, b"EE")
      enqueue.run()
      self._ExpectRead(sess, key, value, b"DD")
      self._ExpectRead(sess, key, value, b"EE")
      enqueue.run()
      self._ExpectRead(sess, key, value, b"DD")
      self._ExpectRead(sess, key, value, b"EE")
      queue.close().run()
      with self.assertRaisesOpError("is closed and has insufficient elements "
                                    "\\(requested 1, current size 0\\)"):
        sess.run([key, value]) 
Developer: tobegit3hub | Project: deep_image_model | Lines of code: 22 | Source file: reader_ops_test.py

Example 11: _testOneEpoch

# Required import: import tensorflow [as alias]
# Or: from tensorflow import FIFOQueue [as alias]
def _testOneEpoch(self, files):
    with self.test_session() as sess:
      reader = tf.TextLineReader(name="test_reader")
      queue = tf.FIFOQueue(99, [tf.string], shapes=())
      key, value = reader.read(queue)

      queue.enqueue_many([files]).run()
      queue.close().run()
      for i in range(self._num_files):
        for j in range(self._num_lines):
          k, v = sess.run([key, value])
          self.assertAllEqual("%s:%d" % (files[i], j + 1), tf.compat.as_text(k))
          self.assertAllEqual(self._LineText(i, j), v)

      with self.assertRaisesOpError("is closed and has insufficient elements "
                                    "\\(requested 1, current size 0\\)"):
        k, v = sess.run([key, value]) 
Developer: tobegit3hub | Project: deep_image_model | Lines of code: 19 | Source file: reader_ops_test.py

Example 12: testSkipHeaderLines

# Required import: import tensorflow [as alias]
# Or: from tensorflow import FIFOQueue [as alias]
def testSkipHeaderLines(self):
    files = self._CreateFiles()
    with self.test_session() as sess:
      reader = tf.TextLineReader(skip_header_lines=1, name="test_reader")
      queue = tf.FIFOQueue(99, [tf.string], shapes=())
      key, value = reader.read(queue)

      queue.enqueue_many([files]).run()
      queue.close().run()
      for i in range(self._num_files):
        for j in range(self._num_lines - 1):
          k, v = sess.run([key, value])
          self.assertAllEqual("%s:%d" % (files[i], j + 2), tf.compat.as_text(k))
          self.assertAllEqual(self._LineText(i, j + 1), v)

      with self.assertRaisesOpError("is closed and has insufficient elements "
                                    "\\(requested 1, current size 0\\)"):
        k, v = sess.run([key, value]) 
Developer: tobegit3hub | Project: deep_image_model | Lines of code: 20 | Source file: reader_ops_test.py

Example 13: testOneEpoch

# Required import: import tensorflow [as alias]
# Or: from tensorflow import FIFOQueue [as alias]
def testOneEpoch(self):
    files = self._CreateFiles()
    with self.test_session() as sess:
      reader = tf.FixedLengthRecordReader(
          header_bytes=self._header_bytes,
          record_bytes=self._record_bytes,
          footer_bytes=self._footer_bytes,
          name="test_reader")
      queue = tf.FIFOQueue(99, [tf.string], shapes=())
      key, value = reader.read(queue)

      queue.enqueue_many([files]).run()
      queue.close().run()
      for i in range(self._num_files):
        for j in range(self._num_records):
          k, v = sess.run([key, value])
          self.assertAllEqual("%s:%d" % (files[i], j), tf.compat.as_text(k))
          self.assertAllEqual(self._Record(i, j), v)

      with self.assertRaisesOpError("is closed and has insufficient elements "
                                    "\\(requested 1, current size 0\\)"):
        k, v = sess.run([key, value]) 
Developer: tobegit3hub | Project: deep_image_model | Lines of code: 24 | Source file: reader_ops_test.py
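
Examples 10 through 13 build the filename FIFOQueue by hand and enqueue into it explicitly, which keeps the tests deterministic. In ordinary input pipelines the same kind of queue is usually created indirectly through tf.train.string_input_producer, which wraps a FIFOQueue and registers a QueueRunner so background threads keep it filled. A minimal sketch of that pattern (the file names are placeholders for illustration):

import tensorflow as tf

filenames = ["file0.csv", "file1.csv"]  # assumed to exist; illustrative only
filename_queue = tf.train.string_input_producer(filenames, num_epochs=1, shuffle=False)
reader = tf.TextLineReader()
key, value = reader.read(filename_queue)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())  # num_epochs is tracked in a local variable
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        while True:
            print(sess.run([key, value]))
    except tf.errors.OutOfRangeError:
        pass  # every line of every file has been read once
    finally:
        coord.request_stop()
        coord.join(threads)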

Example 14: testWhileQueue_1

# Required import: import tensorflow [as alias]
# Or: from tensorflow import FIFOQueue [as alias]
def testWhileQueue_1(self):
    with self.test_session():
      q = tf.FIFOQueue(-1, tf.int32)
      i = tf.constant(0)

      def c(i):
        return tf.less(i, 10)

      def b(i):
        ni = tf.add(i, 1)
        ni = control_flow_ops.with_dependencies([q.enqueue((i,))], ni)
        return ni

      r = tf.while_loop(c, b, [i], parallel_iterations=1)
      self.assertEqual([10], r.eval())
      for i in xrange(10):
        self.assertEqual([i], q.dequeue().eval()) 
Developer: tobegit3hub | Project: deep_image_model | Lines of code: 19 | Source file: control_flow_ops_py_test.py

Example 15: testConstructorWithShapes

# Required import: import tensorflow [as alias]
# Or: from tensorflow import FIFOQueue [as alias]
def testConstructorWithShapes(self):
    with tf.Graph().as_default():
      q = tf.FIFOQueue(5, (tf.int32, tf.float32),
                       shapes=(tf.TensorShape([1, 1, 2, 3]),
                               tf.TensorShape([5, 8])), name="Q")
    self.assertTrue(isinstance(q.queue_ref, tf.Tensor))
    self.assertEquals(tf.string_ref, q.queue_ref.dtype)
    self.assertProtoEquals("""
      name:'Q' op:'FIFOQueue'
      attr { key: 'component_types' value { list {
        type: DT_INT32 type : DT_FLOAT
      } } }
      attr { key: 'shapes' value { list {
        shape { dim { size: 1 }
                dim { size: 1 }
                dim { size: 2 }
                dim { size: 3 } }
        shape { dim { size: 5 }
                dim { size: 8 } }
      } } }
      attr { key: 'capacity' value { i: 5 } }
      attr { key: 'container' value { s: '' } }
      attr { key: 'shared_name' value { s: '' } }
      """, q.queue_ref.op.node_def) 
Developer: tobegit3hub | Project: deep_image_model | Lines of code: 26 | Source file: fifo_queue_test.py


Note: The tensorflow.FIFOQueue method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are taken from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and distribution and use should follow the corresponding project's License. Do not reproduce without permission.