This article collects typical usage examples of the Python method tensorflow.python.ops.data_flow_ops.PaddingFIFOQueue. If you are unsure what data_flow_ops.PaddingFIFOQueue does or how to use it, the curated code examples below may help; you can also explore the containing module, tensorflow.python.ops.data_flow_ops, for further usage.
Four code examples of data_flow_ops.PaddingFIFOQueue are shown below, sorted by popularity by default.
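Before the examples, here is a minimal, self-contained sketch of what PaddingFIFOQueue does, written against the TF 1.x graph/session API; the capacity, dtypes, and shapes values are illustrative assumptions, not taken from the examples below. The key point is that element shapes may be partially unknown (None dimensions), and dequeue_many zero-pads the dequeued elements to a common shape.
import tensorflow as tf  # assumes TensorFlow 1.x (graph mode + sessions)
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import data_flow_ops

# Elements are int32 tensors of shape [None, 2]: the first dimension may
# differ from element to element.
q = data_flow_ops.PaddingFIFOQueue(
    capacity=10, dtypes=[dtypes.int32], shapes=[[None, 2]])

enqueue_a = q.enqueue([tf.constant([[1, 2], [3, 4]])])  # shape [2, 2]
enqueue_b = q.enqueue([tf.constant([[5, 6]])])          # shape [1, 2]
# dequeue_many pads every element to the largest shape in the batch,
# so the result has shape [2, 2, 2].
padded = q.dequeue_many(2)

with tf.Session() as sess:
  sess.run([enqueue_a, enqueue_b])
  print(sess.run(padded))
  # [[[1 2] [3 4]]
  #  [[5 6] [0 0]]]   <- second element zero-padded to length 2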
Example 1: _which_queue
# Required import: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import PaddingFIFOQueue [as alias]
def _which_queue(dynamic_pad):
  # PaddingFIFOQueue supports variable-shaped elements via dynamic padding;
  # FIFOQueue requires fully-defined, identical element shapes.
  return (data_flow_ops.PaddingFIFOQueue if dynamic_pad
          else data_flow_ops.FIFOQueue)
Example 2: _which_queue
# Required import: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import PaddingFIFOQueue [as alias]
def _which_queue(dynamic_pad):
  return (data_flow_ops.PaddingFIFOQueue
          if dynamic_pad else data_flow_ops.FIFOQueue)
Example 3: _which_queue
# Required import: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import PaddingFIFOQueue [as alias]
def _which_queue(dynamic_pad):
  return (data_flow_ops.PaddingFIFOQueue if dynamic_pad
          else data_flow_ops.FIFOQueue)

# Batching functions ----------------------------------------------------------
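The _which_queue helper in Examples 1-3 simply selects which queue class the batching utilities construct afterwards. As a hypothetical illustration of how a caller might use it (the argument values below are assumptions for demonstration, not from the original source):
# dynamic_pad=True  -> PaddingFIFOQueue: enqueued elements may have
#                      partially-unknown shapes and are padded when dequeued.
# dynamic_pad=False -> plain FIFOQueue with fully-defined shapes.
queue = _which_queue(dynamic_pad=True)(
    capacity=32, dtypes=[dtypes.int32], shapes=[[None, 2]])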
Example 4: testDynamicPad
# Required import: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import PaddingFIFOQueue [as alias]
# Also referenced (exact module paths vary across TF 1.x versions): numpy as np;
# constant_op, dtypes and errors_impl from tensorflow.python.framework;
# variables from tensorflow.python.ops; input (as input_lib) and
# queue_runner_impl from tensorflow.python.training; prefetch_queue from tf.contrib.
def testDynamicPad(self):
  with self.cached_session() as sess:
    # Create 3 tensors of variable but compatible shapes.
    var_shape = [None, 2]
    p1 = constant_op.constant([[1, 2], [3, 4]])
    p1.set_shape(var_shape)
    p2 = constant_op.constant([[5, 6], [7, 8], [9, 10]])
    p2.set_shape(var_shape)
    p3 = constant_op.constant([[11, 12]])
    p3.set_shape(var_shape)
    batch = [p1, p2, p3]
    batch_size = len(batch)

    zero64 = constant_op.constant(0, dtype=dtypes.int64)
    examples = variables.Variable(zero64)
    counter = examples.count_up_to(batch_size)

    # Create a PaddingFIFOQueue to enqueue these tensors.
    q = data_flow_ops.PaddingFIFOQueue(
        capacity=10, dtypes=[dtypes.int32], shapes=[var_shape])
    for tensor in [p1, p2, p3]:
      q.enqueue([tensor]).run()

    # Dequeue from the queue and batch them using batch().
    batches = input_lib.batch([q.dequeue(), counter], batch_size=batch_size,
                              num_threads=1, dynamic_pad=True)
    self.assertEqual([batch_size, None, 2], batches[0].shape.as_list())

    # Finally, assemble them into a prefetch_queue with dynamic_pad.
    batcher = prefetch_queue.prefetch_queue(batches, dynamic_pad=True)
    batches = batcher.dequeue()
    self.assertEqual([batch_size, None, 2], batches[0].shape.as_list())

    variables.global_variables_initializer().run()
    threads = queue_runner_impl.start_queue_runners()

    values, _ = sess.run(batches)

    # We enqueued 3 tensors of shape [None, 2], so with dynamic_pad they
    # should be padded to the fixed shape [3, 3, 2]: a batch_size of 3, and
    # a second dimension of 3, the maximum length within the batch.
    self.assertTrue(np.array_equal(
        np.array([[[1, 2], [3, 4], [0, 0]],
                  [[5, 6], [7, 8], [9, 10]],
                  [[11, 12], [0, 0], [0, 0]]]),
        values))

    with self.assertRaises(errors_impl.OutOfRangeError):
      sess.run(batches)

    for thread in threads:
      thread.join()