This article collects typical usage examples of the Python method tensorflow.python.ops.data_flow_ops.FIFOQueue. If you are wondering exactly what data_flow_ops.FIFOQueue does, how to use it, or want worked examples, the curated code samples below should help. You can also explore the containing module, tensorflow.python.ops.data_flow_ops, for related usage.
Below are 15 code examples of data_flow_ops.FIFOQueue, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
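Before diving in, here is a minimal sketch of the FIFOQueue round trip that all the examples build on: construct the queue with a capacity and dtypes, enqueue with enqueue/enqueue_many, and dequeue inside a TF1-style session. This is my own illustration, not taken from any example below.

import tensorflow as tf
from tensorflow.python.ops import data_flow_ops

q = data_flow_ops.FIFOQueue(capacity=3, dtypes=[tf.float32], shapes=[()])
enqueue = q.enqueue_many(([1.0, 2.0, 3.0],))
dequeue = q.dequeue()

with tf.Session() as sess:
  sess.run(enqueue)
  print(sess.run(dequeue))  # 1.0 -- elements come out in insertion order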
Example 1: testDebugQueueOpsDoesNotoErrorOut
# Required module: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import FIFOQueue [as alias]
def testDebugQueueOpsDoesNotoErrorOut(self):
  with session.Session() as sess:
    q = data_flow_ops.FIFOQueue(3, "float", name="fifo_queue")
    q_init = q.enqueue_many(([101.0, 202.0, 303.0],), name="enqueue_many")

    run_metadata = config_pb2.RunMetadata()
    run_options = config_pb2.RunOptions(output_partition_graphs=True)
    debug_utils.watch_graph(
        run_options,
        sess.graph,
        debug_urls=self._debug_urls())

    sess.run(q_init, options=run_options, run_metadata=run_metadata)

    dump = debug_data.DebugDumpDir(
        self._dump_root, partition_graphs=run_metadata.partition_graphs)
    self.assertTrue(dump.loaded_partition_graphs())

    fifo_queue_tensor = dump.get_tensors("fifo_queue", 0, "DebugIdentity")[0]
    self.assertIsInstance(fifo_queue_tensor,
                          debug_data.InconvertibleTensorProto)
    self.assertTrue(fifo_queue_tensor.initialized)
    self.assertAllClose(
        [101.0, 202.0, 303.0],
        dump.get_tensors("enqueue_many/component_0", 0, "DebugIdentity")[0])
Example 2: __init__
# Required module: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import FIFOQueue [as alias]
def __init__(
    self, dtypes, shapes=None, capacity=10, shared_name='feeding_queue'):
  self._dtypes = dtypes
  self._shapes = shapes
  self._shared_name = shared_name
  self._capacity = capacity
  self._local_q = data_flow_ops.FIFOQueue(capacity=self._capacity,
                                          dtypes=self._dtypes,
                                          shapes=self._shapes,
                                          name=self._shared_name,
                                          shared_name=self._shared_name)
  self._num_remote_feeds = 0

  # Fake do-nothing operation that's used to prevent remote queues
  # from being closed, and as a workaround for b/32749157
  self._fake_op = array_ops.constant('dummy close', name='feeder_fake_op').op
  self._feeding_event = threading.Event()
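What makes this feeding queue remotely feedable is the shared_name argument: queue ops created with the same shared_name against the same TensorFlow server resolve to one underlying queue resource. A rough sketch of that property, assuming an in-process tf.train.Server (the setup below is mine, not the original class's):

import tensorflow as tf
from tensorflow.python.ops import data_flow_ops

server = tf.train.Server.create_local_server()
q = data_flow_ops.FIFOQueue(capacity=10, dtypes=[tf.int32], shapes=[()],
                            shared_name='feeding_queue')
enq = q.enqueue(7)
deq = q.dequeue()

# The queue resource lives in the server, so it outlives individual sessions.
with tf.Session(server.target) as feeder:
  feeder.run(enq)
with tf.Session(server.target) as consumer:
  print(consumer.run(deq))  # 7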
Example 3: close
# Required module: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import FIFOQueue [as alias]
def close(self, cancel_pending_enqueues=False, name=None):
  """Closes the barrier and the FIFOQueue.

  This operation signals that no more segments of new sequences will be
  enqueued. New segments of already inserted sequences may still be enqueued
  and dequeued if there is a sufficient number filling a batch or
  allow_small_batch is true. Otherwise dequeue operations will fail
  immediately.

  Args:
    cancel_pending_enqueues: (Optional.) A boolean, defaulting to
      `False`. If `True`, all pending enqueues to the underlying queues will
      be cancelled, and completing already started sequences is not possible.
    name: Optional name for the op.

  Returns:
    The operation that closes the barrier and the FIFOQueue.
  """
  with ops.name_scope(name, "SQSSClose", [self._prefetch_op]) as name:
    barrier_close = self.barrier.close(cancel_pending_enqueues,
                                       "BarrierClose")
    fifo_queue_close = self._capacity_queue.close(cancel_pending_enqueues,
                                                  "FIFOClose")
    return control_flow_ops.group(barrier_close, fifo_queue_close, name=name)
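The docstring's contract mirrors plain FIFOQueue.close semantics: after close, elements already in the queue can still be dequeued, but new enqueues fail. A minimal sketch of that contract on a bare queue (not the class above); I catch the broad tf.errors.OpError since the exact error class raised for enqueue-after-close is an implementation detail:

import tensorflow as tf
from tensorflow.python.ops import data_flow_ops

q = data_flow_ops.FIFOQueue(capacity=2, dtypes=[tf.int32], shapes=[()])
enq = q.enqueue_many(([1, 2],))
close_op = q.close(cancel_pending_enqueues=False)

with tf.Session() as sess:
  sess.run(enq)
  sess.run(close_op)
  print(sess.run(q.dequeue()))  # 1 -- draining still works after close
  try:
    sess.run(q.enqueue(3))      # new enqueues are rejected
  except tf.errors.OpError as e:
    print('enqueue failed:', type(e).__name__)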
Example 4: close
# Required module: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import FIFOQueue [as alias]
def close(self, cancel_pending_enqueues=False, name=None):
  """Closes the barrier and the FIFOQueue.

  This operation signals that no more segments of new sequences will be
  enqueued. New segments of already inserted sequences may still be enqueued
  and dequeued if there is a sufficient number filling a batch or
  allow_small_batch is true. Otherwise dequeue operations will fail
  immediately.

  Args:
    cancel_pending_enqueues: (Optional.) A boolean, defaulting to
      `False`. If `True`, all pending enqueues to the underlying queues will
      be cancelled, and completing already started sequences is not possible.
    name: Optional name for the op.

  Returns:
    The operation that closes the barrier and the FIFOQueue.
  """
  with ops.name_scope(name, "SQSSClose", [self._prefetch_op]) as name:
    barrier_close = self.barrier.close(
        cancel_pending_enqueues, "BarrierClose")
    fifo_queue_close = self._capacity_queue.close(
        cancel_pending_enqueues, "FIFOClose")
    return control_flow_ops.group(barrier_close, fifo_queue_close, name=name)
Example 5: create_queue
# Required module: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import FIFOQueue [as alias]
def create_queue(self, shared_name=None, name=None):
  from tensorflow.python.ops import data_flow_ops, logging_ops, math_ops
  from tensorflow.python.framework import dtypes
  assert self.dtypes is not None and self.shapes is not None
  assert len(self.dtypes) == len(self.shapes)
  capacity = self.queue_size
  self._queue = data_flow_ops.FIFOQueue(
      capacity=capacity,
      dtypes=self.dtypes,
      shapes=self.shapes,
      shared_name=shared_name,
      name=name)
  enq = self._queue.enqueue_many(self.batch_phs)
  # Create a queue runner that keeps the queue fed from `nthreads` threads.
  queue_runner.add_queue_runner(queue_runner.QueueRunner(
      self._queue, [enq] * self.nthreads,
      feed_dict_op=[lambda: self.next_batch()],
      feed_dict_key=self.batch_phs))
  # summary_name = 'fraction_of_%d_full' % capacity
  # logging_ops.scalar_summary("queue/%s/%s" % (
  #     self._queue.name, summary_name), math_ops.cast(
  #         self._queue.size(), dtypes.float32) * (1. / capacity))
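A caveat: feed_dict_op and feed_dict_key are not parameters of the stock tf.train.QueueRunner, so this repo presumably uses a subclass. With the standard API, the same pattern (several threads repeatedly running an enqueue op, supervised by a Coordinator) looks roughly like this sketch:

import tensorflow as tf
from tensorflow.python.ops import data_flow_ops

q = data_flow_ops.FIFOQueue(capacity=32, dtypes=[tf.float32], shapes=[()])
enq = q.enqueue(tf.random_uniform(()))   # stand-in for a real producer op
qr = tf.train.QueueRunner(q, [enq] * 4)  # 4 enqueue threads
tf.train.add_queue_runner(qr)

with tf.Session() as sess:
  coord = tf.train.Coordinator()
  threads = tf.train.start_queue_runners(sess=sess, coord=coord)
  print(sess.run(q.dequeue_many(8)))     # consume a batch of 8
  coord.request_stop()
  coord.join(threads)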
Example 6: main
# Required module: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import FIFOQueue [as alias]
def main(args):
  with tf.Graph().as_default():
    with tf.Session() as sess:
      # Read the file containing the pairs used for testing
      pairs = lfw.read_pairs(os.path.expanduser(args.lfw_pairs))
      # Get the paths for the corresponding images
      paths, actual_issame = lfw.get_paths(os.path.expanduser(args.lfw_dir), pairs)

      image_paths_placeholder = tf.placeholder(tf.string, shape=(None, 1), name='image_paths')
      labels_placeholder = tf.placeholder(tf.int32, shape=(None, 1), name='labels')
      batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
      control_placeholder = tf.placeholder(tf.int32, shape=(None, 1), name='control')
      phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')

      nrof_preprocess_threads = 4
      image_size = (args.image_size, args.image_size)
      eval_input_queue = data_flow_ops.FIFOQueue(capacity=2000000,
                                                 dtypes=[tf.string, tf.int32, tf.int32],
                                                 shapes=[(1,), (1,), (1,)],
                                                 shared_name=None, name=None)
      eval_enqueue_op = eval_input_queue.enqueue_many(
          [image_paths_placeholder, labels_placeholder, control_placeholder],
          name='eval_enqueue_op')
      image_batch, label_batch = facenet.create_input_pipeline(
          eval_input_queue, image_size, nrof_preprocess_threads, batch_size_placeholder)

      # Load the model
      input_map = {'image_batch': image_batch, 'label_batch': label_batch,
                   'phase_train': phase_train_placeholder}
      facenet.load_model(args.model, input_map=input_map)

      # Get output tensor
      embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")

      coord = tf.train.Coordinator()
      tf.train.start_queue_runners(coord=coord, sess=sess)

      evaluate(sess, eval_enqueue_op, image_paths_placeholder, labels_placeholder,
               phase_train_placeholder, batch_size_placeholder, control_placeholder,
               embeddings, label_batch, paths, actual_issame, args.lfw_batch_size,
               args.lfw_nrof_folds, args.distance_metric, args.subtract_mean,
               args.use_flipped_images, args.use_fixed_image_standardization)
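For orientation: the queue holds (path, label, control) triples of shape (1,) each, so whatever evaluate feeds into eval_enqueue_op must be rank-2 arrays with a trailing unit dimension. A hedged sketch of what one feed step presumably looks like, reusing the names defined in the example (evaluate itself is not shown in the original):

import numpy as np

# Hypothetical feed: N image paths, expanded to (N, 1) to match the queue's
# per-element shape of (1,).
image_paths_array = np.expand_dims(np.array(paths), 1)
labels_array = np.expand_dims(np.arange(len(paths), dtype=np.int32), 1)
control_array = np.zeros_like(labels_array)  # 0 = default preprocessing

sess.run(eval_enqueue_op, feed_dict={image_paths_placeholder: image_paths_array,
                                     labels_placeholder: labels_array,
                                     control_placeholder: control_array})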
Example 7: _which_queue
# Required module: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import FIFOQueue [as alias]
def _which_queue(dynamic_pad):
  return (data_flow_ops.PaddingFIFOQueue if dynamic_pad
          else data_flow_ops.FIFOQueue)
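The distinction matters because only PaddingFIFOQueue can batch variable-shape elements: dequeue_many pads each component up to the largest shape in the batch, while a plain FIFOQueue requires fully defined, identical shapes. A small illustration (my own, not from the source):

import tensorflow as tf
from tensorflow.python.ops import data_flow_ops

q = data_flow_ops.PaddingFIFOQueue(capacity=10, dtypes=[tf.int32],
                                   shapes=[(None,)])
enq_a = q.enqueue(([1, 2, 3],))
enq_b = q.enqueue(([4],))

with tf.Session() as sess:
  sess.run([enq_a, enq_b])
  print(sess.run(q.dequeue_many(2)))
  # [[1 2 3]
  #  [4 0 0]]  -- the short row is zero-padded to the batch maximum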
Example 8: testFIFOSharedQueue
# Required module: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import FIFOQueue [as alias]
def testFIFOSharedQueue(self):
  shared_queue = data_flow_ops.FIFOQueue(
      capacity=256, dtypes=[dtypes_lib.string, dtypes_lib.string])
  self._verify_all_data_sources_read(shared_queue)
Example 9: testReadUpToFromFIFOQueue
# Required module: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import FIFOQueue [as alias]
def testReadUpToFromFIFOQueue(self):
  shared_queue = data_flow_ops.FIFOQueue(
      capacity=99,
      dtypes=[dtypes_lib.string, dtypes_lib.string],
      shapes=[[], []])
  self._verify_read_up_to_out(shared_queue)
Example 10: _which_queue
# Required module: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import FIFOQueue [as alias]
def _which_queue(dynamic_pad):
  return (data_flow_ops.PaddingFIFOQueue
          if dynamic_pad else data_flow_ops.FIFOQueue)
Example 11: testTimeoutWithShortOperations
# Required module: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import FIFOQueue [as alias]
def testTimeoutWithShortOperations(self):
  num_epochs = 5
  q = data_flow_ops.FIFOQueue(
      capacity=50, dtypes=[dtypes.int32], shapes=[()])
  enqueue_op = q.enqueue_many(constant_op.constant([1, 2]))

  # Use a 10-second timeout, which should be longer than any
  # non-blocking enqueue_many op.
  config = config_pb2.ConfigProto(operation_timeout_in_ms=10000)
  with session.Session(config=config) as sess:
    for _ in range(num_epochs):
      sess.run(enqueue_op)
    self.assertEqual(sess.run(q.size()), num_epochs * 2)
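The test covers the happy path, where every op finishes well inside the 10-second budget. The complementary case is an op that blocks indefinitely, which the runtime aborts with DeadlineExceededError once operation_timeout_in_ms elapses. A sketch of that case (mine, not from the test file):

import tensorflow as tf
from tensorflow.python.ops import data_flow_ops

q = data_flow_ops.FIFOQueue(capacity=50, dtypes=[tf.int32], shapes=[()])
blocked_dequeue = q.dequeue()  # the queue is empty, so this blocks

config = tf.ConfigProto(operation_timeout_in_ms=100)
with tf.Session(config=config) as sess:
  try:
    sess.run(blocked_dequeue)
  except tf.errors.DeadlineExceededError:
    print('dequeue timed out as expected')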
Example 12: _which_queue
# Required module: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import FIFOQueue [as alias]
def _which_queue(dynamic_pad):
  return (data_flow_ops.PaddingFIFOQueue if dynamic_pad
          else data_flow_ops.FIFOQueue)


# Batching functions ----------------------------------------------------------
Example 13: testDebugQueueOpsDoesNotoErrorOut
# Required module: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import FIFOQueue [as alias]
def testDebugQueueOpsDoesNotoErrorOut(self):
  with session.Session() as sess:
    q = data_flow_ops.FIFOQueue(3, "float", name="fifo_queue")
    q_init = q.enqueue_many(([101.0, 202.0, 303.0],), name="enqueue_many")

    _, dump = self._debug_run_and_get_dump(sess, q_init)
    self.assertTrue(dump.loaded_partition_graphs())

    fifo_queue_tensor = dump.get_tensors("fifo_queue", 0, "DebugIdentity")[0]
    self.assertIsInstance(fifo_queue_tensor,
                          debug_data.InconvertibleTensorProto)
    self.assertTrue(fifo_queue_tensor.initialized)
    self.assertAllClose(
        [101.0, 202.0, 303.0],
        dump.get_tensors("enqueue_many/component_0", 0, "DebugIdentity")[0])
Developer: PacktPublishing, Project: Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda, Lines: 17, Source file: session_debug_testlib.py
Example 14: init_triplet_model
# Required module: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import FIFOQueue [as alias]
def init_triplet_model():
  global track_struct
  global triplet_graph
  global triplet_sess
  global eval_enqueue_op
  global image_paths_placeholder
  global labels_placeholder
  global phase_train_placeholder
  global batch_size_placeholder
  global control_placeholder
  global embeddings
  global label_batch
  global distance_metric

  f_image_size = 160
  distance_metric = 0

  triplet_graph = tf.Graph()
  with triplet_graph.as_default():
    image_paths_placeholder = tf.placeholder(tf.string, shape=(None, 1), name='image_paths')
    labels_placeholder = tf.placeholder(tf.int32, shape=(None, 1), name='labels')
    batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
    control_placeholder = tf.placeholder(tf.int32, shape=(None, 1), name='control')
    phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')

    nrof_preprocess_threads = 4
    image_size = (f_image_size, f_image_size)
    eval_input_queue = data_flow_ops.FIFOQueue(capacity=2000000,
                                               dtypes=[tf.string, tf.int32, tf.int32],
                                               shapes=[(1,), (1,), (1,)],
                                               shared_name=None, name=None)
    eval_enqueue_op = eval_input_queue.enqueue_many(
        [image_paths_placeholder, labels_placeholder, control_placeholder],
        name='eval_enqueue_op')
    image_batch, label_batch = facenet.create_input_pipeline(
        eval_input_queue, image_size, nrof_preprocess_threads, batch_size_placeholder)

  triplet_sess = tf.Session(graph=triplet_graph)
  with triplet_sess.as_default():
    with triplet_graph.as_default():
      # Load the model
      input_map = {'image_batch': image_batch, 'label_batch': label_batch,
                   'phase_train': phase_train_placeholder}
      facenet.load_model(track_struct['file_path']['triplet_model'], input_map=input_map)

      # Get output tensor
      embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")

      coord = tf.train.Coordinator()
      tf.train.start_queue_runners(coord=coord, sess=triplet_sess)
  return
Example 15: feature_extract
# Required module: from tensorflow.python.ops import data_flow_ops [as alias]
# Or: from tensorflow.python.ops.data_flow_ops import FIFOQueue [as alias]
def feature_extract(feature_size, num_patch, max_length, patch_folder, triplet_model):
  f_image_size = 160
  distance_metric = 0
  with tf.Graph().as_default():
    with tf.Session() as sess:
      image_paths_placeholder = tf.placeholder(tf.string, shape=(None, 1), name='image_paths')
      labels_placeholder = tf.placeholder(tf.int32, shape=(None, 1), name='labels')
      batch_size_placeholder = tf.placeholder(tf.int32, name='batch_size')
      control_placeholder = tf.placeholder(tf.int32, shape=(None, 1), name='control')
      phase_train_placeholder = tf.placeholder(tf.bool, name='phase_train')

      nrof_preprocess_threads = 4
      image_size = (f_image_size, f_image_size)
      eval_input_queue = data_flow_ops.FIFOQueue(capacity=2000000,
                                                 dtypes=[tf.string, tf.int32, tf.int32],
                                                 shapes=[(1,), (1,), (1,)],
                                                 shared_name=None, name=None)
      eval_enqueue_op = eval_input_queue.enqueue_many(
          [image_paths_placeholder, labels_placeholder, control_placeholder],
          name='eval_enqueue_op')
      image_batch, label_batch = facenet.create_input_pipeline(
          eval_input_queue, image_size, nrof_preprocess_threads, batch_size_placeholder)

      # Load the model
      input_map = {'image_batch': image_batch, 'label_batch': label_batch,
                   'phase_train': phase_train_placeholder}
      facenet.load_model(triplet_model, input_map=input_map)

      # Get output tensor
      embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")

      coord = tf.train.Coordinator()
      tf.train.start_queue_runners(coord=coord, sess=sess)

      fea_mat = np.zeros((num_patch, feature_size - 4 + 2))
      tracklet_list = os.listdir(patch_folder)
      N_tracklet = len(tracklet_list)
      cnt = 0
      for n in range(N_tracklet):
        tracklet_folder = patch_folder + '/' + tracklet_list[n]
        patch_list = os.listdir(tracklet_folder)

        # Get the patch list; track_id and fr_id start from 1.
        prev_cnt = cnt
        for m in range(len(patch_list)):
          # track_id
          fea_mat[cnt, 0] = n + 1
          # fr_id
          fea_mat[cnt, 1] = int(patch_list[m][-8:-4])
          cnt = cnt + 1
          patch_list[m] = tracklet_folder + '/' + patch_list[m]

        lfw_batch_size = len(patch_list)
        emb_array = feature_encode(sess, eval_enqueue_op, image_paths_placeholder,
                                   labels_placeholder, phase_train_placeholder,
                                   batch_size_placeholder, control_placeholder,
                                   embeddings, label_batch, patch_list,
                                   lfw_batch_size, distance_metric)
        fea_mat[prev_cnt:prev_cnt + lfw_batch_size, 2:] = np.copy(emb_array)
      return fea_mat
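Reading the loop above, each row of fea_mat is laid out as [track_id, fr_id, embedding...], with the embedding occupying columns 2 onward. A tiny hypothetical helper (names mine, not from the source) to make that layout explicit:

import numpy as np

def split_feature_row(fea_mat, row):
  """Decode one row of the fea_mat array produced by feature_extract."""
  track_id = int(fea_mat[row, 0])  # 1-based tracklet index
  fr_id = int(fea_mat[row, 1])     # frame id parsed from the patch filename
  embedding = fea_mat[row, 2:]     # the facenet embedding vector
  return track_id, fr_id, embedding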