本文整理汇总了Python中tensorflow.python.training.queue_runner_impl.start_queue_runners函数的典型用法代码示例。如果您正苦于以下问题:Python start_queue_runners函数的具体用法?Python start_queue_runners怎么用?Python start_queue_runners使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了start_queue_runners函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_long_eval_discard_indivisible
def test_long_eval_discard_indivisible(self):
  """Evaluates an ARModel when the post-input-window remainder is not
  divisible into output windows, checking the indivisible tail is dropped.
  """
  g = ops.Graph()
  with g.as_default():
    model = ARModel(periodicities=2,
                    num_features=1,
                    num_time_buckets=10,
                    input_window_size=2,
                    output_window_size=2)
    # One series of five timesteps; times are irregular on purpose.
    raw_features = {
        TrainEvalFeatures.TIMES: [[1, 3, 5, 7, 11]],
        TrainEvalFeatures.VALUES: [[[1.], [2.], [3.], [4.], [5.]]]}
    model.initialize_graph()
    raw_evaluation = model.define_loss(
        raw_features, mode=estimator_lib.ModeKeys.EVAL)
    with session.Session() as sess:
      coordinator = coordinator_lib.Coordinator()
      queue_runner_impl.start_queue_runners(sess, coord=coordinator)
      variables.global_variables_initializer().run()
      raw_evaluation_evaled = sess.run(raw_evaluation)
      # Only times 7 and 11 remain: the first input window (2 steps) is
      # consumed, and one more step is discarded for divisibility.
      self.assertAllEqual([[7, 11]],
                          raw_evaluation_evaled.prediction_times)
      for feature_name in raw_evaluation.predictions:
        self.assertAllEqual(
            [1, 2, 1],  # batch, window, num_features. The window has two cut
                        # off for the first input window and one discarded so
                        # that the remainder is divisible into output windows.
            raw_evaluation_evaled.predictions[feature_name].shape)
      coordinator.request_stop()
      coordinator.join()
示例2: _gap_test_template
def _gap_test_template(self, times, values):
  """Template: builds a random state space model over `times`/`values`
  (which may contain gaps between timestamps) and checks that the batch
  loss evaluates without error.
  """
  random_model = RandomStateSpaceModel(
      state_dimension=1, state_noise_dimension=1,
      configuration=state_space_model.StateSpaceModelConfiguration(
          num_features=1))
  random_model.initialize_graph()
  input_fn = input_pipeline.WholeDatasetInputFn(
      input_pipeline.NumpyReader({
          feature_keys.TrainEvalFeatures.TIMES: times,
          feature_keys.TrainEvalFeatures.VALUES: values
      }))
  features, _ = input_fn()
  # Rebind to the tensors produced by the input pipeline.
  times = features[feature_keys.TrainEvalFeatures.TIMES]
  values = features[feature_keys.TrainEvalFeatures.VALUES]
  model_outputs = random_model.get_batch_loss(
      features={
          feature_keys.TrainEvalFeatures.TIMES: times,
          feature_keys.TrainEvalFeatures.VALUES: values
      },
      mode=None,
      state=math_utils.replicate_state(
          start_state=random_model.get_start_state(),
          batch_size=array_ops.shape(times)[0]))
  with self.cached_session() as session:
    variables.global_variables_initializer().run()
    coordinator = coordinator_lib.Coordinator()
    queue_runner_impl.start_queue_runners(session, coord=coordinator)
    # Only checks that the loss can be evaluated, not its value.
    model_outputs.loss.eval()
    coordinator.request_stop()
    coordinator.join()
示例3: testFeederActsLikeQueue
def testFeederActsLikeQueue(self):
  """Checks that a Feeder dequeues its fed tensors in FIFO order, keeps
  paired tensors in lockstep even when one output is skipped, and loops
  back to the start when exhausted.
  """
  feeder = feeder_lib.Feeder(
      dtypes=[dtypes_lib.string, dtypes_lib.string],
      shapes=[[], []],
      capacity=10)
  feeder.set_many_fed_tensors([
      constant_op.constant(['a0', 'a1', 'a2']),
      constant_op.constant(['b0', 'b1', 'b2'])
  ])
  out_a, out_b = feeder.get_fed_tensors()
  with self.test_session() as session:
    coord = coordinator.Coordinator()
    queue_runner_impl.start_queue_runners(session, coord=coord)
    a, b = session.run([out_a, out_b])
    # assertEqual replaces the deprecated assertEquals alias (removed in
    # Python 3.12).
    self.assertEqual(b'a0', a)
    self.assertEqual(b'b0', b)
    a = session.run(out_a)  # Omit b!
    self.assertEqual(b'a1', a)
    a, b = session.run([out_a, out_b])
    self.assertEqual(b'a2', a)
    self.assertEqual(b'b2', b)  # queued together
    a, b = session.run([out_a, out_b])  # loops around
    self.assertEqual(b'a0', a)
    self.assertEqual(b'b0', b)  # queued together
    coord.request_stop()
    coord.join()
示例4: _test_pass_to_next
def _test_pass_to_next(self, read_offset, step, correct_offset):
  """Template: trains on a prefix of the data, then checks that saved model
  state is chained into a second run starting `read_offset` steps later.

  Args:
    read_offset: Number of leading steps dropped from the second dataset
      (and trailing steps dropped from the first).
    step: Spacing between generated timestamps -- TODO confirm against
      _make_test_data.
    correct_offset: Offset the stub model is configured to expect when
      state is chained correctly.

  Returns:
    The loss evaluated on the second (offset) dataset.
  """
  stub_model = StubTimeSeriesModel(correct_offset=correct_offset)
  data = self._make_test_data(
      length=100 + read_offset, cut_start=None, cut_end=None, offset=100.,
      step=step)
  init_input_fn = input_pipeline.WholeDatasetInputFn(
      input_pipeline.NumpyReader(
          {k: v[:-read_offset] for k, v in data.items()}))
  result_input_fn = input_pipeline.WholeDatasetInputFn(
      input_pipeline.NumpyReader(
          {k: v[read_offset:] for k, v in data.items()}))
  chainer = state_management.ChainingStateManager(
      state_saving_interval=1)
  stub_model.initialize_graph()
  chainer.initialize_graph(model=stub_model)
  init_model_outputs = chainer.define_loss(
      model=stub_model, features=init_input_fn()[0],
      mode=estimator_lib.ModeKeys.TRAIN)
  result_model_outputs = chainer.define_loss(
      model=stub_model, features=result_input_fn()[0],
      mode=estimator_lib.ModeKeys.TRAIN)
  with self.test_session() as session:
    variables.global_variables_initializer().run()
    coordinator = coordinator_lib.Coordinator()
    queue_runner_impl.start_queue_runners(session, coord=coordinator)
    # Evaluating the first loss saves state for the second run to pick up.
    init_model_outputs.loss.eval()
    returned_loss = result_model_outputs.loss.eval()
    coordinator.request_stop()
    coordinator.join()
    return returned_loss
示例5: _test_initialization
def _test_initialization(self, warmup_iterations, batch_size):
  """Template: warms up chained model state for `warmup_iterations` loss
  evaluations, then returns one further loss value.

  A batch_size of -1 selects AllWindowInputFn (every window of the data)
  instead of RandomWindowInputFn.
  """
  stub_model = StubTimeSeriesModel()
  data = self._make_test_data(length=20, cut_start=None, cut_end=None,
                              offset=0.)
  if batch_size == -1:
    input_fn = test_utils.AllWindowInputFn(
        input_pipeline.NumpyReader(data), window_size=10)
  else:
    input_fn = input_pipeline.RandomWindowInputFn(
        input_pipeline.NumpyReader(data),
        window_size=10,
        batch_size=batch_size)
  chainer = state_management.ChainingStateManager(
      state_saving_interval=1)
  features, _ = input_fn()
  stub_model.initialize_graph()
  chainer.initialize_graph(model=stub_model)
  model_outputs = chainer.define_loss(
      model=stub_model, features=features, mode=estimator_lib.ModeKeys.TRAIN)
  with self.test_session() as session:
    variables.global_variables_initializer().run()
    coordinator = coordinator_lib.Coordinator()
    queue_runner_impl.start_queue_runners(session, coord=coordinator)
    for _ in range(warmup_iterations):
      # Warm up saved state
      model_outputs.loss.eval()
    outputs = model_outputs.loss.eval()
    coordinator.request_stop()
    coordinator.join()
    return outputs
示例6: _random_window_input_fn_test_template
def _random_window_input_fn_test_template(
    self, time_series_reader, window_size, batch_size, num_features,
    discard_out_of_order=False):
  """Template: checks RandomWindowInputFn output shapes, that each window's
  times are contiguous, and that values follow values == times * 2 + f for
  each feature index f (the relationship the test data was built with).

  NOTE(review): `discard_out_of_order` is accepted but unused in this body
  -- confirm whether it should be forwarded to RandomWindowInputFn.

  Returns:
    The evaluated feature dictionary from one batch.
  """
  input_fn = input_pipeline.RandomWindowInputFn(
      time_series_reader=time_series_reader,
      window_size=window_size, batch_size=batch_size)
  result, _ = input_fn()
  init_op = variables.local_variables_initializer()
  with self.cached_session() as session:
    coordinator = coordinator_lib.Coordinator()
    queue_runner_impl.start_queue_runners(session, coord=coordinator)
    session.run(init_op)
    features = session.run(result)
    coordinator.request_stop()
    coordinator.join()
  self.assertAllEqual([batch_size, window_size],
                      features[TrainEvalFeatures.TIMES].shape)
  for window_position in range(window_size - 1):
    for batch_position in range(batch_size):
      # Checks that all times are contiguous
      self.assertEqual(
          features[TrainEvalFeatures.TIMES][batch_position,
                                            window_position + 1],
          features[TrainEvalFeatures.TIMES][batch_position,
                                            window_position] + 1)
  self.assertAllEqual([batch_size, window_size, num_features],
                      features[TrainEvalFeatures.VALUES].shape)
  self.assertEqual("int64", features[TrainEvalFeatures.TIMES].dtype)
  for feature_number in range(num_features):
    self.assertAllEqual(
        features[TrainEvalFeatures.TIMES] * 2. + feature_number,
        features[TrainEvalFeatures.VALUES][:, :, feature_number])
  return features
示例7: _all_window_input_fn_test_template
def _all_window_input_fn_test_template(
    self, time_series_reader, num_samples, window_size,
    original_numpy_features=None):
  """Template: checks AllWindowInputFn produces every contiguous window.

  If `original_numpy_features` is given, also checks that the chunked
  times/values correspond exactly to the original data.
  """
  input_fn = test_utils.AllWindowInputFn(
      time_series_reader=time_series_reader,
      window_size=window_size)
  features, _ = input_fn()
  init_op = variables.local_variables_initializer()
  with self.cached_session() as session:
    coordinator = coordinator_lib.Coordinator()
    queue_runner_impl.start_queue_runners(session, coord=coordinator)
    session.run(init_op)
    chunked_times, chunked_values = session.run(
        [features[TrainEvalFeatures.TIMES],
         features[TrainEvalFeatures.VALUES]])
    coordinator.request_stop()
    coordinator.join()
  # There are num_samples - window_size + 1 overlapping windows.
  self.assertAllEqual([num_samples - window_size + 1, window_size],
                      chunked_times.shape)
  if original_numpy_features is not None:
    original_times = original_numpy_features[TrainEvalFeatures.TIMES]
    original_values = original_numpy_features[TrainEvalFeatures.VALUES]
    self.assertAllEqual(original_times, numpy.unique(chunked_times))
    # Uses chunked_times as indices into the original values -- assumes
    # original times are 0-based consecutive integers; TODO confirm.
    self.assertAllEqual(original_values[chunked_times],
                        chunked_values)
示例8: _equivalent_to_single_model_test_template
def _equivalent_to_single_model_test_template(self, model_generator):
  """Template: checks that a state space model's chunked outputs agree with
  a comparison implementation built by `model_generator` on the same data.

  `model_generator(random_model, model_data)` must return a callable taking
  a session and returning (ignored, posteriors, predictions).
  """
  with self.cached_session() as session:
    random_model = RandomStateSpaceModel(
        state_dimension=5,
        state_noise_dimension=4,
        configuration=state_space_model.StateSpaceModelConfiguration(
            dtype=dtypes.float64, num_features=1))
    random_model.initialize_graph()
    series_length = 10
    model_data = random_model.generate(
        number_of_series=1, series_length=series_length,
        model_parameters=random_model.random_model_parameters())
    input_fn = input_pipeline.WholeDatasetInputFn(
        input_pipeline.NumpyReader(model_data))
    features, _ = input_fn()
    model_outputs = random_model.get_batch_loss(
        features=features,
        mode=None,
        state=math_utils.replicate_state(
            start_state=random_model.get_start_state(),
            batch_size=array_ops.shape(
                features[feature_keys.TrainEvalFeatures.TIMES])[0]))
    variables.global_variables_initializer().run()
    compare_outputs_evaled_fn = model_generator(
        random_model, model_data)
    coordinator = coordinator_lib.Coordinator()
    queue_runner_impl.start_queue_runners(session, coord=coordinator)
    compare_outputs_evaled = compare_outputs_evaled_fn(session)
    model_outputs_evaled = session.run(
        (model_outputs.end_state, model_outputs.predictions))
    coordinator.request_stop()
    coordinator.join()
    model_posteriors, model_predictions = model_outputs_evaled
    (_, compare_posteriors,
     compare_predictions) = compare_outputs_evaled
    (model_posterior_mean, model_posterior_var,
     model_from_time) = model_posteriors
    (compare_posterior_mean, compare_posterior_var,
     compare_from_time) = compare_posteriors
    # Comparison posteriors are indexed by chunk; element 0 corresponds to
    # the single un-chunked batch -- TODO confirm against model_generator.
    self.assertAllClose(model_posterior_mean, compare_posterior_mean[0])
    self.assertAllClose(model_posterior_var, compare_posterior_var[0])
    self.assertAllClose(model_from_time, compare_from_time)
    self.assertEqual(sorted(model_predictions.keys()),
                     sorted(compare_predictions.keys()))
    for prediction_name in model_predictions:
      if prediction_name == "loss":
        # Chunking means that losses will be different; skip testing them.
        continue
      # Compare the last chunk to their corresponding un-chunked model
      # predictions
      last_prediction_chunk = compare_predictions[prediction_name][-1]
      comparison_values = last_prediction_chunk.shape[0]
      model_prediction = (
          model_predictions[prediction_name][0, -comparison_values:])
      self.assertAllClose(model_prediction,
                          last_prediction_chunk)
示例9: _input_statistics_test_template
def _input_statistics_test_template(
    self, stat_object, num_features, dtype, give_full_data,
    warmup_iterations=0, rtol=1e-6, data_length=500, chunk_size=4):
  """Template: checks that `stat_object` computes correct input statistics
  (per-feature start/overall moments, start time, observation count) on
  synthetic data where values[0, t, f] == t + f and times == 2 * t - 3.

  Args:
    stat_object: The statistics object under test; must provide
      initialize_graph (and set_data when give_full_data is True).
    num_features: Number of value features to generate.
    dtype: TensorFlow dtype for the generated values.
    give_full_data: Whether to hand the full dataset to stat_object up
      front via set_data.
    warmup_iterations: Evaluations used to update adaptive statistics
      before asserting.
    rtol: Relative tolerance for the closeness assertions.
    data_length: Number of generated timesteps.
    chunk_size: Window size fed to RandomWindowInputFn.
  """
  graph = ops.Graph()
  with graph.as_default():
    numpy_dtype = dtype.as_numpy_dtype
    # values has shape [1, data_length, num_features] with
    # values[0, t, f] == t + f.
    values = (
        (numpy.arange(data_length, dtype=numpy_dtype)[..., None]
         + numpy.arange(num_features, dtype=numpy_dtype)[None, ...])[None])
    times = 2 * (numpy.arange(data_length)[None]) - 3
    if give_full_data:
      stat_object.set_data((times, values))
    features = {TrainEvalFeatures.TIMES: times,
                TrainEvalFeatures.VALUES: values}
    input_fn = input_pipeline.RandomWindowInputFn(
        batch_size=16, window_size=chunk_size,
        time_series_reader=input_pipeline.NumpyReader(features))
    statistics = stat_object.initialize_graph(
        features=input_fn()[0])
    with self.session(graph=graph) as session:
      variables.global_variables_initializer().run()
      coordinator = coordinator_lib.Coordinator()
      queue_runner_impl.start_queue_runners(session, coord=coordinator)
      for _ in range(warmup_iterations):
        # A control dependency should ensure that, for queue-based
        # statistics, a use of any statistic is preceded by an update of
        # all adaptive statistics.
        statistics.total_observation_count.eval()
      # Bug fix: `range(...) + ndarray` relied on Python 2's list-returning
      # range and raises TypeError on Python 3; numpy.arange broadcasts the
      # scalar mean across features identically.
      self.assertAllClose(
          numpy.arange(num_features) + numpy.mean(numpy.arange(chunk_size)),
          statistics.series_start_moments.mean.eval(),
          rtol=rtol)
      self.assertAllClose(
          numpy.tile(numpy.var(numpy.arange(chunk_size))[None],
                     [num_features]),
          statistics.series_start_moments.variance.eval(),
          rtol=rtol)
      self.assertAllClose(
          numpy.mean(values[0], axis=0),
          statistics.overall_feature_moments.mean.eval(),
          rtol=rtol)
      self.assertAllClose(
          numpy.var(values[0], axis=0),
          statistics.overall_feature_moments.variance.eval(),
          rtol=rtol)
      # First timestamp is 2 * 0 - 3 == -3.
      self.assertAllClose(
          -3,
          statistics.start_time.eval(),
          rtol=rtol)
      self.assertAllClose(
          data_length,
          statistics.total_observation_count.eval(),
          rtol=rtol)
      coordinator.request_stop()
      coordinator.join()
示例10: testStartQueueRunnersRaisesIfNotASession
def testStartQueueRunnersRaisesIfNotASession(self):
  """start_queue_runners must raise TypeError for a non-Session argument."""
  zero64 = constant_op.constant(0, dtype=dtypes.int64)
  var = variables.VariableV1(zero64)
  count_up_to = var.count_up_to(3)
  queue = data_flow_ops.FIFOQueue(10, dtypes.float32)
  init_op = variables.global_variables_initializer()
  # Register a runner so start_queue_runners has something to start.
  qr = queue_runner_impl.QueueRunner(queue, [count_up_to])
  queue_runner_impl.add_queue_runner(qr)
  with self.cached_session():
    init_op.run()
    # assertRaisesRegex replaces the deprecated assertRaisesRegexp alias
    # (removed in Python 3.12).
    with self.assertRaisesRegex(TypeError, "tf.Session"):
      queue_runner_impl.start_queue_runners("NotASession")
示例11: testExtendAfterQueueRunners
def testExtendAfterQueueRunners(self):
  """Checks a session can still run ops added after queue runners start
  (per the test name, this exercises extending the graph on the server).
  """
  server = self._cached_server
  with session.Session(server.target) as sess:
    input_queue = input_ops.input_producer(constant_op.constant(
        [0.], dtype=dtypes.float32))
    self.assertIsNotNone(input_queue)
    var = variables.VariableV1(1., dtype=dtypes.float32, trainable=False,
                               name="var")
    sess.run(variables.global_variables_initializer())
    queue_runner_impl.start_queue_runners(sess)
    # Running the assign after queue runners have started must still work.
    sess.run(var.assign(3.0))
示例12: test_long_eval
def test_long_eval(self):
  """Checks chunked and un-chunked ARModel evaluation agree on loss, end
  state, prediction times, and predictions.
  """
  g = ops.Graph()
  with g.as_default():
    model = ARModel(periodicities=2,
                    num_features=1,
                    num_time_buckets=10,
                    input_window_size=2,
                    output_window_size=1)
    raw_features = {
        TrainEvalFeatures.TIMES: [[1, 3, 5, 7, 11]],
        TrainEvalFeatures.VALUES: [[[1.], [2.], [3.], [4.], [5.]]]}
    chunked_features, _ = test_utils.AllWindowInputFn(
        time_series_reader=input_pipeline.NumpyReader(raw_features),
        window_size=3)()
    model.initialize_graph()
    # Share variables between the two losses so both evaluations use the
    # same model parameters.
    with variable_scope.variable_scope("armodel") as scope:
      raw_evaluation = model.define_loss(
          raw_features, mode=estimator_lib.ModeKeys.EVAL)
    with variable_scope.variable_scope(scope, reuse=True):
      chunked_evaluation = model.define_loss(
          chunked_features, mode=estimator_lib.ModeKeys.EVAL)
    with session.Session() as sess:
      coordinator = coordinator_lib.Coordinator()
      queue_runner_impl.start_queue_runners(sess, coord=coordinator)
      variables.global_variables_initializer().run()
      raw_evaluation_evaled, chunked_evaluation_evaled = sess.run(
          [raw_evaluation, chunked_evaluation])
      self.assertAllEqual(chunked_evaluation_evaled.loss,
                          raw_evaluation_evaled.loss)
      # The final chunk's end state should match the raw end state.
      last_chunk_evaluation_state = [
          state[-1, None] for state in
          chunked_evaluation_evaled.end_state]
      for last_chunk_state_member, raw_state_member in zip(
          last_chunk_evaluation_state, raw_evaluation_evaled.end_state):
        self.assertAllEqual(last_chunk_state_member, raw_state_member)
      self.assertAllEqual([[5, 7, 11]],
                          raw_evaluation_evaled.prediction_times)
      for feature_name in raw_evaluation.predictions:
        self.assertAllEqual(
            [1, 3, 1],  # batch, window, num_features. The window size has 2
                        # cut off for the first input_window.
            raw_evaluation_evaled.predictions[feature_name].shape)
        self.assertAllEqual(
            np.reshape(chunked_evaluation_evaled.predictions[feature_name],
                       [-1]),
            np.reshape(raw_evaluation_evaled.predictions[feature_name],
                       [-1]))
      coordinator.request_stop()
      coordinator.join()
示例13: testNotAMultiple
def testNotAMultiple(self):
  """batch_sequences_with_states must raise InvalidArgumentError when the
  sequence length is not a multiple of num_unroll and pad=False.
  """
  num_unroll = 3  # Not a divisor of value_length -
  # so padding would have been necessary.
  with self.test_session() as sess:
    # assertRaisesRegex replaces the deprecated assertRaisesRegexp alias
    # (removed in Python 3.12).
    with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
                                ".*should be a multiple of: 3, but saw "
                                "value: 4. Consider setting pad=True."):
      coord = coordinator.Coordinator()
      threads = None
      try:
        with coord.stop_on_exception():
          next_batch = sqss.batch_sequences_with_states(
              input_key=self.key,
              input_sequences=self.sequences,
              input_context=self.context,
              input_length=3,
              initial_states=self.initial_states,
              num_unroll=num_unroll,
              batch_size=self.batch_size,
              num_threads=3,
              # to enforce that we only move on to the next examples after
              # finishing all segments of the first ones.
              capacity=2,
              pad=False)
          threads = queue_runner_impl.start_queue_runners(coord=coord)
          sess.run([next_batch.key])
      except errors_impl.OutOfRangeError:
        # Exhausting the queue without the expected error is handled by the
        # enclosing assertRaisesRegex failing.
        pass
      finally:
        coord.request_stop()
        if threads is not None:
          coord.join(threads, stop_grace_period_secs=2)
示例14: testPandasFeeding
def testPandasFeeding(self):
  """Checks that a queued DataFrame dequeues its index and columns in
  order, wrapping around when the 32 rows are exhausted.
  """
  if not HAS_PANDAS:
    return
  with ops.Graph().as_default():
    array1 = np.arange(32)
    array2 = np.arange(32, 64)
    df = pd.DataFrame({"a": array1, "b": array2}, index=np.arange(64, 96))
    q = ff._enqueue_data(df, capacity=100)
    batch_size = 5
    # Consistency fix: use the named batch_size instead of a repeated
    # literal 5, so the dequeue size cannot drift from the index math below.
    dq_op = q.dequeue_many(batch_size)
    with session.Session() as sess:
      coord = coordinator.Coordinator()
      threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
      for i in range(100):
        # Expected row positions for this batch, modulo the data length.
        indices = [
            j % array1.shape[0]
            for j in range(batch_size * i, batch_size * (i + 1))
        ]
        expected_df_indices = df.index[indices]
        expected_rows = df.iloc[indices]
        dq = sess.run(dq_op)
        # dq[0] is the DataFrame index; dq[1:] are the columns in order.
        np.testing.assert_array_equal(expected_df_indices, dq[0])
        for col_num, col in enumerate(df.columns):
          np.testing.assert_array_equal(expected_rows[col].values,
                                        dq[col_num + 1])
      coord.request_stop()
      coord.join(threads)
示例15: test_keyed_features_filter
def test_keyed_features_filter(self):
  """Checks _read_keyed_batch_examples_helper applies filter_fn, keeping
  only examples whose "age" feature is less than 2, in input order.
  """
  # Restore the original Glob -- presumably patched elsewhere in this test
  # case; TODO confirm against setUp.
  gfile.Glob = self._orig_glob
  lines = [
      '{"features": {"feature": {"age": {"int64_list": {"value": [2]}}}}}',
      '{"features": {"feature": {"age": {"int64_list": {"value": [0]}}}}}',
      '{"features": {"feature": {"age": {"int64_list": {"value": [1]}}}}}',
      '{"features": {"feature": {"age": {"int64_list": {"value": [0]}}}}}',
      '{"features": {"feature": {"age": {"int64_list": {"value": [3]}}}}}',
      '{"features": {"feature": {"age": {"int64_list": {"value": [5]}}}}}'
  ]
  filename = self._create_temp_file("\n".join(lines))
  batch_size = 2
  queue_capacity = 4
  name = "my_batch"
  features = {"age": parsing_ops.FixedLenFeature([], dtypes_lib.int64)}

  def filter_fn(keys, examples_json):
    # Keep only examples whose parsed "age" is less than 2.
    del keys
    serialized = parsing_ops.decode_json_example(examples_json)
    examples = parsing_ops.parse_example(serialized, features)
    return math_ops.less(examples["age"], 2)

  with ops.Graph().as_default() as g, self.session(graph=g) as session:
    keys, inputs = graph_io._read_keyed_batch_examples_helper(
        filename,
        batch_size,
        reader=io_ops.TextLineReader,
        randomize_input=False,
        num_epochs=1,
        read_batch_size=batch_size,
        queue_capacity=queue_capacity,
        filter_fn=filter_fn,
        name=name)
    self.assertAllEqual((None,), keys.get_shape().as_list())
    self.assertAllEqual((None,), inputs.get_shape().as_list())
    session.run(variables.local_variables_initializer())
    coord = coordinator.Coordinator()
    threads = queue_runner_impl.start_queue_runners(session, coord=coord)
    # First batch of two filtered examples.
    out_keys, out_vals = session.run((keys, inputs))
    self.assertAllEqual(
        [filename.encode("utf-8") + b":2", filename.encode("utf-8") + b":3"],
        out_keys)
    self.assertAllEqual([lines[1].encode("utf-8"), lines[2].encode("utf-8")],
                        out_vals)
    # Second batch will only have one filtered example as that's the only
    # remaining example that satisfies the filtering criterion.
    out_keys, out_vals = session.run((keys, inputs))
    self.assertAllEqual([filename.encode("utf-8") + b":4"], out_keys)
    self.assertAllEqual([lines[3].encode("utf-8")], out_vals)
    # Exhausted input.
    with self.assertRaises(errors.OutOfRangeError):
      session.run((keys, inputs))
    coord.request_stop()
    coord.join(threads)