本文整理汇总了Python中tensorflow.python.keras.utils.generic_utils.Progbar类的典型用法代码示例。如果您正苦于以下问题:Python Progbar类的具体用法?Python Progbar怎么用?Python Progbar使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Progbar类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: on_epoch_begin
def on_epoch_begin(self, epoch, logs=None):
  """Reset progress-bar state at the start of each epoch."""
  if self.verbose:
    print('Epoch %d/%d' % (epoch + 1, self.epochs))
  # Count batches or samples depending on the configured mode.
  count_key = 'steps' if self.use_steps else 'samples'
  self.target = self.params[count_key]
  self.progbar = Progbar(
      target=self.target,
      verbose=self.verbose,
      stateful_metrics=self.stateful_metrics)
  self.seen = 0
示例2: predict_generator
def predict_generator(model,
                      generator,
                      steps=None,
                      max_queue_size=10,
                      workers=1,
                      use_multiprocessing=False,
                      verbose=0):
  """See docstring for `Model.predict_generator`.

  Consumes `generator` (a `Sequence` or plain generator) batch by batch,
  runs `model.predict_on_batch` on each batch of inputs, and concatenates
  the per-batch outputs into one array (or list of arrays) at the end.
  """
  if not context.executing_eagerly():
    model._make_predict_function()

  steps_done = 0
  all_outs = []
  is_sequence = isinstance(generator, Sequence)
  if not is_sequence and use_multiprocessing and workers > 1:
    # BUGFIX: message previously read "using the`keras.utils.Sequence"
    # (missing space and closing backtick).
    logging.warning(
        UserWarning('Using a generator with `use_multiprocessing=True`'
                    ' and multiple workers may duplicate your data.'
                    ' Please consider using the `keras.utils.Sequence`'
                    ' class.'))
  if steps is None:
    if is_sequence:
      steps = len(generator)
    else:
      raise ValueError('`steps=None` is only valid for a generator'
                       ' based on the `keras.utils.Sequence` class.'
                       ' Please specify `steps` or use the'
                       ' `keras.utils.Sequence` class.')
  enqueuer = None

  try:
    if workers > 0:
      if is_sequence:
        enqueuer = OrderedEnqueuer(
            generator, use_multiprocessing=use_multiprocessing)
      else:
        enqueuer = GeneratorEnqueuer(
            generator,
            use_multiprocessing=use_multiprocessing)
      enqueuer.start(workers=workers, max_queue_size=max_queue_size)
      output_generator = enqueuer.get()
    else:
      if is_sequence:
        output_generator = iter_sequence_infinite(generator)
      else:
        output_generator = generator

    if verbose == 1:
      progbar = Progbar(target=steps)

    while steps_done < steps:
      generator_output = next(output_generator)
      if isinstance(generator_output, tuple):
        # Compatibility with the generators used for training: keep only
        # the inputs, dropping targets and sample weights.
        if len(generator_output) == 2:
          x, _ = generator_output
        elif len(generator_output) == 3:
          x, _, _ = generator_output
        else:
          raise ValueError('Output of generator should be '
                           'a tuple `(x, y, sample_weight)` '
                           'or `(x, y)`. Found: ' + str(generator_output))
      else:
        # Assumes a generator that only yields inputs
        # (not targets and sample weights).
        x = generator_output

      outs = model.predict_on_batch(x)
      if not isinstance(outs, list):
        outs = [outs]

      if not all_outs:
        # One accumulator list per model output.
        all_outs = [[] for _ in outs]

      for i, out in enumerate(outs):
        all_outs[i].append(out)
      steps_done += 1
      if verbose == 1:
        progbar.update(steps_done)

  finally:
    if enqueuer is not None:
      enqueuer.stop()

  if len(all_outs) == 1:
    if steps_done == 1:
      return all_outs[0][0]
    else:
      return np.concatenate(all_outs[0])
  if steps_done == 1:
    return [out[0] for out in all_outs]
  else:
    return [np.concatenate(out) for out in all_outs]
示例3: evaluate_generator
def evaluate_generator(model,
                       generator,
                       steps=None,
                       max_queue_size=10,
                       workers=1,
                       use_multiprocessing=False,
                       verbose=0):
  """See docstring for `Model.evaluate_generator`."""
  if not context.executing_eagerly():
    model._make_test_function()

  # Reset running (stateful) metrics before evaluation starts.
  # NOTE(review): the guard checks for a `metrics` attribute but the loop
  # reads `stateful_metric_functions` -- confirm the two always coexist.
  if hasattr(model, 'metrics'):
    for m in model.stateful_metric_functions:
      m.reset_states()

  steps_done = 0
  all_outs = []      # per-batch outputs of `test_on_batch`
  batch_sizes = []   # per-batch sizes, used to weight the final averages
  is_sequence = isinstance(generator, Sequence)
  if not is_sequence and use_multiprocessing and workers > 1:
    logging.warning(
        UserWarning('Using a generator with `use_multiprocessing=True`'
                    ' and multiple workers may duplicate your data.'
                    ' Please consider using the`keras.utils.Sequence'
                    ' class.'))
  if steps is None:
    if is_sequence:
      steps = len(generator)
    else:
      raise ValueError('`steps=None` is only valid for a generator'
                       ' based on the `keras.utils.Sequence` class.'
                       ' Please specify `steps` or use the'
                       ' `keras.utils.Sequence` class.')
  enqueuer = None

  try:
    if workers > 0:
      if is_sequence:
        enqueuer = OrderedEnqueuer(
            generator, use_multiprocessing=use_multiprocessing)
      else:
        enqueuer = GeneratorEnqueuer(
            generator,
            use_multiprocessing=use_multiprocessing)
      enqueuer.start(workers=workers, max_queue_size=max_queue_size)
      output_generator = enqueuer.get()
    else:
      if is_sequence:
        output_generator = iter_sequence_infinite(generator)
      else:
        output_generator = generator

    if verbose == 1:
      progbar = Progbar(target=steps)

    while steps_done < steps:
      generator_output = next(output_generator)
      if not hasattr(generator_output, '__len__'):
        raise ValueError('Output of generator should be a tuple '
                         '(x, y, sample_weight) '
                         'or (x, y). Found: ' + str(generator_output))
      if len(generator_output) == 2:
        x, y = generator_output
        sample_weight = None
      elif len(generator_output) == 3:
        x, y, sample_weight = generator_output
      else:
        raise ValueError('Output of generator should be a tuple '
                         '(x, y, sample_weight) '
                         'or (x, y). Found: ' + str(generator_output))
      outs = model.test_on_batch(x, y, sample_weight=sample_weight)

      # Batch size is taken from the first input array; it weights the
      # per-batch metric values in the final average.
      if isinstance(x, list):
        batch_size = x[0].shape[0]
      elif isinstance(x, dict):
        batch_size = list(x.values())[0].shape[0]
      else:
        batch_size = x.shape[0]
      if batch_size == 0:
        raise ValueError('Received an empty batch. '
                         'Batches should at least contain one item.')
      all_outs.append(outs)
      steps_done += 1
      batch_sizes.append(batch_size)
      if verbose == 1:
        progbar.update(steps_done)

  finally:
    if enqueuer is not None:
      enqueuer.stop()

  if not isinstance(outs, list):
    return np.average(np.asarray(all_outs), weights=batch_sizes)
  else:
    averages = [float(all_outs[-1][0])]  # index 0 = 'loss'
    averages.extend([
        np.average([out[i]
                    for out in all_outs], weights=batch_sizes)
        for i in range(1, len(outs))
        # NOTE(review): the original listing is truncated at this point.
#.........这里部分代码省略.........
示例4: _experimental_predict_loop
def _experimental_predict_loop(model, iterator, verbose=0, steps=None):
  """Predict loop for predicting with TPU DistributionStrategy.

  Arguments:
    model: Keras Model instance.
    iterator: Iterator for input data.
    verbose: Integer, Verbosity mode 0 or 1.
    steps: Total number of steps (batches of samples)
        before declaring `_predict_loop` finished.
        Ignored with the default value of `None`.

  Returns:
    Array of predictions (if the model has a single output)
    or list of arrays of predictions
    (if the model has multiple outputs).
  """
  current_strategy = model._distribution_strategy
  K.get_session().run(current_strategy.initialize())
  # TODO(priyag, sourabhbajaj): This should likely not be hardcoded here.
  K.set_learning_phase(0)

  def _per_device_predict_function(model):
    # Build the per-device predict function and expose its components so
    # they can be merged across replicas.
    model._make_predict_function()
    return (model.predict_function.inputs,
            model.predict_function.outputs,
            model.predict_function.updates_op,
            model.predict_function.session_kwargs)

  def step_fn(ctx, *inputs):
    """Clones the model and calls make_predict_function."""
    # TODO(priyag, sourabhbajaj): The model gets cloned every time
    # fit/test/predict is called. We should look into caching this keyed on
    # input shapes.
    clone_model_on_replicas(
        model,
        current_strategy,
        make_callback_model=False,
        inputs=inputs,
        mode=_Mode.PREDICT)
    (grouped_inputs, grouped_outputs, grouped_updates,
     grouped_session_args) = current_strategy.call_for_each_replica(
         _per_device_predict_function, args=(model._grouped_model_predict,))
    (all_inputs, all_outputs, all_updates,
     all_session_args) = distributed_training_utils.unwrap_values(
         current_strategy, grouped_inputs, grouped_outputs, grouped_updates,
         grouped_session_args)
    combined_fn = K.function(
        all_inputs, all_outputs,
        updates=all_updates,
        name='distributed_predict_function',
        **all_session_args)

    for label, output in zip(model.output_names, combined_fn.outputs):
      ctx.set_last_step_output(label, output)

    return combined_fn.updates_op

  # Add initial dummy values for outputs.
  initial_loop_values = {}
  batch_dimension = distributed_training_utils.get_batch_dimension(iterator)
  for name, tensor in zip(model.output_names, model.outputs):
    # TODO(priyag): This is a workaround as we do not know the batch dimension
    # of the model's output at this point.
    shape = tensor_shape.TensorShape(tensor.shape.dims)
    shape.dims = [batch_dimension] + shape.dims[1:]
    initial_loop_values[name] = array_ops.zeros(shape, tensor.dtype)

  with current_strategy.scope():
    # TODO(priyag, sourabhbajaj): Support steps_per_run if/when we add outfeed.
    ctx = current_strategy.run_steps_on_dataset(
        step_fn, iterator, iterations=1,
        initial_loop_values=initial_loop_values)

  predict_op = ctx.run_op
  output_tensors = ctx.last_step_outputs

  if verbose == 1:
    progbar = Progbar(target=steps)

  # Copy the weights from the original model to each of the replicated models.
  orig_model_weights = model.get_weights()
  with current_strategy.scope():
    distributed_model = current_strategy.unwrap(model._grouped_model_predict)[0]
    distributed_training_utils.set_weights(
        current_strategy, distributed_model, orig_model_weights)

  assert steps is not None
  # Since we do not know how many samples we will see, we cannot pre-allocate
  # the returned Numpy arrays. Instead, we store one array per batch seen
  # and concatenate them upon returning.
  unconcatenated_outs = [[] for _ in model.outputs]
  for step in range(steps):
    _, batch_outs = K.get_session().run([predict_op, output_tensors])
    # TODO(priyag): maybe need to unwrap the outputs first for MirroredStrategy.
    for i, label in enumerate(model.output_names):
      # NOTE(review): the original listing is truncated at this point.
#.........这里部分代码省略.........
示例5: predict_loop
def predict_loop(model, iterator, verbose=0, steps=None):
  """Predict loop for predicting with DistributionStrategy.

  Arguments:
    model: Keras Model instance.
    iterator: Iterator for input data.
    verbose: Integer, Verbosity mode 0 or 1.
    steps: Total number of steps (batches of samples)
        before declaring `_predict_loop` finished.
        Ignored with the default value of `None`.

  Returns:
    Array of predictions (if the model has a single output)
    or list of arrays of predictions
    (if the model has multiple outputs).
  """
  current_strategy = model._distribution_strategy
  # TODO(priyag, sourabhbajaj): Remove this when the codepaths are merged.
  if current_strategy.__class__.__name__ == 'TPUStrategy':
    return _experimental_predict_loop(model, iterator, verbose, steps)

  if not model._grouped_model:
    clone_model_on_replicas(model, current_strategy)

  def _per_device_predict_function(model):
    # Build the per-device predict function and expose its components so
    # they can be merged across replicas.
    model._make_predict_function()
    return (model.predict_function.inputs,
            model.predict_function.outputs,
            model.predict_function.updates_op,
            model.predict_function.session_kwargs)

  inputs, _, _ = _get_input_from_iterator(iterator, model)
  with current_strategy.scope():
    (grouped_inputs, grouped_outputs, grouped_updates,
     grouped_session_args) = current_strategy.call_for_each_replica(
         _per_device_predict_function, args=(model._grouped_model,))
    (all_inputs, all_outputs, all_updates,
     all_session_args) = distributed_training_utils.unwrap_values(
         current_strategy, grouped_inputs, grouped_outputs, grouped_updates,
         grouped_session_args)
    dataset_inputs = distributed_training_utils.flatten_perdevice_values(
        current_strategy, inputs)

  distributed_predict_function = K.function(
      all_inputs, all_outputs,
      updates=all_updates,
      name='distributed_predict_function',
      **all_session_args)

  # Append the learning-phase flag (0 = inference) unless it is a constant.
  if not isinstance(K.learning_phase(), int):
    ins = dataset_inputs + [0]
  else:
    ins = dataset_inputs

  if verbose == 1:
    progbar = Progbar(target=steps)

  # Copy the weights from the original model to each of the replicated models.
  orig_model_weights = model.get_weights()
  distributed_model = current_strategy.unwrap(model._grouped_model)[0]
  distributed_training_utils.set_weights(
      current_strategy, distributed_model, orig_model_weights)

  num_replicas = current_strategy.num_replicas_in_sync
  # Since we do not know how many samples we will see, we cannot
  # pre-allocate the returned Numpy arrays. Instead, we store one array per
  # batch seen and concatenate them upon returning.
  unconcatenated_outs = []
  assert steps is not None
  for step in range(steps):
    batch_outs = distributed_predict_function(ins)
    if not isinstance(batch_outs, list):
      batch_outs = [batch_outs]
    if step == 0:
      # batch_outs gives you the number of model outputs. In the distributed
      # case this will be number of model_outputs * num_replicas.
      unconcatenated_outs = [[] for _ in range(len(model.outputs))]
    for i in range(len(model.outputs)):
      nested_outs = batch_outs[i * num_replicas:
                               i * num_replicas + num_replicas]
      outs = nest.flatten(nested_outs)
      unconcatenated_outs[i].extend(outs)
    # BUGFIX: `progbar` is only created when `verbose == 1`, but the update
    # was guarded with `verbose >= 1`, raising a NameError for verbose > 1.
    if verbose == 1:
      progbar.update(step + 1)
  if len(unconcatenated_outs) == 1:
    return np.concatenate(unconcatenated_outs[0], axis=0)
  return [
      np.concatenate(unconcatenated_outs[i], axis=0)
      for i in range(len(unconcatenated_outs))
  ]
示例6: _experimental_test_loop
def _experimental_test_loop(model, iterator, verbose=0, steps=None,
                            initialize_finalize_strategy=True):
  """Test loop for evaluating with TPU DistributionStrategy.

  Arguments:
    model: Keras Model instance.
    iterator: Iterator for input data.
    verbose: Integer, Verbosity mode 0 or 1.
    steps: Total number of steps (batches of samples)
        before declaring predictions finished.
        Ignored with the default value of `None`.
    initialize_finalize_strategy: Should the strategy initialize and finalize
        functions be called.

  Returns:
    Scalar loss (if the model has a single output and no metrics)
    or list of scalars (if the model has multiple outputs
    and/or metrics). The attribute `model.metrics_names` will give you
    the display labels for the outputs.
  """
  current_strategy = model._distribution_strategy
  if initialize_finalize_strategy:
    K.get_session().run(current_strategy.initialize())

  def _per_device_eval_function(model):
    # Build the per-device eval function and expose its components so
    # they can be merged across replicas.
    model._make_eval_function()
    return (model._eval_function.inputs, model._eval_function.outputs,
            model._eval_function.updates_op,
            model._eval_function.session_kwargs)

  # TODO(priyag, sourabhbajaj): This should likely not be hardcoded here.
  K.set_learning_phase(0)

  def step_fn(ctx, inputs, targets):
    """Clones the model and calls make_eval_function."""
    # TODO(priyag, sourabhbajaj): The model gets cloned every time
    # fit/test/predict is called. We should look into caching this keyed on
    # input shapes.
    clone_model_on_replicas(
        model,
        current_strategy,
        make_callback_model=False,
        inputs=inputs,
        targets=targets,
        mode=_Mode.TEST)
    (grouped_inputs, grouped_outputs, grouped_updates,
     grouped_session_args) = current_strategy.call_for_each_replica(
         _per_device_eval_function, args=(model._grouped_model_test,))
    (all_inputs, all_outputs, all_updates,
     all_session_args) = distributed_training_utils.unwrap_values(
         current_strategy, grouped_inputs, grouped_outputs, grouped_updates,
         grouped_session_args)
    combined_fn = K.function(
        all_inputs, all_outputs,
        updates=all_updates,
        name='distributed_test_function',
        **all_session_args)

    for label, output in zip(model.metrics_names, combined_fn.outputs):
      if label == 'loss':
        aggregation = distribute_lib.get_loss_reduction()
      else:
        # We aggregate all other metrics using mean for now. This is temporary
        # workaround until new metrics are in place.
        aggregation = variable_scope.VariableAggregation.MEAN
      ctx.set_last_step_output(label, output, aggregation)

    return combined_fn.updates_op

  # Add initial dummy values for loss and other metric tensors.
  initial_loop_values = {}
  initial_loop_values['loss'] = constant_op.constant(1e7)
  for name, tensor in zip(model.metrics_names[1:], model.metrics_tensors):
    initial_loop_values[name] = array_ops.zeros(tensor.shape, tensor.dtype)

  with current_strategy.scope():
    # TODO(priyag): Use steps_per_run when we use new metrics as they will
    # allow handling metric computation at each step using variables.
    ctx = current_strategy.run_steps_on_dataset(
        step_fn, iterator, iterations=1,
        initial_loop_values=initial_loop_values)

  test_op = ctx.run_op
  output_tensors = ctx.last_step_outputs

  if verbose == 1:
    progbar = Progbar(target=steps)

  # Copy the weights from the original model to each of the replicated models.
  orig_model_weights = model.get_weights()
  with current_strategy.scope():
    distributed_model = current_strategy.unwrap(model._grouped_model_test)[0]
    distributed_training_utils.set_weights(
        current_strategy, distributed_model, orig_model_weights)

  assert steps is not None
  outs = [0.] * len(model.metrics_names)
  # NOTE(review): the original listing is truncated at this point.
#.........这里部分代码省略.........
示例7: test_loop
def test_loop(model, iterator, verbose=0, steps=None):
  """Test loop for evaluating with DistributionStrategy.

  Arguments:
    model: Keras Model instance.
    iterator: Iterator for input data.
    verbose: Integer, Verbosity mode 0 or 1.
    steps: Total number of steps (batches of samples)
        before declaring predictions finished.
        Ignored with the default value of `None`.

  Returns:
    Scalar loss (if the model has a single output and no metrics)
    or list of scalars (if the model has multiple outputs
    and/or metrics). The attribute `model.metrics_names` will give you
    the display labels for the outputs.
  """
  current_strategy = model._distribution_strategy
  # TODO(priyag, sourabhbajaj): Remove this when the codepaths are merged.
  if current_strategy.__class__.__name__ == 'TPUStrategy':
    return _experimental_test_loop(model, iterator, verbose, steps)

  if not model._grouped_model:
    clone_model_on_replicas(model, current_strategy)

  def _per_device_eval_function(model):
    # Build the per-device eval function and expose its components so
    # they can be merged across replicas.
    model._make_eval_function()
    return (model._eval_function.inputs, model._eval_function.outputs,
            model._eval_function.updates_op,
            model._eval_function.session_kwargs)

  inputs, targets, sample_weights = _get_input_from_iterator(iterator, model)
  with current_strategy.scope():
    (grouped_inputs, grouped_outputs, grouped_updates,
     grouped_session_args) = current_strategy.call_for_each_replica(
         _per_device_eval_function, args=(model._grouped_model,))
    (all_inputs, all_outputs, all_updates,
     all_session_args) = distributed_training_utils.unwrap_values(
         current_strategy, grouped_inputs, grouped_outputs, grouped_updates,
         grouped_session_args, with_loss_tensor=True)
    dataset_inputs = distributed_training_utils.flatten_perdevice_values(
        current_strategy, inputs)
    dataset_targets = distributed_training_utils.flatten_perdevice_values(
        current_strategy, targets)

  distributed_test_function = K.function(
      all_inputs, all_outputs,
      updates=all_updates,
      name='distributed_test_function',
      **all_session_args)

  # We need to set sample_weights to None since there are sample weight
  # placeholders that are created with default values.
  sample_weights = [None for _ in range(
      len(model.outputs) * current_strategy.num_replicas_in_sync)]
  if not isinstance(K.learning_phase(), int):
    ins = dataset_inputs + dataset_targets + sample_weights + [0]
  else:
    ins = dataset_inputs + dataset_targets

  # Reset running (stateful) metrics before evaluation starts.
  for m in model.stateful_metric_functions:
    m.reset_states()

  outs = []
  if verbose == 1:
    progbar = Progbar(target=steps)

  # Copy the weights from the original model to each of the replicated models.
  orig_model_weights = model.get_weights()
  distributed_model = current_strategy.unwrap(model._grouped_model)[0]
  distributed_training_utils.set_weights(
      current_strategy, distributed_model, orig_model_weights)

  assert steps is not None
  for step in range(steps):
    batch_outs = distributed_test_function(ins)
    if isinstance(batch_outs, list):
      if step == 0:
        outs = [0.] * len(batch_outs)
      outs[0] += batch_outs[0]  # index 0 = 'loss'
      outs[1:] = batch_outs[1:]
    else:
      if step == 0:
        outs.append(0.)
      outs[0] += batch_outs  # index 0 = 'loss'
    # BUGFIX: `progbar` is only created when `verbose == 1`, but the update
    # was guarded with `verbose >= 1`, raising a NameError for verbose > 1.
    if verbose == 1:
      progbar.update(step + 1)
  outs[0] /= steps  # index 0 = 'loss'

  if len(outs) == 1:
    return outs[0]
  return outs
示例8: predict_loop
def predict_loop(model, inputs, batch_size=32, verbose=0, steps=None):
  """Abstract method to loop over some data in batches.

  Arguments:
    model: Keras Model instance.
    inputs: list of tensors to be fed to `f`.
    batch_size: integer batch size.
    verbose: verbosity mode.
    steps: Total number of steps (batches of samples)
        before declaring `_predict_loop` finished.
        Ignored with the default value of `None`.

  Returns:
    Array of predictions (if the model has a single output)
    or list of arrays of predictions
    (if the model has multiple outputs).
  """
  model._make_predict_function()
  predict_fn = model.predict_function

  # Append the learning-phase flag (0 = inference) unless it is a constant.
  if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
    ins = inputs + [0]
  else:
    ins = inputs

  num_samples = training_utils.check_num_samples(
      inputs, batch_size, steps, 'steps')
  if verbose == 1:
    progbar = Progbar(target=num_samples if steps is None else steps)

  # Sparse inputs fed into dense placeholders must be densified per batch.
  indices_for_conversion_to_dense = [
      i for i in range(len(model._feed_inputs))
      if (issparse is not None and issparse(inputs[i]) and
          not K.is_sparse(model._feed_inputs[i]))
  ]

  if steps is not None:
    # Step-based predictions: the total sample count is unknown up front,
    # so collect one array per batch and concatenate at the end.
    unconcatenated_outs = []
    for step in range(steps):
      batch_outs = predict_fn(ins)
      if not isinstance(batch_outs, list):
        batch_outs = [batch_outs]
      if step == 0:
        unconcatenated_outs = [[] for _ in batch_outs]
      for i, batch_out in enumerate(batch_outs):
        unconcatenated_outs[i].append(batch_out)
      if verbose == 1:
        progbar.update(step + 1)
    if len(unconcatenated_outs) == 1:
      return np.concatenate(unconcatenated_outs[0], axis=0)
    return [np.concatenate(out, axis=0) for out in unconcatenated_outs]
  else:
    # Sample-based predictions: the total count is known, so pre-allocate
    # the result arrays and fill them batch by batch.
    outs = []
    batches = make_batches(num_samples, batch_size)
    index_array = np.arange(num_samples)
    for batch_index, (batch_start, batch_end) in enumerate(batches):
      batch_ids = index_array[batch_start:batch_end]
      if ins and isinstance(ins[-1], int):
        # Do not slice the training phase flag.
        ins_batch = slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
      else:
        ins_batch = slice_arrays(ins, batch_ids)
      for i in indices_for_conversion_to_dense:
        ins_batch[i] = ins_batch[i].toarray()

      batch_outs = predict_fn(ins_batch)
      if not isinstance(batch_outs, list):
        batch_outs = [batch_outs]
      if batch_index == 0:
        # Pre-allocate the result arrays from the first batch's shapes.
        outs = [
            np.zeros((num_samples,) + batch_out.shape[1:],
                     dtype=batch_out.dtype) for batch_out in batch_outs
        ]
      for i, batch_out in enumerate(batch_outs):
        outs[i][batch_start:batch_end] = batch_out
      if verbose == 1:
        progbar.update(batch_end)
    if len(outs) == 1:
      return outs[0]
    return outs
示例9: ProgbarLogger
class ProgbarLogger(Callback):
  """Callback that prints metrics to stdout.

  Arguments:
    count_mode: One of "steps" or "samples".
        Whether the progress bar should
        count samples seen or steps (batches) seen.
    stateful_metrics: Iterable of string names of metrics that
        should *not* be averaged over an epoch.
        Metrics in this list will be logged as-is.
        All others will be averaged over time (e.g. loss, etc).

  Raises:
    ValueError: In case of invalid `count_mode`.
  """

  def __init__(self, count_mode='samples', stateful_metrics=None):
    super(ProgbarLogger, self).__init__()
    if count_mode == 'samples':
      self.use_steps = False
    elif count_mode == 'steps':
      self.use_steps = True
    else:
      raise ValueError('Unknown `count_mode`: ' + str(count_mode))
    self.stateful_metrics = set(stateful_metrics or [])

  def on_train_begin(self, logs=None):
    self.verbose = self.params['verbose']
    self.epochs = self.params['epochs']

  def on_epoch_begin(self, epoch, logs=None):
    if self.verbose:
      print('Epoch %d/%d' % (epoch + 1, self.epochs))
    if self.use_steps:
      target = self.params['steps']
    else:
      target = self.params['samples']
    self.target = target
    self.progbar = Progbar(
        target=self.target,
        verbose=self.verbose,
        stateful_metrics=self.stateful_metrics)
    self.seen = 0

  def on_batch_begin(self, batch, logs=None):
    # BUGFIX: `target` may be None (e.g. `steps` unknown for a generator);
    # comparing int < None raises TypeError in Python 3, so treat None as
    # "always within the epoch".
    if self.target is None or self.seen < self.target:
      self.log_values = []

  def on_batch_end(self, batch, logs=None):
    logs = logs or {}
    batch_size = logs.get('size', 0)
    if self.use_steps:
      self.seen += 1
    else:
      self.seen += batch_size

    for k in self.params['metrics']:
      if k in logs:
        self.log_values.append((k, logs[k]))

    # Skip progbar update for the last batch;
    # will be handled by on_epoch_end.
    if self.verbose and (self.target is None or self.seen < self.target):
      self.progbar.update(self.seen, self.log_values)

  def on_epoch_end(self, epoch, logs=None):
    logs = logs or {}
    for k in self.params['metrics']:
      if k in logs:
        self.log_values.append((k, logs[k]))
    if self.verbose:
      self.progbar.update(self.seen, self.log_values)
示例10: experimental_tpu_test_loop
def experimental_tpu_test_loop(model,
                               dataset,
                               verbose=0,
                               steps=None,
                               callbacks=None):
  """Test loop for evaluating with TPU DistributionStrategy.

  Arguments:
    model: Keras Model instance.
    dataset: Dataset for input data.
    verbose: Integer, Verbosity mode 0 or 1.
    steps: Total number of steps (batches of samples)
        before declaring predictions finished.
        Ignored with the default value of `None`.
    callbacks: List of callbacks to be called during training

  Returns:
    Scalar loss (if the model has a single output and no metrics)
    or list of scalars (if the model has multiple outputs
    and/or metrics). The attribute `model.metrics_names` will give you
    the display labels for the outputs.
  """
  mode = ModeKeys.TEST
  current_strategy = model._distribution_strategy
  iterator = distributed_training_utils.get_iterator(dataset,
                                                     current_strategy)
  steps = training_utils.infer_steps_for_dataset(dataset, steps,
                                                 steps_name='steps')

  # Enter a distribution scope with learning phase 0 (inference).
  scope = distributed_training_utils.distributed_scope(
      strategy=current_strategy, learning_phase=0)
  scope.__enter__()

  out_labels = model.metrics_names
  step_fn = _make_step_fn(model, ModeKeys.TEST, current_strategy, out_labels)

  # Add initial dummy values for loss and other metric tensors.
  initial_loop_values = {}
  initial_loop_values['loss'] = constant_op.constant(1e7)
  for name in model.metrics_names[1:]:
    tensor = model._all_stateful_metrics_tensors[name]
    initial_loop_values[name] = array_ops.zeros(tensor.shape, tensor.dtype)

  # TODO(priyag): Use steps_per_run when we use new metrics as they will
  # allow handling metric computation at each step using variables.
  ctx = current_strategy.extended.experimental_run_steps_on_iterator(
      step_fn, iterator, iterations=1,
      initial_loop_values=initial_loop_values)

  test_op = ctx.run_op
  output_tensors = ctx.last_step_outputs

  if verbose == 1:
    progbar = Progbar(target=steps)

  if model._compile_distribution:
    distributed_training_utils._copy_weights_to_distributed_model(model, mode)

  distributed_training_utils._reset_metrics(model)

  callbacks = cbks.configure_callbacks(
      callbacks,
      model,
      do_validation=False,
      epochs=1,
      steps_per_epoch=steps,
      verbose=verbose,
      count_mode='steps',
      mode=ModeKeys.TEST)
  callbacks._call_begin_hook(mode)

  outs = [0.] * len(model.metrics_names)
  if steps is not None:
    target_steps = steps
  else:
    # Unknown step count: iterate until the dataset raises OutOfRangeError.
    target_steps = np.inf

  current_step = 0
  while current_step < target_steps:
    batch_logs = {'batch': current_step, 'size': 1}
    callbacks._call_batch_hook(mode, 'begin', current_step, batch_logs)
    try:
      _, batch_outs = K.batch_get_value([test_op, output_tensors])
    except errors.OutOfRangeError:
      if steps is not None:
        # NOTE(review): the second string literal below is a separate
        # expression statement, not part of `warning_msg` -- the logged
        # message is silently truncated. It was presumably meant to be one
        # parenthesized expression; confirm upstream before relying on it.
        warning_msg = 'Make sure that your dataset can generate at least '
        '`steps` batches (in this case, {} batches).'.format(steps)
      else:
        warning_msg = 'Number of steps ran: {} steps'.format(current_step)
      logging.warning('Your dataset iterator ran out of data; '
                      'interrupting evaluation. ' + warning_msg)
      target_steps = current_step
      break
    for i, label in enumerate(model.metrics_names):
      if i == 0:
        # Loss is stateless metrics.
        outs[i] += batch_outs[label]
      else:
        # For all stateful metrics, the aggregation is handled by mirrored vars.
        # NOTE(review): the original listing is truncated at this point.
#.........这里部分代码省略.........
示例11: predict_loop
def predict_loop(model, inputs, verbose=0, steps=None):
  """Abstract method to loop over some data in batches.

  Arguments:
    model: Keras Model instance.
    inputs: list of tensors to be fed to `f`.
    verbose: verbosity mode.
    steps: Total number of steps (batches of samples)
        before declaring `_predict_loop` finished.
        Ignored with the default value of `None`.

  Returns:
    Array of predictions (if the model has a single output)
    or list of arrays of predictions
    (if the model has multiple outputs).
  """
  current_strategy = model._distribution_strategy

  def _per_device_predict_function(model):
    # Build the per-device predict function and expose its pieces so they
    # can be merged across towers.
    model._make_predict_function()
    fn = model.predict_function
    return (fn.inputs, fn.outputs, fn.updates_op, fn.session_kwargs)

  with current_strategy.scope():
    (grouped_inputs, grouped_outputs, grouped_updates,
     grouped_session_args) = current_strategy.call_for_each_tower(
         _per_device_predict_function, model._grouped_model)
    (all_inputs, all_outputs, all_updates,
     all_session_args) = distributed_training_utils.unwrap_values(
         current_strategy, grouped_inputs, grouped_outputs, grouped_updates,
         grouped_session_args)
    dataset_inputs = distributed_training_utils.flatten_perdevice_values(
        current_strategy, inputs)

  distributed_predict_function = K.Function(
      all_inputs, all_outputs,
      updates=all_updates,
      name='distributed_predict_function',
      **all_session_args)

  # Append the learning-phase flag (0 = inference) unless it is a constant.
  needs_phase_flag = (model.uses_learning_phase and
                      not isinstance(K.learning_phase(), int))
  ins = dataset_inputs + [0] if needs_phase_flag else dataset_inputs

  if verbose == 1:
    progbar = Progbar(target=steps)

  # Push the weights of the original model into every replicated model.
  orig_model_weights = model.get_weights()
  with current_strategy.scope():
    distributed_model = current_strategy.unwrap(model._grouped_model)[0]
    distributed_training_utils.set_weights(
        current_strategy, distributed_model, orig_model_weights)

  if steps is not None:
    # The total sample count is unknown ahead of time, so accumulate one
    # list of arrays per model output and concatenate when done.
    collected = []
    for step in range(steps):
      batch_outs = distributed_predict_function(ins)
      if not isinstance(batch_outs, list):
        batch_outs = [batch_outs]
      if step == 0:
        collected = [[] for _ in batch_outs]
      for out_index, batch_out in enumerate(batch_outs):
        collected[out_index].append(batch_out)
      if verbose == 1:
        progbar.update(step + 1)
    if len(collected) == 1:
      return np.concatenate(collected[0], axis=0)
    return [np.concatenate(per_output, axis=0) for per_output in collected]
示例12: test_loop
def test_loop(model, inputs, targets, verbose=0, steps=None):
  """evaluate method to validate a model that uses DistributionStrategy.

  Arguments:
    model: Keras Model instance.
    inputs: List of input arrays.
    targets: List of target arrays.
    verbose: verbosity mode.
    steps: Total number of steps (batches of samples)
        before declaring predictions finished.
        Ignored with the default value of `None`.

  Returns:
    Scalar loss (if the model has a single output and no metrics)
    or list of scalars (if the model has multiple outputs
    and/or metrics). The attribute `model.metrics_names` will give you
    the display labels for the scalar outputs.
  """
  current_strategy = model._distribution_strategy

  def _per_device_test_function(model):
    # Build the per-device test function and expose its components so
    # they can be merged across towers.
    model._make_test_function()
    return (model.test_function.inputs,
            model.test_function.outputs,
            model.test_function.updates_op,
            model.test_function.session_kwargs)

  with current_strategy.scope():
    (grouped_inputs, grouped_outputs, grouped_updates,
     grouped_session_args) = current_strategy.call_for_each_tower(
         _per_device_test_function, model._grouped_model)
    (all_inputs, all_outputs, all_updates,
     all_session_args) = distributed_training_utils.unwrap_values(
         current_strategy, grouped_inputs, grouped_outputs, grouped_updates,
         grouped_session_args, with_loss_tensor=True)
    dataset_inputs = distributed_training_utils.flatten_perdevice_values(
        current_strategy, inputs)
    dataset_targets = distributed_training_utils.flatten_perdevice_values(
        current_strategy, targets)

  distributed_test_function = K.Function(
      all_inputs, all_outputs,
      updates=all_updates,
      name='distributed_test_function',
      **all_session_args)

  # We need to set sample_weights to None since there are sample weight
  # placeholders that are created with default values.
  sample_weights = [None for _ in range(len(model.outputs) *
                                        current_strategy.num_towers)]
  if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
    ins = dataset_inputs + dataset_targets + sample_weights + [0]
  else:
    ins = dataset_inputs + dataset_targets

  outs = []
  if verbose == 1:
    progbar = Progbar(target=steps)

  # Copy the weights from the original model to each of the replicated models.
  orig_model_weights = model.get_weights()
  with current_strategy.scope():
    distributed_model = current_strategy.unwrap(model._grouped_model)[0]
    distributed_training_utils.set_weights(
        current_strategy, distributed_model, orig_model_weights)

  if steps is not None:
    for step in range(steps):
      batch_outs = distributed_test_function(ins)
      batch_outs = _aggregate_metrics_across_towers(
          len(current_strategy._devices), model.metrics_names, batch_outs)
      if isinstance(batch_outs, list):
        if step == 0:
          # One running total per metric (was a pointless `enumerate` loop).
          outs = [0.] * len(batch_outs)
        for i, batch_out in enumerate(batch_outs):
          outs[i] += batch_out
      else:
        if step == 0:
          outs.append(0.)
        outs[0] += batch_outs
      if verbose == 1:
        progbar.update(step + 1)
    # Average the accumulated totals over the number of steps.
    for i in range(len(outs)):
      outs[i] /= steps
  if len(outs) == 1:
    return outs[0]
  return outs
示例13: experimental_tpu_test_loop
def experimental_tpu_test_loop(model,
dataset,
verbose=0,
steps=None,
callbacks=None):
"""Test loop for evaluating with TPU DistributionStrategy.
Arguments:
model: Keras Model instance.
dataset: Dataset for input data.
verbose: Integer, Verbosity mode 0 or 1.
steps: Total number of steps (batches of samples)
before declaring predictions finished.
Ignored with the default value of `None`.
callbacks: List of callbacks to be called during training
Returns:
Scalar loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the outputs.
"""
mode = ModeKeys.TEST
current_strategy = model._distribution_strategy
iterator = distributed_training_utils.get_iterator(dataset, current_strategy)
scope = distributed_training_utils.distributed_scope(
strategy=current_strategy, learning_phase=0)
scope.__enter__()
def _per_device_eval_function(model):
model._make_eval_function()
return (model._eval_function.inputs, model._eval_function.outputs,
model._eval_function.updates_op,
model._eval_function.session_kwargs)
def step_fn(ctx, inputs):
  """Clones the model onto each replica and builds its eval function.

  Runs once per strategy step: builds (or clones) the distributed model,
  calls `_per_device_eval_function` on every replica, merges the per-replica
  tensors into a single Keras backend function, and registers each metric
  output on `ctx` with an appropriate cross-replica reduction.

  NOTE(review): relies on closure state from the enclosing test loop
  (`model`, `current_strategy`, `mode`, `_per_device_eval_function`) —
  confirm against the enclosing `experimental_tpu_test_loop`.
  """
  # `inputs` arrives as a (features, labels) pair from the dataset iterator.
  inputs, targets = inputs
  # Materialize the model on every replica: either clone it (when the
  # compile happened under the distribution strategy) or wire up the
  # already-built network for distributed execution.
  if model._compile_distribution:
    distributed_training_utils.clone_model_on_replicas(
        model, current_strategy, mode=mode, inputs=inputs, targets=targets)
  else:
    distributed_training_utils._build_distributed_network(
        model, current_strategy, mode, inputs, targets)
  # Collect the eval-function pieces (inputs/outputs/updates/session kwargs)
  # from every replica as per-replica grouped values.
  (grouped_inputs, grouped_outputs, grouped_updates,
   grouped_session_args) = current_strategy.extended.call_for_each_replica(
       _per_device_eval_function,
       args=(distributed_training_utils.get_distributed_model(
           model, ModeKeys.TEST),))
  # Flatten the grouped per-replica values into plain lists usable by a
  # single backend function.
  (all_inputs, all_outputs, all_updates,
   all_session_args) = distributed_training_utils.unwrap_values(
       current_strategy, grouped_inputs, grouped_outputs, grouped_updates,
       grouped_session_args)
  combined_fn = K.function(
      all_inputs, all_outputs,
      updates=all_updates,
      name='distributed_test_function',
      **all_session_args)
  # Register each output with the reduction used to combine it across
  # replicas: losses are summed, every other metric is averaged.
  for label, output in zip(model.metrics_names, combined_fn.outputs):
    if label == 'loss':
      reduce_op = ds_reduce_util.ReduceOp.SUM
    else:
      # We reduce all other metrics using mean for now. This is temporary
      # workaround until new metrics are in place.
      reduce_op = ds_reduce_util.ReduceOp.MEAN
    ctx.set_last_step_output(label, output, reduce_op)
  # The returned op is what `experimental_run_steps_on_iterator` executes
  # each step.
  return combined_fn.updates_op
# Add initial dummy values for loss and other metric tensors.
initial_loop_values = {}
initial_loop_values['loss'] = constant_op.constant(1e7)
for name in model.metrics_names[1:]:
tensor = model._all_stateful_metrics_tensors[name]
initial_loop_values[name] = array_ops.zeros(tensor.shape, tensor.dtype)
# TODO(priyag): Use steps_per_run when we use new metrics as they will
# allow handling metric computation at each step using variables.
ctx = current_strategy.extended.experimental_run_steps_on_iterator(
step_fn, iterator, iterations=1,
initial_loop_values=initial_loop_values)
test_op = ctx.run_op
output_tensors = ctx.last_step_outputs
if verbose == 1:
progbar = Progbar(target=steps)
if model._compile_distribution:
distributed_training_utils._copy_weights_to_distributed_model(model, mode)
distributed_training_utils._reset_metrics(model)
callbacks = cbks.configure_callbacks(
callbacks,
model,
#.........这里部分代码省略.........
示例14: test_loop
def test_loop(model,
inputs,
targets,
sample_weights=None,
batch_size=None,
verbose=0,
steps=None):
"""Abstract method to loop over some data in batches.
Arguments:
model: Keras Model instance.
inputs: List of input arrays.
targets: List of target arrays.
sample_weights: Optional list of sample weight arrays.
batch_size: integer batch size or `None`.
verbose: verbosity mode.
steps: Total number of steps (batches of samples)
before declaring predictions finished.
Ignored with the default value of `None`.
Returns:
Scalar loss (if the model has a single output and no metrics)
or list of scalars (if the model has multiple outputs
and/or metrics). The attribute `model.metrics_names` will give you
the display labels for the scalar outputs.
"""
model._make_test_function()
f = model.test_function
sample_weights = sample_weights or []
if model.uses_learning_phase and not isinstance(K.learning_phase(), int):
ins = inputs + targets + sample_weights + [0]
else:
ins = inputs + targets + sample_weights
if hasattr(model, 'metrics'):
for m in model.stateful_metric_functions:
m.reset_states()
stateful_metric_indices = [
i for i, name in enumerate(model.metrics_names)
if str(name) in model.stateful_metric_names
]
else:
stateful_metric_indices = []
num_samples = training_utils.check_num_samples(
ins, batch_size, steps, 'steps')
outs = []
if verbose == 1:
if steps is not None:
progbar = Progbar(target=steps)
else:
progbar = Progbar(target=num_samples)
# To prevent a slowdown, we find beforehand the arrays that need conversion.
feed = model._feed_inputs + model._feed_targets + model._feed_sample_weights
indices_for_conversion_to_dense = []
for i in range(len(feed)):
if issparse is not None and issparse(ins[i]) and not K.is_sparse(feed[i]):
indices_for_conversion_to_dense.append(i)
if steps is not None:
for step in range(steps):
batch_outs = f(ins)
if isinstance(batch_outs, list):
if step == 0:
for _ in enumerate(batch_outs):
outs.append(0.)
for i, batch_out in enumerate(batch_outs):
if i in stateful_metric_indices:
outs[i] = batch_out
else:
outs[i] += batch_out
else:
if step == 0:
outs.append(0.)
outs[0] += batch_outs
if verbose == 1:
progbar.update(step + 1)
for i in range(len(outs)):
if i not in stateful_metric_indices:
outs[i] /= steps
else:
batches = make_batches(num_samples, batch_size)
index_array = np.arange(num_samples)
for batch_index, (batch_start, batch_end) in enumerate(batches):
batch_ids = index_array[batch_start:batch_end]
if isinstance(ins[-1], int):
# Do not slice the training phase flag.
ins_batch = slice_arrays(ins[:-1], batch_ids) + [ins[-1]]
else:
ins_batch = slice_arrays(ins, batch_ids)
for i in indices_for_conversion_to_dense:
ins_batch[i] = ins_batch[i].toarray()
batch_outs = f(ins_batch)
if isinstance(batch_outs, list):
if batch_index == 0:
outs.extend([0.] * len(batch_outs))
#.........这里部分代码省略.........
示例15: experimental_tpu_predict_loop
#.........这里部分代码省略.........
"""Clones the model and calls make_predict_function."""
if model._compile_distribution:
distributed_training_utils.clone_model_on_replicas(
model, current_strategy, mode, inputs=inputs)
else:
distributed_training_utils._build_distributed_network(
model, current_strategy, mode, inputs)
(grouped_inputs, grouped_outputs, grouped_updates,
grouped_session_args) = current_strategy.extended.call_for_each_replica(
_per_device_predict_function,
args=(distributed_training_utils.get_distributed_model(
model, ModeKeys.PREDICT),))
(all_inputs, all_outputs, all_updates,
all_session_args) = distributed_training_utils.unwrap_values(
current_strategy, grouped_inputs, grouped_outputs, grouped_updates,
grouped_session_args)
combined_fn = K.function(
all_inputs, all_outputs,
updates=all_updates,
name='distributed_predict_function',
**all_session_args)
for label, output in zip(model.output_names, combined_fn.outputs):
ctx.set_last_step_output(label, output)
return combined_fn.updates_op
# Add initial dummy values for outputs.
initial_loop_values = {}
batch_dimension = distributed_training_utils.get_batch_dimension(iterator)
for name, tensor in zip(model.output_names, model.outputs):
# TODO(priyag): This is a workaround as we do not know the batch dimension
# of the model's output at this point.
shape = tensor_shape.TensorShape(tensor.shape.dims)
shape.dims = [batch_dimension] + shape.dims[1:]
initial_loop_values[name] = array_ops.zeros(shape, tensor.dtype)
# TODO(priyag, sourabhbajaj): Support steps_per_run if/when we add outfeed.
ctx = current_strategy.extended.experimental_run_steps_on_iterator(
step_fn, iterator, iterations=1,
initial_loop_values=initial_loop_values)
predict_op = ctx.run_op
output_tensors = ctx.last_step_outputs
if verbose == 1:
progbar = Progbar(target=steps)
if model._compile_distribution:
distributed_training_utils._copy_weights_to_distributed_model(model, mode)
distributed_training_utils._reset_metrics(model)
callbacks = cbks.configure_callbacks(
callbacks,
model,
do_validation=False,
epochs=1,
steps_per_epoch=steps,
verbose=verbose,
count_mode='steps',
mode=mode)
callbacks._call_begin_hook(mode)
assert steps is not None
# Since we do not know how many samples we will see, we cannot pre-allocate
# the returned Numpy arrays. Instead, we store one array per batch seen
# and concatenate them upon returning.
unconcatenated_outs = [[] for _ in model.outputs]
for step in range(steps):
batch_logs = {'batch': step, 'size': 1}
callbacks._call_batch_hook(mode, 'begin', step, batch_logs)
_, batch_outs = K.get_session().run([predict_op, output_tensors])
# TODO(priyag): maybe need to unwrap the outputs first for MirroredStrategy.
for i, label in enumerate(model.output_names):
unconcatenated_outs[i].extend(batch_outs[label])
batch_logs = cbks.make_logs(model, batch_logs, batch_outs, mode)
callbacks._call_batch_hook(mode, 'end', step, batch_logs)
if verbose >= 1:
progbar.update(step + 1)
callbacks._call_end_hook(mode)
scope.__exit__(None, None, None)
if len(unconcatenated_outs) == 1:
prediction_result = np.concatenate(unconcatenated_outs[0], axis=0)
else:
prediction_result = [
np.concatenate(unconcatenated_outs[i], axis=0)
for i in range(len(unconcatenated_outs))
]
if padding_handler:
prediction_result = padding_handler.apply_mask(prediction_result)
return prediction_result