This page collects typical usage examples of the Python function tensorflow.python.keras.backend.variable. If you are stuck on questions such as how exactly to use the variable function in Python, or what it looks like in real code, the curated examples below should help.
The following presents 15 code examples of the variable function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
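Before diving into the collected examples, here is a minimal orientation sketch (the array values and the name 'demo_var' are illustrative, not taken from the examples below): K.variable wraps a numpy array, list, or scalar in a backend variable, K.eval reads its current value back as a numpy array, and K.set_value updates it in place.

import numpy as np
from tensorflow.python.keras import backend as K

# Wrap a numpy array in a backend variable; dtype and name are optional.
v = K.variable(np.ones((2, 3)), dtype='float32', name='demo_var')

# Read the current value back as a numpy array.
print(K.eval(v))

# Update the variable in place.
K.set_value(v, np.zeros((2, 3)))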
Example 1: test_merge_subtract
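This test of the Subtract merge layer uses K.variable to wrap concrete numpy arrays as backend tensors, so that compute_mask can be run on real values and evaluated with K.eval.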
def test_merge_subtract(self):
  i1 = keras.layers.Input(shape=(4, 5))
  i2 = keras.layers.Input(shape=(4, 5))
  i3 = keras.layers.Input(shape=(4, 5))
  subtract_layer = keras.layers.Subtract()
  o = subtract_layer([i1, i2])
  self.assertListEqual(o.shape.as_list(), [None, 4, 5])

  model = keras.models.Model([i1, i2], o)
  model.run_eagerly = testing_utils.should_run_eagerly()
  x1 = np.random.random((2, 4, 5))
  x2 = np.random.random((2, 4, 5))
  out = model.predict([x1, x2])
  self.assertEqual(out.shape, (2, 4, 5))
  self.assertAllClose(out, x1 - x2, atol=1e-4)

  self.assertEqual(subtract_layer.compute_mask([i1, i2], [None, None]), None)
  self.assertTrue(
      np.all(
          K.eval(
              subtract_layer.compute_mask(
                  [i1, i2], [K.variable(x1), K.variable(x2)]))))

  with self.assertRaisesRegexp(ValueError, "`mask` should be a list."):
    subtract_layer.compute_mask([i1, i2], x1)
  with self.assertRaisesRegexp(ValueError, "`inputs` should be a list."):
    subtract_layer.compute_mask(i1, [None, None])
  with self.assertRaisesRegexp(ValueError,
                               "layer should be called on exactly 2 inputs"):
    subtract_layer([i1, i2, i3])
  with self.assertRaisesRegexp(ValueError,
                               "layer should be called on exactly 2 inputs"):
    subtract_layer([i1])
Example 2: test_metrics
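A compact metrics test: K.variable turns random numpy matrices into backend tensors that the metric functions accept, and K.eval pulls the result back for shape checks.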
def test_metrics(self):
  with self.test_session():
    y_a = K.variable(np.random.random((6, 7)))
    y_b = K.variable(np.random.random((6, 7)))
    for metric in [metrics.binary_accuracy, metrics.categorical_accuracy]:
      output = metric(y_a, y_b)
      self.assertEqual(K.eval(output).shape, (6,))
Example 3: test_merge_concatenate
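The same pattern as Example 1, applied to the Concatenate merge layer; K.variable again supplies concrete mask tensors for compute_mask.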
def test_merge_concatenate(self):
  i1 = keras.layers.Input(shape=(4, 5))
  i2 = keras.layers.Input(shape=(4, 5))
  concat_layer = keras.layers.Concatenate(axis=1)
  o = concat_layer([i1, i2])
  self.assertListEqual(o.shape.as_list(), [None, 8, 5])

  model = keras.models.Model([i1, i2], o)
  model.run_eagerly = testing_utils.should_run_eagerly()
  x1 = np.random.random((2, 4, 5))
  x2 = np.random.random((2, 4, 5))
  out = model.predict([x1, x2])
  self.assertEqual(out.shape, (2, 8, 5))
  self.assertAllClose(out, np.concatenate([x1, x2], axis=1), atol=1e-4)

  self.assertEqual(concat_layer.compute_mask([i1, i2], [None, None]), None)
  self.assertTrue(
      np.all(
          K.eval(
              concat_layer.compute_mask(
                  [i1, i2], [K.variable(x1), K.variable(x2)]))))

  with self.assertRaisesRegexp(ValueError, "`mask` should be a list."):
    concat_layer.compute_mask([i1, i2], x1)
  with self.assertRaisesRegexp(ValueError, "`inputs` should be a list."):
    concat_layer.compute_mask(i1, [None, None])
  with self.assertRaisesRegexp(ValueError, "should have the same length"):
    concat_layer.compute_mask([i1, i2], [None])
  with self.assertRaisesRegexp(ValueError,
                               "layer should be called on a list of inputs"):
    concat_layer(i1)
Example 4: test_merge_add
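As in the previous merge-layer tests, K.variable wraps numpy arrays so the Add layer's mask computation can be evaluated; note that Add accepts more than two inputs.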
def test_merge_add(self):
  i1 = keras.layers.Input(shape=(4, 5))
  i2 = keras.layers.Input(shape=(4, 5))
  i3 = keras.layers.Input(shape=(4, 5))
  add_layer = keras.layers.Add()
  o = add_layer([i1, i2, i3])
  self.assertListEqual(o.shape.as_list(), [None, 4, 5])

  model = keras.models.Model([i1, i2, i3], o)
  model.run_eagerly = testing_utils.should_run_eagerly()
  x1 = np.random.random((2, 4, 5))
  x2 = np.random.random((2, 4, 5))
  x3 = np.random.random((2, 4, 5))
  out = model.predict([x1, x2, x3])
  self.assertEqual(out.shape, (2, 4, 5))
  self.assertAllClose(out, x1 + x2 + x3, atol=1e-4)

  self.assertEqual(
      add_layer.compute_mask([i1, i2, i3], [None, None, None]), None)
  self.assertTrue(
      np.all(
          K.eval(
              add_layer.compute_mask(
                  [i1, i2], [K.variable(x1), K.variable(x2)]))))

  with self.assertRaisesRegexp(ValueError, "`mask` should be a list."):
    add_layer.compute_mask([i1, i2, i3], x1)
  with self.assertRaisesRegexp(ValueError, "`inputs` should be a list."):
    add_layer.compute_mask(i1, [None, None, None])
  with self.assertRaisesRegexp(ValueError, " should have the same length."):
    add_layer.compute_mask([i1, i2, i3], [None, None])
Example 5: test_sparse_top_k_categorical_accuracy
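Predictions and sparse labels are wrapped with K.variable so sparse_top_k_categorical_accuracy can be checked for several values of k and for both supported shapes of y_true.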
def test_sparse_top_k_categorical_accuracy(self):
  with self.cached_session():
    # Test correctness if the shape of y_true is (num_samples, 1).
    y_pred = K.variable(np.array([[0.3, 0.2, 0.1], [0.1, 0.2, 0.7]]))
    y_true = K.variable(np.array([[1], [0]]))
    result = K.eval(
        metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=3))
    self.assertEqual(result, 1)
    result = K.eval(
        metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=2))
    self.assertEqual(result, 0.5)
    result = K.eval(
        metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=1))
    self.assertEqual(result, 0.)

    # Test correctness if the shape of y_true is (num_samples,).
    y_pred = K.variable(np.array([[0.3, 0.2, 0.1], [0.1, 0.2, 0.7]]))
    y_true = K.variable(np.array([1, 0]))
    result = K.eval(
        metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=3))
    self.assertEqual(result, 1)
    result = K.eval(
        metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=2))
    self.assertEqual(result, 0.5)
    result = K.eval(
        metrics.sparse_top_k_categorical_accuracy(y_true, y_pred, k=1))
    self.assertEqual(result, 0.)
Example 6: offset_sep_conv2d_eval
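Here K.variable(value=..., dtype='float32') builds constant depthwise and pointwise kernels that are fed directly to K.separable_conv2d.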
def offset_sep_conv2d_eval(depth, padding, x):
  """Perform a separable conv2d on x with a given padding."""
  depthwise_kernel = K.variable(value=np.array([[[[1]] * depth]]),
                                dtype='float32')
  pointwise_kernel = K.variable(value=np.array([[[[1]] + [[0]] * (depth - 1)]]),
                                dtype='float32')
  return K.separable_conv2d(x, depthwise_kernel, pointwise_kernel,
                            strides=(3, 3), padding=padding)
Example 7: __init__
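The classic Keras optimizer pattern: SGD stores its iteration counter and hyperparameters (lr, momentum, decay) as backend variables so they can be read and updated during training.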
def __init__(self, lr=0.01, momentum=0., decay=0., nesterov=False, **kwargs):
  super(SGD, self).__init__(**kwargs)
  with K.name_scope(self.__class__.__name__):
    self.iterations = K.variable(0, dtype='int64', name='iterations')
    self.lr = K.variable(lr, name='lr')
    self.momentum = K.variable(momentum, name='momentum')
    self.decay = K.variable(decay, name='decay')
  self.initial_decay = decay
  self.nesterov = nesterov
Example 8: test_top_k_categorical_accuracy
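Like Example 5, but for the dense-label variant top_k_categorical_accuracy; K.variable wraps both the one-hot labels and the predictions.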
def test_top_k_categorical_accuracy(self):
  with self.test_session():
    y_pred = K.variable(np.array([[0.3, 0.2, 0.1], [0.1, 0.2, 0.7]]))
    y_true = K.variable(np.array([[0, 1, 0], [1, 0, 0]]))
    result = K.eval(metrics.top_k_categorical_accuracy(y_true, y_pred, k=3))
    self.assertEqual(result, 1)
    result = K.eval(metrics.top_k_categorical_accuracy(y_true, y_pred, k=2))
    self.assertEqual(result, 0.5)
    result = K.eval(metrics.top_k_categorical_accuracy(y_true, y_pred, k=1))
    self.assertEqual(result, 0.)
Example 9: __init__
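An optimizer wrapper creates its global step with K.variable(0, dtype='int64') unless an existing iterations variable is supplied, then registers it for checkpointing.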
def __init__(self, optimizer, iterations=None):  # pylint: disable=super-init-not-called
  self.optimizer = optimizer
  self._track_checkpointable(optimizer, name='optimizer')
  if iterations is None:
    with K.name_scope(self.__class__.__name__):
      self.iterations = K.variable(0, dtype='int64', name='iterations')
  else:
    self.iterations = iterations
  self._track_checkpointable(self.iterations, name='global_step')
Example 10: check_operation_offset
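This helper feeds a K.variable-wrapped input through an operation (via the eval_f callback) and inspects the flattened output to detect whether the backend offset the filter placement; a hypothetical pairing with Example 6 is sketched after the function.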
def check_operation_offset(depth, eval_f, padding):
  """Check if the backend used an offset while placing the filter,
  e.g. during a convolution.

  TensorFlow is inconsistent in doing so depending
  on the type of operation, the used device (CPU/GPU) and the input depth.
  """
  in_arr = np.array([[[[i] * depth for i in range(6)]]])
  input_data = K.variable(value=in_arr, dtype='float32')
  output = eval_f(depth, padding, input_data)
  result = K.eval(output).flatten().tolist()
  assert result in [[0, 3], [1, 4]]
  return result == [1, 4]
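Since eval_f has exactly the signature of offset_sep_conv2d_eval from Example 6, the two plausibly combine as follows (a sketch assuming both helpers live in the same test module; this call is not shown in the original source):

# Hypothetical pairing of Examples 6 and 10: returns True if the backend
# offset the separable-conv filter for a depth-8 'same'-padded input.
used_offset = check_operation_offset(8, offset_sep_conv2d_eval, 'same')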
Example 11: test_sparse_categorical_accuracy
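K.variable also accepts plain Python lists; this test wraps both random tensors and hand-written labels/predictions to check sparse_categorical_accuracy for y_true of shape (num_samples,) and (num_samples, 1).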
def test_sparse_categorical_accuracy(self):
  with self.cached_session():
    metric = metrics.sparse_categorical_accuracy
    y_true = K.variable(np.random.randint(0, 7, (6,)))
    y_pred = K.variable(np.random.random((6, 7)))
    self.assertEqual(K.eval(metric(y_true, y_pred)).shape, (6,))

    # Test correctness if the shape of y_true is (num_samples,).
    y_true = K.variable([1., 0., 0., 0.])
    y_pred = K.variable([[0.8, 0.2], [0.6, 0.4], [0.7, 0.3], [0.9, 0.1]])
    print(K.eval(metric(y_true, y_pred)))
    self.assertAllEqual(K.eval(metric(y_true, y_pred)), [0., 1., 1., 1.])

    # Test correctness if the shape of y_true is (num_samples, 1).
    y_true = K.variable([[1.], [0.], [0.], [0.]])
    y_pred = K.variable([[0.8, 0.2], [0.6, 0.4], [0.7, 0.3], [0.9, 0.1]])
    print(K.eval(metric(y_true, y_pred)))
    self.assertAllEqual(K.eval(metric(y_true, y_pred)), [0., 1., 1., 1.])
Example 12: test_sparse_categorical_accuracy_float
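A variant of Example 11 confirming that float-valued y_true labels are handled; only the output shape is checked here.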
def test_sparse_categorical_accuracy_float(self):
  with self.cached_session():
    metric = metrics.sparse_categorical_accuracy
    y_true = K.variable(np.random.random((6,)))
    y_pred = K.variable(np.random.random((6, 7)))
    self.assertEqual(K.eval(metric(y_true, y_pred)).shape, (6,))
Example 13: experimental_tpu_fit_loop
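In this TPU fit loop, K.variable creates the int32 steps_per_run variable that is passed as the iteration count to experimental_run_steps_on_iterator.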
def experimental_tpu_fit_loop(model,
                              dataset,
                              epochs=100,
                              verbose=1,
                              callbacks=None,
                              initial_epoch=0,
                              steps_per_epoch=None,
                              val_dataset=None,
                              validation_steps=None,
                              validation_freq=1):
  """Fit loop for training with TPU DistributionStrategy.

  Arguments:
      model: Keras Model instance.
      dataset: Dataset that returns inputs and targets.
      epochs: Number of times to iterate over the data.
      verbose: Integer. Verbosity mode, 0, 1 or 2.
      callbacks: List of callbacks to be called during training.
      initial_epoch: Epoch at which to start training
          (useful for resuming a previous training run).
      steps_per_epoch: Total number of steps (batches of samples)
          before declaring one epoch finished and starting the
          next epoch. Ignored with the default value of `None`.
      val_dataset: Dataset for validation data.
      validation_steps: Number of steps to run validation for
          (only if doing validation from data tensors).
          Ignored with the default value of `None`.
      validation_freq: Only relevant if validation data is provided. Integer or
          `collections.Container` instance (e.g. list, tuple, etc.). If an
          integer, specifies how many training epochs to run before a new
          validation run is performed, e.g. `validation_freq=2` runs
          validation every 2 epochs. If a Container, specifies the epochs on
          which to run validation, e.g. `validation_freq=[1, 2, 10]` runs
          validation at the end of the 1st, 2nd, and 10th epochs.

  Returns:
      Returns `None`.

  Raises:
      ValueError: in case of invalid arguments.
  """
  mode = ModeKeys.TRAIN
  # TODO(fchollet): add support for `steps_per_epoch=None` in TPU loops.
  current_strategy = model._distribution_strategy
  iterator = distributed_training_utils.get_iterator(dataset, current_strategy)
  steps_per_epoch = training_utils.infer_steps_for_dataset(
      dataset, steps_per_epoch, epochs, steps_name='steps_per_epoch')
  if (current_strategy.extended.steps_per_run != 1 and
      steps_per_epoch is None):
    raise ValueError('`steps_per_epoch` should be specified when calling '
                     '`fit` on the model with TPUStrategy when '
                     '`steps_per_run` != 1 .')

  scope = distributed_training_utils.distributed_scope(
      strategy=current_strategy, learning_phase=1)
  scope.__enter__()

  out_labels = model.metrics_names or []

  step_fn = _make_step_fn(model, ModeKeys.TRAIN, current_strategy, out_labels)

  # Add initial dummy values for loss and other metric tensors.
  initial_loop_values = {}
  initial_loop_values['loss'] = constant_op.constant(1e7)
  for name in model.metrics_names[1:]:
    tensor = model._all_stateful_metrics_tensors[name]
    initial_loop_values[name] = array_ops.zeros(tensor.shape, tensor.dtype)

  use_steps = steps_per_epoch is not None
  if use_steps:
    iteration_value = min(steps_per_epoch,
                          current_strategy.extended.steps_per_run)
  else:
    iteration_value = current_strategy.extended.steps_per_run

  steps_per_run = K.variable(
      value=iteration_value,
      dtype='int32',
      name='steps_per_run')
  ctx = current_strategy.extended.experimental_run_steps_on_iterator(
      step_fn, iterator, iterations=steps_per_run,
      initial_loop_values=initial_loop_values)
  train_op = ctx.run_op
  output_tensors = ctx.last_step_outputs

  do_validation = bool(validation_steps)

  if model._compile_distribution:
    distributed_training_utils._copy_weights_to_distributed_model(model, mode)

  callbacks = cbks.configure_callbacks(
      callbacks,
      model,
      do_validation=do_validation,
      epochs=epochs,
      steps_per_epoch=steps_per_epoch,
      verbose=verbose,
      count_mode='steps',
      mode=mode)
#.........(the rest of this function's code is omitted).........
Example 14: test_dynamic_loss_scaling
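backend.variable appears twice in this mixed-precision test: once holding the expected float16 gradient, and once as a boolean flag that backend.set_value flips mid-test to inject NaN gradients.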
def test_dynamic_loss_scaling(self, strategy_fn, cloning=True):
  strategy = strategy_fn()
  initial_loss_scale = 2.
  batch_size = 4
  expected_gradient = backend.variable([initial_loss_scale / batch_size],
                                       dtype=dtypes.float16)
  # If this variable is set to True, the model below will have NaN gradients.
  have_nan_gradients = backend.variable(False, dtype=dtypes.bool)
  with strategy.scope():
    with policy.policy_scope(policy.Policy('infer_float32_vars')):
      x = layers.Input(shape=(1,), batch_size=batch_size,
                       dtype=dtypes.float16)
      layer = AddLayer(assert_type=dtypes.float16)
      y = layer(x)
      identity_with_nan_grads = (
          mp_test_util.create_identity_with_nan_gradients_fn(
              have_nan_gradients))
      y = core.Lambda(identity_with_nan_grads)(y)
      identity_with_grad_check_fn = (
          mp_test_util.create_identity_with_grad_check_fn(
              expected_dtype=dtypes.float16,
              expected_gradient=expected_gradient))
      y = core.Lambda(identity_with_grad_check_fn)(y)
      y = math_ops.cast(y, dtypes.float32)
      model = models.Model(inputs=x, outputs=y)

      def loss_fn(y_true, y_pred):
        del y_true
        return math_ops.reduce_mean(y_pred)

      opt = gradient_descent.SGD(1.)
      loss_scale = loss_scale_module.DynamicLossScale(
          initial_loss_scale=initial_loss_scale, increment_period=2)
      opt = loss_scale_optimizer.LossScaleOptimizer(opt, loss_scale)
      model.compile(opt, loss=loss_fn, cloning=cloning)

  self.assertEqual(backend.eval(layer.v), 1)
  x = np.ones((batch_size, 1))
  y = np.ones((batch_size, 1))
  dataset = dataset_ops.Dataset.from_tensor_slices((x, y)).batch(batch_size)
  model.fit(dataset)
  # The variable starts at 1 and has a gradient of 1, so it will go down by 1
  # each step.
  self.assertEqual(backend.eval(layer.v), 0)

  model.fit(dataset)
  self.assertEqual(backend.eval(layer.v), -1)

  # There have been two steps without NaNs, so the loss scale will double.
  backend.set_value(expected_gradient,
                    backend.get_value(expected_gradient * 2))
  model.fit(dataset)
  self.assertEqual(backend.eval(layer.v), -2)

  # Next, test with NaN gradients.
  backend.set_value(have_nan_gradients, True)
  model.fit(dataset)
  # The variable should not be updated.
  self.assertEqual(backend.eval(layer.v), -2)

  # Test with finite gradients again.
  backend.set_value(have_nan_gradients, False)
  # The loss scale will be halved due to the NaNs, so the gradient will also
  # be halved.
  backend.set_value(expected_gradient,
                    backend.get_value(expected_gradient / 2))
  model.fit(dataset)
  self.assertEqual(backend.eval(layer.v), -3)
Example 15: experimental_tpu_fit_loop
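A fragment of another version of the TPU fit loop (its opening is omitted in the source); as in Example 13, K.variable builds the int32 steps_per_run variable. The indentation below assumes the first part is the tail of a nested step_fn helper.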
#.........(the beginning of this function's code is omitted).........
        current_strategy, grouped_inputs, grouped_outputs,
        grouped_updates, grouped_session_args)
    combined_fn = K.function(
        all_inputs,
        all_outputs,
        updates=all_updates,
        name='distributed_fit_function',
        **all_session_args)

    for label, output in zip(out_labels, combined_fn.outputs):
      if label == 'loss':
        reduce_op = ds_reduce_util.ReduceOp.SUM
      else:
        # We reduce all other metrics using mean for now. This is a temporary
        # workaround until new metrics are in place.
        reduce_op = ds_reduce_util.ReduceOp.MEAN
      ctx.set_last_step_output(label, output, reduce_op)

    # TODO(priyag, sourabhbajaj): Ignoring these things from the combined_fn:
    # feed_dict, session kwargs, run options, run_metadata for now. These
    # should be handled appropriately.
    return combined_fn.updates_op

  # Add initial dummy values for loss and other metric tensors.
  initial_loop_values = {}
  initial_loop_values['loss'] = constant_op.constant(1e7)
  for name in model.metrics_names[1:]:
    tensor = model._all_stateful_metrics_tensors[name]
    initial_loop_values[name] = array_ops.zeros(tensor.shape, tensor.dtype)

  if steps_per_epoch is None:
    raise ValueError('`steps_per_epoch` should be specified when calling '
                     '`fit` on the model.')
  steps_per_run = K.variable(
      value=min(steps_per_epoch, current_strategy.extended.steps_per_run),
      dtype='int32',
      name='steps_per_run')

  ctx = current_strategy.extended.experimental_run_steps_on_iterator(
      step_fn, iterator, iterations=steps_per_run,
      initial_loop_values=initial_loop_values)

  train_op = ctx.run_op
  output_tensors = ctx.last_step_outputs

  do_validation = bool(validation_steps)

  if model._compile_distribution:
    distributed_training_utils._copy_weights_to_distributed_model(model, mode)

  callbacks = cbks.configure_callbacks(
      callbacks,
      model,
      do_validation=do_validation,
      epochs=epochs,
      steps_per_epoch=steps_per_epoch,
      verbose=verbose,
      count_mode='steps',
      mode=mode)

  # Calculate the steps to run on the device each time.
  steps_to_run = [current_strategy.extended.steps_per_run] * (
      steps_per_epoch // current_strategy.extended.steps_per_run)
  if steps_per_epoch % current_strategy.extended.steps_per_run:
    steps_to_run.append(
        steps_per_epoch % current_strategy.extended.steps_per_run)