This article collects typical usage examples of the tensorflow.python.keras.backend.int_shape function in Python. If you are unsure what int_shape does, how to use it, or what real usage looks like, the curated examples below should help.
The following shows 15 code examples of the int_shape function, ordered by popularity.
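Before the examples, here is a minimal sketch of what int_shape itself returns (my own illustration, assuming TensorFlow 1.x graph mode, not code taken from the TensorFlow source): it reads the static shape of a tensor or variable as a Python tuple of ints, with None for dimensions that are unknown at graph-construction time.

# Minimal sketch of K.int_shape; assumes TensorFlow 1.x graph mode.
from tensorflow.python.keras import backend as K

x = K.placeholder(shape=(None, 32, 3))
print(K.int_shape(x))   # (None, 32, 3): the unknown batch dimension comes back as None

v = K.variable([[1.0, 2.0], [3.0, 4.0]])
print(K.int_shape(v))   # (2, 2): fully known static shape as a tuple of ints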
Example 1: compute_output_shape
def compute_output_shape(self, input_shape):
  input_shape = tuple(tensor_shape.TensorShape(input_shape).as_list())
  if self._output_shape is None:
    if context.executing_eagerly():
      raise NotImplementedError
    x = K.placeholder(shape=input_shape)
    x = self.call(x)
    if isinstance(x, list):
      return [tensor_shape.TensorShape(K.int_shape(x_elem)) for x_elem in x]
    else:
      return tensor_shape.TensorShape(K.int_shape(x))
  elif isinstance(self._output_shape, (tuple, list)):
    if isinstance(input_shape, list):
      num_samples = input_shape[0][0]
    else:
      num_samples = input_shape[0] if input_shape else None
    return tensor_shape.TensorShape((num_samples,) +
                                    tuple(self._output_shape))
  else:
    shape = self._output_shape(input_shape)
    if not isinstance(shape, (list, tuple)):
      raise ValueError(
          '`output_shape` function must return a tuple or a list of tuples.')
    if isinstance(shape, list):
      if isinstance(shape[0], int) or shape[0] is None:
        shape = tuple(shape)
    return tensor_shape.TensorShape(shape)
Example 2: set_model
def set_model(self, model):
  """Sets Keras model and creates summary ops."""
  self.model = model
  self.sess = K.get_session()
  # only make histogram summary op if it hasn't already been made
  if self.histogram_freq and self.merged is None:
    for layer in self.model.layers:
      for weight in layer.weights:
        mapped_weight_name = weight.name.replace(':', '_')
        tf_summary.histogram(mapped_weight_name, weight)
        if self.write_images:
          w_img = array_ops.squeeze(weight)
          shape = K.int_shape(w_img)
          if len(shape) == 2:  # dense layer kernel case
            if shape[0] > shape[1]:
              w_img = array_ops.transpose(w_img)
              shape = K.int_shape(w_img)
            w_img = array_ops.reshape(w_img, [1, shape[0], shape[1], 1])
          elif len(shape) == 3:  # convnet case
            if K.image_data_format() == 'channels_last':
              # switch to channels_first to display
              # every kernel as a separate image
              w_img = array_ops.transpose(w_img, perm=[2, 0, 1])
              shape = K.int_shape(w_img)
            w_img = array_ops.reshape(w_img,
                                      [shape[0], shape[1], shape[2], 1])
          elif len(shape) == 1:  # bias case
            w_img = array_ops.reshape(w_img, [1, shape[0], 1, 1])
          else:
            # not possible to handle 3D convnets etc.
            continue

          shape = K.int_shape(w_img)
          assert len(shape) == 4 and shape[-1] in [1, 3, 4]
          tf_summary.image(mapped_weight_name, w_img)

      if self.write_grads:
        for weight in layer.trainable_weights:
          mapped_weight_name = weight.name.replace(':', '_')
          grads = model.optimizer.get_gradients(model.total_loss, weight)

          def is_indexed_slices(grad):
            return type(grad).__name__ == 'IndexedSlices'

          grads = [grad.values if is_indexed_slices(grad) else grad
                   for grad in grads]
          tf_summary.histogram('{}_grad'.format(mapped_weight_name), grads)

      if hasattr(layer, 'output'):
        tf_summary.histogram('{}_out'.format(layer.name), layer.output)
  self.merged = tf_summary.merge_all()

  if self.write_graph:
    self.writer = self._writer_class(self.log_dir, self.sess.graph)
  else:
    self.writer = self._writer_class(self.log_dir)
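The callback's set_model runs when training starts, so from the user's side the int_shape-driven image reshaping above is triggered simply by passing the callback to fit. A hedged usage sketch (the toy model, data, and './logs' path are illustrative assumptions; TF 1.x tf.keras):

# Hedged usage sketch: driving set_model above through model.fit.
import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(4, input_shape=(8,))])
model.compile(optimizer='sgd', loss='mse')
tb = tf.keras.callbacks.TensorBoard(log_dir='./logs', histogram_freq=1,
                                    write_images=True)
model.fit(np.zeros((16, 8)), np.zeros((16, 4)), epochs=1, verbose=0,
          validation_data=(np.zeros((4, 8)), np.zeros((4, 4))),
          callbacks=[tb])  # set_model() is called inside fit()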
Example 3: sparse_categorical_accuracy
def sparse_categorical_accuracy(y_true, y_pred):
  # If the shape of y_true is (num_samples, 1), squeeze to (num_samples,)
  if (len(K.int_shape(y_true)) == len(K.int_shape(y_pred))):
    y_true = array_ops.squeeze(y_true, [-1])
  y_pred = math_ops.argmax(y_pred, axis=-1)

  # If the predicted output and actual output types don't match, force cast them
  # to match.
  if K.dtype(y_pred) != K.dtype(y_true):
    y_pred = math_ops.cast(y_pred, K.dtype(y_true))

  return math_ops.cast(math_ops.equal(y_true, y_pred), K.floatx())
Example 4: sparse_categorical_accuracy
def sparse_categorical_accuracy(y_true, y_pred):
  # If the shape of y_true is (num_samples, 1), squeeze to (num_samples,)
  if (len(K.int_shape(y_true)) == len(K.int_shape(y_pred))):
    y_true = array_ops.squeeze(y_true, [-1])
  y_pred = math_ops.argmax(y_pred, axis=-1)

  # If the expected labels are float, we need to cast the int returned by
  # argmax to compare.
  if K.dtype(y_true) == K.floatx():
    y_pred = math_ops.cast(y_pred, K.floatx())
  return math_ops.cast(math_ops.equal(y_true, y_pred), K.floatx())
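In both variants above, int_shape is only used to compare ranks: a label tensor of shape (num_samples, 1) has the same rank as the predictions and is squeezed before the argmax comparison. A hedged usage sketch of that case (the tensors are illustrative; TF 1.x graph mode assumed):

# Hedged sketch: (num_samples, 1) labels are squeezed before comparison.
from tensorflow.python.framework import constant_op
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.metrics import sparse_categorical_accuracy

y_true = constant_op.constant([[1.], [0.], [2.]])        # shape (3, 1)
y_pred = constant_op.constant([[0.1, 0.8, 0.1],
                               [0.7, 0.2, 0.1],
                               [0.2, 0.2, 0.6]])          # shape (3, 3)
acc = sparse_categorical_accuracy(y_true, y_pred)
print(K.eval(acc))  # expected [1. 1. 1.]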
Example 5: __call__
def __call__(self, inputs, initial_state=None, constants=None, **kwargs):
  inputs, initial_state, constants = _standardize_args(
      inputs, initial_state, constants, self._num_constants)

  if initial_state is None and constants is None:
    return super(ConvRNN2D, self).__call__(inputs, **kwargs)

  # If any of `initial_state` or `constants` are specified and are Keras
  # tensors, then add them to the inputs and temporarily modify the
  # input_spec to include them.
  additional_inputs = []
  additional_specs = []
  if initial_state is not None:
    kwargs['initial_state'] = initial_state
    additional_inputs += initial_state
    self.state_spec = []
    for state in initial_state:
      shape = K.int_shape(state)
      self.state_spec.append(InputSpec(shape=shape))
    additional_specs += self.state_spec
  if constants is not None:
    kwargs['constants'] = constants
    additional_inputs += constants
    self.constants_spec = [InputSpec(shape=K.int_shape(constant))
                           for constant in constants]
    self._num_constants = len(constants)
    additional_specs += self.constants_spec
  # at this point additional_inputs cannot be empty
  for tensor in additional_inputs:
    if K.is_keras_tensor(tensor) != K.is_keras_tensor(additional_inputs[0]):
      raise ValueError('The initial state or constants of an RNN'
                       ' layer cannot be specified with a mix of'
                       ' Keras tensors and non-Keras tensors')

  if K.is_keras_tensor(additional_inputs[0]):
    # Compute the full input spec, including state and constants
    full_input = [inputs] + additional_inputs
    full_input_spec = self.input_spec + additional_specs
    # Perform the call with temporarily replaced input_spec
    original_input_spec = self.input_spec
    self.input_spec = full_input_spec
    output = super(ConvRNN2D, self).__call__(full_input, **kwargs)
    self.input_spec = original_input_spec
    return output
  else:
    return super(ConvRNN2D, self).__call__(inputs, **kwargs)
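The int_shape calls above turn user-supplied initial states and constants into InputSpec entries so Keras can validate them. From the user's side this path is exercised by passing initial_state to a ConvLSTM2D layer; a hedged sketch (the input and state shapes are illustrative assumptions, TF 1.x graph mode):

# Hedged sketch: Keras-tensor initial states routed through ConvRNN2D.__call__.
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.layers import Input, ConvLSTM2D

x = Input(shape=(5, 16, 16, 3))    # (timesteps, rows, cols, channels)
h0 = Input(shape=(16, 16, 8))      # initial hidden state
c0 = Input(shape=(16, 16, 8))      # initial cell state
y = ConvLSTM2D(filters=8, kernel_size=3, padding='same')(x, initial_state=[h0, c0])
print(K.int_shape(y))              # expected (None, 16, 16, 8)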
Example 6: compute_output_shape
def compute_output_shape(self, input_shape):
  if self._output_shape is None:
    if context.executing_eagerly():
      # Make use of existing autocomputation for Eager mode but provide
      # Lambda-specific error message.
      try:
        return super(Lambda, self).compute_output_shape(input_shape)
      except NotImplementedError:
        raise NotImplementedError('We could not automatically infer '
                                  'the static shape of the Lambda\'s output.'
                                  ' Please specify the `output_shape` for'
                                  ' this Lambda.')
    if isinstance(input_shape, list):
      x = [K.placeholder(shape=shape) for shape in input_shape]
    else:
      x = K.placeholder(shape=input_shape)
    x = self.call(x)
    if isinstance(x, list):
      return [tensor_shape.TensorShape(K.int_shape(x_elem)) for x_elem in x]
    else:
      return tensor_shape.TensorShape(K.int_shape(x))
  elif isinstance(self._output_shape, (tuple, list)):
    if isinstance(input_shape, list):
      num_samples = input_shape[0][0]
    else:
      num_samples = input_shape[0] if input_shape else None
    # List here represents multiple outputs.
    if isinstance(self._output_shape, list):
      return [
          tensor_shape.TensorShape((num_samples,) + tuple(single_shape))
          for single_shape in self._output_shape
      ]
    return tensor_shape.TensorShape((num_samples,) + self._output_shape)
  else:
    shape = self._output_shape(input_shape)
    if not isinstance(shape, (list, tuple)):
      raise ValueError(
          '`output_shape` function must return a tuple or a list of tuples.')
    # List here can represent multiple outputs or single output.
    if isinstance(shape, list):
      # Convert list representing single output into a tuple.
      if isinstance(shape[0], (int, type(None))):
        shape = tuple(shape)
      else:
        return [
            tensor_shape.TensorShape(single_shape) for single_shape in shape
        ]
    return tensor_shape.TensorShape(shape)
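The interesting int_shape use here is the fallback branch: when no output_shape is given, the Lambda feeds a placeholder of the input shape through its function and reads the resulting static shape back with K.int_shape. A hedged sketch from the caller's side (the lambda and shapes are illustrative; TF 1.x graph mode assumed, since the eager branch above raises instead):

# Hedged sketch: Lambda output-shape inference via placeholder + int_shape.
from tensorflow.python.keras.layers import Lambda

halve_time = Lambda(lambda t: t[:, ::2, :])   # no output_shape supplied
print(halve_time.compute_output_shape((None, 10, 8)))
# expected: a TensorShape equivalent to (None, 5, 8)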
Example 7: get_updates
def get_updates(self, loss, params):
  grads = self.get_gradients(loss, params)
  shapes = [K.int_shape(p) for p in params]
  accumulators = [K.zeros(shape) for shape in shapes]
  self.weights = accumulators
  self.updates = [state_ops.assign_add(self.iterations, 1)]

  lr = self.lr
  if self.initial_decay > 0:
    lr = lr * (  # pylint: disable=g-no-augmented-assignment
        1. /
        (1. +
         self.decay * math_ops.cast(self.iterations, K.dtype(self.decay))))

  for p, g, a in zip(params, grads, accumulators):
    new_a = a + math_ops.square(g)  # update accumulator
    self.updates.append(state_ops.assign(a, new_a))
    new_p = p - lr * g / (K.sqrt(new_a) + self.epsilon)

    # Apply constraints.
    if getattr(p, 'constraint', None) is not None:
      new_p = p.constraint(new_p)

    self.updates.append(state_ops.assign(p, new_p))
  return self.updates
Example 8: _get_shape_tuple
def _get_shape_tuple(self, init_tuple, tensor, start_idx, int_shape=None):
  """Finds non-specific dimensions in the static shapes.

  The static shapes are replaced with the corresponding dynamic shapes of the
  tensor.

  Arguments:
    init_tuple: a tuple, the first part of the output shape
    tensor: the tensor from which to get the (static and dynamic) shapes
      as the last part of the output shape
    start_idx: int, which indicates the first dimension to take from
      the static shape of the tensor
    int_shape: an alternative static shape to take as the last part
      of the output shape

  Returns:
    The new int_shape with the first part from init_tuple
    and the last part from either `int_shape` (if provided)
    or `tensor.shape`, where every `None` is replaced by
    the corresponding dimension from `tf.shape(tensor)`.
  """
  # replace all None in int_shape by K.shape
  if int_shape is None:
    int_shape = K.int_shape(tensor)[start_idx:]
  if not any(not s for s in int_shape):
    return init_tuple + tuple(int_shape)
  shape = K.shape(tensor)
  int_shape = list(int_shape)
  for i, s in enumerate(int_shape):
    if not s:
      int_shape[i] = shape[start_idx + i]
  return init_tuple + tuple(int_shape)
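The helper mixes static and dynamic shape information: known dimensions come from K.int_shape, and every None is replaced by the matching entry of K.shape(tensor), giving a shape usable with reshape. A standalone sketch of the same idea (my own illustration, not the TimeDistributed code path; TF 1.x graph mode assumed):

# Standalone sketch: swap unknown static dims for dynamic ones.
from tensorflow.python.keras import backend as K
from tensorflow.python.ops import array_ops

x = K.placeholder(shape=(None, 10, None, 3))
static = K.int_shape(x)[1:]      # (10, None, 3)
dynamic = array_ops.shape(x)     # int32 tensor holding the runtime shape

mixed = [static[i] if static[i] is not None else dynamic[1 + i]
         for i in range(len(static))]
target_shape = (-1,) + tuple(mixed)
y = array_ops.reshape(x, target_shape)  # mixed ints/tensors are packed into one shape tensor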
Example 9: get_updates
def get_updates(self, loss, params):
  grads = self.get_gradients(loss, params)
  self.updates = [state_ops.assign_add(self.iterations, 1)]

  lr = self.lr
  if self.initial_decay > 0:
    lr = lr * (  # pylint: disable=g-no-augmented-assignment
        1. / (1. + self.decay * math_ops.cast(self.iterations,
                                              K.dtype(self.decay))))
  # momentum
  shapes = [K.int_shape(p) for p in params]
  moments = [K.zeros(shape) for shape in shapes]
  self.weights = [self.iterations] + moments
  for p, g, m in zip(params, grads, moments):
    v = self.momentum * m - lr * g  # velocity
    self.updates.append(state_ops.assign(m, v))

    if self.nesterov:
      new_p = p + self.momentum * v - lr * g
    else:
      new_p = p + v

    # Apply constraints.
    if getattr(p, 'constraint', None) is not None:
      new_p = p.constraint(new_p)

    self.updates.append(state_ops.assign(p, new_p))
  return self.updates
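In both optimizer examples (7 and 9), int_shape supplies the static shapes used to allocate one zero-initialized slot variable per parameter (accumulators or momentum buffers). A hedged sketch of just that allocation pattern (illustrative parameters, TF 1.x graph mode):

# Hedged sketch of the per-parameter slot allocation used above.
import numpy as np
from tensorflow.python.keras import backend as K

params = [K.variable(np.zeros((3, 4))), K.variable(np.zeros((4,)))]
shapes = [K.int_shape(p) for p in params]        # [(3, 4), (4,)]
moments = [K.zeros(shape) for shape in shapes]   # one slot tensor per parameter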
Example 10: opt_variable
def opt_variable(value, dtype=None, name=None, constraint=None):
  """Instantiates a variable and returns it."""
  if dtype is None:
    dtype = backend.floatx()

  variables = []
  for i in range(num_replicas):
    # Keras holds the variables in optimizer class instance, so the name
    # does not matter here. ResourceVariable constructor will find a unique
    # name (including name=None) for each replica.
    with ops.device("device:TPU:{}".format(i)):
      v = resource_variable_ops.ResourceVariable(
          value,
          dtype=dtypes_module.as_dtype(dtype),
          name=name,
          constraint=constraint)
      variables.append(v)
  name = "replicate_{}_{}".format("variable" if name is None else name,
                                  ops.uid())
  v = ReplicatedVariable(name, variables)

  # pylint: disable=protected-access
  if isinstance(value, np.ndarray):
    v._keras_shape = value.shape
  elif hasattr(value, "shape"):
    v._keras_shape = backend.int_shape(value)
  v._uses_learning_phase = False
  backend.track_variable(v)
  return v
Example 11: call
def call(self, inputs, training=None, mask=None):
  kwargs = {}
  if generic_utils.has_arg(self.layer.call, 'training'):
    kwargs['training'] = training
  uses_learning_phase = False  # pylint: disable=redefined-outer-name

  input_shape = K.int_shape(inputs)
  if input_shape[0]:
    # batch size matters, use rnn-based implementation
    def step(x, _):
      global uses_learning_phase  # pylint: disable=global-variable-undefined
      output = self.layer.call(x, **kwargs)
      if hasattr(output, '_uses_learning_phase'):
        uses_learning_phase = (output._uses_learning_phase or
                               uses_learning_phase)
      return output, []

    _, outputs, _ = K.rnn(
        step,
        inputs,
        initial_states=[],
        input_length=input_shape[1],
        unroll=False)
    y = outputs
  else:
    # No batch size specified, therefore the layer will be able
    # to process batches of any size.
    # We can go with reshape-based implementation for performance.
    input_length = input_shape[1]
    if not input_length:
      input_length = array_ops.shape(inputs)[1]
    inner_input_shape = self._get_shape_tuple((-1,), inputs, 2)
    # Shape: (num_samples * timesteps, ...). And track the
    # transformation in self._input_map.
    input_uid = generic_utils.object_list_uid(inputs)
    inputs = array_ops.reshape(inputs, inner_input_shape)
    self._input_map[input_uid] = inputs
    # (num_samples * timesteps, ...)
    if generic_utils.has_arg(self.layer.call, 'mask') and mask is not None:
      inner_mask_shape = self._get_shape_tuple((-1,), mask, 2)
      kwargs['mask'] = K.reshape(mask, inner_mask_shape)
    y = self.layer.call(inputs, **kwargs)
    if hasattr(y, '_uses_learning_phase'):
      uses_learning_phase = y._uses_learning_phase
    # Shape: (num_samples, timesteps, ...)
    output_shape = self.compute_output_shape(input_shape).as_list()
    output_shape = self._get_shape_tuple(
        (-1, input_length), y, 1, output_shape[2:])
    y = array_ops.reshape(y, output_shape)

  # Apply activity regularizer if any:
  if (hasattr(self.layer, 'activity_regularizer') and
      self.layer.activity_regularizer is not None):
    regularization_loss = self.layer.activity_regularizer(y)
    self.add_loss(regularization_loss, inputs)

  if uses_learning_phase:
    y._uses_learning_phase = True
  return y
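Here int_shape decides which implementation TimeDistributed uses: a known batch size takes the K.rnn path, while an unknown batch size takes the reshape path, with the static timesteps dimension read from the same tuple. A hedged usage sketch (illustrative shapes, TF 1.x graph mode):

# Hedged sketch: TimeDistributed with an unspecified batch size.
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.layers import Input, Dense, TimeDistributed

x = Input(shape=(10, 16))              # batch dimension is None
y = TimeDistributed(Dense(4))(x)       # takes the reshape-based branch above
print(K.int_shape(y))                  # expected (None, 10, 4)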
Example 12: call
def call(self, inputs, mask=None, training=None, initial_state=None):
  # GRU does not support constants. Ignore it during process.
  inputs, initial_state, _ = self._process_inputs(inputs, initial_state, None)

  if isinstance(mask, list):
    mask = mask[0]

  input_shape = K.int_shape(inputs)
  timesteps = input_shape[0] if self.time_major else input_shape[1]

  if not self.could_use_cudnn:
    # CuDNN does not support masking, fall back to use the normal GRU.
    kwargs = {'training': training}

    def step(cell_inputs, cell_states):
      return self.cell.call(cell_inputs, cell_states, **kwargs)

    last_output, outputs, states = K.rnn(
        step,
        inputs,
        initial_state,
        constants=None,
        go_backwards=self.go_backwards,
        mask=mask,
        unroll=self.unroll,
        input_length=timesteps,
        time_major=self.time_major,
        zero_output_for_mask=self.zero_output_for_mask)
    # This is a dummy tensor for testing purpose.
    runtime = _runtime('unknown')
  else:
    last_output, outputs, runtime, states = self._defun_gru_call(
        inputs, initial_state, training, mask)

  if self.stateful:
    updates = [state_ops.assign(self.states[0], states[0])]
    self.add_update(updates, inputs)

  if self.return_sequences:
    output = outputs
  else:
    output = last_output

  if self.return_state:
    return [output] + list(states)
  elif self._return_runtime:
    return output, runtime
  else:
    return output
Example 13: transition_block
def transition_block(x, reduction, name):
  """A transition block.

  Arguments:
    x: input tensor.
    reduction: float, compression rate at transition layers.
    name: string, block label.

  Returns:
    output tensor for the block.
  """
  bn_axis = 3 if K.image_data_format() == 'channels_last' else 1
  x = BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name=name + '_bn')(x)
  x = Activation('relu', name=name + '_relu')(x)
  x = Conv2D(
      int(K.int_shape(x)[bn_axis] * reduction),
      1,
      use_bias=False,
      name=name + '_conv')(x)
  x = AveragePooling2D(2, strides=2, name=name + '_pool')(x)
  return x
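int_shape reads the channel count of the incoming feature map so the 1x1 convolution can shrink it by the given reduction factor. A hedged usage sketch, assuming the layer imports match the names used above and an illustrative input size:

# Hedged usage sketch for transition_block; the input size is an assumption.
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.layers import Input

inputs = Input(shape=(56, 56, 256))
outputs = transition_block(inputs, reduction=0.5, name='pool2')
print(K.int_shape(outputs))  # expected (None, 28, 28, 128): channels and spatial dims halved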
Example 14: normal_lstm
def normal_lstm(inputs, init_h, init_c, kernel, recurrent_kernel, bias, units,
                activation, recurrent_activation):
  input_shape = K.int_shape(inputs)
  timesteps = input_shape[1]

  def step(cell_inputs, cell_states):
    h_tm1 = cell_states[0]  # previous memory state
    c_tm1 = cell_states[1]  # previous carry state

    # Only use the second half of the bias weights.
    _, real_bias = array_ops.split(bias, 2)

    z = K.dot(cell_inputs, kernel)
    z += K.dot(h_tm1, recurrent_kernel)
    z = K.bias_add(z, real_bias)

    z0 = z[:, :units]
    z1 = z[:, units:2 * units]
    z2 = z[:, 2 * units:3 * units]
    z3 = z[:, 3 * units:]

    i = recurrent_activation(z0)
    f = recurrent_activation(z1)
    c = f * c_tm1 + i * activation(z2)
    o = recurrent_activation(z3)

    h = o * activation(c)
    return h, [h, c]

  _, outputs, new_states = K.rnn(
      step,
      inputs, [init_h, init_c],
      constants=None,
      unroll=False,
      input_length=timesteps)
  return outputs, new_states, constant_op.constant(
      'cpu', dtype=dtypes.string, name='runtime')
Example 15: _eager_metrics_fn
def _eager_metrics_fn(model, outputs, targets):
  """Calculates the metrics for each output of the given model.

  Arguments:
    model: The model on which metrics are being calculated.
    outputs: The outputs of the given model.
    targets: The predictions or targets of the given model.

  Returns:
    Returns the metric names and metric results for each output of the model.
  """
  metric_names = []
  metric_results = []
  if not isinstance(outputs, list):
    outputs = [outputs]

  if not isinstance(targets, list):
    targets = [targets]

  for i in range(len(model.outputs)):
    output_metrics = model.nested_metrics[i]
    for nested_output_metric in output_metrics:
      metric_name, metric_fn = _get_metrics_info(
          nested_output_metric, backend.int_shape(model.outputs[i]),
          model.loss_functions[i])

      if len(model.output_names) > 1:
        metric_name = model.output_names[i] + '_' + metric_name
        if metric_name not in model.metrics_names:
          model.metrics_names.append(metric_name)

      with backend.name_scope(metric_name):
        metric_result = metric_fn(targets[i], outputs[i])
        metric_names.append(metric_name)
        metric_results.append(backend.mean(metric_result))

  return metric_results