This article collects typical usage examples of the Python function tensorflow.python.keras._impl.keras.backend.image_data_format. If you have been wondering what exactly image_data_format does and how to use it, the curated code examples below should help.
The following presents 15 code examples of the image_data_format function, sorted by popularity by default.
Example 1: preprocess_input
def preprocess_input(x, data_format=None):
  """Preprocesses a tensor encoding a batch of images.

  Arguments:
      x: input Numpy tensor, 4D.
      data_format: data format of the image tensor.

  Returns:
      Preprocessed tensor.
  """
  if data_format is None:
    data_format = K.image_data_format()
  assert data_format in {'channels_last', 'channels_first'}

  if data_format == 'channels_first':
    if x.ndim == 3:
      # 'RGB'->'BGR'
      x = x[::-1, ...]
      # Zero-center by mean pixel
      x[0, :, :] -= 103.939
      x[1, :, :] -= 116.779
      x[2, :, :] -= 123.68
    else:
      x = x[:, ::-1, ...]
      x[:, 0, :, :] -= 103.939
      x[:, 1, :, :] -= 116.779
      x[:, 2, :, :] -= 123.68
  else:
    # 'RGB'->'BGR'
    x = x[..., ::-1]
    # Zero-center by mean pixel
    x[..., 0] -= 103.939
    x[..., 1] -= 116.779
    x[..., 2] -= 123.68
  return x
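For orientation, here is a minimal, hedged sketch of how this kind of Caffe-style preprocessing is driven by image_data_format(). It assumes the public tf.keras API (rather than the internal _impl module above) and uses a synthetic batch instead of real images:

import numpy as np
import tensorflow as tf

# image_data_format() returns the global default ('channels_last' unless
# overridden in ~/.keras/keras.json or via set_image_data_format()).
fmt = tf.keras.backend.image_data_format()
shape = (2, 32, 32, 3) if fmt == 'channels_last' else (2, 3, 32, 32)
batch = np.random.uniform(0, 255, size=shape).astype('float32')
mean = np.array([103.939, 116.779, 123.68], dtype='float32')

if fmt == 'channels_last':
  batch = batch[..., ::-1]          # 'RGB' -> 'BGR'
  batch -= mean                     # broadcasts over the last axis
else:
  batch = batch[:, ::-1, ...]       # channel axis is axis 1
  batch -= mean.reshape(1, 3, 1, 1)

print(fmt, batch.shape, float(batch.mean()))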
Example 2: conv_block
def conv_block(x, growth_rate, name):
  """A building block for a dense block.

  Arguments:
      x: input tensor.
      growth_rate: float, growth rate at dense layers.
      name: string, block label.

  Returns:
      output tensor for the block.
  """
  bn_axis = 3 if K.image_data_format() == 'channels_last' else 1
  x1 = BatchNormalization(
      axis=bn_axis, epsilon=1.001e-5, name=name + '_0_bn')(x)
  x1 = Activation('relu', name=name + '_0_relu')(x1)
  x1 = Conv2D(4 * growth_rate, 1, use_bias=False, name=name + '_1_conv')(x1)
  x1 = BatchNormalization(
      axis=bn_axis, epsilon=1.001e-5, name=name + '_1_bn')(x1)
  x1 = Activation('relu', name=name + '_1_relu')(x1)
  x1 = Conv2D(
      growth_rate, 3, padding='same', use_bias=False, name=name + '_2_conv')(x1)
  x = Concatenate(axis=bn_axis, name=name + '_concat')([x, x1])
  return x
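A small, hypothetical wiring of the same axis-selection idea into a model, just to make the bn_axis choice visible. It assumes the public tf.keras layer classes, and the 16-channel input shape is arbitrary:

import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, Concatenate

# Channel axis follows the configured data format: last axis for
# channels_last, axis 1 for channels_first.
bn_axis = 3 if K.image_data_format() == 'channels_last' else 1

shape = (32, 32, 16) if bn_axis == 3 else (16, 32, 32)
inputs = Input(shape=shape)
branch = Conv2D(8, 3, padding='same', use_bias=False)(inputs)
branch = BatchNormalization(axis=bn_axis, epsilon=1.001e-5)(branch)
outputs = Concatenate(axis=bn_axis)([inputs, branch])  # 16 + 8 channels

model = tf.keras.Model(inputs, outputs)
print(model.output_shape)  # channel dimension grows to 24 along bn_axis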
Example 3: load_data
def load_data(label_mode='fine'):
  """Loads CIFAR100 dataset.

  Arguments:
      label_mode: one of "fine", "coarse".

  Returns:
      Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.

  Raises:
      ValueError: in case of invalid `label_mode`.
  """
  if label_mode not in ['fine', 'coarse']:
    raise ValueError('`label_mode` must be one of "fine", "coarse".')

  dirname = 'cifar-100-python'
  origin = 'http://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz'
  path = get_file(dirname, origin=origin, untar=True)

  fpath = os.path.join(path, 'train')
  x_train, y_train = load_batch(fpath, label_key=label_mode + '_labels')

  fpath = os.path.join(path, 'test')
  x_test, y_test = load_batch(fpath, label_key=label_mode + '_labels')

  y_train = np.reshape(y_train, (len(y_train), 1))
  y_test = np.reshape(y_test, (len(y_test), 1))

  if K.image_data_format() == 'channels_last':
    x_train = x_train.transpose(0, 2, 3, 1)
    x_test = x_test.transpose(0, 2, 3, 1)

  return (x_train, y_train), (x_test, y_test)
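The interesting part here is the layout-dependent transpose at the end. A minimal sketch of that step alone, using a fake batch instead of the real CIFAR download:

import numpy as np
import tensorflow as tf

# CIFAR batches are stored as (N, 3, 32, 32); the loader transposes them
# to (N, 32, 32, 3) only when the backend is configured for channels_last.
x = np.zeros((4, 3, 32, 32), dtype='uint8')   # fake NCHW batch
if tf.keras.backend.image_data_format() == 'channels_last':
  x = x.transpose(0, 2, 3, 1)
print(x.shape)  # (4, 32, 32, 3) under the default channels_last setting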
Example 4: preprocess_input
def preprocess_input(x, data_format=None, mode='caffe'):
  """Preprocesses a tensor or Numpy array encoding a batch of images.

  Arguments:
      x: Input Numpy or symbolic tensor, 3D or 4D.
      data_format: Data format of the image tensor/array.
      mode: One of "caffe", "tf".
          - caffe: will convert the images from RGB to BGR,
              then will zero-center each color channel with
              respect to the ImageNet dataset,
              without scaling.
          - tf: will scale pixels between -1 and 1,
              sample-wise.

  Returns:
      Preprocessed tensor or Numpy array.

  Raises:
      ValueError: In case of unknown `data_format` argument.
  """
  if data_format is None:
    data_format = K.image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format ' + str(data_format))

  if isinstance(x, np.ndarray):
    return _preprocess_numpy_input(x, data_format=data_format, mode=mode)
  else:
    return _preprocess_symbolic_input(x, data_format=data_format, mode=mode)
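The isinstance check is what lets one entry point serve both eager NumPy arrays and backend tensors. A hedged sketch of the same dispatch pattern with two stand-in helpers (the helper names are hypothetical, not the private TF functions referenced above):

import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K

def _numpy_path(x):
  return x / 127.5 - 1.0          # plain NumPy arithmetic

def _symbolic_path(x):
  return x / 127.5 - 1.0          # builds TF ops on a tensor

def preprocess(x, data_format=None):
  if data_format is None:
    data_format = K.image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format ' + str(data_format))
  if isinstance(x, np.ndarray):
    return _numpy_path(x)
  return _symbolic_path(x)

print(type(preprocess(np.ones((1, 4, 4, 3), dtype='float32'))))  # ndarray
print(type(preprocess(tf.ones((1, 4, 4, 3)))))                   # tf.Tensor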
Example 5: load_data
def load_data():
  """Loads CIFAR10 dataset.

  Returns:
      Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
  """
  dirname = 'cifar-10-batches-py'
  origin = 'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
  path = get_file(dirname, origin=origin, untar=True)

  num_train_samples = 50000

  x_train = np.zeros((num_train_samples, 3, 32, 32), dtype='uint8')
  y_train = np.zeros((num_train_samples,), dtype='uint8')

  for i in range(1, 6):
    fpath = os.path.join(path, 'data_batch_' + str(i))
    data, labels = load_batch(fpath)
    x_train[(i - 1) * 10000:i * 10000, :, :, :] = data
    y_train[(i - 1) * 10000:i * 10000] = labels

  fpath = os.path.join(path, 'test_batch')
  x_test, y_test = load_batch(fpath)

  y_train = np.reshape(y_train, (len(y_train), 1))
  y_test = np.reshape(y_test, (len(y_test), 1))

  if K.image_data_format() == 'channels_last':
    x_train = x_train.transpose(0, 2, 3, 1)
    x_test = x_test.transpose(0, 2, 3, 1)

  return (x_train, y_train), (x_test, y_test)
Example 6: img_to_array
def img_to_array(img, data_format=None):
  """Converts a PIL Image instance to a Numpy array.

  Arguments:
      img: PIL Image instance.
      data_format: Image data format.

  Returns:
      A 3D Numpy array.

  Raises:
      ValueError: if invalid `img` or `data_format` is passed.
  """
  if data_format is None:
    data_format = K.image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ', data_format)
  # Numpy array x has format (height, width, channel)
  # or (channel, height, width)
  # but original PIL image has format (width, height, channel)
  x = np.asarray(img, dtype=K.floatx())
  if len(x.shape) == 3:
    if data_format == 'channels_first':
      x = x.transpose(2, 0, 1)
  elif len(x.shape) == 2:
    if data_format == 'channels_first':
      x = x.reshape((1, x.shape[0], x.shape[1]))
    else:
      x = x.reshape((x.shape[0], x.shape[1], 1))
  else:
    raise ValueError('Unsupported image shape: ', x.shape)
  return x
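A short, hedged check of the width/height swap using the public helper tf.keras.preprocessing.image.img_to_array (newer releases also expose it as tf.keras.utils.img_to_array); it assumes Pillow is installed:

import tensorflow as tf
from PIL import Image

img = Image.new('RGB', (4, 3))  # PIL size is (width=4, height=3)

# With data_format=None the helper honours image_data_format().
arr = tf.keras.preprocessing.image.img_to_array(img)
print(arr.shape)  # (3, 4, 3) under channels_last: (height, width, channels)

arr_cf = tf.keras.preprocessing.image.img_to_array(img, data_format='channels_first')
print(arr_cf.shape)  # (3, 3, 4): (channels, height, width)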
Example 7: _conv_block
def _conv_block(inputs, filters, alpha, kernel=(3, 3), strides=(1, 1)):
  """Adds an initial convolution layer (with batch normalization and relu6).

  Arguments:
      inputs: Input tensor of shape `(rows, cols, 3)`
          (with `channels_last` data format) or
          `(3, rows, cols)` (with `channels_first` data format).
          It should have exactly 3 input channels,
          and width and height should be no smaller than 32.
          E.g. `(224, 224, 3)` would be one valid value.
      filters: Integer, the dimensionality of the output space
          (i.e. the number of output filters in the convolution).
      alpha: controls the width of the network.
          - If `alpha` < 1.0, proportionally decreases the number
              of filters in each layer.
          - If `alpha` > 1.0, proportionally increases the number
              of filters in each layer.
          - If `alpha` = 1, the default number of filters from the paper
              is used at each layer.
      kernel: An integer or tuple/list of 2 integers, specifying the
          width and height of the 2D convolution window.
          Can be a single integer to specify the same value for
          all spatial dimensions.
      strides: An integer or tuple/list of 2 integers,
          specifying the strides of the convolution along the width and height.
          Can be a single integer to specify the same value for
          all spatial dimensions.
          Specifying any stride value != 1 is incompatible with specifying
          any `dilation_rate` value != 1.

  Input shape:
      4D tensor with shape:
      `(samples, channels, rows, cols)` if data_format='channels_first'
      or 4D tensor with shape:
      `(samples, rows, cols, channels)` if data_format='channels_last'.

  Output shape:
      4D tensor with shape:
      `(samples, filters, new_rows, new_cols)` if data_format='channels_first'
      or 4D tensor with shape:
      `(samples, new_rows, new_cols, filters)` if data_format='channels_last'.
      `rows` and `cols` values might have changed due to stride.

  Returns:
      Output tensor of block.
  """
  channel_axis = 1 if K.image_data_format() == 'channels_first' else -1
  filters = int(filters * alpha)
  x = Conv2D(
      filters,
      kernel,
      padding='same',
      use_bias=False,
      strides=strides,
      name='conv1')(inputs)
  x = BatchNormalization(axis=channel_axis, name='conv1_bn')(x)
  return Activation(relu6, name='conv1_relu')(x)
Example 8: __init__
def __init__(self, rate, data_format=None, **kwargs):
  super(SpatialDropout3D, self).__init__(rate, **kwargs)
  if data_format is None:
    data_format = K.image_data_format()
  if data_format not in {'channels_last', 'channels_first'}:
    raise ValueError('data_format must be in '
                     '{"channels_last", "channels_first"}')
  self.data_format = data_format
  self.input_spec = InputSpec(ndim=5)
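A brief, hedged usage of the public counterpart, tf.keras.layers.SpatialDropout3D, where leaving data_format as None falls back to image_data_format() exactly as in the constructor above:

import tensorflow as tf

# data_format=None falls back to the configured image_data_format().
layer = tf.keras.layers.SpatialDropout3D(rate=0.3)

# 5D input: (batch, dim1, dim2, dim3, channels) under channels_last.
x = tf.random.uniform((2, 4, 4, 4, 8))
y = layer(x, training=True)   # entire feature maps are dropped together
print(y.shape)                # (2, 4, 4, 4, 8)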
Example 9: normalize_data_format
def normalize_data_format(value):
  if value is None:
    value = backend.image_data_format()
  data_format = value.lower()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('The `data_format` argument must be one of '
                     '"channels_first", "channels_last". Received: ' +
                     str(value))
  return data_format
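The None fall-back above reads a process-wide setting. A short sketch, using the public tf.keras backend, of where that value comes from and how it can be changed:

from tensorflow.keras import backend

print(backend.image_data_format())              # 'channels_last' by default
backend.set_image_data_format('channels_first')
print(backend.image_data_format())              # now 'channels_first'
backend.set_image_data_format('channels_last')  # restore the default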
Example 10: __init__
def __init__(self,
             pool_size=(2, 2, 2),
             strides=None,
             padding='valid',
             data_format=None,
             **kwargs):
  if data_format is None:
    data_format = K.image_data_format()
  if strides is None:
    strides = pool_size
  super(AveragePooling3D, self).__init__(pool_size, strides, padding,
                                         data_format, **kwargs)
Example 11: _separable_conv_block
def _separable_conv_block(ip,
                          filters,
                          kernel_size=(3, 3),
                          strides=(1, 1),
                          block_id=None):
  """Adds 2 blocks of [relu-separable conv-batchnorm].

  Arguments:
      ip: Input tensor
      filters: Number of output filters per layer
      kernel_size: Kernel size of separable convolutions
      strides: Strided convolution for downsampling
      block_id: String block_id

  Returns:
      A Keras tensor
  """
  channel_dim = 1 if K.image_data_format() == 'channels_first' else -1

  with K.name_scope('separable_conv_block_%s' % block_id):
    x = Activation('relu')(ip)
    x = SeparableConv2D(
        filters,
        kernel_size,
        strides=strides,
        name='separable_conv_1_%s' % block_id,
        padding='same',
        use_bias=False,
        kernel_initializer='he_normal')(x)
    x = BatchNormalization(
        axis=channel_dim,
        momentum=0.9997,
        epsilon=1e-3,
        name='separable_conv_1_bn_%s' % block_id)(x)
    x = Activation('relu')(x)
    x = SeparableConv2D(
        filters,
        kernel_size,
        name='separable_conv_2_%s' % block_id,
        padding='same',
        use_bias=False,
        kernel_initializer='he_normal')(x)
    x = BatchNormalization(
        axis=channel_dim,
        momentum=0.9997,
        epsilon=1e-3,
        name='separable_conv_2_bn_%s' % block_id)(x)
  return x
Example 12: set_model
def set_model(self, model):
  self.model = model
  self.sess = K.get_session()
  if self.histogram_freq and self.merged is None:
    for layer in self.model.layers:
      for weight in layer.weights:
        mapped_weight_name = weight.name.replace(':', '_')
        tf_summary.histogram(mapped_weight_name, weight)
        if self.write_grads:
          grads = model.optimizer.get_gradients(model.total_loss, weight)

          def is_indexed_slices(grad):
            return type(grad).__name__ == 'IndexedSlices'

          grads = [grad.values if is_indexed_slices(grad) else grad
                   for grad in grads]
          tf_summary.histogram('{}_grad'.format(mapped_weight_name), grads)
        if self.write_images:
          w_img = array_ops.squeeze(weight)
          shape = K.int_shape(w_img)
          if len(shape) == 2:  # dense layer kernel case
            if shape[0] > shape[1]:
              w_img = array_ops.transpose(w_img)
              shape = K.int_shape(w_img)
            w_img = array_ops.reshape(w_img, [1, shape[0], shape[1], 1])
          elif len(shape) == 3:  # convnet case
            if K.image_data_format() == 'channels_last':
              # Switch to channels_first to display
              # every kernel as a separate image.
              w_img = array_ops.transpose(w_img, perm=[2, 0, 1])
              shape = K.int_shape(w_img)
            w_img = array_ops.reshape(w_img,
                                      [shape[0], shape[1], shape[2], 1])
          elif len(shape) == 1:  # bias case
            w_img = array_ops.reshape(w_img, [1, shape[0], 1, 1])
          else:
            # Not possible to handle 3D convnets etc.
            continue

          shape = K.int_shape(w_img)
          assert len(shape) == 4 and shape[-1] in [1, 3, 4]
          tf_summary.image(mapped_weight_name, w_img)

      if hasattr(layer, 'output'):
        tf_summary.histogram('{}_out'.format(layer.name), layer.output)
  self.merged = tf_summary.merge_all()

  if self.write_graph:
    self.writer = tf_summary.FileWriter(self.log_dir, self.sess.graph)
  else:
    self.writer = tf_summary.FileWriter(self.log_dir)
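These are internals of the TF 1.x Keras TensorBoard callback. A hedged sketch of the user-facing side with today's public API (the log directory is illustrative, the write_grads switch used above is no longer available in recent TF 2.x releases, and the input shape assumes the default channels_last setting):

import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([
  tf.keras.layers.Conv2D(4, 3, activation='relu', input_shape=(8, 8, 1)),
  tf.keras.layers.Flatten(),
  tf.keras.layers.Dense(2, activation='softmax'),
])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')

# histogram_freq triggers the weight histograms; write_images converts
# kernels to image summaries using data-format logic like the code above.
tb = tf.keras.callbacks.TensorBoard(log_dir='/tmp/tb_demo',
                                    histogram_freq=1, write_images=True)

x = np.random.rand(16, 8, 8, 1).astype('float32')
y = np.random.randint(0, 2, size=(16,))
model.fit(x, y, epochs=1, callbacks=[tb], verbose=0)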
Example 13: __init__
def __init__(self, pool_function, pool_size, strides,
             padding='valid', data_format=None,
             name=None, **kwargs):
  super(Pooling1D, self).__init__(name=name, **kwargs)
  if data_format is None:
    data_format = backend.image_data_format()
  if strides is None:
    strides = pool_size
  self.pool_function = pool_function
  self.pool_size = conv_utils.normalize_tuple(pool_size, 1, 'pool_size')
  self.strides = conv_utils.normalize_tuple(strides, 1, 'strides')
  self.padding = conv_utils.normalize_padding(padding)
  self.data_format = conv_utils.normalize_data_format(data_format)
  self.input_spec = InputSpec(ndim=3)
Example 14: conv_block
def conv_block(input_tensor, kernel_size, filters, stage, block,
               strides=(2, 2)):
  """A block that has a conv layer at shortcut.

  Arguments:
      input_tensor: input tensor
      kernel_size: default 3, the kernel size of middle conv layer at main path
      filters: list of integers, the filters of 3 conv layer at main path
      stage: integer, current stage label, used for generating layer names
      block: 'a','b'..., current block label, used for generating layer names
      strides: Strides for the first conv layer in the block.

  Returns:
      Output tensor for the block.

  Note that from stage 3, the first conv layer at main path has
  strides=(2, 2), and the shortcut has strides=(2, 2) as well.
  """
  filters1, filters2, filters3 = filters
  if K.image_data_format() == 'channels_last':
    bn_axis = 3
  else:
    bn_axis = 1
  conv_name_base = 'res' + str(stage) + block + '_branch'
  bn_name_base = 'bn' + str(stage) + block + '_branch'

  x = Conv2D(
      filters1, (1, 1), strides=strides, name=conv_name_base + '2a')(input_tensor)
  x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
  x = Activation('relu')(x)

  x = Conv2D(
      filters2, kernel_size, padding='same', name=conv_name_base + '2b')(x)
  x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
  x = Activation('relu')(x)

  x = Conv2D(filters3, (1, 1), name=conv_name_base + '2c')(x)
  x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)

  shortcut = Conv2D(
      filters3, (1, 1), strides=strides, name=conv_name_base + '1')(input_tensor)
  shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + '1')(shortcut)

  x = layers.add([x, shortcut])
  x = Activation('relu')(x)
  return x
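An illustrative, hedged check using the public ResNet50 builder, whose bottleneck blocks follow the same pattern; the input shape is chosen to match the configured data format, just as bn_axis is above:

import tensorflow as tf
from tensorflow.keras import backend as K

if K.image_data_format() == 'channels_last':
  input_shape = (224, 224, 3)
else:
  input_shape = (3, 224, 224)

# weights=None builds the architecture without downloading ImageNet weights.
model = tf.keras.applications.ResNet50(weights=None, input_shape=input_shape)
print(model.count_params())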
Example 15: preprocess_input
def preprocess_input(x, data_format=None, mode='caffe'):
  """Preprocesses a tensor encoding a batch of images.

  Arguments:
      x: input Numpy tensor, 4D.
      data_format: data format of the image tensor.
      mode: One of "caffe", "tf".
          - caffe: will convert the images from RGB to BGR,
              then will zero-center each color channel with
              respect to the ImageNet dataset,
              without scaling.
          - tf: will scale pixels between -1 and 1,
              sample-wise.

  Returns:
      Preprocessed tensor.
  """
  if data_format is None:
    data_format = K.image_data_format()
  assert data_format in {'channels_last', 'channels_first'}

  if mode == 'tf':
    x /= 255.
    x -= 0.5
    x *= 2.
    return x

  if data_format == 'channels_first':
    if x.ndim == 3:
      # 'RGB'->'BGR'
      x = x[::-1, ...]
      # Zero-center by mean pixel
      x[0, :, :] -= 103.939
      x[1, :, :] -= 116.779
      x[2, :, :] -= 123.68
    else:
      x = x[:, ::-1, ...]
      x[:, 0, :, :] -= 103.939
      x[:, 1, :, :] -= 116.779
      x[:, 2, :, :] -= 123.68
  else:
    # 'RGB'->'BGR'
    x = x[..., ::-1]
    # Zero-center by mean pixel
    x[..., 0] -= 103.939
    x[..., 1] -= 116.779
    x[..., 2] -= 123.68
  return x
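Note that the mode='tf' branch ignores the data format entirely and only rescales. A minimal numeric check of that branch with a dummy array:

import numpy as np

# mode='tf' scaling from the example above: [0, 255] -> [-1, 1],
# with no dataset statistics involved.
x = np.array([0., 127.5, 255.], dtype='float32')
x /= 255.
x -= 0.5
x *= 2.
print(x)  # [-1.  0.  1.]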