This article collects typical usage examples of the Python method tensorflow.keras.backend.reshape. If you are unsure what backend.reshape does, how to call it, or where it is useful, the curated code examples below should help. You can also explore other members of the tensorflow.keras.backend module.
The following presents 15 code examples of backend.reshape, ordered by popularity.
Example 1: call
# Required module: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def call(self, x):
    n = (self.win_length - 1) / 2.0
    denom = n * (n + 1) * (2 * n + 1) / 3
    if self.data_format == 'channels_first':
        x = K.permute_dimensions(x, (0, 2, 3, 1))
    x = tf.pad(x, tf.constant([[0, 0], [0, 0], [int(n), int(n)], [0, 0]]), mode=self.mode)
    kernel = K.arange(-n, n + 1, 1, dtype=K.floatx())
    kernel = K.reshape(kernel, (1, kernel.shape[-1], 1, 1))  # (freq, time)
    x = K.conv2d(x, kernel, 1, data_format='channels_last') / denom
    if self.data_format == 'channels_first':
        x = K.permute_dimensions(x, (0, 3, 1, 2))
    return x
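For readers who want to run the kernel construction in isolation, here is a minimal standalone sketch. The layer attributes are not shown in the example above, so win_length=5, a channels_last input of shape (batch, freq, time, 1), and 'SYMMETRIC' padding are assumptions:

import tensorflow as tf
from tensorflow.keras import backend as K

win_length = 5                                   # assumed window size
n = (win_length - 1) / 2.0
denom = n * (n + 1) * (2 * n + 1) / 3

kernel = K.arange(-n, n + 1, 1, dtype=K.floatx())        # ramp [-2., ..., 2.]
kernel = K.reshape(kernel, (1, kernel.shape[-1], 1, 1))  # (kh=1, kw=5, in=1, out=1)

x = tf.random.normal((2, 64, 100, 1))                    # (batch, freq, time, ch)
x = tf.pad(x, [[0, 0], [0, 0], [int(n), int(n)], [0, 0]], mode='SYMMETRIC')
delta = K.conv2d(x, kernel, strides=(1, 1), data_format='channels_last') / denom
print(delta.shape)  # (2, 64, 100, 1): delta features, time length preserved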
Example 2: split_heads_2d
# Required module: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def split_heads_2d(self, ip):
    tensor_shape = K.shape(ip)
    # batch, height, width, channels for axis = -1
    tensor_shape = [tensor_shape[i] for i in range(len(self._shape))]
    batch = tensor_shape[0]
    height = tensor_shape[1]
    width = tensor_shape[2]
    channels = tensor_shape[3]
    # Save the spatial tensor dimensions
    self._batch = batch
    self._height = height
    self._width = width
    ret_shape = K.stack([batch, height, width, self.num_heads, channels // self.num_heads])
    split = K.reshape(ip, ret_shape)
    transpose_axes = (0, 3, 1, 2, 4)
    split = K.permute_dimensions(split, transpose_axes)
    return split
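The same split can be exercised outside the layer class; the following is a self-contained sketch, assuming a channels_last feature map and a head count that divides the channels (the free function below is a rewrite of the method, not the original class code). Its inverse appears in Example 9.

import tensorflow as tf
from tensorflow.keras import backend as K

def split_heads_2d(ip, num_heads):
    batch, height, width, channels = [K.shape(ip)[i] for i in range(4)]
    ret_shape = K.stack([batch, height, width, num_heads, channels // num_heads])
    split = K.reshape(ip, ret_shape)
    # (batch, num_heads, height, width, channels // num_heads)
    return K.permute_dimensions(split, (0, 3, 1, 2, 4))

x = tf.random.normal((2, 8, 8, 32))
print(split_heads_2d(x, num_heads=4).shape)  # (2, 4, 8, 8, 8)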
Example 3: call
# Required module: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def call(self, inputs):
    """
    Parameters
        inputs: volume or list with one volume
    """
    # check shapes
    if isinstance(inputs, (list, tuple)):
        assert len(inputs) == 1, "inputs has to be len 1. found: %d" % len(inputs)
        vol = inputs[0]
    else:
        vol = inputs

    # necessary for multi_gpu models...
    vol = K.reshape(vol, [-1, *self.inshape[1:]])

    # map transform across batch
    return tf.map_fn(self._single_resize, vol, dtype=tf.float32)
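A reduced sketch of the reshape-then-map_fn pattern, with an assumed fixed inshape and a toy doubling function standing in for self._single_resize; it only demonstrates the mechanics of mapping a per-volume function across the batch:

import tensorflow as tf
from tensorflow.keras import backend as K

inshape = (None, 32, 32, 32, 1)  # assumed (batch, *vol_shape, features)

def resize_batch(vol):
    vol = K.reshape(vol, [-1, *inshape[1:]])  # collapse extra leading dims
    return tf.map_fn(lambda v: v * 2.0, vol, dtype=tf.float32)

x = tf.random.normal((4, 32, 32, 32, 1))
print(resize_batch(x).shape)  # (4, 32, 32, 32, 1)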
Example 4: _single_batch_trf
# Required module: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def _single_batch_trf(self, vol):
    # vol should be vol_shape + [nb_features]
    # self.trf should be vol_shape + [nb_features] + [ndims]
    vol_shape = vol.shape.as_list()
    nb_input_dims = vol_shape[-1]

    # this is inefficient...
    new_vols = [None] * self.output_features
    for j in range(self.output_features):
        new_vols[j] = tf.zeros(vol_shape[:-1], dtype=tf.float32)
        for i in range(nb_input_dims):
            trf_vol = transform(vol[..., i], self.trf[..., i, j, :] * self.trf_mult, interp_method=self.interp_method)
            trf_vol = tf.reshape(trf_vol, vol_shape[:-1])
            new_vols[j] += trf_vol * self.mult[..., i, j]
        if self.use_bias:
            new_vols[j] += self.bias[..., j]

    return tf.stack(new_vols, -1)
Example 5: channle_shuffle
# Required module: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def channle_shuffle(inputs, group):
    """Shuffle the channels
    Args:
        inputs: 4D Tensor
        group: int, number of groups
    Returns:
        Shuffled 4D Tensor
    """
    h, w, in_channel = K.int_shape(inputs)[1:]
    assert(in_channel % group == 0)
    l = K.reshape(inputs, [-1, h, w, in_channel // group, group])
    l = K.permute_dimensions(l, [0, 1, 2, 4, 3])
    l = K.reshape(l, [-1, h, w, in_channel])
    return l
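To see what the shuffle actually does, here is a toy check with 1x1 spatial dimensions and six channels split into two groups, so the channel permutation can be read off directly:

import tensorflow as tf
from tensorflow.keras import backend as K

x = tf.reshape(tf.range(6, dtype=tf.float32), (1, 1, 1, 6))  # channels 0..5
l = K.reshape(x, [-1, 1, 1, 6 // 2, 2])       # group = 2
l = K.permute_dimensions(l, [0, 1, 2, 4, 3])
l = K.reshape(l, [-1, 1, 1, 6])
print(l.numpy().ravel())  # [0. 2. 4. 1. 3. 5.]: channels interleaved across groups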
Example 6: call
# Required module: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def call(self, inputs):
    def brelu(x):
        # get shape of x; we are interested in the last axis, which is constant
        shape = K.int_shape(x)
        # last axis
        dim = shape[-1]
        # half of the last axis (+1 if necessary)
        dim2 = dim // 2
        if dim % 2 != 0:
            dim2 += 1
        # multiplier will be a tensor of alternating +1 and -1
        multiplier = K.ones((dim2,))
        multiplier = K.stack([multiplier, -multiplier], axis=-1)
        # flatten so the +1/-1 pattern interleaves before trimming the odd case
        multiplier = K.reshape(multiplier, (-1,))
        if dim % 2 != 0:
            multiplier = multiplier[:-1]
        # adjust multiplier shape to the shape of x
        multiplier = K.reshape(multiplier, tuple(1 for _ in shape[:-1]) + (-1,))
        return multiplier * tf.nn.relu(multiplier * x)
    return Lambda(brelu)(inputs)
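A quick numeric check of brelu's alternating-sign ReLU, assuming an even feature dimension of 4 so the multiplier is simply [1, -1, 1, -1]:

import tensorflow as tf
from tensorflow.keras import backend as K

x = tf.constant([[1.0, 2.0, -3.0, -4.0]])
multiplier = K.ones((2,))
multiplier = K.stack([multiplier, -multiplier], axis=-1)  # [[1, -1], [1, -1]]
multiplier = K.reshape(multiplier, (1, -1))               # [[1, -1, 1, -1]]
print((multiplier * tf.nn.relu(multiplier * x)).numpy())
# [[ 1. -0.  0. -4.]]: even slots keep positive parts, odd slots keep negative parts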
Example 7: relative_logits_1d
# Required module: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def relative_logits_1d(self, q, rel_k, H, W, transpose_mask):
    rel_logits = tf.einsum('bhxyd,md->bhxym', q, rel_k)
    rel_logits = K.reshape(rel_logits, [-1, self.num_heads * H, W, 2 * W - 1])
    rel_logits = self.rel_to_abs(rel_logits)
    rel_logits = K.reshape(rel_logits, [-1, self.num_heads, H, W, W])
    rel_logits = K.expand_dims(rel_logits, axis=3)
    rel_logits = K.tile(rel_logits, [1, 1, 1, H, 1, 1])
    rel_logits = K.permute_dimensions(rel_logits, transpose_mask)
    rel_logits = K.reshape(rel_logits, [-1, self.num_heads, H * W, H * W])
    return rel_logits
Example 8: rel_to_abs
# Required module: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def rel_to_abs(self, x):
    shape = K.shape(x)
    shape = [shape[i] for i in range(3)]
    B, Nh, L = shape
    col_pad = K.zeros(K.stack([B, Nh, L, 1]))
    x = K.concatenate([x, col_pad], axis=3)
    flat_x = K.reshape(x, [B, Nh, L * 2 * L])
    flat_pad = K.zeros(K.stack([B, Nh, L - 1]))
    flat_x_padded = K.concatenate([flat_x, flat_pad], axis=2)
    final_x = K.reshape(flat_x_padded, [B, Nh, L + 1, 2 * L - 1])
    final_x = final_x[:, :, :L, L - 1:]
    return final_x
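A shape-only sketch of this relative-to-absolute conversion (the helper used by relative_logits_1d in Example 7), rewritten as a free function and run on an assumed toy size L=3: it turns (B, Nh, L, 2L-1) relative logits into (B, Nh, L, L) absolute logits through padding and two reshapes.

import tensorflow as tf
from tensorflow.keras import backend as K

def rel_to_abs(x):
    B, Nh, L = [K.shape(x)[i] for i in range(3)]
    col_pad = tf.zeros(K.stack([B, Nh, L, 1]))
    x = K.concatenate([x, col_pad], axis=3)                  # (B, Nh, L, 2L)
    flat_x = K.reshape(x, [B, Nh, L * 2 * L])
    flat_pad = tf.zeros(K.stack([B, Nh, L - 1]))
    flat_x_padded = K.concatenate([flat_x, flat_pad], axis=2)
    final_x = K.reshape(flat_x_padded, [B, Nh, L + 1, 2 * L - 1])
    return final_x[:, :, :L, L - 1:]

x = tf.random.normal((1, 1, 3, 5))  # L = 3, 2L - 1 = 5
print(rel_to_abs(x).shape)          # (1, 1, 3, 3)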
Example 9: combine_heads_2d
# Required module: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def combine_heads_2d(self, inputs):
    # [batch, num_heads, height, width, depth_v // num_heads]
    transposed = K.permute_dimensions(inputs, [0, 2, 3, 1, 4])
    # [batch, height, width, num_heads, depth_v // num_heads]
    shape = K.shape(transposed)
    shape = [shape[i] for i in range(5)]
    a, b = shape[-2:]
    ret_shape = K.stack(shape[:-2] + [a * b])
    # [batch, height, width, depth_v]
    return K.reshape(transposed, ret_shape)
Example 10: call
# Required module: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def call(self, inputs):
    X = inputs[0]  # (batch_size, N, F)
    A = inputs[1]  # (batch_size, N, N)
    E = inputs[2]  # (n_edges, S) or (batch_size, N, N, S)

    mode = ops.autodetect_mode(A, X)
    if mode == modes.SINGLE:
        return self._call_single(inputs)

    # Parameters
    N = K.shape(X)[-2]
    F = K.int_shape(X)[-1]
    F_ = self.channels

    # Filter network
    kernel_network = E
    for l in self.kernel_network_layers:
        kernel_network = l(kernel_network)

    # Convolution
    target_shape = (-1, N, N, F_, F) if mode == modes.BATCH else (N, N, F_, F)
    kernel = K.reshape(kernel_network, target_shape)
    output = kernel * A[..., None, None]
    output = tf.einsum('abicf,aif->abc', output, X)

    if self.root:
        output += ops.dot(X, self.root_kernel)
    if self.use_bias:
        output = K.bias_add(output, self.bias)
    if self.activation is not None:
        output = self.activation(output)
    return output
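To make the reshape-and-einsum step concrete, here is a dimension check for the batch-mode path with assumed hypothetical sizes (batch=2, N=4 nodes, F=3 input features, F_=5 channels), using a random tensor in place of the filter network's output:

import tensorflow as tf
from tensorflow.keras import backend as K

batch, N, F, F_ = 2, 4, 3, 5
X = tf.random.normal((batch, N, F))
A = tf.random.uniform((batch, N, N))
kernel_network = tf.random.normal((batch, N, N, F_ * F))  # stand-in filter output

kernel = K.reshape(kernel_network, (-1, N, N, F_, F))  # per node-pair kernels
output = kernel * A[..., None, None]                   # mask by adjacency
output = tf.einsum('abicf,aif->abc', output, X)
print(output.shape)  # (2, 4, 5): (batch, N, channels)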
Example 11: _call_single
# Required module: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def _call_single(self, inputs):
    X = inputs[0]  # (N, F)
    A = inputs[1]  # (N, N)
    E = inputs[2]  # (n_edges, S)
    assert K.ndim(E) == 2, 'In single mode, E must have shape (n_edges, S).'

    # Enforce sparse representation
    if not K.is_sparse(A):
        A = ops.dense_to_sparse(A)

    # Parameters
    N = tf.shape(X)[-2]
    F = K.int_shape(X)[-1]
    F_ = self.channels

    # Filter network
    kernel_network = E
    for l in self.kernel_network_layers:
        kernel_network = l(kernel_network)  # (n_edges, F * F_)
    target_shape = (-1, F, F_)
    kernel = tf.reshape(kernel_network, target_shape)

    # Propagation
    index_i = A.indices[:, -2]
    index_j = A.indices[:, -1]
    messages = tf.gather(X, index_j)
    messages = ops.dot(messages[:, None, :], kernel)[:, 0, :]
    aggregated = ops.scatter_sum(messages, index_i, N)

    # Update
    output = aggregated
    if self.root:
        output += ops.dot(X, self.root_kernel)
    if self.use_bias:
        output = K.bias_add(output, self.bias)
    if self.activation is not None:
        output = self.activation(output)
    return output
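The propagation step can be sketched with plain TensorFlow ops, assuming tf.matmul and tf.math.unsorted_segment_sum as stand-ins for ops.dot and ops.scatter_sum, and hypothetical sizes (N=3 nodes, 4 edges, F=2, F_=5):

import tensorflow as tf

N, F, F_ = 3, 2, 5
X = tf.random.normal((N, F))
index_i = tf.constant([0, 1, 2, 2])            # target node per edge
index_j = tf.constant([1, 2, 0, 1])            # source node per edge
kernel = tf.random.normal((4, F, F_))          # one F x F_ kernel per edge

messages = tf.gather(X, index_j)               # (4, F): gather source features
messages = tf.matmul(messages[:, None, :], kernel)[:, 0, :]  # (4, F_)
aggregated = tf.math.unsorted_segment_sum(messages, index_i, N)
print(aggregated.shape)                        # (3, 5): summed per target node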
Example 12: _single_aff_to_shift
# Required module: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def _single_aff_to_shift(self, trf, volshape):
    if len(trf.shape) == 1:  # go from vector to matrix
        trf = tf.reshape(trf, [self.ndims, self.ndims + 1])

    # note: this adds unnecessary extra graph ops, since a tf.eye graph is built for every batch entry
    trf += tf.eye(self.ndims + 1)[:self.ndims, :]  # add identity, so the affine is a shift from the identity
    return affine_to_shift(trf, volshape, shift_center=True)
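A toy illustration of the vector-to-matrix step, assuming ndims=3: a flat 12-vector of affine parameters is reshaped into a 3x4 matrix, and adding the truncated identity makes an all-zero parameter vector correspond to the identity transform.

import tensorflow as tf

ndims = 3
trf = tf.zeros(ndims * (ndims + 1))        # flat affine parameters, shape (12,)
trf = tf.reshape(trf, [ndims, ndims + 1])  # (3, 4) affine matrix
trf += tf.eye(ndims + 1)[:ndims, :]        # zero parameters -> identity affine
print(trf.numpy())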
Example 13: build
# Required module: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def build(self, input_shape):
    # Create a trainable weight variable for this layer.
    self.kernel = self.add_weight(name='mult-kernel',
                                  shape=(np.prod(self.orig_input_shape),
                                         self.output_len),
                                  initializer=self.kernel_initializer,
                                  trainable=True)

    M = K.reshape(self.kernel, [-1, self.output_len])  # D x d
    mt = K.transpose(M)  # d x D
    mtm_inv = tf.linalg.inv(K.dot(mt, M))  # d x d
    self.W = K.dot(mtm_inv, mt)  # d x D

    if self.use_bias:
        self.bias = self.add_weight(name='bias-kernel',
                                    shape=(self.output_len, ),
                                    initializer=self.bias_initializer,
                                    trainable=True)

    super(SpatiallySparse_Dense, self).build(input_shape)  # Be sure to call this somewhere!
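A numeric sanity check of the pseudo-inverse construction above, assuming a random 6x2 stand-in for the kernel: W = (M^T M)^(-1) M^T is the left pseudo-inverse of M, so W dot M should be (approximately) the 2x2 identity.

import tensorflow as tf
from tensorflow.keras import backend as K

M = tf.random.normal((6, 2))                 # D x d
mt = K.transpose(M)                          # d x D
W = K.dot(tf.linalg.inv(K.dot(mt, M)), mt)   # d x D, left pseudo-inverse
print(K.dot(W, M).numpy())                   # ~ [[1, 0], [0, 1]]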
Example 14: channel_shuffle_2
# Required module: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def channel_shuffle_2(x):
    dyn_shape = tf.shape(x)
    h, w = dyn_shape[1], dyn_shape[2]
    c = x.shape[3]
    x = K.reshape(x, [-1, h, w, 2, c // 2])
    x = K.permute_dimensions(x, [0, 1, 2, 4, 3])
    x = K.reshape(x, [-1, h, w, c])
    return x
Example 15: call
# Required module: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def call(self, x, **kwargs):
    # (x - y)^2 = x^2 + y^2 - 2 * x * y
    x_sq = K.expand_dims(K.sum(x ** 2, axis=2), axis=-1)
    y_sq = K.reshape(K.sum(self.kernel ** 2, axis=1),
                     (1, 1, self.n_shapelets))
    xy = K.dot(x, K.transpose(self.kernel))
    return (x_sq + y_sq - 2 * xy) / K.int_shape(self.kernel)[1]
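The expansion (x - y)^2 = x^2 + y^2 - 2xy avoids materializing pairwise differences; below is a small check with assumed sizes (2 segments of length 3 scored against 4 shapelets):

import tensorflow as tf
from tensorflow.keras import backend as K

x = tf.random.normal((1, 2, 3))      # (batch, n_segments, segment_len)
kernel = tf.random.normal((4, 3))    # (n_shapelets, segment_len)

x_sq = K.expand_dims(K.sum(x ** 2, axis=2), axis=-1)     # (1, 2, 1)
y_sq = K.reshape(K.sum(kernel ** 2, axis=1), (1, 1, 4))  # (1, 1, 4)
xy = K.dot(x, K.transpose(kernel))                       # (1, 2, 4)
dists = (x_sq + y_sq - 2 * xy) / 3                       # mean squared distance
print(dists.shape)                                       # (1, 2, 4)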