This article collects typical usage examples of the keras.backend.permute_dimensions method in Python. If you have been wondering what backend.permute_dimensions does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples for the containing module, keras.backend.
The following presents 14 code examples of the backend.permute_dimensions method, sorted by popularity by default.
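Before the examples, here is a minimal self-contained sketch (not taken from any of the collected projects) of what permute_dimensions does: it reorders a tensor's axes, analogous to numpy.transpose with an explicit axis pattern.

import numpy as np
from keras import backend as K

x = K.constant(np.zeros((2, 3, 5)))     # shape (2, 3, 5)
y = K.permute_dimensions(x, (0, 2, 1))  # swap the last two axes
print(K.int_shape(y))                   # (2, 5, 3)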
Example 1: call
# Required import: from keras import backend [as alias]
# Or: from keras.backend import permute_dimensions [as alias]
def call(self, x):
    mean = K.mean(x, axis=-1)
    std = K.std(x, axis=-1)

    if len(x.shape) == 3:
        mean = K.permute_dimensions(
            K.repeat(mean, x.shape.as_list()[-1]),
            [0, 2, 1]
        )
        std = K.permute_dimensions(
            K.repeat(std, x.shape.as_list()[-1]),
            [0, 2, 1]
        )
    elif len(x.shape) == 2:
        mean = K.reshape(
            K.repeat_elements(mean, x.shape.as_list()[-1], 0),
            (-1, x.shape.as_list()[-1])
        )
        std = K.reshape(
            K.repeat_elements(std, x.shape.as_list()[-1], 0),
            (-1, x.shape.as_list()[-1])
        )

    return self._g * (x - mean) / (std + self._epsilon) + self._b
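For readers porting this layer: the repeat/permute gymnastics above only broadcast the per-position statistics back to the input's shape. On a backend that broadcasts (e.g. TensorFlow), an equivalent and simpler body for call is the following sketch:

def call(self, x):
    # keepdims retains the reduced axis, so the (batch, time, 1)
    # statistics broadcast against the (batch, time, features) input.
    mean = K.mean(x, axis=-1, keepdims=True)
    std = K.std(x, axis=-1, keepdims=True)
    return self._g * (x - mean) / (std + self._epsilon) + self._b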
Example 2: GenerateMCSamples
# Required import: from keras import backend [as alias]
# Or: from keras.backend import permute_dimensions [as alias]
def GenerateMCSamples(inp, layers, K_mc=20):
    if K_mc == 1:
        return apply_layers(inp, layers)
    output_list = []
    for _ in range(K_mc):
        output_list += [apply_layers(inp, layers)]  # THIS IS BAD!!! we create new dense layers at every call!!!!

    def pack_out(output_list):
        #output = K.pack(output_list)  # K_mc x nb_batch x nb_classes
        output = K.stack(output_list)  # K_mc x nb_batch x nb_classes
        return K.permute_dimensions(output, (1, 0, 2))  # nb_batch x K_mc x nb_classes

    def pack_shape(s):
        s = s[0]
        assert len(s) == 2
        return (s[0], K_mc, s[1])

    out = Lambda(pack_out, output_shape=pack_shape)(output_list)
    return out
# evaluation for classification tasks
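apply_layers is a helper defined elsewhere in the same script and not shown on this page; a plausible minimal version (hypothetical) simply chains the given layers:

def apply_layers(inp, layers):
    # Hypothetical helper: feed the tensor through each Keras layer in turn.
    out = inp
    for layer in layers:
        out = layer(out)
    return out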
Example 3: crosschannelnormalization
# Required import: from keras import backend [as alias]
# Or: from keras.backend import permute_dimensions [as alias]
def crosschannelnormalization(alpha=1e-4, k=2, beta=0.75, n=5, **kwargs):
    """
    This is the function used for cross channel normalization in the
    original Alexnet
    """
    def f(X):
        b, ch, r, c = X.shape
        half = n // 2
        square = K.square(X)
        extra_channels = K.spatial_2d_padding(
            K.permute_dimensions(square, (0, 2, 3, 1)), (0, half))
        extra_channels = K.permute_dimensions(extra_channels, (0, 3, 1, 2))
        scale = k
        for i in range(n):
            scale += alpha * extra_channels[:, i:i + ch, :, :]
        scale = scale ** beta
        return X / scale

    return Lambda(f, output_shape=lambda input_shape: input_shape, **kwargs)
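Because crosschannelnormalization returns a Lambda layer, it drops into a model like any other layer. An illustrative (hypothetical) call site on a channels-first feature map:

lrn = crosschannelnormalization(name='lrn_1')  # extra kwargs are forwarded to Lambda
x = lrn(conv_output)  # conv_output shape: (batch, channels, rows, cols)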
Example 4: ifft2
# Required import: from keras import backend [as alias]
# Or: from keras.backend import permute_dimensions [as alias]
def ifft2(x):
    ff = x
    ff = KB.permute_dimensions(ff, (0, 2, 1))
    ff = KB.reshape(ff, (x.shape[0] * x.shape[2], x.shape[1]))
    tf = ifft(ff)
    tf = KB.reshape(tf, (x.shape[0], x.shape[2], x.shape[1]))
    tf = KB.permute_dimensions(tf, (0, 2, 1))
    tf = KB.reshape(tf, (x.shape[0] * x.shape[1], x.shape[2]))
    tt = ifft(tf)
    tt = KB.reshape(tt, (x.shape[0], x.shape[1], x.shape[2]))
    return tt
#
# FFT Layers:
#
# FFT: Batched 1-D FFT (Input: (Batch, FeatureMaps, TimeSamples))
# IFFT: Batched 1-D IFFT (Input: (Batch, FeatureMaps, FreqSamples))
# FFT2: Batched 2-D FFT (Input: (Batch, FeatureMaps, TimeSamplesH, TimeSamplesW))
# IFFT2: Batched 2-D IFFT (Input: (Batch, FeatureMaps, FreqSamplesH, FreqSamplesW))
#
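Note that ifft as used above is a project-specific helper (a batched 1-D inverse FFT along the last axis), not a keras.backend primitive. With the TensorFlow 1.x backend, a rough hypothetical stand-in could look like:

import tensorflow as tf

def ifft(x):
    # Hypothetical stand-in: inverse FFT over the innermost axis of a
    # complex64 tensor; the original project tracks real/imaginary parts
    # explicitly rather than relying on complex dtypes.
    return tf.ifft(tf.cast(x, tf.complex64))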
Example 5: tf_normal
# Required import: from keras import backend [as alias]
# Or: from keras.backend import permute_dimensions [as alias]
def tf_normal(y_true, mu, sigma, pi):
    rollout_length = K.shape(y_true)[1]
    y_true = K.tile(y_true, (1, 1, GAUSSIAN_MIXTURES))
    y_true = K.reshape(y_true, [-1, rollout_length, GAUSSIAN_MIXTURES, Z_DIM])

    oneDivSqrtTwoPI = 1 / math.sqrt(2 * math.pi)
    result = y_true - mu
    # result = K.permute_dimensions(result, [2, 1, 0])
    result = result * (1 / (sigma + 1e-8))
    result = -K.square(result) / 2
    result = K.exp(result) * (1 / (sigma + 1e-8)) * oneDivSqrtTwoPI
    result = result * pi
    result = K.sum(result, axis=2)  # sum over gaussians
    #result = K.prod(result, axis=2)  # multiply over latent dims
    return result
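For reference, the sequence above evaluates the diagonal Gaussian density N(y | mu, sigma) = exp(-(y - mu)^2 / (2 * sigma^2)) / (sigma * sqrt(2 * pi)) for each mixture component, weights it by the mixing coefficient pi, and sums over the GAUSSIAN_MIXTURES axis; the 1e-8 terms guard against division by zero when sigma collapses.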
Example 6: _diffs
# Required import: from keras import backend [as alias]
# Or: from keras.backend import permute_dimensions [as alias]
def _diffs(self, y):
    vol_shape = y.get_shape().as_list()[1:-1]
    ndims = len(vol_shape)

    df = [None] * ndims
    for i in range(ndims):
        d = i + 1
        # permute dimensions to put the ith dimension first
        r = [d, *range(d), *range(d + 1, ndims + 2)]
        y = K.permute_dimensions(y, r)
        dfi = y[1:, ...] - y[:-1, ...]

        # permute back
        # note: this might not be necessary for this loss specifically,
        # since the results are just summed over anyway.
        r = [*range(1, d + 1), 0, *range(d + 1, ndims + 2)]
        df[i] = K.permute_dimensions(dfi, r)

    return df
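To make the permutation concrete: for ndims = 3 and i = 0 (so d = 1), r = [1, 0, 2, 3, 4], which moves the first spatial axis in front of the batch axis so that y[1:, ...] - y[:-1, ...] differences along that spatial dimension; the second permutation then restores the original axis order.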
Example 7: prec_loss
# Required import: from keras import backend [as alias]
# Or: from keras.backend import permute_dimensions [as alias]
def prec_loss(self, y_pred):
    """
    a more manual implementation of the precision matrix term
        mu * P * mu    where P = D - A
    where D is the degree matrix and A is the adjacency matrix:
        mu * P * mu = sum_i mu_i sum_j (mu_i - mu_j)
                    = 0.5 * sum_{i,j} (mu_i - mu_j)^2
    where j ranges over the neighbors of i.

    Note: could probably do this with a difference filter, but the edges
    would be complicated unless tensorflow allowed for edge copying.
    """
    vol_shape = y_pred.get_shape().as_list()[1:-1]
    ndims = len(vol_shape)

    sm = 0
    for i in range(ndims):
        d = i + 1
        # permute dimensions to put the ith dimension first
        r = [d, *range(d), *range(d + 1, ndims + 2)]
        y = K.permute_dimensions(y_pred, r)
        df = y[1:, ...] - y[:-1, ...]
        sm += K.mean(df * df)

    return 0.5 * sm / ndims
Example 8: call
# Required import: from keras import backend [as alias]
# Or: from keras.backend import permute_dimensions [as alias]
def call(self, u_vecs):
    if self.share_weights:
        u_hat_vecs = K.conv1d(u_vecs, self.W)
    else:
        u_hat_vecs = K.local_conv1d(u_vecs, self.W, [1], [1])

    batch_size = K.shape(u_vecs)[0]
    input_num_capsule = K.shape(u_vecs)[1]
    u_hat_vecs = K.reshape(u_hat_vecs, (batch_size, input_num_capsule,
                                        self.num_capsule, self.dim_capsule))
    u_hat_vecs = K.permute_dimensions(u_hat_vecs, (0, 2, 1, 3))

    b = K.zeros_like(u_hat_vecs[:, :, :, 0])  # shape = [None, num_capsule, input_num_capsule]
    for i in range(self.routings):
        b = K.permute_dimensions(b, (0, 2, 1))  # shape = [None, input_num_capsule, num_capsule]
        c = K.softmax(b)
        c = K.permute_dimensions(c, (0, 2, 1))
        b = K.permute_dimensions(b, (0, 2, 1))
        outputs = self.activation(K.batch_dot(c, u_hat_vecs, [2, 2]))
        if i < self.routings - 1:
            b = K.batch_dot(outputs, u_hat_vecs, [2, 3])

    return outputs
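In the Capsule implementations collected here, self.activation is usually the 'squash' nonlinearity rather than a stock Keras activation; a common definition (shown for context, not part of this snippet) is:

def squash(x, axis=-1):
    # Shrink vector lengths into [0, 1) while preserving direction.
    s_squared_norm = K.sum(K.square(x), axis, keepdims=True)
    scale = s_squared_norm / (1 + s_squared_norm) / K.sqrt(s_squared_norm + K.epsilon())
    return scale * x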
Example 9: crosschannelnormalization
# Required import: from keras import backend [as alias]
# Or: from keras.backend import permute_dimensions [as alias]
def crosschannelnormalization(alpha=1e-4, k=2, beta=0.75, n=5, **kwargs):
    """
    This is the function used for cross channel normalization in the
    original Alexnet
    """
    def f(X):
        b, ch, r, c = X.shape
        half = n // 2
        square = K.square(X)
        extra_channels = K.spatial_2d_padding(K.permute_dimensions(square, (0, 2, 3, 1)))
        extra_channels = K.permute_dimensions(extra_channels, (0, 3, 1, 2))
        scale = k
        for i in range(n):
            scale += alpha * extra_channels[:, i:i + ch, :, :]
        scale = scale ** beta
        return X / scale

    return Lambda(f, output_shape=lambda input_shape: input_shape, **kwargs)
Example 10: _process_input
# Required import: from keras import backend [as alias]
# Or: from keras.backend import permute_dimensions [as alias]
def _process_input(self, x):
    """Apply logistic and softmax activations to input tensor"""
    logistic_activate = lambda x: 1.0 / (1.0 + K.exp(-x))

    (batch, w, h, channels) = x.get_shape()
    x_temp = K.permute_dimensions(x, (3, 0, 1, 2))
    x_t = []
    for i in range(self.num):
        k = self._entry_index(i, 0)
        x_t.extend([
            logistic_activate(K.gather(x_temp, (k, k + 1))),  # 0
            K.gather(x_temp, (k + 2, k + 3))])
        if self.background:
            x_t.append(K.gather(x_temp, (k + 4,)))
        else:
            x_t.append(logistic_activate(K.gather(x_temp, (k + 4,))))
        x_t.append(
            softmax(
                K.gather(x_temp, tuple(range(k + 5, k + self.coords + self.classes + 1))),
                axis=0))
    x_t = K.concatenate(x_t, axis=0)
    return K.permute_dimensions(x_t, (1, 2, 3, 0))
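Note that logistic_activate is simply an explicit sigmoid; the built-in backend op is equivalent:

logistic_activate = K.sigmoid  # same result as the lambda above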
Example 11: make_patches_grid
# Required import: from keras import backend [as alias]
# Or: from keras.backend import permute_dimensions [as alias]
def make_patches_grid(x, patch_size, patch_stride):
    '''Break image `x` up into a grid of patches.

    input shape: (channels, rows, cols)
    output shape: (rows, cols, channels, patch_rows, patch_cols)
    '''
    from theano.tensor.nnet.neighbours import images2neibs  # TODO: all K, no T
    x = K.expand_dims(x, 0)
    xs = K.shape(x)
    num_rows = 1 + (xs[-2] - patch_size) // patch_stride
    num_cols = 1 + (xs[-1] - patch_size) // patch_stride
    num_channels = xs[-3]
    patches = images2neibs(
        x, (patch_size, patch_size), (patch_stride, patch_stride), mode='valid')
    # neibs are sorted per-channel
    patches = K.reshape(patches,
                        (num_channels, K.shape(patches)[0] // num_channels,
                         patch_size, patch_size))
    patches = K.permute_dimensions(patches, (1, 0, 2, 3))
    # arrange in a 2d-grid (rows, cols, channels, px, py)
    patches = K.reshape(patches,
                        (num_rows, num_cols, num_channels, patch_size, patch_size))
    patches_norm = K.sqrt(K.sum(K.square(patches), axis=(2, 3, 4), keepdims=True))
    return patches, patches_norm
Example 12: call
# Required import: from keras import backend [as alias]
# Or: from keras.backend import permute_dimensions [as alias]
def call(self, u_vecs):
    if self.share_weights:
        u_hat_vecs = K.conv1d(u_vecs, self.W)
    else:
        u_hat_vecs = K.local_conv1d(u_vecs, self.W, [1], [1])

    batch_size = K.shape(u_vecs)[0]
    input_num_capsule = K.shape(u_vecs)[1]
    u_hat_vecs = K.reshape(u_hat_vecs, (batch_size, input_num_capsule,
                                        self.num_capsule, self.dim_capsule))
    u_hat_vecs = K.permute_dimensions(u_hat_vecs, (0, 2, 1, 3))
    # final u_hat_vecs.shape = [None, num_capsule, input_num_capsule, dim_capsule]

    b = K.zeros_like(u_hat_vecs[:, :, :, 0])  # shape = [None, num_capsule, input_num_capsule]
    outputs = None
    for i in range(self.routings):
        b = K.permute_dimensions(b, (0, 2, 1))  # shape = [None, input_num_capsule, num_capsule]
        c = K.softmax(b)
        c = K.permute_dimensions(c, (0, 2, 1))
        b = K.permute_dimensions(b, (0, 2, 1))
        outputs = self.activation(K.batch_dot(c, u_hat_vecs, [2, 2]))
        if i < self.routings - 1:
            b = K.batch_dot(outputs, u_hat_vecs, [2, 3])

    return outputs
Example 13: region_style_loss
# Required import: from keras import backend [as alias]
# Or: from keras.backend import permute_dimensions [as alias]
def region_style_loss(style_image, target_image, style_mask, target_mask):
    '''Calculate style loss between style_image and target_image,
    for one common region specified by their (boolean) masks
    '''
    assert 3 == K.ndim(style_image) == K.ndim(target_image)
    assert 2 == K.ndim(style_mask) == K.ndim(target_mask)

    if K.image_data_format() == 'channels_first':
        masked_style = style_image * style_mask
        masked_target = target_image * target_mask
        num_channels = K.shape(style_image)[0]
    else:
        masked_style = K.permute_dimensions(style_image, (2, 0, 1)) * style_mask
        masked_target = K.permute_dimensions(target_image, (2, 0, 1)) * target_mask
        num_channels = K.shape(style_image)[-1]
    num_channels = K.cast(num_channels, dtype='float32')

    s = gram_matrix(masked_style) / K.mean(style_mask) / num_channels
    c = gram_matrix(masked_target) / K.mean(target_mask) / num_channels
    return K.mean(K.square(s - c))
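gram_matrix is defined elsewhere in the same script; a standard definition, matching the Keras neural-doodle example, computes feature correlations on a channels-first 3-D tensor:

def gram_matrix(x):
    # Flatten each channel to a row vector, then take the outer product.
    assert K.ndim(x) == 3
    features = K.batch_flatten(x)
    gram = K.dot(features, K.transpose(features))
    return gram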
Example 14: call
# Required import: from keras import backend [as alias]
# Or: from keras.backend import permute_dimensions [as alias]
def call(self, u_vecs):
    if self.share_weights:
        u_hat_vecs = K.conv1d(u_vecs, self.W)
    else:
        u_hat_vecs = K.local_conv1d(u_vecs, self.W, [1], [1])

    batch_size = K.shape(u_vecs)[0]
    input_num_capsule = K.shape(u_vecs)[1]
    u_hat_vecs = K.reshape(u_hat_vecs, (batch_size, input_num_capsule,
                                        self.num_capsule, self.dim_capsule))
    u_hat_vecs = K.permute_dimensions(u_hat_vecs, (0, 2, 1, 3))
    # final u_hat_vecs.shape = [None, num_capsule, input_num_capsule, dim_capsule]

    b = K.zeros_like(u_hat_vecs[:, :, :, 0])  # shape = [None, num_capsule, input_num_capsule]
    for i in range(self.routings):
        b = K.permute_dimensions(b, (0, 2, 1))  # shape = [None, input_num_capsule, num_capsule]
        c = K.softmax(b)
        c = K.permute_dimensions(c, (0, 2, 1))
        b = K.permute_dimensions(b, (0, 2, 1))
        outputs = self.activation(tf.keras.backend.batch_dot(c, u_hat_vecs, [2, 2]))
        if i < self.routings - 1:
            b = tf.keras.backend.batch_dot(outputs, u_hat_vecs, [2, 3])

    return outputs
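For context, a typical (hypothetical) way to wire such a Capsule layer into a model, assuming the constructor exposes the attributes referenced above:

x = Capsule(num_capsule=10, dim_capsule=16, routings=3, share_weights=True)(x)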