This article collects typical usage examples of the keras.backend.transpose method in Python. If you are wondering what backend.transpose does or how to use it, the curated code examples below may help. You can also explore other usage examples from the keras.backend module.
The following lists 15 code examples of backend.transpose, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
Example 1: acf_loss
# Required import: from keras import backend [as alias]
# Or: from keras.backend import transpose [as alias]
def acf_loss(y_true, y_pred):
    """
    Loss based on the lag-1 autocorrelation of the residuals, averaged over
    the batch. The commented-out block generalizes this to multiple lags.
    """
    residuals = y_true - y_pred
    # Multi-lag version (e.g. lags = range(1, n_lags + 1)):
    # acf = []
    # for k in lags:
    #     mean = K.mean(residuals, axis=1, keepdims=True)
    #     autocorrelation_at_lag_k = K.square(K.sum((residuals[:, :-k] - mean) * (residuals[:, k:] - mean), axis=1) /
    #                                         K.sum(K.square(residuals - mean), axis=1))
    #     acf.append(autocorrelation_at_lag_k)
    # acf = K.transpose(K.tf.convert_to_tensor(acf))
    mean = K.mean(residuals, axis=1, keepdims=True)
    autocorrelation_at_lag_1 = K.square(K.sum((residuals[:, :-1] - mean) * (residuals[:, 1:] - mean), axis=1) /
                                        K.sum(K.square(residuals - mean), axis=1))
    return K.mean(autocorrelation_at_lag_1)
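As a usage note, a custom loss like this is passed directly to compile(). A minimal, self-contained sketch (the toy model below is an assumption for illustration, not part of the original example):

from keras.models import Sequential
from keras.layers import Dense

# Hypothetical toy model whose 20-dim output plays the role of a residual sequence.
model = Sequential([Dense(20, input_shape=(20,))])
model.compile(optimizer='adam', loss=acf_loss)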
Example 2: gram_matrix
# Required import: from keras import backend [as alias]
# Or: from keras.backend import transpose [as alias]
def gram_matrix(x):
    """
    Computes the Gram matrix of the input tensor x.
    Input
    -----
    - x: input tensor of shape (C x H x W) under Theano ordering,
      or (H x W x C) under TensorFlow ordering
    Returns
    -------
    - F . F^T, where F is x reshaped to shape (C x H*W)
    Note that this is computed efficiently by flattening each channel's
    feature map into one row of a 2D matrix first.
    """
    # assert K.ndim(x) == 3
    if K.image_dim_ordering() == 'th':
        features = K.batch_flatten(x)
    else:
        features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
    return K.dot(features, K.transpose(features))
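The Gram matrix is the heart of the style loss in neural style transfer. A hedged sketch of that use, following the usual Gatys et al. formulation (the normalization constants are common choices, not taken from the original source):

def style_loss(style_features, combination_features, channels=3, size=256 * 256):
    # Compare Gram matrices of two (H, W, C) feature tensors.
    S = gram_matrix(style_features)
    G = gram_matrix(combination_features)
    return K.sum(K.square(S - G)) / (4.0 * (channels ** 2) * (size ** 2))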
Example 3: call
# Required import: from keras import backend [as alias]
# Or: from keras.backend import transpose [as alias]
def call(self, input_tensor, mask=None):
    x = input_tensor[0]  # sequence encodings: (batch, steps, dim)
    y = input_tensor[1]  # context vector: (batch, dim)
    mask = mask[0]
    # Project the context vector: y <- (W . y^T)^T, still (batch, dim)
    y = K.transpose(K.dot(self.W, K.transpose(y)))
    # Broadcast the projected context across all time steps
    y = K.expand_dims(y, axis=-2)
    y = K.repeat_elements(y, self.steps, axis=1)
    # Unnormalized attention scores: (batch, steps)
    eij = K.sum(x * y, axis=-1)
    if self.bias:
        b = K.repeat_elements(self.b, self.steps, axis=0)
        eij += b
    eij = K.tanh(eij)
    # Masked softmax over the time steps
    a = K.exp(eij)
    if mask is not None:
        a *= K.cast(mask, K.floatx())
    a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
    return a
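The exp/mask/normalize tail of this method is a masked softmax. A small numpy check (not from the original source) shows that masked positions receive zero attention weight:

import numpy as np

e = np.array([[1.0, 2.0, 3.0]])           # unnormalized scores for 3 time steps
step_mask = np.array([[1.0, 1.0, 0.0]])   # last step is padding
a = np.exp(e) * step_mask
a /= a.sum(axis=1, keepdims=True) + 1e-7  # epsilon guards against all-masked rows
print(a)                                  # third weight is exactly 0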
Example 4: deprocess_image
# Required import: from keras import backend [as alias]
# Or: from keras.backend import transpose [as alias]
def deprocess_image(x):
if K.image_data_format() == 'channels_first':
x = x.reshape((3, img_nrows, img_ncols))
x = x.transpose((1, 2, 0))
else:
x = x.reshape((img_nrows, img_ncols, 3))
# Remove zero-center by mean pixel
x[:, :, 0] += 103.939
x[:, :, 1] += 116.779
x[:, :, 2] += 123.68
# 'BGR'->'RGB'
x = x[:, :, ::-1]
x = np.clip(x, 0, 255).astype('uint8')
return x
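For context, here is a hedged sketch of the forward preprocessing that deprocess_image undoes, using the standard Keras VGG helper (the function name and its arguments are assumptions, not from the original source):

import numpy as np
from keras.preprocessing.image import load_img, img_to_array
from keras.applications import vgg19

def preprocess_image(image_path, img_nrows, img_ncols):
    # Resize, add a batch axis, then apply VGG's mean-pixel subtraction
    # and RGB->BGR swap -- exactly what deprocess_image reverses.
    img = load_img(image_path, target_size=(img_nrows, img_ncols))
    img = img_to_array(img)
    img = np.expand_dims(img, axis=0)
    return vgg19.preprocess_input(img)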
Example 5: call
# Required import: from keras import backend [as alias]
# Or: from keras.backend import transpose [as alias]
def call(self, input_tensor, mask=None):
    x = input_tensor[0]       # sequence encodings: (batch, steps, dim)
    aspect = input_tensor[1]  # aspect embedding: (batch, dim)
    mask = mask[0]
    # Project the aspect vector and broadcast it across the time steps
    aspect = K.transpose(K.dot(self.W, K.transpose(aspect)))
    aspect = K.expand_dims(aspect, axis=-2)
    aspect = K.repeat_elements(aspect, self.steps, axis=1)
    eij = K.sum(x * aspect, axis=-1)
    if self.bias:
        b = K.repeat_elements(self.b, self.steps, axis=0)
        eij += b
    eij = K.tanh(eij)
    # Masked softmax over the time steps
    a = K.exp(eij)
    if mask is not None:
        a *= K.cast(mask, K.floatx())
    a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())
    return a
Example 6: build
# Required import: from keras import backend [as alias]
# Or: from keras.backend import transpose [as alias]
def build(self, input_shape):
    assert len(input_shape) >= 2
    input_dim = input_shape[-1]
    # Tie this layer's kernel to another layer's, optionally transposed
    # (e.g. a decoder reusing its encoder's weights).
    if self.transpose:
        self.kernel = K.transpose(self.tie_to.kernel)
    else:
        self.kernel = self.tie_to.kernel
    if self.use_bias:
        self.bias = self.add_weight(shape=(self.units,),
                                    initializer=self.bias_initializer,
                                    name='bias',
                                    regularizer=self.bias_regularizer,
                                    constraint=self.bias_constraint,
                                    trainable=self.trainable)
    else:
        self.bias = None
    self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})
    self.built = True
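The effect of the transpose flag is easiest to see shape-wise. A small numpy sketch (the sizes are arbitrary illustrations, not from the original source):

import numpy as np

encoder_kernel = np.random.randn(784, 32)  # kernel of a Dense(32) on 784-dim input
decoder_kernel = encoder_kernel.T          # tied and transposed: maps 32 -> 784
h = np.random.randn(1, 784) @ encoder_kernel
x_rec = h @ decoder_kernel                 # back to the input dimension
print(h.shape, x_rec.shape)                # (1, 32) (1, 784)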
Example 7: __init__
# Required import: from keras import backend [as alias]
# Or: from keras.backend import transpose [as alias]
def __init__(self,
vocab_size,
sequence_size,
setting=None,
checkpoint_path="",
temperature=10,
tying=False):
super().__init__(vocab_size, sequence_size, setting, checkpoint_path)
self.temperature = temperature
self.tying = tying
self.gamma = self.setting.gamma
    if tying:
        self.model.pop()  # remove the softmax activation
        self.model.pop()  # remove the projection layer; tie the output weights to the embedding instead
        self.model.add(Lambda(lambda x: K.dot(x, K.transpose(self.embedding.embeddings))))
        self.model.add(Activation("softmax"))
Example 8: augmented_loss
# Required import: from keras import backend [as alias]
# Or: from keras.backend import transpose [as alias]
def augmented_loss(self, y_true, y_pred):
    _y_pred = Activation("softmax")(y_pred)
    loss = K.categorical_crossentropy(_y_pred, y_true)  # Keras 1.x argument order: (output, target)
    # y is (batch x seq x vocab)
    y_indexes = K.argmax(y_true, axis=2)  # turn one-hot vectors into indices (batch x seq)
    y_vectors = self.embedding(y_indexes)  # look up the vectors (batch x seq x vector_length)
    #v_length = self.setting.vector_length
    #y_vectors = K.reshape(y_vectors, (-1, v_length))
    #y_t = K.map_fn(lambda v: K.dot(self.embedding.embeddings, K.reshape(v, (-1, 1))), y_vectors)
    #y_t = K.squeeze(y_t, axis=2)  # unknown but necessary operation
    #y_t = K.reshape(y_t, (-1, self.sequence_size, self.vocab_size))
    # vector-embedding dot products (batch x seq x vocab)
    y_t = tf.tensordot(y_vectors, K.transpose(self.embedding.embeddings), 1)
    y_t = K.reshape(y_t, (-1, self.sequence_size, self.vocab_size))  # set the shape explicitly
    # soften both distributions with the temperature before the KL term
    y_t = K.softmax(y_t / self.temperature)
    _y_pred_t = Activation("softmax")(y_pred / self.temperature)
    aug_loss = kullback_leibler_divergence(y_t, _y_pred_t)
    loss += (self.gamma * self.temperature) * aug_loss
    return loss
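The division by self.temperature softens both distributions before the KL term. A small numpy illustration of temperature scaling (not from the original source):

import numpy as np

def softmax_with_temperature(logits, temperature):
    # Higher temperature -> flatter distribution, which is what lets the
    # KL term carry information about near-miss classes.
    z = logits / temperature
    e = np.exp(z - z.max(axis=-1, keepdims=True))
    return e / e.sum(axis=-1, keepdims=True)

print(softmax_with_temperature(np.array([2.0, 1.0, 0.1]), 1.0))
print(softmax_with_temperature(np.array([2.0, 1.0, 0.1]), 10.0))  # much flatter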
Example 9: gram_matrix
# Required import: from keras import backend [as alias]
# Or: from keras.backend import transpose [as alias]
def gram_matrix(x):
features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
gram = K.dot(features, K.transpose(features))
return gram
Example 10: rbf_moment_matching
# Required import: from keras import backend [as alias]
# Or: from keras.backend import transpose [as alias]
def rbf_moment_matching(y_true, y_pred, sigmas=[2, 5, 10, 20, 40, 80]):
"""Generative moment matching loss with RBF kernel.
Reference: https://arxiv.org/abs/1502.02761
"""
    warnings.warn('Moment matching loss is still in development.')  # requires: import warnings
if len(K.int_shape(y_pred)) != 2 or len(K.int_shape(y_true)) != 2:
raise ValueError('RBF Moment Matching function currently only works '
'for outputs with shape (batch_size, num_features).'
'Got y_true="%s" and y_pred="%s".' %
(str(K.int_shape(y_pred)), str(K.int_shape(y_true))))
sigmas = list(sigmas) if isinstance(sigmas, (list, tuple)) else [sigmas]
x = K.concatenate([y_pred, y_true], 0)
# Performs dot product between all combinations of rows in X.
xx = K.dot(x, K.transpose(x)) # (batch_size, batch_size)
# Performs dot product of all rows with themselves.
    x2 = K.sum(x * x, 1, keepdims=True)  # (batch_size, 1)
# Gets exponent entries of the RBF kernel (without sigmas).
exponent = xx - 0.5 * x2 - 0.5 * K.transpose(x2)
# Applies all the sigmas.
total_loss = None
for sigma in sigmas:
kernel_val = K.exp(exponent / sigma)
loss = K.sum(kernel_val)
total_loss = loss if total_loss is None else loss + total_loss
return total_loss
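A hedged smoke-test sketch (random placeholder inputs, not from the original source) to confirm the (batch_size, num_features) contract:

import numpy as np

y_a = K.constant(np.random.rand(8, 4).astype('float32'))
y_b = K.constant(np.random.rand(8, 4).astype('float32'))
print(K.eval(rbf_moment_matching(y_a, y_b)))  # a scalar kernel sum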
Example 11: yolo_head
# Required import: from keras import backend [as alias]
# Or: from keras.backend import transpose [as alias]
def yolo_head(feats, anchors, num_classes, n):
    """Convert final layer features to bounding box parameters.
    n selects the output level, which sets the stride used below (0: 32, 1: 16, 2: 8).
    """
num_anchors = len(anchors)
# Reshape to batch, height, width, num_anchors, box_params.
anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])
conv_dims = K.shape(feats)[1:3] # assuming channels last
# In YOLO the height index is the inner most iteration.
conv_height_index = K.arange(0, stop=conv_dims[0])
conv_width_index = K.arange(0, stop=conv_dims[1])
conv_height_index = K.tile(conv_height_index, [conv_dims[1]])
conv_width_index = K.tile(K.expand_dims(conv_width_index, 0), [conv_dims[0], 1])
conv_width_index = K.flatten(K.transpose(conv_width_index))
conv_index = K.transpose(K.stack([conv_height_index, conv_width_index]))
conv_index = K.reshape(conv_index, [1, conv_dims[0], conv_dims[1], 1, 2])
conv_index = K.cast(conv_index, K.dtype(feats))
feats = K.reshape(feats, [-1, conv_dims[0], conv_dims[1], num_anchors, num_classes + 5])
conv_dims = K.cast(K.reshape(conv_dims, [1, 1, 1, 1, 2]), K.dtype(feats))
box_xy = K.sigmoid(feats[..., :2])
box_wh = K.exp(feats[..., 2:4])
box_confidence = K.sigmoid(feats[..., 4:5])
box_class_probs = K.sigmoid(feats[..., 5:])
    # Adjust predictions to each spatial grid point and anchor size.
    # Note: YOLO iterates over the height index before the width index.
    # TODO: It works with +1, don't know why.
    box_xy = (box_xy + conv_index + 1) / conv_dims
    # TODO: Input layer size
    box_wh = box_wh * anchors_tensor / conv_dims / {0: 32, 1: 16, 2: 8}[n]
return [box_xy, box_wh, box_confidence, box_class_probs]
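The tile/transpose dance that builds conv_index is easiest to see in numpy. A small sketch (not from the original source) for a 3x2 grid:

import numpy as np

h, w = 3, 2
height_index = np.tile(np.arange(h), w)               # [0 1 2 0 1 2]
width_index = np.tile(np.arange(w)[None, :], (h, 1))  # h rows of [0 1]
width_index = width_index.T.flatten()                 # [0 0 0 1 1 1]
conv_index = np.stack([height_index, width_index]).T  # one (row, col) pair per cell
print(conv_index)  # the height index varies fastest, matching the note above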
Example 12: grammian_matrix
# Required import: from keras import backend [as alias]
# Or: from keras.backend import transpose [as alias]
def grammian_matrix(matrix):
flattened_matrix = K.batch_flatten(K.permute_dimensions(matrix, (2, 0, 1)))
matrix_transpose_dot = K.dot(flattened_matrix, K.transpose(flattened_matrix))
element_count = matrix.get_shape().num_elements()
return matrix_transpose_dot / element_count
Example 13: call
# Required import: from keras import backend [as alias]
# Or: from keras.backend import transpose [as alias]
def call(self, inputs, training=None):
def _l2normalize(v, eps=1e-12):
return v / (K.sum(v ** 2) ** 0.5 + eps)
def power_iteration(W, u):
_u = u
_v = _l2normalize(K.dot(_u, K.transpose(W)))
_u = _l2normalize(K.dot(_v, W))
return _u, _v
    W_shape = self.kernel.shape.as_list()
    # Flatten the kernel to a 2D matrix
    W_reshaped = K.reshape(self.kernel, [-1, W_shape[-1]])
    _u, _v = power_iteration(W_reshaped, self.u)
    # Calculate sigma, the approximate largest singular value
    sigma = K.dot(_v, W_reshaped)
    sigma = K.dot(sigma, K.transpose(_u))
    # Normalize the kernel by sigma
    W_bar = W_reshaped / sigma
    # Reshape the weight tensor back
if training in {0, False}:
W_bar = K.reshape(W_bar, W_shape)
else:
with tf.control_dependencies([self.u.assign(_u)]):
W_bar = K.reshape(W_bar, W_shape)
output = K.dot(inputs, W_bar)
if self.use_bias:
output = K.bias_add(output, self.bias, data_format='channels_last')
if self.activation is not None:
output = self.activation(output)
return output
Example 14: power_iteration
# Required import: from keras import backend [as alias]
# Or: from keras.backend import transpose [as alias]
def power_iteration(self, u, W):
    '''
    According to the paper, a single power-iteration step is enough.
    '''
    v = self._l2normalize(K.dot(u, K.transpose(W)))
    u = self._l2normalize(K.dot(v, W))
    return u, v
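A quick numpy check (not from the original source) that iterating this update converges to the largest singular value:

import numpy as np

W = np.random.randn(16, 8)
u = np.random.randn(1, 8)
for _ in range(50):  # one step suffices during training; more iterations here to converge fully
    v = u @ W.T
    v /= np.linalg.norm(v)
    u = v @ W
    u /= np.linalg.norm(u)
sigma = (v @ W @ u.T).item()
print(sigma, np.linalg.svd(W, compute_uv=False)[0])  # the two values should agree closely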
Example 15: gram_matrix
# Required import: from keras import backend [as alias]
# Or: from keras.backend import transpose [as alias]
def gram_matrix(x, norm_by_channels=False):
'''
Returns the Gram matrix of the tensor x.
'''
    if K.ndim(x) == 3:
        features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
        shape = K.shape(x)
        H, W, C = shape[0], shape[1], shape[2]  # the input is (H, W, C)
gram = K.dot(features, K.transpose(features))
elif K.ndim(x) == 4:
        # Swap from (B, H, W, C) to (B, C, H, W)
        x = K.permute_dimensions(x, (0, 3, 1, 2))
shape = K.shape(x)
B, C, H, W = shape[0], shape[1], shape[2], shape[3]
# Reshape as a batch of 2D matrices with vectorized channels
features = K.reshape(x, K.stack([B, C, H*W]))
# This is a batch of Gram matrices (B, C, C).
gram = K.batch_dot(features, features, axes=2)
else:
raise ValueError('The input tensor should be either a 3d (H, W, C) or 4d (B, H, W, C) tensor.')
# Normalize the Gram matrix
if norm_by_channels:
denominator = C * H * W # Normalization from Johnson
else:
denominator = H * W # Normalization from Google
gram = gram / K.cast(denominator, x.dtype)
return gram
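A hedged consistency check (random data, not from the original source): the 4d batch branch should reproduce the 3d branch for a batch of one:

import numpy as np

x3 = np.random.rand(5, 5, 3).astype('float32')          # a single (H, W, C) feature map
g3 = K.eval(gram_matrix(K.constant(x3)))
g4 = K.eval(gram_matrix(K.constant(x3[None, ...])))[0]  # the same map as a (1, H, W, C) batch
print(np.allclose(g3, g4, atol=1e-5))                   # expected: True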