This article collects typical usage examples of the keras.backend.exp method in Python. If you have been wondering how exactly backend.exp works and how to use it, the curated code examples here may help; you can also explore the other members of the keras.backend module.
The following 15 code examples of backend.exp are sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python examples.
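As quick orientation before the examples: backend.exp is the backend-agnostic elementwise exponential. A minimal standalone call (K.constant and K.eval are part of the Keras backend API; printed values are approximate):

from keras import backend as K

x = K.constant([0.0, 1.0, 2.0])
print(K.eval(K.exp(x)))  # [1.0, 2.7182817, 7.389056]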
Example 1: call
# Required import: from keras import backend [as alias]
# Or: from keras.backend import exp [as alias]
def call(self, x, mask=None):
    # computes a probability distribution over the timesteps
    # uses the 'max trick' for numerical stability
    # reshape is done to avoid an issue with TensorFlow
    # and 1-dimensional weights
    logits = K.dot(x, self.W)
    x_shape = K.shape(x)
    logits = K.reshape(logits, (x_shape[0], x_shape[1]))
    ai = K.exp(logits - K.max(logits, axis=-1, keepdims=True))

    # masked timesteps have zero weight
    if mask is not None:
        mask = K.cast(mask, K.floatx())
        ai = ai * mask
    att_weights = ai / (K.sum(ai, axis=1, keepdims=True) + K.epsilon())
    weighted_input = x * K.expand_dims(att_weights)
    result = K.sum(weighted_input, axis=1)
    if self.return_attention:
        return [result, att_weights]
    return result
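The 'max trick' mentioned in the comments is easy to verify in isolation. The sketch below (not part of the original layer) shows that subtracting the row maximum before exponentiating avoids overflow while leaving the normalized weights unchanged:

from keras import backend as K

logits = K.constant([[1000.0, 1001.0, 1002.0]])  # naive K.exp(logits) would overflow
ai = K.exp(logits - K.max(logits, axis=-1, keepdims=True))
att_weights = ai / K.sum(ai, axis=-1, keepdims=True)
print(K.eval(att_weights))  # ~[[0.090, 0.245, 0.665]]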
Example 2: build_encoder
# Required import: from keras import backend [as alias]
# Or: from keras.backend import exp [as alias]
def build_encoder(self):
    # Encoder
    img = Input(shape=self.img_shape)

    h = Flatten()(img)
    h = Dense(512)(h)
    h = LeakyReLU(alpha=0.2)(h)
    h = Dense(512)(h)
    h = LeakyReLU(alpha=0.2)(h)
    mu = Dense(self.latent_dim)(h)
    log_var = Dense(self.latent_dim)(h)

    # Reparameterization: z = mu + sigma * eps, with sigma = exp(log_var / 2).
    # Note: lowercase `merge` is the Keras 1 API; in Keras 2 the same
    # expression would be written with a Lambda layer.
    latent_repr = merge([mu, log_var],
                        mode=lambda p: p[0] + K.random_normal(K.shape(p[0])) * K.exp(p[1] / 2),
                        output_shape=lambda p: p[0])

    return Model(img, latent_repr)
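The K.exp(p[1] / 2) term is what turns the predicted log-variance into a standard deviation, since exp(log sigma^2 / 2) = sigma. A numpy-only sanity check of that identity (not part of the original model):

import numpy as np

log_var = np.array([-2.0, 0.0, 2.0])
sigma = np.exp(log_var / 2)  # standard deviation recovered from log-variance
print(np.allclose(sigma ** 2, np.exp(log_var)))  # True: sigma^2 equals the variance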
Example 3: sampling
# Required import: from keras import backend [as alias]
# Or: from keras.backend import exp [as alias]
def sampling(args: tuple):
    """
    Reparameterization trick: sample z from a unit Gaussian
    :param args: (tensor, tensor) mean and log of variance of q(z|x)
    :returns tensor: sampled latent vector z
    """
    # unpack the input tuple
    z_mean, z_log_var = args

    # mini-batch size
    mb_size = K.shape(z_mean)[0]

    # latent space size
    dim = K.int_shape(z_mean)[1]

    # random normal vector with mean=0 and std=1.0
    epsilon = K.random_normal(shape=(mb_size, dim))

    return z_mean + K.exp(0.5 * z_log_var) * epsilon
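Such a sampling function is typically wrapped in a Lambda layer between the encoder's mean/log-variance heads and the decoder. A minimal wiring sketch, assuming an illustrative input size of 784 and a 2-dimensional latent space (neither value is taken from the original repo):

from keras.layers import Input, Dense, Lambda
from keras.models import Model

inputs = Input(shape=(784,))
h = Dense(256, activation='relu')(inputs)
z_mean = Dense(2)(h)
z_log_var = Dense(2)(h)
z = Lambda(sampling, output_shape=(2,))([z_mean, z_log_var])  # uses the function above
encoder = Model(inputs, [z_mean, z_log_var, z])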
Example 4: call
# Required import: from keras import backend [as alias]
# Or: from keras.backend import exp [as alias]
def call(self, x, mask=None):
    uit = dot_product(x, self.W)

    if self.bias:
        uit += self.b

    uit = K.tanh(uit)
    ait = dot_product(uit, self.u)

    a = K.exp(ait)

    # apply mask after the exp; weights will be re-normalized next
    if mask is not None:
        # Cast the mask to floatX to avoid float64 upcasting in theano
        a *= K.cast(mask, K.floatx())

    # In some cases, especially early in training, the sum may be almost zero,
    # which produces NaNs. A workaround is to add a very small positive number ε to the sum.
    # a /= K.cast(K.sum(a, axis=1, keepdims=True), K.floatx())
    a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())

    a = K.expand_dims(a)
    weighted_input = x * a
    return K.sum(weighted_input, axis=1)
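The mask-then-renormalize pattern above can be checked in isolation. A standalone sketch (the values are arbitrary) showing that masked timesteps end up with exactly zero weight:

from keras import backend as K

a = K.exp(K.constant([[0.5, 1.5, -0.3]]))
mask = K.constant([[1.0, 1.0, 0.0]])  # last timestep is padding
a = a * mask
a = a / (K.sum(a, axis=1, keepdims=True) + K.epsilon())
print(K.eval(a))  # last weight is 0; the first two sum to ~1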
Example 5: gen_cosine_amp
# Required import: from keras import backend [as alias]
# Or: from keras.backend import exp [as alias]
def gen_cosine_amp(amp=100, period=1000, x0=0, xn=50000, step=1, k=0.0001):
    """Generates an absolute cosine time series with exponentially
    decreasing amplitude.

    Arguments:
        amp: amplitude of the cosine function
        period: period of the cosine function
        x0: initial x of the time series
        xn: final x of the time series
        step: step of the time series discretization
        k: exponential decay rate
    """
    # also requires: import numpy as np
    cos = np.zeros(((xn - x0) * step, 1, 1))
    for i in range(len(cos)):
        idx = x0 + i * step
        cos[i, 0, 0] = amp * np.cos(2 * np.pi * idx / period)
        cos[i, 0, 0] = cos[i, 0, 0] * np.exp(-k * idx)
    return cos
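A sample invocation (the parameters here are arbitrary, chosen just to keep the series short); the result has the (timesteps, 1, 1) shape expected by a stateful Keras LSTM:

import numpy as np

cos = gen_cosine_amp(amp=100, period=25, xn=200)
print(cos.shape)  # (200, 1, 1); the exp(-k * idx) factor slowly shrinks the envelope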
Example 6: sampling
# Required import: from keras import backend [as alias]
# Or: from keras.backend import exp [as alias]
def sampling(self, args):
    """Reparameterization by sampling from a Gaussian, N(0, I).

    Samples epsilon = N(0, I) instead of sampling from the likelihood Q(z|X);
    the latent variable is then z = z_mean + sqrt(var) * epsilon.

    Parameters
    ----------
    args : tensor
        Mean and log of variance of Q(z|X).

    Returns
    -------
    z : tensor
        Sampled latent variable.
    """
    z_mean, z_log = args
    batch = K.shape(z_mean)[0]                     # batch size
    dim = K.int_shape(z_mean)[1]                   # latent dimension
    epsilon = K.random_normal(shape=(batch, dim))  # mean=0, std=1.0
    return z_mean + K.exp(0.5 * z_log) * epsilon
Example 7: call
# Required import: from keras import backend [as alias]
# Or: from keras.backend import exp [as alias]
def call(self, x, mask=None):
    eij = dot_product(x, self.W)

    if self.bias:
        eij += self.b

    eij = K.tanh(eij)
    a = K.exp(eij)

    if mask is not None:
        a *= K.cast(mask, K.floatx())
    a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())

    weighted_input = x * K.expand_dims(a)
    result = K.sum(weighted_input, axis=1)

    if self.return_attention:
        return [result, a]
    return result
Example 8: call
# Required import: from keras import backend [as alias]
# Or: from keras.backend import exp [as alias]
def call(self, x, mask=None):
    # size of x: [batch_size, seq_len, attention_dim]
    # size of u: [batch_size, attention_dim]
    # uit = tanh(xW + b)
    uit = K.tanh(K.bias_add(K.dot(x, self.W), self.b))
    ait = K.dot(uit, self.u)
    ait = K.squeeze(ait, -1)

    ait = K.exp(ait)

    if mask is not None:
        # Cast the mask to floatX to avoid float64 upcasting in theano
        ait *= K.cast(mask, K.floatx())
    ait /= K.cast(K.sum(ait, axis=1, keepdims=True) + K.epsilon(), K.floatx())
    ait = K.expand_dims(ait)
    weighted_input = x * ait
    output = K.sum(weighted_input, axis=1)
    return output
Example 9: rbf_moment_matching
# Required import: from keras import backend [as alias]
# Or: from keras.backend import exp [as alias]
def rbf_moment_matching(y_true, y_pred, sigmas=[2, 5, 10, 20, 40, 80]):
    """Generative moment matching loss with an RBF kernel.

    Reference: https://arxiv.org/abs/1502.02761
    """
    # also requires: import warnings
    warnings.warn('Moment matching loss is still in development.')

    if len(K.int_shape(y_pred)) != 2 or len(K.int_shape(y_true)) != 2:
        raise ValueError('RBF moment matching currently only works '
                         'for outputs with shape (batch_size, num_features). '
                         'Got y_true="%s" and y_pred="%s".' %
                         (str(K.int_shape(y_pred)), str(K.int_shape(y_true))))
    sigmas = list(sigmas) if isinstance(sigmas, (list, tuple)) else [sigmas]

    x = K.concatenate([y_pred, y_true], 0)

    # Performs dot products between all combinations of rows in x.
    xx = K.dot(x, K.transpose(x))  # (batch_size, batch_size)

    # Performs dot products of all rows with themselves.
    x2 = K.sum(x * x, 1, keepdims=True)  # (batch_size, 1)

    # Gets the exponent entries of the RBF kernel (without sigmas):
    # xx - 0.5 * x2 - 0.5 * x2^T == -0.5 * ||x_i - x_j||^2
    exponent = xx - 0.5 * x2 - 0.5 * K.transpose(x2)

    # Applies all the sigmas.
    total_loss = None
    for sigma in sigmas:
        kernel_val = K.exp(exponent / sigma)
        loss = K.sum(kernel_val)
        total_loss = loss if total_loss is None else loss + total_loss
    return total_loss
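A quick smoke test on random data (a sketch; assumes warnings and numpy are imported in the loss's module and that K.eval can evaluate the result, as it can with the TensorFlow backend):

import numpy as np
from keras import backend as K

y_true = K.constant(np.random.rand(8, 4))
y_pred = K.constant(np.random.rand(8, 4))
print(K.eval(rbf_moment_matching(y_true, y_pred)))  # a positive scalar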
Example 10: exp_l1
# Required import: from keras import backend [as alias]
# Or: from keras.backend import exp [as alias]
def exp_l1(a, b):
    """Exponential of the L1 similarity. Maximum is 1 (a == b), minimum is 0."""
    return K.exp(l1(a, b))
Example 11: exp_l2
# Required import: from keras import backend [as alias]
# Or: from keras.backend import exp [as alias]
def exp_l2(a, b):
    """Exponential of the L2 similarity. Maximum is 1 (a == b), minimum is 0."""
    return K.exp(l2(a, b))
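The l1 and l2 helpers are not shown on this page. For the docstrings to hold (maximum of 1 exactly when a == b), they presumably return negative distances; plausible definitions, labeled here as assumptions rather than the original code, would be:

from keras import backend as K

def l1(a, b):
    # assumed helper: negative L1 (Manhattan) distance
    return -K.sum(K.abs(a - b), axis=-1, keepdims=True)

def l2(a, b):
    # assumed helper: negative squared L2 (Euclidean) distance
    return -K.sum(K.square(a - b), axis=-1, keepdims=True)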
Example 12: exponent_neg_manhattan_distance
# Required import: from keras import backend [as alias]
# Or: from keras.backend import exp [as alias]
def exponent_neg_manhattan_distance(left, right):
    '''
    Purpose : Helper function for the similarity estimate of the LSTM outputs
    Inputs  : Two n-dimensional vectors
    Output  : exp(-Manhattan distance) between the input vectors, a similarity in (0, 1]
    '''
    return K.exp(-K.sum(K.abs(left - right), axis=1, keepdims=True))
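In a Siamese ("MaLSTM") setup this function is typically applied through a Lambda layer to the outputs of a shared LSTM. A wiring sketch; the sequence length, embedding size, and LSTM width below are illustrative assumptions, not values from the original repo:

from keras.layers import Input, LSTM, Lambda
from keras.models import Model

left_input = Input(shape=(20, 300))
right_input = Input(shape=(20, 300))
shared_lstm = LSTM(50)  # the same weights encode both sides

malstm_similarity = Lambda(
    lambda t: exponent_neg_manhattan_distance(t[0], t[1]),
    output_shape=(1,))([shared_lstm(left_input), shared_lstm(right_input)])
model = Model([left_input, right_input], malstm_similarity)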
Example 13: exponent_neg_manhattan_distance
# Required import: from keras import backend [as alias]
# Or: from keras.backend import exp [as alias]
def exponent_neg_manhattan_distance(left, right):
    '''
    Purpose : Helper function for the similarity estimate of the LSTM outputs
    Inputs  : Two n-dimensional vectors
    Output  : exp(-Manhattan distance) between the input vectors, a similarity in (0, 1]
    '''
    return K.exp(-K.sum(K.abs(left - right), axis=1, keepdims=True))
#print("\n Helper functions loaded")
# Based on the training set, a keep list of common dot words was prepared
Example 14: softmax
# Required import: from keras import backend [as alias]
# Or: from keras.backend import exp [as alias]
def softmax(x, axis=-1):
    """
    Self-defined softmax function; the max is subtracted for numerical stability.
    """
    x = K.exp(x - K.max(x, axis=axis, keepdims=True))
    x /= K.sum(x, axis=axis, keepdims=True)
    return x
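A standalone check that this matches the built-in K.softmax (values approximate):

from keras import backend as K

x = K.constant([[1.0, 2.0, 3.0]])
print(K.eval(softmax(x)))    # ~[[0.090, 0.245, 0.665]]
print(K.eval(K.softmax(x)))  # same values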
Example 15: rbf
# Required import: from keras import backend [as alias]
# Or: from keras.backend import exp [as alias]
def rbf(x):
    # This is not really a radial basis function, but it is similar:
    # a Gaussian bump exp(-x^2) centred at zero.
    return K.exp(-K.square(x))
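Because Activation accepts any callable, such a function can be dropped in as a custom activation. A minimal sketch (the layer sizes are arbitrary assumptions):

from keras.models import Sequential
from keras.layers import Dense, Activation

model = Sequential([
    Dense(64, input_shape=(32,)),
    Activation(rbf),  # the Gaussian-bump activation defined above
])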