This article collects typical usage examples of the Python method tensorflow.keras.backend.exp. If you are struggling with questions such as: what exactly does backend.exp do? How is backend.exp used? What do real examples of backend.exp look like? Then the curated code examples below may help. You can also explore further usage examples of the containing module, tensorflow.keras.backend.

The following shows 13 code examples of the backend.exp method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
Example 1: _softmax
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import exp [as alias]
def _softmax(x, axis=-1, alpha=1):
    """
    Softmax activation function, building on the Keras implementation
    with an additional alpha parameter.
    # Arguments
        x: Tensor.
        axis: Integer, axis along which the softmax normalization is applied.
        alpha: scalar multiplier applied to x before the softmax.
    # Returns
        Tensor, output of the softmax transformation.
    # Raises
        ValueError: In case `dim(x) == 1`.
    """
    x = alpha * x
    ndim = K.ndim(x)
    if ndim == 2:
        return K.softmax(x)
    elif ndim > 2:
        # subtract the per-axis max for numerical stability before exponentiating
        e = K.exp(x - K.max(x, axis=axis, keepdims=True))
        s = K.sum(e, axis=axis, keepdims=True)
        return e / s
    else:
        raise ValueError('Cannot apply softmax to a tensor that is 1D')
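A minimal usage sketch (an illustration, not part of the original source; it assumes `import tensorflow as tf` and `import tensorflow.keras.backend as K` at module level):

import tensorflow as tf
import tensorflow.keras.backend as K

x = tf.random.normal((2, 3, 4))
y = _softmax(x, axis=-1, alpha=2.0)
# each slice along the last axis sums to 1
print(K.eval(K.sum(y, axis=-1)))  # all ~1.0, shape (2, 3)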
Example 2: sampling
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import exp [as alias]
def sampling(args):
    """Reparameterization trick by sampling
    from an isotropic unit Gaussian.
    # Arguments:
        args (tensor): mean and log of variance of Q(z|X)
    # Returns:
        z (tensor): sampled latent vector
    """
    z_mean, z_log_var = args
    # K is the keras backend
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    # by default, random_normal has mean=0 and std=1.0
    epsilon = K.random_normal(shape=(batch, dim))
    # z = mean + sigma * epsilon, where sigma = exp(0.5 * log(sigma^2))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon
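In a Keras VAE this function is typically wrapped in a Lambda layer, so the sampling step stays differentiable with respect to z_mean and z_log_var. A short sketch (latent_dim and the two input tensors are assumed to come from the surrounding encoder):

from tensorflow.keras.layers import Lambda

latent_dim = 2  # assumed latent dimensionality
z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])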
Example 3: sampling
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import exp [as alias]
def sampling(args):
    """Implements the reparameterization trick by sampling
    from a Gaussian with zero mean and std=1.
    Arguments:
        args (tensor): mean and log of variance of Q(z|X)
    Returns:
        sampled latent vector (tensor)
    """
    z_mean, z_log_var = args
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    # by default, random_normal has mean=0 and std=1.0
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon
Example 4: mi_loss
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import exp [as alias]
def mi_loss(self, y_true, y_pred):
    """MINE loss function
    Arguments:
        y_true (tensor): not used, since this is
            unsupervised learning
        y_pred (tensor): stack of predictions for the joint T(x, y)
            and the marginal T(x, y'), where y' is a shuffled copy of y
    """
    size = self.args.batch_size
    # lower half is the prediction for the joint distribution
    pred_xy = y_pred[0: size, :]
    # upper half is the prediction for the product of marginals
    pred_x_y = y_pred[size: y_pred.shape[0], :]
    # implementation of the MINE loss (Eq 13.7.3)
    loss = K.mean(pred_xy) \
           - K.log(K.mean(K.exp(pred_x_y)))
    # negate: the network maximizes the bound while Keras minimizes the loss
    return -loss
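The exponential here comes from the Donsker-Varadhan bound that MINE optimizes: I(X; Y) >= E_P[T(x, y)] - log E_Q[exp(T(x, y'))], where P is the joint distribution and Q the product of marginals. A tiny numpy sanity check of the estimator (hypothetical values, for illustration only):

import numpy as np

pred_xy = np.array([1.2, 0.9, 1.1])    # hypothetical T on joint samples
pred_x_y = np.array([0.1, -0.2, 0.0])  # hypothetical T on shuffled (marginal) samples
mi_lower_bound = pred_xy.mean() - np.log(np.exp(pred_x_y).mean())
print(mi_lower_bound)  # ~1.09; the loss returned to Keras is its negation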
Example 5: convert_exp
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import exp [as alias]
def convert_exp(node, params, layers, lambda_func, node_name, keras_name):
    """
    Convert Exp layer
    :param node: current operation node
    :param params: operation attributes
    :param layers: available keras layers
    :param lambda_func: function for keras Lambda layer
    :param node_name: resulting layer name
    :param keras_name: resulting layer name (keras)
    :return: None
    """
    if len(node.input) != 1:
        # note: the original used `assert AttributeError(...)`, which always
        # passes because an exception instance is truthy; it must be raised
        raise AttributeError('More than 1 input for exp layer.')

    input_0 = ensure_tf_type(layers[node.input[0]], name="%s_const" % keras_name)

    def target_layer(x):
        # import inside the lambda so the layer stays serializable
        import tensorflow.keras.backend as K
        return K.exp(x)

    lambda_layer = keras.layers.Lambda(target_layer, name=keras_name)
    layers[node_name] = lambda_layer(input_0)
    lambda_func[keras_name] = target_layer
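Outside the converter machinery, the same Lambda-wrapped K.exp layer can be built and called directly (a standalone sketch, independent of ensure_tf_type and the ONNX node plumbing):

import tensorflow as tf
from tensorflow import keras
import tensorflow.keras.backend as K

exp_layer = keras.layers.Lambda(lambda x: K.exp(x), name='exp')
print(exp_layer(tf.constant([[0.0, 1.0]])).numpy())  # [[1.        2.7182817]]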
Example 6: logistic
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import exp [as alias]
import tensorflow as tf  # also needed: the body calls tf.exp directly

def logistic(x, x0=0., alpha=1., L=1.):
    """
    returns L / (1 + exp(-alpha * (x - x0)))
    """
    assert L > 0, 'L (height of logistic) should be > 0'
    assert alpha > 0, 'alpha (slope) of logistic should be > 0'
    return L / (1 + tf.exp(-alpha * (x - x0)))
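A quick sanity check of the curve's midpoint and saturation behaviour (illustrative values):

import tensorflow as tf

print(logistic(tf.constant(0.0)).numpy())          # 0.5, i.e. L/2 at x = x0
print(logistic(tf.constant(10.0), L=2.0).numpy())  # ~2.0, saturating toward L
print(logistic(tf.constant(-10.0)).numpy())        # ~0.0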
Example 7: softmax
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import exp [as alias]
import numpy as np  # this variant works on numpy arrays, not backend tensors

def softmax(x, axis):
    """
    softmax of a numpy array along a given axis
    """
    return np.exp(x) / np.sum(np.exp(x), axis=axis, keepdims=True)
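Note that this direct form overflows once entries of x get large (np.exp(1000) is inf). A numerically safer variant, mirroring the max-subtraction trick used in Example 1, might look like this (a sketch, not from the original source):

import numpy as np

def softmax_stable(x, axis):
    # shifting by the per-axis max leaves the result unchanged
    # but keeps the exponentials in a representable range
    e = np.exp(x - np.max(x, axis=axis, keepdims=True))
    return e / np.sum(e, axis=axis, keepdims=True)

print(softmax_stable(np.array([[1000.0, 1001.0]]), axis=-1))  # [[0.2689 0.7311]]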
Example 8: call
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import exp [as alias]
def call(self, tensors, mask=None):
    if self.homomorphic:
        # homomorphic processing: move to the log domain so that
        # multiplicative components become additive before filtering
        tensors = K.log(tensors)
    x_dct = self._dct3D(tensors)
    x_crop = self._cropping3D(x_dct)
    x_idct = self._idct3D(x_crop)
    if self.homomorphic:
        # map back out of the log domain
        x_idct = K.exp(x_idct)
    return x_idct
Example 9: call
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import exp [as alias]
def call(self, x):
    # The conditional probability of surviving each time interval
    # (given survival to the beginning of the interval) is affected by
    # the input data according to eq. 18.13 in Harrell F.,
    # Regression Modeling Strategies, 2nd ed. (available free online)
    return K.pow(K.sigmoid(self.kernel), K.exp(x))
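The construction keeps every output a valid probability: sigmoid squashes the kernel into (0, 1) as a baseline interval-survival probability, and K.exp(x) > 0 acts as a proportional-hazards-style exponent. A scalar illustration with hypothetical values:

import numpy as np

w, x = 0.0, 1.0                  # hypothetical kernel weight and covariate effect
base = 1.0 / (1.0 + np.exp(-w))  # sigmoid(w) = 0.5, the baseline survival probability
print(base ** np.exp(x))         # 0.5 ** e ~ 0.152, still inside (0, 1)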
Example 10: softplus2
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import exp [as alias]
def softplus2(x):
    """
    out = log(exp(x) + 1) - log(2)
    softplus function that is 0 at x=0; the implementation aims at avoiding overflow
    Args:
        x: (Tensor) input tensor
    Returns:
        (Tensor) output tensor
    """
    return kb.relu(x) + kb.log(0.5 * kb.exp(-kb.abs(x)) + 0.5)
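The rewrite is an exact identity: for x >= 0, relu(x) + log(0.5*exp(-x) + 0.5) = x + log((exp(-x) + 1)/2) = log(exp(x) + 1) - log(2), and the x < 0 case follows the same way. Since exp is only ever applied to -|x|, it cannot overflow. A quick check (assuming `import tensorflow.keras.backend as kb` as in the original module):

import numpy as np
import tensorflow.keras.backend as kb

x = kb.constant([-200.0, 0.0, 200.0])
print(kb.eval(softplus2(x)))                       # [-0.6931   0.     199.3069]
# the naive form overflows in float32 at x = 200:
print(kb.eval(kb.log(kb.exp(x) + 1) - np.log(2)))  # [-0.6931   0.        inf]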
Example 11: yolo3_head
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import exp [as alias]
def yolo3_head(feats, anchors, num_classes, input_shape, calc_loss=False):
    """Convert final layer features to bounding box parameters."""
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3]  # height, width
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
                    [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
                    [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    # Adjust predictions to each spatial grid point and anchor size.
    box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[..., ::-1], K.dtype(feats))
    box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[..., ::-1], K.dtype(feats))
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.sigmoid(feats[..., 5:])

    if calc_loss:
        return grid, feats, box_xy, box_wh
    return box_xy, box_wh, box_confidence, box_class_probs
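K.exp appears here because the network predicts box width and height as log-scale offsets relative to an anchor: box_wh = anchor * exp(t_wh), which keeps the decoded dimensions strictly positive. A numeric illustration with a hypothetical anchor on a 416x416 input:

import numpy as np

anchor = np.array([116.0, 90.0])  # hypothetical anchor, in pixels
t_wh = np.array([0.2, -0.5])      # raw network output for width/height
box_wh = np.exp(t_wh) * anchor / 416.0  # exp keeps w, h > 0
print(box_wh)  # ~[0.3406 0.1312], as fractions of the input size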
Example 12: yolo2_head
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import exp [as alias]
def yolo2_head(feats, anchors, num_classes, input_shape, calc_loss=False):
    """Convert final layer features to bounding box parameters."""
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3]  # height, width
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
                    [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
                    [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    # Adjust predictions to each spatial grid point and anchor size.
    box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[..., ::-1], K.dtype(feats))
    #box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(grid_shape[..., ::-1], K.dtype(feats))
    box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[..., ::-1], K.dtype(feats))
    box_confidence = K.sigmoid(feats[..., 4:5])
    # unlike YOLOv3 above, YOLOv2 uses a softmax over classes rather than per-class sigmoids
    box_class_probs = K.softmax(feats[..., 5:])

    if calc_loss:
        return grid, feats, box_xy, box_wh
    return box_xy, box_wh, box_confidence, box_class_probs
Example 13: gaussian_kernel
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import exp [as alias]
def gaussian_kernel(sigma, windowsize=None, indexing='ij'):
    """
    sigma can be a number or a list of numbers.
    # some guidance from my MATLAB file
    https://github.com/adalca/mivt/blob/master/src/gaussFilt.m
    Parameters:
        sigma: scalar or list of scalars
        windowsize (optional): scalar or list of scalars indicating the shape of the kernel
    Returns:
        ND kernel with the same number of dimensions as the number of sigmas.
    Todo: could use MultivariateNormalDiag
    """
    if not isinstance(sigma, (list, tuple)):
        sigma = [sigma]
    sigma = [np.maximum(f, np.finfo(float).eps) for f in sigma]
    nb_dims = len(sigma)

    # compute windowsize
    if windowsize is None:
        windowsize = [np.round(f * 3) * 2 + 1 for f in sigma]
    if len(sigma) != len(windowsize):
        raise ValueError('sigma and windowsize should have the same length. '
                         'Got vectors: ' + str(sigma) + ' and ' + str(windowsize))

    # ok, let's get to work.
    mid = [(w - 1) / 2 for w in windowsize]

    # list of volume ndgrid
    # N-long list, each entry of shape volshape
    mesh = volshape_to_meshgrid(windowsize, indexing=indexing)
    mesh = [tf.cast(f, 'float32') for f in mesh]

    # compute independent gaussians
    diff = [mesh[f] - mid[f] for f in range(len(windowsize))]
    exp_term = [-K.square(diff[f]) / (2 * (sigma[f] ** 2)) for f in range(nb_dims)]
    norms = [exp_term[f] - np.log(sigma[f] * np.sqrt(2 * np.pi)) for f in range(nb_dims)]

    # add an all-ones entry and transform into a large matrix
    norms_matrix = tf.stack(norms, axis=-1)  # *volshape x N
    g = K.sum(norms_matrix, -1)              # volshape
    g = tf.exp(g)
    g /= tf.reduce_sum(g)

    return g
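A minimal check of the default behaviour (this assumes the surrounding module provides volshape_to_meshgrid, as in the original source): with sigma=1 the default window is round(3*1)*2 + 1 = 7, and the final exp/normalization guarantees the kernel sums to 1:

import numpy as np

k = gaussian_kernel(1.0)          # 1-D kernel
print(k.numpy().shape)            # (7,)
print(float(np.sum(k.numpy())))   # 1.0 (normalized by tf.reduce_sum)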