This article collects typical usage examples of the Python method keras.backend.var. If you are wondering what backend.var does in Python, how to use it, or where to find real examples, the curated code samples below may help. You can also explore the containing module, keras.backend, further.
The following shows 5 code examples of the backend.var method, ordered by popularity by default.
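Before the examples, a minimal illustration of the signature K.var(x, axis=None, keepdims=False); the toy matrix below is for demonstration only:

import numpy as np
from keras import backend as K

x = K.constant(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]))
print(K.eval(K.var(x)))                          # variance over all elements
print(K.eval(K.var(x, axis=0)))                  # per-column variance
print(K.eval(K.var(x, axis=-1, keepdims=True)))  # per-row variance, shape (2, 1)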
Example 1: categorical_crossentropy_and_variance
# Required import: from keras import backend as K
# Alternatively: from keras.backend import var
def categorical_crossentropy_and_variance(y_true, y_pred):
    # Standard categorical cross-entropy plus a penalty (weight 10) on the
    # variance of the per-class mean prediction across the batch.
    return K.categorical_crossentropy(y_true, y_pred) + 10 * K.var(K.mean(y_pred, axis=0))
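As a usage sketch (the toy model below is hypothetical, not part of the original example), such a function can be passed straight to model.compile, since Keras accepts any callable (y_true, y_pred) -> tensor as a loss:

from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense

# Hypothetical toy softmax classifier, for illustration only.
model = Sequential([Dense(10, activation='softmax', input_shape=(784,))])
model.compile(optimizer='adam', loss=categorical_crossentropy_and_variance)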
Example 2: __call__
# Required import: from keras import backend as K
# Alternatively: from keras.backend import var
# Note: KC below presumably refers to keras_contrib.backend, which provides extract_image_patches
def __call__(self, y_true, y_pred):
    # Note: this objective takes additional parameters via the instance.
    # Some of the 'modes' for edge behavior do not yet have a gradient
    # definition in the Theano graph and cannot be used for learning.
    kernel = [self.kernel_size, self.kernel_size]
    y_true = K.reshape(y_true, [-1] + list(self.__int_shape(y_pred)[1:]))
    y_pred = K.reshape(y_pred, [-1] + list(self.__int_shape(y_pred)[1:]))
    patches_pred = KC.extract_image_patches(y_pred, kernel, kernel, 'valid',
                                            self.dim_ordering)
    patches_true = KC.extract_image_patches(y_true, kernel, kernel, 'valid',
                                            self.dim_ordering)
    # Reshape so the variance can be taken within each patch
    bs, w, h, c1, c2, c3 = self.__int_shape(patches_pred)
    patches_pred = K.reshape(patches_pred, [-1, w, h, c1 * c2 * c3])
    patches_true = K.reshape(patches_true, [-1, w, h, c1 * c2 * c3])
    # Get mean
    u_true = K.mean(patches_true, axis=-1)
    u_pred = K.mean(patches_pred, axis=-1)
    # Get variance
    var_true = K.var(patches_true, axis=-1)
    var_pred = K.var(patches_pred, axis=-1)
    # Get covariance
    covar_true_pred = K.mean(patches_true * patches_pred, axis=-1) - u_true * u_pred
    ssim = (2 * u_true * u_pred + self.c1) * (2 * covar_true_pred + self.c2)
    denom = ((K.square(u_true) + K.square(u_pred) + self.c1) *
             (var_pred + var_true + self.c2))
    ssim /= denom  # no need for clipping; c1 and c2 keep the denominator non-zero
    return K.mean((1.0 - ssim) / 2.0)
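For reference, the statistic assembled per patch is the standard SSIM index,

$$\mathrm{SSIM}(x, y) = \frac{(2\mu_x\mu_y + c_1)\,(2\sigma_{xy} + c_2)}{(\mu_x^2 + \mu_y^2 + c_1)\,(\sigma_x^2 + \sigma_y^2 + c_2)},$$

where the means, variances, and covariance are exactly the u_*, var_*, and covar_true_pred tensors above. Since SSIM lies in [-1, 1], the returned loss K.mean((1.0 - ssim) / 2.0) lies in [0, 1].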
Example 3: img_normalization
# Required import: from keras import backend as K
# Alternatively: from keras.backend import var
def img_normalization(img_input, m0=0.0, var0=1.0):
    # Normalize each image to a target mean m0 and variance var0,
    # using per-image statistics over all pixels and channels.
    m = K.mean(img_input, axis=[1, 2, 3], keepdims=True)
    var = K.var(img_input, axis=[1, 2, 3], keepdims=True)
    after = K.sqrt(var0 * K.tf.square(img_input - m) / var)
    # Pixels above the mean map to m0 + after, the rest to m0 - after.
    image_n = K.tf.where(K.tf.greater(img_input, m), m0 + after, m0 - after)
    return image_n
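Because the function is built from backend ops (the K.tf.* calls require an older Keras running on the TensorFlow backend), it can be wrapped in a Lambda layer; the input shape below is an assumption for illustration:

from keras.layers import Input, Lambda
from keras.models import Model

inputs = Input(shape=(128, 128, 1))  # hypothetical grayscale images, NHWC
normalized = Lambda(img_normalization)(inputs)
model = Model(inputs, normalized)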
Example 4: call
# Required import: from keras import backend as K
# Alternatively: from keras.backend import var
def call(self, inputs, **kwargs):
    input_shape = K.int_shape(inputs)
    tensor_input_shape = K.shape(inputs)

    # Prepare broadcasting shape.
    reduction_axes = list(range(len(input_shape)))
    del reduction_axes[self.axis]
    broadcast_shape = [1] * len(input_shape)
    broadcast_shape[self.axis] = input_shape[self.axis] // self.groups
    broadcast_shape.insert(1, self.groups)

    reshape_group_shape = K.shape(inputs)
    group_axes = [reshape_group_shape[i] for i in range(len(input_shape))]
    group_axes[self.axis] = input_shape[self.axis] // self.groups
    group_axes.insert(1, self.groups)

    # Reshape inputs to the new group shape: (batch, groups, ..., channels // groups)
    group_shape = [group_axes[0], self.groups] + group_axes[2:]
    group_shape = K.stack(group_shape)
    inputs = K.reshape(inputs, group_shape)

    # Normalize over every axis except batch and group.
    group_reduction_axes = list(range(len(group_axes)))
    group_reduction_axes = group_reduction_axes[2:]
    mean = K.mean(inputs, axis=group_reduction_axes, keepdims=True)
    variance = K.var(inputs, axis=group_reduction_axes, keepdims=True)
    inputs = (inputs - mean) / (K.sqrt(variance + self.epsilon))

    # prepare broadcast shape
    inputs = K.reshape(inputs, group_shape)
    outputs = inputs

    # In this case we must explicitly broadcast all parameters.
    if self.scale:
        broadcast_gamma = K.reshape(self.gamma, broadcast_shape)
        outputs = outputs * broadcast_gamma
    if self.center:
        broadcast_beta = K.reshape(self.beta, broadcast_shape)
        outputs = outputs + broadcast_beta

    # Restore the original tensor shape.
    outputs = K.reshape(outputs, tensor_input_shape)
    return outputs
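To see what the mean/var reduction is doing on concrete shapes, here is a minimal standalone sketch of the normalization core (the shapes and epsilon are illustrative, not taken from the snippet):

import numpy as np
from keras import backend as K

# Toy layout after grouping: (batch, groups, h, w)
x = K.constant(np.random.rand(2, 4, 8, 8).astype('float32'))
mean = K.mean(x, axis=[2, 3], keepdims=True)
var = K.var(x, axis=[2, 3], keepdims=True)
x_norm = (x - mean) / K.sqrt(var + 1e-5)
# Each (batch, group) slice now has ~zero mean and ~unit variance.
print(K.eval(K.var(x_norm, axis=[2, 3])))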
Example 5: __call__
# Required import: from keras import backend as K
# Alternatively: from keras.backend import var
def __call__(self, y_true, y_pred):
    """ Call the DSSIM Loss Function.

    Parameters
    ----------
    y_true: tensor or variable
        The ground truth value
    y_pred: tensor or variable
        The predicted value

    Returns
    -------
    tensor
        The DSSIM Loss value

    Notes
    -----
    There are additional parameters for this function. Some of the 'modes' for edge
    behavior do not yet have a gradient definition in the Theano graph and cannot
    be used for learning.
    """
    kernel = [self.kernel_size, self.kernel_size]
    y_true = K.reshape(y_true, [-1] + list(self.__int_shape(y_pred)[1:]))
    y_pred = K.reshape(y_pred, [-1] + list(self.__int_shape(y_pred)[1:]))
    patches_pred = self.extract_image_patches(y_pred, kernel, kernel, 'valid',
                                              self.dim_ordering)
    patches_true = self.extract_image_patches(y_true, kernel, kernel, 'valid',
                                              self.dim_ordering)
    # Get mean
    u_true = K.mean(patches_true, axis=-1)
    u_pred = K.mean(patches_pred, axis=-1)
    # Get variance
    var_true = K.var(patches_true, axis=-1)
    var_pred = K.var(patches_pred, axis=-1)
    # Get covariance
    covar_true_pred = K.mean(patches_true * patches_pred, axis=-1) - u_true * u_pred
    ssim = (2 * u_true * u_pred + self.c_1) * (2 * covar_true_pred + self.c_2)
    denom = ((K.square(u_true) + K.square(u_pred) + self.c_1) *
             (var_pred + var_true + self.c_2))
    ssim /= denom  # no need for clipping; c_1 and c_2 keep the denominator non-zero
    return K.mean((1.0 - ssim) / 2.0)
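A hedged usage sketch: assuming the enclosing class is named DSSIMObjective (as in keras-contrib, where kernel_size is a documented constructor argument), the instance itself is passed as the loss; the one-layer model below is hypothetical:

from keras.models import Sequential
from keras.layers import Conv2D

# Hypothetical image-to-image model, for illustration only.
model = Sequential([Conv2D(3, 3, padding='same', input_shape=(64, 64, 3))])
# DSSIMObjective is the class whose __call__ is shown above (name assumed).
model.compile(optimizer='adam', loss=DSSIMObjective(kernel_size=3))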