This article collects typical usage examples of the Python method tensorflow.keras.backend.square. If you are unsure what backend.square does or how to use it, the curated code samples below may help. You can also explore other usage examples from the tensorflow.keras.backend module.

Below are 13 code examples of backend.square, ordered by popularity.
Example 1: huber_loss

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import square [as alias]
def huber_loss(y_true, y_pred, clip_value):
    # Huber loss, see https://en.wikipedia.org/wiki/Huber_loss and
    # https://medium.com/@karpathy/yes-you-should-understand-backprop-e2f06eab496b
    # for details.
    assert clip_value > 0.

    x = y_true - y_pred
    if np.isinf(clip_value):
        # Special case for infinity, since TensorFlow has problems
        # with the comparison `K.abs(x) < np.inf`.
        return .5 * K.square(x)

    condition = K.abs(x) < clip_value
    squared_loss = .5 * K.square(x)
    linear_loss = clip_value * (K.abs(x) - .5 * clip_value)
    import tensorflow as tf
    if hasattr(tf, 'select'):
        return tf.select(condition, squared_loss, linear_loss)  # condition, true, false
    else:
        return tf.where(condition, squared_loss, linear_loss)  # condition, true, false
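Because huber_loss takes an extra clip_value argument, it cannot be passed to Keras directly; a closure or wrapper fixes the threshold first. A minimal usage sketch (the wrapper name clipped_huber and the value 1.0 are illustrative, not from the original source):

import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K

def clipped_huber(y_true, y_pred):
    # Fix the threshold so the signature matches what Keras expects.
    return huber_loss(y_true, y_pred, clip_value=1.0)

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer='adam', loss=clipped_huber)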
Example 2: loss

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import square [as alias]
def loss(self, y_true, y_pred):
    if self.crop_indices is not None:
        y_true = utils.batch_gather(y_true, self.crop_indices)
        y_pred = utils.batch_gather(y_pred, self.crop_indices)

    ksq = K.square(y_pred - y_true)

    if self.vox_weights is not None:
        if self.vox_weights == 'y_true':
            ksq *= y_true
        elif self.vox_weights == 'expy_true':
            ksq *= tf.exp(y_true)
        else:
            ksq *= self.vox_weights

    if self.weights is not None:
        ksq *= self.weights

    return K.mean(ksq)
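This loss is a method on a class that supplies crop_indices, vox_weights, and weights as attributes. A minimal self-contained sketch of such a wrapper for the common case of fixed per-class weights (the class name WeightedMSE is hypothetical, and the cropping and voxel-weight branches are omitted):

import tensorflow as tf
from tensorflow.keras import backend as K

class WeightedMSE:
    def __init__(self, weights=None):
        self.weights = weights  # e.g. a length-nb_labels list of class weights

    def loss(self, y_true, y_pred):
        ksq = K.square(y_pred - y_true)
        if self.weights is not None:
            ksq *= self.weights  # broadcasts over the last (label) axis
        return K.mean(ksq)

# model.compile(optimizer='adam', loss=WeightedMSE(weights=[1.0, 5.0]).loss)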
Example 3: call

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import square [as alias]
def call(self, inputs):
    # To channels-first (NHWC -> NCHW)
    x = tf.transpose(inputs[0], [0, 3, 1, 2])

    # Get weight and bias modulations
    # Make sure w's shape is compatible with self.kernel
    w = K.expand_dims(K.expand_dims(K.expand_dims(inputs[1], axis=1), axis=1), axis=-1)

    # Add minibatch layer to weights
    wo = K.expand_dims(self.kernel, axis=0)

    # Modulate
    weights = wo * (w + 1)

    # Demodulate
    if self.demod:
        d = K.sqrt(K.sum(K.square(weights), axis=[1, 2, 3], keepdims=True) + 1e-8)
        weights = weights / d

    # Reshape/scale input
    x = tf.reshape(x, [1, -1, x.shape[2], x.shape[3]])  # Fused => reshape minibatch to convolution groups.
    w = tf.reshape(tf.transpose(weights, [1, 2, 3, 0, 4]),
                   [weights.shape[1], weights.shape[2], weights.shape[3], -1])

    x = tf.nn.conv2d(x, w,
                     strides=self.strides,
                     padding="SAME",
                     data_format="NCHW")

    # Reshape/scale output.
    x = tf.reshape(x, [-1, self.filters, x.shape[2], x.shape[3]])  # Fused => reshape convolution groups back to minibatch.
    x = tf.transpose(x, [0, 2, 3, 1])

    return x
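The demodulation branch implements StyleGAN2-style weight demodulation: each modulated output filter is rescaled by the inverse L2 norm taken over its spatial and input-channel axes, keeping output activations near unit variance. A quick NumPy sketch of just that normalization (shapes are illustrative):

import numpy as np

weights = np.random.randn(1, 3, 3, 64, 128)           # [batch, kh, kw, in, out], illustrative
d = np.sqrt(np.sum(np.square(weights), axis=(1, 2, 3), keepdims=True) + 1e-8)
demod = weights / d                                    # each output filter now has unit L2 norm
print(np.sum(np.square(demod[0, ..., 0])))             # ~1.0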
Example 4: gradient_penalty

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import square [as alias]
def gradient_penalty(samples, output, weight):
    gradients = K.gradients(output, samples)[0]
    gradients_sqr = K.square(gradients)
    gradient_penalty = K.sum(gradients_sqr,
                             axis=np.arange(1, len(gradients_sqr.shape)))

    # (weight / 2) * ||grad||^2
    # Penalize the gradient norm
    return K.mean(gradient_penalty) * weight
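Note that K.gradients only works while building a graph; if the penalty has to run under eager execution, an equivalent can be written with tf.GradientTape. A sketch under that assumption, for NHWC image batches (gradient_penalty_eager is an illustrative name):

import tensorflow as tf

def gradient_penalty_eager(discriminator, samples, weight):
    with tf.GradientTape() as tape:
        tape.watch(samples)                    # samples are plain tensors, not variables
        output = discriminator(samples, training=True)
    gradients = tape.gradient(output, samples)
    penalty = tf.reduce_sum(tf.square(gradients), axis=[1, 2, 3])
    return tf.reduce_mean(penalty) * weight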
Example 5: update

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import square [as alias]
def update(self, x):
    if x.ndim == len(self.shape):
        x = x.reshape(-1, *self.shape)
    assert x.shape[1:] == self.shape

    self._count += x.shape[0]
    self._sum += np.sum(x, axis=0)
    self._sumsq += np.sum(np.square(x), axis=0)

    self.mean = self._sum / float(self._count)
    self.std = np.sqrt(np.maximum(np.square(self.eps),
                                  self._sumsq / float(self._count) - np.square(self.mean)))
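The method maintains streaming sums so the mean and standard deviation can be updated in constant memory per batch. A self-contained sketch of a class this method could live on (RunningStat and its eps default are assumptions; the original class is not shown):

import numpy as np

class RunningStat:
    def __init__(self, shape, eps=1e-2):
        self.shape, self.eps = shape, eps
        self._count = 0
        self._sum = np.zeros(shape)
        self._sumsq = np.full(shape, eps)
        self.mean = np.zeros(shape)
        self.std = np.ones(shape)

RunningStat.update = update  # attach the method shown above

rs = RunningStat(shape=(3,))
rs.update(np.random.randn(100, 3))
print(rs.mean, rs.std)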
Example 6: call

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import square [as alias]
def call(self, x):
    return K.mean(K.batch_flatten(K.square(x[0] - x[1])), -1)
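This call computes a per-sample mean squared error between two tensors of equal shape. The same computation wired up as a Lambda layer in a functional model (a sketch; shapes and names are illustrative):

import tensorflow as tf
from tensorflow.keras import backend as K

a = tf.keras.Input(shape=(32, 32, 3))
b = tf.keras.Input(shape=(32, 32, 3))
mse = tf.keras.layers.Lambda(
    lambda x: K.mean(K.batch_flatten(K.square(x[0] - x[1])), -1))([a, b])
model = tf.keras.Model([a, b], mse)  # output shape: (batch,)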
Example 7: earth_movers_distance

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import square [as alias]
def earth_movers_distance(y_true, y_pred):
    cdf_true = K.cumsum(y_true, axis=-1)
    cdf_pred = K.cumsum(y_pred, axis=-1)
    emd = K.sqrt(K.mean(K.square(cdf_true - cdf_pred), axis=-1))
    return K.mean(emd)
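For 1-D distributions over ordered bins, the earth mover's distance reduces to a function of the difference between the two CDFs, which is what the cumulative sums compute; this variant takes the RMS of that difference. A quick numeric check (values are illustrative):

import tensorflow as tf

y_true = tf.constant([[0.0, 1.0, 0.0]])  # all mass at bin 1
y_pred = tf.constant([[0.0, 0.0, 1.0]])  # all mass at bin 2
print(earth_movers_distance(y_true, y_pred).numpy())  # ~0.577 = sqrt(1/3)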
Example 8: correlation_coefficient_loss

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import square [as alias]
def correlation_coefficient_loss(y_true, y_pred):
    x = y_true
    y = y_pred
    mx = K.mean(x)
    my = K.mean(y)
    xm, ym = x - mx, y - my
    r_num = K.sum(tf.multiply(xm, ym))
    r_den = K.sqrt(tf.multiply(K.sum(K.square(xm)), K.sum(K.square(ym))))
    r = r_num / r_den

    r = K.maximum(K.minimum(r, 1.0), -1.0)
    return 1 - K.square(r)
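Since the function returns 1 - r^2, any perfect linear relationship (positive or negative) yields a loss of 0. A quick eager-mode check (values are illustrative):

import tensorflow as tf

x = tf.constant([1.0, 2.0, 3.0, 4.0])
print(correlation_coefficient_loss(x, 2 * x + 1).numpy())  # ~0.0, perfectly correlated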
Example 9: angle_zyz_difference

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import square [as alias]
def angle_zyz_difference(ang1=np.zeros(3), ang2=np.zeros(3)):
    loc1_r = np.zeros(ang1.shape)
    loc2_r = np.zeros(ang2.shape)

    rm1 = rotation_matrix_zyz(ang1)
    rm2 = rotation_matrix_zyz(ang2)
    loc1_r_t = np.array([loc1_r, loc1_r, loc1_r])
    loc2_r_t = np.array([loc2_r, loc2_r, loc2_r])

    dif_m = (rm1.dot(np.eye(3) - loc1_r_t)).transpose() - (rm2.dot(np.eye(3) - loc2_r_t)).transpose()
    dif_d = math.sqrt(np.square(dif_m).sum())

    return dif_d
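The helper rotation_matrix_zyz is not shown here. A standard ZYZ Euler-angle rotation matrix would look like the following; this is an assumption about the helper, and the original's exact angle convention may differ:

import numpy as np

def rotation_matrix_zyz(ang):
    # Assumed convention: rotate about z by ang[0], y by ang[1], z by ang[2].
    phi, theta, psi = ang
    rz1 = np.array([[np.cos(phi), -np.sin(phi), 0],
                    [np.sin(phi),  np.cos(phi), 0],
                    [0, 0, 1]])
    ry = np.array([[ np.cos(theta), 0, np.sin(theta)],
                   [0, 1, 0],
                   [-np.sin(theta), 0, np.cos(theta)]])
    rz2 = np.array([[np.cos(psi), -np.sin(psi), 0],
                    [np.sin(psi),  np.cos(psi), 0],
                    [0, 0, 1]])
    return rz1.dot(ry).dot(rz2)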
Example 10: _euclidian_dist

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import square [as alias]
def _euclidian_dist(self, x_pair: List[Tensor]) -> Tensor:
    x1_norm = K.l2_normalize(x_pair[0], axis=1)
    x2_norm = K.l2_normalize(x_pair[1], axis=1)
    diff = x1_norm - x2_norm
    square = K.square(diff)
    _sum = K.sum(square, axis=1)
    _sum = K.clip(_sum, min_value=1e-12, max_value=None)
    dist = K.sqrt(_sum) / 2.
    return dist
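Because both vectors are L2-normalized, their difference has norm at most 2, so the division by 2 maps the distance into [0, 1]. A standalone usage sketch in a two-input (Siamese-style) model, dropping self since the method does not use it (names are illustrative):

import tensorflow as tf
from tensorflow.keras import backend as K, layers

def euclidian_dist(x_pair):
    x1 = K.l2_normalize(x_pair[0], axis=1)
    x2 = K.l2_normalize(x_pair[1], axis=1)
    s = K.clip(K.sum(K.square(x1 - x2), axis=1), 1e-12, None)
    return K.sqrt(s) / 2.

a = tf.keras.Input(shape=(128,))
b = tf.keras.Input(shape=(128,))
dist = layers.Lambda(euclidian_dist)([a, b])
model = tf.keras.Model([a, b], dist)  # output in [0, 1] per sample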
Example 11: train_step

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import square [as alias]
def train_step(self, images, style, noise, perform_gp=True, perform_pl=False):
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        # Get style information
        w_space = []
        pl_lengths = self.pl_mean
        for i in range(len(style)):
            w_space.append(self.GAN.S(style[i]))

        # Generate images
        generated_images = self.GAN.G(w_space + [noise])

        # Discriminate
        real_output = self.GAN.D(images, training=True)
        fake_output = self.GAN.D(generated_images, training=True)

        # Hinge loss function
        gen_loss = K.mean(fake_output)
        divergence = K.mean(K.relu(1 + real_output) + K.relu(1 - fake_output))
        disc_loss = divergence

        if perform_gp:
            # R1 gradient penalty
            disc_loss += gradient_penalty(images, real_output, 10)

        if perform_pl:
            # Slightly adjust W space
            w_space_2 = []
            for i in range(len(style)):
                std = 0.1 / (K.std(w_space[i], axis=0, keepdims=True) + 1e-8)
                w_space_2.append(w_space[i] + K.random_normal(tf.shape(w_space[i])) / (std + 1e-8))

            # Generate from slightly adjusted W space
            pl_images = self.GAN.G(w_space_2 + [noise])

            # Get distance after adjustment (path length)
            delta_g = K.mean(K.square(pl_images - generated_images), axis=[1, 2, 3])
            pl_lengths = delta_g

            if self.pl_mean > 0:
                gen_loss += K.mean(K.square(pl_lengths - self.pl_mean))

    # Get gradients for respective areas
    gradients_of_generator = gen_tape.gradient(gen_loss, self.GAN.GM.trainable_variables)
    gradients_of_discriminator = disc_tape.gradient(disc_loss, self.GAN.D.trainable_variables)

    # Apply gradients
    self.GAN.GMO.apply_gradients(zip(gradients_of_generator, self.GAN.GM.trainable_variables))
    self.GAN.DMO.apply_gradients(zip(gradients_of_discriminator, self.GAN.D.trainable_variables))

    return disc_loss, gen_loss, divergence, pl_lengths
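The path-length term penalizes deviation of pl_lengths from a running average self.pl_mean, which the surrounding training loop is expected to maintain. A sketch of that bookkeeping, an assumption about the caller rather than code from the original source (the decay 0.99 is illustrative):

import numpy as np

# inside the outer training loop (illustrative):
disc_loss, gen_loss, divergence, pl_lengths = model.train_step(
    images, style, noise, perform_gp=True, perform_pl=True)
# exponential moving average of the observed path lengths
model.pl_mean = 0.99 * model.pl_mean + 0.01 * np.mean(pl_lengths)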
Example 12: dice

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import square [as alias]
def dice(self, y_true, y_pred):
    """
    compute dice for given Tensors
    """
    if self.crop_indices is not None:
        y_true = utils.batch_gather(y_true, self.crop_indices)
        y_pred = utils.batch_gather(y_pred, self.crop_indices)

    if self.input_type == 'prob':
        # We assume that y_true is probabilistic, but just in case:
        if self.re_norm:
            y_true = tf.div_no_nan(y_true, K.sum(y_true, axis=-1, keepdims=True))
        y_true = K.clip(y_true, K.epsilon(), 1)

        # make sure pred is a probability
        if self.re_norm:
            y_pred = tf.div_no_nan(y_pred, K.sum(y_pred, axis=-1, keepdims=True))
        y_pred = K.clip(y_pred, K.epsilon(), 1)

    # Prepare the volumes to operate on
    # If we're doing 'hard' Dice, then we will prepare one-hot-based matrices of size
    # [batch_size, nb_voxels, nb_labels], where for each voxel in each batch entry,
    # the entries are either 0 or 1
    if self.dice_type == 'hard':

        # if given predicted probability, transform to "hard max"
        if self.input_type == 'prob':
            if self.approx_hard_max:
                y_pred_op = _hard_max(y_pred, axis=-1)
                y_true_op = _hard_max(y_true, axis=-1)
            else:
                y_pred_op = _label_to_one_hot(K.argmax(y_pred, axis=-1), self.nb_labels)
                y_true_op = _label_to_one_hot(K.argmax(y_true, axis=-1), self.nb_labels)

        # if given predicted label, transform to one hot notation
        else:
            assert self.input_type == 'max_label'
            y_pred_op = _label_to_one_hot(y_pred, self.nb_labels)
            y_true_op = _label_to_one_hot(y_true, self.nb_labels)

    # If we're doing soft Dice, require prob output, and the data already is as we need it
    # [batch_size, nb_voxels, nb_labels]
    else:
        assert self.input_type == 'prob', "cannot do soft dice with max_label input"
        y_pred_op = y_pred
        y_true_op = y_true

    # reshape to [batch_size, nb_voxels, nb_labels]
    batch_size = K.shape(y_true)[0]
    y_pred_op = K.reshape(y_pred_op, [batch_size, -1, K.shape(y_true)[-1]])
    y_true_op = K.reshape(y_true_op, [batch_size, -1, K.shape(y_true)[-1]])

    # compute dice for each entry in batch.
    # dice will now be [batch_size, nb_labels]
    top = 2 * K.sum(y_true_op * y_pred_op, 1)
    bottom = K.sum(K.square(y_true_op), 1) + K.sum(K.square(y_pred_op), 1)
    # make sure we have no 0s on the bottom. K.epsilon()
    bottom = K.maximum(bottom, self.area_reg)
    return top / bottom
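dice returns a [batch_size, nb_labels] matrix of Dice scores; a training loss is typically one minus their mean. A standalone soft-Dice sketch for probabilistic inputs, a simplification of the method above rather than the original class (the area_reg default is illustrative):

import tensorflow as tf
from tensorflow.keras import backend as K

def soft_dice_loss(y_true, y_pred, area_reg=0.1):
    batch_size = K.shape(y_true)[0]
    nb_labels = K.shape(y_true)[-1]
    yt = K.reshape(y_true, [batch_size, -1, nb_labels])
    yp = K.reshape(y_pred, [batch_size, -1, nb_labels])
    top = 2 * K.sum(yt * yp, 1)
    bottom = K.maximum(K.sum(K.square(yt), 1) + K.sum(K.square(yp), 1), area_reg)
    return 1 - K.mean(top / bottom)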
Example 13: gaussian_kernel

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import square [as alias]
def gaussian_kernel(sigma, windowsize=None, indexing='ij'):
    """
    sigma can be a number or a list of numbers.

    # some guidance from my MATLAB file
    https://github.com/adalca/mivt/blob/master/src/gaussFilt.m

    Parameters:
        sigma: scalar or list of scalars
        windowsize (optional): scalar or list of scalars indicating the shape of the kernel

    Returns:
        ND kernel with the same number of dimensions as the number of sigmas.

    Todo: could use MultivariateNormalDiag
    """

    if not isinstance(sigma, (list, tuple)):
        sigma = [sigma]
    sigma = [np.maximum(f, np.finfo(float).eps) for f in sigma]

    nb_dims = len(sigma)

    # compute windowsize
    if windowsize is None:
        windowsize = [np.round(f * 3) * 2 + 1 for f in sigma]

    if len(sigma) != len(windowsize):
        raise ValueError('sigma and windowsize should have the same length. '
                         'Got vectors: ' + str(sigma) + ' and ' + str(windowsize))

    # ok, let's get to work.
    mid = [(w - 1) / 2 for w in windowsize]

    # list of volume ndgrid
    # N-long list, each entry of shape volshape
    mesh = volshape_to_meshgrid(windowsize, indexing=indexing)
    mesh = [tf.cast(f, 'float32') for f in mesh]

    # compute independent gaussians
    diff = [mesh[f] - mid[f] for f in range(len(windowsize))]
    exp_term = [-K.square(diff[f]) / (2 * (sigma[f] ** 2)) for f in range(nb_dims)]
    norms = [exp_term[f] - np.log(sigma[f] * np.sqrt(2 * np.pi)) for f in range(nb_dims)]

    # add an all-ones entry and transform into a large matrix
    norms_matrix = tf.stack(norms, axis=-1)  # *volshape x N
    g = K.sum(norms_matrix, -1)  # volshape
    g = tf.exp(g)
    g /= tf.reduce_sum(g)

    return g
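A 2-D kernel returned by this function can be used directly as a blur filter with tf.nn.conv2d. A usage sketch, assuming gaussian_kernel and its volshape_to_meshgrid dependency are importable:

import tensorflow as tf

kernel = gaussian_kernel([2.0, 2.0])                # 13x13 kernel for sigma = 2
kernel = tf.reshape(kernel, [*kernel.shape, 1, 1])  # [kh, kw, in_ch, out_ch]
image = tf.random.uniform([1, 64, 64, 1])           # single-channel NHWC batch
blurred = tf.nn.conv2d(image, kernel, strides=1, padding='SAME')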