This article collects typical usage examples of the Python method keras.backend.abs. If you are wondering what backend.abs does, how to use it, or where to find examples of it, the curated code samples below may help. You can also explore the other methods of the keras.backend module.
The following shows 15 code examples of backend.abs, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
Example 1: rpn_loss_regr
# Required import: from keras import backend [as alias]
# Or: from keras.backend import abs [as alias]
def rpn_loss_regr(num_anchors):
    # Smooth-L1 regression loss for the RPN, masked by the anchor validity
    # channels and normalized by the number of positive anchors.
    def rpn_loss_regr_fixed_num(y_true, y_pred):
        if K.image_dim_ordering() == 'th':
            x = y_true[:, 4 * num_anchors:, :, :] - y_pred
            x_abs = K.abs(x)
            x_bool = K.cast(K.less_equal(x_abs, 1.0), 'float32')
            return lambda_rpn_regr * K.sum(
                y_true[:, :4 * num_anchors, :, :] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :4 * num_anchors, :, :])
        else:
            x = y_true[:, :, :, 4 * num_anchors:] - y_pred
            x_abs = K.abs(x)
            x_bool = K.cast(K.less_equal(x_abs, 1.0), tf.float32)
            return lambda_rpn_regr * K.sum(
                y_true[:, :, :, :4 * num_anchors] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :, :, :4 * num_anchors])
    return rpn_loss_regr_fixed_num
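A minimal usage sketch follows. The constants lambda_rpn_regr and epsilon, the 9-anchor configuration, and the one-layer regression head are assumptions standing in for the surrounding Faster R-CNN code, not part of the example above.
import tensorflow as tf
from keras import backend as K
from keras.models import Model
from keras.layers import Input, Conv2D

lambda_rpn_regr = 1.0  # assumed module-level constants used by the loss
epsilon = 1e-4
num_anchors = 9        # e.g. 3 scales x 3 aspect ratios

# RPN regression head: 4 box offsets per anchor at each feature-map position;
# y_true is expected to stack a validity mask on top of the 4*num_anchors targets.
feature_map = Input(shape=(None, None, 512))
regr = Conv2D(4 * num_anchors, (1, 1), activation='linear')(feature_map)
rpn = Model(feature_map, regr)
rpn.compile(optimizer='adam', loss=rpn_loss_regr(num_anchors))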
Example 2: optimizer
# Required import: from keras import backend [as alias]
# Or: from keras.backend import abs [as alias]
def optimizer(self):
    a = K.placeholder(shape=(None,), dtype='int32')
    y = K.placeholder(shape=(None,), dtype='float32')
    prediction = self.model.output
    a_one_hot = K.one_hot(a, self.action_size)
    q_value = K.sum(prediction * a_one_hot, axis=1)
    error = K.abs(y - q_value)
    # Huber loss: quadratic for |error| <= 1, linear beyond
    quadratic_part = K.clip(error, 0.0, 1.0)
    linear_part = error - quadratic_part
    loss = K.mean(0.5 * K.square(quadratic_part) + linear_part)
    optimizer = RMSprop(lr=0.00025, epsilon=0.01)
    updates = optimizer.get_updates(self.model.trainable_weights, [], loss)
    train = K.function([self.model.input, a, y], [loss], updates=updates)
    return train
# build a neural network whose input is the state and whose output is the Q function
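The clip/subtract pattern above is the Huber loss: quadratic for |error| <= 1 and linear beyond, which keeps gradients bounded for large TD errors. A quick NumPy check of the decomposition (a standalone sketch, independent of the agent code):
import numpy as np

def huber(error, delta=1.0):
    quadratic = np.clip(np.abs(error), 0.0, delta)  # quadratic_part
    linear = np.abs(error) - quadratic              # linear_part
    return 0.5 * quadratic ** 2 + linear

print(huber(np.array([0.5, 2.0])))  # -> [0.125 1.5]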
Example 3: optimizer
# Required import: from keras import backend [as alias]
# Or: from keras.backend import abs [as alias]
def optimizer(self):
    a = K.placeholder(shape=(None,), dtype='int32')
    y = K.placeholder(shape=(None,), dtype='float32')
    py_x = self.model.output
    a_one_hot = K.one_hot(a, self.action_size)
    q_value = K.sum(py_x * a_one_hot, axis=1)
    error = K.abs(y - q_value)
    quadratic_part = K.clip(error, 0.0, 1.0)
    linear_part = error - quadratic_part
    loss = K.mean(0.5 * K.square(quadratic_part) + linear_part)
    optimizer = RMSprop(lr=0.00025, epsilon=0.01)
    updates = optimizer.get_updates(self.model.trainable_weights, [], loss)
    train = K.function([self.model.input, a, y], [loss], updates=updates)
    return train
# approximate the Q function with a convolutional neural network:
# the state is the input and the Q value of each action is the output
Example 4: optimizer
# Required import: from keras import backend [as alias]
# Or: from keras.backend import abs [as alias]
def optimizer(self):
    a = K.placeholder(shape=(None,), dtype='int32')
    y = K.placeholder(shape=(None,), dtype='float32')
    py_x = self.model.output
    a_one_hot = K.one_hot(a, self.action_size)
    q_value = K.sum(py_x * a_one_hot, axis=1)
    error = K.abs(y - q_value)
    quadratic_part = K.clip(error, 0.0, 1.0)
    linear_part = error - quadratic_part
    loss = K.mean(0.5 * K.square(quadratic_part) + linear_part)
    optimizer = RMSprop(lr=0.00025, epsilon=0.01)
    updates = optimizer.get_updates(self.model.trainable_weights, [], loss)
    train = K.function([self.model.input, a, y], [loss], updates=updates)
    return train
# approximate the Q function with a convolutional neural network:
# the state is the input and the Q value of each action is the output
Example 5: optimizer
# Required import: from keras import backend [as alias]
# Or: from keras.backend import abs [as alias]
def optimizer(self):
    a = K.placeholder(shape=(None,), dtype='int32')
    y = K.placeholder(shape=(None,), dtype='float32')
    py_x = self.model.output
    a_one_hot = K.one_hot(a, self.action_size)
    q_value = K.sum(py_x * a_one_hot, axis=1)
    error = K.abs(y - q_value)
    quadratic_part = K.clip(error, 0.0, 1.0)
    linear_part = error - quadratic_part
    loss = K.mean(0.5 * K.square(quadratic_part) + linear_part)
    optimizer = RMSprop(lr=0.00025, epsilon=0.01)
    updates = optimizer.get_updates(self.model.trainable_weights, [], loss)
    train = K.function([self.model.input, a, y], [loss], updates=updates)
    return train
# approximate the Q function with a convolutional neural network:
# the state is the input and the Q value of each action is the output
# the dueling network's Q value is the sum of the advantages and the state value
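The dueling decomposition mentioned in the comment, Q(s, a) = V(s) + A(s, a) - mean_a A(s, a), is usually realized with a Lambda merge layer. A minimal sketch, assuming a toy state of 4 features and 2 actions (both sizes are illustrative):
from keras import backend as K
from keras.layers import Input, Dense, Lambda
from keras.models import Model

state = Input(shape=(4,))
hidden = Dense(24, activation='relu')(state)
value = Dense(1)(hidden)      # state value V(s)
advantage = Dense(2)(hidden)  # advantage A(s, a) for each of 2 actions
# combine as Q = V + (A - mean(A)), which keeps the decomposition identifiable
q_values = Lambda(lambda t: t[0] + t[1] - K.mean(t[1], axis=1, keepdims=True))([value, advantage])
dueling_model = Model(state, q_values)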
Example 6: rpn_loss_regr
# Required import: from keras import backend [as alias]
# Or: from keras.backend import abs [as alias]
def rpn_loss_regr(num_anchors):
    def rpn_loss_regr_fixed_num(y_true, y_pred):
        if K.image_dim_ordering() == 'th':
            x = y_true[:, 4 * num_anchors:, :, :] - y_pred
            x_abs = K.abs(x)
            x_bool = K.cast(K.less_equal(x_abs, 1.0), 'float32')
            return lambda_rpn_regr * K.sum(
                y_true[:, :4 * num_anchors, :, :] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :4 * num_anchors, :, :])
        else:
            x = y_true[:, :, :, 4 * num_anchors:] - y_pred
            x_abs = K.abs(x)
            x_bool = K.cast(K.less_equal(x_abs, 1.0), tf.float32)
            return lambda_rpn_regr * K.sum(
                y_true[:, :, :, :4 * num_anchors] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :, :, :4 * num_anchors])
    return rpn_loss_regr_fixed_num
Example 7: count
# Required import: from keras import backend [as alias]
# Or: from keras.backend import abs [as alias]
def count(audio, model, scaler):
    # compute magnitude STFT
    X = np.abs(librosa.stft(audio, n_fft=400, hop_length=160)).T
    # apply global (feature-wise) standardization to zero mean, unit variance
    X = scaler.transform(X)
    # trim to the model's input length (500 frames x 201 STFT bins)
    X = X[:500, :]
    # apply L2 normalization
    Theta = np.linalg.norm(X, axis=1) + eps
    X /= np.mean(Theta)
    # add sample dimension
    X = X[np.newaxis, ...]
    if len(model.input_shape) == 4:
        X = X[:, np.newaxis, ...]
    ys = model.predict(X, verbose=0)
    return np.argmax(ys, axis=1)[0]
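A hypothetical call site for count(); the file names, the 16 kHz sample rate, and the pickled scaler are assumptions, and eps is a small module-level constant the function relies on.
import pickle
import librosa
from keras.models import load_model

eps = 1e-8  # assumed module-level constant used inside count()

audio, _ = librosa.load('speech.wav', sr=16000)  # hypothetical input recording
model = load_model('countnet.h5')                # hypothetical trained model
with open('scaler.pkl', 'rb') as f:              # hypothetical fitted scaler
    scaler = pickle.load(f)

print('estimated class:', count(audio, model, scaler))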
Example 8: call
# Required import: from keras import backend [as alias]
# Or: from keras.backend import abs [as alias]
def call(self, x, mask=None):
    # ensure the right threshold is always to the right of the left one
    t_right_actual = self.t_left + K.abs(self.t_right)
    if K.backend() == 'theano':
        t_left = K.pattern_broadcast(self.t_left, self.param_broadcast)
        a_left = K.pattern_broadcast(self.a_left, self.param_broadcast)
        a_right = K.pattern_broadcast(self.a_right, self.param_broadcast)
        t_right_actual = K.pattern_broadcast(t_right_actual,
                                             self.param_broadcast)
    else:
        t_left = self.t_left
        a_left = self.a_left
        a_right = self.a_right
    y_left_and_center = t_left + K.relu(x - t_left,
                                        a_left,
                                        t_right_actual - t_left)
    y_right = K.relu(x - t_right_actual) * a_right
    return y_left_and_center + y_right
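Read positionally, K.relu(x, alpha, max_value) applies a leaky slope alpha below zero and clips at max_value above, so this layer computes a three-piece linear activation (an S-shaped ReLU): slope a_left below t_left, identity between the two thresholds, and slope a_right above t_right. A NumPy sketch of the same function, with illustrative parameter values:
import numpy as np

def srelu(x, t_left=-1.0, a_left=0.2, t_right=1.0, a_right=0.2):
    left = t_left + a_left * np.minimum(x - t_left, 0.0)  # slope a_left below t_left
    center = np.clip(x - t_left, 0.0, t_right - t_left)   # identity in the middle
    right = a_right * np.maximum(x - t_right, 0.0)        # slope a_right above t_right
    return left + center + right

print(srelu(np.array([-2.0, 0.0, 2.0])))  # -> [-1.2  0.   1.2]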
Example 9: online_bootstrapping
# Required import: from keras import backend [as alias]
# Or: from keras.backend import abs [as alias]
def online_bootstrapping(y_true, y_pred, pixels=512, threshold=0.5):
    """Implements Online Bootstrapping cross-entropy loss, to train only on hard pixels;
    see https://arxiv.org/abs/1605.06885, "Bridging Category-level and Instance-level
    Semantic Image Segmentation". This implementation differs slightly in that it uses
    binary cross-entropy instead of softmax.
    SUPPORTS ONLY MINIBATCHES WITH 1 ELEMENT!
    # Arguments
        y_true: A tensor with labels.
        y_pred: A tensor with predicted probabilities.
        pixels: number of hard pixels to keep.
        threshold: confidence to use, i.e. if threshold is 0.7, y_true = 1 and
            prediction = 0.65, then we consider that pixel hard.
    # Returns
        Mean loss value
    """
    y_true = K.flatten(y_true)
    y_pred = K.flatten(y_pred)
    difference = K.abs(y_true - y_pred)
    values, indices = K.tf.nn.top_k(difference, sorted=True, k=pixels)
    min_difference = (1 - threshold)
    y_true = K.tf.gather(K.gather(y_true, indices), K.tf.where(values > min_difference))
    y_pred = K.tf.gather(K.gather(y_pred, indices), K.tf.where(values > min_difference))
    return K.mean(K.binary_crossentropy(y_true, y_pred))
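Since Keras losses receive only (y_true, y_pred), the extra pixels and threshold arguments can be bound with functools.partial before compiling. A sketch, assuming an already-built single-output segmentation network named segmentation_model:
from functools import partial

bootstrapped = partial(online_bootstrapping, pixels=512, threshold=0.5)
bootstrapped.__name__ = 'online_bootstrapping'  # some Keras versions read this attribute for logging

segmentation_model.compile(optimizer='adam', loss=bootstrapped)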
Example 10: class_loss_regr
# Required import: from keras import backend [as alias]
# Or: from keras.backend import abs [as alias]
def class_loss_regr(num_classes):
    def class_loss_regr_fixed_num(y_true, y_pred):
        x = y_true[:, :, 4 * num_classes:] - y_pred
        x_abs = K.abs(x)
        x_bool = K.cast(K.less_equal(x_abs, 1.0), 'float32')
        return lambda_cls_regr * K.sum(y_true[:, :, :4 * num_classes] * (x_bool * (0.5 * x * x) + (1 - x_bool) * (x_abs - 0.5))) / K.sum(epsilon + y_true[:, :, :4 * num_classes])
    return class_loss_regr_fixed_num
Example 11: iou
# Required import: from keras import backend [as alias]
# Or: from keras.backend import abs [as alias]
def iou(y_true, y_pred, smooth=100):
    intersection = K.sum(K.abs(y_true * y_pred), axis=-1)
    union = K.sum(y_true, -1) + K.sum(y_pred, -1) - intersection
    iou_acc = (intersection + smooth) / (union + smooth)
    return iou_acc
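The smooth term keeps the ratio defined for empty masks and pulls it toward 1 as overlap improves. A quick NumPy spot check with illustrative values:
import numpy as np

y_true = np.array([1.0, 1.0, 0.0, 0.0])
y_pred = np.array([1.0, 0.0, 0.0, 0.0])
intersection = np.sum(np.abs(y_true * y_pred))      # 1.0
union = y_true.sum() + y_pred.sum() - intersection  # 2.0
print((intersection + 100) / (union + 100))         # ~0.9902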
Example 12: smooth_l1_loss
# Required import: from keras import backend [as alias]
# Or: from keras.backend import abs [as alias]
def smooth_l1_loss(y_true, y_pred):
    """Implements Smooth-L1 loss.
    y_true and y_pred are typically [N, 4], but could be any shape.
    """
    diff = K.abs(y_true - y_pred)
    less_than_one = K.cast(K.less(diff, 1.0), "float32")
    loss = (less_than_one * 0.5 * diff**2) + (1 - less_than_one) * (diff - 0.5)
    return loss
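The two branches meet at |diff| = 1 with value 0.5 and matching slope, which is what bounds the gradient for outliers. A NumPy spot check with illustrative values:
import numpy as np

diff = np.abs(np.array([0.2, 1.0, 3.0]))
less_than_one = (diff < 1.0).astype('float32')
loss = less_than_one * 0.5 * diff ** 2 + (1 - less_than_one) * (diff - 0.5)
print(loss)  # -> [0.02 0.5  2.5 ]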
Example 13: trim_zeros_graph
# Required import: from keras import backend [as alias]
# Or: from keras.backend import abs [as alias]
def trim_zeros_graph(boxes, name='trim_zeros'):
    """Boxes are often represented as [N, 4] matrices padded with zeros.
    This removes the zero-padded boxes.
    boxes: [N, 4] matrix of boxes.
    Returns:
        boxes: the non-zero rows of the input.
        non_zeros: [N] 1D boolean mask identifying the rows kept.
    """
    non_zeros = tf.cast(tf.reduce_sum(tf.abs(boxes), axis=1), tf.bool)
    boxes = tf.boolean_mask(boxes, non_zeros, name=name)
    return boxes, non_zeros
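A quick check of the trimming behavior; the sketch below assumes TensorFlow eager execution (under TF 1.x graph mode the tensors would need a session to evaluate):
import tensorflow as tf

boxes = tf.constant([[0.0, 0.0, 10.0, 10.0],
                     [0.0, 0.0, 0.0, 0.0],   # zero-padded row to be trimmed
                     [5.0, 5.0, 8.0, 9.0]])
trimmed, non_zeros = trim_zeros_graph(boxes)
print(non_zeros.numpy())  # [ True False  True]
print(trimmed.numpy())    # the two non-zero boxes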
Example 14: call
# Required import: from keras import backend [as alias]
# Or: from keras.backend import abs [as alias]
def call(self, x, mask=None):
    inp1, inp2 = x[0], x[1]
    return K.abs(inp1 - inp2)
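This element-wise |a - b| merge typically appears in Siamese networks. The same operation can be written with a Lambda layer instead of a custom layer class; a minimal sketch, where the 64-dimensional embeddings and the sigmoid head are assumptions:
from keras import backend as K
from keras.layers import Input, Dense, Lambda
from keras.models import Model

emb_a = Input(shape=(64,))
emb_b = Input(shape=(64,))
abs_diff = Lambda(lambda t: K.abs(t[0] - t[1]))([emb_a, emb_b])
similarity = Dense(1, activation='sigmoid')(abs_diff)
siamese_head = Model([emb_a, emb_b], similarity)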
Example 15: call
# Required import: from keras import backend [as alias]
# Or: from keras.backend import abs [as alias]
def call(self, x, mask=None):
    return K.abs(x[0] - x[1])