This article collects typical usage examples of the Python method keras.backend.max. If you have been wondering what backend.max does, how to call it, and what real usage looks like, the curated code samples below should help. You can also explore further usage examples from the containing module, keras.backend.
The section below presents 15 code examples of backend.max, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: yolo_filter_boxes

# Required module import: from keras import backend [as alias]
# Or alternatively: from keras.backend import max [as alias]
import tensorflow as tf
from keras import backend as K

def yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold=.6):
    # Compute box scores
    box_scores = box_confidence * box_class_probs
    # Find the class with the max box score, and keep track of the corresponding score
    box_classes = K.argmax(box_scores, axis=-1)
    box_class_scores = K.max(box_scores, axis=-1, keepdims=False)
    # Create a filtering mask based on "box_class_scores" by using "threshold". The mask has the
    # same dimensions as box_class_scores and is True for the boxes to keep (score >= threshold)
    filtering_mask = box_class_scores >= threshold
    # Apply the mask to scores, boxes and classes
    scores = tf.boolean_mask(box_class_scores, filtering_mask)
    boxes = tf.boolean_mask(boxes, filtering_mask)
    classes = tf.boolean_mask(box_classes, filtering_mask)
    return scores, boxes, classes
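A minimal sketch of how this filter might be exercised with random tensors (the 19x19 grid, 5 anchors, and 80 classes below are assumed YOLO-style shapes, not part of the example above); evaluating the results still requires a TF1-style session:

from keras import backend as K

# hypothetical input shapes: a 19x19 grid, 5 anchor boxes, 80 classes
box_confidence = K.random_uniform_variable((19, 19, 5, 1), low=0, high=1)
boxes = K.random_uniform_variable((19, 19, 5, 4), low=0, high=1)
box_class_probs = K.random_uniform_variable((19, 19, 5, 80), low=0, high=1)
scores, boxes, classes = yolo_filter_boxes(box_confidence, boxes, box_class_probs, threshold=0.5)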
Example 2: call

# Required module import: from keras import backend [as alias]
# Or alternatively: from keras.backend import max [as alias]
from keras import backend as K

def call(self, x, mask=None):
    # method of an attention layer; self.W and self.return_attention
    # are attributes of the layer
    # computes a probability distribution over the timesteps
    # uses the 'max trick' for numerical stability
    # the reshape avoids an issue with TensorFlow and 1-dimensional weights
    logits = K.dot(x, self.W)
    x_shape = K.shape(x)
    logits = K.reshape(logits, (x_shape[0], x_shape[1]))
    ai = K.exp(logits - K.max(logits, axis=-1, keepdims=True))
    # masked timesteps have zero weight
    if mask is not None:
        mask = K.cast(mask, K.floatx())
        ai = ai * mask
    att_weights = ai / (K.sum(ai, axis=1, keepdims=True) + K.epsilon())
    weighted_input = x * K.expand_dims(att_weights)
    result = K.sum(weighted_input, axis=1)
    if self.return_attention:
        return [result, att_weights]
    return result
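The K.max subtraction here is the standard "max trick": the attention weights are invariant to shifting the logits, so subtracting the row maximum keeps every exponent at or below zero and prevents overflow. A quick NumPy illustration of the failure mode it avoids:

import numpy as np

logits = np.array([1000.0, 1001.0, 1002.0])
# np.exp(logits) overflows to [inf, inf, inf]
shifted = logits - logits.max()     # [-2., -1., 0.]
e = np.exp(shifted)                 # finite
print(e / e.sum())                  # [0.09003057 0.24472847 0.66524096]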
Example 3: time_distributed_nonzero_max_pooling

# Required module import: from keras import backend [as alias]
# Or alternatively: from keras.backend import max [as alias]
import numpy

def time_distributed_nonzero_max_pooling(x):
    """
    Computes the maximum along the first (time) dimension,
    ignoring entries equal to the mask value.
    In:
        x - input; a 3D tensor
        mask_value - value to mask out; 0.0 by default
    """
    import theano.tensor as T
    mask_value = 0.0
    x = T.switch(T.eq(x, mask_value), -numpy.inf, x)
    masked_max_x = x.max(axis=1)
    # replace infinities (fully masked timesteps) with mask_value
    masked_max_x = T.switch(T.eq(masked_max_x, -numpy.inf), 0, masked_max_x)
    return masked_max_x
Example 4: time_distributed_masked_max

# Required module import: from keras import backend [as alias]
# Or alternatively: from keras.backend import max [as alias]
import numpy
from keras import backend as K

def time_distributed_masked_max(x, m):
    """
    Computes the max along the first (time) dimension.
    In:
        x - input; a 3D tensor
        m - mask
        m_value - value used for masking
    """
    # place -inf where the mask is off
    m_value = 0.0
    tmp = K.switch(K.equal(m, 0.0), -numpy.inf, 0.0)
    x_with_inf = x + K.expand_dims(tmp)
    x_max = K.max(x_with_inf, axis=1)
    r = K.switch(K.equal(x_max, -numpy.inf), m_value, x_max)
    return r
## classes ##
# Transforms existing layers to masked layers
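For intuition, here is the same masked-max idea as time_distributed_masked_max above in plain NumPy (an illustrative re-implementation, not part of the original code): masked positions are pushed to -inf before the reduction, and a row whose mask is entirely off falls back to m_value.

import numpy as np

x = np.array([[[1.0], [5.0], [3.0]]])   # (batch=1, time=3, features=1)
m = np.array([[1.0, 1.0, 0.0]])         # last timestep masked out
x_with_inf = x + np.where(m == 0.0, -np.inf, 0.0)[..., None]
x_max = x_with_inf.max(axis=1)          # [[5.0]]; the masked 3.0 is ignored
x_max = np.where(np.isinf(x_max), 0.0, x_max)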
Example 5: gen_adv_loss

# Required module import: from keras import backend [as alias]
# Or alternatively: from keras.backend import max [as alias]
from keras import backend as K

def gen_adv_loss(logits, y, loss='logloss', mean=False):
    """
    Generate the loss function.
    """
    if loss == 'training':
        # use the model's output instead of the true labels to avoid
        # label leaking at training time
        y = K.cast(K.equal(logits, K.max(logits, 1, keepdims=True)), "float32")
        y = y / K.sum(y, 1, keepdims=True)
        out = K.categorical_crossentropy(y, logits, from_logits=True)
    elif loss == 'logloss':
        out = K.categorical_crossentropy(y, logits, from_logits=True)
    else:
        raise ValueError("Unknown loss: {}".format(loss))
    if mean:
        out = K.mean(out)
    # else:
    #     out = K.sum(out)
    return out
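The 'training' branch builds its pseudo-label by comparing each logit to the row maximum, which yields a one-hot vector (multi-hot on ties, hence the renormalization by K.sum). The same trick in NumPy, for illustration:

import numpy as np

logits = np.array([[0.1, 2.3, 0.5]])
y = (logits == logits.max(axis=1, keepdims=True)).astype("float32")
y = y / y.sum(axis=1, keepdims=True)    # [[0., 1., 0.]]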
Example 6: gen_adv_loss

# Required module import: from keras import backend [as alias]
# Or alternatively: from keras.backend import max [as alias]
import tensorflow as tf
from keras import backend as K

def gen_adv_loss(logits, y, loss='logloss', mean=False):
    """
    Generate the loss function.
    """
    if loss == 'training':
        # use the model's output instead of the true labels to avoid
        # label leaking at training time
        y = K.cast(K.equal(logits, K.max(logits, 1, keepdims=True)), "float32")
        y = y / K.sum(y, 1, keepdims=True)
        out = K.categorical_crossentropy(y, logits, from_logits=True)
    elif loss == 'logloss':
        # out = K.categorical_crossentropy(y, logits, from_logits=True)
        out = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y)
        out = tf.reduce_mean(out)
    else:
        raise ValueError("Unknown loss: {}".format(loss))
    if mean:
        out = tf.reduce_mean(out)
    # else:
    #     out = K.sum(out)
    return out
Example 7: calculate_gradient_weighted_CAM

# Required module import: from keras import backend [as alias]
# Or alternatively: from keras.backend import max [as alias]
import cv2
import numpy as np

def calculate_gradient_weighted_CAM(gradient_function, image):
    output, evaluated_gradients = gradient_function([image, False])
    output, evaluated_gradients = output[0, :], evaluated_gradients[0, :, :, :]
    # weight each feature map by its spatially averaged gradient (Grad-CAM)
    weights = np.mean(evaluated_gradients, axis=(0, 1))
    CAM = np.ones(output.shape[0:2], dtype=np.float32)
    for weight_arg, weight in enumerate(weights):
        CAM = CAM + (weight * output[:, :, weight_arg])
    CAM = cv2.resize(CAM, (64, 64))
    CAM = np.maximum(CAM, 0)
    heatmap = CAM / np.max(CAM)
    # Return to BGR [0..255] from the preprocessed image
    image = image[0, :]
    image = image - np.min(image)
    image = np.minimum(image, 255)
    CAM = cv2.applyColorMap(np.uint8(255 * heatmap), cv2.COLORMAP_JET)
    CAM = np.float32(CAM) + np.float32(image)
    CAM = 255 * CAM / np.max(CAM)
    return np.uint8(CAM), heatmap
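gradient_function is not defined in this snippet; a plausible construction, following the usual Keras Grad-CAM pattern (the model and layer name below are hypothetical), pairs the last convolutional feature map with the gradient of the predicted class score:

from keras import backend as K

# hypothetical: `model` is a trained Keras model, 'conv2d_last' its final conv layer
conv_output = model.get_layer('conv2d_last').output
class_score = K.max(model.output, axis=-1)   # score of the highest-scoring class
grads = K.gradients(class_score, conv_output)[0]
gradient_function = K.function([model.input, K.learning_phase()],
                               [conv_output, grads])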
Example 8: yolo_eval

# Required module import: from keras import backend [as alias]
# Or alternatively: from keras.backend import max [as alias]

def yolo_eval(yolo_outputs, image_shape=(720., 1280.), max_boxes=10, score_threshold=.6, iou_threshold=.5):
    # Retrieve the outputs of the YOLO model
    box_confidence, box_xy, box_wh, box_class_probs = yolo_outputs
    # Convert boxes to corner coordinates, ready for the filtering functions
    boxes = yolo_boxes_to_corners(box_xy, box_wh)
    # Score-filtering with a threshold of score_threshold
    scores, boxes, classes = yolo_filter_boxes(box_confidence, boxes, box_class_probs, score_threshold)
    # Scale boxes back to the original image shape
    boxes = scale_boxes(boxes, image_shape)  # boxes: [y1, x1, y2, x2]
    # Non-max suppression with a threshold of iou_threshold
    scores, boxes, classes = yolo_non_max_suppression(scores, boxes, classes, max_boxes, iou_threshold)
    return scores, boxes, classes
Example 9: calculate_gradient_weighted_CAM

# Required module import: from keras import backend [as alias]
# Or alternatively: from keras.backend import max [as alias]
import cv2
import numpy as np

def calculate_gradient_weighted_CAM(gradient_function, image):
    output, evaluated_gradients = gradient_function([image, False])
    output, evaluated_gradients = output[0, :], evaluated_gradients[0, :, :, :]
    weights = np.mean(evaluated_gradients, axis=(0, 1))
    CAM = np.ones(output.shape[0:2], dtype=np.float32)
    for weight_arg, weight in enumerate(weights):
        CAM = CAM + (weight * output[:, :, weight_arg])
    CAM = cv2.resize(CAM, (64, 64))
    CAM = np.maximum(CAM, 0)
    heatmap = CAM / np.max(CAM)
    # Return to BGR [0..255] from the preprocessed image
    image = image[0, :]
    image = image - np.min(image)
    image = np.minimum(image, 255)
    CAM = cv2.applyColorMap(np.uint8(255 * heatmap), cv2.COLORMAP_JET)
    CAM = np.float32(CAM) + np.float32(image)
    CAM = 255 * CAM / np.max(CAM)
    return np.uint8(CAM), heatmap
Example 10: lq_loss_wrap

# Required module import: from keras import backend [as alias]
# Or alternatively: from keras.backend import max [as alias]
from keras import backend as K

def lq_loss_wrap(_q):
    def lq_loss_core(y_true, y_pred):
        """
        The Lq (generalized cross-entropy) loss proposed in:
        Zhilu Zhang and Mert R. Sabuncu, "Generalized Cross Entropy Loss for
        Training Deep Neural Networks with Noisy Labels", 2018.
        https://arxiv.org/pdf/1805.07836.pdf
        :param y_true: one-hot encoded labels
        :param y_pred: predicted class probabilities
        :return: per-sample Lq loss
        """
        # hyperparameter q
        print(_q)
        # pick out the predicted probability of the true class
        _tmp = y_pred * y_true
        _loss = K.max(_tmp, axis=-1)
        # compute the Lq loss between the one-hot encoded label and the prediction
        _loss = (1 - (_loss + 10 ** (-8)) ** _q) / _q
        return _loss
    return lq_loss_core
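A minimal usage sketch, assuming `model` is some Keras classifier with a softmax output (the model and the choice q=0.7 are placeholders, not part of the example): the wrapper fixes q and hands Keras a standard loss(y_true, y_pred) callable.

# hypothetical usage; as q -> 0 the Lq loss approaches cross-entropy,
# while q = 1 gives the noise-robust MAE
model.compile(optimizer='adam', loss=lq_loss_wrap(0.7), metrics=['accuracy'])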
Example 11: _softmax

# Required module import: from keras import backend [as alias]
# Or alternatively: from keras.backend import max [as alias]
from keras import backend as K

def _softmax(x, axis=-1, alpha=1):
    """
    Softmax activation function, building on the Keras implementation
    but allowing an alpha parameter.
    # Arguments
        x: Tensor.
        axis: Integer, axis along which the softmax normalization is applied.
        alpha: a value by which to multiply all of x before the softmax.
    # Returns
        Tensor, output of the softmax transformation.
    # Raises
        ValueError: In case `dim(x) == 1`.
    """
    x = alpha * x
    ndim = K.ndim(x)
    if ndim == 2:
        return K.softmax(x)
    elif ndim > 2:
        # subtract the max for numerical stability, then normalize
        e = K.exp(x - K.max(x, axis=axis, keepdims=True))
        s = K.sum(e, axis=axis, keepdims=True)
        return e / s
    else:
        raise ValueError('Cannot apply softmax to a tensor that is 1D')
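The alpha factor acts as an inverse temperature: scaling the logits up sharpens the softmax toward a one-hot of the argmax, which is the direct connection to max. A NumPy sketch of the effect (for illustration only):

import numpy as np

def np_softmax(x, alpha=1):
    x = alpha * np.asarray(x, dtype="float64")
    e = np.exp(x - x.max())
    return e / e.sum()

print(np_softmax([1.0, 2.0]))            # [0.26894142 0.73105858]
print(np_softmax([1.0, 2.0], alpha=10))  # ~[4.5e-05, 1.0], nearly argmax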
Example 12: call

# Required module import: from keras import backend [as alias]
# Or alternatively: from keras.backend import max [as alias]
from keras import backend as K

def call(self, inputs, mask=None, **kwargs):
    # method of a scaled dot-product attention layer;
    # self.history_only and self.return_attention are layer attributes
    if isinstance(inputs, list):
        query, key, value = inputs
    else:
        query = key = value = inputs
    if isinstance(mask, list):
        mask = mask[1]
    feature_dim = K.shape(query)[-1]
    # scaled dot-product scores, shifted by the row max for numerical stability
    e = K.batch_dot(query, key, axes=2) / K.sqrt(K.cast(feature_dim, dtype=K.floatx()))
    e = K.exp(e - K.max(e, axis=-1, keepdims=True))
    if self.history_only:
        # causal mask: a query may only attend to keys at earlier or equal positions
        query_len, key_len = K.shape(query)[1], K.shape(key)[1]
        indices = K.tile(K.expand_dims(K.arange(key_len), axis=0), [query_len, 1])
        upper = K.expand_dims(K.arange(key_len), axis=-1)
        e *= K.expand_dims(K.cast(indices <= upper, K.floatx()), axis=0)
    if mask is not None:
        e *= K.cast(K.expand_dims(mask, axis=-2), K.floatx())
    a = e / (K.sum(e, axis=-1, keepdims=True) + K.epsilon())
    v = K.batch_dot(a, value)
    if self.return_attention:
        return [v, a]
    return v
Example 13: softmax

# Required module import: from keras import backend [as alias]
# Or alternatively: from keras.backend import max [as alias]
from keras import backend as K

def softmax(x, axis=1):
    """Softmax activation function.
    # Arguments
        x: Tensor.
        axis: Integer, axis along which the softmax normalization is applied.
    # Returns
        Tensor, output of the softmax transformation.
    # Raises
        ValueError: In case `dim(x) == 1`.
    """
    ndim = K.ndim(x)
    if ndim == 2:
        return K.softmax(x)
    elif ndim > 2:
        e = K.exp(x - K.max(x, axis=axis, keepdims=True))
        s = K.sum(e, axis=axis, keepdims=True)
        return e / s
    else:
        raise ValueError('Cannot apply softmax to a tensor that is 1D')
Example 14: get_batch

# Required module import: from keras import backend [as alias]
# Or alternatively: from keras.backend import max [as alias]
import numpy as np
from random import sample

def get_batch(self, model, batch_size, gamma=0.9):
    # method of an experience-replay memory; self.memory holds flattened
    # (S, a, r, S', game_over) transitions and self.input_shape the state shape
    if self.fast:
        return self.get_batch_fast(model, batch_size, gamma)
    if len(self.memory) < batch_size:
        batch_size = len(self.memory)
    nb_actions = model.get_output_shape_at(0)[-1]
    samples = np.array(sample(self.memory, batch_size))
    input_dim = np.prod(self.input_shape)
    S = samples[:, 0:input_dim]
    a = samples[:, input_dim]
    r = samples[:, input_dim + 1]
    S_prime = samples[:, input_dim + 2:2 * input_dim + 2]
    game_over = samples[:, 2 * input_dim + 2]
    r = r.repeat(nb_actions).reshape((batch_size, nb_actions))
    game_over = game_over.repeat(nb_actions).reshape((batch_size, nb_actions))
    S = S.reshape((batch_size,) + self.input_shape)
    S_prime = S_prime.reshape((batch_size,) + self.input_shape)
    # one forward pass for both S and S'; the second half gives max_a' Q(S', a')
    X = np.concatenate([S, S_prime], axis=0)
    Y = model.predict(X)
    Qsa = np.max(Y[batch_size:], axis=1).repeat(nb_actions).reshape((batch_size, nb_actions))
    delta = np.zeros((batch_size, nb_actions))
    a = a.astype(int)
    delta[np.arange(batch_size), a] = 1
    targets = (1 - delta) * Y[:batch_size] + delta * (r + gamma * (1 - game_over) * Qsa)
    return S, targets
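The target construction is the one-step Q-learning update: only the action actually taken gets the target r + gamma * max_a' Q(S', a') (with the bootstrap term zeroed when the episode has ended), while every other action keeps the model's current prediction. A single-transition numeric check, with made-up values:

import numpy as np

gamma = 0.9
r, game_over, a = 1.0, 0.0, 1            # reward, terminal flag, action taken
Y_s = np.array([0.2, 0.5, 0.1])          # current Q(S, .)
Y_sprime = np.array([0.3, 0.7, 0.4])     # Q(S', .)
target = Y_s.copy()
target[a] = r + gamma * (1 - game_over) * Y_sprime.max()
print(target)                            # [0.2  1.63 0.1 ]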
Example 15: set_batch_function

# Required module import: from keras import backend [as alias]
# Or alternatively: from keras.backend import max [as alias]
import numpy as np
from keras import backend as K

def set_batch_function(self, model, input_shape, batch_size, nb_actions, gamma):
    # symbolic version of get_batch: builds a Keras backend function that
    # maps a batch of flattened transitions to (states, Q-learning targets)
    input_dim = np.prod(input_shape)
    samples = K.placeholder(shape=(batch_size, input_dim * 2 + 3))
    S = samples[:, 0:input_dim]
    a = samples[:, input_dim]
    r = samples[:, input_dim + 1]
    S_prime = samples[:, input_dim + 2:2 * input_dim + 2]
    game_over = samples[:, 2 * input_dim + 2:2 * input_dim + 3]
    r = K.reshape(r, (batch_size, 1))
    r = K.repeat(r, nb_actions)
    r = K.reshape(r, (batch_size, nb_actions))
    game_over = K.repeat(game_over, nb_actions)
    game_over = K.reshape(game_over, (batch_size, nb_actions))
    S = K.reshape(S, (batch_size,) + input_shape)
    S_prime = K.reshape(S_prime, (batch_size,) + input_shape)
    X = K.concatenate([S, S_prime], axis=0)
    Y = model(X)
    Qsa = K.max(Y[batch_size:], axis=1)
    Qsa = K.reshape(Qsa, (batch_size, 1))
    Qsa = K.repeat(Qsa, nb_actions)
    Qsa = K.reshape(Qsa, (batch_size, nb_actions))
    delta = K.reshape(self.one_hot(a, nb_actions), (batch_size, nb_actions))
    targets = (1 - delta) * Y[:batch_size] + delta * (r + gamma * (1 - game_over) * Qsa)
    self.batch_function = K.function(inputs=[samples], outputs=[S, targets])