This article collects typical usage examples of the keras.backend.gather method in Python. If you are wondering what backend.gather does, how to use it, or what example code looks like, the curated examples below may help. You can also explore further usage examples from the keras.backend module.
The following presents 15 code examples of the backend.gather method, sorted by popularity by default.
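Before the examples, here is a minimal, self-contained sketch of what keras.backend.gather does: it selects slices of a tensor along axis 0 by integer indices (the backend-agnostic counterpart of tf.gather). This assumes a Keras 2.x install with the TensorFlow 1.x backend, as in the examples below; the values are illustrative only.

import numpy as np
from keras import backend as K

ref = K.variable(np.arange(12).reshape(4, 3))   # reference tensor, shape (4, 3)
idx = K.variable([0, 2], dtype='int32')         # row indices to pick
rows = K.gather(ref, idx)                       # gathers along axis 0
print(K.eval(rows))                             # [[0. 1. 2.]
                                                #  [6. 7. 8.]]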
Example 1: _target_class_loss
# Required import: from keras import backend [as alias]
# Or: from keras.backend import gather [as alias]
def _target_class_loss(
        self,
        target_class,
        box_scores,
        box_class_probs_logits):
    """ Evaluate target_class_loss w.r.t. the input.
    """
    box_scores = K.squeeze(box_scores, axis=0)
    box_class_probs_logits = K.squeeze(box_class_probs_logits, axis=0)
    import tensorflow as tf
    boi_idx = tf.where(box_scores[:, target_class] > self._score)
    loss_box_class_conf = tf.reduce_mean(
        tf.gather(box_class_probs_logits[:, target_class], boi_idx))

    # Avoid the propagation of nan
    return tf.cond(
        tf.is_nan(loss_box_class_conf),
        lambda: tf.constant(0.),
        lambda: loss_box_class_conf)
Example 2: _process_input
# Required import: from keras import backend [as alias]
# Or: from keras.backend import gather [as alias]
def _process_input(self, x):
    """Apply logistic and softmax activations to input tensor
    """
    logistic_activate = lambda x: 1.0 / (1.0 + K.exp(-x))

    (batch, w, h, channels) = x.get_shape()
    x_temp = K.permute_dimensions(x, (3, 0, 1, 2))
    x_t = []
    for i in range(self.num):
        k = self._entry_index(i, 0)
        x_t.extend([
            logistic_activate(K.gather(x_temp, (k, k + 1))),  # 0
            K.gather(x_temp, (k + 2, k + 3))])
        if self.background:
            x_t.append(K.gather(x_temp, (k + 4,)))
        else:
            x_t.append(logistic_activate(K.gather(x_temp, (k + 4,))))

        x_t.append(
            softmax(
                K.gather(x_temp, tuple(range(k + 5, k + self.coords + self.classes + 1))),
                axis=0))
    x_t = K.concatenate(x_t, axis=0)
    return K.permute_dimensions(x_t, (1, 2, 3, 0))
Example 3: test_gather
# Required import: from keras import backend [as alias]
# Or: from keras.backend import gather [as alias]
def test_gather(self):
    shape = (10, 2, 3)
    ref = np.arange(np.prod(shape)).reshape(shape)
    inds = [1, 3, 7, 9]
    z_list = [k.eval(k.gather(k.variable(ref), k.variable(inds, dtype='int32')))
              for k in BACKENDS]

    assert_list_pairwise(z_list)
    assert_list_keras_shape(z_list)

    # test theano shape inference when
    # input shape has None entries
    if K.backend() == 'theano':
        x = K.placeholder(shape=(None, 3, 4))
        indices = K.placeholder(shape=(5, 6), dtype='int32')
        y = K.gather(x, indices)
        assert y._keras_shape == (5, 6, 3, 4)
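As a follow-up to the shape-inference check above: across backends the gathered shape is indices.shape + reference.shape[1:]. A small sanity check, assuming the TensorFlow backend where static shapes are available (values are illustrative only):

import numpy as np
from keras import backend as K

ref = K.variable(np.zeros((10, 2, 3)))
idx = K.variable(np.array([[1, 3], [7, 9]]), dtype='int32')
out = K.gather(ref, idx)
print(K.int_shape(out))   # (2, 2, 2, 3) == idx.shape + ref.shape[1:]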
Example 4: loss
# Required import: from keras import backend [as alias]
# Or: from keras.backend import gather [as alias]
def loss(self, y_true, y_pred):
    from keras import backend as K
    y_true = K.flatten(y_true)
    output_indices = y_true // 10
    updated_y_true = y_true - (10 * output_indices)

    # We index into y_pred using flattened indices since Keras backend
    # supports gather but has no equivalent of tf.gather_nd:
    ordinals = K.arange(K.shape(y_true)[0])
    flattened_indices = (
        ordinals * y_pred.shape[1] + K.cast(output_indices, "int32"))
    updated_y_pred = K.gather(K.flatten(y_pred), flattened_indices)

    # Alternative implementation using tensorflow, which could be used if
    # we drop support for other backends:
    # import tensorflow as tf
    # indexer = K.stack([
    #     ordinals,
    #     K.cast(output_indices, "int32")
    # ], axis=-1)
    # updated_y_pred = tf.gather_nd(y_pred, indexer)

    return MSEWithInequalities().loss(updated_y_true, updated_y_pred)
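The flattened-index trick used above (emulating tf.gather_nd with a plain gather) can be checked in isolation. A minimal numeric sketch, assuming a 2-D prediction tensor and one column index per row; the names and values here are hypothetical:

import numpy as np
from keras import backend as K

y_pred = K.variable(np.array([[0.1, 0.9],
                              [0.8, 0.2],
                              [0.3, 0.7]]))
cols = K.variable([1, 0, 1], dtype='int32')       # one column index per row
rows = K.arange(K.shape(cols)[0])                 # 0, 1, 2
flat = rows * K.int_shape(y_pred)[1] + cols       # row * n_cols + col
picked = K.gather(K.flatten(y_pred), flat)
print(K.eval(picked))                             # [0.9 0.8 0.7]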
Example 5: call
# Required import: from keras import backend [as alias]
# Or: from keras.backend import gather [as alias]
def call(self, x, mask=None):
    sims = []
    for n, sim in zip(self.n, self.similarities):
        for _ in range(n):
            # Pair each sample with a randomly drawn sample from the same batch
            batch_size = K.shape(x)[0]
            idx = K.random_uniform((batch_size,), low=0, high=batch_size,
                                   dtype='int32')
            x_shuffled = K.gather(x, idx)
            pair_sim = sim(x, x_shuffled)
            for _ in range(K.ndim(x) - 1):
                pair_sim = K.expand_dims(pair_sim, dim=1)
            sims.append(pair_sim)

    return K.concatenate(sims, axis=-1)
Example 6: online_bootstrapping
# Required import: from keras import backend [as alias]
# Or: from keras.backend import gather [as alias]
def online_bootstrapping(y_true, y_pred, pixels=512, threshold=0.5):
    """ Implements online bootstrapping crossentropy loss, to train only on hard pixels,
        see https://arxiv.org/abs/1605.06885 Bridging Category-level and Instance-level Semantic Image Segmentation
        The implementation is a bit different as we use binary crossentropy instead of softmax
        SUPPORTS ONLY MINIBATCH WITH 1 ELEMENT!
    # Arguments
        y_true: A tensor with labels.
        y_pred: A tensor with predicted probabilities.
        pixels: number of hard pixels to keep
        threshold: confidence to use, i.e. if threshold is 0.7, y_true=1, prediction=0.65 then we consider that pixel as hard
    # Returns
        Mean loss value
    """
    y_true = K.flatten(y_true)
    y_pred = K.flatten(y_pred)
    difference = K.abs(y_true - y_pred)

    values, indices = K.tf.nn.top_k(difference, sorted=True, k=pixels)
    min_difference = (1 - threshold)
    y_true = K.tf.gather(K.gather(y_true, indices), K.tf.where(values > min_difference))
    y_pred = K.tf.gather(K.gather(y_pred, indices), K.tf.where(values > min_difference))

    return K.mean(K.binary_crossentropy(y_true, y_pred))
Example 7: dice_coef_border
# Required import: from keras import backend [as alias]
# Or: from keras.backend import gather [as alias]
def dice_coef_border(y_true, y_pred):
    border = get_border_mask((21, 21), y_true)

    border = K.flatten(border)
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    y_true_f = K.tf.gather(y_true_f, K.tf.where(border > 0.5))
    y_pred_f = K.tf.gather(y_pred_f, K.tf.where(border > 0.5))

    return dice_coef(y_true_f, y_pred_f)
Example 8: bce_border
# Required import: from keras import backend [as alias]
# Or: from keras.backend import gather [as alias]
def bce_border(y_true, y_pred):
    border = get_border_mask((21, 21), y_true)

    border = K.flatten(border)
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    y_true_f = K.tf.gather(y_true_f, K.tf.where(border > 0.5))
    y_pred_f = K.tf.gather(y_pred_f, K.tf.where(border > 0.5))

    return binary_crossentropy(y_true_f, y_pred_f)
Example 9: yolo_eval
# Required import: from keras import backend [as alias]
# Or: from keras.backend import gather [as alias]
def yolo_eval(yolo_outputs,
              image_shape,
              max_boxes=10,
              score_threshold=.6,
              iou_threshold=.5):
    """Evaluate YOLO model on given input batch and return filtered boxes."""
    box_xy, box_wh, box_confidence, box_class_probs = yolo_outputs
    boxes = yolo_boxes_to_corners(box_xy, box_wh)
    boxes, scores, classes = yolo_filter_boxes(
        boxes, box_confidence, box_class_probs, threshold=score_threshold)

    # Scale boxes back to original image shape.
    height = image_shape[0]
    width = image_shape[1]
    image_dims = K.stack([height, width, height, width])
    image_dims = K.reshape(image_dims, [1, 4])
    boxes = boxes * image_dims

    # TODO: Something must be done about this ugly hack!
    max_boxes_tensor = K.variable(max_boxes, dtype='int32')
    K.get_session().run(tf.variables_initializer([max_boxes_tensor]))
    nms_index = tf.image.non_max_suppression(
        boxes, scores, max_boxes_tensor, iou_threshold=iou_threshold)
    boxes = K.gather(boxes, nms_index)
    scores = K.gather(scores, nms_index)
    classes = K.gather(classes, nms_index)
    return boxes, scores, classes
Example 10: yolo_non_max_suppression
# Required import: from keras import backend [as alias]
# Or: from keras.backend import gather [as alias]
def yolo_non_max_suppression(scores, boxes, classes, max_boxes=10, iou_threshold=0.5):
    max_boxes_tensor = K.variable(max_boxes, dtype='int32')  # tensor to be used in tf.image.non_max_suppression()
    K.get_session().run(tf.variables_initializer([max_boxes_tensor]))  # initialize variable max_boxes_tensor

    # Use tf.image.non_max_suppression() to get the list of indices corresponding to boxes you keep
    nms_indices = tf.image.non_max_suppression(boxes, scores, max_boxes, iou_threshold)

    # Use K.gather() to select only nms_indices from scores, boxes and classes
    scores = K.gather(scores, nms_indices)
    boxes = K.gather(boxes, nms_indices)
    classes = K.gather(classes, nms_indices)

    return scores, boxes, classes
Example 11: yolo_eval
# Required import: from keras import backend [as alias]
# Or: from keras.backend import gather [as alias]
def yolo_eval(yolo_outputs,
              image_shape,
              max_boxes=10,
              score_threshold=.6,
              iou_threshold=.5):
    """Evaluate YOLO model on given input batch and return filtered boxes."""
    box_confidence, box_xy, box_wh, box_class_probs = yolo_outputs
    boxes = yolo_boxes_to_corners(box_xy, box_wh)
    boxes, scores, classes = yolo_filter_boxes(
        box_confidence, boxes, box_class_probs, threshold=score_threshold)

    # Scale boxes back to original image shape.
    height = image_shape[0]
    width = image_shape[1]
    image_dims = K.stack([height, width, height, width])
    image_dims = K.reshape(image_dims, [1, 4])
    boxes = boxes * image_dims

    # TODO: Something must be done about this ugly hack!
    max_boxes_tensor = K.variable(max_boxes, dtype='int32')
    K.get_session().run(tf.variables_initializer([max_boxes_tensor]))
    nms_index = tf.image.non_max_suppression(
        boxes, scores, max_boxes_tensor, iou_threshold=iou_threshold)
    boxes = K.gather(boxes, nms_index)
    scores = K.gather(scores, nms_index)
    classes = K.gather(classes, nms_index)
    return boxes, scores, classes
Example 12: sparse_gather
# Required import: from keras import backend [as alias]
# Or: from keras.backend import gather [as alias]
def sparse_gather(y_pred, target_indices, task_name):
    # Flatten all leading dimensions into one, keeping the last (feature) axis,
    # then gather the flattened rows selected by target_indices.
    clf_h = Lambda(lambda x: K.reshape(x, (-1, K.int_shape(x)[-1])), name=task_name + '_flatten')(y_pred)
    return Lambda(lambda x: K.gather(x[0], K.cast(x[1], 'int32')), name=task_name + '_gather')([clf_h, target_indices])
Example 13: path_energy0
# Required import: from keras import backend [as alias]
# Or: from keras.backend import gather [as alias]
def path_energy0(y, x, U, mask=None):
    '''Path energy without boundary potential handling.'''
    n_classes = K.shape(x)[2]
    y_one_hot = K.one_hot(y, n_classes)

    # Tag path energy
    energy = K.sum(x * y_one_hot, 2)
    energy = K.sum(energy, 1)

    # Transition energy
    y_t = y[:, :-1]
    y_tp1 = y[:, 1:]
    U_flat = K.reshape(U, [-1])
    # Convert 2-dim indices (y_t, y_tp1) of U to 1-dim indices of U_flat:
    flat_indices = y_t * n_classes + y_tp1
    U_y_t_tp1 = K.gather(U_flat, flat_indices)

    if mask is not None:
        mask = K.cast(mask, K.floatx())
        y_t_mask = mask[:, :-1]
        y_tp1_mask = mask[:, 1:]
        U_y_t_tp1 *= y_t_mask * y_tp1_mask

    energy += K.sum(U_y_t_tp1, axis=1)
    return energy
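The transition lookup above relies on the same flattening identity as earlier examples: with U flattened row-major, U_flat[y_t * n_classes + y_tp1] equals U[y_t, y_tp1]. A tiny numpy check of that identity (illustrative values only):

import numpy as np

n_classes = 3
U = np.arange(n_classes * n_classes).reshape(n_classes, n_classes)   # transition matrix
y_t = np.array([0, 2])
y_tp1 = np.array([1, 2])
U_flat = U.reshape(-1)
assert (U_flat[y_t * n_classes + y_tp1] == U[y_t, y_tp1]).all()      # both give [1, 8]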
Example 14: batch_gather
# Required import: from keras import backend [as alias]
# Or: from keras.backend import gather [as alias]
def batch_gather(reference, indices):
    # Select reference[i, indices[i]] for every row i by flattening both the
    # reference tensor and the per-row indices.
    ref_shape = K.shape(reference)
    batch_size = ref_shape[0]
    n_classes = ref_shape[1]
    flat_indices = K.arange(0, batch_size) * n_classes + K.flatten(indices)
    return K.gather(K.flatten(reference), flat_indices)
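Example usage of the batch_gather defined above, picking one class score per batch row; a minimal sketch with made-up values, assuming the TensorFlow backend:

import numpy as np
from keras import backend as K

reference = K.variable(np.array([[0.1, 0.9, 0.0],
                                 [0.2, 0.3, 0.5]]))   # (batch_size=2, n_classes=3)
indices = K.variable([1, 2], dtype='int32')           # one class index per row
picked = batch_gather(reference, indices)
print(K.eval(picked))                                 # [0.9 0.5]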
Example 15: call
# Required import: from keras import backend [as alias]
# Or: from keras.backend import gather [as alias]
def call(self, inputs, training=None):
    if K.dtype(inputs) != 'int32':
        inputs = K.cast(inputs, 'int32')

    def _l2normalize(v, eps=1e-12):
        return v / (K.sum(v ** 2) ** 0.5 + eps)

    def power_iteration(W, u):
        # According to the paper, we only need to do power iteration one time.
        _u = u
        _v = _l2normalize(K.dot(_u, K.transpose(W)))
        _u = _l2normalize(K.dot(_v, W))
        return _u, _v

    W_shape = self.embeddings.shape.as_list()
    # Flatten the tensor
    W_reshaped = K.reshape(self.embeddings, [-1, W_shape[-1]])
    _u, _v = power_iteration(W_reshaped, self.u)
    # Calculate sigma
    sigma = K.dot(_v, W_reshaped)
    sigma = K.dot(sigma, K.transpose(_u))
    # Normalize it
    W_bar = W_reshaped / sigma
    # Reshape weight tensor
    if training in {0, False}:
        W_bar = K.reshape(W_bar, W_shape)
    else:
        with tf.control_dependencies([self.u.assign(_u)]):
            W_bar = K.reshape(W_bar, W_shape)
    self.embeddings = W_bar
    out = K.gather(self.embeddings, inputs)
    return out
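For intuition, the power-iteration step in the example above estimates the largest singular value (the spectral norm) of the reshaped embedding matrix; dividing by sigma is the spectral-normalization step from the SNGAN paper. A small numpy-only sketch of the same estimate, separate from the layer code and with illustrative shapes:

import numpy as np

def l2normalize(v, eps=1e-12):
    return v / (np.sqrt(np.sum(v ** 2)) + eps)

rng = np.random.RandomState(0)
W = rng.randn(50, 16)                 # stands in for the reshaped embedding matrix
u = l2normalize(rng.randn(1, 16))     # persistent estimate of the right singular vector

for _ in range(5):                    # a few iterations; the layer uses just one per step
    v = l2normalize(u.dot(W.T))
    u = l2normalize(v.dot(W))

sigma = float(v.dot(W).dot(u.T))      # approximate largest singular value
print(sigma, np.linalg.svd(W, compute_uv=False)[0])   # the two should be close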