This article collects typical usage examples of the Python method keras.backend.map_fn. If you have been wondering how to use backend.map_fn, what it does, or what it looks like in practice, the curated examples below may help. You can also explore the other methods of the keras.backend module.
The following shows 12 code examples of backend.map_fn, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
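Before the examples, a minimal sketch (not taken from any of them) of what K.map_fn does: it applies a function to each slice of a tensor along the first axis, wrapping tf.map_fn on the TensorFlow backend.

import numpy as np
from keras import backend as K

x = K.constant(np.arange(6.0).reshape(3, 2))  # shape (3, 2)
y = K.map_fn(lambda row: row * row, x)        # fn applied to each row slice
print(K.eval(y))
# [[ 0.  1.]
#  [ 4.  9.]
#  [16. 25.]]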
Example 1: call
# Required module import: from keras import backend [as alias]
# Or: from keras.backend import map_fn [as alias]
def call(self, inputs, **kwargs):
    # (batch_size, 1, input_num_capsule, input_dim_capsule)
    expand_inputs = K.expand_dims(inputs, axis=1)
    # (batch_size, num_capsule, input_num_capsule, input_dim_capsule)
    expand_inputs = K.tile(expand_inputs, (1, self.num_capsule, 1, 1))
    # (batch_size, num_capsule, input_num_capsule, dim_capsule)
    u_hat = K.map_fn(lambda x: K.batch_dot(x, self.W, axes=[2, 3]), expand_inputs)
    if self.num_routing <= 0:
        self.num_routing = 3
    # (batch_size, num_capsule, input_num_capsule)
    # note: a dynamic batch dimension generally needs tf.zeros rather than K.zeros
    b = K.zeros((K.shape(u_hat)[0], self.num_capsule, self.input_num_capsule))
    for i in range(self.num_routing):  # range (xrange is Python 2 only)
        # (batch_size, num_capsule, input_num_capsule)
        c = softmax(b, axis=1)
        # (batch_size, num_capsule, dim_capsule)
        s = K.batch_dot(c, u_hat, axes=[2, 2])
        squashed_s = squash(s)
        if i < self.num_routing - 1:
            # (batch_size, num_capsule, input_num_capsule)
            b += K.batch_dot(squashed_s, u_hat, axes=[2, 3])
    return squashed_s
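Examples 1, 2, 8 and 9 call a squash helper (and Example 1 an axis-aware softmax) that is not shown. A minimal sketch of both, following the nonlinearity from the CapsNet paper (Sabour et al., 2017); the exact helpers in the original repositories may differ.

import keras.backend as K

def squash(vectors, axis=-1):
    # CapsNet squashing nonlinearity: shrink each vector's norm into [0, 1)
    # while preserving its direction.
    s_squared_norm = K.sum(K.square(vectors), axis, keepdims=True)
    scale = s_squared_norm / (1 + s_squared_norm) / K.sqrt(s_squared_norm + K.epsilon())
    return scale * vectors

def softmax(x, axis=-1):
    # Numerically stable softmax along an arbitrary axis.
    ex = K.exp(x - K.max(x, axis=axis, keepdims=True))
    return ex / K.sum(ex, axis=axis, keepdims=True)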
Example 2: call
# Required module import: from keras import backend [as alias]
# Or: from keras.backend import map_fn [as alias]
def call(self, inputs, training=None):
    inputs_expand = K.expand_dims(inputs, 1)
    inputs_tiled = K.tile(inputs_expand, [1, self.num_capsule, 1, 1])
    if self.channels != 0:
        # share one weight block across input_num_capsule / channels capsules
        W2 = K.repeat_elements(self.W, int(self.input_num_capsule / self.channels), 1)
    else:
        W2 = self.W
    inputs_hat = K.map_fn(lambda x: K.batch_dot(x, W2, [2, 3]), elems=inputs_tiled)
    b = tf.zeros(shape=[K.shape(inputs_hat)[0], self.num_capsule, self.input_num_capsule])
    assert self.routings > 0, 'The routings should be > 0.'
    for i in range(self.routings):
        c = tf.nn.softmax(b, axis=1)  # `dim=` is deprecated in tf.nn.softmax
        outputs = squash(K.batch_dot(c, inputs_hat, [2, 2]) + self.B)
        if i < self.routings - 1:
            b += K.batch_dot(outputs, inputs_hat, [2, 3])
    return outputs
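Example 2's channels branch shares one weight slice across groups of input capsules via K.repeat_elements; a tiny illustration of that repetition pattern (shapes here are toy values, not from the original layer):

import numpy as np
from keras import backend as K

W = K.constant(np.arange(6.0).reshape(1, 2, 3))  # 2 channel blocks along axis 1
W2 = K.repeat_elements(W, 3, axis=1)             # each block repeated 3x: [a,a,a,b,b,b]
print(K.eval(W2).shape)  # (1, 6, 3)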
Example 3: augmented_loss
# Required module import: from keras import backend [as alias]
# Or: from keras.backend import map_fn [as alias]
def augmented_loss(self, y_true, y_pred):
    _y_pred = Activation("softmax")(y_pred)
    # Keras 2 argument order is (target, output); the original had them swapped.
    loss = K.categorical_crossentropy(y_true, _y_pred)
    # y is (batch x seq x vocab)
    y_indexes = K.argmax(y_true, axis=2)  # turn one-hot to index (batch x seq)
    y_vectors = self.embedding(y_indexes)  # look up the vector (batch x seq x vector_length)
    # An equivalent K.map_fn-based formulation, kept from the source for reference:
    # v_length = self.setting.vector_length
    # y_vectors = K.reshape(y_vectors, (-1, v_length))
    # y_t = K.map_fn(lambda v: K.dot(self.embedding.embeddings, K.reshape(v, (-1, 1))), y_vectors)
    # y_t = K.squeeze(y_t, axis=2)  # unknown but necessary operation
    # y_t = K.reshape(y_t, (-1, self.sequence_size, self.vocab_size))
    # vector x embedding dot products (batch x seq x vocab)
    y_t = tf.tensordot(y_vectors, K.transpose(self.embedding.embeddings), 1)
    y_t = K.reshape(y_t, (-1, self.sequence_size, self.vocab_size))  # set the shape explicitly
    y_t = K.softmax(y_t / self.temperature)
    _y_pred_t = Activation("softmax")(y_pred / self.temperature)
    aug_loss = kullback_leibler_divergence(y_t, _y_pred_t)
    loss += (self.gamma * self.temperature) * aug_loss
    return loss
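The core of the augmentation is turning gold tokens into soft targets via embedding similarity, then matching the temperature-scaled prediction against them. A self-contained sketch of just that step, with toy shapes (all names below are illustrative, not from the original source):

import numpy as np
import tensorflow as tf
from keras import backend as K

vocab_size, vec_len, temperature = 5, 4, 2.0
embeddings = K.constant(np.random.rand(vocab_size, vec_len))  # (vocab, vec)
y_vectors = K.constant(np.random.rand(3, vec_len))            # (seq, vec) gold-token vectors
# similarity of each gold vector to every vocabulary embedding -> (seq, vocab)
logits = tf.tensordot(y_vectors, K.transpose(embeddings), 1)
soft_targets = K.softmax(logits / temperature)                # temperature-smoothed targets
print(K.eval(soft_targets).shape)  # (3, 5)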
Example 4: _ctdet_decode
# Required module import: from keras import backend [as alias]
# Or: from keras.backend import map_fn [as alias]
def _ctdet_decode(hm, reg, wh, k=100, output_stride=4):
    hm = K.sigmoid(hm)
    hm = _nms(hm)
    hm_shape = K.shape(hm)
    reg_shape = K.shape(reg)
    wh_shape = K.shape(wh)
    batch, width, cat = hm_shape[0], hm_shape[2], hm_shape[3]

    hm_flat = K.reshape(hm, (batch, -1))
    reg_flat = K.reshape(reg, (reg_shape[0], -1, reg_shape[-1]))
    wh_flat = K.reshape(wh, (wh_shape[0], -1, wh_shape[-1]))

    def _process_sample(args):
        _hm, _reg, _wh = args
        # top-k over the flattened heatmap, then recover class and spatial indices
        _scores, _inds = tf.math.top_k(_hm, k=k, sorted=True)
        _classes = K.cast(_inds % cat, 'float32')
        _inds = K.cast(_inds / cat, 'int32')
        _xs = K.cast(_inds % width, 'float32')
        _ys = K.cast(K.cast(_inds / width, 'int32'), 'float32')
        _wh = K.gather(_wh, _inds)
        _reg = K.gather(_reg, _inds)
        # apply the sub-pixel offset, then form corner coordinates from center/size
        _xs = _xs + _reg[..., 0]
        _ys = _ys + _reg[..., 1]
        _x1 = _xs - _wh[..., 0] / 2
        _y1 = _ys - _wh[..., 1] / 2
        _x2 = _xs + _wh[..., 0] / 2
        _y2 = _ys + _wh[..., 1] / 2
        # rescale to image coordinates
        _x1 = output_stride * _x1
        _y1 = output_stride * _y1
        _x2 = output_stride * _x2
        _y2 = output_stride * _y2
        _detection = K.stack([_x1, _y1, _x2, _y2, _scores, _classes], -1)
        return _detection

    detections = K.map_fn(_process_sample, [hm_flat, reg_flat, wh_flat], dtype=K.floatx())
    return detections
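Example 4 relies on an _nms helper that is not shown. In CenterNet-style decoders this is typically a max-pooling trick that keeps only local peaks of the heatmap; a minimal sketch under that assumption:

from keras import backend as K

def _nms(heat, kernel=3):
    # Keep a heatmap value only where it equals the local maximum in a
    # kernel x kernel window, i.e. suppress non-peak responses.
    hmax = K.pool2d(heat, (kernel, kernel), strides=(1, 1),
                    padding='same', pool_mode='max')
    keep = K.cast(K.equal(hmax, heat), K.floatx())
    return heat * keep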
Example 5: accuracy
# Required module import: from keras import backend [as alias]
# Or: from keras.backend import map_fn [as alias]
def accuracy(y_true, y_pred):
    def calculate_accuracy(true_and_pred):
        y_true, y_pred_start, y_pred_end = true_and_pred
        # y_true holds the [start_index, end_index] of the answer span
        start_probability = y_pred_start[K.cast(y_true[0], dtype='int32')]
        end_probability = y_pred_end[K.cast(y_true[1], dtype='int32')]
        return (start_probability + end_probability) / 2.0

    y_true = K.squeeze(y_true, axis=1)
    y_pred_start = y_pred[:, 0, :]
    y_pred_end = y_pred[:, 1, :]
    accuracy = K.map_fn(calculate_accuracy, (y_true, y_pred_start, y_pred_end), dtype='float32')
    return K.mean(accuracy, axis=0)
Example 6: negative_avg_log_error
# Required module import: from keras import backend [as alias]
# Or: from keras.backend import map_fn [as alias]
def negative_avg_log_error(y_true, y_pred):
    def sum_of_log_probabilities(true_and_pred):
        y_true, y_pred_start, y_pred_end = true_and_pred
        # y_true holds the [start_index, end_index] of the answer span
        start_probability = y_pred_start[K.cast(y_true[0], dtype='int32')]
        end_probability = y_pred_end[K.cast(y_true[1], dtype='int32')]
        return K.log(start_probability) + K.log(end_probability)

    y_true = K.squeeze(y_true, axis=1)
    y_pred_start = y_pred[:, 0, :]
    y_pred_end = y_pred[:, 1, :]
    batch_probability_sum = K.map_fn(sum_of_log_probabilities, (y_true, y_pred_start, y_pred_end), dtype='float32')
    return -K.mean(batch_probability_sum, axis=0)
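Examples 5 and 6 pair naturally as metric and loss for span-prediction (SQuAD-style) models: y_true carries the start/end indices, and y_pred stacks the start and end probability distributions along axis 1. A toy numeric check, with the function from Example 6 in scope (shapes and values are illustrative):

import numpy as np
from keras import backend as K

# Batch of 2, context length 4; y_true[:, 0] holds [start_idx, end_idx].
y_true = K.constant(np.array([[[1, 2]], [[0, 3]]]), dtype='float32')  # (2, 1, 2)
y_pred = K.constant(np.array(
    [[[0.1, 0.7, 0.1, 0.1], [0.1, 0.1, 0.6, 0.2]],
     [[0.8, 0.1, 0.05, 0.05], [0.1, 0.1, 0.1, 0.7]]]))               # (2, 2, 4)
print(K.eval(negative_avg_log_error(y_true, y_pred)))
# -(mean(log .7 + log .6, log .8 + log .7)) ~= 0.724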
Example 7: mc_dropout_preds
# Required module import: from keras import backend [as alias]
# Or: from keras.backend import map_fn [as alias]
# Also requires: import itertools as itr
def mc_dropout_preds(model, x: tf.Tensor, n_mc: int) -> tf.Tensor:
    """
    Take a model and a batch tensor of inputs, and return the result of
    doing n_mc stochastic forward passes as an n_mc x batch_size x
    n_classes tensor. This assumes the model contains stochastic layers
    such as dropout, and that it has been loaded with
    keras.backend.set_learning_phase(True). Also note that this takes
    and returns Keras tensors, not arrays.
    """
    # tile x n_mc times and predict in a single batch
    xs = K.stack(list(itr.repeat(x, n_mc)))
    mc_preds = K.map_fn(model, xs)  # [n_mc x batch_size x n_classes]
    return mc_preds
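A hedged usage sketch for Example 7: enable learning-phase dropout, run the stochastic passes, then average them. The model below is illustrative only, not from the original source.

import numpy as np
from keras import backend as K
from keras.models import Sequential
from keras.layers import Dense, Dropout

K.set_learning_phase(True)  # keep dropout stochastic at prediction time
model = Sequential([Dense(16, activation='relu', input_shape=(8,)),
                    Dropout(0.5),
                    Dense(3, activation='softmax')])
x = K.constant(np.random.rand(4, 8).astype('float32'))
mc = mc_dropout_preds(model, x, n_mc=10)  # shape (10, 4, 3)
mean_pred = K.mean(mc, axis=0)            # MC-averaged class probabilities
print(K.eval(mean_pred).shape)            # (4, 3)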
Example 8: call
# Required module import: from keras import backend [as alias]
# Or: from keras.backend import map_fn [as alias]
def call(self, inputs, training=None):
    # inputs.shape=[None, input_num_capsule, input_dim_capsule]
    # inputs_expand.shape=[None, 1, input_num_capsule, input_dim_capsule]
    inputs_expand = K.expand_dims(inputs, 1)

    # Replicate the num_capsule dimension to prepare for multiplication by W.
    # inputs_tiled.shape=[None, num_capsule, input_num_capsule, input_dim_capsule]
    inputs_tiled = K.tile(inputs_expand, [1, self.num_capsule, 1, 1])

    # Compute `inputs * W` by scanning inputs_tiled on dimension 0.
    # x.shape=[num_capsule, input_num_capsule, input_dim_capsule]
    # W.shape=[num_capsule, input_num_capsule, dim_capsule, input_dim_capsule]
    # Regard the first two dimensions as the `batch` dimension, then
    # matmul: [input_dim_capsule] x [dim_capsule, input_dim_capsule]^T -> [dim_capsule].
    # inputs_hat.shape = [None, num_capsule, input_num_capsule, dim_capsule]
    inputs_hat = K.map_fn(lambda x: batch_dot(x, self.W, [2, 3]), elems=inputs_tiled)

    # Begin: Routing algorithm ----------------------------------------------#
    # The prior for the coupling coefficients, initialized as zeros.
    # b.shape = [None, self.num_capsule, self.input_num_capsule].
    b = tf.zeros(shape=[K.shape(inputs_hat)[0],
                        self.num_capsule, self.input_num_capsule])

    output_list = []
    assert self.routings > 0, 'The routings should be > 0.'
    for i in range(self.routings):
        # c.shape=[batch_size, num_capsule, input_num_capsule]
        c = tf.nn.softmax(b, axis=1)  # `dim=` is deprecated in tf.nn.softmax

        # inputs_hat.shape=[None, num_capsule, input_num_capsule, dim_capsule]
        # Treat the first two dimensions as the `batch` dimension, then
        # matmul: [input_num_capsule] x [input_num_capsule, dim_capsule] -> [dim_capsule].
        # outputs.shape=[None, num_capsule, dim_capsule], e.g. [None, 10, 16]
        outputs = squash(batch_dot(c, inputs_hat, [2, 2]))
        # output_list.append(K.expand_dims(outputs, axis=-1))

        if i < self.routings - 1:
            # outputs.shape = [None, num_capsule, dim_capsule]
            # inputs_hat.shape=[None, num_capsule, input_num_capsule, dim_capsule]
            # matmul: [dim_capsule] x [input_num_capsule, dim_capsule]^T -> [input_num_capsule]
            # b.shape=[batch_size, num_capsule, input_num_capsule]
            b += batch_dot(outputs, inputs_hat, [2, 3])
    # End: Routing algorithm ------------------------------------------------#
    # return K.concatenate(output_list, -1)
    return outputs
Example 9: call
# Required module import: from keras import backend [as alias]
# Or: from keras.backend import map_fn [as alias]
def call(self, inputs, training=None):
    # inputs.shape=[None, input_num_capsule, input_dim_capsule]
    # inputs_expand.shape=[None, 1, input_num_capsule, input_dim_capsule]
    inputs_expand = K.expand_dims(inputs, 1)

    # Replicate the num_capsule dimension to prepare for multiplication by W.
    # inputs_tiled.shape=[None, num_capsule, input_num_capsule, input_dim_capsule]
    inputs_tiled = K.tile(inputs_expand, [1, self.num_capsule, 1, 1])

    # Compute `inputs * W` by scanning inputs_tiled on dimension 0.
    # x.shape=[num_capsule, input_num_capsule, input_dim_capsule]
    # W.shape=[num_capsule, input_num_capsule, dim_capsule, input_dim_capsule]
    # Regard the first two dimensions as the `batch` dimension, then
    # matmul: [input_dim_capsule] x [dim_capsule, input_dim_capsule]^T -> [dim_capsule].
    # inputs_hat.shape = [None, num_capsule, input_num_capsule, dim_capsule]
    inputs_hat = K.map_fn(lambda x: K.batch_dot(x, self.W, [2, 3]), elems=inputs_tiled)

    # Begin: Routing algorithm ----------------------------------------------#
    # The prior for the coupling coefficients, initialized as zeros.
    # b.shape = [None, self.num_capsule, self.input_num_capsule].
    b = tf.zeros(shape=[K.shape(inputs_hat)[0], self.num_capsule, self.input_num_capsule])

    assert self.routings > 0, 'The routings should be > 0.'
    for i in range(self.routings):
        # c.shape=[batch_size, num_capsule, input_num_capsule]
        c = tf.nn.softmax(b, axis=1)  # `dim=` is deprecated in tf.nn.softmax

        # matmul: [input_num_capsule] x [input_num_capsule, dim_capsule] -> [dim_capsule].
        # outputs.shape=[None, num_capsule, dim_capsule], e.g. [None, 10, 16]
        outputs = squash(K.batch_dot(c, inputs_hat, [2, 2]))

        if i < self.routings - 1:
            # matmul: [dim_capsule] x [input_num_capsule, dim_capsule]^T -> [input_num_capsule]
            # b.shape=[batch_size, num_capsule, input_num_capsule]
            b += K.batch_dot(outputs, inputs_hat, [2, 3])
    # End: Routing algorithm ------------------------------------------------#
    return outputs
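Examples 1, 2, 8 and 9 all assume a routing weight self.W created in the layer's build(). A minimal sketch of such a build method, under the shape convention documented in the comments above (self.num_capsule and self.dim_capsule are assumed to be set in __init__; the originals may differ in initializer and extra weights such as self.B):

def build(self, input_shape):
    # input_shape = (batch, input_num_capsule, input_dim_capsule)
    self.input_num_capsule = input_shape[1]
    self.input_dim_capsule = input_shape[2]
    # W.shape = [num_capsule, input_num_capsule, dim_capsule, input_dim_capsule],
    # matching the batch_dot(x, self.W, [2, 3]) contraction in call().
    self.W = self.add_weight(
        shape=[self.num_capsule, self.input_num_capsule,
               self.dim_capsule, self.input_dim_capsule],
        initializer='glorot_uniform',
        name='W')
    self.built = True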
Example 10: segmentation_gaussian_measurement
# Required module import: from keras import backend [as alias]
# Or: from keras.backend import map_fn [as alias]
def segmentation_gaussian_measurement(
        y_true,
        y_pred,
        gaussian_sigma=3,
        measurement=keras.losses.binary_crossentropy):
    """Apply a metric or loss measurement weighted by a 2D Gaussian.

    Only works with batch size 1. To use a larger batch size, loop and
    call this function repeatedly over each sample.

    # Arguments

        y_true: assumed to be [label, x_img_coord, y_image_coord]
        y_pred: expected to be a 2D array of labels
            with shape [1, img_height, img_width, 1].
    """
    with K.name_scope(name='grasp_segmentation_gaussian_loss') as scope:
        if keras.backend.ndim(y_true) == 4:
            # Sometimes the dimensions are expanded from 2 to 4 to meet
            # Keras' expectations; reduce them back to 2 in that case.
            y_true = K.squeeze(y_true, axis=-1)
            y_true = K.squeeze(y_true, axis=-1)
        print('y_pred: ', y_pred)
        print('y_true: ', y_true)
        # y_true should have shape [batch_size, 3] here;
        # label, y_height_coordinate, x_width_coordinate each become
        # shape [batch_size, 1].
        label = K.expand_dims(y_true[:, 0])
        print('label: ', label)
        y_height_coordinate = K.expand_dims(y_true[:, 1])
        x_width_coordinate = K.expand_dims(y_true[:, 2])
        # label = K.reshape(label, [1, 1])
        print('label: ', label)
        image_shape = tf.Tensor.get_shape(y_pred)
        y_true_img = tile_vector_as_image_channels(label, image_shape)
        y_true_img = K.cast(y_true_img, 'float32')
        loss_img = measurement(y_true_img, y_pred)

        y_pred_shape = K.int_shape(y_pred)
        if len(y_pred_shape) == 3:
            y_pred_shape = y_pred_shape[:-1]
        if len(y_pred_shape) == 4:
            y_pred_shape = y_pred_shape[1:3]

        def batch_gaussian(one_y_true):
            # Alternative signature kept from the source for reference:
            # def batch_gaussian(y_height_coord, x_width_coord):
            #     weights = gaussian_kernel_2D(size=y_pred_shape, center=(y_height_coord, x_width_coord), sigma=gaussian_sigma)
            # Center the Gaussian at the (y, x) coordinate; the original used
            # (one_y_true[0], one_y_true[1]), which would pass the label as y.
            return gaussian_kernel_2D(size=y_pred_shape,
                                      center=(one_y_true[1], one_y_true[2]),
                                      sigma=gaussian_sigma)

        weights = K.map_fn(batch_gaussian, y_true)
        loss_img = K.flatten(loss_img)
        weights = K.flatten(weights)
        weighted_loss_img = tf.multiply(loss_img, weights)
        loss_sum = K.sum(weighted_loss_img)
        loss_sum = K.reshape(loss_sum, [1, 1])
        return loss_sum
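Example 10 depends on gaussian_kernel_2D and tile_vector_as_image_channels helpers that are not shown. A plausible sketch of the kernel helper, a 2D Gaussian evaluated on the image grid and centered at the labeled pixel (the implementation in the original repository may normalize differently):

from keras import backend as K

def gaussian_kernel_2D(size, center, sigma=3):
    # size: (height, width) ints; center: (y, x) scalars or tensors.
    y = K.cast(K.arange(0, size[0]), 'float32')
    x = K.cast(K.arange(0, size[1]), 'float32')
    yy = K.expand_dims(y, -1) - K.cast(center[0], 'float32')  # (h, 1)
    xx = K.expand_dims(x, 0) - K.cast(center[1], 'float32')   # (1, w)
    g = K.exp(-(K.square(yy) + K.square(xx)) / (2.0 * sigma ** 2))
    return g  # shape (height, width), peak of 1.0 at `center`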
Example 11: cifar10_load_data
# Required module import: from keras import backend [as alias]
# Or: from keras.backend import map_fn [as alias]
def cifar10_load_data(datadir=None):
    '''Loads the CIFAR10 dataset.

    Returns
        Tuple of Numpy arrays: `(x_train, y_train), (x_test, y_test)`.
    '''
    if datadir is None:
        return cifar10.load_data()
    dirname = 'cifar-10-batches-py'
    # origin = 'http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz'
    # path = get_file(dirname, origin=origin, untar=True)
    path_ = os.path.join(datadir, dirname)
    num_train_samples = 50000
    x_train = np.zeros((num_train_samples, 3, 32, 32), dtype='uint8')
    y_train = np.zeros((num_train_samples,), dtype='uint8')
    for ii in range(1, 6):
        fpath = os.path.join(path_, 'data_batch_' + str(ii))
        data, labels = cifar10.load_batch(fpath)
        x_train[(ii - 1) * 10000: ii * 10000, :, :, :] = data
        y_train[(ii - 1) * 10000: ii * 10000] = labels
    fpath = os.path.join(path_, 'test_batch')
    x_test, y_test = cifar10.load_batch(fpath)
    y_train = np.reshape(y_train, (len(y_train), 1))
    y_test = np.reshape(y_test, (len(y_test), 1))
    if KB.image_data_format() == 'channels_last':
        x_train = x_train.transpose(0, 2, 3, 1)
        x_test = x_test.transpose(0, 2, 3, 1)
    return (x_train, y_train), (x_test, y_test)

# def stand_img(xin):
#     '''Use as: model.add(KL.Lambda(stand_img))
#     Seems to make the code run very slow. Pre-processing the data is faster.
#     '''
#     # KB.map_fn(fn, elems, name, dtype)  # maybe KB.map_fn also works.
#     with tf.device(xin.device):
#         img_std = tf.map_fn(tf.image.per_image_standardization, xin)
#
#     return img_std
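The commented-out helper above applies tf.image.per_image_standardization through map_fn. A minimal self-contained version of the same idea (as the original comment suggests, pre-processing the data offline is usually faster):

import numpy as np
import tensorflow as tf
from keras import backend as K

imgs = K.constant(np.random.rand(4, 32, 32, 3).astype('float32'))
# Standardize each image independently: zero mean, unit variance per image.
std_imgs = K.map_fn(tf.image.per_image_standardization, imgs)
print(K.eval(std_imgs).shape)  # (4, 32, 32, 3)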
Example 12: call
# Required module import: from keras import backend [as alias]
# Or: from keras.backend import map_fn [as alias]
def call(self, inputs):
    if self.r_num == 1:
        # A single routing iteration collapses to one dense transform.
        outputs = K.dot(K.reshape(inputs, (-1, self.ch_i * self.n_i)),
                        K.reshape(self.w, (self.ch_i * self.n_i,
                                           self.ch_j * self.n_j)))
        outputs = squeeze(K.reshape(outputs, (-1, self.ch_j, self.n_j)))
    else:
        wr = K.reshape(self.w, (self.ch_i, self.n_i, self.ch_j * self.n_j))
        u = tf.transpose(tf.matmul(tf.transpose(inputs, [1, 0, 2]), wr), [1, 0, 2])
        u = K.reshape(u, (-1, self.ch_i, self.ch_j, self.n_j))

        def rt(ub):
            ub = K.reshape(ub, (-1, self.ch_i, self.ch_j, self.n_j))
            ub_wo_g = K.stop_gradient(ub)
            b = 0.0
            for r in range(self.r_num):
                if r > 0:
                    # distribution of weights of capsules in I across capsules in J
                    c = K.expand_dims(K.softmax(b * self.b_alphas[r])) * self.ch_j
                    c = K.stop_gradient(c)
                else:
                    c = 1.0
                if r == self.r_num - 1:
                    cub = c * ub
                else:
                    cub = c * ub_wo_g
                s = K.sum(cub, axis=-3)  # vectors of capsules in J
                v = squeeze(s)           # squashed vectors of capsules in J
                if r == self.r_num - 1:
                    break
                v = K.stop_gradient(v)
                a = tf.einsum('bjk,bijk->bij', v, ub)  # a = v dot u
                # a = K.matmul(K.reshape(v, (-1, 1, J, 1, n_j)),
                #              K.reshape(u, (-1, I, J, n_j, 1))).reshape((-1, I, J))
                b = b + a  # increase b[i, j] where v[j] dot u[i, j] is larger
            return v

        u = K.reshape(u, (-1, self.ch_i * self.ch_j * self.n_j))
        global useGPU
        if useGPU:
            outputs = rt(u)
        else:
            # On CPU, map_fn routes one sample at a time to trade speed for memory.
            outputs = tf.map_fn(rt, u,
                                parallel_iterations=100, back_prop=True,
                                infer_shape=False)
        outputs = K.reshape(outputs, (-1, self.ch_j, self.n_j))
    return outputs
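Example 12's squeeze appears to be this repository's name for a capsule squashing nonlinearity (unrelated to K.squeeze), judging by the "squashed vectors of capsules in J" comment. A plausible sketch under that assumption, mirroring the squash shown after Example 1:

from keras import backend as K

def squeeze(s):
    # Squash-style nonlinearity: scale each capsule vector's norm into
    # [0, 1) without changing its direction.
    sq = K.sum(K.square(s), axis=-1, keepdims=True)
    return (sq / (1 + sq)) * s / K.sqrt(sq + K.epsilon())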