This article collects typical usage examples of the tensorflow.keras.backend.tile method in Python: what backend.tile does, how to call it, and where it is useful. The curated examples below may help answer those questions; you can also explore the other methods of the tensorflow.keras.backend module.
The following presents 8 code examples of the backend.tile method.
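Before the examples, a minimal sketch of the method itself: backend.tile (usually imported as K.tile) repeats a tensor along each axis according to a list of per-axis repetition counts, mirroring tf.tile. The shapes below are illustrative:

import numpy as np
from tensorflow.keras import backend as K

x = K.constant(np.arange(6).reshape(2, 3))   # shape (2, 3)
tiled = K.tile(x, [2, 3])                    # repeat 2x along axis 0, 3x along axis 1
print(K.int_shape(tiled))                    # (4, 9)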
Example 1: expand_tile
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import tile [as alias]
def expand_tile(units, axis):
"""
Expand and tile tensor along given axis
Args:
units: tf tensor with dimensions [batch_size, time_steps, n_input_features]
axis: axis along which expand and tile. Must be 1 or 2
"""
assert axis in (1, 2)
n_time_steps = K.int_shape(units)[1]
repetitions = [1, 1, 1, 1]
repetitions[axis] = n_time_steps
if axis == 1:
expanded = Reshape(target_shape=((1,) + K.int_shape(units)[1:]))(units)
else:
expanded = Reshape(target_shape=(K.int_shape(units)[1:2] + (1,) + K.int_shape(units)[2:]))(units)
return K.tile(expanded, repetitions)
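A hypothetical usage sketch (input sizes are illustrative, not from the original source; Reshape is assumed to be imported from tensorflow.keras.layers): tiling the same sequence along axis 1 and along axis 2 yields two (batch, T, T, F) tensors whose concatenation holds every pairwise combination of time steps, a common prelude to additive attention scoring.

from tensorflow.keras.layers import Input
from tensorflow.keras import backend as K

units = Input(shape=(10, 32))           # (batch, time_steps=10, features=32)
rows = expand_tile(units, axis=1)       # (batch, 10, 10, 32)
cols = expand_tile(units, axis=2)       # (batch, 10, 10, 32)
pairs = K.concatenate([rows, cols])     # (batch, 10, 10, 64): step i paired with step j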
Example 2: relative_logits_1d
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import tile [as alias]
def relative_logits_1d(self, q, rel_k, H, W, transpose_mask):
    rel_logits = tf.einsum('bhxyd,md->bhxym', q, rel_k)        # (B, heads, H, W, 2W-1)
    rel_logits = K.reshape(rel_logits, [-1, self.num_heads * H, W, 2 * W - 1])
    rel_logits = self.rel_to_abs(rel_logits)                   # relative -> absolute indexing
    rel_logits = K.reshape(rel_logits, [-1, self.num_heads, H, W, W])
    rel_logits = K.expand_dims(rel_logits, axis=3)
    rel_logits = K.tile(rel_logits, [1, 1, 1, H, 1, 1])        # repeat along the other spatial axis
    rel_logits = K.permute_dimensions(rel_logits, transpose_mask)
    rel_logits = K.reshape(rel_logits, [-1, self.num_heads, H * W, H * W])
return rel_logits
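This method relies on a rel_to_abs helper, not shown in the example, that converts logits indexed by relative offset (length 2W-1) into absolute (W, W) logits. A minimal sketch of the standard padding-and-reshape trick from Attention Augmented Convolutional Networks, which is presumably what self.rel_to_abs implements:

import tensorflow as tf

def rel_to_abs(x):
    """Convert [B, H, L, 2L-1] relative logits to [B, H, L, L] absolute logits."""
    b, h, l = tf.shape(x)[0], tf.shape(x)[1], tf.shape(x)[2]
    # pad one column, flatten, pad again, then reshape so each row shifts by one
    col_pad = tf.zeros((b, h, l, 1), dtype=x.dtype)
    x = tf.concat([x, col_pad], axis=3)                 # [B, H, L, 2L]
    flat_x = tf.reshape(x, [b, h, l * 2 * l])
    flat_pad = tf.zeros((b, h, l - 1), dtype=x.dtype)
    flat_x = tf.concat([flat_x, flat_pad], axis=2)      # [B, H, 2L^2 + L - 1]
    final_x = tf.reshape(flat_x, [b, h, l + 1, 2 * l - 1])
    return final_x[:, :, :l, l - 1:]                    # [B, H, L, L]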
Example 3: call
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import tile [as alias]
def call(self, inputs, mask=None):
"""
convert to query, key, value vectors, shaped [batch_size*num_head, time_step, embed_dim]
"""
multihead_query = K.concatenate(tf.split(K.dot(inputs, self.w_q),
self.num_heads, axis=2), axis=0)
multihead_key = K.concatenate(tf.split(K.dot(inputs, self.w_k),
self.num_heads, axis=2), axis=0)
multihead_value = K.concatenate(tf.split(K.dot(inputs, self.w_v),
self.num_heads, axis=2), axis=0)
"""scaled dot product"""
scaled = K.int_shape(inputs)[-1] ** -0.5
attend = K.batch_dot(multihead_query, multihead_key, axes=2) * scaled
# apply mask before normalization (softmax)
if mask is not None:
multihead_mask = K.tile(mask, [self.num_heads, 1])
attend *= K.expand_dims(K.cast(multihead_mask, K.floatx()), 2)
attend *= K.expand_dims(K.cast(multihead_mask, K.floatx()), 1)
# normalization
attend = attend / K.cast(K.sum(attend, axis=-1, keepdims=True) + K.epsilon(), K.floatx())
# apply attention
attend = K.batch_dot(attend, multihead_value, axes=(2, 1))
attend = tf.concat(tf.split(attend, self.num_heads, axis=0), axis=2)
attend = K.dot(attend, self.w_final)
if self.residual:
attend = attend + inputs
if self.normalize:
mean = K.mean(attend, axis=-1, keepdims=True)
        std = K.std(attend, axis=-1, keepdims=True)
attend = self.gamma * (attend - mean) / (std + K.epsilon()) + self.beta
return attend
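Because the heads are stacked along the batch axis, a (batch, time) padding mask must be tiled to (num_heads * batch, time) before it can be applied to the attention matrix. A small illustration with made-up numbers:

from tensorflow.keras import backend as K

num_heads = 4
mask = K.constant([[1, 1, 0],            # (batch=2, time=3) padding mask
                   [1, 0, 0]])
multihead_mask = K.tile(mask, [num_heads, 1])
print(K.int_shape(multihead_mask))       # (8, 3)

Note also that this layer re-normalizes the masked scores by their sum rather than applying a softmax, which keeps padded positions at exactly zero weight.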
Example 4: repeat_
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import tile [as alias]
def repeat_(x, k):
    # insert a new axis at position 1 and tile it k times: (B, ...) -> (B, k, ...)
    tile_factor = [1, k] + [1] * (K.ndim(x) - 1)
return K.tile(x[:, None, :], tile_factor)
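A quick check of the shape arithmetic: inserting a new axis at position 1 and tiling it k times turns a (batch, features) tensor into (batch, k, features).

import numpy as np
from tensorflow.keras import backend as K

x = K.constant(np.ones((2, 5)))      # (batch=2, features=5)
print(K.int_shape(repeat_(x, 3)))    # (2, 3, 5)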
Example 5: call
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import tile [as alias]
def call(self, inputs, **kwargs):
assert isinstance(inputs, list) and len(inputs) == 3
first, second, features = inputs[0], inputs[1], inputs[2]
if not self.from_logits:
first = K.clip(first, 1e-10, 1.0)
second = K.clip(second, 1e-10, 1.0)
first_, second_ = K.log(first), K.log(second)
else:
first_, second_ = first, second
# embedded_features.shape = (M, T, 1)
if self.use_intermediate_layer:
features = K.dot(features, self.first_kernel)
features = K.bias_add(features, self.first_bias, data_format="channels_last")
features = self.intermediate_activation(features)
embedded_features = K.dot(features, self.features_kernel)
embedded_features = K.bias_add(
embedded_features, self.features_bias, data_format="channels_last")
if self.use_dimension_bias:
tiling_shape = [1] * (K.ndim(first) - 1) + [K.shape(first)[-1]]
embedded_features = K.tile(embedded_features, tiling_shape)
embedded_features = K.bias_add(
embedded_features, self.dimensions_bias, data_format="channels_last")
sigma = K.sigmoid(embedded_features)
result = weighted_sum(first_, second_, sigma,
self.first_threshold, self.second_threshold)
probs = K.softmax(result)
if self.return_logits:
return [probs, result]
return probs
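The tiling step deserves a standalone illustration: embedded_features has shape (..., 1), and tiling by [1, ..., 1, D] copies the single gate value across all D output dimensions so that a per-dimension bias can then be added. A minimal sketch with hypothetical shapes:

import numpy as np
from tensorflow.keras import backend as K

first = K.constant(np.ones((2, 7, 20)))              # (batch, time, D=20)
embedded_features = K.constant(np.ones((2, 7, 1)))   # one gate value per position
tiling_shape = [1] * (K.ndim(first) - 1) + [K.int_shape(first)[-1]]
print(K.int_shape(K.tile(embedded_features, tiling_shape)))   # (2, 7, 20)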
Example 6: TemporalDropout
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import tile [as alias]
def TemporalDropout(inputs, dropout=0.0):
    """
    Drops whole temporal steps of the input 3D tensor with probability dropout.
    """
    # TODO: adapt for tensors with more than 3 dimensions
    if dropout == 0.0:
        return inputs
    # build a (batch, time, 1) mask, apply dropout to it, then tile it over features
    inputs_func = lambda x: K.ones_like(x[:, :, 0:1])
    inputs_mask = Lambda(inputs_func)(inputs)
    inputs_mask = Dropout(dropout)(inputs_mask)
    tiling_shape = [1, 1, K.shape(inputs)[2]] + [1] * (K.ndim(inputs) - 3)
    inputs_mask = Lambda(K.tile, arguments={"n": tiling_shape},
                         output_shape=K.int_shape(inputs)[1:])(inputs_mask)
    answer = Multiply()([inputs, inputs_mask])
    return answer
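A usage sketch (vocabulary and layer sizes are illustrative): applied after an Embedding layer, the tiled mask zeroes whole time steps rather than individual features, which for word embeddings amounts to dropping entire words.

from tensorflow.keras.layers import Input, Embedding
from tensorflow.keras.models import Model

word_ids = Input(shape=(20,), dtype="int32")
embedded = Embedding(10000, 128)(word_ids)        # (batch, 20, 128)
dropped = TemporalDropout(embedded, dropout=0.2)  # each zeroed step loses all 128 features
model = Model(word_ids, dropped)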
Example 7: yolo3_head
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import tile [as alias]
def yolo3_head(feats, anchors, num_classes, input_shape, calc_loss=False):
"""Convert final layer features to bounding box parameters."""
num_anchors = len(anchors)
# Reshape to batch, height, width, num_anchors, box_params.
anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])
grid_shape = K.shape(feats)[1:3] # height, width
grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
[1, grid_shape[1], 1, 1])
grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
[grid_shape[0], 1, 1, 1])
grid = K.concatenate([grid_x, grid_y])
grid = K.cast(grid, K.dtype(feats))
feats = K.reshape(
feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])
    # Adjust predictions to each spatial grid point and anchor size.
box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[..., ::-1], K.dtype(feats))
box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[..., ::-1], K.dtype(feats))
box_confidence = K.sigmoid(feats[..., 4:5])
box_class_probs = K.sigmoid(feats[..., 5:])
    if calc_loss:
        return grid, feats, box_xy, box_wh
return box_xy, box_wh, box_confidence, box_class_probs
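To make the grid construction concrete, here is the same tiling arithmetic on a toy 2x3 grid; after reshaping, cell (y, x) holds its own (x, y) offset:

from tensorflow.keras import backend as K

grid_h, grid_w = 2, 3
grid_y = K.tile(K.reshape(K.arange(0, stop=grid_h), [-1, 1, 1, 1]), [1, grid_w, 1, 1])
grid_x = K.tile(K.reshape(K.arange(0, stop=grid_w), [1, -1, 1, 1]), [grid_h, 1, 1, 1])
grid = K.concatenate([grid_x, grid_y])            # shape (2, 3, 1, 2)
print(K.eval(K.reshape(grid, (grid_h, grid_w, 2))))
# [[[0 0] [1 0] [2 0]]
#  [[0 1] [1 1] [2 1]]]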
Example 8: yolo2_head
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import tile [as alias]
def yolo2_head(feats, anchors, num_classes, input_shape, calc_loss=False):
"""Convert final layer features to bounding box parameters."""
num_anchors = len(anchors)
# Reshape to batch, height, width, num_anchors, box_params.
anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])
grid_shape = K.shape(feats)[1:3] # height, width
grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
[1, grid_shape[1], 1, 1])
grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
[grid_shape[0], 1, 1, 1])
grid = K.concatenate([grid_x, grid_y])
grid = K.cast(grid, K.dtype(feats))
feats = K.reshape(
feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])
    # Adjust predictions to each spatial grid point and anchor size.
box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[..., ::-1], K.dtype(feats))
#box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(grid_shape[..., ::-1], K.dtype(feats))
box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[..., ::-1], K.dtype(feats))
box_confidence = K.sigmoid(feats[..., 4:5])
box_class_probs = K.softmax(feats[..., 5:])
    if calc_loss:
        return grid, feats, box_xy, box_wh
return box_xy, box_wh, box_confidence, box_class_probs
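The only differences from yolo3_head in Example 7 are the class-probability activation (a softmax here, reflecting YOLOv2's assumption of mutually exclusive classes, versus independent sigmoids in YOLOv3) and the commented-out alternative of normalizing box width/height by the grid shape instead of the input shape; the grid-offset construction with K.tile is identical.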