This article collects typical usage examples of the keras.backend.concatenate method in Python. If you are wondering what backend.concatenate does, how to call it, or where to find real-world usage, the curated code samples below should help. You can also explore further examples from its containing module, keras.backend.
The following shows 15 code examples of backend.concatenate, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
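Before the project-level examples, here is a minimal, self-contained sketch of what backend.concatenate does. It is illustrative only and assumes Keras 2.x with the TensorFlow backend; the tensor names and shapes are made up for this demonstration.

import numpy as np
from keras import backend as K

# Two tensors that agree on every axis except the one being joined.
a = K.constant(np.ones((2, 3)))
b = K.constant(np.zeros((2, 2)))

# Join along the last axis: shapes (2, 3) and (2, 2) give (2, 5).
c = K.concatenate([a, b], axis=-1)
print(K.int_shape(c))  # (2, 5)
print(K.eval(c))       # the first three columns are 1.0, the last two are 0.0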
Example 1: step
# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import concatenate [as alias]
def step(self, x, states):
    h = states[0]
    # states[1] necessary?
    # equals K.dot(X, self._W1) + self._b2 with X.shape=[bs, T, input_dim]
    total_x_prod = states[-1]
    # comes from the constants (equals the input sequence)
    X = states[-2]
    # expand dims to add the vector which is only valid for this time step
    # to total_x_prod which is valid for all time steps
    hw = K.expand_dims(K.dot(h, self._W2), 1)
    additive_atn = total_x_prod + hw
    attention = K.softmax(K.dot(additive_atn, self._V), axis=1)
    x_weighted = K.sum(attention * X, [1])
    x = K.dot(K.concatenate([x, x_weighted], 1), self._W3) + self._b3
    h, new_states = self.layer.cell.call(x, states[:-2])
    return h, new_states
Example 2: call
# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import concatenate [as alias]
def call(self, x, training=None):
    deta1 = 0.3
    deta2 = 0.3
    deta3 = 0.3
    seed = np.random.randint(1, 10e6)
    # RandomStreams is Theano's symbolic RNG; draw per-sample random angles in [-deta, deta].
    rng = RandomStreams(seed=seed)
    theta1 = rng.uniform(size=(x.shape[0], 1), low=-deta1, high=deta1, dtype='float32')
    theta2 = rng.uniform(size=(x.shape[0], 1), low=-deta2, high=deta2, dtype='float32')
    theta3 = rng.uniform(size=(x.shape[0], 1), low=-deta3, high=deta3, dtype='float32')
    theta = K.concatenate([theta1, theta2, theta3], axis=-1)
    theta = K.tile(theta, x.shape[1])
    theta = theta.reshape((x.shape[0], x.shape[1], 3))
    theta = theta.reshape((theta.shape[0] * theta.shape[1], theta.shape[2]))
    M = _fusion(theta)
    output = _transform_rot(M, x)
    # use the transformed input during training, the raw input otherwise
    return K.in_train_phase(output, x, training=training)
Developer: microsoft | Project: View-Adaptive-Neural-Networks-for-Skeleton-based-Human-Action-Recognition | Code lines: 20 | Source file: transform_rnn.py
Example 3: yolo_correct_boxes
# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import concatenate [as alias]
def yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape):
    '''Get corrected boxes'''
    box_yx = box_xy[..., ::-1]
    box_hw = box_wh[..., ::-1]
    input_shape = K.cast(input_shape, K.dtype(box_yx))
    image_shape = K.cast(image_shape, K.dtype(box_yx))
    new_shape = K.round(image_shape * K.min(input_shape / image_shape))
    offset = (input_shape - new_shape) / 2. / input_shape
    scale = input_shape / new_shape
    box_yx = (box_yx - offset) * scale
    box_hw *= scale

    box_mins = box_yx - (box_hw / 2.)
    box_maxes = box_yx + (box_hw / 2.)
    boxes = K.concatenate([
        box_mins[..., 0:1],   # y_min
        box_mins[..., 1:2],   # x_min
        box_maxes[..., 0:1],  # y_max
        box_maxes[..., 1:2]   # x_max
    ])

    # Scale boxes back to original image shape.
    boxes *= K.concatenate([image_shape, image_shape])
    return boxes
Example 4: call
# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import concatenate [as alias]
def call(self, x, mask=None):
    e1 = x[0].T
    e2 = x[1].T
    batch_size = K.shape(x[0])[0]
    sim = []
    # linear part: V applied to the concatenated pair [e1; e2]
    V_out = K.dot(self.V, K.concatenate([e1, e2], axis=0))
    # bilinear part: one slice W[i] per output unit
    for i in range(self.k):
        temp = K.batch_dot(K.dot(e1.T, self.W[i, :, :]), e2.T, axes=1)
        sim.append(temp)
    sim = K.reshape(sim, (self.k, batch_size))
    tensor_bi_product = self.activation(V_out + sim)
    tensor_bi_product = K.dot(self.U.T, tensor_bi_product).T
    return tensor_bi_product
Example 5: yolo_body
# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import concatenate [as alias]
def yolo_body(inputs, num_anchors, num_classes):
    """Create YOLO_V2 model CNN body in Keras."""
    darknet = Model(inputs, darknet_body()(inputs))
    conv20 = compose(
        DarknetConv2D_BN_Leaky(1024, (3, 3)),
        DarknetConv2D_BN_Leaky(1024, (3, 3)))(darknet.output)

    conv13 = darknet.layers[43].output
    conv21 = DarknetConv2D_BN_Leaky(64, (1, 1))(conv13)
    # TODO: Allow Keras Lambda to use func arguments for output_shape?
    conv21_reshaped = Lambda(
        space_to_depth_x2,
        output_shape=space_to_depth_x2_output_shape,
        name='space_to_depth')(conv21)

    x = concatenate([conv21_reshaped, conv20])
    x = DarknetConv2D_BN_Leaky(1024, (3, 3))(x)
    x = DarknetConv2D(num_anchors * (num_classes + 5), (1, 1))(x)
    return Model(inputs, x)
Example 6: _multi_kmax_context_concat
# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import concatenate [as alias]
def _multi_kmax_context_concat(inputs, top_k, poses):
    x, context_input = inputs
    idxes, topk_vs = list(), list()
    for p in poses:
        val, idx = tf.nn.top_k(tf.slice(x, [0, 0, 0], [-1, -1, p]), k=top_k, sorted=True, name=None)
        topk_vs.append(val)
        idxes.append(idx)
    concat_topk_max = tf.concat(topk_vs, -1, name='concat_val')
    concat_topk_idx = tf.concat(idxes, -1, name='concat_idx')
    # hack that requires the context to have the same shape as similarity matrices
    # https://stackoverflow.com/questions/41897212/how-to-sort-a-multi-dimensional-tensor-using-the-returned-indices-of-tf-nn-top-k
    shape = tf.shape(x)
    mg = tf.meshgrid(*[tf.range(d) for d in (tf.unstack(shape[:(x.get_shape().ndims - 1)]) + [top_k * len(poses)])], indexing='ij')
    val_contexts = tf.gather_nd(context_input, tf.stack(mg[:-1] + [concat_topk_idx], axis=-1))
    return tf.concat([concat_topk_max, val_contexts], axis=-1)
    # return backend.concatenate([concat_topk_max, val_contexts])
Example 7: preprocess
# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import concatenate [as alias]
def preprocess(x):
    return K.concatenate([
        x[:, :, 0:1] / 360.0,
        x[:, :, 1:3],
        x[:, :, 3:4] / 360.0,
        x[:, :, 4:6],
        x[:, :, 6:18] / 360.0,
        x[:, :, 18:19] - x[:, :, 1:2],
        x[:, :, 19:22],
        x[:, :, 28:29] - x[:, :, 1:2],
        x[:, :, 29:30],
        x[:, :, 30:31] - x[:, :, 1:2],
        x[:, :, 31:32],
        x[:, :, 32:33] - x[:, :, 1:2],
        x[:, :, 33:34],
        x[:, :, 34:35] - x[:, :, 1:2],
        x[:, :, 35:41],
    ], axis=2)
Example 8: add_boundary_energy
# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import concatenate [as alias]
def add_boundary_energy(x, b_start=None, b_end=None, mask=None):
    '''Given the observations x, adds the start boundary energy b_start (resp.
    the end boundary energy b_end) to the start (resp. end) elements and
    multiplies by the mask.'''
    if mask is None:
        if b_start is not None:
            x = K.concatenate([x[:, :1, :] + b_start, x[:, 1:, :]], axis=1)
        if b_end is not None:
            x = K.concatenate([x[:, :-1, :], x[:, -1:, :] + b_end], axis=1)
    else:
        mask = K.cast(mask, K.floatx())
        mask = K.expand_dims(mask, 2)
        x *= mask
        if b_start is not None:
            mask_r = K.concatenate([K.zeros_like(mask[:, :1]), mask[:, :-1]], axis=1)
            start_mask = K.cast(K.greater(mask, mask_r), K.floatx())
            x = x + start_mask * b_start
        if b_end is not None:
            mask_l = K.concatenate([mask[:, 1:], K.zeros_like(mask[:, -1:])], axis=1)
            end_mask = K.cast(K.greater(mask, mask_l), K.floatx())
            x = x + end_mask * b_end
    return x
Example 9: _forward
# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import concatenate [as alias]
def _forward(x, reduce_step, initial_states, U, mask=None):
    '''Forward recurrence of the linear chain crf.'''

    def _forward_step(energy_matrix_t, states):
        alpha_tm1 = states[-1]
        new_states = reduce_step(K.expand_dims(alpha_tm1, 2) + energy_matrix_t)
        return new_states[0], new_states

    U_shared = K.expand_dims(K.expand_dims(U, 0), 0)
    if mask is not None:
        mask = K.cast(mask, K.floatx())
        mask_U = K.expand_dims(K.expand_dims(mask[:, :-1] * mask[:, 1:], 2), 3)
        U_shared = U_shared * mask_U

    inputs = K.expand_dims(x[:, 1:, :], 2) + U_shared
    inputs = K.concatenate([inputs, K.zeros_like(inputs[:, -1:, :, :])], axis=1)

    last, values, _ = K.rnn(_forward_step, inputs, initial_states)
    return last, values
Example 10: _pad
# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import concatenate [as alias]
def _pad(self, input):
    """
    Pads the network output so y_pred and y_true have the same dimensions.
    :param input: previous layer
    :return: layer with the last dimension padded by 4
    """
    # pad = K.placeholder((None, self.config.ANCHORS, 4))
    # pad = np.zeros((self.config.BATCH_SIZE, self.config.ANCHORS, 4))
    # return K.concatenate([input, pad], axis=-1)
    padding = np.zeros((3, 2))
    padding[2, 1] = 4
    return tf.pad(input, padding, "CONSTANT")


# loss function to optimize
Example 11: _correct_boxes
# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import concatenate [as alias]
def _correct_boxes(
        self, box_xy, box_wh, input_shape, image_shape):
    """Get corrected boxes, which are scaled to original shape."""
    box_yx = box_xy[..., ::-1]
    box_hw = box_wh[..., ::-1]
    input_shape = K.cast(input_shape, K.dtype(box_yx))
    image_shape = K.cast(image_shape, K.dtype(box_yx))
    new_shape = K.round(image_shape * K.min(input_shape / image_shape))
    offset = (input_shape - new_shape) / 2. / input_shape
    scale = input_shape / new_shape
    box_yx = (box_yx - offset) * scale
    box_hw *= scale

    box_mins = box_yx - (box_hw / 2.)
    box_maxes = box_yx + (box_hw / 2.)
    boxes = K.concatenate([
        box_mins[..., 0:1],   # y_min
        box_mins[..., 1:2],   # x_min
        box_maxes[..., 0:1],  # y_max
        box_maxes[..., 1:2]   # x_max
    ])

    # Scale boxes back to original image shape.
    boxes *= K.concatenate([image_shape, image_shape])
    return boxes
Example 12: yolo_correct_boxes
# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import concatenate [as alias]
def yolo_correct_boxes(box_xy, box_wh, input_shape, image_shape):
    box_yx = box_xy[..., ::-1]
    box_hw = box_wh[..., ::-1]
    input_shape = K.cast(input_shape, K.dtype(box_yx))
    image_shape = K.cast(image_shape, K.dtype(box_yx))
    new_shape = K.round(image_shape * K.min(input_shape / image_shape))
    offset = (input_shape - new_shape) / 2. / input_shape
    scale = input_shape / new_shape
    box_yx = (box_yx - offset) * scale
    box_hw *= scale

    box_mins = box_yx - (box_hw / 2.)
    box_maxes = box_yx + (box_hw / 2.)
    boxes = K.concatenate([
        box_mins[..., 0:1],   # y_min
        box_mins[..., 1:2],   # x_min
        box_maxes[..., 0:1],  # y_max
        box_maxes[..., 1:2]   # x_max
    ])

    boxes *= K.concatenate([image_shape, image_shape])
    return boxes
Example 13: preprocess_input
# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import concatenate [as alias]
def preprocess_input(self, x):
    if self.consume_less == 'cpu':
        if 0 < self.dropout_W < 1:
            dropout = self.dropout_W
        else:
            dropout = 0
        input_shape = self.input_spec[0].shape
        input_dim = input_shape[2]
        timesteps = input_shape[1]

        x_i = time_distributed_dense(x, self.W_i, self.b_i, dropout,
                                     input_dim, self.output_dim, timesteps)
        x_f = time_distributed_dense(x, self.W_f, self.b_f, dropout,
                                     input_dim, self.output_dim, timesteps)
        x_c = time_distributed_dense(x, self.W_c, self.b_c, dropout,
                                     input_dim, self.output_dim, timesteps)
        x_o = time_distributed_dense(x, self.W_o, self.b_o, dropout,
                                     input_dim, self.output_dim, timesteps)
        return K.concatenate([x_i, x_f, x_c, x_o], axis=2)
    else:
        return x
Example 14: preprocess_input
# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import concatenate [as alias]
def preprocess_input(self, x):
    if self.consume_less == 'cpu':
        input_shape = self.input_spec[0].shape
        input_dim = input_shape[2]
        timesteps = input_shape[1]

        x_t = time_distributed_dense(x, self.W_t, self.b_t, self.dropout_W,
                                     input_dim, self.output_dim, timesteps)
        x_h = time_distributed_dense(x, self.W_h, self.b_h, self.dropout_W,
                                     input_dim, self.output_dim, timesteps)
        return K.concatenate([x_t, x_h], axis=2)
    else:
        return x
Example 15: _additive_similarity
# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import concatenate [as alias]
def _additive_similarity(self, source, query):
    concatenation = K.concatenate([source, query], axis=2)
    nonlinearity = K.tanh(K.dot(concatenation, self._weights["w_a"]))

    # tile the weight vector (1, 1, dim) for each time step and each element of the batch -> (bs, T, dim)
    source_shape = K.shape(source)
    vaeff = K.tile(K.expand_dims(self._weights["v_a"], 0), [source_shape[0], source_shape[1], 1])

    similarity = K.batch_dot(K.permute_dimensions(vaeff, [0, 2, 1]), nonlinearity, axes=[1, 2])
    return similarity