本文整理汇总了Python中keras.backend.bias_add方法的典型用法代码示例。如果您正苦于以下问题:Python backend.bias_add方法的具体用法?Python backend.bias_add怎么用?Python backend.bias_add使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类keras.backend
的用法示例。
在下文中一共展示了backend.bias_add方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: call
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import bias_add [as 别名]
def call(self, inputs):
    """Apply a depthwise 2D convolution via the raw TF op, then optional
    bias and activation.

    Args:
        inputs: 4D image tensor in ``self.data_format`` layout.

    Returns:
        The convolved (and optionally biased/activated) tensor.

    Raises:
        ValueError: if the resolved data format is not one of
            ``'channels_first'`` / ``'channels_last'``.
    """
    # Resolve the effective data format once; the original validated
    # `self.data_format` directly, so a legitimate `None` (meaning "use
    # the backend default") always raised, and the error message read an
    # unbound local when `self.data_format` held an invalid value.
    data_format = self.data_format
    if data_format is None:
        data_format = image_data_format()
    if data_format not in {'channels_first', 'channels_last'}:
        raise ValueError('Unknown data_format ' + str(data_format))
    # Convert to the NHWC layout tf.nn.depthwise_conv2d expects.
    x = _preprocess_conv2d_input(inputs, self.data_format)
    padding = _preprocess_padding(self.padding)
    strides = (1,) + self.strides + (1,)
    # BUG FIX: the original passed the raw `inputs` here, leaving the
    # preprocessed tensor `x` dead and feeding the op the wrong layout.
    outputs = tf.nn.depthwise_conv2d(x, self.depthwise_kernel,
                                     strides=strides,
                                     padding=padding,
                                     rate=self.dilation_rate)
    if self.bias:
        outputs = K.bias_add(
            outputs,
            self.bias,
            data_format=self.data_format)
    if self.activation is not None:
        return self.activation(outputs)
    return outputs
示例2: call
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import bias_add [as 别名]
def call(self, inputs, training=None):
    """Run the depthwise convolution, then optional bias and activation."""
    # Depthwise conv: one independent filter per input channel.
    conv_out = K.depthwise_conv2d(
        inputs,
        self.depthwise_kernel,
        strides=self.strides,
        padding=self.padding,
        dilation_rate=self.dilation_rate,
        data_format=self.data_format,
    )
    if self.bias:
        conv_out = K.bias_add(
            conv_out, self.bias, data_format=self.data_format)
    if self.activation is None:
        return conv_out
    return self.activation(conv_out)
示例3: call
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import bias_add [as 别名]
def call(self, inputs, training=None):
    """Depthwise convolution -> optional bias -> optional activation."""
    result = K.depthwise_conv2d(
        inputs,
        self.depthwise_kernel,
        data_format=self.data_format,
        dilation_rate=self.dilation_rate,
        padding=self.padding,
        strides=self.strides)
    if self.bias:
        result = K.bias_add(result, self.bias, data_format=self.data_format)
    return result if self.activation is None else self.activation(result)
示例4: call
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import bias_add [as 别名]
def call(self, inputs, training=None):
    """Depthwise conv (module-level helper) with optional bias/activation."""
    feature_map = depthwise_conv2d(
        inputs,
        self.depthwise_kernel,
        strides=self.strides,
        padding=self.padding,
        dilation_rate=self.dilation_rate,
        data_format=self.data_format)
    if self.bias:
        feature_map = K.bias_add(
            feature_map, self.bias, data_format=self.data_format)
    if self.activation is None:
        return feature_map
    return self.activation(feature_map)
示例5: preprocess_input
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import bias_add [as 别名]
def preprocess_input(self, inputs, training=None):
    """Compute the gate pre-activations via a causal 1D convolution
    (emulated with conv2d over a dummy spatial axis)."""
    if self.window_size > 1:
        # Left-pad so the convolution only sees past timesteps (causal).
        inputs = K.temporal_padding(inputs, (self.window_size - 1, 0))
    expanded = K.expand_dims(inputs, 2)  # add a dummy spatial dimension
    conv = K.conv2d(expanded, self.kernel, strides=self.strides,
                    padding='valid',
                    data_format='channels_last')
    conv = K.squeeze(conv, 2)  # drop the dummy dimension again
    if self.use_bias:
        conv = K.bias_add(conv, self.bias, data_format='channels_last')
    if self.dropout is None or not (0. < self.dropout < 1.):
        return conv
    # Split the fused projection into the z / f / o gate blocks.
    u = self.units
    z = conv[:, :, :u]
    f = conv[:, :, u:2 * u]
    o = conv[:, :, 2 * u:]
    # Zoneout-style dropout on the forget gate, active only in training.
    f = K.in_train_phase(1 - _dropout(1 - f, self.dropout), f,
                         training=training)
    return K.concatenate([z, f, o], -1)
示例6: call
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import bias_add [as 别名]
def call(self, x, mask=None):
    """Additive attention pooling over the time axis."""
    # Per-timestep MLP score.
    scores = K.dot(x, self.kernel)
    if self.use_bias:
        scores = K.bias_add(scores, self.bias)
    if self.activation:
        scores = K.tanh(scores)
    if self.context_kernel:
        scores = K.dot(scores, self.context_kernel)
    scores = K.squeeze(scores, axis=-1)
    # Numerically stable softmax over timesteps (shift by the row max).
    exp_scores = K.exp(scores - K.max(scores, axis=-1, keepdims=True))
    if mask is not None:
        exp_scores *= K.cast(mask, K.floatx())
    att_weights = exp_scores / (
        K.sum(exp_scores, axis=1, keepdims=True) + K.epsilon())
    # Attention-weighted sum of the inputs.
    pooled = K.sum(x * K.expand_dims(att_weights, axis=-1), axis=1)
    if self.return_attention:
        return [pooled, att_weights]
    return pooled
示例7: call
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import bias_add [as 别名]
def call(self, inputs):
    """Conv2D whose kernel is binarized on the fly before the convolution."""
    quantized_kernel = binarize(self.kernel, H=self.H)
    y = K.conv2d(
        inputs,
        quantized_kernel,
        strides=self.strides,
        padding=self.padding,
        data_format=self.data_format,
        dilation_rate=self.dilation_rate)
    if self.use_bias:
        y = K.bias_add(y, self.bias, data_format=self.data_format)
    return self.activation(y) if self.activation is not None else y
示例8: preprocess_input
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import bias_add [as 别名]
def preprocess_input(self, inputs, training=None):
    """Project inputs into fused gate pre-activations with a causal conv."""
    padded = inputs
    if self.window_size > 1:
        # Pad on the left only, keeping the convolution causal.
        padded = K.temporal_padding(inputs, (self.window_size - 1, 0))
    # conv1d emulated as conv2d: expand a dummy axis, convolve, squeeze.
    hidden = K.squeeze(
        K.conv2d(K.expand_dims(padded, 2), self.kernel,
                 strides=self.strides, padding='valid',
                 data_format='channels_last'),
        2)
    if self.use_bias:
        hidden = K.bias_add(hidden, self.bias, data_format='channels_last')
    apply_dropout = self.dropout is not None and 0. < self.dropout < 1.
    if not apply_dropout:
        return hidden
    n = self.units
    z = hidden[:, :, :n]
    f = hidden[:, :, n:2 * n]
    o = hidden[:, :, 2 * n:]
    # Training-phase dropout on the forget gate only.
    f = K.in_train_phase(1 - _dropout(1 - f, self.dropout), f,
                         training=training)
    return K.concatenate([z, f, o], -1)
示例9: call
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import bias_add [as 别名]
def call(self, inputs):
    """Squeeze-and-Excitation: rescale channels by a learned gate."""
    channels_first = self.data_format == 'channels_first'
    # Squeeze: global average pool over the two spatial axes.
    squeezed = K.mean(inputs, [2, 3] if channels_first else [1, 2])
    # Excite: bottleneck MLP ending in a sigmoid gate per channel.
    gate = K.dot(squeezed, self.kernel1)
    if self.use_bias:
        gate = K.bias_add(gate, self.bias1)
    gate = K.relu(gate)
    gate = K.dot(gate, self.kernel2)
    if self.use_bias:
        gate = K.bias_add(gate, self.bias2)
    gate = K.sigmoid(gate)
    # Re-insert the spatial axes so the gate broadcasts over H and W.
    axis = -1 if channels_first else 1
    gate = K.expand_dims(K.expand_dims(gate, axis), axis)
    return inputs * gate
示例10: step
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import bias_add [as 别名]
def step(self, x, states):
    """One step of a layer-normalized LSTM cell.

    Args:
        x: input at the current timestep.
        states: [h_tm1, c_tm1, B_U, B_W] — previous hidden state, previous
            cell state, and two dropout-mask collections (presumably the
            recurrent and input dropout masks; TODO confirm against the
            enclosing cell's get_constants()).

    Returns:
        (h, [h, c]): the new hidden state and the updated state list.
    """
    h_tm1 = states[0]  # previous hidden state
    c_tm1 = states[1]  # previous cell state
    B_U = states[2]    # dropout masks applied to the recurrent path
    B_W = states[3]    # dropout masks applied to the input path
    # Fused pre-activations for all four gates, with layer normalization
    # (LN) applied separately to the input and recurrent projections.
    z = LN(K.dot(x * B_W[0], self.kernel), self.gamma_1, self.beta_1) + \
        LN(K.dot(h_tm1 * B_U[0], self.recurrent_kernel), self.gamma_2, self.beta_2)
    if self.use_bias:
        z = K.bias_add(z, self.bias)
    # Slice the fused tensor into the i / f / c / o gate blocks.
    z0 = z[:, :self.units]
    z1 = z[:, self.units: 2 * self.units]
    z2 = z[:, 2 * self.units: 3 * self.units]
    z3 = z[:, 3 * self.units:]
    i = self.recurrent_activation(z0)  # input gate
    f = self.recurrent_activation(z1)  # forget gate
    c = f * c_tm1 + i * self.activation(z2)  # new cell state
    o = self.recurrent_activation(z3)  # output gate
    # Hidden state from the layer-normalized cell state.
    h = o * self.activation(LN(c, self.gamma_3, self.beta_3))
    return h, [h, c]
示例11: call
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import bias_add [as 别名]
def call(self, inputs):
    """Conv2D whose kernel is ternarized (-H / 0 / +H) at call time."""
    quantized = ternarize(self.kernel, H=self.H)
    activ = K.conv2d(
        inputs,
        quantized,
        strides=self.strides,
        padding=self.padding,
        data_format=self.data_format,
        dilation_rate=self.dilation_rate)
    if self.use_bias:
        activ = K.bias_add(activ, self.bias, data_format=self.data_format)
    if self.activation is not None:
        activ = self.activation(activ)
    return activ
示例12: step
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import bias_add [as 别名]
def step(self, inputs, states):
    """Single step of a simple RNN with ternarized weight matrices."""
    # Apply the input dropout mask (states[1]) only when dropout is active.
    dropped = inputs * states[1] if 0 < self.dropout < 1 else inputs
    h = ternarize_dot(dropped, self.kernel)
    if self.bias is not None:
        h = K.bias_add(h, self.bias)
    prev = states[0]
    if 0 < self.recurrent_dropout < 1:
        prev *= states[2]  # recurrent dropout mask
    out = h + ternarize_dot(prev, self.recurrent_kernel)
    if self.activation is not None:
        out = self.activation(out)
    # Properly set learning phase on output tensor.
    if 0 < self.dropout + self.recurrent_dropout:
        out._uses_learning_phase = True
    return out, [out]
示例13: call
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import bias_add [as 别名]
def call(self, inputs):
    """Masked 2D convolution: the kernel is element-wise multiplied by a
    fixed mask before convolving.

    Args:
        inputs: 4D image tensor in ``self.data_format`` layout.

    Returns:
        The convolved (and optionally biased/activated) tensor.

    Raises:
        ValueError: if ``self.rank`` is not 2 — only 2D is implemented.
    """
    if self.rank != 2:
        # BUG FIX: the original `if self.rank == 2:` silently fell through
        # for any other rank, crashing below with an unbound-local
        # NameError on `outputs`; fail loudly and explicitly instead.
        raise ValueError(
            'Masked convolution only supports rank 2, got rank=%s'
            % (self.rank,))
    outputs = K.conv2d(
        inputs,
        self.kernel * self.mask,  # zero out the masked kernel entries
        strides=self.strides,
        padding=self.padding,
        data_format=self.data_format,
        dilation_rate=self.dilation_rate)
    if self.use_bias:
        outputs = K.bias_add(
            outputs,
            self.bias,
            data_format=self.data_format)
    if self.activation is not None:
        return self.activation(outputs)
    return outputs
示例14: call
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import bias_add [as 别名]
def call(self, inputs, **kwargs):
    """Conv2D followed by batch normalization and an optional activation."""
    outputs = K.conv2d(
        inputs,
        self.kernel,
        strides=self.strides,
        padding=self.padding,
        data_format=self.data_format,
        dilation_rate=self.dilation_rate)
    if self.use_bias:
        outputs = K.bias_add(
            outputs,
            self.bias,
            data_format=self.data_format)
    # NOTE(review): a fresh BatchNormalization layer is instantiated on
    # every call, so its weights are recreated per invocation instead of
    # being shared and trained across calls — this looks like a bug; the
    # layer should normally be created once in __init__/build and reused.
    # Confirm intent before relying on this behavior.
    outputs = BatchNormalization(momentum=self.momentum)(outputs)
    if self.activation is not None:
        return self.activation(outputs)
    return outputs
示例15: call
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import bias_add [as 别名]
def call(self, inputs):
    """Mixture-of-experts forward pass: gate-weighted sum of expert outputs."""
    # Expert projections: tensordot contracts the last (feature) axis.
    experts = tf.tensordot(inputs, self.expert_kernel, axes=1)
    if self.use_expert_bias:
        experts = K.bias_add(experts, self.expert_bias)
    if self.expert_activation is not None:
        experts = self.expert_activation(experts)
    # Gating network over the same inputs.
    gates = K.dot(inputs, self.gating_kernel)
    if self.use_gating_bias:
        gates = K.bias_add(gates, self.gating_bias)
    if self.gating_activation is not None:
        gates = self.gating_activation(gates)
    # Broadcast the gate weights across the units axis, then reduce over
    # the experts axis.
    tiled_gates = K.repeat_elements(
        K.expand_dims(gates, axis=1), self.units, axis=1)
    return K.sum(experts * tiled_gates, axis=2)