This article collects typical usage examples of the keras.backend.dropout method in Python. If you are wondering what backend.dropout does, how to call it, or what real-world uses look like, the curated examples below should help. You can also explore the keras.backend module that the method belongs to.
The following shows 14 code examples of backend.dropout, sorted by popularity by default.
Example 1: preprocess_input
# Required import: from keras import backend [as alias]
# Or: from keras.backend import dropout [as alias]
def preprocess_input(self, inputs, training=None):
    if self.window_size > 1:
        inputs = K.temporal_padding(inputs, (self.window_size - 1, 0))
    inputs = K.expand_dims(inputs, 2)  # add a dummy dimension
    output = K.conv2d(inputs, self.kernel, strides=self.strides,
                      padding='valid',
                      data_format='channels_last')
    output = K.squeeze(output, 2)  # remove the dummy dimension
    if self.use_bias:
        output = K.bias_add(output, self.bias, data_format='channels_last')

    if self.dropout is not None and 0. < self.dropout < 1.:
        z = output[:, :, :self.units]
        f = output[:, :, self.units:2 * self.units]
        o = output[:, :, 2 * self.units:]
        # zoneout on the forget gate: where (1 - f) is dropped, f becomes 1,
        # so the cell keeps its previous state
        f = K.in_train_phase(1 - _dropout(1 - f, self.dropout), f,
                             training=training)
        return K.concatenate([z, f, o], -1)
    else:
        return output
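The _dropout helper is defined elsewhere in the source repo. A minimal sketch consistent with how it is called here (an assumption, not necessarily the repo's exact code): apply K.dropout, then undo its rescaling so kept entries pass through unchanged and dropped ones become zero.

def _dropout(x, level, noise_shape=None, seed=None):
    # hypothetical sketch of the helper used above
    x = K.dropout(x, level, noise_shape=noise_shape, seed=seed)
    x *= (1. - level)  # undo K.dropout's 1/(1 - level) rescaling
    return x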
Example 2: get_constants
# Required import: from keras import backend [as alias]
# Or: from keras.backend import dropout [as alias]
def get_constants(self, inputs, training=None):
    constants = []
    '''if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.units))
        B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
               for _ in range(3)]
        constants.append(B_U)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(3)])
    if 0 < self.dropout_W < 1:
        input_shape = K.int_shape(x)
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, int(input_dim)))
        B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
               for _ in range(3)]
        constants.append(B_W)
    else:'''
    # the dropout branches above are disabled; always use all-ones masks
    constants.append([K.cast_to_floatx(1.) for _ in range(3)])
    return constants
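As a standalone illustration of the masking pattern used in the disabled block (a minimal sketch, assuming the TensorFlow backend):

from keras import backend as K

ones = K.ones((4, 3))
mask = K.in_train_phase(K.dropout(ones, 0.5), ones, training=True)
print(K.eval(mask))  # entries are 0.0 or 2.0: K.dropout rescales kept units by 1/(1 - rate)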
Example 3: step
# Required import: from keras import backend [as alias]
# Or: from keras.backend import dropout [as alias]
def step(self, inputs, states):
    h_tm1 = states[0]  # previous memory
    # B_U = states[1]  # dropout matrices for recurrent units
    # B_W = states[2]
    # attention score over the encoder state carried in inputs[:, :h_dim]
    h_tm1a = K.dot(h_tm1, self.Wa)
    eij = K.dot(K.tanh(h_tm1a + K.dot(inputs[:, :self.h_dim], self.Ua)), self.Va)
    eijs = K.repeat_elements(eij, self.h_dim, axis=1)
    # alphaij = K.softmax(eijs)
    # ci = K.permute_dimensions(K.permute_dimensions(self.h, [2, 0, 1]) * alphaij, [1, 2, 0])
    # cisum = K.sum(ci, axis=1)
    cisum = eijs * inputs[:, :self.h_dim]  # context term
    # GRU update driven by the input slice inputs[:, h_dim:] plus the context
    zr = K.sigmoid(K.dot(inputs[:, self.h_dim:], self.Wzr) +
                   K.dot(h_tm1, self.Uzr) + K.dot(cisum, self.Czr))
    zi = zr[:, :self.units]
    ri = zr[:, self.units: 2 * self.units]
    si_ = K.tanh(K.dot(inputs[:, self.h_dim:], self.W) +
                 K.dot(ri * h_tm1, self.U) + K.dot(cisum, self.C))
    si = (1 - zi) * h_tm1 + zi * si_
    return si, [si]
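Read as equations, the step above is a GRU cell with a multiplicative attention-style context term (our reading of the code, not necessarily the original paper's notation), where h = inputs[:, :h_dim] and x = inputs[:, h_dim:]:

\begin{aligned}
e &= \tanh(h_{t-1} W_a + h U_a)\, V_a \\
c &= e \odot h \\
[z;\, r] &= \sigma(x W_{zr} + h_{t-1} U_{zr} + c\, C_{zr}) \\
\tilde{s} &= \tanh(x W + (r \odot h_{t-1})\, U + c\, C) \\
s_t &= (1 - z) \odot h_{t-1} + z \odot \tilde{s}
\end{aligned}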
Example 4: get_config
# Required import: from keras import backend [as alias]
# Or: from keras.backend import dropout [as alias]
def get_config(self):
    config = {'units': self.units,
              'window_size': self.window_size,
              'stride': self.strides[0],
              'return_sequences': self.return_sequences,
              'go_backwards': self.go_backwards,
              'stateful': self.stateful,
              'unroll': self.unroll,
              'use_bias': self.use_bias,
              'dropout': self.dropout,
              'activation': activations.serialize(self.activation),
              'kernel_initializer': initializers.serialize(self.kernel_initializer),
              'bias_initializer': initializers.serialize(self.bias_initializer),
              'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
              'bias_regularizer': regularizers.serialize(self.bias_regularizer),
              'activity_regularizer': regularizers.serialize(self.activity_regularizer),
              'kernel_constraint': constraints.serialize(self.kernel_constraint),
              'bias_constraint': constraints.serialize(self.bias_constraint),
              'input_dim': self.input_dim,
              'input_length': self.input_length}
    base_config = super(QRNN, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
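Because get_config returns only plain Python values, a model containing this layer can be reloaded by passing the class via custom_objects (a hypothetical usage sketch; the file name is a placeholder):

from keras.models import load_model

model = load_model('qrnn_model.h5', custom_objects={'QRNN': QRNN})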
Example 5: step
# Required import: from keras import backend [as alias]
# Or: from keras.backend import dropout [as alias]
def step(self, inputs, states):
    if 0 < self.dropout < 1:
        h = ternarize_dot(inputs * states[1], self.kernel)  # states[1]: input dropout mask
    else:
        h = ternarize_dot(inputs, self.kernel)
    if self.bias is not None:
        h = K.bias_add(h, self.bias)

    prev_output = states[0]
    if 0 < self.recurrent_dropout < 1:
        prev_output *= states[2]  # states[2]: recurrent dropout mask
    output = h + ternarize_dot(prev_output, self.recurrent_kernel)
    if self.activation is not None:
        output = self.activation(output)

    # Properly set learning phase on output tensor.
    if 0 < self.dropout + self.recurrent_dropout:
        output._uses_learning_phase = True
    return output, [output]
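ternarize_dot is an external helper from the source repo (a ternary-weight RNN). A plausible minimal sketch, with a hypothetical straight-through ternarize quantizer (both assumed, not the repo's exact code):

def ternarize(w, H=1):
    # quantize to {-H, 0, +H} in the forward pass, identity gradient backward
    w = K.clip(w, -H, H)
    wt = H * K.cast(w > 0.5 * H, K.floatx()) - H * K.cast(w < -0.5 * H, K.floatx())
    return w + K.stop_gradient(wt - w)

def ternarize_dot(x, w):
    return K.dot(x, ternarize(w))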
Example 6: get_config
# Required import: from keras import backend [as alias]
# Or: from keras.backend import dropout [as alias]
def get_config(self):
    config = {'units': self.units,
              'activation': activations.serialize(self.activation),
              'use_bias': self.use_bias,
              'kernel_initializer': initializers.serialize(self.kernel_initializer),
              'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
              'bias_initializer': initializers.serialize(self.bias_initializer),
              'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
              'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
              'bias_regularizer': regularizers.serialize(self.bias_regularizer),
              'activity_regularizer': regularizers.serialize(self.activity_regularizer),
              'kernel_constraint': constraints.serialize(self.kernel_constraint),
              'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
              'bias_constraint': constraints.serialize(self.bias_constraint),
              'dropout': self.dropout,
              'recurrent_dropout': self.recurrent_dropout}
    base_config = super(TT_RNN, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
Example 7: get_constants
# Required import: from keras import backend [as alias]
# Or: from keras.backend import dropout [as alias]
def get_constants(self, inputs, training=None):
    constants = []
    if 0. < self.recurrent_dropout < 1.:
        ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.units))

        def dropped_inputs():
            return K.dropout(ones, self.recurrent_dropout)

        # one mask per gate, each fixed across timesteps
        rec_dp_mask = [K.in_train_phase(dropped_inputs,
                                        ones,
                                        training=training) for _ in range(3)]
        constants.append(rec_dp_mask)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(3)])
    return constants
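The masks returned here are consumed inside the cell's step function. A minimal sketch of how one of the three masks is typically applied (an illustration with hypothetical names, not the repo's code):

def recurrent_contribution(h_tm1, recurrent_kernel, rec_dp_mask):
    # mask the previous hidden state before the recurrent matmul
    return K.dot(h_tm1 * rec_dp_mask, recurrent_kernel)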
Example 8: preprocess_input
# Required import: from keras import backend [as alias]
# Or: from keras.backend import dropout [as alias]
def preprocess_input(self, inputs, training=None):
    if self.implementation == 0:
        input_shape = K.int_shape(inputs)
        input_dim = input_shape[2]
        timesteps = input_shape[1]

        # precompute the input projections for all four LSTM gates
        x_i = _time_distributed_dense(inputs, self.kernel_i, self.bias_i,
                                      self.dropout, input_dim, self.units,
                                      timesteps, training=training)
        x_f = _time_distributed_dense(inputs, self.kernel_f, self.bias_f,
                                      self.dropout, input_dim, self.units,
                                      timesteps, training=training)
        x_c = _time_distributed_dense(inputs, self.kernel_c, self.bias_c,
                                      self.dropout, input_dim, self.units,
                                      timesteps, training=training)
        x_o = _time_distributed_dense(inputs, self.kernel_o, self.bias_o,
                                      self.dropout, input_dim, self.units,
                                      timesteps, training=training)
        return K.concatenate([x_i, x_f, x_c, x_o], axis=2)
    else:
        return inputs
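_time_distributed_dense was a private helper in older Keras releases (removed in later versions). A condensed sketch of what it does, paraphrased from memory rather than copied, so treat it as an approximation:

def _time_distributed_dense(x, w, b=None, dropout=None, input_dim=None,
                            output_dim=None, timesteps=None, training=None):
    # apply the same dense projection (and dropout mask) at every timestep
    if dropout is not None and 0. < dropout < 1.:
        ones = K.ones_like(K.reshape(x[:, 0, :], (-1, input_dim)))
        dropout_matrix = K.dropout(ones, dropout)
        expanded = K.repeat(dropout_matrix, timesteps)  # same mask each step
        x = K.in_train_phase(x * expanded, x, training=training)
    x = K.reshape(x, (-1, input_dim))  # fold time into the batch axis
    x = K.dot(x, w)
    if b is not None:
        x = K.bias_add(x, b)
    return K.reshape(x, (-1, timesteps, output_dim))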
Example 9: get_config
# Required import: from keras import backend [as alias]
# Or: from keras.backend import dropout [as alias]
def get_config(self):
    config = {'units': self.units,
              'activation': activations.serialize(self.activation),
              'recurrent_activation': activations.serialize(self.recurrent_activation),
              'use_bias': self.use_bias,
              'kernel_initializer': initializers.serialize(self.kernel_initializer),
              'recurrent_initializer': initializers.serialize(self.recurrent_initializer),
              'bias_initializer': initializers.serialize(self.bias_initializer),
              'unit_forget_bias': self.unit_forget_bias,
              'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),
              'recurrent_regularizer': regularizers.serialize(self.recurrent_regularizer),
              'bias_regularizer': regularizers.serialize(self.bias_regularizer),
              'activity_regularizer': regularizers.serialize(self.activity_regularizer),
              'kernel_constraint': constraints.serialize(self.kernel_constraint),
              'recurrent_constraint': constraints.serialize(self.recurrent_constraint),
              'bias_constraint': constraints.serialize(self.bias_constraint),
              'dropout': self.dropout,
              'recurrent_dropout': self.recurrent_dropout}
    base_config = super(PhasedLSTM, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
Example 10: zoneout
# Required import: from keras import backend [as alias]
# Or: from keras.backend import dropout [as alias]
def zoneout(self, v, prev_v, pr=0.):
    diff = v - prev_v
    diff = K.in_train_phase(K.dropout(diff, pr, noise_shape=(self.output_dim,)),
                            diff)
    # At test time this always returns v * (1 - pr) + prev_v * pr.
    # At training time, where K.dropout zeroes an entry we return prev_v;
    # where it returns diff / (1 - pr) we return v.
    return prev_v + diff * (1 - pr)
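A typical call site, with hypothetical names, blends the candidate and previous hidden states inside step():

h = self.zoneout(h_candidate, h_tm1, pr=self.zoneout_rate)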
Example 11: call
# Required import: from keras import backend [as alias]
# Or: from keras.backend import dropout [as alias]
def call(self, x, mask=None):
    if 0. < self.p < 1.:
        noise_shape = self._get_noise_shape(x)
        # note: no K.in_train_phase wrapper, so dropout is applied
        # at inference time as well
        x = K.dropout(x, self.p, noise_shape)
    return x
Example 12: Dropout_mc
# Required import: from keras import backend [as alias]
# Or: from keras.backend import dropout [as alias]
def Dropout_mc(p):
    # a Lambda that applies dropout unconditionally, i.e. also at test time
    layer = Lambda(lambda x: K.dropout(x, p), output_shape=lambda shape: shape)
    return layer
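Because the Lambda calls K.dropout unconditionally, dropout stays active at prediction time, which is exactly what Monte Carlo dropout needs. A hypothetical usage sketch (model and x_test are placeholders):

import numpy as np

T = 20  # number of stochastic forward passes
mc_samples = np.stack([model.predict(x_test) for _ in range(T)])
y_mean = mc_samples.mean(axis=0)  # predictive mean
y_std = mc_samples.std(axis=0)    # predictive uncertainty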
Example 13: get_logit_mlp_layers
# Required import: from keras import backend [as alias]
# Or: from keras.backend import dropout [as alias]
def get_logit_mlp_layers(nb_layers, nb_units, p, wd, nb_classes,
                         layers=None, dropout='none'):
    if layers is None:  # avoid a shared mutable default argument
        layers = []
    if dropout == 'MC':
        D = Dropout_mc
    elif dropout == 'pW':
        D = pW
    else:  # 'none'
        D = Identity
    for _ in range(nb_layers):
        layers.append(D(p))
        layers.append(Dense(nb_units, activation='relu', W_regularizer=l2(wd)))
    layers.append(D(p))
    layers.append(Dense(nb_classes, W_regularizer=l2(wd)))
    return layers
Example 14: get_logit_cnn_layers
# Required import: from keras import backend [as alias]
# Or: from keras.backend import dropout [as alias]
def get_logit_cnn_layers(nb_units, p, wd, nb_classes, layers=None,
                         dropout='none'):
    if layers is None:  # avoid a shared mutable default argument
        layers = []
    # number of convolutional filters to use
    nb_filters = 32
    # size of pooling area for max pooling
    pool_size = (2, 2)
    # convolution kernel size
    kernel_size = (3, 3)

    if dropout == 'MC':
        D = Dropout_mc
    elif dropout == 'pW':
        D = pW
    else:  # 'none'
        D = Identity

    layers.append(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
                                border_mode='valid', W_regularizer=l2(wd)))
    layers.append(Activation('relu'))
    layers.append(Convolution2D(nb_filters, kernel_size[0], kernel_size[1],
                                W_regularizer=l2(wd)))
    layers.append(Activation('relu'))
    layers.append(MaxPooling2D(pool_size=pool_size))

    layers.append(Flatten())
    layers.append(D(p))
    layers.append(Dense(nb_units, W_regularizer=l2(wd)))
    layers.append(Activation('relu'))
    layers.append(D(p))
    layers.append(Dense(nb_classes, W_regularizer=l2(wd)))
    return layers
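A hypothetical way to assemble the returned list into a model with the Keras 1-style functional API used by these snippets (the input shape and hyperparameters are placeholders):

from keras.layers import Input, Activation
from keras.models import Model

inp = Input(shape=(28, 28, 1))
x = inp
for layer in get_logit_cnn_layers(nb_units=128, p=0.5, wd=1e-4,
                                  nb_classes=10, layers=[], dropout='MC'):
    x = layer(x)
model = Model(input=inp, output=Activation('softmax')(x))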