This page collects typical usage examples of the Python method keras.backend.cast_to_floatx. If you have been wondering what backend.cast_to_floatx does, how to call it, and what idiomatic usage looks like, the curated examples below should help. You can also explore the parent module, keras.backend, for related functionality.
15 code examples of backend.cast_to_floatx are shown below, ordered by popularity.
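For orientation before the examples: cast_to_floatx converts a numpy array or Python scalar to Keras's configured default float type (keras.backend.floatx(), float32 unless changed in keras.json). A minimal standalone sketch:

import numpy as np
from keras import backend as K

# cast_to_floatx returns a numpy value in the default float dtype
arr = K.cast_to_floatx(np.array([1, 2, 3]))  # int64 -> float32 (default floatx)
one = K.cast_to_floatx(1.)                   # Python float -> numpy float32 scalar
print(arr.dtype, K.floatx())                 # float32 float32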
Example 1: get_constants

# Required import: from keras import backend as K
# Or: from keras.backend import cast_to_floatx
def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.output_dim))
        B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(3)]
        constants.append(B_U)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(3)])
    if 0 < self.dropout_W < 1:
        input_shape = self.input_spec[0].shape
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, input_dim))
        B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(3)]
        constants.append(B_W)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(3)])
    return constants
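For context on how these constants are consumed: Keras 1.x recurrent layers pass them into the step function, where each mask multiplies the matching projection. A hedged sketch of the consuming side for a GRU-style cell (the weight names W_z, U_z, etc. are illustrative assumptions, not taken from the example above):

def step(self, x, states):
    # states carries [h_tm1, B_U, B_W] as appended by get_constants
    h_tm1, B_U, B_W = states[0], states[1], states[2]
    # in training each mask is a dropout sample; at inference it is the
    # scalar cast_to_floatx(1.), so the multiplication is a no-op
    z = self.inner_activation(K.dot(x * B_W[0], self.W_z) + K.dot(h_tm1 * B_U[0], self.U_z))
    r = self.inner_activation(K.dot(x * B_W[1], self.W_r) + K.dot(h_tm1 * B_U[1], self.U_r))
    hh = self.activation(K.dot(x * B_W[2], self.W_h) + K.dot(r * h_tm1 * B_U[2], self.U_h))
    h = z * h_tm1 + (1 - z) * hh
    return h, [h]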
Example 2: get_constants

# Required import: from keras import backend as K
# Or: from keras.backend import cast_to_floatx
def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.hidden_recurrent_dim))
        B_U = K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
        constants.append(B_U)
    else:
        constants.append(K.cast_to_floatx(1.))
    if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
        input_shape = self.input_spec[0].shape
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, input_dim))
        B_W = K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
        constants.append(B_W)
    else:
        constants.append(K.cast_to_floatx(1.))
    return constants
Example 3: get_constants

# Required import: from keras import backend as K
# Or: from keras.backend import cast_to_floatx
def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.input_dim))
        B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(4)]
        constants.append(B_U)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(4)])
    if 0 < self.dropout_W < 1:
        input_shape = K.int_shape(x)
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, int(input_dim)))
        B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(4)]
        constants.append(B_W)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(4)])
    return constants
Example 4: get_constants

# Required import: from keras import backend as K
# Or: from keras.backend import cast_to_floatx
def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.output_dim))
        B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(4)]
        constants.append(B_U)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(4)])
    if 0 < self.dropout_W < 1:
        input_shape = self.input_spec[0].shape
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, int(input_dim)))
        B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(4)]
        constants.append(B_W)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(4)])
    return constants
Example 5: get_constants

# Required import: from keras import backend as K
# Or: from keras.backend import cast_to_floatx
def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.concatenate([ones] * self.output_dim, 1)
        B_U = K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
        constants.append(B_U)
    else:
        constants.append(K.cast_to_floatx(1.))
    if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
        input_shape = self.input_spec[0].shape
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.concatenate([ones] * input_dim, 1)
        B_W = K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
        constants.append(B_W)
    else:
        constants.append(K.cast_to_floatx(1.))
    return constants
Example 6: get_constants

# Required import: from keras import backend as K
# Or: from keras.backend import cast_to_floatx
def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.output_dim))
        B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(3)]
        constants.append(B_U)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(3)])
    if 0 < self.dropout_W < 1:
        input_shape = K.int_shape(x)
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, int(input_dim)))
        B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(3)]
        constants.append(B_W)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(3)])
    return constants
Example 7: get_constants

# Required import: from keras import backend as K
# Or: from keras.backend import cast_to_floatx
def get_constants(self, inputs, training=None):
    constants = []
    '''if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.units))
        B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(3)]
        constants.append(B_U)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(3)])
    if 0 < self.dropout_W < 1:
        input_shape = K.int_shape(x)
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, int(input_dim)))
        B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(3)]
        constants.append(B_W)
    else:'''
    # the dropout branches above are disabled; always return all-ones masks
    constants.append([K.cast_to_floatx(1.) for _ in range(3)])
    return constants
Example 8: get_constants

# Required import: from keras import backend as K
# Or: from keras.backend import cast_to_floatx
def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.concatenate([ones] * self.output_dim, 1)
        B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(4)]
        constants.append(B_U)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(4)])
    if 0 < self.dropout_W < 1:
        input_shape = self.input_spec[0].shape
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.concatenate([ones] * input_dim, 1)
        B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(4)]
        constants.append(B_W)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(4)])
    return constants
Example 9: get_constants

# Required import: from keras import backend as K
# Or: from keras.backend import cast_to_floatx
def get_constants(self, inputs, training=None):
    constants = []
    # input dropout is not applied here; append all-ones placeholders
    constants.append([K.cast_to_floatx(1.) for _ in range(3)])
    if 0. < self.recurrent_dropout < 1:
        ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.units))

        def dropped_inputs():
            return K.dropout(ones, self.recurrent_dropout)

        rec_dp_mask = [K.in_train_phase(dropped_inputs,
                                        ones,
                                        training=training) for _ in range(3)]
        constants.append(rec_dp_mask)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(3)])
    return constants
Example 10: get_constants

# Required import: from keras import backend as K
# Or: from keras.backend import cast_to_floatx
def get_constants(self, inputs, training=None):
    constants = []
    if 0. < self.recurrent_dropout < 1.:
        ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.units))

        def dropped_inputs():
            return K.dropout(ones, self.recurrent_dropout)

        rec_dp_mask = [K.in_train_phase(dropped_inputs,
                                        ones,
                                        training=training) for _ in range(3)]
        constants.append(rec_dp_mask)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(3)])
    return constants
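Examples 9 and 10 use the Keras 2 idiom: K.in_train_phase accepts a zero-argument callable, so the dropout op is only built for the training branch, and the explicit training argument overrides the global learning phase. A minimal standalone sketch:

import numpy as np
from keras import backend as K

x = K.variable(np.ones((2, 4)))

def dropped():
    # built lazily; only evaluated on the training branch
    return K.dropout(x, 0.5)

train_out = K.in_train_phase(dropped, x, training=True)   # dropout applied
test_out = K.in_train_phase(dropped, x, training=False)   # identity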
Example 11: bbalpha_softmax_cross_entropy_with_mc_logits

# Required import: from keras import backend as K
# Or: from keras.backend import cast_to_floatx
def bbalpha_softmax_cross_entropy_with_mc_logits(alpha):
    alpha = K.cast_to_floatx(alpha)
    def loss(y_true, mc_logits):
        # log(p_ij), p_ij = softmax(logit_ij)
        # assert mc_logits.ndim == 3
        mc_log_softmax = mc_logits - K.max(mc_logits, axis=2, keepdims=True)
        mc_log_softmax = mc_log_softmax - K.log(K.sum(K.exp(mc_log_softmax), axis=2, keepdims=True))
        mc_ll = K.sum(y_true * mc_log_softmax, -1)  # N x K
        K_mc = mc_ll.get_shape().as_list()[1]  # only for tensorflow
        # `logsumexp` is a numerically stable log-sum-exp helper defined
        # elsewhere in the source project
        return -1. / alpha * (logsumexp(alpha * mc_ll, 1) + K.log(1.0 / K_mc))
    return loss
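A hedged usage sketch for this loss: the model and alpha value below are illustrative, y_true must broadcast against Monte Carlo logit samples of shape (batch, K_mc, classes), and logsumexp is assumed to be the helper from the source project.

# hypothetical usage; `mc_model` is assumed to output stacked MC logits
loss_fn = bbalpha_softmax_cross_entropy_with_mc_logits(alpha=0.5)
mc_model.compile(optimizer='adam', loss=loss_fn)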
Example 12: __init__

# Required import: from keras import backend as K
# Or: from keras.backend import cast_to_floatx
def __init__(self, l1=0., l2=0., **kwargs):
    self.l1 = K.cast_to_floatx(l1)
    self.l2 = K.cast_to_floatx(l2)
    self.uses_learning_phase = True
    super(ActivityRegularizerOneDim, self).__init__(**kwargs)
    # self.layer = None
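The excerpt shows only the constructor; for a sense of how the cast l1/l2 values get used, here is a hedged sketch of a matching __call__ in the pre-Keras-2 Regularizer style (self.layer.output and the exact penalty form are assumptions, not the project's actual code):

def __call__(self, loss):
    # hypothetical penalty on the layer's activations
    output = self.layer.output
    regularized_loss = loss + self.l1 * K.sum(K.mean(K.abs(output), axis=0))
    regularized_loss += self.l2 * K.sum(K.mean(K.square(output), axis=0))
    # only active in the training phase (matches uses_learning_phase above)
    return K.in_train_phase(regularized_loss, loss)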
Example 13: get_constants

# Required import: from keras import backend as K
# Or: from keras.backend import cast_to_floatx
def get_constants(self, x):
    print("begin get_constants(self, x)")
    constants = []
    if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.controller_output_dim))
        B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(4)]
        constants.append(B_U)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(4)])
    if 0 < self.dropout_W < 1:
        input_shape = self.input_spec[0].shape
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, int(input_dim)))
        B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(4)]
        constants.append(B_W)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(4)])
    # if 0 < self.dropout_R < 1:
    #     input_shape = self.input_spec[0].shape
    #     input_dim = input_shape[-1]
    #     ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
    #     ones = K.tile(ones, (1, int(input_dim)))
    #     B_R = [K.in_train_phase(K.dropout(ones, self.dropout_R), ones) for _ in range(4)]
    #     constants.append(B_R)
    # else:
    #     constants.append([K.cast_to_floatx(1.) for _ in range(4)])
    print("end get_constants(self, x)")
    return constants
Example 14: __init__

# Required import: from keras import backend as K
# Or: from keras.backend import cast_to_floatx
def __init__(self, epsilon=0.0025, **kwargs):
    super(SineReLU, self).__init__(**kwargs)
    self.supports_masking = True
    self.epsilon = K.cast_to_floatx(epsilon)
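Only the constructor appears above; a hedged sketch of the forward pass this epsilon parameterizes, following the keras-contrib SineReLU definition f(x) = max(epsilon * (sin(x) - cos(x)), x):

def call(self, inputs):
    # below zero the small sinusoid dominates; above zero the identity wins
    wave = self.epsilon * (K.sin(inputs) - K.cos(inputs))
    return K.maximum(wave, inputs)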
Example 15: test_clip

# Required import: from keras import backend as K
# Or: from keras.backend import cast_to_floatx
def test_clip():
    clip_instance = constraints.clip()
    clipped = clip_instance(K.variable(example_array))
    assert np.max(np.abs(K.eval(clipped))) <= K.cast_to_floatx(0.01)
    clip_instance = constraints.clip(0.1)
    clipped = clip_instance(K.variable(example_array))
    assert np.max(np.abs(K.eval(clipped))) <= K.cast_to_floatx(0.1)
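The test assumes a module-level example_array; a hedged sketch of a compatible fixture (the exact values in the source project may differ):

import numpy as np

# hypothetical fixture: values well outside [-0.1, 0.1] exercise both clip ranges
example_array = np.random.random((100, 100)) * 100. - 50.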