

Python backend.cast_to_floatx Method Code Examples

This article collects typical usage examples of the Python method keras.backend.cast_to_floatx. If you are wondering how backend.cast_to_floatx is used in practice, or are looking for concrete examples of calling it, the hand-picked code samples below may help. You can also explore further usage examples from its containing module, keras.backend.


The following presents 15 code examples of the backend.cast_to_floatx method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
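Before diving into the examples, here is a minimal usage sketch (written for this article, not taken from any of the projects below) of what cast_to_floatx does: it converts a Python number or NumPy array to Keras's default float type (floatx, usually float32) and returns a NumPy value. This is the reason the recurrent-layer examples below use it to turn constant 1.0 "no dropout" placeholders into backend-compatible floats.

import numpy as np
from keras import backend as K

print(K.floatx())              # the configured default float type, typically 'float32'

x = np.array([1, 2, 3], dtype='int64')
x_float = K.cast_to_floatx(x)
print(x_float.dtype)           # matches K.floatx(), e.g. float32

# Plain Python scalars are converted as well; this is the pattern used in the
# recurrent-layer examples below, e.g. [K.cast_to_floatx(1.) for _ in range(3)].
print(K.cast_to_floatx(1.))    # 1.0, now stored in the floatx dtype

Note that cast_to_floatx operates on plain NumPy data and Python numbers rather than symbolic tensors, which is why the examples below apply it to default constants and hyperparameters (dropout placeholders, regularization factors, epsilon values) rather than to tensors inside the computation graph.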

Example 1: get_constants

# Required import: from keras import backend [as alias]
# Or: from keras.backend import cast_to_floatx [as alias]
def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
        # Build a (batch, output_dim) tensor of ones and drop it out during
        # training; these masks are reused as constants at every timestep.
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.output_dim))
        B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(3)]
        constants.append(B_U)
    else:
        # No recurrent dropout: use constant 1.0 masks in the default float dtype.
        constants.append([K.cast_to_floatx(1.) for _ in range(3)])

    if 0 < self.dropout_W < 1:
        input_shape = self.input_spec[0].shape
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, input_dim))
        B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(3)]
        constants.append(B_W)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(3)])
    return constants
Developer: LaurentMazare, Project: deep-models, Lines of code: 22, Source file: rhn.py

Example 2: get_constants

# Required import: from keras import backend [as alias]
# Or: from keras.backend import cast_to_floatx [as alias]
def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.hidden_recurrent_dim))
        B_U = K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
        constants.append(B_U)
    else:
        constants.append(K.cast_to_floatx(1.))

    if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
        input_shape = self.input_spec[0].shape
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, input_dim))
        B_W = K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
        constants.append(B_W)
    else:
        constants.append(K.cast_to_floatx(1.))

    return constants
Developer: bnsnapper, Project: keras_bn_library, Lines of code: 23, Source file: rnnrbm.py

Example 3: get_constants

# Required import: from keras import backend [as alias]
# Or: from keras.backend import cast_to_floatx [as alias]
def get_constants(self, x):
    constants = []
    if 0 < self.dropout_U < 1:
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, self.input_dim))
        B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(4)]
        constants.append(B_U)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(4)])

    if 0 < self.dropout_W < 1:
        input_shape = K.int_shape(x)
        input_dim = input_shape[-1]
        ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        ones = K.tile(ones, (1, int(input_dim)))
        B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(4)]
        constants.append(B_W)
    else:
        constants.append([K.cast_to_floatx(1.) for _ in range(4)])
    return constants
Developer: bnsnapper, Project: keras_bn_library, Lines of code: 22, Source file: recurrent.py

Example 4: get_constants

# Required import: from keras import backend [as alias]
# Or: from keras.backend import cast_to_floatx [as alias]
def get_constants(self, x):
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.output_dim))
            B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(4)]
            constants.append(B_U)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])

        if 0 < self.dropout_W < 1:
            input_shape = self.input_spec[0].shape
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))
            B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(4)]
            constants.append(B_W)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])
        return constants 
Developer: SigmaQuan, Project: NTM-Keras, Lines of code: 22, Source file: lstm2ntm.py

Example 5: get_constants

# Required import: from keras import backend [as alias]
# Or: from keras.backend import cast_to_floatx [as alias]
def get_constants(self, x):
      constants = []
      if 0 < self.dropout_U < 1:
          ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
          ones = K.concatenate([ones] * self.output_dim, 1)
          B_U = K.in_train_phase(K.dropout(ones, self.dropout_U), ones)
          constants.append(B_U)
      else:
          constants.append(K.cast_to_floatx(1.))
      if self.consume_less == 'cpu' and 0 < self.dropout_W < 1:
          input_shape = self.input_spec[0].shape
          input_dim = input_shape[-1]
          ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
          ones = K.concatenate([ones] * input_dim, 1)
          B_W = K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
          constants.append(B_W)
      else:
          constants.append(K.cast_to_floatx(1.))
      return constants 
Developer: commaai, Project: research, Lines of code: 21, Source file: layers.py

Example 6: get_constants

# Required import: from keras import backend [as alias]
# Or: from keras.backend import cast_to_floatx [as alias]
def get_constants(self, x):
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.output_dim))
            B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(3)]
            constants.append(B_U)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(3)])

        if 0 < self.dropout_W < 1:
            input_shape = K.int_shape(x)
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))
            B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(3)]
            constants.append(B_W)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(3)])
        return constants 
Developer: wentaozhu, Project: recurrent-attention-for-QA-SQUAD-based-on-keras, Lines of code: 22, Source file: QnA.py

Example 7: get_constants

# Required import: from keras import backend [as alias]
# Or: from keras.backend import cast_to_floatx [as alias]
def get_constants(self, inputs, training=None):
        constants = []
        '''if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.units))
            B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(3)]
            constants.append(B_U)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(3)])

        if 0 < self.dropout_W < 1:
            input_shape = K.int_shape(x)
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))
            B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(3)]
            constants.append(B_W)
        else:'''
        constants.append([K.cast_to_floatx(1.) for _ in range(3)])
        return constants 
Developer: wentaozhu, Project: recurrent-attention-for-QA-SQUAD-based-on-keras, Lines of code: 22, Source file: rnnlayer.py

Example 8: get_constants

# Required import: from keras import backend [as alias]
# Or: from keras.backend import cast_to_floatx [as alias]
def get_constants(self, x):
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.concatenate([ones] * self.output_dim, 1)
            B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(4)]
            constants.append(B_U)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])

        if 0 < self.dropout_W < 1:
            input_shape = self.input_spec[0].shape
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.concatenate([ones] * input_dim, 1)
            B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(4)]
            constants.append(B_W)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])
        return constants 
Developer: braingineer, Project: ikelos, Lines of code: 22, Source file: rtn.py

Example 9: get_constants

# Required import: from keras import backend [as alias]
# Or: from keras.backend import cast_to_floatx [as alias]
def get_constants(self, inputs, training=None):
        constants = []
        constants.append([K.cast_to_floatx(1.) for _ in range(3)])

        if 0. < self.recurrent_dropout < 1:
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.units))

            def dropped_inputs():
                return K.dropout(ones, self.recurrent_dropout)
            rec_dp_mask = [K.in_train_phase(dropped_inputs,
                                            ones,
                                            training=training) for _ in range(3)]
            constants.append(rec_dp_mask)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(3)])
        return constants 
Developer: Tuyki, Project: TT_RNN, Lines of code: 19, Source file: TTRNN.py

Example 10: get_constants

# Required import: from keras import backend [as alias]
# Or: from keras.backend import cast_to_floatx [as alias]
def get_constants(self, inputs, training=None):
        constants = []
        if 0. < self.recurrent_dropout < 1.:
            ones = K.ones_like(K.reshape(inputs[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.units))

            def dropped_inputs():
                return K.dropout(ones, self.recurrent_dropout)

            rec_dp_mask = [K.in_train_phase(dropped_inputs,
                                            ones,
                                            training=training) for _ in range(3)]
            constants.append(rec_dp_mask)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(3)])
        return constants 
Developer: aspuru-guzik-group, Project: chemical_vae, Lines of code: 18, Source file: tgru_k2_gpu.py

Example 11: bbalpha_softmax_cross_entropy_with_mc_logits

# Required import: from keras import backend [as alias]
# Or: from keras.backend import cast_to_floatx [as alias]
def bbalpha_softmax_cross_entropy_with_mc_logits(alpha):
    alpha = K.cast_to_floatx(alpha)
    def loss(y_true, mc_logits):
        # log(p_ij), p_ij = softmax(logit_ij)
        #assert mc_logits.ndim == 3
        mc_log_softmax = mc_logits - K.max(mc_logits, axis=2, keepdims=True)
        mc_log_softmax = mc_log_softmax - K.log(K.sum(K.exp(mc_log_softmax), axis=2, keepdims=True))
        mc_ll = K.sum(y_true * mc_log_softmax, -1)  # N x K
        K_mc = mc_ll.get_shape().as_list()[1]	# only for tensorflow
        return - 1. / alpha * (logsumexp(alpha * mc_ll, 1) + K.log(1.0 / K_mc))
    return loss


###################################################################
# the model 
Developer: YingzhenLi, Project: Dropout_BBalpha, Lines of code: 17, Source file: BBalpha_dropout.py

Example 12: __init__

# Required import: from keras import backend [as alias]
# Or: from keras.backend import cast_to_floatx [as alias]
def __init__(self, l1=0., l2=0.,**kwargs):
        self.l1 = K.cast_to_floatx(l1)
        self.l2 = K.cast_to_floatx(l2)
        self.uses_learning_phase = True
        super(ActivityRegularizerOneDim, self).__init__(**kwargs)
        #self.layer = None 
Developer: wentaozhu, Project: deep-mil-for-whole-mammogram-classification, Lines of code: 8, Source file: customlayers.py

Example 13: get_constants

# Required import: from keras import backend [as alias]
# Or: from keras.backend import cast_to_floatx [as alias]
def get_constants(self, x):
        print("begin get_constants(self, x)")
        constants = []
        if 0 < self.dropout_U < 1:
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, self.controller_output_dim))
            B_U = [K.in_train_phase(K.dropout(ones, self.dropout_U), ones) for _ in range(4)]
            constants.append(B_U)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])

        if 0 < self.dropout_W < 1:
            input_shape = self.input_spec[0].shape
            input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
            ones = K.tile(ones, (1, int(input_dim)))
            B_W = [K.in_train_phase(K.dropout(ones, self.dropout_W), ones) for _ in range(4)]
            constants.append(B_W)
        else:
            constants.append([K.cast_to_floatx(1.) for _ in range(4)])

        # if 0 < self.dropout_R < 1:
        #     input_shape = self.input_spec[0].shape
        #     input_dim = input_shape[-1]
        #     ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
        #     ones = K.tile(ones, (1, int(input_dim)))
        #     B_R = [K.in_train_phase(K.dropout(ones, self.dropout_R), ones) for _ in range(4)]
        #     constants.append(B_R)
        # else:
        #     constants.append([K.cast_to_floatx(1.) for _ in range(4)])

        print("end get_constants(self, x)")
        return constants 
Developer: SigmaQuan, Project: NTM-Keras, Lines of code: 35, Source file: ntm.py

Example 14: __init__

# Required import: from keras import backend [as alias]
# Or: from keras.backend import cast_to_floatx [as alias]
def __init__(self, epsilon=0.0025, **kwargs):
        super(SineReLU, self).__init__(**kwargs)
        self.supports_masking = True
        self.epsilon = K.cast_to_floatx(epsilon) 
Developer: keras-team, Project: keras-contrib, Lines of code: 6, Source file: sinerelu.py

Example 15: test_clip

# Required import: from keras import backend [as alias]
# Or: from keras.backend import cast_to_floatx [as alias]
def test_clip():
    clip_instance = constraints.clip()
    clipped = clip_instance(K.variable(example_array))
    assert(np.max(np.abs(K.eval(clipped))) <= K.cast_to_floatx(0.01))
    clip_instance = constraints.clip(0.1)
    clipped = clip_instance(K.variable(example_array))
    assert(np.max(np.abs(K.eval(clipped))) <= K.cast_to_floatx(0.1)) 
Developer: keras-team, Project: keras-contrib, Lines of code: 9, Source file: constraints_test.py


Note: The keras.backend.cast_to_floatx examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.