

Python tensor.log1p Method Code Examples

This article collects typical usage examples of the Python method theano.tensor.log1p. If you are wondering what tensor.log1p does or how to use it in practice, the curated examples below should help. You can also explore the other methods available in theano.tensor.


The sections below present 7 code examples of tensor.log1p, drawn from open-source projects and ordered by popularity.
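
Before the examples, here is a minimal sketch of how tensor.log1p is typically used: build a symbolic expression, then compile it with theano.function. The variable names below are illustrative, not taken from any of the projects.

import numpy as np
import theano
import theano.tensor as T

x = T.dvector('x')                      # symbolic input vector
f = theano.function([x], T.log1p(x))    # elementwise log(1 + x)

print(f(np.array([0.0, 1e-10, 1.0])))   # -> [0.0, ~1e-10, ~0.6931]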

Example 1: log1p

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import log1p [as alias]
def log1p(x):
    """
    Elemwise log(1 + `x`).

    """
    # see decorator for function body 
Developer: muhanzhang | Project: D-VAE | Lines: 8 | Source: basic.py
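
The body above is elided because, in Theano's source, an elemwise-constructor decorator generates it from the matching scalar op. The point of log1p is numerical accuracy near zero: for tiny x, log(1 + x) loses everything to rounding while log1p(x) does not. A quick NumPy illustration:

import numpy as np

print(np.log(1 + 1e-18))   # 0.0 -- the 1e-18 is lost when added to 1.0
print(np.log1p(1e-18))     # 1e-18 -- accurate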

Example 2: log_add

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import log1p [as alias]
def log_add(a, b):
    max_ = tensor.maximum(a, b)
    return max_ + tensor.log1p(tensor.exp(a + b - 2 * max_))
Developer: mohammadpz | Project: CTC-Connectionist-Temporal-Classification | Lines: 5 | Source: ctc_cost.py
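
This works because log(exp(a) + exp(b)) = max(a, b) + log1p(exp(min(a, b) - max(a, b))), and since one of a, b equals max_, the exponent a + b - 2*max_ is exactly min - max. A NumPy sanity check with illustrative values:

import numpy as np

a, b = 2.0, -3.0
max_ = max(a, b)
stable = max_ + np.log1p(np.exp(a + b - 2 * max_))
naive = np.log(np.exp(a) + np.exp(b))
assert np.isclose(stable, naive)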

Example 3: gaussian_dropout_kl

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import log1p [as alias]
import lasagne
import theano.tensor as tt

# GaussianDropoutLayer and DenseGaussianDropoutLayer are layer classes
# defined elsewhere in the kusanagi project.


def gaussian_dropout_kl(output_layer, input_lengthscale=1.0,
                        hidden_lengthscale=1.0):
    '''
    KL divergence approximation from:
    "Variational Dropout Sparsifies Deep Neural Networks",
    Molchanov et al., 2017.
    '''
    layers = lasagne.layers.get_all_layers(output_layer)
    k1, k2, k3 = 0.63576, 1.8732, 1.48695
    C = -0.20452104900969109
    # the constant used in the paper would be C = -k1
    reg = []
    sigmoid = tt.nnet.sigmoid
    for i in range(1, len(layers)):
        # check if this is a dropout layer
        is_dropout_a = isinstance(layers[i], GaussianDropoutLayer)
        is_dropout_b = isinstance(layers[i], DenseGaussianDropoutLayer)
        if is_dropout_a or is_dropout_b:
            log_alpha = layers[i].log_alpha
            # there should be one log_alpha per weight
            log_alpha_shape = tuple(log_alpha.shape.eval())
            W_shape = tuple(layers[i].W.get_value().shape)
            if log_alpha_shape != W_shape:
                # we assume that if alpha does not have the same shape as W
                # (i.e. one alpha parameter per weight) there's either one per
                # output or per layer
                # TODO make this compatible with conv layers
                log_alpha = (log_alpha*tt.ones_like(layers[i].W.T)).T
            kl = -(k1*sigmoid(k2+k3*log_alpha)
                   - 0.5*tt.log1p(tt.exp(-log_alpha))
                   + C)
            is_input = isinstance(layers[i].input_layer,
                                  lasagne.layers.InputLayer)
            rw = input_lengthscale if is_input else hidden_lengthscale
            reg.append(rw*kl.sum())

    return sum(reg) 
Developer: mcgillmrl | Project: kusanagi | Lines: 39 | Source: objectives.py
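
For reference, the expression inside the loop follows the approximation from Molchanov et al. (2017), which the code negates to obtain the KL term:

    -KL(q || p) ≈ k1 * sigmoid(k2 + k3 * log_alpha) - 0.5 * log1p(exp(-log_alpha)) + C

where log1p(exp(-log_alpha)) equals log(1 + 1/alpha). The commented-out C = -k1 matches the constant given in the paper; the value hard-coded here differs slightly.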

Example 4: _log_add

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import log1p [as alias]
def _log_add(a, b):
    max_ = tensor.maximum(a, b)
    return (max_ + tensor.log1p(tensor.exp(a + b - 2 * max_))) 
Developer: dagbldr | Project: dagbldr | Lines: 5 | Source: penalties.py

Example 5: log_add

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import log1p [as alias]
def log_add(a, b):
    max_ = T.maximum(a, b)
    return max_ + T.log1p(T.exp(a + b - 2 * max_))
Developer: JoergFranke | Project: recnet | Lines: 5 | Source: loss_function.py

Example 6: logsumexp

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import log1p [as alias]
def logsumexp(x, y):
    max_ = T.switch(x > y, x, y)   # avoid shadowing the builtins max/min
    min_ = T.switch(x > y, y, x)
    return T.log1p(T.exp(min_ - max_)) + max_
Developer: mast-group | Project: eqnet | Lines: 6 | Source: optimization.py
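
This is the same stabilized log-sum-exp as the log_add helpers in Examples 2, 4, and 5, just with the max and min made explicit via T.switch. When x == y it correctly reduces to x + log(2), as a quick NumPy check confirms:

import numpy as np

x = 1.5
# logsumexp(x, x) should equal log(2 * exp(x)) = x + log(2)
assert np.isclose(np.log1p(np.exp(x - x)) + x, np.log(2 * np.exp(x)))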

Example 7: apply_log_domain

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import log1p [as alias]
def apply_log_domain(self, l, probs, l_len=None, probs_mask=None):
        # Does the same computation as apply, but alpha is in the log domain
        # This avoids numerical underflow issues that were not corrected in the previous version.

        def _log(a):
            return tensor.log(tensor.clip(a, 1e-12, 1e12))

        def _log_add(a, b):
            maximum = tensor.maximum(a, b)
            return (maximum + tensor.log1p(tensor.exp(a + b - 2 * maximum)))

        def _log_mul(a, b):
            return a + b

        # See comments above
        B = probs.shape[1]
        C = probs.shape[2]-1
        L = l.shape[0]
        S = 2*L+1
        
        l_blk = C * tensor.ones((S, B), dtype='int32')
        l_blk = tensor.set_subtensor(l_blk[1::2,:], l)
        l_blk = l_blk.T     # now l_blk is B x S

        alpha0 = tensor.concatenate([   tensor.ones((B, 1)),
                                        tensor.zeros((B, S-1))
                                    ], axis=1)
        alpha0 = _log(alpha0)

        l_blk_2 = tensor.concatenate([-tensor.ones((B,2)), l_blk[:,:-2]], axis=1)
        l_case2 = tensor.neq(l_blk, C) * tensor.neq(l_blk, l_blk_2)

        def recursion(p, p_mask, prev_alpha):
            prev_alpha_1 = tensor.concatenate([tensor.zeros((B,1)),prev_alpha[:,:-1]], axis=1)
            prev_alpha_2 = tensor.concatenate([tensor.zeros((B,2)),prev_alpha[:,:-2]], axis=1)

            alpha_bar1 = tensor.set_subtensor(prev_alpha[:,1:], _log_add(prev_alpha[:,1:],prev_alpha[:,:-1]))
            alpha_bar2 = tensor.set_subtensor(alpha_bar1[:,2:], _log_add(alpha_bar1[:,2:],prev_alpha[:,:-2]))

            alpha_bar = tensor.switch(l_case2, alpha_bar2, alpha_bar1)

            probs = _log(p[tensor.arange(B)[:,None].repeat(S,axis=1).flatten(), l_blk.flatten()].reshape((B,S)))
            next_alpha = _log_mul(alpha_bar, probs)
            next_alpha = tensor.switch(p_mask[:,None], next_alpha, prev_alpha)
            
            return next_alpha

        alpha, _ = scan(fn=recursion,
                             sequences=[probs, probs_mask],
                             outputs_info=[alpha0])

        last_alpha = alpha[-1]
        # last_alpha = theano.printing.Print('a-1')(last_alpha)

        prob = _log_add(last_alpha[tensor.arange(B), 2*l_len.astype('int32')-1],
                        last_alpha[tensor.arange(B), 2*l_len.astype('int32')])

        # return the negative log probability of the labellings
        return -prob 
Developer: thomasmesnard | Project: CTC-LSTM | Lines: 61 | Source: ctc.py
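
For context: CTC interleaves a blank symbol between every pair of labels and at both ends, so a label sequence of length L expands to S = 2L + 1 states. That is what l_blk encodes above, and why the final probability adds the forward variable at states 2*l_len - 1 and 2*l_len (the last label and the trailing blank). A minimal NumPy illustration of the interleaving, with illustrative values:

import numpy as np

C = 4                        # index of the blank symbol
l = np.array([2, 0, 3])      # label sequence, L = 3
S = 2 * len(l) + 1           # 7 CTC states
l_blk = np.full(S, C, dtype='int32')
l_blk[1::2] = l              # blanks at even positions, labels at odd
print(l_blk)                 # [4 2 4 0 4 3 4]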


Note: The theano.tensor.log1p examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from community-contributed open-source projects; copyright remains with the original authors, and any redistribution or use should follow each project's license. Please do not republish without permission.