

Python functions.log_softmax Method Code Examples

This article collects typical usage examples of the Python method chainer.functions.log_softmax. If you are wondering what exactly functions.log_softmax does, how to use it, or what it looks like in practice, the curated code examples below may help. You can also explore further usage examples for its containing module, chainer.functions.


The following shows 15 code examples of the functions.log_softmax method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
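
As a quick orientation before the examples, here is a minimal standalone sketch (not drawn from any of the projects below) of what F.log_softmax computes: the logarithm of the softmax, evaluated in a numerically stable way rather than as the literal composition F.log(F.softmax(x)).

import numpy as np
import chainer.functions as F

x = np.array([[1.0, 2.0, 3.0]], dtype=np.float32)

# Mathematically log(softmax(x)), but computed stably by subtracting
# the per-row maximum before exponentiating.
stable = F.log_softmax(x)        # operates along axis=1 by default
naive = F.log(F.softmax(x))

print(stable.array)              # [[-2.4076059 -1.4076059 -0.4076059]]
print(np.allclose(stable.array, naive.array))  # True for well-scaled inputs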

Example 1: softmax_cross_entropy

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import log_softmax [as alias]
def softmax_cross_entropy(self, y, t):
    import numpy as np

    log_softmax = F.log_softmax(y)
    # SelectItem is not supported by onnx-chainer.
    # TODO(hamaji): Support it?
    # log_prob = F.select_item(log_softmax, t)

    # TODO(hamaji): Currently, F.sum with axis=1 cannot be
    # backpropped properly.
    # log_prob = F.sum(log_softmax * t, axis=1)
    # self.batch_size = chainer.Variable(np.array(t.size, np.float32),
    #                                    name='batch_size')
    # return -F.sum(log_prob, axis=0) / self.batch_size
    log_prob = F.sum(log_softmax * t, axis=(0, 1))
    batch_size = chainer.Variable(np.array(t.shape[0], np.float32),
                                  name='batch_size')
    self.extra_inputs = [batch_size]
    loss = -log_prob / batch_size
    loss.name = 'loss'
    return loss
Author: pfnet-research | Project: chainer-compiler | Lines: 23 | Source: nin.py
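
As a sanity check (a hypothetical sketch, not part of the project above): when t is a one-hot float matrix, the manual loss computed in this example agrees with Chainer's built-in F.softmax_cross_entropy on the corresponding integer labels.

import numpy as np
import chainer.functions as F

y = np.random.randn(4, 10).astype(np.float32)       # logits
labels = np.array([3, 1, 4, 1], dtype=np.int32)
t = np.eye(10, dtype=np.float32)[labels]            # one-hot targets

manual = -F.sum(F.log_softmax(y) * t, axis=(0, 1)) / t.shape[0]
builtin = F.softmax_cross_entropy(y, labels)        # averages over the batch
print(np.allclose(manual.array, builtin.array))     # True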

Example 2: forward

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import log_softmax [as alias]
def forward(self, x, t):
    xp = cuda.get_array_module(x)
    y = self.predictor(x)
    log_softmax = F.log_softmax(y)
    # SelectItem is not supported by onnx-chainer.
    # TODO(hamaji): Support it?
    # log_prob = F.select_item(log_softmax, t)

    batch_size = chainer.Variable(xp.array(t.size, xp.float32),
                                  name='batch_size')
    self.extra_inputs = [batch_size]
    # TODO(hamaji): Currently, F.sum with axis=1 cannot be
    # backpropped properly.
    # log_prob = F.sum(log_softmax * t, axis=1)
    # return -F.sum(log_prob, axis=0) / self.batch_size
    log_prob = F.sum(log_softmax * t, axis=(0, 1))
    loss = -log_prob / batch_size
    reporter.report({'loss': loss}, self)
    if self.compute_accuracy:
        acc = accuracy.accuracy(y, xp.argmax(t, axis=1))
        reporter.report({'accuracy': acc}, self)
    loss.name = 'loss'
    return loss
Author: pfnet-research | Project: chainer-compiler | Lines: 25 | Source: gen_mnist_mlp.py

Example 3: softmax_cross_entropy

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import log_softmax [as alias]
def softmax_cross_entropy(self, y, t):
    import numpy as np

    log_softmax = F.log_softmax(y)
    # SelectItem is not supported by onnx-chainer.
    # TODO(hamaji): Support it?
    # log_prob = F.select_item(log_softmax, t)

    # TODO(hamaji): Currently, F.sum with axis=1 cannot be
    # backpropped properly.
    # log_prob = F.sum(log_softmax * t, axis=1)
    # self.batch_size = chainer.Variable(np.array(t.size, np.float32),
    #                                    name='batch_size')
    # return -F.sum(log_prob, axis=0) / self.batch_size
    log_prob = F.sum(log_softmax * t, axis=(0, 1))
    batch_size = chainer.Variable(self.xp.array(t.shape[0], np.float32),
                                  name='batch_size')
    self.extra_inputs = [batch_size]
    loss = -log_prob / batch_size
    loss.name = 'loss'
    return loss
Author: pfnet-research | Project: chainer-compiler | Lines: 23 | Source: resnet50.py

Example 4: all_log_prob

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import log_softmax [as alias]
def all_log_prob(self):
    with chainer.force_backprop_mode():
        if self.min_prob > 0:
            return F.log(self.all_prob)
        else:
            return F.log_softmax(self.beta * self.logits)
Author: chainer | Project: chainerrl | Lines: 8 | Source: distribution.py

Example 5: log_probs

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import log_softmax [as alias]
def log_probs(self):
    return F.log_softmax(self.logits)
Author: muupan | Project: async-rl | Lines: 4 | Source: policy_output.py

Example 6: predict

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import log_softmax [as alias]
def predict(self, s):
    """Predict single-label log probabilities.

    Args:
        s (any): Current (hidden, cell) states.
    Returns:
        (~chainer.Variable) log softmax vector
    """
    y = self.out(self.proj(s[2][0]))
    return F.log_softmax(y)
Author: dialogtekgeek | Project: DSTC6-End-to-End-Conversation-Modeling | Lines: 12 | Source: lstm_decoder.py

Example 7: kl_categorical

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import log_softmax [as alias]
def kl_categorical(p_logit, q_logit):
    if isinstance(p_logit, chainer.Variable):
        xp = cuda.get_array_module(p_logit.data)
    else:
        xp = cuda.get_array_module(p_logit)
    p = F.softmax(p_logit)
    _kl = F.sum(p * (F.log_softmax(p_logit) - F.log_softmax(q_logit)), 1)
    return F.sum(_kl) / xp.prod(xp.array(_kl.shape)) 
Author: takerum | Project: vat_chainer | Lines: 10 | Source: loss.py
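
A note on the normalization: _kl has shape (batch_size,), so xp.prod(xp.array(_kl.shape)) is simply the batch size, and the function returns the mean KL divergence over the batch. A quick hypothetical check, assuming kl_categorical as defined above is in scope:

import numpy as np

p_logit = np.random.randn(8, 5).astype(np.float32)
q_logit = np.random.randn(8, 5).astype(np.float32)

print(kl_categorical(p_logit, p_logit).array)  # 0.0: KL(p || p) vanishes
print(kl_categorical(p_logit, q_logit).array)  # >= 0 by Gibbs' inequality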

Example 8: cross_entropy

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import log_softmax [as alias]
def cross_entropy(logit, y):
    # y should be one-hot encoded probability
    return - F.sum(y * F.log_softmax(logit)) / logit.shape[0] 
Author: takerum | Project: vat_chainer | Lines: 5 | Source: loss.py

Example 9: entropy_y_x

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import log_softmax [as alias]
def entropy_y_x(p_logit):
    p = F.softmax(p_logit)
    return - F.sum(p * F.log_softmax(p_logit)) / p_logit.shape[0] 
Author: takerum | Project: vat_chainer | Lines: 5 | Source: loss.py
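
To see what this term rewards, a small hypothetical check (reusing entropy_y_x as defined above): a sharply peaked prediction has near-zero entropy, while equal logits give entropy log(n_classes), so minimizing this loss pushes the classifier toward confident outputs.

import numpy as np

peaked = np.array([[20.0, 0.0, 0.0]], dtype=np.float32)
uniform = np.zeros((1, 3), dtype=np.float32)   # equal logits

print(entropy_y_x(peaked).array)   # ~0.0
print(entropy_y_x(uniform).array)  # ~log(3) ≈ 1.0986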

Example 10: forward

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import log_softmax [as alias]
def forward(self, inputs, device):
    x, = inputs
    # The trailing comma makes the return value a one-element tuple.
    return functions.log_softmax(x, axis=self.axis),
Author: chainer | Project: chainer | Lines: 5 | Source: test_log_softmax.py

Example 11: setUp

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import log_softmax [as alias]
def setUp(self):
    BaseSoftTarget.setUp(self)
    self.t = functions.softmax(self.x).array
    self.expect = numpy.sum(-self.t * functions.log_softmax(self.x).array,
                            axis=1)
    if self.reduce == 'mean':
        self.expect = numpy.average(self.expect)
Author: chainer | Project: chainer | Lines: 9 | Source: test_softmax_cross_entropy.py

Example 12: dirichlet_likelihood

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import log_softmax [as alias]
def dirichlet_likelihood(weights, alpha=None):
    """Calculate the log likelihood of the observed topic proportions.

    The value is returned as a loss, so a lower value indicates more
    probable proportions under the Dirichlet prior.

    Args:
        weights (chainer.Variable): Unnormalized weight vector. The vector
            will be passed through a softmax function that will map the input
            onto a probability simplex.
        alpha (float): The Dirichlet concentration parameter. Alpha
            greater than 1.0 results in very dense topic weights such
            that each document belongs to many topics. Alpha < 1.0 results
            in sparser topic weights. The default is to set alpha to
            1.0 / n_topics, effectively enforcing the prior belief that a
            document belongs to only a few topics at once.

    Returns:
        ~chainer.Variable: Output loss variable.
    """
    if type(weights) is Variable:
        n_topics = weights.data.shape[1]
    else:
        n_topics = weights.W.data.shape[1]
    if alpha is None:
        alpha = 1.0 / n_topics
    if type(weights) is Variable:
        log_proportions = F.log_softmax(weights)
    else:
        log_proportions = F.log_softmax(weights.W)
    loss = (alpha - 1.0) * log_proportions
    return -F.sum(loss)
Author: cemoody | Project: lda2vec | Lines: 32 | Source: dirichlet_likelihood.py
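
A hypothetical usage sketch, assuming dirichlet_likelihood as defined above is in scope: with the default alpha = 1.0 / n_topics < 1, sparse (peaked) proportions yield a lower loss than dense (uniform) ones.

import numpy as np
from chainer import Variable

n_docs, n_topics = 5, 10
dense = Variable(np.zeros((n_docs, n_topics), dtype=np.float32))      # uniform proportions
sparse = Variable(np.eye(n_docs, n_topics, dtype=np.float32) * 10.0)  # peaked proportions

# The sparse proportions are more probable under the sparsity-inducing
# prior, so their loss is lower (more negative).
print(dirichlet_likelihood(dense).array)
print(dirichlet_likelihood(sparse).array)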

Example 13: decode

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import log_softmax [as alias]
def decode(self, sample, bow):
    """Decode latent document vectors back into word counts
    (n_docs, n_vocab).
    """
    logprob = F.log_softmax(self.embedding(sample))
    # This is equivalent to a softmax_cross_entropy where instead of
    # guessing 1 of N words we have repeated observations.
    # Normal softmax for guessing the next word is:
    #   t log softmax(x), where t is 0 or 1
    # Softmax for guessing word counts simply does the above more times,
    # so multiply by the count:
    #   count log softmax(x)
    loss = -F.sum(bow * logprob)
    return loss
Author: cemoody | Project: lda2vec | Lines: 16 | Source: nvdm.py
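
The comment above can be verified numerically: weighting log-probabilities by counts is the same as summing softmax cross entropy over each word repeated count times. A hypothetical sketch:

import numpy as np
import chainer.functions as F

logit = np.random.randn(1, 4).astype(np.float32)
bow = np.array([[2.0, 0.0, 1.0, 0.0]], dtype=np.float32)  # word 0 twice, word 2 once

weighted = -F.sum(bow * F.log_softmax(logit))

# The same loss written as three independent single-word observations:
words = np.array([0, 0, 2], dtype=np.int32)
per_word = F.softmax_cross_entropy(np.repeat(logit, 3, axis=0), words, reduce='no')
print(np.allclose(weighted.array, F.sum(per_word).array))  # True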

Example 14: forward

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import log_softmax [as alias]
def forward(self, ids, bow):
    bow, ids = utils.move(self.xp, bow, ids)
    proportions = self.proportions(ids)
    ld = dirichlet_likelihood(proportions)
    doc = F.matmul(F.softmax(proportions), self.factors())
    logp = F.dropout(self.embedding(doc))
    # loss = -F.sum(bow * F.log_softmax(logp))
    sources, targets, counts = [], [], []  # unused here
    lpi = F.sum(bow * F.log_softmax(logp), axis=1)
    loss = -F.sum(lpi)
    return loss, ld
Author: cemoody | Project: lda2vec | Lines: 13 | Source: lda.py

Example 15: kl_loss

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import log_softmax [as alias]
def kl_loss(xp, p_logit, q_logit):
    p = F.softmax(p_logit)
    _kl = F.sum(p * (F.log_softmax(p_logit) - F.log_softmax(q_logit)), 1)
    return F.sum(_kl) / xp.prod(xp.array(_kl.shape)) 
Author: chainer | Project: models | Lines: 6 | Source: net.py


Note: The chainer.functions.log_softmax examples in this article were compiled by 純淨天空 from open source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open source projects contributed by many developers; copyright of the source code remains with the original authors. Please consult each project's license before using or redistributing the code, and do not republish without permission.