Python functions.log_softmax Method Code Examples

This article collects typical usage examples of the chainer.functions.log_softmax method in Python. If you are wondering what functions.log_softmax does, how to call it, or where to find examples of it, the curated code samples below may help. You can also explore further usage examples from the chainer.functions module.


Fifteen code examples of the functions.log_softmax method are shown below, sorted by popularity by default.
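
Before the examples, here is a minimal sketch (my own, not taken from any of the projects below) of what chainer.functions.log_softmax computes: it is mathematically log(softmax(x)) along an axis (axis=1 by default), evaluated in a numerically stable way.

import numpy as np
import chainer.functions as F

# x is a hypothetical batch of logits; log_softmax(x) should match
# log(softmax(x)) up to floating-point error, without overflow risk.
x = np.array([[1.0, 2.0, 3.0], [10.0, 0.0, -10.0]], dtype=np.float32)
log_p = F.log_softmax(x)   # default axis=1
p = F.softmax(x)
assert np.allclose(log_p.data, np.log(p.data), atol=1e-6)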

Example 1: softmax_cross_entropy

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import log_softmax [as alias]
def softmax_cross_entropy(self, y, t):
        import numpy as np

        log_softmax = F.log_softmax(y)
        # SelectItem is not supported by onnx-chainer.
        # TODO(hamaji): Support it?
        # log_prob = F.select_item(log_softmax, t)

        # TODO(hamaji): Currently, F.sum with axis=1 cannot be
        # backpropped properly.
        # log_prob = F.sum(log_softmax * t, axis=1)
        # self.batch_size = chainer.Variable(np.array(t.size, np.float32),
        #                                    name='batch_size')
        # return -F.sum(log_prob, axis=0) / self.batch_size
        log_prob = F.sum(log_softmax * t, axis=(0, 1))
        batch_size = chainer.Variable(np.array(t.shape[0], np.float32),
                                      name='batch_size')
        self.extra_inputs = [batch_size]
        loss = -log_prob / batch_size
        loss.name = 'loss'
        return loss 
Developer: pfnet-research, Project: chainer-compiler, Lines: 23, Source: nin.py
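
A hedged usage sketch for Example 1 (the inputs and shapes here are my own assumptions, not from the chainer-compiler project): the method expects t to be a one-hot or soft target distribution, since it reduces log_softmax(y) * t over both the batch and class axes. For one-hot targets it should agree with F.softmax_cross_entropy on integer labels.

import numpy as np
import chainer
import chainer.functions as F

# Hypothetical data: 4 samples, 10 classes, one-hot targets.
y = chainer.Variable(np.random.randn(4, 10).astype(np.float32))
labels = np.array([3, 1, 4, 1], dtype=np.int32)
t = np.eye(10, dtype=np.float32)[labels]

# The same reduction as Example 1: sum over batch and classes,
# then divide by the batch size.
loss = -F.sum(F.log_softmax(y) * t, axis=(0, 1)) / t.shape[0]
ref = F.softmax_cross_entropy(y, labels)  # reduce='mean' by default
assert np.allclose(loss.data, ref.data, atol=1e-5)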

Example 2: forward

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import log_softmax [as alias]
def forward(self, x, t):
        xp = cuda.get_array_module(x)
        y = self.predictor(x)
        log_softmax = F.log_softmax(y)
        # SelectItem is not supported by onnx-chainer.
        # TODO(hamaji): Support it?
        # log_prob = F.select_item(log_softmax, t)

        batch_size = chainer.Variable(xp.array(t.size, xp.float32),
                                      name='batch_size')
        self.extra_inputs = [batch_size]
        # TODO(hamaji): Currently, F.sum with axis=1 cannot be
        # backpropped properly.
        # log_prob = F.sum(log_softmax * t, axis=1)
        # return -F.sum(log_prob, axis=0) / self.batch_size
        log_prob = F.sum(log_softmax * t, axis=(0, 1))
        loss = -log_prob / batch_size
        reporter.report({'loss': loss}, self)
        if self.compute_accuracy:
            acc = accuracy.accuracy(y, xp.argmax(t, axis=1))
            reporter.report({'accuracy': acc}, self)
        loss.name = 'loss'
        return loss 
Developer: pfnet-research, Project: chainer-compiler, Lines: 25, Source: gen_mnist_mlp.py

Example 3: softmax_cross_entropy

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import log_softmax [as alias]
def softmax_cross_entropy(self, y, t):
        import numpy as np

        log_softmax = F.log_softmax(y)
        # SelectItem is not supported by onnx-chainer.
        # TODO(hamaji): Support it?
        # log_prob = F.select_item(log_softmax, t)

        # TODO(hamaji): Currently, F.sum with axis=1 cannot be
        # backpropped properly.
        # log_prob = F.sum(log_softmax * t, axis=1)
        # self.batch_size = chainer.Variable(np.array(t.size, np.float32),
        #                                    name='batch_size')
        # return -F.sum(log_prob, axis=0) / self.batch_size
        log_prob = F.sum(log_softmax * t, axis=(0, 1))
        batch_size = chainer.Variable(self.xp.array(t.shape[0], np.float32),
                                      name='batch_size')
        self.extra_inputs = [batch_size]
        loss = -log_prob / batch_size
        loss.name = 'loss'
        return loss 
Developer: pfnet-research, Project: chainer-compiler, Lines: 23, Source: resnet50.py

Example 4: all_log_prob

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import log_softmax [as alias]
def all_log_prob(self):
        with chainer.force_backprop_mode():
            if self.min_prob > 0:
                return F.log(self.all_prob)
            else:
                return F.log_softmax(self.beta * self.logits) 
Developer: chainer, Project: chainerrl, Lines: 8, Source: distribution.py

Example 5: log_probs

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import log_softmax [as alias]
def log_probs(self):
        return F.log_softmax(self.logits) 
Developer: muupan, Project: async-rl, Lines: 4, Source: policy_output.py

Example 6: predict

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import log_softmax [as alias]
def predict(self, s):
        """Predict single-label log probabilities

        Args:
            s (any): Current (hidden, cell) states.
        Returns:
            (~chainer.Variable) log softmax vector
        """
        y = self.out(self.proj(s[2][0]))
        return F.log_softmax(y) 
Developer: dialogtekgeek, Project: DSTC6-End-to-End-Conversation-Modeling, Lines: 12, Source: lstm_decoder.py

Example 7: kl_categorical

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import log_softmax [as alias]
def kl_categorical(p_logit, q_logit):
    if isinstance(p_logit, chainer.Variable):
        xp = cuda.get_array_module(p_logit.data)
    else:
        xp = cuda.get_array_module(p_logit)
    p = F.softmax(p_logit)
    _kl = F.sum(p * (F.log_softmax(p_logit) - F.log_softmax(q_logit)), 1)
    return F.sum(_kl) / xp.prod(xp.array(_kl.shape)) 
Developer: takerum, Project: vat_chainer, Lines: 10, Source: loss.py
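
A hedged usage sketch for kl_categorical (the inputs are my own, assuming the function from Example 7 is in scope): the returned value is the mean per-sample KL divergence, so it is non-negative and zero when both logits induce the same distribution.

import numpy as np

# Assumes kl_categorical from Example 7 is in scope.
p_logit = np.random.randn(8, 5).astype(np.float32)
q_logit = np.random.randn(8, 5).astype(np.float32)

kl = kl_categorical(p_logit, q_logit)
assert kl.data >= 0  # KL divergence is non-negative
assert np.allclose(kl_categorical(p_logit, p_logit).data, 0.0, atol=1e-6)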

Example 8: cross_entropy

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import log_softmax [as alias]
def cross_entropy(logit, y):
    # y should be one-hot encoded probability
    return - F.sum(y * F.log_softmax(logit)) / logit.shape[0] 
Developer: takerum, Project: vat_chainer, Lines: 5, Source: loss.py
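
A small worked check for Example 8, with my own toy numbers: for a uniform two-class prediction, log_softmax gives log(0.5) for both classes, so any soft target that sums to 1 yields a loss of -log(0.5) = log(2).

import numpy as np

# Assumes cross_entropy from Example 8 is in scope.
logit = np.array([[0.0, 0.0]], dtype=np.float32)  # uniform prediction
y = np.array([[0.25, 0.75]], dtype=np.float32)    # soft target
loss = cross_entropy(logit, y)
# -(0.25 + 0.75) * log(0.5) = log(2) ≈ 0.6931
assert np.allclose(loss.data, np.log(2), atol=1e-6)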

Example 9: entropy_y_x

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import log_softmax [as alias]
def entropy_y_x(p_logit):
    p = F.softmax(p_logit)
    return - F.sum(p * F.log_softmax(p_logit)) / p_logit.shape[0] 
Developer: takerum, Project: vat_chainer, Lines: 5, Source: loss.py
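
entropy_y_x above is the average entropy of the predicted class distributions. A hedged check with my own inputs: uniform logits give the maximum possible entropy, log(n_classes).

import numpy as np

# Assumes entropy_y_x from Example 9 is in scope.
p_logit = np.zeros((3, 4), dtype=np.float32)  # uniform over 4 classes
ent = entropy_y_x(p_logit)
assert np.allclose(ent.data, np.log(4), atol=1e-6)  # H = log(4) per sample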

Example 10: forward

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import log_softmax [as alias]
def forward(self, inputs, device):
        x, = inputs
        return functions.log_softmax(x, axis=self.axis), 
Developer: chainer, Project: chainer, Lines: 5, Source: test_log_softmax.py

Example 11: setUp

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import log_softmax [as alias]
def setUp(self):
        BaseSoftTarget.setUp(self)
        self.t = functions.softmax(self.x).array
        self.expect = numpy.sum(-self.t * functions.log_softmax(self.x).array,
                                axis=1)
        if self.reduce == 'mean':
            self.expect = numpy.average(self.expect) 
Developer: chainer, Project: chainer, Lines: 9, Source: test_softmax_cross_entropy.py

Example 12: dirichlet_likelihood

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import log_softmax [as alias]
def dirichlet_likelihood(weights, alpha=None):
    """ Calculate the log likelihood of the observed topic proportions.
    A negative likelihood is more likely than a negative likelihood.

    Args:
        weights (chainer.Variable): Unnormalized weight vector. The vector
            will be passed through a softmax function that will map the input
            onto a probability simplex.
        alpha (float): The Dirichlet concentration parameter. Alpha
            greater than 1.0 results in very dense topic weights such
            that each document belongs to many topics. Alpha < 1.0 results
            in sparser topic weights. The default is to set alpha to
            1.0 / n_topics, effectively enforcing the prior belief that a
            document belong to very topics at once.

    Returns:
        ~chainer.Variable: Output loss variable.
    """
    if type(weights) is Variable:
        n_topics = weights.data.shape[1]
    else:
        n_topics = weights.W.data.shape[1]
    if alpha is None:
        alpha = 1.0 / n_topics
    if type(weights) is Variable:
        log_proportions = F.log_softmax(weights)
    else:
        log_proportions = F.log_softmax(weights.W)
    loss = (alpha - 1.0) * log_proportions
    return -F.sum(loss) 
Developer: cemoody, Project: lda2vec, Lines: 32, Source: dirichlet_likelihood.py
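
A hedged usage sketch for Example 12 (the shapes and values are my own assumptions): with the default alpha = 1/n_topics < 1, peaked (sparse) topic proportions incur a lower loss than near-uniform ones, which is exactly the sparsity-inducing prior the docstring describes.

import numpy as np
import chainer

# Assumes dirichlet_likelihood from Example 12 is in scope.
# 5 hypothetical documents over 3 topics; alpha defaults to 1/3.
uniform = chainer.Variable(np.zeros((5, 3), dtype=np.float32))
peaked = chainer.Variable(np.array([[8.0, 0.0, 0.0]] * 5, dtype=np.float32))
# Sparse proportions are favored (lower loss) under alpha < 1.
assert dirichlet_likelihood(peaked).data < dirichlet_likelihood(uniform).data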

Example 13: decode

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import log_softmax [as alias]
def decode(self, sample, bow):
        """ Decode latent document vectors back into word counts
        (n_docs, n_vocab).
        """
        logprob = F.log_softmax(self.embedding(sample))
        # This is equivalent to a softmax_cross_entropy where instead of
        # guessing 1 of N words we have repeated observations
        # Normal softmax for guessing the next word is:
        # t log softmax(x), where t is 0 or 1
        # Softmax for guessing word counts is simply doing
        # the above more times, so multiply by the count
        # count log softmax(x)
        loss = -F.sum(bow * logprob)
        return loss 
Developer: cemoody, Project: lda2vec, Lines: 16, Source: nvdm.py
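
The comment block in decode claims that multiplying log-probabilities by counts is equivalent to observing each word count-many times. A quick numeric check of that claim, with my own toy numbers:

import numpy as np

# Toy check of the "repeated observations" argument in Example 13.
logprob = np.log(np.array([0.2, 0.3, 0.5]))  # log word probabilities
bow = np.array([2.0, 0.0, 3.0])              # word counts in one document
weighted = -np.sum(bow * logprob)
# Observing word 0 twice and word 2 three times, one at a time:
repeated = -(logprob[0] + logprob[0] + logprob[2] + logprob[2] + logprob[2])
assert np.allclose(weighted, repeated)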

Example 14: forward

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import log_softmax [as alias]
def forward(self, ids, bow):
        bow, ids = utils.move(self.xp, bow, ids)
        proportions = self.proportions(ids)
        ld = dirichlet_likelihood(proportions)
        doc = F.matmul(F.softmax(proportions), self.factors())
        logp = F.dropout(self.embedding(doc))
        # loss = -F.sum(bow * F.log_softmax(logp))
        lpi = F.sum(bow * F.log_softmax(logp), axis=1)
        loss = -F.sum(lpi)
        return loss, ld 
Developer: cemoody, Project: lda2vec, Lines: 13, Source: lda.py

Example 15: kl_loss

# Required import: from chainer import functions [as alias]
# Or: from chainer.functions import log_softmax [as alias]
def kl_loss(xp, p_logit, q_logit):
    p = F.softmax(p_logit)
    _kl = F.sum(p * (F.log_softmax(p_logit) - F.log_softmax(q_logit)), 1)
    return F.sum(_kl) / xp.prod(xp.array(_kl.shape)) 
Developer: chainer, Project: models, Lines: 6, Source: net.py


Note: The chainer.functions.log_softmax examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors; consult each project's license before distributing or reusing the code. Do not reproduce this article without permission.