

Python functional.log_softmax Function Code Examples

This article collects typical usage examples of the torch.nn.functional.log_softmax function in Python. If you are wondering how to call log_softmax, what arguments it takes, or what it looks like in real projects, the curated examples below should help.


Fifteen log_softmax code examples are shown below, sorted by popularity by default.
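
Before diving into the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of what log_softmax does: it computes log(softmax(x)) along the given dimension in a numerically stable way, so the result can be fed directly to losses such as F.nll_loss.

import torch
import torch.nn.functional as F

x = torch.randn(2, 5)                        # 2 rows of 5 class scores
log_probs = F.log_softmax(x, dim=1)          # log-probabilities along the class dimension
print(torch.allclose(log_probs.exp().sum(dim=1), torch.ones(2)))   # True: each row sums to 1
print(torch.allclose(log_probs, torch.log(F.softmax(x, dim=1))))   # True, but log_softmax is more stable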

Example 1: forward

 def forward(self, x, y, z):
     x = F.relu(F.max_pool2d(self.conv1(x), 2))
     x = F.relu(F.max_pool2d(self.conv2(x), 2))
     x = x.view(-1, 1600)
     x = F.relu(self.fc1(x))
     x = F.dropout(x, training=self.training)
     x = self.fc2(x)
     return F.log_softmax(x, dim=1), F.log_softmax(x, dim=1), F.log_softmax(x, dim=1)
Developer: BrianDo2005, Project: torchsample, Lines: 8, Source: multi_input_multi_target.py

Example 2: forward

 def forward(self, x):
     in_size = x.size(0)
     x = F.relu(self.mp(self.conv1(x)))
     x = F.relu(self.mp(self.conv2(x)))
     x = x.view(in_size, -1)  # flatten the tensor
     x = self.fc(x)
     return F.log_softmax(x, dim=1)
Developer: jiayouwyhit, Project: PyTorchZeroToAll, Lines: 7, Source: 10_1_cnn_mnist.py
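
A minimal sketch (not from the repository above) of how such log_softmax outputs are typically consumed: F.nll_loss expects log-probabilities, so pairing it with log_softmax is equivalent to calling F.cross_entropy on the raw logits.

import torch
import torch.nn.functional as F

logits = torch.randn(4, 10)                  # hypothetical batch of 4 samples, 10 classes
targets = torch.randint(0, 10, (4,))
loss_a = F.nll_loss(F.log_softmax(logits, dim=1), targets)
loss_b = F.cross_entropy(logits, targets)
print(torch.allclose(loss_a, loss_b))        # True: the two formulations match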

Example 3: forward

    def forward(self, fc_feats, att_feats, seq):
        batch_size = fc_feats.size(0)
        state = self.init_hidden(batch_size)
        outputs = []

        for i in range(seq.size(1)):
            if i == 0:
                xt = self.img_embed(fc_feats)
            else:
                if self.training and i >= 2 and self.ss_prob > 0.0: # otherwise no need to sample
                    sample_prob = fc_feats.data.new(batch_size).uniform_(0, 1)
                    sample_mask = sample_prob < self.ss_prob
                    if sample_mask.sum() == 0:
                        it = seq[:, i-1].clone()
                    else:
                        sample_ind = sample_mask.nonzero().view(-1)
                        it = seq[:, i-1].data.clone()
                        #prob_prev = torch.exp(outputs[-1].data.index_select(0, sample_ind)) # fetch prev distribution: shape Nx(M+1)
                        #it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1))
                        prob_prev = torch.exp(outputs[-1].data) # fetch prev distribution: shape Nx(M+1)
                        it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind))
                        it = Variable(it, requires_grad=False)
                else:
                    it = seq[:, i-1].clone()
                # break if all the sequences end
                if i >= 2 and seq[:, i-1].data.sum() == 0:
                    break
                xt = self.embed(it)

            output, state = self.core(xt, state)
            output = F.log_softmax(self.logit(output), dim=1)
            outputs.append(output)

        return torch.cat([_.unsqueeze(1) for _ in outputs[1:]], 1).contiguous()
Developer: littlebadRobot, Project: AI_challenger_Chinese_Caption, Lines: 34, Source: FCModel.py

Example 4: forward

 def forward(self, sentence):
     embeds = self.word_embeddings(sentence)  # sentence must be a list of word_ixs
     lstm_out, self.hidden = self.lstm(embeds.view(len(sentence), 1, -1), self.hidden)
     # print(lstm_out.view(len(sentence), -1).shape)  # torch.Size([5, 6]) or torch.Size([4, 6])
     tag_space = self.hidden2tag(lstm_out.view(len(sentence), -1))  # (seq_len, tagset_size)
     tag_scores = F.log_softmax(tag_space, dim=1)
     return tag_scores
Developer: coder352, Project: shellscript, Lines: 7, Source: l34_LSTM_Part-of-Speech-Tagging.py

Example 5: cross_entropy2d

def cross_entropy2d(input, target, weight=None, size_average=True):
    """
    Function to compute pixelwise cross-entropy for 2D image. This is the segmentation loss.
    Args:
        input: input tensor of shape (minibatch x num_channels x h x w)
        target: 2D label map of shape (minibatch x h x w)
        weight (optional): tensor of size 'C' specifying the weights to be given to each class
        size_average (optional): boolean value indicating whether the NLL loss has to be normalized
            by the number of pixels in the image 
    """
    
    # input: (n, c, h, w), target: (n, h, w)
    n, c, h, w = input.size()
    
    # log_p: (n, c, h, w)
    log_p = F.log_softmax(input, dim=1)
    
    # log_p: (n*h*w, c)
    log_p = log_p.transpose(1, 2).transpose(2, 3).contiguous().view(-1, c)
    try:
        log_p = log_p[target.view(n, h, w, 1).repeat(1, 1, 1, c) >= 0]
    except Exception:
        print("Exception: ", target.size())
    log_p = log_p.view(-1, c)
    
    # target: (n*h*w,)
    mask = target >= 0
    target = target[mask]
    target = torch.squeeze(target)
    loss = F.nll_loss(log_p, target, weight=weight, size_average=False)
    if size_average:
        loss /= mask.data.sum()

    return loss
Developer: Wizaron, Project: LSD-seg, Lines: 34, Source: utils.py

Example 6: predict

    def predict(self, inputs):
        classifier = self.nets.classifier

        outputs = classifier(inputs)
        predicted = torch.max(F.log_softmax(outputs, dim=1).data, 1)[1]

        return predicted
Developer: rdevon, Project: cortex, Lines: 7, Source: demo_classifier.py
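
A side note on the predict snippet above: because log_softmax is monotonic, taking the argmax of the raw classifier outputs yields the same predictions; the log_softmax there mainly matters if you also want log-probabilities. A quick hedged check on hypothetical outputs:

import torch
import torch.nn.functional as F

outputs = torch.randn(4, 10)                 # hypothetical classifier outputs
same = torch.equal(outputs.argmax(dim=1),
                   F.log_softmax(outputs, dim=1).argmax(dim=1))
print(same)                                  # True: log_softmax never changes the argmax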

Example 7: calc_loss

def calc_loss(batch, net, tgt_net, gamma, device="cpu", save_prefix=None):
    states, actions, rewards, dones, next_states = common.unpack_batch(batch)
    batch_size = len(batch)

    states_v = torch.tensor(states).to(device)
    actions_v = torch.tensor(actions).to(device)
    next_states_v = torch.tensor(next_states).to(device)

    # next state distribution
    next_distr_v, next_qvals_v = tgt_net.both(next_states_v)
    next_actions = next_qvals_v.max(1)[1].data.cpu().numpy()
    next_distr = tgt_net.apply_softmax(next_distr_v).data.cpu().numpy()

    next_best_distr = next_distr[range(batch_size), next_actions]
    dones = dones.astype(bool)

    # project our distribution using Bellman update
    proj_distr = common.distr_projection(next_best_distr, rewards, dones, Vmin, Vmax, N_ATOMS, gamma)

    # calculate net output
    distr_v = net(states_v)
    state_action_values = distr_v[range(batch_size), actions_v.data]
    state_log_sm_v = F.log_softmax(state_action_values, dim=1)
    proj_distr_v = torch.tensor(proj_distr).to(device)

    if save_prefix is not None:
        pred = F.softmax(state_action_values, dim=1).data.cpu().numpy()
        save_transition_images(batch_size, pred, proj_distr, next_best_distr, dones, rewards, save_prefix)

    loss_v = -state_log_sm_v * proj_distr_v
    return loss_v.sum(dim=1).mean()
Developer: dhaopku, Project: Deep-Reinforcement-Learning-Hands-On, Lines: 31, Source: 07_dqn_distrib.py
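
The final lines of calc_loss form the cross-entropy between the projected target distribution and the network's predicted log-distribution over atoms, i.e. -sum_i p_i * log q_i. A minimal stand-alone illustration with hypothetical shapes (the Bellman projection itself is not reproduced here):

import torch
import torch.nn.functional as F

batch_size, n_atoms = 4, 51                  # hypothetical sizes
logits = torch.randn(batch_size, n_atoms)    # per-atom scores for the chosen actions
proj_distr = F.softmax(torch.randn(batch_size, n_atoms), dim=1)   # stand-in target distribution
loss = (-F.log_softmax(logits, dim=1) * proj_distr).sum(dim=1).mean()
print(loss.item())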

Example 8: forward

 def forward(self, sentence):
     embeds = self.word_embeddings(sentence)
     lstm_out, self.hidden = self.lstm(
         embeds.view(len(sentence), 1, -1), self.hidden)
     tag_space = self.hidden2tag(lstm_out.view(len(sentence), -1))
     tag_scores = F.log_softmax(tag_space, dim=1)
     return tag_scores
Developer: Biocodings, Project: tutorials, Lines: 7, Source: sequence_models_tutorial.py

Example 9: _decode_step

    def _decode_step(self, input_list, state_list, k=1,
                     feed_all_timesteps=False,
                     remove_unknown=False,
                     get_attention=False):

        view_shape = (-1, 1) if self.decoder.batch_first else (1, -1)
        time_dim = 1 if self.decoder.batch_first else 0
        device = next(self.decoder.parameters()).device

        # For recurrent models, the last input frame is all we care about,
        # use feed_all_timesteps whenever the whole input needs to be fed
        if feed_all_timesteps:
            inputs = [torch.tensor(inp, device=device, dtype=torch.long)
                      for inp in input_list]
            inputs = batch_sequences(
                inputs, device=device, batch_first=self.decoder.batch_first)[0]

        else:
            last_tokens = [inputs[-1] for inputs in input_list]
            inputs = torch.stack(last_tokens).view(*view_shape)

        states = State().from_list(state_list)
        logits, new_states = self.decode(
            inputs, states, get_attention=get_attention)
        # use only last prediction
        logits = logits.select(time_dim, -1).contiguous()
        if remove_unknown:
            # Remove possibility of unknown
            logits[:, UNK].fill_(-float('inf'))
        logprobs = log_softmax(logits, dim=1)
        logprobs, words = logprobs.topk(k, 1)
        new_states_list = [new_states[i] for i in range(len(input_list))]
        return words, logprobs, new_states_list
Developer: yangkexin, Project: seq2seq.pytorch, Lines: 33, Source: seq2seq_base.py
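
The end of _decode_step keeps only the top-k log-probabilities per hypothesis, which is the core of beam search. A minimal illustration of that pattern on hypothetical logits (vocabulary size chosen arbitrarily):

import torch
from torch.nn.functional import log_softmax

logits = torch.randn(3, 1000)                # 3 hypotheses over a hypothetical 1000-word vocabulary
logprobs, words = log_softmax(logits, dim=1).topk(5, 1)
print(words.shape, logprobs.shape)           # torch.Size([3, 5]) for both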

Example 10: inference

    def inference(self, unary, num_iter=5):

        if not self.conf['logsoftmax']:
            lg_unary = torch.log(unary)
            prediction = exp_and_normalize(lg_unary, dim=1)
        else:
            lg_unary = nnfun.log_softmax(unary, dim=1, _stacklevel=5)
            if self.conf['softmax'] and False:
                prediction = exp_and_normalize(lg_unary, dim=1)
            else:
                prediction = lg_unary

        for i in range(num_iter):
            message = self.kernel.compute(prediction)

            if self.comp is not None:
                # message_r = message.view(tuple([1]) + message.shape)
                comp = self.comp(message)
                message = message + comp

            if self.weight is None:
                prediction = lg_unary + message
            else:
                prediction = (self.unary_weight - self.weight) * lg_unary + \
                    self.weight * message

            if not i == num_iter - 1 or self.final_softmax:
                if self.conf['softmax']:
                    prediction = exp_and_normalize(prediction, dim=1)

        return prediction
Developer: aaasss0636, Project: ConvCRF, Lines: 31, Source: convcrf.py

Example 11: forward

 def forward(self, x):
     x = F.max_pool2d(F.relu(self.conv1(x)), 2)
     x = F.max_pool2d(F.relu(self.conv2(x)), 2)
     x = x.view(-1, 64 * 7 * 7)  # reshape Variable
     x = F.relu(self.fc1(x))
     x = self.fc2(x)
     return F.log_softmax(x, dim=-1)
Developer: limin24kobe, Project: cleverhans, Lines: 7, Source: mnist_tutorial_pytorch.py

Example 12: forward

 def forward(self, x):
     x = F.relu(F.max_pool2d(self.conv1(x), 2))
     x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
     x = x.view(-1, 320)
     x = F.relu(self.fc1(x))
     x = F.dropout(x, training=self.training)
     return F.log_softmax(self.fc2(x), dim=1)
Developer: nikcheerla, Project: mitosis-detection, Lines: 7, Source: pytorch_cnn.py

Example 13: f_next

    def f_next(self, ctx_dict, y, h):
        # Get hidden states from the first decoder (purely cond. on LM)
        h1 = self.dec0(y, h)

        # Apply attention over multiple modalities
        txt_alpha_t, txt_z_t = self.txt_att(h1.unsqueeze(0), *ctx_dict['txt'])
        img_alpha_t, img_z_t = self.img_att(h1.unsqueeze(0), *ctx_dict['image'])

        # Context will double dimensionality if fusion_type is concat
        # final_z_t should be compatible with hidden_size
        final_z_t = self.fusion(txt_z_t, img_z_t)

        h2 = self.dec1(final_z_t, h1)

        # This is a bottleneck to avoid going from H to V directly
        logit = self.hid2out(h2)

        # Apply dropout if any
        if self.dropout_out > 0:
            logit = self.do_out(logit)

        # Transform logit to T*B*V (V: vocab_size)
        # Compute log_softmax over token dim
        log_p = -F.log_softmax(self.out2prob(logit), dim=-1)

        # Return log probs and new hidden states
        return log_p, h2
Developer: codealphago, Project: nmtpytorch, Lines: 27, Source: condmm_decoder.py

Example 14: masked_cross_entropy

def masked_cross_entropy(logits, target, length):
    """
    Args:
        logits: A Variable containing a FloatTensor of size
            (batch, max_len, num_classes) which contains the
            unnormalized probability for each class.
        target: A Variable containing a LongTensor of size
            (batch, max_len) which contains the index of the true
            class for each corresponding step.
        length: A Variable containing a LongTensor of size (batch,)
            which contains the length of each data in a batch.

    Returns:
        loss: An average loss value masked by the length.
    """
    length = Variable(torch.LongTensor(length)).cuda()

    # logits_flat: (batch * max_len, num_classes)
    logits_flat = logits.view(-1, logits.size(-1))
    # log_probs_flat: (batch * max_len, num_classes)
    log_probs_flat = functional.log_softmax(logits_flat, dim=1)
    # target_flat: (batch * max_len, 1)
    target_flat = target.view(-1, 1)
    # losses_flat: (batch * max_len, 1)
    losses_flat = -torch.gather(log_probs_flat, dim=1, index=target_flat)
    # losses: (batch, max_len)
    losses = losses_flat.view(*target.size())
    # mask: (batch, max_len)
    mask = sequence_mask(sequence_length=length, max_len=target.size(1))
    losses = losses * mask.float()
    loss = losses.sum() / length.float().sum()
    return loss
Developer: Cadene, Project: practical-pytorch, Lines: 33, Source: masked_cross_entropy.py
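
masked_cross_entropy above relies on a sequence_mask helper that is not shown in the snippet. A minimal sketch of what such a helper could look like (an assumption for illustration, not the project's actual implementation):

import torch

def sequence_mask(sequence_length, max_len=None):
    # Hypothetical helper: True for positions before each sequence's length, False for padding.
    if max_len is None:
        max_len = int(sequence_length.max().item())
    positions = torch.arange(max_len, device=sequence_length.device)
    return positions.unsqueeze(0) < sequence_length.unsqueeze(1)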

Example 15: forward

 def forward(self, **sentence):
     input_words = sentence['input_words']
     embeds = self.word_embeddings(input_words)
     lstm_out, self.hidden = self.lstm(embeds.view(len(input_words), 1, -1))
     tag_space = self.hidden2tag(lstm_out.view(len(input_words), -1))
     tag_scores = F.log_softmax(tag_space, dim=1)
     return tag_scores
Developer: zwt0204, Project: Medical-named-entity-recognition-for-ccks2017, Lines: 7, Source: LstmModel.py


Note: the torch.nn.functional.log_softmax examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective authors, who retain copyright of the source code; please consult each project's license before redistributing or reusing the code. Do not reproduce this article without permission.