

Python functional.softmax Method Code Examples

This article collects typical usage examples of the Python method torch.nn.functional.softmax. If you are unsure how to call functional.softmax, or are looking for working examples, the curated code samples below may help. You can also browse further usage examples for the enclosing torch.nn.functional module.


Fifteen code examples of functional.softmax are shown below, sorted by popularity by default.
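
Before the project-level examples, here is a minimal self-contained sketch of what F.softmax does (nothing below is taken from the examples; it only assumes PyTorch is installed):

import torch
import torch.nn.functional as F

logits = torch.tensor([[1.0, 2.0, 3.0],
                       [1.0, 1.0, 1.0]])

# Softmax normalizes along the given dimension; with dim=1 each row
# becomes a probability distribution summing to 1.
probs = F.softmax(logits, dim=1)
print(probs)         # tensor([[0.0900, 0.2447, 0.6652], [0.3333, 0.3333, 0.3333]])
print(probs.sum(1))  # tensor([1., 1.])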

Example 1: test_tf2torch

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import softmax [as alias]
def test_tf2torch(tf_model, torch_model, input_shape, num_rand_inp=10, precision=10**-2):
    """
    Checks consistency of torch and tf models before generating attacks
    :param tf_model: copied tf model
    :param torch_model: torch model to be transferred to tf
    :param input_shape: Format Channels X Height X Width
    :param num_rand_inp: number of random inputs to test consistency on
    :param precision: per-input tolerance on the relative difference between outputs
    :return: raises AssertionError if the outputs are not consistent
    """
    torch_model.eval()
    rand_x = torch.rand(num_rand_inp, input_shape[0], input_shape[1], input_shape[2])
    tf_op = tf_model.predict(rand_x.numpy())
    torch_op = F.softmax(torch_model(Variable(rand_x)), dim=1).data.numpy()
    assert tf_op.shape == torch_op.shape, "Mismatch of dimensions of the outputs from tf and torch models"
    assert np.linalg.norm(torch_op - tf_op) / np.linalg.norm(torch_op) <= num_rand_inp * precision, \
        "Outputs of the torch and tensorflow models do not agree"
Developer: StephanZheng | Project: neural-fingerprinting | Lines: 19 | Source: util.py

Example 2: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import softmax [as alias]
def forward(self, encoding, lengths):
        lengths = Variable(torch.LongTensor(lengths))
        if torch.cuda.is_available():
            lengths = lengths.cuda()
        if self.method == 'mean':
            encoding_pad = nn.utils.rnn.pack_padded_sequence(encoding, lengths.data.tolist(), batch_first=True)
            encoding = nn.utils.rnn.pad_packed_sequence(encoding_pad, batch_first=True, padding_value=0)[0]
            lengths = lengths.float().view(-1, 1)
            return encoding.sum(1) / lengths, None
        elif self.method == 'max':
            return encoding.max(1)  # [bsz, in_dim], [bsz, in_dim] (position)
        elif self.method == 'attn':
            size = encoding.size()  # [bsz, len, in_dim]
            x_flat = encoding.contiguous().view(-1, size[2])  # [bsz*len, in_dim]
            hbar = self.tanh(self.ws1(x_flat))  # [bsz*len, attn_hid]
            alphas = self.ws2(hbar).view(size[0], size[1])  # [bsz, len]
            alphas = nn.utils.rnn.pack_padded_sequence(alphas, lengths.data.tolist(), batch_first=True)
            alphas = nn.utils.rnn.pad_packed_sequence(alphas, batch_first=True, padding_value=-1e8)[0]
            alphas = functional.softmax(alphas, dim=1)  # [bsz, len]
            alphas = alphas.view(size[0], 1, size[1])  # [bsz, 1, len]
            return torch.bmm(alphas, encoding).squeeze(1), alphas  # [bsz, in_dim], [bsz, len]
        elif self.method == 'last':
            return torch.stack([encoding[i][lengths[i] - 1] for i in range(encoding.size(0))], dim=0), None  # [bsz, in_dim]
Developer: ExplorerFreda | Project: VSE-C | Lines: 25 | Source: model.py

Example 3: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import softmax [as alias]
def forward(self, query, key):
        querys = self.W_query(query)  # [N, T_q, num_units]
        keys = self.W_key(key)  # [N, T_k, num_units]
        values = self.W_value(key)

        split_size = self.num_units // self.num_heads
        querys = torch.stack(torch.split(querys, split_size, dim=2), dim=0)  # [h, N, T_q, num_units/h]
        keys = torch.stack(torch.split(keys, split_size, dim=2), dim=0)  # [h, N, T_k, num_units/h]
        values = torch.stack(torch.split(values, split_size, dim=2), dim=0)  # [h, N, T_k, num_units/h]

        # score = softmax(QK^T / (d_k ** 0.5))
        scores = torch.matmul(querys, keys.transpose(2, 3))  # [h, N, T_q, T_k]
        scores = scores / (self.key_dim ** 0.5)
        scores = F.softmax(scores, dim=3)

        # out = score * V
        out = torch.matmul(scores, values)  # [h, N, T_q, num_units/h]
        out = torch.cat(torch.split(out, 1, dim=0), dim=3).squeeze(0)  # [N, T_q, num_units]

        return out 
Developer: KinglittleQ | Project: GST-Tacotron | Lines: 22 | Source: GST.py
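
Example 3 shows only the forward pass. To make the shapes concrete, here is a hypothetical enclosing module with a compact forward that is equivalent to the version above (view/transpose instead of split/stack); the constructor signature is inferred from the attribute names and is an assumption, not the GST-Tacotron original:

import torch
import torch.nn as nn
import torch.nn.functional as F

class MultiHeadAttentionSketch(nn.Module):
    def __init__(self, query_dim, key_dim, num_units, num_heads):
        super().__init__()
        assert num_units % num_heads == 0
        self.num_units, self.num_heads, self.key_dim = num_units, num_heads, key_dim
        self.W_query = nn.Linear(query_dim, num_units, bias=False)
        self.W_key = nn.Linear(key_dim, num_units, bias=False)
        self.W_value = nn.Linear(key_dim, num_units, bias=False)

    def forward(self, query, key):
        N, T_q, T_k = query.size(0), query.size(1), key.size(1)
        h, d = self.num_heads, self.num_units // self.num_heads
        # Project, then reshape to [N, h, T, d]; same effect as the
        # split/stack in Example 3, with the head dim after batch.
        q = self.W_query(query).view(N, T_q, h, d).transpose(1, 2)
        k = self.W_key(key).view(N, T_k, h, d).transpose(1, 2)
        v = self.W_value(key).view(N, T_k, h, d).transpose(1, 2)
        scores = torch.matmul(q, k.transpose(-2, -1)) / self.key_dim ** 0.5
        weights = F.softmax(scores, dim=-1)  # normalize over T_k
        out = torch.matmul(weights, v)       # [N, h, T_q, d]
        return out.transpose(1, 2).reshape(N, T_q, self.num_units)

attn = MultiHeadAttentionSketch(query_dim=64, key_dim=80, num_units=128, num_heads=4)
out = attn(torch.randn(2, 5, 64), torch.randn(2, 7, 80))
print(out.shape)  # torch.Size([2, 5, 128])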

Example 4: get_entropy

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import softmax [as alias]
def get_entropy(self, pred, label):
        n, c, h, w = pred.size()
        label = label.unsqueeze(3).long()
        pred = F.softmax(pred, 1).permute(0, 2, 3, 1)
        one_hot_label = ((torch.arange(c)).cuda() == label).float()

        if self.eps == 0:
            prior = 0
        else:
            if self.priorType == 'gaussian':
                tensor = (torch.arange(c).cuda() - label).float()
                prior = NormalDist(tensor, c / 10)
            elif self.priorType == 'uniform':
                prior = 1 / (c-1)

        smoothed_label = (1 - self.eps) * one_hot_label + self.eps * prior * (1 - one_hot_label)
        entropy = smoothed_label * safe_log(pred) + (1 - smoothed_label) * safe_log(1 - pred)
        return -entropy 
Developer: miraiaroha | Project: ACAN | Lines: 20 | Source: losses.py
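
Example 4 calls two helpers, safe_log and NormalDist, that live elsewhere in the ACAN project and are not shown here. A plausible minimal sketch, assuming safe_log is a log clamped away from zero and NormalDist is a Gaussian density over the signed class distances (the original implementations may differ):

import math
import torch

def safe_log(x, eps=1e-8):
    # Assumed behavior: clamp before log so log(0) cannot produce -inf.
    return torch.log(x.clamp(min=eps))

def NormalDist(x, sigma):
    # Assumed behavior: Gaussian density at the class-distance tensor x.
    return torch.exp(-x ** 2 / (2 * sigma ** 2)) / (sigma * math.sqrt(2 * math.pi))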

Example 5: calculate_outputs_and_gradients

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import softmax [as alias]
def calculate_outputs_and_gradients(inputs, model, target_label_idx, cuda=False):
    # do the pre-processing
    predict_idx = None
    gradients = []
    for input in inputs:
        input = pre_processing(input, cuda)
        output = model(input)
        output = F.softmax(output, dim=1)
        if target_label_idx is None:
            target_label_idx = torch.argmax(output, 1).item()
        index = np.ones((output.size()[0], 1)) * target_label_idx
        index = torch.tensor(index, dtype=torch.int64)
        if cuda:
            index = index.cuda()
        output = output.gather(1, index)
        # clear grad
        model.zero_grad()
        output.backward()
        gradient = input.grad.detach().cpu().numpy()[0]
        gradients.append(gradient)
    gradients = np.array(gradients)
    return gradients, target_label_idx 
Developer: TianhongDai | Project: integrated-gradient-pytorch | Lines: 24 | Source: utils.py
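
Example 5 also depends on a project-local pre_processing helper that is not shown; for input.grad to be populated, it must return a tensor with gradient tracking enabled. A hypothetical stand-in, assuming an HWC uint8 image and ImageNet-style normalization:

import numpy as np
import torch

def pre_processing(obs, cuda=False):
    # Hypothetical helper: normalize, convert HWC -> CHW, add a batch
    # dimension, and enable gradient tracking on the result.
    mean = np.array([0.485, 0.456, 0.406], dtype=np.float32)
    std = np.array([0.229, 0.224, 0.225], dtype=np.float32)
    obs = (obs.astype(np.float32) / 255.0 - mean) / std
    obs = np.transpose(obs, (2, 0, 1))[np.newaxis, ...]
    tensor = torch.from_numpy(obs)
    if cuda:
        tensor = tensor.cuda()
    return tensor.requires_grad_(True)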

Example 6: _visualize_params

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import softmax [as alias]
def _visualize_params(logits_pis, means, log_scales, channel):
    """
    :param logits_pis:  NCKHW
    :param means: NCKHW
    :param log_scales: NCKHW
    :param channel: int
    :return:
    """
    assert logits_pis.shape == means.shape == log_scales.shape
    logits_pis = logits_pis[0, channel, ...].detach()
    means = means[0, channel, ...].detach()
    log_scales = log_scales[0, channel, ...].detach()

    pis = torch.softmax(logits_pis, dim=0)  # Kdim==0 -> KHW

    mixtures = ft.lconcat(
            zip(_iter_Kdim_normalized(pis, normalize=False),
                _iter_Kdim_normalized(means),
                _iter_Kdim_normalized(log_scales)))
    grid = vis.grid.prep_for_grid(mixtures)
    img = torchvision.utils.make_grid(grid, nrow=3)
    return img 
Developer: fab-jul | Project: L3C-PyTorch | Lines: 24 | Source: logistic_mixture.py

Example 7: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import softmax [as alias]
def forward(self, input, hidden, encoder_outputs):
        embedded = self.embedding(input).view(1, 1, -1)
        embedded = self.dropout(embedded)

        attn_weights = F.softmax(
            self.attn(torch.cat((embedded[0], hidden[0]), 1)), dim=1)
        attn_applied = torch.bmm(attn_weights.unsqueeze(0),
                                 encoder_outputs.unsqueeze(0))

        output = torch.cat((embedded[0], attn_applied[0]), 1)
        output = self.attn_combine(output).unsqueeze(0)

        output = F.relu(output)
        output, hidden = self.gru(output, hidden)

        output = F.log_softmax(self.out(output[0]), dim=1)
        return output, hidden, attn_weights 
Developer: EvilPsyCHo | Project: TaskBot | Lines: 19 | Source: tutorial.py

Example 8: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import softmax [as alias]
def forward(self, x):
        # x: (batch, sentence_length)
        x = self.embed(x)
        # x: (batch, sentence_length, embed_dim)
        # TODO init embed matrix with pre-trained
        x = x.unsqueeze(1)
        # x: (batch, 1, sentence_length, embed_dim)
        x1 = self.conv_and_pool(x, self.conv11)  # (batch, kernel_num)
        x2 = self.conv_and_pool(x, self.conv12)  # (batch, kernel_num)
        x3 = self.conv_and_pool(x, self.conv13)  # (batch, kernel_num)
        x = torch.cat((x1, x2, x3), 1)  # (batch, 3 * kernel_num)
        x = self.dropout(x)
        logit = F.log_softmax(self.fc1(x), dim=1)
        # logit = F.softmax(self.fc1(x), dim=1)
        # logit = self.fc1(x)
        return logit 
Developer: EvilPsyCHo | Project: TaskBot | Lines: 18 | Source: text_cnn.py

Example 9: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import softmax [as alias]
def forward(self, decoder_hidden, encoder_outputs):
        """

        Args:
            decoder_hidden: <torch.FloatTensor>, shape(B,H)
                    previous hidden state of the last layer in decoder
            encoder_outputs: <torch.FloatTensor>, shape(T,B,H)
                    encoder outputs

        Returns:
            normalized attention weights: <torch.FloatTensor>, shape(B,T)
        """
        max_len = encoder_outputs.size(0)
        H = decoder_hidden.repeat(max_len, 1, 1).transpose(0, 1)  # (B,T,H)
        encoder_outputs = encoder_outputs.transpose(0, 1)  # (B,T,H)
        attn_energies = self.score(H, encoder_outputs)  # (B,T)
        return F.softmax(attn_energies, dim=1).unsqueeze(1)  # (B,1,T)
Developer: EvilPsyCHo | Project: TaskBot | Lines: 19 | Source: attention.py

Example 10: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import softmax [as alias]
def forward(self, input_set):
    """
      Args:
        input_set: shape N X D

      Returns:
        output_vec: shape 1 X 2D
    """
    num_element = input_set.shape[0]
    element_dim = input_set.shape[1]
    assert element_dim == self.element_dim
    hidden = torch.zeros(1, 2 * self.element_dim).to(input_set.device)
    memory = torch.zeros(1, self.element_dim).to(input_set.device)

    for tt in range(self.num_step_encoder):
      hidden, memory = self.LSTM(hidden, memory)
      energy = torch.tanh(torch.mm(hidden, self.W_1) + input_set).mm(self.W_2)
      att_weight = F.softmax(energy, dim=0)
      read = (input_set * att_weight).sum(dim=0, keepdim=True)
      hidden = torch.cat([hidden, read], dim=1)

    return hidden 
Developer: lrjconan | Project: LanczosNetwork | Lines: 24 | Source: set2set.py

Example 11: get_z_scores

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import softmax [as alias]
def get_z_scores(self, df_test):
        """Get softmaxed rationale importances.

        :param df_test: dataframe containing test data labels, tokens, masks,
            and counts
        :type df_test: pd.DataFrame
        :return:
            z_scores: softmaxed rationale scores with dimension
                (batch_size, length)
        :rtype: torch.FloatTensor
        """
        batch_dict = generate_data(df_test, self.use_cuda)
        x_tokens = batch_dict["x"]
        mask = batch_dict["m"]
        z_scores, _, _ = self.generator(x_tokens, mask)
        z_scores = F.softmax(z_scores, dim=-1)

        return z_scores 
Developer: interpretml | Project: interpret-text | Lines: 20 | Source: model.py

Example 12: _region_classification

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import softmax [as alias]
def _region_classification(self, fc7): 
    cls_score = self.cls_score_net(fc7)
    cls_pred = torch.max(cls_score, 1)[1]  # the prediction class of each bbox
    cls_prob = F.softmax(cls_score, dim=1)
    bbox_pred = self.bbox_pred_net(fc7)
    bbox_prob = torch.stack([F.softmax(bbox_pred[:, i], dim=0) for i in range(bbox_pred.size(1))], 1)
    fuse_prob = cls_prob.mul(bbox_prob)
    image_prob = fuse_prob.sum(0, keepdim=True)
    
    self._predictions["cls_pred"] = cls_pred
    self._predictions["cls_prob"] = cls_prob
    self._predictions["bbox_prob"] = bbox_prob
    self._predictions["fuse_prob"] = fuse_prob
    self._predictions["image_prob"] = image_prob

    return cls_prob, bbox_prob, fuse_prob, image_prob 
Developer: Sunarker | Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection | Lines: 18 | Source: network.py

Example 13: _region_classification_fast

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import softmax [as alias]
def _region_classification_fast(self, fc7):
    cls_score = self.cls_score_net_fast(fc7)
    cls_pred = torch.max(cls_score, 1)[1]
    cls_prob = F.softmax(cls_score, dim=1)
    bbox_pred = self.bbox_pred_net_fast(fc7)

    self._predictions["cls_score_fast"] = cls_score
    self._predictions["cls_pred_fast"] = cls_pred
    self._predictions["cls_prob_fast"] = cls_prob
    self._predictions["bbox_pred_fast"] = bbox_pred

    return cls_prob, bbox_pred 
Developer: Sunarker | Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection | Lines: 14 | Source: network.py

Example 14: attention

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import softmax [as alias]
def attention(query, key, value, mask=None, dropout=None):
    """Compute 'Scaled Dot Product Attention' """
    d_k = query.size(-1)
    scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
    if mask is not None:
        scores = scores.masked_fill(mask == 0, -1e9)
    p_attn = F.softmax(scores, dim=-1)
    if dropout is not None:
        p_attn = dropout(p_attn)
    return torch.matmul(p_attn, value), p_attn 
Developer: Nrgeup | Project: controllable-text-attribute-transfer | Lines: 12 | Source: model2.py
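
The attention function in Example 14 is self-contained apart from its imports (math, torch, and F), so a quick usage sketch is straightforward (assuming the function above is in scope):

import torch

q = torch.randn(2, 4, 8)   # [batch, T_q, d_k]
k = torch.randn(2, 6, 8)   # [batch, T_k, d_k]
v = torch.randn(2, 6, 8)

# Mask out the last two key positions; the function treats 0 as masked.
mask = torch.ones(2, 4, 6)
mask[:, :, 4:] = 0

ctx, p_attn = attention(q, k, v, mask=mask)
print(ctx.shape, p_attn.shape)       # torch.Size([2, 4, 8]) torch.Size([2, 4, 6])
print(p_attn[..., 4:].max().item())  # ~0: masked keys get negligible weight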

Example 15: forward

# Required import: from torch.nn import functional [as alias]
# Or: from torch.nn.functional import softmax [as alias]
def forward(self, x1, x2, x2_mask):
        '''
        Input:
        x1: batch x word_num1 x dim
        x2: batch x word_num2 x dim
        Output:
        scores: batch x word_num1 x word_num2
        '''
        # x1 = dropout(x1, p = dropout_p, training = self.training)
        # x2 = dropout(x2, p = dropout_p, training = self.training)

        x1_rep = x1
        x2_rep = x2
        batch = x1_rep.size(0)
        word_num1 = x1_rep.size(1)
        word_num2 = x2_rep.size(1)
        dim = x1_rep.size(2)
        if self.correlation_func == 2 or self.correlation_func == 3:
            x1_rep = self.linear(x1_rep.contiguous().view(-1, dim)).view(batch, word_num1, self.hidden_size)  # Wx1
            x2_rep = self.linear(x2_rep.contiguous().view(-1, dim)).view(batch, word_num2, self.hidden_size)  # Wx2
            if self.correlation_func == 3:
                x1_rep = F.relu(x1_rep)
                x2_rep = F.relu(x2_rep)
            x1_rep = x1_rep * self.diagonal.expand_as(x1_rep)
            # x1_rep is (Wx1)D or Relu(Wx1)D
            # x1_rep: batch x word_num1 x dim (corr=1) or hidden_size (corr=2,3)

        if self.correlation_func == 4:
            x2_rep = self.linear(x2_rep.contiguous().view(-1, dim)).view(batch, word_num2, dim)  # Wx2

        if self.correlation_func == 5:
            x1_rep = self.linear(x1_rep.contiguous().view(-1, dim)).view(batch, word_num1, self.hidden_size)  # Wx1
            x2_rep = self.linear(x2_rep.contiguous().view(-1, dim)).view(batch, word_num2, self.hidden_size)  # Wx2
            x1_rep = F.relu(x1_rep)
            x2_rep = F.relu(x2_rep)
        scores = x1_rep.bmm(x2_rep.transpose(1, 2))
        empty_mask = x2_mask.eq(0).unsqueeze(1).expand_as(scores)  # (batch, word_num1, word_num2)
        scores.data.masked_fill_(empty_mask.data, -float('inf'))
        # softmax
        alpha_flat = F.softmax(scores, dim=-1)
        return alpha_flat 
Developer: Nrgeup | Project: controllable-text-attribute-transfer | Lines: 43 | Source: model2.py


Note: the torch.nn.functional.softmax examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective developers; copyright remains with the original authors, and any use or redistribution is governed by each project's license. Please do not reproduce without permission.