

Python Variable.sort Method Code Examples

This article collects typical usage examples of the Python method torch.autograd.Variable.sort. If you have been wondering what Variable.sort does, how to call it, or where to find working examples, the curated code samples below should help. You can also explore further usage examples of torch.autograd.Variable.


Three code examples of the Variable.sort method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
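
Before the full examples, here is a minimal sketch (not taken from the projects below) of what Variable.sort itself does: it returns a tuple of sorted values and the corresponding original indices, and sort(1, True) sorts along dimension 1 in descending order. Note that in modern PyTorch, Variable has been merged into Tensor and the wrapper is a no-op.

import torch
from torch.autograd import Variable

# hypothetical scores: 2 examples x 4 candidates each
scores = Variable(torch.randn(2, 4))
# sort along dim 1, descending; returns (sorted values, original indices)
srtd_scores, srtd_inds = scores.sort(1, True)
# srtd_scores[i, 0] is example i's best score;
# srtd_inds[i] ranks example i's candidates from best to worst.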

Example 1: _score_candidates

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import sort [as alias]
    def _score_candidates(self, cands, xe, encoder_output, hidden):
        # score each candidate separately

        # cands are exs_with_cands x cands_per_ex x words_per_cand
        # cview is total_cands x words_per_cand
        cview = cands.view(-1, cands.size(2))
        cands_xes = xe.expand(xe.size(0), cview.size(0), xe.size(2))
        sz = hidden.size()
        cands_hn = (
            hidden.view(sz[0], sz[1], 1, sz[2])
            .expand(sz[0], sz[1], cands.size(1), sz[2])
            .contiguous()
            .view(sz[0], -1, sz[2])
        )

        sz = encoder_output.size()
        cands_encoder_output = (
            encoder_output.contiguous()
            .view(sz[0], 1, sz[1], sz[2])
            .expand(sz[0], cands.size(1), sz[1], sz[2])
            .contiguous()
            .view(-1, sz[1], sz[2])
        )

        cand_scores = Variable(
            self.cand_scores.resize_(cview.size(0)).fill_(0))
        cand_lengths = Variable(
            self.cand_lengths.resize_(cview.size(0)).fill_(0))

        for i in range(cview.size(1)):
            output = self._apply_attention(cands_xes, cands_encoder_output, cands_hn) \
                    if self.use_attention else cands_xes

            output, cands_hn = self.decoder(output, cands_hn)
            preds, scores = self.hidden_to_idx(output, dropout=False)
            cs = cview.select(1, i)
            non_nulls = cs.ne(self.NULL_IDX)
            cand_lengths += non_nulls.long()
            score_per_cand = torch.gather(scores, 1, cs.unsqueeze(1))
            cand_scores += score_per_cand.squeeze() * non_nulls.float()
            cands_xes = self.lt2dec(self.lt(cs).unsqueeze(0))

        # set empty scores to -1, so when divided by 0 they become -inf
        cand_scores -= cand_lengths.eq(0).float()
        # average the scores per token
        cand_scores /= cand_lengths.float()

        cand_scores = cand_scores.view(cands.size(0), cands.size(1))
        srtd_scores, text_cand_inds = cand_scores.sort(1, True)
        text_cand_inds = text_cand_inds.data

        return text_cand_inds
Developer: jojonki, Project: ParlAI, Lines of code: 54, Source file: seq2seq.py
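
The key move in Example 1 is replicating each example's hidden state once per candidate with view/expand, so every candidate is decoded from its own example's initial state. A standalone sketch of just that expansion, with hypothetical sizes:

import torch

layers, bsz, hsz, cands_per_ex = 1, 2, 4, 3
hidden = torch.randn(layers, bsz, hsz)
cands_hn = (hidden.view(layers, bsz, 1, hsz)
                  .expand(layers, bsz, cands_per_ex, hsz)
                  .contiguous()
                  .view(layers, bsz * cands_per_ex, hsz))
# cands_hn[:, 0:3] are three copies of hidden[:, 0], and so on:
# all candidates of the same example share one initial hidden state.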

Example 2: _score_candidates

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import sort [as alias]

# ......... part of the code is omitted here .........
        # cands are exs_with_cands x cands_per_ex x words_per_cand
        # cview is total_cands x words_per_cand
        cview = cands.view(-1, cands.size(2))
        c_xes = start.expand(cview.size(0), start.size(0), start.size(1))

        if len(cand_inds) != hidden.size(1):
            # only use hidden state from inputs with associated candidates
            cand_indices = torch.LongTensor([i for i, _, _ in cand_inds])
            if self.use_cuda:
                cand_indices = cand_indices.cuda()
            cand_indices = Variable(cand_indices)
            hidden = hidden.index_select(1, cand_indices)

        sz = hidden.size()
        cands_hn = (
            hidden.view(sz[0], sz[1], 1, sz[2])
            .expand(sz[0], sz[1], cands.size(1), sz[2])
            .contiguous()
            .view(sz[0], -1, sz[2])
        )
        if type(self.decoder) == nn.LSTM:
            if len(cand_inds) != cell.size(1):
                # only use cell state from inputs with associated candidates
                cell = cell.index_select(1, cand_indices)
            cands_hn = (cands_hn, cell.view(sz[0], sz[1], 1, sz[2])
                                      .expand(sz[0], sz[1], cands.size(1), sz[2])
                                      .contiguous()
                                      .view(sz[0], -1, sz[2]))

        cand_scores = Variable(
            self.cand_scores.resize_(cview.size(0)).fill_(0))
        cand_lengths = Variable(
            self.cand_lengths.resize_(cview.size(0)).fill_(0))

        if self.attention != 'none':
            # using attention
            # select only encoder output matching xs we want
            if len(cand_inds) != len(encoder_output):
                indices = torch.LongTensor([i[0] for i in cand_inds])
                if self.use_cuda:
                    indices = indices.cuda()
                indices = Variable(indices)
                encoder_output = encoder_output.index_select(0, indices)
                attn_mask = attn_mask.index_select(0, indices)

            sz = encoder_output.size()
            cands_encoder_output = (
                encoder_output.contiguous()
                .view(sz[0], 1, sz[1], sz[2])
                .expand(sz[0], cands.size(1), sz[1], sz[2])
                .contiguous()
                .view(-1, sz[1], sz[2])
            )

            msz = attn_mask.size()
            cands_attn_mask = (
                attn_mask.contiguous()
                .view(msz[0], 1, msz[1])
                .expand(msz[0], cands.size(1), msz[1])
                .contiguous()
                .view(-1, msz[1])
            )
            for i in range(cview.size(1)):
                # process one token at a time
                h_att = cands_hn[0] if type(self.decoder) == nn.LSTM else cands_hn
                output = self._apply_attention(c_xes, cands_encoder_output, h_att, cands_attn_mask)
                output, cands_hn = self.decoder(output, cands_hn)
                _preds, scores = self.hidden_to_idx(output, is_training=False)
                cs = cview.select(1, i)
                non_nulls = cs.ne(self.NULL_IDX)
                cand_lengths += non_nulls.long()
                score_per_cand = torch.gather(scores.squeeze(), 1, cs.unsqueeze(1))
                cand_scores += score_per_cand.squeeze() * non_nulls.float()
                c_xes = self.dec_lt(cs).unsqueeze(1)
        else:
            # process entire sequence at once
            if cview.size(1) > 1:
                # feed in START plus all candidate tokens but the last (cview[:, :-1])
                cands_in = cview.narrow(1, 0, cview.size(1) - 1)
                c_xes = torch.cat([c_xes, self.dec_lt(cands_in)], 1)
            output, cands_hn = self.decoder(c_xes, cands_hn)
            _preds, scores = self.hidden_to_idx(output, is_training=False)

            for i in range(cview.size(1)):
                # calculate score at each token
                cs = cview.select(1, i)
                non_nulls = cs.ne(self.NULL_IDX)
                cand_lengths += non_nulls.long()
                score_per_cand = torch.gather(scores.select(1, i), 1, cs.unsqueeze(1))
                cand_scores += score_per_cand.squeeze() * non_nulls.float()

        # set empty scores to -1, so when divided by 0 they become -inf
        cand_scores -= cand_lengths.eq(0).float()
        # average the scores per token
        cand_scores /= cand_lengths.float()

        cand_scores = cand_scores.view(cands.size(0), cands.size(1))
        srtd_scores, text_cand_inds = cand_scores.sort(1, True)

        return text_cand_inds
Developer: youlei5898, Project: ParlAI, Lines of code: 104, Source file: seq2seq.py
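
Example 2 also narrows the decoder state down to just the batch rows that actually have candidate sets, using index_select along the batch dimension. A minimal sketch of that filtering step (the shapes and cand_inds contents are hypothetical):

import torch

hidden = torch.randn(2, 4, 8)   # layers x batch x hidden size
# hypothetical: only batch rows 0 and 2 have candidate sets
cand_inds = [(0, None, None), (2, None, None)]
cand_indices = torch.LongTensor([i for i, _, _ in cand_inds])
hidden = hidden.index_select(1, cand_indices)   # now layers x 2 x hidden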

Example 3: forward

# Required import: from torch.autograd import Variable [as alias]
# Or: from torch.autograd.Variable import sort [as alias]

# ......... part of the code is omitted here .........
        bsz = hidden.size(1)
        esz = hidden.size(2)
        cands_per_ex = cands.size(1)
        words_per_cand = cands.size(2)

        # score each candidate separately
        # cands are exs_with_cands x cands_per_ex x words_per_cand
        # cview is total_cands x words_per_cand
        cview = cands.view(-1, words_per_cand)
        total_cands = cview.size(0)
        starts = start.expand(total_cands).unsqueeze(1)

        if len(cand_inds) != hidden.size(1):
            # select hidden states which have associated cands
            cand_indices = Variable(start.data.new([i[0] for i in cand_inds]))
            hidden = hidden.index_select(1, cand_indices)

        h_exp = (
            # expand hidden states so each cand has an initial hidden state
            # cands for the same input have the same initial hidden state
            hidden.unsqueeze(2)
            .expand(num_hid, bsz, cands_per_ex, esz)
            .contiguous()
            .view(num_hid, -1, esz))

        if cell is None:
            cands_hn = h_exp
        else:
            if len(cand_inds) != cell.size(1):
                # only use cell state from inputs with associated candidates
                cell = cell.index_select(1, cand_indices)
            c_exp = (
                cell.unsqueeze(2)
                .expand(num_hid, bsz, cands_per_ex, esz)
                .contiguous()
                .view(num_hid, -1, esz))
            cands_hn = (h_exp, c_exp)

        cand_scores = Variable(self.buffer(hidden, 'cand_scores', total_cands))
        cand_lens = Variable(self.buffer(start, 'cand_lens', total_cands))

        if self.attn_type == 'none':
            # process entire sequence at once
            if cview.size(1) > 1:
                # feed in START plus all candidate tokens but the last (cview[:, :-1])
                cands_in = cview.narrow(1, 0, cview.size(1) - 1)
                starts = torch.cat([starts, self.dec_lt(cands_in)], 1)
            _preds, score, _h = self.decoder(starts, cands_hn, enc_out, attn_mask)

            for i in range(cview.size(1)):
                # calculate score at each token
                cs = cview.select(1, i)
                non_nulls = cs.ne(self.NULL_IDX)
                cand_lens += non_nulls.long()
                score_per_cand = torch.gather(score.select(1, i), 1,
                                              cs.unsqueeze(1))
                cand_scores += score_per_cand.squeeze() * non_nulls.float()
        else:
            # using attention
            if len(cand_inds) != len(enc_out):
                # select only encoder output matching xs we want
                indices = Variable(start.data.new([i[0] for i in cand_inds]))
                enc_out = enc_out.index_select(0, indices)
                attn_mask = attn_mask.index_select(0, indices)

            seq_len = enc_out.size(1)
            cands_enc_out = (
                enc_out.unsqueeze(1)
                .expand(bsz, cands_per_ex, seq_len, esz)
                .contiguous()
                .view(-1, seq_len, esz)
            )
            cands_attn_mask = (
                attn_mask.unsqueeze(1)
                .expand(bsz, cands_per_ex, seq_len)
                .contiguous()
                .view(-1, seq_len)
            )

            cs = starts
            for i in range(cview.size(1)):
                # process one token at a time
                _preds, score, _h = self.decoder(cs, cands_hn, cands_enc_out,
                                                 cands_attn_mask)
                cs = cview.select(1, i)
                non_nulls = cs.ne(self.NULL_IDX)
                cand_lens += non_nulls.long()
                score_per_cand = torch.gather(score.squeeze(), 1,
                                              cs.unsqueeze(1))
                cand_scores += score_per_cand.squeeze() * non_nulls.float()

        # set empty scores to -1, so when divided by 0 they become -inf
        cand_scores -= cand_lens.eq(0).float()
        # average the scores per token
        cand_scores /= cand_lens.float()

        cand_scores = cand_scores.view(cands.size(0), cands.size(1))
        _srtd_scores, text_cand_inds = cand_scores.sort(1, True)

        return text_cand_inds
Developer: ahiroto, Project: ParlAI, Lines of code: 104, Source file: modules.py
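
All three examples share the same scoring core: gather each candidate token's log-probability, mask out NULL (padding) tokens, length-normalize, then sort descending. A condensed, standalone sketch of that pattern (the tensor shapes, helper name, and NULL_IDX are hypothetical, not ParlAI's actual API):

import torch

NULL_IDX = 0  # hypothetical padding token id

def score_candidates(log_probs, cview):
    # log_probs: total_cands x words_per_cand x vocab (per-token log-probs)
    # cview:     total_cands x words_per_cand (candidate token ids)
    tok_scores = log_probs.gather(2, cview.unsqueeze(2)).squeeze(2)
    non_nulls = cview.ne(NULL_IDX).float()          # mask out padding
    cand_scores = (tok_scores * non_nulls).sum(1)   # sum over real tokens
    cand_lens = non_nulls.sum(1)
    # empty candidates: score becomes -1, so -1 / 0 == -inf below
    cand_scores = cand_scores - cand_lens.eq(0).float()
    return cand_scores / cand_lens                  # average per token

log_probs = torch.randn(6, 5, 100).log_softmax(2)
cview = torch.randint(0, 100, (6, 5))
scores = score_candidates(log_probs, cview)
# rank the 3 candidates of each of the 2 examples, best first
srtd_scores, text_cand_inds = scores.view(2, 3).sort(1, True)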


Note: The torch.autograd.Variable.sort examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors, and use or distribution should follow the corresponding project's License. Do not reproduce without permission.